Dataset columns:

| column | type |
|---|---|
| repository_name | string (length 5 to 67) |
| func_path_in_repository | string (length 4 to 234) |
| func_name | string (length 0 to 314) |
| whole_func_string | string (length 52 to 3.87M) |
| language | string (6 distinct values) |
| func_code_string | string (length 52 to 3.87M) |
| func_code_tokens | sequence |
| func_documentation_string | string (length 1 to 47.2k) |
| func_documentation_tokens | sequence |
| split_name | string (1 distinct value) |
| func_code_url | string (length 85 to 339) |
| parameters | sequence |
| question | string (length 9 to 114) |
| answer | sequence |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: PyString.get_value

    def get_value(self, context):
        """Run python eval on the input string."""
        if self.value:
            return expressions.eval_string(self.value, context)
        else:
            # Empty input raises cryptic EOF syntax err, this more human
            # friendly
            raise ValueError('!py string expression is empty. It must be a '
                             'valid python expression instead.')

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L107-L115 | parameters: context | question: What does this function do? | answer: Run python eval on the input string.

"""Run python eval on the input string."""
if self.value:
return expressions.eval_string(self.value, context)
else:
# Empty input raises cryptic EOF syntax err, this more human
# friendly
raise ValueError('!py string expression is empty. It must be a '
'valid python expression instead.') | [
"def",
"get_value",
"(",
"self",
",",
"context",
")",
":",
"if",
"self",
".",
"value",
":",
"return",
"expressions",
".",
"eval_string",
"(",
"self",
".",
"value",
",",
"context",
")",
"else",
":",
"# Empty input raises cryptic EOF syntax err, this more human",
"# friendly",
"raise",
"ValueError",
"(",
"'!py string expression is empty. It must be a '",
"'valid python expression instead.'",
")"
] | Run python eval on the input string. | [
"Run",
"python",
"eval",
"on",
"the",
"input",
"string",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L107-L115 | [
"context"
] | What does this function do? | [
"Run",
"python",
"eval",
"on",
"the",
"input",
"string",
"."
] |
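As a rough illustration of the behaviour the docstring above describes, the sketch below assumes PyString stores the expression string passed to its constructor and that context keys are visible as names inside the evaluated expression; neither assumption is confirmed by this record.

```python
# Hedged sketch only: PyString(value) construction and the eval namespace
# are assumptions, not shown in the record above.
from pypyr.context import Context
from pypyr.dsl import PyString

expression = PyString("len(breakfast) > 2")
result = expression.get_value(Context({'breakfast': 'eggs'}))  # expected: True
```
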
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.foreach_loop

    def foreach_loop(self, context):
        """Run step once for each item in foreach_items.
        On each iteration, the invoked step can use context['i'] to get the
        current iterator value.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate.
        """
        logger.debug("starting")
        # Loop decorators only evaluated once, not for every step repeat
        # execution.
        foreach = context.get_formatted_iterable(self.foreach_items)
        foreach_length = len(foreach)
        logger.info(f"foreach decorator will loop {foreach_length} times.")
        for i in foreach:
            logger.info(f"foreach: running step {i}")
            # the iterator must be available to the step when it executes
            context['i'] = i
            # conditional operators apply to each iteration, so might be an
            # iteration run, skips or swallows.
            self.run_conditional_decorators(context)
            logger.debug(f"foreach: done step {i}")
        logger.debug(f"foreach decorator looped {foreach_length} times.")
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L253-L283 | parameters: context | question: What does this function do? | answer: Run step once for each item in foreach_items.

"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
# the iterator must be available to the step when it executes
context['i'] = i
# conditional operators apply to each iteration, so might be an
# iteration run, skips or swallows.
self.run_conditional_decorators(context)
logger.debug(f"foreach: done step {i}")
logger.debug(f"foreach decorator looped {foreach_length} times.")
logger.debug("done") | [
"def",
"foreach_loop",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# Loop decorators only evaluated once, not for every step repeat",
"# execution.",
"foreach",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"self",
".",
"foreach_items",
")",
"foreach_length",
"=",
"len",
"(",
"foreach",
")",
"logger",
".",
"info",
"(",
"f\"foreach decorator will loop {foreach_length} times.\"",
")",
"for",
"i",
"in",
"foreach",
":",
"logger",
".",
"info",
"(",
"f\"foreach: running step {i}\"",
")",
"# the iterator must be available to the step when it executes",
"context",
"[",
"'i'",
"]",
"=",
"i",
"# conditional operators apply to each iteration, so might be an",
"# iteration run, skips or swallows.",
"self",
".",
"run_conditional_decorators",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"f\"foreach: done step {i}\"",
")",
"logger",
".",
"debug",
"(",
"f\"foreach decorator looped {foreach_length} times.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Run",
"step",
"once",
"for",
"each",
"item",
"in",
"foreach_items",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L253-L283 | [
"context"
] | What does this function do? | [
"Run",
"step",
"once",
"for",
"each",
"item",
"in",
"foreach_items",
"."
] |
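The docstring above notes that the invoked step can read the current item from context['i']. A minimal sketch of a custom step module relying on that (the module's purpose and print output are made up for illustration):

```python
# Hypothetical custom pypyr step module. pypyr calls run_step(context) and,
# inside a foreach loop, puts the current item in context['i'].
def run_step(context):
    print(f"processing {context['i']}")
```
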
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.invoke_step

    def invoke_step(self, context):
        """Invoke 'run_step' in the dynamically loaded step module.
        Don't invoke this from outside the Step class. Use
        pypyr.dsl.Step.run_step instead.
        invoke_step just does the bare module step invocation, it does not
        evaluate any of the decorator logic surrounding the step. So unless
        you really know what you're doing, use run_step if you intend on
        executing the step the same way pypyr does.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate.
        """
        logger.debug("starting")
        logger.debug(f"running step {self.module}")
        self.run_step_function(context)
        logger.debug(f"step {self.module} done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L285-L305 | parameters: context | question: What does this function do? | answer: Invoke 'run_step' in the dynamically loaded step module.

"""Invoke 'run_step' in the dynamically loaded step module.
Don't invoke this from outside the Step class. Use
pypyr.dsl.Step.run_step instead.
invoke_step just does the bare module step invocation, it does not
evaluate any of the decorator logic surrounding the step. So unless
you really know what you're doing, use run_step if you intend on
executing the step the same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
logger.debug(f"running step {self.module}")
self.run_step_function(context)
logger.debug(f"step {self.module} done") | [
"def",
"invoke_step",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"logger",
".",
"debug",
"(",
"f\"running step {self.module}\"",
")",
"self",
".",
"run_step_function",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"f\"step {self.module} done\"",
")"
] | Invoke 'run_step' in the dynamically loaded step module.
Don't invoke this from outside the Step class. Use
pypyr.dsl.Step.run_step instead.
invoke_step just does the bare module step invocation, it does not
evaluate any of the decorator logic surrounding the step. So unless
you really know what you're doing, use run_step if you intend on
executing the step the same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Invoke",
"run_step",
"in",
"the",
"dynamically",
"loaded",
"step",
"module",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L285-L305 | [
"context"
] | What does this function do? | [
"Invoke",
"run_step",
"in",
"the",
"dynamically",
"loaded",
"step",
"module",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.run_conditional_decorators

    def run_conditional_decorators(self, context):
        """Evaluate the step decorators to decide whether to run step or not.
        Use pypyr.dsl.Step.run_step if you intend on executing the step the
        same way pypyr does.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate.
        """
        logger.debug("starting")
        # The decorator attributes might contain formatting expressions that
        # change whether they evaluate True or False, thus apply formatting at
        # last possible instant.
        run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
        skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
        swallow_me = context.get_formatted_as_type(self.swallow_me,
                                                   out_type=bool)
        if run_me:
            if not skip_me:
                try:
                    if self.retry_decorator:
                        self.retry_decorator.retry_loop(context,
                                                        self.invoke_step)
                    else:
                        self.invoke_step(context=context)
                except Exception as ex_info:
                    if swallow_me:
                        logger.error(
                            f"{self.name} Ignoring error because swallow "
                            "is True for this step.\n"
                            f"{type(ex_info).__name__}: {ex_info}")
                    else:
                        raise
            else:
                logger.info(
                    f"{self.name} not running because skip is True.")
        else:
            logger.info(f"{self.name} not running because run is False.")
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L307-L349 | parameters: context | question: What does this function do? | answer: Evaluate the step decorators to decide whether to run step or not.

"""Evaluate the step decorators to decide whether to run step or not.
Use pypyr.dsl.Step.run_step if you intend on executing the step the
same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# The decorator attributes might contain formatting expressions that
# change whether they evaluate True or False, thus apply formatting at
# last possible instant.
run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
swallow_me = context.get_formatted_as_type(self.swallow_me,
out_type=bool)
if run_me:
if not skip_me:
try:
if self.retry_decorator:
self.retry_decorator.retry_loop(context,
self.invoke_step)
else:
self.invoke_step(context=context)
except Exception as ex_info:
if swallow_me:
logger.error(
f"{self.name} Ignoring error because swallow "
"is True for this step.\n"
f"{type(ex_info).__name__}: {ex_info}")
else:
raise
else:
logger.info(
f"{self.name} not running because skip is True.")
else:
logger.info(f"{self.name} not running because run is False.")
logger.debug("done") | [
"def",
"run_conditional_decorators",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# The decorator attributes might contain formatting expressions that",
"# change whether they evaluate True or False, thus apply formatting at",
"# last possible instant.",
"run_me",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"run_me",
",",
"out_type",
"=",
"bool",
")",
"skip_me",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"skip_me",
",",
"out_type",
"=",
"bool",
")",
"swallow_me",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"swallow_me",
",",
"out_type",
"=",
"bool",
")",
"if",
"run_me",
":",
"if",
"not",
"skip_me",
":",
"try",
":",
"if",
"self",
".",
"retry_decorator",
":",
"self",
".",
"retry_decorator",
".",
"retry_loop",
"(",
"context",
",",
"self",
".",
"invoke_step",
")",
"else",
":",
"self",
".",
"invoke_step",
"(",
"context",
"=",
"context",
")",
"except",
"Exception",
"as",
"ex_info",
":",
"if",
"swallow_me",
":",
"logger",
".",
"error",
"(",
"f\"{self.name} Ignoring error because swallow \"",
"\"is True for this step.\\n\"",
"f\"{type(ex_info).__name__}: {ex_info}\"",
")",
"else",
":",
"raise",
"else",
":",
"logger",
".",
"info",
"(",
"f\"{self.name} not running because skip is True.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"{self.name} not running because run is False.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Evaluate the step decorators to decide whether to run step or not.
Use pypyr.dsl.Step.run_step if you intend on executing the step the
same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Evaluate",
"the",
"step",
"decorators",
"to",
"decide",
"whether",
"to",
"run",
"step",
"or",
"not",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L307-L349 | [
"context"
] | What does this function do? | [
"Evaluate",
"the",
"step",
"decorators",
"to",
"decide",
"whether",
"to",
"run",
"step",
"or",
"not",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.run_foreach_or_conditional

    def run_foreach_or_conditional(self, context):
        """Run the foreach sequence or the conditional evaluation.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate.
        """
        logger.debug("starting")
        # friendly reminder [] list obj (i.e empty) evals False
        if self.foreach_items:
            self.foreach_loop(context)
        else:
            # since no looping required, don't pollute output with looping info
            self.run_conditional_decorators(context)
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L351-L366 | parameters: context | question: What does this function do? | answer: Run the foreach sequence or the conditional evaluation.

"""Run the foreach sequence or the conditional evaluation.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# friendly reminder [] list obj (i.e empty) evals False
if self.foreach_items:
self.foreach_loop(context)
else:
# since no looping required, don't pollute output with looping info
self.run_conditional_decorators(context)
logger.debug("done") | [
"def",
"run_foreach_or_conditional",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# friendly reminder [] list obj (i.e empty) evals False",
"if",
"self",
".",
"foreach_items",
":",
"self",
".",
"foreach_loop",
"(",
"context",
")",
"else",
":",
"# since no looping required, don't pollute output with looping info",
"self",
".",
"run_conditional_decorators",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run the foreach sequence or the conditional evaluation.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Run",
"the",
"foreach",
"sequence",
"or",
"the",
"conditional",
"evaluation",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L351-L366 | [
"context"
] | What does this function do? | [
"Run",
"the",
"foreach",
"sequence",
"or",
"the",
"conditional",
"evaluation",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.run_step

    def run_step(self, context):
        """Run a single pipeline step.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate.
        """
        logger.debug("starting")
        # the in params should be added to context before step execution.
        self.set_step_input_context(context)
        if self.while_decorator:
            self.while_decorator.while_loop(context,
                                            self.run_foreach_or_conditional)
        else:
            self.run_foreach_or_conditional(context)
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L368-L385 | parameters: context | question: What does this function do? | answer: Run a single pipeline step.

"""Run a single pipeline step.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# the in params should be added to context before step execution.
self.set_step_input_context(context)
if self.while_decorator:
self.while_decorator.while_loop(context,
self.run_foreach_or_conditional)
else:
self.run_foreach_or_conditional(context)
logger.debug("done") | [
"def",
"run_step",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# the in params should be added to context before step execution.",
"self",
".",
"set_step_input_context",
"(",
"context",
")",
"if",
"self",
".",
"while_decorator",
":",
"self",
".",
"while_decorator",
".",
"while_loop",
"(",
"context",
",",
"self",
".",
"run_foreach_or_conditional",
")",
"else",
":",
"self",
".",
"run_foreach_or_conditional",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run a single pipeline step.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Run",
"a",
"single",
"pipeline",
"step",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L368-L385 | [
"context"
] | What does this function do? | [
"Run",
"a",
"single",
"pipeline",
"step",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: Step.set_step_input_context

    def set_step_input_context(self, context):
        """Append step's 'in' parameters to context, if they exist.
        Append the[in] dictionary to the context. This will overwrite
        existing values if the same keys are already in there. I.e if
        in_parameters has {'eggs': 'boiled'} and key 'eggs' already
        exists in context, context['eggs'] hereafter will be 'boiled'.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate - after method execution will contain the new
                updated context.
        """
        logger.debug("starting")
        if self.in_parameters is not None:
            parameter_count = len(self.in_parameters)
            if parameter_count > 0:
                logger.debug(
                    f"Updating context with {parameter_count} 'in' "
                    "parameters.")
                context.update(self.in_parameters)
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L387-L409 | parameters: context | question: What does this function do? | answer: Append step's 'in' parameters to context, if they exist.

"""Append step's 'in' parameters to context, if they exist.
Append the[in] dictionary to the context. This will overwrite
existing values if the same keys are already in there. I.e if
in_parameters has {'eggs': 'boiled'} and key 'eggs' already
exists in context, context['eggs'] hereafter will be 'boiled'.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
"""
logger.debug("starting")
if self.in_parameters is not None:
parameter_count = len(self.in_parameters)
if parameter_count > 0:
logger.debug(
f"Updating context with {parameter_count} 'in' "
"parameters.")
context.update(self.in_parameters)
logger.debug("done") | [
"def",
"set_step_input_context",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"if",
"self",
".",
"in_parameters",
"is",
"not",
"None",
":",
"parameter_count",
"=",
"len",
"(",
"self",
".",
"in_parameters",
")",
"if",
"parameter_count",
">",
"0",
":",
"logger",
".",
"debug",
"(",
"f\"Updating context with {parameter_count} 'in' \"",
"\"parameters.\"",
")",
"context",
".",
"update",
"(",
"self",
".",
"in_parameters",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Append step's 'in' parameters to context, if they exist.
Append the[in] dictionary to the context. This will overwrite
existing values if the same keys are already in there. I.e if
in_parameters has {'eggs': 'boiled'} and key 'eggs' already
exists in context, context['eggs'] hereafter will be 'boiled'.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context. | [
"Append",
"step",
"s",
"in",
"parameters",
"to",
"context",
"if",
"they",
"exist",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L387-L409 | [
"context"
] | What does this function do? | [
"Append",
"step",
"s",
"in",
"parameters",
"to",
"context",
"if",
"they",
"exist",
"."
] |
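A small sketch of the overwrite behaviour the docstring describes, using the context directly rather than a Step instance:

```python
# context.update is exactly what set_step_input_context calls with the
# step's 'in' parameters; same-named keys are overwritten.
from pypyr.context import Context

context = Context({'eggs': 'fried'})
context.update({'eggs': 'boiled'})
assert context['eggs'] == 'boiled'
```
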
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: RetryDecorator.exec_iteration

    def exec_iteration(self, counter, context, step_method):
        """Run a single retry iteration.
        This method abides by the signature invoked by poll.while_until_true,
        which is to say (counter, *args, **kwargs). In a normal execution
        chain, this method's args passed by self.retry_loop where context
        and step_method set. while_until_true injects counter as a 1st arg.
        Args:
            counter. int. loop counter, which number of iteration is this.
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate - after method execution will contain the new
                updated context.
            step_method: (method/function) This is the method/function that
                will execute on every loop iteration. Signature is:
                function(context)
        Returns:
            bool. True if step execution completed without error.
            False if error occured during step execution.
        """
        logger.debug("starting")
        context['retryCounter'] = counter
        logger.info(f"retry: running step with counter {counter}")
        try:
            step_method(context)
            result = True
        except Exception as ex_info:
            if self.max:
                if counter == self.max:
                    logger.debug(f"retry: max {counter} retries exhausted. "
                                 "raising error.")
                    # arguably shouldn't be using errs for control of flow.
                    # but would lose the err info if not, so lesser of 2 evils.
                    raise
            if self.stop_on or self.retry_on:
                error_name = get_error_name(ex_info)
                if self.stop_on:
                    formatted_stop_list = context.get_formatted_iterable(
                        self.stop_on)
                    if error_name in formatted_stop_list:
                        logger.error(f"{error_name} in stopOn. Raising error "
                                     "and exiting retry.")
                        raise
                    else:
                        logger.debug(f"{error_name} not in stopOn. Continue.")
                if self.retry_on:
                    formatted_retry_list = context.get_formatted_iterable(
                        self.retry_on)
                    if error_name not in formatted_retry_list:
                        logger.error(f"{error_name} not in retryOn. Raising "
                                     "error and exiting retry.")
                        raise
                    else:
                        logger.debug(f"{error_name} in retryOn. Retry again.")
            result = False
            logger.error(f"retry: ignoring error because retryCounter < max.\n"
                         f"{type(ex_info).__name__}: {ex_info}")
        logger.debug(f"retry: done step with counter {counter}")
        logger.debug("done")
        return result

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L469-L536 | parameters: counter, context, step_method | question: What does this function do? | answer: Run a single retry iteration.

"""Run a single retry iteration.
This method abides by the signature invoked by poll.while_until_true,
which is to say (counter, *args, **kwargs). In a normal execution
chain, this method's args passed by self.retry_loop where context
and step_method set. while_until_true injects counter as a 1st arg.
Args:
counter. int. loop counter, which number of iteration is this.
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
Returns:
bool. True if step execution completed without error.
False if error occured during step execution.
"""
logger.debug("starting")
context['retryCounter'] = counter
logger.info(f"retry: running step with counter {counter}")
try:
step_method(context)
result = True
except Exception as ex_info:
if self.max:
if counter == self.max:
logger.debug(f"retry: max {counter} retries exhausted. "
"raising error.")
# arguably shouldn't be using errs for control of flow.
# but would lose the err info if not, so lesser of 2 evils.
raise
if self.stop_on or self.retry_on:
error_name = get_error_name(ex_info)
if self.stop_on:
formatted_stop_list = context.get_formatted_iterable(
self.stop_on)
if error_name in formatted_stop_list:
logger.error(f"{error_name} in stopOn. Raising error "
"and exiting retry.")
raise
else:
logger.debug(f"{error_name} not in stopOn. Continue.")
if self.retry_on:
formatted_retry_list = context.get_formatted_iterable(
self.retry_on)
if error_name not in formatted_retry_list:
logger.error(f"{error_name} not in retryOn. Raising "
"error and exiting retry.")
raise
else:
logger.debug(f"{error_name} in retryOn. Retry again.")
result = False
logger.error(f"retry: ignoring error because retryCounter < max.\n"
f"{type(ex_info).__name__}: {ex_info}")
logger.debug(f"retry: done step with counter {counter}")
logger.debug("done")
return result | [
"def",
"exec_iteration",
"(",
"self",
",",
"counter",
",",
"context",
",",
"step_method",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"context",
"[",
"'retryCounter'",
"]",
"=",
"counter",
"logger",
".",
"info",
"(",
"f\"retry: running step with counter {counter}\"",
")",
"try",
":",
"step_method",
"(",
"context",
")",
"result",
"=",
"True",
"except",
"Exception",
"as",
"ex_info",
":",
"if",
"self",
".",
"max",
":",
"if",
"counter",
"==",
"self",
".",
"max",
":",
"logger",
".",
"debug",
"(",
"f\"retry: max {counter} retries exhausted. \"",
"\"raising error.\"",
")",
"# arguably shouldn't be using errs for control of flow.",
"# but would lose the err info if not, so lesser of 2 evils.",
"raise",
"if",
"self",
".",
"stop_on",
"or",
"self",
".",
"retry_on",
":",
"error_name",
"=",
"get_error_name",
"(",
"ex_info",
")",
"if",
"self",
".",
"stop_on",
":",
"formatted_stop_list",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"self",
".",
"stop_on",
")",
"if",
"error_name",
"in",
"formatted_stop_list",
":",
"logger",
".",
"error",
"(",
"f\"{error_name} in stopOn. Raising error \"",
"\"and exiting retry.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"{error_name} not in stopOn. Continue.\"",
")",
"if",
"self",
".",
"retry_on",
":",
"formatted_retry_list",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"self",
".",
"retry_on",
")",
"if",
"error_name",
"not",
"in",
"formatted_retry_list",
":",
"logger",
".",
"error",
"(",
"f\"{error_name} not in retryOn. Raising \"",
"\"error and exiting retry.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"{error_name} in retryOn. Retry again.\"",
")",
"result",
"=",
"False",
"logger",
".",
"error",
"(",
"f\"retry: ignoring error because retryCounter < max.\\n\"",
"f\"{type(ex_info).__name__}: {ex_info}\"",
")",
"logger",
".",
"debug",
"(",
"f\"retry: done step with counter {counter}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"result"
] | Run a single retry iteration.
This method abides by the signature invoked by poll.while_until_true,
which is to say (counter, *args, **kwargs). In a normal execution
chain, this method's args passed by self.retry_loop where context
and step_method set. while_until_true injects counter as a 1st arg.
Args:
counter. int. loop counter, which number of iteration is this.
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
Returns:
bool. True if step execution completed without error.
False if error occured during step execution. | [
"Run",
"a",
"single",
"retry",
"iteration",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L469-L536 | [
"counter",
"context",
"step_method"
] | What does this function do? | [
"Run",
"a",
"single",
"retry",
"iteration",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: RetryDecorator.retry_loop

    def retry_loop(self, context, step_method):
        """Run step inside a retry loop.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate - after method execution will contain the new
                updated context.
            step_method: (method/function) This is the method/function that
                will execute on every loop iteration. Signature is:
                function(context)
        """
        logger.debug("starting")
        context['retryCounter'] = 0
        sleep = context.get_formatted_as_type(self.sleep, out_type=float)
        if self.max:
            max = context.get_formatted_as_type(self.max, out_type=int)
            logger.info(f"retry decorator will try {max} times at {sleep}s "
                        "intervals.")
        else:
            max = None
            logger.info(f"retry decorator will try indefinitely at {sleep}s "
                        "intervals.")
        # this will never be false. because on counter == max,
        # exec_iteration raises an exception, breaking out of the loop.
        # pragma because cov doesn't know the implied else is impossible.
        # unit test cov is 100%, though.
        if poll.while_until_true(interval=sleep,
                                 max_attempts=max)(
                self.exec_iteration)(context=context,
                                     step_method=step_method
                                     ):  # pragma: no cover
            logger.debug("retry loop complete, reporting success.")
        logger.debug("retry loop done")
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L538-L578 | parameters: context, step_method | question: What does this function do? | answer: Run step inside a retry loop.

"""Run step inside a retry loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
"""
logger.debug("starting")
context['retryCounter'] = 0
sleep = context.get_formatted_as_type(self.sleep, out_type=float)
if self.max:
max = context.get_formatted_as_type(self.max, out_type=int)
logger.info(f"retry decorator will try {max} times at {sleep}s "
"intervals.")
else:
max = None
logger.info(f"retry decorator will try indefinitely at {sleep}s "
"intervals.")
# this will never be false. because on counter == max,
# exec_iteration raises an exception, breaking out of the loop.
# pragma because cov doesn't know the implied else is impossible.
# unit test cov is 100%, though.
if poll.while_until_true(interval=sleep,
max_attempts=max)(
self.exec_iteration)(context=context,
step_method=step_method
): # pragma: no cover
logger.debug("retry loop complete, reporting success.")
logger.debug("retry loop done")
logger.debug("done") | [
"def",
"retry_loop",
"(",
"self",
",",
"context",
",",
"step_method",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"context",
"[",
"'retryCounter'",
"]",
"=",
"0",
"sleep",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"sleep",
",",
"out_type",
"=",
"float",
")",
"if",
"self",
".",
"max",
":",
"max",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"max",
",",
"out_type",
"=",
"int",
")",
"logger",
".",
"info",
"(",
"f\"retry decorator will try {max} times at {sleep}s \"",
"\"intervals.\"",
")",
"else",
":",
"max",
"=",
"None",
"logger",
".",
"info",
"(",
"f\"retry decorator will try indefinitely at {sleep}s \"",
"\"intervals.\"",
")",
"# this will never be false. because on counter == max,",
"# exec_iteration raises an exception, breaking out of the loop.",
"# pragma because cov doesn't know the implied else is impossible.",
"# unit test cov is 100%, though.",
"if",
"poll",
".",
"while_until_true",
"(",
"interval",
"=",
"sleep",
",",
"max_attempts",
"=",
"max",
")",
"(",
"self",
".",
"exec_iteration",
")",
"(",
"context",
"=",
"context",
",",
"step_method",
"=",
"step_method",
")",
":",
"# pragma: no cover",
"logger",
".",
"debug",
"(",
"\"retry loop complete, reporting success.\"",
")",
"logger",
".",
"debug",
"(",
"\"retry loop done\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run step inside a retry loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context) | [
"Run",
"step",
"inside",
"a",
"retry",
"loop",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L538-L578 | [
"context",
"step_method"
] | What does this function do? | [
"Run",
"step",
"inside",
"a",
"retry",
"loop",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: WhileDecorator.exec_iteration

    def exec_iteration(self, counter, context, step_method):
        """Run a single loop iteration.
        This method abides by the signature invoked by poll.while_until_true,
        which is to say (counter, *args, **kwargs). In a normal execution
        chain, this method's args passed by self.while_loop where context
        and step_method set. while_until_true injects counter as a 1st arg.
        Args:
            counter. int. loop counter, which number of iteration is this.
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate - after method execution will contain the new
                updated context.
            step_method: (method/function) This is the method/function that
                will execute on every loop iteration. Signature is:
                function(context)
        Returns:
            bool. True if self.stop evaluates to True after step execution,
            False otherwise.
        """
        logger.debug("starting")
        context['whileCounter'] = counter
        logger.info(f"while: running step with counter {counter}")
        step_method(context)
        logger.debug(f"while: done step {counter}")
        result = False
        # if no stop, just iterating to max)
        if self.stop:
            # dynamically evaluate stop after step execution, since the step
            # might have changed True/False status for stop.
            result = context.get_formatted_as_type(self.stop, out_type=bool)
        logger.debug("done")
        return result

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L645-L682 | parameters: counter, context, step_method | question: What does this function do? | answer: Run a single loop iteration.

"""Run a single loop iteration.
This method abides by the signature invoked by poll.while_until_true,
which is to say (counter, *args, **kwargs). In a normal execution
chain, this method's args passed by self.while_loop where context
and step_method set. while_until_true injects counter as a 1st arg.
Args:
counter. int. loop counter, which number of iteration is this.
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
Returns:
bool. True if self.stop evaluates to True after step execution,
False otherwise.
"""
logger.debug("starting")
context['whileCounter'] = counter
logger.info(f"while: running step with counter {counter}")
step_method(context)
logger.debug(f"while: done step {counter}")
result = False
# if no stop, just iterating to max)
if self.stop:
# dynamically evaluate stop after step execution, since the step
# might have changed True/False status for stop.
result = context.get_formatted_as_type(self.stop, out_type=bool)
logger.debug("done")
return result | [
"def",
"exec_iteration",
"(",
"self",
",",
"counter",
",",
"context",
",",
"step_method",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"context",
"[",
"'whileCounter'",
"]",
"=",
"counter",
"logger",
".",
"info",
"(",
"f\"while: running step with counter {counter}\"",
")",
"step_method",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"f\"while: done step {counter}\"",
")",
"result",
"=",
"False",
"# if no stop, just iterating to max)",
"if",
"self",
".",
"stop",
":",
"# dynamically evaluate stop after step execution, since the step",
"# might have changed True/False status for stop.",
"result",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"stop",
",",
"out_type",
"=",
"bool",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"result"
] | Run a single loop iteration.
This method abides by the signature invoked by poll.while_until_true,
which is to say (counter, *args, **kwargs). In a normal execution
chain, this method's args passed by self.while_loop where context
and step_method set. while_until_true injects counter as a 1st arg.
Args:
counter. int. loop counter, which number of iteration is this.
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
Returns:
bool. True if self.stop evaluates to True after step execution,
False otherwise. | [
"Run",
"a",
"single",
"loop",
"iteration",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L645-L682 | [
"counter",
"context",
"step_method"
] | What does this function do? | [
"Run",
"a",
"single",
"loop",
"iteration",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/dsl.py | func_name: WhileDecorator.while_loop

    def while_loop(self, context, step_method):
        """Run step inside a while loop.
        Args:
            context: (pypyr.context.Context) The pypyr context. This arg will
                mutate - after method execution will contain the new
                updated context.
            step_method: (method/function) This is the method/function that
                will execute on every loop iteration. Signature is:
                function(context)
        """
        logger.debug("starting")
        context['whileCounter'] = 0
        if self.stop is None and self.max is None:
            # the ctor already does this check, but guess theoretically
            # consumer could have messed with the props since ctor
            logger.error(f"while decorator missing both max and stop.")
            raise PipelineDefinitionError("the while decorator must have "
                                          "either max or stop, or both. "
                                          "But not neither.")
        error_on_max = context.get_formatted_as_type(
            self.error_on_max, out_type=bool)
        sleep = context.get_formatted_as_type(self.sleep, out_type=float)
        if self.max is None:
            max = None
            logger.info(f"while decorator will loop until {self.stop} "
                        f"evaluates to True at {sleep}s intervals.")
        else:
            max = context.get_formatted_as_type(self.max, out_type=int)
            if max < 1:
                logger.info(
                    f"max {self.max} is {max}. while only runs when max > 0.")
                logger.debug("done")
                return
            if self.stop is None:
                logger.info(f"while decorator will loop {max} times at "
                            f"{sleep}s intervals.")
            else:
                logger.info(f"while decorator will loop {max} times, or "
                            f"until {self.stop} evaluates to True at "
                            f"{sleep}s intervals.")
        if not poll.while_until_true(interval=sleep,
                                     max_attempts=max)(
                self.exec_iteration)(context=context,
                                     step_method=step_method):
            # False means loop exhausted and stop never eval-ed True.
            if error_on_max:
                logger.error(f"exhausted {max} iterations of while loop, "
                             "and errorOnMax is True.")
                if self.stop and max:
                    raise LoopMaxExhaustedError("while loop reached "
                                                f"{max} and {self.stop} "
                                                "never evaluated to True.")
                else:
                    raise LoopMaxExhaustedError(f"while loop reached {max}.")
            else:
                if self.stop and max:
                    logger.info(
                        f"while decorator looped {max} times, "
                        f"and {self.stop} never evaluated to True.")
                logger.debug("while loop done")
        else:
            logger.info(f"while loop done, stop condition {self.stop} "
                        "evaluated True.")
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L684-L757 | parameters: context, step_method | question: What does this function do? | answer: Run step inside a while loop.

"""Run step inside a while loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
"""
logger.debug("starting")
context['whileCounter'] = 0
if self.stop is None and self.max is None:
# the ctor already does this check, but guess theoretically
# consumer could have messed with the props since ctor
logger.error(f"while decorator missing both max and stop.")
raise PipelineDefinitionError("the while decorator must have "
"either max or stop, or both. "
"But not neither.")
error_on_max = context.get_formatted_as_type(
self.error_on_max, out_type=bool)
sleep = context.get_formatted_as_type(self.sleep, out_type=float)
if self.max is None:
max = None
logger.info(f"while decorator will loop until {self.stop} "
f"evaluates to True at {sleep}s intervals.")
else:
max = context.get_formatted_as_type(self.max, out_type=int)
if max < 1:
logger.info(
f"max {self.max} is {max}. while only runs when max > 0.")
logger.debug("done")
return
if self.stop is None:
logger.info(f"while decorator will loop {max} times at "
f"{sleep}s intervals.")
else:
logger.info(f"while decorator will loop {max} times, or "
f"until {self.stop} evaluates to True at "
f"{sleep}s intervals.")
if not poll.while_until_true(interval=sleep,
max_attempts=max)(
self.exec_iteration)(context=context,
step_method=step_method):
# False means loop exhausted and stop never eval-ed True.
if error_on_max:
logger.error(f"exhausted {max} iterations of while loop, "
"and errorOnMax is True.")
if self.stop and max:
raise LoopMaxExhaustedError("while loop reached "
f"{max} and {self.stop} "
"never evaluated to True.")
else:
raise LoopMaxExhaustedError(f"while loop reached {max}.")
else:
if self.stop and max:
logger.info(
f"while decorator looped {max} times, "
f"and {self.stop} never evaluated to True.")
logger.debug("while loop done")
else:
logger.info(f"while loop done, stop condition {self.stop} "
"evaluated True.")
logger.debug("done") | [
"def",
"while_loop",
"(",
"self",
",",
"context",
",",
"step_method",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"context",
"[",
"'whileCounter'",
"]",
"=",
"0",
"if",
"self",
".",
"stop",
"is",
"None",
"and",
"self",
".",
"max",
"is",
"None",
":",
"# the ctor already does this check, but guess theoretically",
"# consumer could have messed with the props since ctor",
"logger",
".",
"error",
"(",
"f\"while decorator missing both max and stop.\"",
")",
"raise",
"PipelineDefinitionError",
"(",
"\"the while decorator must have \"",
"\"either max or stop, or both. \"",
"\"But not neither.\"",
")",
"error_on_max",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"error_on_max",
",",
"out_type",
"=",
"bool",
")",
"sleep",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"sleep",
",",
"out_type",
"=",
"float",
")",
"if",
"self",
".",
"max",
"is",
"None",
":",
"max",
"=",
"None",
"logger",
".",
"info",
"(",
"f\"while decorator will loop until {self.stop} \"",
"f\"evaluates to True at {sleep}s intervals.\"",
")",
"else",
":",
"max",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"self",
".",
"max",
",",
"out_type",
"=",
"int",
")",
"if",
"max",
"<",
"1",
":",
"logger",
".",
"info",
"(",
"f\"max {self.max} is {max}. while only runs when max > 0.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"if",
"self",
".",
"stop",
"is",
"None",
":",
"logger",
".",
"info",
"(",
"f\"while decorator will loop {max} times at \"",
"f\"{sleep}s intervals.\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"while decorator will loop {max} times, or \"",
"f\"until {self.stop} evaluates to True at \"",
"f\"{sleep}s intervals.\"",
")",
"if",
"not",
"poll",
".",
"while_until_true",
"(",
"interval",
"=",
"sleep",
",",
"max_attempts",
"=",
"max",
")",
"(",
"self",
".",
"exec_iteration",
")",
"(",
"context",
"=",
"context",
",",
"step_method",
"=",
"step_method",
")",
":",
"# False means loop exhausted and stop never eval-ed True.",
"if",
"error_on_max",
":",
"logger",
".",
"error",
"(",
"f\"exhausted {max} iterations of while loop, \"",
"\"and errorOnMax is True.\"",
")",
"if",
"self",
".",
"stop",
"and",
"max",
":",
"raise",
"LoopMaxExhaustedError",
"(",
"\"while loop reached \"",
"f\"{max} and {self.stop} \"",
"\"never evaluated to True.\"",
")",
"else",
":",
"raise",
"LoopMaxExhaustedError",
"(",
"f\"while loop reached {max}.\"",
")",
"else",
":",
"if",
"self",
".",
"stop",
"and",
"max",
":",
"logger",
".",
"info",
"(",
"f\"while decorator looped {max} times, \"",
"f\"and {self.stop} never evaluated to True.\"",
")",
"logger",
".",
"debug",
"(",
"\"while loop done\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"while loop done, stop condition {self.stop} \"",
"\"evaluated True.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run step inside a while loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context) | [
"Run",
"step",
"inside",
"a",
"while",
"loop",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L684-L757 | [
"context",
"step_method"
] | What does this function do? | [
"Run",
"step",
"inside",
"a",
"while",
"loop",
"."
] |
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/steps/fetchyaml.py | func_name: run_step

    def run_step(context):
        """Load a yaml file into the pypyr context.
        Yaml parsed from the file will be merged into the pypyr context. This will
        overwrite existing values if the same keys are already in there.
        I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
        already exists, returned context['eggs'] will be 'boiled'.
        Args:
            context: pypyr.context.Context. Mandatory.
                The following context key must exist
                - fetchYaml
                    - path. path-like. Path to file on disk.
                    - key. string. If exists, write yaml to this context key.
                      Else yaml writes to context root.
        All inputs support formatting expressions.
        Also supports a passing path as string to fetchYaml, but in this case you
        won't be able to specify a key.
        Returns:
            None. updates context arg.
        Raises:
            FileNotFoundError: take a guess
            pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context.
            pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is
                None.
        """
        logger.debug("started")
        deprecated(context)
        context.assert_key_has_value(key='fetchYaml', caller=__name__)
        fetch_yaml_input = context.get_formatted('fetchYaml')
        if isinstance(fetch_yaml_input, str):
            file_path = fetch_yaml_input
            destination_key_expression = None
        else:
            context.assert_child_key_has_value(parent='fetchYaml',
                                               child='path',
                                               caller=__name__)
            file_path = fetch_yaml_input['path']
            destination_key_expression = fetch_yaml_input.get('key', None)
        logger.debug(f"attempting to open file: {file_path}")
        with open(file_path) as yaml_file:
            yaml_loader = yaml.YAML(typ='safe', pure=True)
            payload = yaml_loader.load(yaml_file)
        if destination_key_expression:
            destination_key = context.get_formatted_iterable(
                destination_key_expression)
            logger.debug(f"yaml file loaded. Writing to context {destination_key}")
            context[destination_key] = payload
        else:
            if not isinstance(payload, MutableMapping):
                raise TypeError(
                    "yaml input should describe a dictionary at the top "
                    "level when fetchYamlKey isn't specified. You should have "
                    "something like \n'key1: value1'\n key2: value2'\n"
                    "in the yaml top-level, not \n'- value1\n - value2'")
            logger.debug("yaml file loaded. Merging into pypyr context. . .")
            context.update(payload)
        logger.info(f"yaml file written into pypyr context. Count: {len(payload)}")
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchyaml.py#L10-L81 | parameters: context | question: What does this function do? | answer: Load a yaml file into the pypyr context.

"""Load a yaml file into the pypyr context.
Yaml parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchYaml
- path. path-like. Path to file on disk.
- key. string. If exists, write yaml to this context key.
Else yaml writes to context root.
All inputs support formatting expressions.
Also supports a passing path as string to fetchYaml, but in this case you
won't be able to specify a key.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is
None.
"""
logger.debug("started")
deprecated(context)
context.assert_key_has_value(key='fetchYaml', caller=__name__)
fetch_yaml_input = context.get_formatted('fetchYaml')
if isinstance(fetch_yaml_input, str):
file_path = fetch_yaml_input
destination_key_expression = None
else:
context.assert_child_key_has_value(parent='fetchYaml',
child='path',
caller=__name__)
file_path = fetch_yaml_input['path']
destination_key_expression = fetch_yaml_input.get('key', None)
logger.debug(f"attempting to open file: {file_path}")
with open(file_path) as yaml_file:
yaml_loader = yaml.YAML(typ='safe', pure=True)
payload = yaml_loader.load(yaml_file)
if destination_key_expression:
destination_key = context.get_formatted_iterable(
destination_key_expression)
logger.debug(f"yaml file loaded. Writing to context {destination_key}")
context[destination_key] = payload
else:
if not isinstance(payload, MutableMapping):
raise TypeError(
"yaml input should describe a dictionary at the top "
"level when fetchYamlKey isn't specified. You should have "
"something like \n'key1: value1'\n key2: value2'\n"
"in the yaml top-level, not \n'- value1\n - value2'")
logger.debug("yaml file loaded. Merging into pypyr context. . .")
context.update(payload)
logger.info(f"yaml file written into pypyr context. Count: {len(payload)}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'fetchYaml'",
",",
"caller",
"=",
"__name__",
")",
"fetch_yaml_input",
"=",
"context",
".",
"get_formatted",
"(",
"'fetchYaml'",
")",
"if",
"isinstance",
"(",
"fetch_yaml_input",
",",
"str",
")",
":",
"file_path",
"=",
"fetch_yaml_input",
"destination_key_expression",
"=",
"None",
"else",
":",
"context",
".",
"assert_child_key_has_value",
"(",
"parent",
"=",
"'fetchYaml'",
",",
"child",
"=",
"'path'",
",",
"caller",
"=",
"__name__",
")",
"file_path",
"=",
"fetch_yaml_input",
"[",
"'path'",
"]",
"destination_key_expression",
"=",
"fetch_yaml_input",
".",
"get",
"(",
"'key'",
",",
"None",
")",
"logger",
".",
"debug",
"(",
"f\"attempting to open file: {file_path}\"",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"yaml_file",
":",
"yaml_loader",
"=",
"yaml",
".",
"YAML",
"(",
"typ",
"=",
"'safe'",
",",
"pure",
"=",
"True",
")",
"payload",
"=",
"yaml_loader",
".",
"load",
"(",
"yaml_file",
")",
"if",
"destination_key_expression",
":",
"destination_key",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"destination_key_expression",
")",
"logger",
".",
"debug",
"(",
"f\"yaml file loaded. Writing to context {destination_key}\"",
")",
"context",
"[",
"destination_key",
"]",
"=",
"payload",
"else",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"MutableMapping",
")",
":",
"raise",
"TypeError",
"(",
"\"yaml input should describe a dictionary at the top \"",
"\"level when fetchYamlKey isn't specified. You should have \"",
"\"something like \\n'key1: value1'\\n key2: value2'\\n\"",
"\"in the yaml top-level, not \\n'- value1\\n - value2'\"",
")",
"logger",
".",
"debug",
"(",
"\"yaml file loaded. Merging into pypyr context. . .\"",
")",
"context",
".",
"update",
"(",
"payload",
")",
"logger",
".",
"info",
"(",
"f\"yaml file written into pypyr context. Count: {len(payload)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Load a yaml file into the pypyr context.
Yaml parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchYaml
- path. path-like. Path to file on disk.
- key. string. If exists, write yaml to this context key.
Else yaml writes to context root.
All inputs support formatting expressions.
Also supports a passing path as string to fetchYaml, but in this case you
won't be able to specify a key.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is
None. | [
"Load",
"a",
"yaml",
"file",
"into",
"the",
"pypyr",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchyaml.py#L10-L81 | [
"context"
] | What does this function do? | [
"Load",
"a",
"yaml",
"file",
"into",
"the",
"pypyr",
"context",
"."
] |
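A hedged usage sketch built only from the context keys the docstring lists; the file name and destination key are made up:

```python
from pypyr.context import Context
import pypyr.steps.fetchyaml as fetchyaml

# With 'key' set, the parsed yaml lands at context['cfg']; without it, the
# yaml mapping merges into the context root. './config.yaml' is hypothetical.
context = Context({'fetchYaml': {'path': './config.yaml', 'key': 'cfg'}})
fetchyaml.run_step(context)
print(context['cfg'])
```
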
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/steps/fileformatyaml.py | func_name: run_step

    def run_step(context):
        """Parse input yaml file and substitute {tokens} from context.
        Loads yaml into memory to do parsing, so be aware of big files.
        Args:
            context: pypyr.context.Context. Mandatory.
                - fileFormatYaml
                    - in. mandatory.
                      str, path-like, or an iterable (list/tuple) of
                      strings/paths. Each str/path can be a glob, relative or
                      absolute path.
                    - out. optional. path-like.
                      Can refer to a file or a directory.
                      will create directory structure if it doesn't exist. If
                      in-path refers to >1 file (e.g it's a glob or list), out
                      path can only be a directory - it doesn't make sense to
                      write >1 file to the same single file (this is not an
                      appender.) To ensure out_path is read as a directory and
                      not a file, be sure to have the path separator (/) at the
                      end.
                      If out_path is not specified or None, will in-place edit
                      and overwrite the in-files.
        Returns:
            None.
        Raises:
            FileNotFoundError: take a guess
            pypyr.errors.KeyNotInContextError: fileFormatYaml or
                fileFormatYaml['in'] missing in context.
            pypyr.errors.KeyInContextHasNoValueError: fileFormatYaml or
                fileFormatYaml['in'] exists but is None.
        """
        logger.debug("started")
        deprecated(context)
        ObjectRewriterStep(__name__, 'fileFormatYaml', context).run_step(
            YamlRepresenter())
        logger.debug("done")

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fileformatyaml.py#L10-L52 | parameters: context | question: What does this function do? | answer: Parse input yaml file and substitute {tokens} from context.

"""Parse input yaml file and substitute {tokens} from context.
Loads yaml into memory to do parsing, so be aware of big files.
Args:
context: pypyr.context.Context. Mandatory.
- fileFormatYaml
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormatYaml or
fileFormatYaml['in'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileFormatYaml or
fileFormatYaml['in'] exists but is None.
"""
logger.debug("started")
deprecated(context)
ObjectRewriterStep(__name__, 'fileFormatYaml', context).run_step(
YamlRepresenter())
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"ObjectRewriterStep",
"(",
"__name__",
",",
"'fileFormatYaml'",
",",
"context",
")",
".",
"run_step",
"(",
"YamlRepresenter",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Parse input yaml file and substitute {tokens} from context.
Loads yaml into memory to do parsing, so be aware of big files.
Args:
context: pypyr.context.Context. Mandatory.
- fileFormatYaml
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormatYaml or
fileFormatYaml['in'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileFormatYaml or
fileFormatYaml['in'] exists but is None. | [
"Parse",
"input",
"yaml",
"file",
"and",
"substitute",
"{",
"tokens",
"}",
"from",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fileformatyaml.py#L10-L52 | [
"context"
] | What does this function do? | [
"Parse",
"input",
"yaml",
"file",
"and",
"substitute",
"{",
"tokens",
"}",
"from",
"context",
"."
] |
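A similar hedged sketch for the formatting step; 'in' and 'out' are the keys named in the docstring, while the glob and output directory are made up:

```python
from pypyr.context import Context
import pypyr.steps.fileformatyaml as fileformatyaml

# Substitutes {token} expressions in the matched yaml files from context
# values; the trailing slash marks './out/' as a directory.
context = Context({'token': 'some value',
                   'fileFormatYaml': {'in': './in/*.yaml', 'out': './out/'}})
fileformatyaml.run_step(context)
```
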
repository_name: pypyr/pypyr-cli | func_path_in_repository: pypyr/utils/poll.py | func_name: wait_until_true

    def wait_until_true(interval, max_attempts):
        """Decorator that executes a function until it returns True.
        Executes wrapped function at every number of seconds specified by interval,
        until wrapped function either returns True or max_attempts are exhausted,
        whichever comes 1st. The wrapped function can have any given signature.
        Use me if you always want to time out at max_attempts and you don't care
        about the while loop position counter value. If you do care, use
        while_until_true instead.
        Args:
            interval: In seconds. How long to wait between executing the wrapped
                function.
            max_attempts: int. Execute wrapped function up to this limit.
        Returns:
            Bool. True if wrapped function returned True. False if reached
            max_attempts without the wrapped function ever returning True.
        """
        def decorator(f):
            logger.debug("started")

            def sleep_looper(*args, **kwargs):
                logger.debug(f"Looping every {interval} seconds for "
                             f"{max_attempts} attempts")
                for i in range(1, max_attempts + 1):
                    result = f(*args, **kwargs)
                    if result:
                        logger.debug(f"iteration {i}. Desired state reached.")
                        return True
                    if i < max_attempts:
                        logger.debug(f"iteration {i}. Still waiting. . .")
                        time.sleep(interval)
                logger.debug("done")
                return False
            return sleep_looper
        return decorator

language: python | split_name: train | func_code_url: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/poll.py#L9-L47 | parameters: interval, max_attempts | question: What does this function do? | answer: Decorator that executes a function until it returns True.

"""Decorator that executes a function until it returns True.
Executes wrapped function at every number of seconds specified by interval,
until wrapped function either returns True or max_attempts are exhausted,
whichever comes 1st. The wrapped function can have any given signature.
Use me if you always want to time out at max_attempts and you don't care
about the while loop position counter value. If you do care, use
while_until_true instead.
Args:
interval: In seconds. How long to wait between executing the wrapped
function.
max_attempts: int. Execute wrapped function up to this limit.
Returns:
Bool. True if wrapped function returned True. False if reached
max_attempts without the wrapped function ever returning True.
"""
def decorator(f):
logger.debug("started")
def sleep_looper(*args, **kwargs):
logger.debug(f"Looping every {interval} seconds for "
f"{max_attempts} attempts")
for i in range(1, max_attempts + 1):
result = f(*args, **kwargs)
if result:
logger.debug(f"iteration {i}. Desired state reached.")
return True
if i < max_attempts:
logger.debug(f"iteration {i}. Still waiting. . .")
time.sleep(interval)
logger.debug("done")
return False
return sleep_looper
return decorator | [
"def",
"wait_until_true",
"(",
"interval",
",",
"max_attempts",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"def",
"sleep_looper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Looping every {interval} seconds for \"",
"f\"{max_attempts} attempts\"",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"max_attempts",
"+",
"1",
")",
":",
"result",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
":",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Desired state reached.\"",
")",
"return",
"True",
"if",
"i",
"<",
"max_attempts",
":",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Still waiting. . .\"",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"False",
"return",
"sleep_looper",
"return",
"decorator"
] | Decorator that executes a function until it returns True.
Executes wrapped function at every number of seconds specified by interval,
until wrapped function either returns True or max_attempts are exhausted,
whichever comes 1st. The wrapped function can have any given signature.
Use me if you always want to time out at max_attempts and you don't care
about the while loop position counter value. If you do care, use
while_until_true instead.
Args:
interval: In seconds. How long to wait between executing the wrapped
function.
max_attempts: int. Execute wrapped function up to this limit.
Returns:
Bool. True if wrapped function returned True. False if reached
max_attempts without the wrapped function ever returning True. | [
"Decorator",
"that",
"executes",
"a",
"function",
"until",
"it",
"returns",
"True",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/poll.py#L9-L47 | [
"interval",
"max_attempts"
] | What does this function do? | [
"Decorator",
"that",
"executes",
"a",
"function",
"until",
"it",
"returns",
"True",
"."
] |
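A usage sketch of the decorator above; the health-check function and the 1-second/5-attempt values are made up.

from pypyr.utils.poll import wait_until_true

@wait_until_true(interval=1, max_attempts=5)
def port_is_open(host):
    # stand-in check; return True once the desired state is reached
    return host == 'localhost'

# polls up to 5 times, 1 second apart; True as soon as the wrapped
# function returns True, False if all attempts are exhausted
result = port_is_open('localhost')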
pypyr/pypyr-cli | pypyr/utils/poll.py | while_until_true | def while_until_true(interval, max_attempts):
"""Decorator that executes a function until it returns True.
Executes wrapped function at every number of seconds specified by interval,
until wrapped function either returns True or max_attempts are exhausted,
whichever comes 1st.
The difference between while_until_true and wait_until_true is that the
latter will always loop to a max_attempts, whereas while_until_true will
keep going indefinitely.
The other notable difference to wait_until_true is that the wrapped
function signature must be:
func(counter, *args, **kwargs)
This is because this decorator injects the while loop counter into the
invoked function.
Args:
interval: In seconds. How long to wait between executing the wrapped
function.
max_attempts: int. Execute wrapped function up to this limit. None
means infinite (or until wrapped function returns True).
Passing anything <0 also means infinite.
Returns:
Bool. True if wrapped function returned True. False if reached
max_attempts without the wrapped function ever returning True.
"""
def decorator(f):
logger.debug("started")
def sleep_looper(*args, **kwargs):
if max_attempts:
logger.debug(f"Looping every {interval} seconds for "
f"{max_attempts} attempts")
else:
logger.debug(f"Looping every {interval} seconds.")
i = 0
result = False
# pragma for coverage: cov can't figure out the branch construct
# with the dynamic function invocation, it seems, so marks the
# branch partial. unit test cov is 100%, though.
while not result: # pragma: no branch
i += 1
result = f(i, *args, **kwargs)
if result:
logger.debug(f"iteration {i}. Desired state reached.")
break
elif max_attempts:
if i < max_attempts:
logger.debug(f"iteration {i}. Still waiting. . .")
time.sleep(interval)
else:
logger.debug(f"iteration {i}. Max attempts exhausted.")
break
else:
# result False AND max_attempts is None means keep looping
# because None = infinite
logger.debug(f"iteration {i}. Still waiting. . .")
time.sleep(interval)
logger.debug("done")
return result
return sleep_looper
return decorator | python | def while_until_true(interval, max_attempts):
"""Decorator that executes a function until it returns True.
Executes wrapped function at every number of seconds specified by interval,
until wrapped function either returns True or max_attempts are exhausted,
whichever comes 1st.
The difference between while_until_true and wait_until_true is that the
latter will always loop to a max_attempts, whereas while_until_true will
keep going indefinitely.
The other notable difference to wait_until_true is that the wrapped
function signature must be:
func(counter, *args, **kwargs)
This is because this decorator injects the while loop counter into the
invoked function.
Args:
interval: In seconds. How long to wait between executing the wrapped
function.
max_attempts: int. Execute wrapped function up to this limit. None
means infinite (or until wrapped function returns True).
Passing anything <0 also means infinite.
Returns:
Bool. True if wrapped function returned True. False if reached
max_attempts without the wrapped function ever returning True.
"""
def decorator(f):
logger.debug("started")
def sleep_looper(*args, **kwargs):
if max_attempts:
logger.debug(f"Looping every {interval} seconds for "
f"{max_attempts} attempts")
else:
logger.debug(f"Looping every {interval} seconds.")
i = 0
result = False
# pragma for coverage: cov can't figure out the branch construct
# with the dynamic function invocation, it seems, so marks the
# branch partial. unit test cov is 100%, though.
while not result: # pragma: no branch
i += 1
result = f(i, *args, **kwargs)
if result:
logger.debug(f"iteration {i}. Desired state reached.")
break
elif max_attempts:
if i < max_attempts:
logger.debug(f"iteration {i}. Still waiting. . .")
time.sleep(interval)
else:
logger.debug(f"iteration {i}. Max attempts exhausted.")
break
else:
# result False AND max_attempts is None means keep looping
# because None = infinite
logger.debug(f"iteration {i}. Still waiting. . .")
time.sleep(interval)
logger.debug("done")
return result
return sleep_looper
return decorator | [
"def",
"while_until_true",
"(",
"interval",
",",
"max_attempts",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"def",
"sleep_looper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"max_attempts",
":",
"logger",
".",
"debug",
"(",
"f\"Looping every {interval} seconds for \"",
"f\"{max_attempts} attempts\"",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"Looping every {interval} seconds.\"",
")",
"i",
"=",
"0",
"result",
"=",
"False",
"# pragma for coverage: cov can't figure out the branch construct",
"# with the dynamic function invocation, it seems, so marks the",
"# branch partial. unit test cov is 100%, though.",
"while",
"not",
"result",
":",
"# pragma: no branch",
"i",
"+=",
"1",
"result",
"=",
"f",
"(",
"i",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"result",
":",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Desired state reached.\"",
")",
"break",
"elif",
"max_attempts",
":",
"if",
"i",
"<",
"max_attempts",
":",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Still waiting. . .\"",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Max attempts exhausted.\"",
")",
"break",
"else",
":",
"# result False AND max_attempts is None means keep looping",
"# because None = infinite",
"logger",
".",
"debug",
"(",
"f\"iteration {i}. Still waiting. . .\"",
")",
"time",
".",
"sleep",
"(",
"interval",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"result",
"return",
"sleep_looper",
"return",
"decorator"
] | Decorator that executes a function until it returns True.
Executes wrapped function at every number of seconds specified by interval,
until wrapped function either returns True or max_attempts are exhausted,
whichever comes 1st.
The difference between while_until_true and wait_until_true is that the
latter will always loop to a max_attempts, whereas while_until_true will
keep going indefinitely.
The other notable difference to wait_until_true is that the wrapped
function signature must be:
func(counter, *args, **kwargs)
This is because this decorator injects the while loop counter into the
invoked function.
Args:
interval: In seconds. How long to wait between executing the wrapped
function.
max_attempts: int. Execute wrapped function up to this limit. None
means infinite (or until wrapped function returns True).
Passing anything <0 also means infinite.
Returns:
Bool. True if wrapped function returned True. False if reached
max_attempts without the wrapped function ever returning True. | [
"Decorator",
"that",
"executes",
"a",
"function",
"until",
"it",
"returns",
"True",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/poll.py#L50-L118 | [
"interval",
"max_attempts"
] | What does this function do? | [
"Decorator",
"that",
"executes",
"a",
"function",
"until",
"it",
"returns",
"True",
"."
] |
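A sketch showing the counter injection that distinguishes while_until_true from wait_until_true; the threshold of 3 is arbitrary and max_attempts=None means loop until the wrapped function returns True.

from pypyr.utils.poll import while_until_true

@while_until_true(interval=1, max_attempts=None)
def done_after_three(counter):
    # the decorator passes the while loop position counter as the first argument
    print(f"attempt {counter}")
    return counter >= 3

done_after_three()   # prints attempts 1, 2, 3 and then returns True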
pypyr/pypyr-cli | pypyr/steps/fileformat.py | run_step | def run_step(context):
"""Parse input file and substitutes {tokens} from context.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileFormat
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormat missing in context.
pypyr.errors.KeyInContextHasNoValueError: in or out exists but is None.
"""
logger.debug("started")
deprecated(context)
StreamRewriterStep(__name__, 'fileFormat', context).run_step()
logger.debug("done") | python | def run_step(context):
"""Parse input file and substitutes {tokens} from context.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileFormat
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormat missing in context.
pypyr.errors.KeyInContextHasNoValueError: in or out exists but is None.
"""
logger.debug("started")
deprecated(context)
StreamRewriterStep(__name__, 'fileFormat', context).run_step()
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"StreamRewriterStep",
"(",
"__name__",
",",
"'fileFormat'",
",",
"context",
")",
".",
"run_step",
"(",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Parse input file and substitutes {tokens} from context.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileFormat
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormat missing in context.
pypyr.errors.KeyInContextHasNoValueError: in or out exists but is None. | [
"Parse",
"input",
"file",
"and",
"substitutes",
"{",
"tokens",
"}",
"from",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fileformat.py#L9-L46 | [
"context"
] | What does this function do? | [
"Parse",
"input",
"file",
"and",
"substitutes",
"{",
"tokens",
"}",
"from",
"context",
"."
] |
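Rough sketch of the plain-text fileFormat step above; the env_name key, the glob and the output directory are invented.

from pypyr.context import Context
import pypyr.steps.fileformat as fileformat

context = Context({
    'env_name': 'prod',
    'fileFormat': {
        'in': ['conf/*.template'],   # glob/list of files to stream through the formatter
        'out': 'build/',             # trailing / so multiple inputs can map into a directory
    },
})
fileformat.run_step(context)         # rewrites each file with {env_name} resolved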
pypyr/pypyr-cli | pypyr/steps/fileformat.py | deprecated | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fileFormatIn' in context:
context.assert_keys_have_values(__name__,
'fileFormatIn',
'fileFormatOut')
context['fileFormat'] = {'in': context['fileFormatIn'],
'out': context['fileFormatOut']}
logger.warning("fileFormatIn and fileFormatOut "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fileFormat "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fileFormat key for you "
"under the hood.") | python | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fileFormatIn' in context:
context.assert_keys_have_values(__name__,
'fileFormatIn',
'fileFormatOut')
context['fileFormat'] = {'in': context['fileFormatIn'],
'out': context['fileFormatOut']}
logger.warning("fileFormatIn and fileFormatOut "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fileFormat "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fileFormat key for you "
"under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"if",
"'fileFormatIn'",
"in",
"context",
":",
"context",
".",
"assert_keys_have_values",
"(",
"__name__",
",",
"'fileFormatIn'",
",",
"'fileFormatOut'",
")",
"context",
"[",
"'fileFormat'",
"]",
"=",
"{",
"'in'",
":",
"context",
"[",
"'fileFormatIn'",
"]",
",",
"'out'",
":",
"context",
"[",
"'fileFormatOut'",
"]",
"}",
"logger",
".",
"warning",
"(",
"\"fileFormatIn and fileFormatOut \"",
"\"are deprecated. They will stop working upon the next \"",
"\"major release. Use the new context key fileFormat \"",
"\"instead. It's a lot better, promise! For the moment \"",
"\"pypyr is creating the new fileFormat key for you \"",
"\"under the hood.\"",
")"
] | Create new style in params from deprecated. | [
"Create",
"new",
"style",
"in",
"params",
"from",
"deprecated",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fileformat.py#L49-L64 | [
"context"
] | What does this function do? | [
"Create",
"new",
"style",
"in",
"params",
"from",
"deprecated",
"."
] |
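What the deprecation shim above does to an old-style context, sketched with invented file names.

from pypyr.context import Context
import pypyr.steps.fileformat as fileformat

old_style = Context({'fileFormatIn': 'in.txt', 'fileFormatOut': 'out.txt'})
fileformat.deprecated(old_style)
# old_style now also carries the new-style key:
# {'fileFormat': {'in': 'in.txt', 'out': 'out.txt'}, ...}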
pypyr/pypyr-cli | pypyr/steps/nowutc.py | run_step | def run_step(context):
"""pypyr step saves current utc datetime to context.
Args:
context: pypyr.context.Context. Mandatory.
The following context key is optional:
- nowUtcIn. str. Datetime formatting expression. For full list
of possible expressions, check here:
https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior
All inputs support pypyr formatting expressions.
This step creates now in context, containing a string representation of the
timestamp. If input formatting not specified, defaults to ISO8601.
Default is:
YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0,
YYYY-MM-DDTHH:MM:SS
Returns:
None. updates context arg.
"""
logger.debug("started")
format_expression = context.get('nowUtcIn', None)
if format_expression:
formatted_expression = context.get_formatted_string(format_expression)
context['nowUtc'] = datetime.now(
timezone.utc).strftime(formatted_expression)
else:
context['nowUtc'] = datetime.now(timezone.utc).isoformat()
logger.info(f"timestamp {context['nowUtc']} saved to context nowUtc")
logger.debug("done") | python | def run_step(context):
"""pypyr step saves current utc datetime to context.
Args:
context: pypyr.context.Context. Mandatory.
The following context key is optional:
- nowUtcIn. str. Datetime formatting expression. For full list
of possible expressions, check here:
https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior
All inputs support pypyr formatting expressions.
This step creates now in context, containing a string representation of the
timestamp. If input formatting not specified, defaults to ISO8601.
Default is:
YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0,
YYYY-MM-DDTHH:MM:SS
Returns:
None. updates context arg.
"""
logger.debug("started")
format_expression = context.get('nowUtcIn', None)
if format_expression:
formatted_expression = context.get_formatted_string(format_expression)
context['nowUtc'] = datetime.now(
timezone.utc).strftime(formatted_expression)
else:
context['nowUtc'] = datetime.now(timezone.utc).isoformat()
logger.info(f"timestamp {context['nowUtc']} saved to context nowUtc")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"format_expression",
"=",
"context",
".",
"get",
"(",
"'nowUtcIn'",
",",
"None",
")",
"if",
"format_expression",
":",
"formatted_expression",
"=",
"context",
".",
"get_formatted_string",
"(",
"format_expression",
")",
"context",
"[",
"'nowUtc'",
"]",
"=",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
".",
"strftime",
"(",
"formatted_expression",
")",
"else",
":",
"context",
"[",
"'nowUtc'",
"]",
"=",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
".",
"isoformat",
"(",
")",
"logger",
".",
"info",
"(",
"f\"timestamp {context['nowUtc']} saved to context nowUtc\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | pypyr step saves current utc datetime to context.
Args:
context: pypyr.context.Context. Mandatory.
The following context key is optional:
- nowUtcIn. str. Datetime formatting expression. For full list
of possible expressions, check here:
https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior
All inputs support pypyr formatting expressions.
This step creates now in context, containing a string representation of the
timestamp. If input formatting not specified, defaults to ISO8601.
Default is:
YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0,
YYYY-MM-DDTHH:MM:SS
Returns:
None. updates context arg. | [
"pypyr",
"step",
"saves",
"current",
"utc",
"datetime",
"to",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/nowutc.py#L9-L44 | [
"context"
] | What does this function do? | [
"pypyr",
"step",
"saves",
"current",
"utc",
"datetime",
"to",
"context",
"."
] |
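A small sketch of both branches of the nowutc step above; the strftime pattern is an arbitrary example.

from pypyr.context import Context
import pypyr.steps.nowutc as nowutc

with_format = Context({'nowUtcIn': '%Y-%m-%d %H:%M'})
nowutc.run_step(with_format)
print(with_format['nowUtc'])   # e.g. 2019-05-01 13:45 (UTC, per the pattern)

default_iso = Context()
nowutc.run_step(default_iso)   # no nowUtcIn, so nowUtc gets an ISO8601 timestamp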
pypyr/pypyr-cli | pypyr/steps/env.py | run_step | def run_step(context):
"""Get, set, unset $ENVs.
Context is a dictionary or dictionary-like. context is mandatory.
Input context is:
env:
get: {dict}
set: {dict}
unset: [list]
At least one of env's sub-keys (get, set or unset) must exist.
This step will run whatever combination of Get, Set and Unset you specify.
Regardless of combination, execution order is Get, Set, Unset.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
context.assert_key_has_value('env', __name__)
found_get = env_get(context)
found_set = env_set(context)
found_unset = env_unset(context)
# at least 1 of envGet, envSet or envUnset must exist in context
if not (found_get or found_set or found_unset):
raise KeyNotInContextError(
"context must contain any combination of "
"env['get'], env['set'] or env['unset'] for "
f"{__name__}")
logger.debug("done") | python | def run_step(context):
"""Get, set, unset $ENVs.
Context is a dictionary or dictionary-like. context is mandatory.
Input context is:
env:
get: {dict}
set: {dict}
unset: [list]
At least one of env's sub-keys (get, set or unset) must exist.
This step will run whatever combination of Get, Set and Unset you specify.
Regardless of combination, execution order is Get, Set, Unset.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
context.assert_key_has_value('env', __name__)
found_get = env_get(context)
found_set = env_set(context)
found_unset = env_unset(context)
# at least 1 of envGet, envSet or envUnset must exist in context
if not (found_get or found_set or found_unset):
raise KeyNotInContextError(
"context must contain any combination of "
"env['get'], env['set'] or env['unset'] for "
f"{__name__}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"assert",
"context",
",",
"f\"context must have value for {__name__}\"",
"deprecated",
"(",
"context",
")",
"context",
".",
"assert_key_has_value",
"(",
"'env'",
",",
"__name__",
")",
"found_get",
"=",
"env_get",
"(",
"context",
")",
"found_set",
"=",
"env_set",
"(",
"context",
")",
"found_unset",
"=",
"env_unset",
"(",
"context",
")",
"# at least 1 of envGet, envSet or envUnset must exist in context",
"if",
"not",
"(",
"found_get",
"or",
"found_set",
"or",
"found_unset",
")",
":",
"raise",
"KeyNotInContextError",
"(",
"\"context must contain any combination of \"",
"\"env['get'], env['set'] or env['unset'] for \"",
"f\"{__name__}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Get, set, unset $ENVs.
Context is a dictionary or dictionary-like. context is mandatory.
Input context is:
env:
get: {dict}
set: {dict}
unset: [list]
At least one of env's sub-keys (get, set or unset) must exist.
This step will run whatever combination of Get, Set and Unset you specify.
Regardless of combination, execution order is Get, Set, Unset. | [
"Get",
"set",
"unset",
"$ENVs",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/env.py#L10-L43 | [
"context"
] | What does this function do? | [
"Get",
"set",
"unset",
"$ENVs",
"."
] |
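Sketch of a combined get/set/unset context for the env step above; the variable names are invented, and the sketch assumes $HOME is defined since env_get raises KeyError for missing $ENVs.

import os
from pypyr.context import Context
import pypyr.steps.env as env_step

os.environ['TEMP_TOKEN'] = 'scratch'          # so the unset below has something to remove
context = Context({
    'release': '1.2.3',
    'env': {
        'get': {'homeDir': 'HOME'},           # $HOME -> context['homeDir']
        'set': {'MY_RELEASE': '{release}'},   # context -> $MY_RELEASE, with formatting
        'unset': ['TEMP_TOKEN'],              # removed for this process and its children
    },
})
env_step.run_step(context)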
pypyr/pypyr-cli | pypyr/steps/env.py | env_get | def env_get(context):
"""Get $ENVs into the pypyr context.
Context is a dictionary or dictionary-like. context is mandatory.
context['env']['get'] must exist. It's a dictionary.
Values are the names of the $ENVs to write to the pypyr context.
Keys are the pypyr context item to which to write the $ENV values.
For example, say input context is:
key1: value1
key2: value2
pypyrCurrentDir: value3
env:
get:
pypyrUser: USER
pypyrCurrentDir: PWD
This will result in context:
key1: value1
key2: value2
key3: value3
pypyrUser: <<value of $USER here>>
pypyrCurrentDir: <<value of $PWD here, not value3>>
"""
get = context['env'].get('get', None)
exists = False
if get:
logger.debug("start")
for k, v in get.items():
logger.debug(f"setting context {k} to $ENV {v}")
context[k] = os.environ[v]
logger.info(f"saved {len(get)} $ENVs to context.")
exists = True
logger.debug("done")
return exists | python | def env_get(context):
"""Get $ENVs into the pypyr context.
Context is a dictionary or dictionary-like. context is mandatory.
context['env']['get'] must exist. It's a dictionary.
Values are the names of the $ENVs to write to the pypyr context.
Keys are the pypyr context item to which to write the $ENV values.
For example, say input context is:
key1: value1
key2: value2
pypyrCurrentDir: value3
env:
get:
pypyrUser: USER
pypyrCurrentDir: PWD
This will result in context:
key1: value1
key2: value2
key3: value3
pypyrUser: <<value of $USER here>>
pypyrCurrentDir: <<value of $PWD here, not value3>>
"""
get = context['env'].get('get', None)
exists = False
if get:
logger.debug("start")
for k, v in get.items():
logger.debug(f"setting context {k} to $ENV {v}")
context[k] = os.environ[v]
logger.info(f"saved {len(get)} $ENVs to context.")
exists = True
logger.debug("done")
return exists | [
"def",
"env_get",
"(",
"context",
")",
":",
"get",
"=",
"context",
"[",
"'env'",
"]",
".",
"get",
"(",
"'get'",
",",
"None",
")",
"exists",
"=",
"False",
"if",
"get",
":",
"logger",
".",
"debug",
"(",
"\"start\"",
")",
"for",
"k",
",",
"v",
"in",
"get",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"f\"setting context {k} to $ENV {v}\"",
")",
"context",
"[",
"k",
"]",
"=",
"os",
".",
"environ",
"[",
"v",
"]",
"logger",
".",
"info",
"(",
"f\"saved {len(get)} $ENVs to context.\"",
")",
"exists",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"exists"
] | Get $ENVs into the pypyr context.
Context is a dictionary or dictionary-like. context is mandatory.
context['env']['get'] must exist. It's a dictionary.
Values are the names of the $ENVs to write to the pypyr context.
Keys are the pypyr context item to which to write the $ENV values.
For example, say input context is:
key1: value1
key2: value2
pypyrCurrentDir: value3
env:
get:
pypyrUser: USER
pypyrCurrentDir: PWD
This will result in context:
key1: value1
key2: value2
key3: value3
pypyrUser: <<value of $USER here>>
pypyrCurrentDir: <<value of $PWD here, not value3>> | [
"Get",
"$ENVs",
"into",
"the",
"pypyr",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/env.py#L46-L85 | [
"context"
] | What does this function do? | [
"Get",
"$ENVs",
"into",
"the",
"pypyr",
"context",
"."
] |
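Direct-call sketch of env_get mirroring the docstring example above; the setdefault lines only keep the sketch safe on shells where $USER or $PWD is not exported.

import os
from pypyr.context import Context
from pypyr.steps.env import env_get

os.environ.setdefault('USER', 'someuser')
os.environ.setdefault('PWD', '/tmp')
context = Context({'env': {'get': {'pypyrUser': 'USER', 'pypyrCurrentDir': 'PWD'}}})
env_get(context)
# context['pypyrUser'] and context['pypyrCurrentDir'] now hold the $ENV values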
pypyr/pypyr-cli | pypyr/steps/env.py | env_set | def env_set(context):
"""Set $ENVs to specified string. from the pypyr context.
Args:
context: is dictionary-like. context is mandatory.
context['env']['set'] must exist. It's a dictionary.
Values are strings to write to $ENV.
Keys are the names of the $ENV values to which to write.
For example, say input context is:
key1: value1
key2: value2
key3: value3
env:
set:
MYVAR1: {key1}
MYVAR2: before_{key3}_after
MYVAR3: arbtexthere
This will result in the following $ENVs:
$MYVAR1 = value1
$MYVAR2 = before_value3_after
$MYVAR3 = arbtexthere
Note that the $ENVs are not persisted system-wide, they only exist for
pypyr sub-processes, and as such for the following steps during this pypyr
pipeline execution. If you set an $ENV here, don't expect to see it in your
system environment variables after the pipeline finishes running.
"""
env_set = context['env'].get('set', None)
exists = False
if env_set:
logger.debug("started")
for k, v in env_set.items():
logger.debug(f"setting ${k} to context[{v}]")
os.environ[k] = context.get_formatted_string(v)
logger.info(f"set {len(env_set)} $ENVs from context.")
exists = True
logger.debug("done")
return exists | python | def env_set(context):
"""Set $ENVs to specified string. from the pypyr context.
Args:
context: is dictionary-like. context is mandatory.
context['env']['set'] must exist. It's a dictionary.
Values are strings to write to $ENV.
Keys are the names of the $ENV values to which to write.
For example, say input context is:
key1: value1
key2: value2
key3: value3
env:
set:
MYVAR1: {key1}
MYVAR2: before_{key3}_after
MYVAR3: arbtexthere
This will result in the following $ENVs:
$MYVAR1 = value1
$MYVAR2 = before_value3_after
$MYVAR3 = arbtexthere
Note that the $ENVs are not persisted system-wide, they only exist for
pypyr sub-processes, and as such for the following steps during this pypyr
pipeline execution. If you set an $ENV here, don't expect to see it in your
system environment variables after the pipeline finishes running.
"""
env_set = context['env'].get('set', None)
exists = False
if env_set:
logger.debug("started")
for k, v in env_set.items():
logger.debug(f"setting ${k} to context[{v}]")
os.environ[k] = context.get_formatted_string(v)
logger.info(f"set {len(env_set)} $ENVs from context.")
exists = True
logger.debug("done")
return exists | [
"def",
"env_set",
"(",
"context",
")",
":",
"env_set",
"=",
"context",
"[",
"'env'",
"]",
".",
"get",
"(",
"'set'",
",",
"None",
")",
"exists",
"=",
"False",
"if",
"env_set",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"for",
"k",
",",
"v",
"in",
"env_set",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"f\"setting ${k} to context[{v}]\"",
")",
"os",
".",
"environ",
"[",
"k",
"]",
"=",
"context",
".",
"get_formatted_string",
"(",
"v",
")",
"logger",
".",
"info",
"(",
"f\"set {len(env_set)} $ENVs from context.\"",
")",
"exists",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"exists"
] | Set $ENVs to specified string. from the pypyr context.
Args:
context: is dictionary-like. context is mandatory.
context['env']['set'] must exist. It's a dictionary.
Values are strings to write to $ENV.
Keys are the names of the $ENV values to which to write.
For example, say input context is:
key1: value1
key2: value2
key3: value3
env:
set:
MYVAR1: {key1}
MYVAR2: before_{key3}_after
MYVAR3: arbtexthere
This will result in the following $ENVs:
$MYVAR1 = value1
$MYVAR2 = before_value3_after
$MYVAR3 = arbtexthere
Note that the $ENVs are not persisted system-wide, they only exist for
pypyr sub-processes, and as such for the following steps during this pypyr
pipeline execution. If you set an $ENV here, don't expect to see it in your
system environment variables after the pipeline finishes running. | [
"Set",
"$ENVs",
"to",
"specified",
"string",
".",
"from",
"the",
"pypyr",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/env.py#L88-L132 | [
"context"
] | What does this function do? | [
"Set",
"$ENVs",
"to",
"specified",
"string",
".",
"from",
"the",
"pypyr",
"context",
"."
] |
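The point of env_set is that values are pushed through pypyr {formatting} before they land in the environment; a tiny sketch with invented values.

import os
from pypyr.context import Context
from pypyr.steps.env import env_set

context = Context({
    'key3': 'value3',
    'env': {'set': {'MYVAR2': 'before_{key3}_after'}},
})
env_set(context)
assert os.environ['MYVAR2'] == 'before_value3_after'   # formatted, then exported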
pypyr/pypyr-cli | pypyr/steps/env.py | deprecated | def deprecated(context):
"""Handle deprecated context input."""
env = context.get('env', None)
get_info, set_info, unset_info = context.keys_of_type_exist(
('envGet', dict),
('envSet', dict),
('envUnset', list)
)
found_at_least_one = (get_info.key_in_context or set_info.key_in_context
or unset_info.key_in_context)
if found_at_least_one:
env = context['env'] = {}
else:
return
if get_info.key_in_context and get_info.is_expected_type:
env['get'] = context[get_info.key]
if set_info.key_in_context and set_info.is_expected_type:
env['set'] = context[set_info.key]
if unset_info.key_in_context and unset_info.is_expected_type:
env['unset'] = context[unset_info.key]
logger.warning("envGet, envSet and envUnset are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key env instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new env key for you under the hood.") | python | def deprecated(context):
"""Handle deprecated context input."""
env = context.get('env', None)
get_info, set_info, unset_info = context.keys_of_type_exist(
('envGet', dict),
('envSet', dict),
('envUnset', list)
)
found_at_least_one = (get_info.key_in_context or set_info.key_in_context
or unset_info.key_in_context)
if found_at_least_one:
env = context['env'] = {}
else:
return
if get_info.key_in_context and get_info.is_expected_type:
env['get'] = context[get_info.key]
if set_info.key_in_context and set_info.is_expected_type:
env['set'] = context[set_info.key]
if unset_info.key_in_context and unset_info.is_expected_type:
env['unset'] = context[unset_info.key]
logger.warning("envGet, envSet and envUnset are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key env instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new env key for you under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"env",
"=",
"context",
".",
"get",
"(",
"'env'",
",",
"None",
")",
"get_info",
",",
"set_info",
",",
"unset_info",
"=",
"context",
".",
"keys_of_type_exist",
"(",
"(",
"'envGet'",
",",
"dict",
")",
",",
"(",
"'envSet'",
",",
"dict",
")",
",",
"(",
"'envUnset'",
",",
"list",
")",
")",
"found_at_least_one",
"=",
"(",
"get_info",
".",
"key_in_context",
"or",
"set_info",
".",
"key_in_context",
"or",
"unset_info",
".",
"key_in_context",
")",
"if",
"found_at_least_one",
":",
"env",
"=",
"context",
"[",
"'env'",
"]",
"=",
"{",
"}",
"else",
":",
"return",
"if",
"get_info",
".",
"key_in_context",
"and",
"get_info",
".",
"is_expected_type",
":",
"env",
"[",
"'get'",
"]",
"=",
"context",
"[",
"get_info",
".",
"key",
"]",
"if",
"set_info",
".",
"key_in_context",
"and",
"set_info",
".",
"is_expected_type",
":",
"env",
"[",
"'set'",
"]",
"=",
"context",
"[",
"set_info",
".",
"key",
"]",
"if",
"unset_info",
".",
"key_in_context",
"and",
"unset_info",
".",
"is_expected_type",
":",
"env",
"[",
"'unset'",
"]",
"=",
"context",
"[",
"unset_info",
".",
"key",
"]",
"logger",
".",
"warning",
"(",
"\"envGet, envSet and envUnset are deprecated. They will \"",
"\"stop working upon the next major release. \"",
"\"Use the new context key env instead. It's a lot \"",
"\"better, promise! For the moment pypyr is creating the \"",
"\"new env key for you under the hood.\"",
")"
] | Handle deprecated context input. | [
"Handle",
"deprecated",
"context",
"input",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/env.py#L180-L211 | [
"context"
] | What does this function do? | [
"Handle",
"deprecated",
"context",
"input",
"."
] |
pypyr/pypyr-cli | pypyr/steps/assert.py | run_step | def run_step(context):
"""Assert that something is True or equal to something else.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- assert
- this. mandatory. Any type. If assert['equals'] not specified,
evals as boolean.
- equals. optional. Any type.
If assert['this'] evaluates to False raises error.
If assert['equals'] is specified, raises error if
assert.this != assert.equals.
assert['this'] & assert['equals'] both support string substitutions.
Returns:
None
Raises:
ContextError: if assert evaluates to False.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
context.assert_key_has_value('assert', __name__)
assert_this = context['assert']['this']
is_equals_there = 'equals' in context['assert']
if is_equals_there:
assert_equals = context['assert']['equals']
# compare assertThis to assertEquals
logger.debug("comparing assert['this'] to assert['equals'].")
assert_result = (context.get_formatted_iterable(assert_this)
== context.get_formatted_iterable(assert_equals))
else:
# nothing to compare means treat assertThis as a bool.
logger.debug("evaluating assert['this'] as a boolean.")
assert_result = context.get_formatted_as_type(assert_this,
out_type=bool)
logger.info(f"assert evaluated to {assert_result}")
if not assert_result:
if is_equals_there:
# emit type to help user, but not the actual field contents.
type_this = (
type(context.get_formatted_iterable(assert_this)).__name__)
type_equals = (
type(context.get_formatted_iterable(assert_equals)).__name__)
error_text = (
f"assert assert['this'] is of type {type_this} "
f"and does not equal assert['equals'] of type {type_equals}.")
else:
# if it's a bool it's presumably not a sensitive value.
error_text = (
f"assert {assert_this} evaluated to False.")
raise ContextError(error_text)
logger.debug("done") | python | def run_step(context):
"""Assert that something is True or equal to something else.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- assert
- this. mandatory. Any type. If assert['equals'] not specified,
evals as boolean.
- equals. optional. Any type.
If assert['this'] evaluates to False raises error.
If assert['equals'] is specified, raises error if
assert.this != assert.equals.
assert['this'] & assert['equals'] both support string substitutions.
Returns:
None
Raises:
ContextError: if assert evaluates to False.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
context.assert_key_has_value('assert', __name__)
assert_this = context['assert']['this']
is_equals_there = 'equals' in context['assert']
if is_equals_there:
assert_equals = context['assert']['equals']
# compare assertThis to assertEquals
logger.debug("comparing assert['this'] to assert['equals'].")
assert_result = (context.get_formatted_iterable(assert_this)
== context.get_formatted_iterable(assert_equals))
else:
# nothing to compare means treat assertThis as a bool.
logger.debug("evaluating assert['this'] as a boolean.")
assert_result = context.get_formatted_as_type(assert_this,
out_type=bool)
logger.info(f"assert evaluated to {assert_result}")
if not assert_result:
if is_equals_there:
# emit type to help user, but not the actual field contents.
type_this = (
type(context.get_formatted_iterable(assert_this)).__name__)
type_equals = (
type(context.get_formatted_iterable(assert_equals)).__name__)
error_text = (
f"assert assert['this'] is of type {type_this} "
f"and does not equal assert['equals'] of type {type_equals}.")
else:
# if it's a bool it's presumably not a sensitive value.
error_text = (
f"assert {assert_this} evaluated to False.")
raise ContextError(error_text)
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"assert",
"context",
",",
"f\"context must have value for {__name__}\"",
"deprecated",
"(",
"context",
")",
"context",
".",
"assert_key_has_value",
"(",
"'assert'",
",",
"__name__",
")",
"assert_this",
"=",
"context",
"[",
"'assert'",
"]",
"[",
"'this'",
"]",
"is_equals_there",
"=",
"'equals'",
"in",
"context",
"[",
"'assert'",
"]",
"if",
"is_equals_there",
":",
"assert_equals",
"=",
"context",
"[",
"'assert'",
"]",
"[",
"'equals'",
"]",
"# compare assertThis to assertEquals",
"logger",
".",
"debug",
"(",
"\"comparing assert['this'] to assert['equals'].\"",
")",
"assert_result",
"=",
"(",
"context",
".",
"get_formatted_iterable",
"(",
"assert_this",
")",
"==",
"context",
".",
"get_formatted_iterable",
"(",
"assert_equals",
")",
")",
"else",
":",
"# nothing to compare means treat assertThis as a bool.",
"logger",
".",
"debug",
"(",
"\"evaluating assert['this'] as a boolean.\"",
")",
"assert_result",
"=",
"context",
".",
"get_formatted_as_type",
"(",
"assert_this",
",",
"out_type",
"=",
"bool",
")",
"logger",
".",
"info",
"(",
"f\"assert evaluated to {assert_result}\"",
")",
"if",
"not",
"assert_result",
":",
"if",
"is_equals_there",
":",
"# emit type to help user, but not the actual field contents.",
"type_this",
"=",
"(",
"type",
"(",
"context",
".",
"get_formatted_iterable",
"(",
"assert_this",
")",
")",
".",
"__name__",
")",
"type_equals",
"=",
"(",
"type",
"(",
"context",
".",
"get_formatted_iterable",
"(",
"assert_equals",
")",
")",
".",
"__name__",
")",
"error_text",
"=",
"(",
"f\"assert assert['this'] is of type {type_this} \"",
"f\"and does not equal assert['equals'] of type {type_equals}.\"",
")",
"else",
":",
"# if it's a bool it's presumably not a sensitive value.",
"error_text",
"=",
"(",
"f\"assert {assert_this} evaluated to False.\"",
")",
"raise",
"ContextError",
"(",
"error_text",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Assert that something is True or equal to something else.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- assert
- this. mandatory. Any type. If assert['equals'] not specified,
evals as boolean.
- equals. optional. Any type.
If assert['this'] evaluates to False raises error.
If assert['equals'] is specified, raises error if
assert.this != assert.equals.
assert['this'] & assert['equals'] both support string substitutions.
Returns:
None
Raises:
ContextError: if assert evaluates to False. | [
"Assert",
"that",
"something",
"is",
"True",
"or",
"equal",
"to",
"something",
"else",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/assert.py#L9-L71 | [
"context"
] | What does this function do? | [
"Assert",
"that",
"something",
"is",
"True",
"or",
"equal",
"to",
"something",
"else",
"."
] |
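Because the module is literally named assert (a python keyword) it cannot be imported with a plain import statement when calling it directly; a sketch using importlib, with invented values.

import importlib
from pypyr.context import Context
from pypyr.errors import ContextError

assert_step = importlib.import_module('pypyr.steps.assert')

passes = Context({'breed': 'maine coon',
                  'assert': {'this': '{breed}', 'equals': 'maine coon'}})
assert_step.run_step(passes)          # equal after formatting, so no error raised

fails = Context({'assert': {'this': False}})
try:
    assert_step.run_step(fails)       # no equals key, so 'this' is evaluated as a bool
except ContextError as err:
    print(err)                        # assert False evaluated to False.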
pypyr/pypyr-cli | pypyr/steps/assert.py | deprecated | def deprecated(context):
"""Handle deprecated context input."""
assert_context = context.get('assert', None)
# specifically do "key in dict" to avoid python bool eval thinking
# None/Empty values mean the key isn't there.
if 'assertThis' in context:
assert_this = context['assertThis']
assert_context = context['assert'] = {'this': assert_this}
if 'assertEquals' in context:
assert_equals = context['assertEquals']
assert_context['equals'] = assert_equals
logger.warning("assertThis and assertEquals are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key assert instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new assert key for you under the hood.") | python | def deprecated(context):
"""Handle deprecated context input."""
assert_context = context.get('assert', None)
# specifically do "key in dict" to avoid python bool eval thinking
# None/Empty values mean the key isn't there.
if 'assertThis' in context:
assert_this = context['assertThis']
assert_context = context['assert'] = {'this': assert_this}
if 'assertEquals' in context:
assert_equals = context['assertEquals']
assert_context['equals'] = assert_equals
logger.warning("assertThis and assertEquals are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key assert instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new assert key for you under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"assert_context",
"=",
"context",
".",
"get",
"(",
"'assert'",
",",
"None",
")",
"# specifically do \"key in dict\" to avoid python bool eval thinking",
"# None/Empty values mean the key isn't there.",
"if",
"'assertThis'",
"in",
"context",
":",
"assert_this",
"=",
"context",
"[",
"'assertThis'",
"]",
"assert_context",
"=",
"context",
"[",
"'assert'",
"]",
"=",
"{",
"'this'",
":",
"assert_this",
"}",
"if",
"'assertEquals'",
"in",
"context",
":",
"assert_equals",
"=",
"context",
"[",
"'assertEquals'",
"]",
"assert_context",
"[",
"'equals'",
"]",
"=",
"assert_equals",
"logger",
".",
"warning",
"(",
"\"assertThis and assertEquals are deprecated. They will \"",
"\"stop working upon the next major release. \"",
"\"Use the new context key assert instead. It's a lot \"",
"\"better, promise! For the moment pypyr is creating the \"",
"\"new assert key for you under the hood.\"",
")"
] | Handle deprecated context input. | [
"Handle",
"deprecated",
"context",
"input",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/assert.py#L74-L91 | [
"context"
] | What does this function do? | [
"Handle",
"deprecated",
"context",
"input",
"."
] |
pypyr/pypyr-cli | pypyr/steps/tar.py | run_step | def run_step(context):
"""Archive and/or extract tars with or without compression.
Args:
context: dictionary-like. Mandatory.
Expects the following context:
tar:
extract:
- in: /path/my.tar
out: /out/path
archive:
- in: /dir/to/archive
out: /out/destination.tar
format: ''
tar['format'] - if not specified, defaults to lzma/xz
Available options:
- '' - no compression
- gz (gzip)
- bz2 (bzip2)
- xz (lzma)
This step will run whatever combination of Extract and Archive you specify.
Regardless of combination, execution order is Extract, Archive.
Source and destination paths support {key} string interpolation.
Never extract archives from untrusted sources without prior inspection.
It is possible that files are created outside of path, e.g. members that
have absolute filenames starting with "/" or filenames with two dots "..".
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
found_at_least_one = False
context.assert_key_has_value('tar', __name__)
tar = context['tar']
if 'extract' in tar:
found_at_least_one = True
tar_extract(context)
if 'archive' in tar:
found_at_least_one = True
tar_archive(context)
if not found_at_least_one:
# This will raise exception on first item with a problem.
raise KeyNotInContextError('pypyr.steps.tar must have either extract '
'or archive specified under the tar key. '
'Or both of these. It has neither.')
logger.debug("done") | python | def run_step(context):
"""Archive and/or extract tars with or without compression.
Args:
context: dictionary-like. Mandatory.
Expects the following context:
tar:
extract:
- in: /path/my.tar
out: /out/path
archive:
- in: /dir/to/archive
out: /out/destination.tar
format: ''
tar['format'] - if not specified, defaults to lzma/xz
Available options:
- '' - no compression
- gz (gzip)
- bz2 (bzip2)
- xz (lzma)
This step will run whatever combination of Extract and Archive you specify.
Regardless of combination, execution order is Extract, Archive.
Source and destination paths support {key} string interpolation.
Never extract archives from untrusted sources without prior inspection.
It is possible that files are created outside of path, e.g. members that
have absolute filenames starting with "/" or filenames with two dots "..".
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
found_at_least_one = False
context.assert_key_has_value('tar', __name__)
tar = context['tar']
if 'extract' in tar:
found_at_least_one = True
tar_extract(context)
if 'archive' in tar:
found_at_least_one = True
tar_archive(context)
if not found_at_least_one:
# This will raise exception on first item with a problem.
raise KeyNotInContextError('pypyr.steps.tar must have either extract '
'or archive specified under the tar key. '
'Or both of these. It has neither.')
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"assert",
"context",
",",
"f\"context must have value for {__name__}\"",
"deprecated",
"(",
"context",
")",
"found_at_least_one",
"=",
"False",
"context",
".",
"assert_key_has_value",
"(",
"'tar'",
",",
"__name__",
")",
"tar",
"=",
"context",
"[",
"'tar'",
"]",
"if",
"'extract'",
"in",
"tar",
":",
"found_at_least_one",
"=",
"True",
"tar_extract",
"(",
"context",
")",
"if",
"'archive'",
"in",
"tar",
":",
"found_at_least_one",
"=",
"True",
"tar_archive",
"(",
"context",
")",
"if",
"not",
"found_at_least_one",
":",
"# This will raise exception on first item with a problem.",
"raise",
"KeyNotInContextError",
"(",
"'pypyr.steps.tar must have either extract '",
"'or archive specified under the tar key. '",
"'Or both of these. It has neither.'",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Archive and/or extract tars with or without compression.
Args:
context: dictionary-like. Mandatory.
Expects the following context:
tar:
extract:
- in: /path/my.tar
out: /out/path
archive:
- in: /dir/to/archive
out: /out/destination.tar
format: ''
tar['format'] - if not specified, defaults to lzma/xz
Available options:
- '' - no compression
- gz (gzip)
- bz2 (bzip2)
- xz (lzma)
This step will run whatever combination of Extract and Archive you specify.
Regardless of combination, execution order is Extract, Archive.
Source and destination paths support {key} string interpolation.
Never extract archives from untrusted sources without prior inspection.
It is possible that files are created outside of path, e.g. members that
have absolute filenames starting with "/" or filenames with two dots "..". | [
"Archive",
"and",
"/",
"or",
"extract",
"tars",
"with",
"or",
"without",
"compression",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L10-L66 | [
"context"
] | What does this function do? | [
"Archive",
"and",
"/",
"or",
"extract",
"tars",
"with",
"or",
"without",
"compression",
"."
] |
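Sketch of an archive-only context for the tar step above; the paths and the gz choice are invented, and format may also be '' (none), bz2 or xz (the default).

from pypyr.context import Context
import pypyr.steps.tar as tar_step

context = Context({
    'tar': {
        'archive': [{'in': 'build/', 'out': 'dist/build.tar.gz'}],
        'format': 'gz',
    },
})
tar_step.run_step(context)   # no extract key, so only the archive list runs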
pypyr/pypyr-cli | pypyr/steps/tar.py | get_file_mode_for_reading | def get_file_mode_for_reading(context):
"""Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format.
"""
format = context['tar'].get('format', None)
if format or format == '':
mode = f"r:{context.get_formatted_string(format)}"
else:
mode = 'r:*'
return mode | python | def get_file_mode_for_reading(context):
"""Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format.
"""
format = context['tar'].get('format', None)
if format or format == '':
mode = f"r:{context.get_formatted_string(format)}"
else:
mode = 'r:*'
return mode | [
"def",
"get_file_mode_for_reading",
"(",
"context",
")",
":",
"format",
"=",
"context",
"[",
"'tar'",
"]",
".",
"get",
"(",
"'format'",
",",
"None",
")",
"if",
"format",
"or",
"format",
"==",
"''",
":",
"mode",
"=",
"f\"r:{context.get_formatted_string(format)}\"",
"else",
":",
"mode",
"=",
"'r:*'",
"return",
"mode"
] | Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format. | [
"Get",
"file",
"mode",
"for",
"reading",
"from",
"tar",
"[",
"format",
"]",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L69-L84 | [
"context"
] | What does this function do? | [
"Get",
"file",
"mode",
"for",
"reading",
"from",
"tar",
"[",
"format",
"]",
"."
] |
pypyr/pypyr-cli | pypyr/steps/tar.py | get_file_mode_for_writing | def get_file_mode_for_writing(context):
"""Get file mode for writing from tar['format'].
This should return w:, w:gz, w:bz2 or w:xz. If user specified something
wacky in tar.Format, that's their business.
"""
format = context['tar'].get('format', None)
# slightly weird double-check because falsy format could mean either format
# doesn't exist in input, OR that it exists and is empty. Exists-but-empty
# has special meaning - default to no compression.
if format or format == '':
mode = f"w:{context.get_formatted_string(format)}"
else:
mode = 'w:xz'
return mode | python | def get_file_mode_for_writing(context):
"""Get file mode for writing from tar['format'].
This should return w:, w:gz, w:bz2 or w:xz. If user specified something
wacky in tar.Format, that's their business.
"""
format = context['tar'].get('format', None)
# slightly weird double-check because falsy format could mean either format
# doesn't exist in input, OR that it exists and is empty. Exists-but-empty
# has special meaning - default to no compression.
if format or format == '':
mode = f"w:{context.get_formatted_string(format)}"
else:
mode = 'w:xz'
return mode | [
"def",
"get_file_mode_for_writing",
"(",
"context",
")",
":",
"format",
"=",
"context",
"[",
"'tar'",
"]",
".",
"get",
"(",
"'format'",
",",
"None",
")",
"# slightly weird double-check because falsy format could mean either format",
"# doesn't exist in input, OR that it exists and is empty. Exists-but-empty",
"# has special meaning - default to no compression.",
"if",
"format",
"or",
"format",
"==",
"''",
":",
"mode",
"=",
"f\"w:{context.get_formatted_string(format)}\"",
"else",
":",
"mode",
"=",
"'w:xz'",
"return",
"mode"
] | Get file mode for writing from tar['format'].
This should return w:, w:gz, w:bz2 or w:xz. If user specified something
wacky in tar.Format, that's their business. | [
"Get",
"file",
"mode",
"for",
"writing",
"from",
"tar",
"[",
"format",
"]",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L87-L102 | [
"context"
] | What does this function do? | [
"Get",
"file",
"mode",
"for",
"writing",
"from",
"tar",
"[",
"format",
"]",
"."
] |
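The exists-but-empty format has special meaning, which the two mode helpers above encode; a quick sketch.

from pypyr.context import Context
from pypyr.steps.tar import get_file_mode_for_reading, get_file_mode_for_writing

print(get_file_mode_for_writing(Context({'tar': {'format': ''}})))    # w:   (no compression)
print(get_file_mode_for_writing(Context({'tar': {}})))                # w:xz (default)
print(get_file_mode_for_reading(Context({'tar': {'format': 'gz'}})))  # r:gz
print(get_file_mode_for_reading(Context({'tar': {}})))                # r:*  (auto-detect)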
pypyr/pypyr-cli | pypyr/steps/tar.py | tar_archive | def tar_archive(context):
"""Archive specified path to a tar archive.
Args:
context: dictionary-like. context is mandatory.
context['tar']['archive'] must exist. It's a dictionary.
keys are the paths to archive.
values are the destination output paths.
Example:
tar:
archive:
- in: path/to/dir
out: path/to/destination.tar.xs
- in: another/my.file
out: ./my.tar.xs
This will archive directory path/to/dir to path/to/destination.tar.xs,
and also archive file another/my.file to ./my.tar.xs
"""
logger.debug("start")
mode = get_file_mode_for_writing(context)
for item in context['tar']['archive']:
# value is the destination tar. Allow string interpolation.
destination = context.get_formatted_string(item['out'])
# key is the source to archive
source = context.get_formatted_string(item['in'])
with tarfile.open(destination, mode) as archive_me:
logger.debug(f"Archiving '{source}' to '{destination}'")
archive_me.add(source, arcname='.')
logger.info(f"Archived '{source}' to '{destination}'")
logger.debug("end") | python | def tar_archive(context):
"""Archive specified path to a tar archive.
Args:
context: dictionary-like. context is mandatory.
context['tar']['archive'] must exist. It's a dictionary.
keys are the paths to archive.
values are the destination output paths.
Example:
tar:
archive:
- in: path/to/dir
out: path/to/destination.tar.xs
- in: another/my.file
out: ./my.tar.xs
This will archive directory path/to/dir to path/to/destination.tar.xs,
and also archive file another/my.file to ./my.tar.xs
"""
logger.debug("start")
mode = get_file_mode_for_writing(context)
for item in context['tar']['archive']:
# value is the destination tar. Allow string interpolation.
destination = context.get_formatted_string(item['out'])
# key is the source to archive
source = context.get_formatted_string(item['in'])
with tarfile.open(destination, mode) as archive_me:
logger.debug(f"Archiving '{source}' to '{destination}'")
archive_me.add(source, arcname='.')
logger.info(f"Archived '{source}' to '{destination}'")
logger.debug("end") | [
"def",
"tar_archive",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"start\"",
")",
"mode",
"=",
"get_file_mode_for_writing",
"(",
"context",
")",
"for",
"item",
"in",
"context",
"[",
"'tar'",
"]",
"[",
"'archive'",
"]",
":",
"# value is the destination tar. Allow string interpolation.",
"destination",
"=",
"context",
".",
"get_formatted_string",
"(",
"item",
"[",
"'out'",
"]",
")",
"# key is the source to archive",
"source",
"=",
"context",
".",
"get_formatted_string",
"(",
"item",
"[",
"'in'",
"]",
")",
"with",
"tarfile",
".",
"open",
"(",
"destination",
",",
"mode",
")",
"as",
"archive_me",
":",
"logger",
".",
"debug",
"(",
"f\"Archiving '{source}' to '{destination}'\"",
")",
"archive_me",
".",
"add",
"(",
"source",
",",
"arcname",
"=",
"'.'",
")",
"logger",
".",
"info",
"(",
"f\"Archived '{source}' to '{destination}'\"",
")",
"logger",
".",
"debug",
"(",
"\"end\"",
")"
] | Archive specified path to a tar archive.
Args:
context: dictionary-like. context is mandatory.
context['tar']['archive'] must exist. It's a dictionary.
keys are the paths to archive.
values are the destination output paths.
Example:
tar:
archive:
- in: path/to/dir
out: path/to/destination.tar.xs
- in: another/my.file
out: ./my.tar.xs
This will archive directory path/to/dir to path/to/destination.tar.xs,
and also archive file another/my.file to ./my.tar.xs | [
"Archive",
"specified",
"path",
"to",
"a",
"tar",
"archive",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L105-L140 | [
"context"
] | What does this function do? | [
"Archive",
"specified",
"path",
"to",
"a",
"tar",
"archive",
"."
] |
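
A standalone sketch of the archive loop in tar_archive above, using only the standard library; the paths are placeholders and 'w:xz' is an assumed compression mode (the get_file_mode_for_writing helper is not shown in this row).

import tarfile

archive_items = [
    {'in': 'path/to/dir', 'out': 'path/to/destination.tar.xz'},
    {'in': 'another/my.file', 'out': './my.tar.xz'},
]
for item in archive_items:
    # open the destination tar for writing with xz compression
    with tarfile.open(item['out'], 'w:xz') as archive_me:
        # add the source path; arcname='.' keeps entries relative inside the tar
        archive_me.add(item['in'], arcname='.')
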
pypyr/pypyr-cli | pypyr/steps/tar.py | tar_extract | def tar_extract(context):
"""Extract all members of tar archive to specified path.
Args:
context: dictionary-like. context is mandatory.
context['tar']['extract'] must exist. It's a dictionary.
keys are the path to the tar to extract.
values are the destination paths.
Example:
tar:
extract:
- in: path/to/my.tar.xs
out: /path/extract/here
- in: another/tar.xs
out: .
This will extract path/to/my.tar.xs to /path/extract/here, and also
extract another/tar.xs to $PWD.
"""
logger.debug("start")
mode = get_file_mode_for_reading(context)
for item in context['tar']['extract']:
# in is the path to the tar to extract. Allows string interpolation.
source = context.get_formatted_string(item['in'])
# out is the outdir, dhur. Allows string interpolation.
destination = context.get_formatted_string(item['out'])
with tarfile.open(source, mode) as extract_me:
logger.debug(f"Extracting '{source}' to '{destination}'")
extract_me.extractall(destination)
logger.info(f"Extracted '{source}' to '{destination}'")
logger.debug("end") | python | def tar_extract(context):
"""Extract all members of tar archive to specified path.
Args:
context: dictionary-like. context is mandatory.
context['tar']['extract'] must exist. It's a dictionary.
keys are the path to the tar to extract.
values are the destination paths.
Example:
tar:
extract:
- in: path/to/my.tar.xs
out: /path/extract/here
- in: another/tar.xs
out: .
This will extract path/to/my.tar.xs to /path/extract/here, and also
extract another/tar.xs to $PWD.
"""
logger.debug("start")
mode = get_file_mode_for_reading(context)
for item in context['tar']['extract']:
# in is the path to the tar to extract. Allows string interpolation.
source = context.get_formatted_string(item['in'])
# out is the outdir, dhur. Allows string interpolation.
destination = context.get_formatted_string(item['out'])
with tarfile.open(source, mode) as extract_me:
logger.debug(f"Extracting '{source}' to '{destination}'")
extract_me.extractall(destination)
logger.info(f"Extracted '{source}' to '{destination}'")
logger.debug("end") | [
"def",
"tar_extract",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"start\"",
")",
"mode",
"=",
"get_file_mode_for_reading",
"(",
"context",
")",
"for",
"item",
"in",
"context",
"[",
"'tar'",
"]",
"[",
"'extract'",
"]",
":",
"# in is the path to the tar to extract. Allows string interpolation.",
"source",
"=",
"context",
".",
"get_formatted_string",
"(",
"item",
"[",
"'in'",
"]",
")",
"# out is the outdir, dhur. Allows string interpolation.",
"destination",
"=",
"context",
".",
"get_formatted_string",
"(",
"item",
"[",
"'out'",
"]",
")",
"with",
"tarfile",
".",
"open",
"(",
"source",
",",
"mode",
")",
"as",
"extract_me",
":",
"logger",
".",
"debug",
"(",
"f\"Extracting '{source}' to '{destination}'\"",
")",
"extract_me",
".",
"extractall",
"(",
"destination",
")",
"logger",
".",
"info",
"(",
"f\"Extracted '{source}' to '{destination}'\"",
")",
"logger",
".",
"debug",
"(",
"\"end\"",
")"
] | Extract all members of tar archive to specified path.
Args:
context: dictionary-like. context is mandatory.
context['tar']['extract'] must exist. It's a dictionary.
keys are the path to the tar to extract.
values are the destination paths.
Example:
tar:
extract:
- in: path/to/my.tar.xs
out: /path/extract/here
- in: another/tar.xs
out: .
This will extract path/to/my.tar.xs to /path/extract/here, and also
extract another/tar.xs to $PWD. | [
"Extract",
"all",
"members",
"of",
"tar",
"archive",
"to",
"specified",
"path",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L143-L178 | [
"context"
] | What does this function do? | [
"Extract",
"all",
"members",
"of",
"tar",
"archive",
"to",
"specified",
"path",
"."
] |
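
The matching standalone sketch for tar_extract above, standard library only; the paths are placeholders and 'r:*' lets tarfile auto-detect the compression on read.

import tarfile

extract_items = [
    {'in': 'path/to/my.tar.xz', 'out': '/path/extract/here'},
    {'in': 'another/tar.xz', 'out': '.'},
]
for item in extract_items:
    with tarfile.open(item['in'], 'r:*') as extract_me:
        # extract every member into the requested output directory
        extract_me.extractall(item['out'])
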
pypyr/pypyr-cli | pypyr/steps/tar.py | deprecated | def deprecated(context):
"""Handle deprecated context input."""
tar = context.get('tar', None)
# at least 1 of tarExtract or tarArchive must exist in context
tar_extract, tar_archive = context.keys_of_type_exist(
('tarExtract', list),
('tarArchive', list))
found_at_least_one = (tar_extract.key_in_context
or tar_archive.key_in_context)
if tar and not found_at_least_one:
return
elif found_at_least_one:
tar = context['tar'] = {}
if tar_extract.key_in_context and tar_extract.is_expected_type:
tar['extract'] = context[tar_extract.key]
if tar_archive.key_in_context and tar_archive.is_expected_type:
tar['archive'] = context[tar_archive.key]
if 'tarFormat' in context:
tar['format'] = context['tarFormat']
logger.warning("tarExtract and tarArchive are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key env instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new env key for you under the hood.") | python | def deprecated(context):
"""Handle deprecated context input."""
tar = context.get('tar', None)
# at least 1 of tarExtract or tarArchive must exist in context
tar_extract, tar_archive = context.keys_of_type_exist(
('tarExtract', list),
('tarArchive', list))
found_at_least_one = (tar_extract.key_in_context
or tar_archive.key_in_context)
if tar and not found_at_least_one:
return
elif found_at_least_one:
tar = context['tar'] = {}
if tar_extract.key_in_context and tar_extract.is_expected_type:
tar['extract'] = context[tar_extract.key]
if tar_archive.key_in_context and tar_archive.is_expected_type:
tar['archive'] = context[tar_archive.key]
if 'tarFormat' in context:
tar['format'] = context['tarFormat']
logger.warning("tarExtract and tarArchive are deprecated. They will "
"stop working upon the next major release. "
"Use the new context key env instead. It's a lot "
"better, promise! For the moment pypyr is creating the "
"new env key for you under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"tar",
"=",
"context",
".",
"get",
"(",
"'tar'",
",",
"None",
")",
"# at least 1 of tarExtract or tarArchive must exist in context",
"tar_extract",
",",
"tar_archive",
"=",
"context",
".",
"keys_of_type_exist",
"(",
"(",
"'tarExtract'",
",",
"list",
")",
",",
"(",
"'tarArchive'",
",",
"list",
")",
")",
"found_at_least_one",
"=",
"(",
"tar_extract",
".",
"key_in_context",
"or",
"tar_archive",
".",
"key_in_context",
")",
"if",
"tar",
"and",
"not",
"found_at_least_one",
":",
"return",
"elif",
"found_at_least_one",
":",
"tar",
"=",
"context",
"[",
"'tar'",
"]",
"=",
"{",
"}",
"if",
"tar_extract",
".",
"key_in_context",
"and",
"tar_extract",
".",
"is_expected_type",
":",
"tar",
"[",
"'extract'",
"]",
"=",
"context",
"[",
"tar_extract",
".",
"key",
"]",
"if",
"tar_archive",
".",
"key_in_context",
"and",
"tar_archive",
".",
"is_expected_type",
":",
"tar",
"[",
"'archive'",
"]",
"=",
"context",
"[",
"tar_archive",
".",
"key",
"]",
"if",
"'tarFormat'",
"in",
"context",
":",
"tar",
"[",
"'format'",
"]",
"=",
"context",
"[",
"'tarFormat'",
"]",
"logger",
".",
"warning",
"(",
"\"tarExtract and tarArchive are deprecated. They will \"",
"\"stop working upon the next major release. \"",
"\"Use the new context key env instead. It's a lot \"",
"\"better, promise! For the moment pypyr is creating the \"",
"\"new env key for you under the hood.\"",
")"
] | Handle deprecated context input. | [
"Handle",
"deprecated",
"context",
"input",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L181-L211 | [
"context"
] | What does this function do? | [
"Handle",
"deprecated",
"context",
"input",
"."
] |
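
A plain-dict sketch of the key migration that deprecated performs, with made-up values; no pypyr Context methods are needed to illustrate the shape change.

context = {
    'tarExtract': [{'in': 'old.tar.xz', 'out': '.'}],
    'tarFormat': 'xz',
}
if 'tarExtract' in context or 'tarArchive' in context:
    tar = context['tar'] = {}
    if isinstance(context.get('tarExtract'), list):
        tar['extract'] = context['tarExtract']
    if isinstance(context.get('tarArchive'), list):
        tar['archive'] = context['tarArchive']
    if 'tarFormat' in context:
        tar['format'] = context['tarFormat']
# context['tar'] is now {'extract': [{'in': 'old.tar.xz', 'out': '.'}], 'format': 'xz'}
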
pypyr/pypyr-cli | pypyr/steps/shell.py | run_step | def run_step(context):
"""Run shell command without shell interpolation.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute command string in the shell as a sub-process.
The shell defaults to /bin/sh.
The context['cmd'] string must be formatted exactly as it would be when
typed at the shell prompt. This includes, for example, quoting or backslash
escaping filenames with spaces in them.
There is an exception to this: Escape curly braces: if you want a literal
curly brace, double it like {{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1"
"""
logger.debug("started")
CmdStep(name=__name__, context=context).run_step(is_shell=True)
logger.debug("done") | python | def run_step(context):
"""Run shell command without shell interpolation.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute command string in the shell as a sub-process.
The shell defaults to /bin/sh.
The context['cmd'] string must be formatted exactly as it would be when
typed at the shell prompt. This includes, for example, quoting or backslash
escaping filenames with spaces in them.
There is an exception to this: Escape curly braces: if you want a literal
curly brace, double it like {{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1"
"""
logger.debug("started")
CmdStep(name=__name__, context=context).run_step(is_shell=True)
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"CmdStep",
"(",
"name",
"=",
"__name__",
",",
"context",
"=",
"context",
")",
".",
"run_step",
"(",
"is_shell",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run shell command without shell interpolation.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute command string in the shell as a sub-process.
The shell defaults to /bin/sh.
The context['cmd'] string must be formatted exactly as it would be when
typed at the shell prompt. This includes, for example, quoting or backslash
escaping filenames with spaces in them.
There is an exception to this: Escape curly braces: if you want a literal
curly brace, double it like {{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1" | [
"Run",
"shell",
"command",
"without",
"shell",
"interpolation",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/shell.py#L14-L57 | [
"context"
] | What does this function do? | [
"Run",
"shell",
"command",
"without",
"shell",
"interpolation",
"."
] |
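
A minimal sketch of driving this step from Python rather than from a pipeline yaml, assuming pypyr is installed and that cmdOut is stored as a mapping, as the docstring layout suggests; the command itself is a harmless placeholder.

from pypyr.context import Context
import pypyr.steps.shell

context = Context({'cmd': {'run': 'echo hello', 'save': True}})
pypyr.steps.shell.run_step(context)
print(context['cmdOut']['returncode'])  # 0 on success
print(context['cmdOut']['stdout'])      # captured stdout, e.g. 'hello'
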
pypyr/pypyr-cli | pypyr/steps/envget.py | run_step | def run_step(context):
"""Get $ENVs, allowing a default if not found.
Set context properties from environment variables, and specify a default
if the environment variable is not found.
This differs from pypyr.steps.env get, which raises an error if attempting
to read an $ENV that doesn't exist.
Args:
context. mandatory. Context is a pypyr Context.
Input context is:
envGet:
- env: 'envvarnamehere'
key: 'savetocontexthere'
default: 'save this to key if env doesnt exist'
'env' is the bare environment variable name, do not put the $ in front of
it.
Will process as many env/key/default pairs as exist in the list under
envGet.
Returns:
None.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: envGet env or key doesn't exist.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
context.assert_key_has_value('envGet', __name__)
# allow a list OR a single getenv dict
if isinstance(context['envGet'], list):
get_items = context['envGet']
else:
get_items = [context['envGet']]
get_count = 0
for get_me in get_items:
(env, key, has_default, default) = get_args(get_me)
logger.debug(f"setting context {key} to $ENV {env}")
formatted_key = context.get_formatted_string(key)
formatted_env = context.get_formatted_string(env)
if formatted_env in os.environ:
context[formatted_key] = os.environ[formatted_env]
get_count += 1
else:
logger.debug(f"$ENV {env} not found.")
if has_default:
logger.debug(f"Using default value for {env} instead.")
formatted_default = context.get_formatted_iterable(default)
context[formatted_key] = os.environ.get(formatted_env,
formatted_default)
get_count += 1
else:
logger.debug(
f"No default value for {env} found. Doin nuthin'.")
logger.info(f"saved {get_count} $ENVs to context.") | python | def run_step(context):
"""Get $ENVs, allowing a default if not found.
Set context properties from environment variables, and specify a default
if the environment variable is not found.
This differs from pypyr.steps.env get, which raises an error if attempting
to read an $ENV that doesn't exist.
Args:
context. mandatory. Context is a pypyr Context.
Input context is:
envGet:
- env: 'envvarnamehere'
key: 'savetocontexthere'
default: 'save this to key if env doesnt exist'
'env' is the bare environment variable name, do not put the $ in front of
it.
Will process as many env/key/default pairs as exist in the list under
envGet.
Returns:
None.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: envGet env or key doesn't exist.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
context.assert_key_has_value('envGet', __name__)
# allow a list OR a single getenv dict
if isinstance(context['envGet'], list):
get_items = context['envGet']
else:
get_items = [context['envGet']]
get_count = 0
for get_me in get_items:
(env, key, has_default, default) = get_args(get_me)
logger.debug(f"setting context {key} to $ENV {env}")
formatted_key = context.get_formatted_string(key)
formatted_env = context.get_formatted_string(env)
if formatted_env in os.environ:
context[formatted_key] = os.environ[formatted_env]
get_count += 1
else:
logger.debug(f"$ENV {env} not found.")
if has_default:
logger.debug(f"Using default value for {env} instead.")
formatted_default = context.get_formatted_iterable(default)
context[formatted_key] = os.environ.get(formatted_env,
formatted_default)
get_count += 1
else:
logger.debug(
f"No default value for {env} found. Doin nuthin'.")
logger.info(f"saved {get_count} $ENVs to context.") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"assert",
"context",
",",
"f\"context must have value for {__name__}\"",
"context",
".",
"assert_key_has_value",
"(",
"'envGet'",
",",
"__name__",
")",
"# allow a list OR a single getenv dict",
"if",
"isinstance",
"(",
"context",
"[",
"'envGet'",
"]",
",",
"list",
")",
":",
"get_items",
"=",
"context",
"[",
"'envGet'",
"]",
"else",
":",
"get_items",
"=",
"[",
"context",
"[",
"'envGet'",
"]",
"]",
"get_count",
"=",
"0",
"for",
"get_me",
"in",
"get_items",
":",
"(",
"env",
",",
"key",
",",
"has_default",
",",
"default",
")",
"=",
"get_args",
"(",
"get_me",
")",
"logger",
".",
"debug",
"(",
"f\"setting context {key} to $ENV {env}\"",
")",
"formatted_key",
"=",
"context",
".",
"get_formatted_string",
"(",
"key",
")",
"formatted_env",
"=",
"context",
".",
"get_formatted_string",
"(",
"env",
")",
"if",
"formatted_env",
"in",
"os",
".",
"environ",
":",
"context",
"[",
"formatted_key",
"]",
"=",
"os",
".",
"environ",
"[",
"formatted_env",
"]",
"get_count",
"+=",
"1",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"$ENV {env} not found.\"",
")",
"if",
"has_default",
":",
"logger",
".",
"debug",
"(",
"f\"Using default value for {env} instead.\"",
")",
"formatted_default",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"default",
")",
"context",
"[",
"formatted_key",
"]",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"formatted_env",
",",
"formatted_default",
")",
"get_count",
"+=",
"1",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"No default value for {env} found. Doin nuthin'.\"",
")",
"logger",
".",
"info",
"(",
"f\"saved {get_count} $ENVs to context.\"",
")"
] | Get $ENVs, allowing a default if not found.
Set context properties from environment variables, and specify a default
if the environment variable is not found.
This differs from pypyr.steps.env get, which raises an error if attempting
to read an $ENV that doesn't exist.
Args:
context. mandatory. Context is a pypyr Context.
Input context is:
envGet:
- env: 'envvarnamehere'
key: 'savetocontexthere'
default: 'save this to key if env doesnt exist'
'env' is the bare environment variable name, do not put the $ in front of
it.
Will process as many env/key/default pairs as exist in the list under
envGet.
Returns:
None.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: envGet env or key doesn't exist. | [
"Get",
"$ENVs",
"allowing",
"a",
"default",
"if",
"not",
"found",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/envget.py#L10-L77 | [
"context"
] | What does this function do? | [
"Get",
"$ENVs",
"allowing",
"a",
"default",
"if",
"not",
"found",
"."
] |
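
A minimal sketch exercising envGet directly, assuming pypyr is installed; HOME is just an example variable (POSIX), and the second entry shows the default fallback for a missing variable.

import os
from pypyr.context import Context
import pypyr.steps.envget

context = Context({'envGet': [
    {'env': 'HOME', 'key': 'homeDir'},
    {'env': 'NO_SUCH_VAR_12345', 'key': 'fallback', 'default': 'use-this'},
]})
pypyr.steps.envget.run_step(context)
assert context['homeDir'] == os.environ['HOME']
assert context['fallback'] == 'use-this'
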
pypyr/pypyr-cli | pypyr/steps/envget.py | get_args | def get_args(get_item):
"""Parse env, key, default out of input dict.
Args:
get_item: dict. contains keys env/key/default
Returns:
(env, key, has_default, default) tuple, where
env: str. env var name.
key: str. save env value to this context key.
has_default: bool. True if default specified.
default: the value of default, if specified.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: If env or key not found in get_config.
"""
if not isinstance(get_item, dict):
raise ContextError('envGet must contain a list of dicts.')
env = get_item.get('env', None)
if not env:
raise KeyNotInContextError(
'context envGet[env] must exist in context for envGet.')
key = get_item.get('key', None)
if not key:
raise KeyNotInContextError(
'context envGet[key] must exist in context for envGet.')
if 'default' in get_item:
has_default = True
default = get_item['default']
else:
has_default = False
default = None
return (env, key, has_default, default) | python | def get_args(get_item):
"""Parse env, key, default out of input dict.
Args:
get_item: dict. contains keys env/key/default
Returns:
(env, key, has_default, default) tuple, where
env: str. env var name.
key: str. save env value to this context key.
has_default: bool. True if default specified.
default: the value of default, if specified.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: If env or key not found in get_config.
"""
if not isinstance(get_item, dict):
raise ContextError('envGet must contain a list of dicts.')
env = get_item.get('env', None)
if not env:
raise KeyNotInContextError(
'context envGet[env] must exist in context for envGet.')
key = get_item.get('key', None)
if not key:
raise KeyNotInContextError(
'context envGet[key] must exist in context for envGet.')
if 'default' in get_item:
has_default = True
default = get_item['default']
else:
has_default = False
default = None
return (env, key, has_default, default) | [
"def",
"get_args",
"(",
"get_item",
")",
":",
"if",
"not",
"isinstance",
"(",
"get_item",
",",
"dict",
")",
":",
"raise",
"ContextError",
"(",
"'envGet must contain a list of dicts.'",
")",
"env",
"=",
"get_item",
".",
"get",
"(",
"'env'",
",",
"None",
")",
"if",
"not",
"env",
":",
"raise",
"KeyNotInContextError",
"(",
"'context envGet[env] must exist in context for envGet.'",
")",
"key",
"=",
"get_item",
".",
"get",
"(",
"'key'",
",",
"None",
")",
"if",
"not",
"key",
":",
"raise",
"KeyNotInContextError",
"(",
"'context envGet[key] must exist in context for envGet.'",
")",
"if",
"'default'",
"in",
"get_item",
":",
"has_default",
"=",
"True",
"default",
"=",
"get_item",
"[",
"'default'",
"]",
"else",
":",
"has_default",
"=",
"False",
"default",
"=",
"None",
"return",
"(",
"env",
",",
"key",
",",
"has_default",
",",
"default",
")"
] | Parse env, key, default out of input dict.
Args:
get_item: dict. contains keys env/key/default
Returns:
(env, key, has_default, default) tuple, where
env: str. env var name.
key: str. save env value to this context key.
has_default: bool. True if default specified.
default: the value of default, if specified.
Raises:
ContextError: envGet is not a list of dicts.
KeyNotInContextError: If env or key not found in get_config. | [
"Parse",
"env",
"key",
"default",
"out",
"of",
"input",
"dict",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/envget.py#L80-L120 | [
"get_item"
] | What does this function do? | [
"Parse",
"env",
"key",
"default",
"out",
"of",
"input",
"dict",
"."
] |
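
A short sketch of the get_args contract described above; the env var name is arbitrary and the error type shown is the one the docstring says it raises.

from pypyr.steps.envget import get_args

env, key, has_default, default = get_args(
    {'env': 'PATH', 'key': 'pathValue', 'default': ''})
# -> ('PATH', 'pathValue', True, '')

try:
    get_args({'key': 'no env supplied'})
except Exception as err:
    print(type(err).__name__)  # KeyNotInContextError
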
pypyr/pypyr-cli | pypyr/steps/py.py | run_step | def run_step(context):
"""Executes dynamic python code.
Context is a dictionary or dictionary-like.
Context must contain key 'pycode'
Will exec context['pycode'] as dynamically interpreted python statements.
context is mandatory. When you execute the pipeline, it should look
something like this:
pipeline-runner [name here] 'pycode=print(1+1)'.
"""
logger.debug("started")
context.assert_key_has_value(key='pycode', caller=__name__)
logger.debug(f"Executing python string: {context['pycode']}")
locals_dictionary = locals()
exec(context['pycode'], globals(), locals_dictionary)
# It looks like this dance might be unnecessary in python 3.6
logger.debug("looking for context update in exec")
exec_context = locals_dictionary['context']
context.update(exec_context)
logger.debug("exec output context merged with pipeline context")
logger.debug("done") | python | def run_step(context):
"""Executes dynamic python code.
Context is a dictionary or dictionary-like.
Context must contain key 'pycode'
Will exec context['pycode'] as dynamically interpreted python statements.
context is mandatory. When you execute the pipeline, it should look
something like this:
pipeline-runner [name here] 'pycode=print(1+1)'.
"""
logger.debug("started")
context.assert_key_has_value(key='pycode', caller=__name__)
logger.debug(f"Executing python string: {context['pycode']}")
locals_dictionary = locals()
exec(context['pycode'], globals(), locals_dictionary)
# It looks like this dance might be unnecessary in python 3.6
logger.debug("looking for context update in exec")
exec_context = locals_dictionary['context']
context.update(exec_context)
logger.debug("exec output context merged with pipeline context")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'pycode'",
",",
"caller",
"=",
"__name__",
")",
"logger",
".",
"debug",
"(",
"f\"Executing python string: {context['pycode']}\"",
")",
"locals_dictionary",
"=",
"locals",
"(",
")",
"exec",
"(",
"context",
"[",
"'pycode'",
"]",
",",
"globals",
"(",
")",
",",
"locals_dictionary",
")",
"# It looks like this dance might be unnecessary in python 3.6",
"logger",
".",
"debug",
"(",
"\"looking for context update in exec\"",
")",
"exec_context",
"=",
"locals_dictionary",
"[",
"'context'",
"]",
"context",
".",
"update",
"(",
"exec_context",
")",
"logger",
".",
"debug",
"(",
"\"exec output context merged with pipeline context\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Executes dynamic python code.
Context is a dictionary or dictionary-like.
Context must contain key 'pycode'
Will exec context['pycode'] as dynamically interpreted python statements.
context is mandatory. When you execute the pipeline, it should look
something like this:
pipeline-runner [name here] 'pycode=print(1+1)'. | [
"Executes",
"dynamic",
"python",
"code",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/py.py#L11-L35 | [
"context"
] | What does this function do? | [
"Executes",
"dynamic",
"python",
"code",
"."
] |
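
A minimal sketch, assuming pypyr is installed; the exec'd pycode sees the pipeline context, so mutations land back in it.

from pypyr.context import Context
import pypyr.steps.py

context = Context({'pycode': "context['sum'] = 1 + 1"})
pypyr.steps.py.run_step(context)
assert context['sum'] == 2
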
pypyr/pypyr-cli | pypyr/parser/yamlfile.py | get_parsed_context | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
assert context_arg, ("pipeline must be invoked with context arg set. For "
"this yaml parser you're looking for something "
"like: "
"pypyr pipelinename './myyamlfile.yaml'")
logger.debug("starting")
logger.debug(f"attempting to open file: {context_arg}")
with open(context_arg) as yaml_file:
yaml_loader = yaml.YAML(typ='safe', pure=True)
payload = yaml_loader.load(yaml_file)
logger.debug(f"yaml file parsed. Count: {len(payload)}")
if not isinstance(payload, MutableMapping):
raise TypeError("yaml input should describe a dictionary at the top "
"level. You should have something like "
"\n'key1: value1'\n key2: value2'\n"
"in the yaml top-level, not \n'- value1\n - value2'")
logger.debug("done")
return payload | python | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
assert context_arg, ("pipeline must be invoked with context arg set. For "
"this yaml parser you're looking for something "
"like: "
"pypyr pipelinename './myyamlfile.yaml'")
logger.debug("starting")
logger.debug(f"attempting to open file: {context_arg}")
with open(context_arg) as yaml_file:
yaml_loader = yaml.YAML(typ='safe', pure=True)
payload = yaml_loader.load(yaml_file)
logger.debug(f"yaml file parsed. Count: {len(payload)}")
if not isinstance(payload, MutableMapping):
raise TypeError("yaml input should describe a dictionary at the top "
"level. You should have something like "
"\n'key1: value1'\n key2: value2'\n"
"in the yaml top-level, not \n'- value1\n - value2'")
logger.debug("done")
return payload | [
"def",
"get_parsed_context",
"(",
"context_arg",
")",
":",
"assert",
"context_arg",
",",
"(",
"\"pipeline must be invoked with context arg set. For \"",
"\"this yaml parser you're looking for something \"",
"\"like: \"",
"\"pypyr pipelinename './myyamlfile.yaml'\"",
")",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"logger",
".",
"debug",
"(",
"f\"attempting to open file: {context_arg}\"",
")",
"with",
"open",
"(",
"context_arg",
")",
"as",
"yaml_file",
":",
"yaml_loader",
"=",
"yaml",
".",
"YAML",
"(",
"typ",
"=",
"'safe'",
",",
"pure",
"=",
"True",
")",
"payload",
"=",
"yaml_loader",
".",
"load",
"(",
"yaml_file",
")",
"logger",
".",
"debug",
"(",
"f\"yaml file parsed. Count: {len(payload)}\"",
")",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"MutableMapping",
")",
":",
"raise",
"TypeError",
"(",
"\"yaml input should describe a dictionary at the top \"",
"\"level. You should have something like \"",
"\"\\n'key1: value1'\\n key2: value2'\\n\"",
"\"in the yaml top-level, not \\n'- value1\\n - value2'\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"payload"
] | Parse input context string and returns context as dictionary. | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/parser/yamlfile.py#L11-L32 | [
"context_arg"
] | What does this function do? | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] |
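
A minimal sketch, assuming pypyr is installed; it writes a throwaway yaml file first so the parser has something to read.

from pathlib import Path
from pypyr.parser.yamlfile import get_parsed_context

Path('ctx.yaml').write_text('key1: value1\nkey2: value2\n')
context = get_parsed_context('ctx.yaml')
# {'key1': 'value1', 'key2': 'value2'}; a top-level yaml list would raise TypeError.
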
pypyr/pypyr-cli | pypyr/cli.py | get_parser | def get_parser():
"""Return ArgumentParser for pypyr cli."""
parser = argparse.ArgumentParser(
allow_abbrev=True,
description='pypyr pipeline runner')
parser.add_argument('pipeline_name',
help='Name of pipeline to run. It should exist in the '
'./pipelines directory.')
parser.add_argument(dest='pipeline_context',
nargs='?',
help='String for context values. Parsed by the '
'pipeline\'s context_parser function.')
parser.add_argument('--dir', dest='working_dir', default=os.getcwd(),
help='Working directory. Use if your pipelines '
'directory is elsewhere. Defaults to cwd.')
parser.add_argument('--log', '--loglevel', dest='log_level', type=int,
default=20,
help='Integer log level. Defaults to 20 (INFO). '
'10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL'
'.\n Log Level < 10 gives full traceback on errors.')
parser.add_argument('--logpath', dest='log_path',
help='Log-file path. Append log output to this path')
parser.add_argument('--version', action='version',
help='Echo version number.',
version=f'{pypyr.version.get_version()}')
return parser | python | def get_parser():
"""Return ArgumentParser for pypyr cli."""
parser = argparse.ArgumentParser(
allow_abbrev=True,
description='pypyr pipeline runner')
parser.add_argument('pipeline_name',
help='Name of pipeline to run. It should exist in the '
'./pipelines directory.')
parser.add_argument(dest='pipeline_context',
nargs='?',
help='String for context values. Parsed by the '
'pipeline\'s context_parser function.')
parser.add_argument('--dir', dest='working_dir', default=os.getcwd(),
help='Working directory. Use if your pipelines '
'directory is elsewhere. Defaults to cwd.')
parser.add_argument('--log', '--loglevel', dest='log_level', type=int,
default=20,
help='Integer log level. Defaults to 20 (INFO). '
'10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL'
'.\n Log Level < 10 gives full traceback on errors.')
parser.add_argument('--logpath', dest='log_path',
help='Log-file path. Append log output to this path')
parser.add_argument('--version', action='version',
help='Echo version number.',
version=f'{pypyr.version.get_version()}')
return parser | [
"def",
"get_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"allow_abbrev",
"=",
"True",
",",
"description",
"=",
"'pypyr pipeline runner'",
")",
"parser",
".",
"add_argument",
"(",
"'pipeline_name'",
",",
"help",
"=",
"'Name of pipeline to run. It should exist in the '",
"'./pipelines directory.'",
")",
"parser",
".",
"add_argument",
"(",
"dest",
"=",
"'pipeline_context'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'String for context values. Parsed by the '",
"'pipeline\\'s context_parser function.'",
")",
"parser",
".",
"add_argument",
"(",
"'--dir'",
",",
"dest",
"=",
"'working_dir'",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"help",
"=",
"'Working directory. Use if your pipelines '",
"'directory is elsewhere. Defaults to cwd.'",
")",
"parser",
".",
"add_argument",
"(",
"'--log'",
",",
"'--loglevel'",
",",
"dest",
"=",
"'log_level'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"20",
",",
"help",
"=",
"'Integer log level. Defaults to 20 (INFO). '",
"'10=DEBUG\\n20=INFO\\n30=WARNING\\n40=ERROR\\n50=CRITICAL'",
"'.\\n Log Level < 10 gives full traceback on errors.'",
")",
"parser",
".",
"add_argument",
"(",
"'--logpath'",
",",
"dest",
"=",
"'log_path'",
",",
"help",
"=",
"'Log-file path. Append log output to this path'",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"help",
"=",
"'Echo version number.'",
",",
"version",
"=",
"f'{pypyr.version.get_version()}'",
")",
"return",
"parser"
] | Return ArgumentParser for pypyr cli. | [
"Return",
"ArgumentParser",
"for",
"pypyr",
"cli",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/cli.py#L19-L44 | [] | What does this function do? | [
"Return",
"ArgumentParser",
"for",
"pypyr",
"cli",
"."
] |
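
A sketch of parsing a typical invocation with the parser defined above; the pipeline name and context string are placeholders.

from pypyr.cli import get_parser

parser = get_parser()
args = parser.parse_args(['mypipeline', 'key1=value1', '--log', '10'])
# args.pipeline_name == 'mypipeline'
# args.pipeline_context == 'key1=value1'
# args.log_level == 10; args.working_dir defaults to the current directory.
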
pypyr/pypyr-cli | pypyr/cli.py | main | def main(args=None):
"""Entry point for pypyr cli.
The setup_py entry_point wraps this in sys.exit already so this effectively
becomes sys.exit(main()).
The __main__ entry point similarly wraps sys.exit().
"""
if args is None:
args = sys.argv[1:]
parsed_args = get_args(args)
try:
return pypyr.pipelinerunner.main(
pipeline_name=parsed_args.pipeline_name,
pipeline_context_input=parsed_args.pipeline_context,
working_dir=parsed_args.working_dir,
log_level=parsed_args.log_level,
log_path=parsed_args.log_path)
except KeyboardInterrupt:
# Shell standard is 128 + signum = 130 (SIGINT = 2)
sys.stdout.write("\n")
return 128 + signal.SIGINT
except Exception as e:
# stderr and exit code 255
sys.stderr.write("\n")
sys.stderr.write(f"\033[91m{type(e).__name__}: {str(e)}\033[0;0m")
sys.stderr.write("\n")
# at this point, you're guaranteed to have args and thus log_level
if parsed_args.log_level < 10:
# traceback prints to stderr by default
traceback.print_exc()
return 255 | python | def main(args=None):
"""Entry point for pypyr cli.
The setup_py entry_point wraps this in sys.exit already so this effectively
becomes sys.exit(main()).
The __main__ entry point similarly wraps sys.exit().
"""
if args is None:
args = sys.argv[1:]
parsed_args = get_args(args)
try:
return pypyr.pipelinerunner.main(
pipeline_name=parsed_args.pipeline_name,
pipeline_context_input=parsed_args.pipeline_context,
working_dir=parsed_args.working_dir,
log_level=parsed_args.log_level,
log_path=parsed_args.log_path)
except KeyboardInterrupt:
# Shell standard is 128 + signum = 130 (SIGINT = 2)
sys.stdout.write("\n")
return 128 + signal.SIGINT
except Exception as e:
# stderr and exit code 255
sys.stderr.write("\n")
sys.stderr.write(f"\033[91m{type(e).__name__}: {str(e)}\033[0;0m")
sys.stderr.write("\n")
# at this point, you're guaranteed to have args and thus log_level
if parsed_args.log_level < 10:
# traceback prints to stderr by default
traceback.print_exc()
return 255 | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"parsed_args",
"=",
"get_args",
"(",
"args",
")",
"try",
":",
"return",
"pypyr",
".",
"pipelinerunner",
".",
"main",
"(",
"pipeline_name",
"=",
"parsed_args",
".",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"parsed_args",
".",
"pipeline_context",
",",
"working_dir",
"=",
"parsed_args",
".",
"working_dir",
",",
"log_level",
"=",
"parsed_args",
".",
"log_level",
",",
"log_path",
"=",
"parsed_args",
".",
"log_path",
")",
"except",
"KeyboardInterrupt",
":",
"# Shell standard is 128 + signum = 130 (SIGINT = 2)",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")",
"return",
"128",
"+",
"signal",
".",
"SIGINT",
"except",
"Exception",
"as",
"e",
":",
"# stderr and exit code 255",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\n\"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"f\"\\033[91m{type(e).__name__}: {str(e)}\\033[0;0m\"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\n\"",
")",
"# at this point, you're guaranteed to have args and thus log_level",
"if",
"parsed_args",
".",
"log_level",
"<",
"10",
":",
"# traceback prints to stderr by default",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"255"
] | Entry point for pypyr cli.
The setup_py entry_point wraps this in sys.exit already so this effectively
becomes sys.exit(main()).
The __main__ entry point similarly wraps sys.exit(). | [
"Entry",
"point",
"for",
"pypyr",
"cli",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/cli.py#L47-L80 | [
"args"
] | What does this function do? | [
"Entry",
"point",
"for",
"pypyr",
"cli",
"."
] |
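
A sketch of how a console entry point could wrap main, as the docstring describes; 'mypipeline' is a placeholder, so running this verbatim would simply return the error exit code.

import sys
import pypyr.cli

if __name__ == '__main__':
    # 0 on success, 128 + SIGINT (130) on Ctrl-C, 255 on any other error.
    sys.exit(pypyr.cli.main(['mypipeline', 'key1=value1', '--log', '20']))
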
pypyr/pypyr-cli | pypyr/steps/contextclear.py | run_step | def run_step(context):
"""Remove specified keys from context.
Args:
Context is a dictionary or dictionary-like.
context['contextClear'] must exist. It's a dictionary.
Will iterate context['contextClear'] and remove those keys from
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
key4: value4
contextClear:
- key2
- key4
- contextClear
This will result in return context:
key1: value1
key3: value3
"""
logger.debug("started")
context.assert_key_has_value(key='contextClear', caller=__name__)
for k in context['contextClear']:
logger.debug(f"removing {k} from context")
# slightly unorthodox pop returning None means you don't get a KeyError
# if key doesn't exist
context.pop(k, None)
logger.info(f"removed {k} from context")
logger.debug("done") | python | def run_step(context):
"""Remove specified keys from context.
Args:
Context is a dictionary or dictionary-like.
context['contextClear'] must exist. It's a dictionary.
Will iterate context['contextClear'] and remove those keys from
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
key4: value4
contextClear:
- key2
- key4
- contextClear
This will result in return context:
key1: value1
key3: value3
"""
logger.debug("started")
context.assert_key_has_value(key='contextClear', caller=__name__)
for k in context['contextClear']:
logger.debug(f"removing {k} from context")
# slightly unorthodox pop returning None means you don't get a KeyError
# if key doesn't exist
context.pop(k, None)
logger.info(f"removed {k} from context")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'contextClear'",
",",
"caller",
"=",
"__name__",
")",
"for",
"k",
"in",
"context",
"[",
"'contextClear'",
"]",
":",
"logger",
".",
"debug",
"(",
"f\"removing {k} from context\"",
")",
"# slightly unorthodox pop returning None means you don't get a KeyError",
"# if key doesn't exist",
"context",
".",
"pop",
"(",
"k",
",",
"None",
")",
"logger",
".",
"info",
"(",
"f\"removed {k} from context\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Remove specified keys from context.
Args:
Context is a dictionary or dictionary-like.
context['contextClear'] must exist. It's a dictionary.
Will iterate context['contextClear'] and remove those keys from
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
key4: value4
contextClear:
- key2
- key4
- contextClear
This will result in return context:
key1: value1
key3: value3 | [
"Remove",
"specified",
"keys",
"from",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextclear.py#L13-L46 | [
"context"
] | What does this function do? | [
"Remove",
"specified",
"keys",
"from",
"context",
"."
] |
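
A minimal sketch mirroring the docstring example above, assuming pypyr is installed.

from pypyr.context import Context
import pypyr.steps.contextclear

context = Context({
    'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': 'value4',
    'contextClear': ['key2', 'key4', 'contextClear'],
})
pypyr.steps.contextclear.run_step(context)
# context is now {'key1': 'value1', 'key3': 'value3'}
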
pypyr/pypyr-cli | pypyr/steps/safeshell.py | run_step | def run_step(context):
"""Run command, program or executable.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute the command string in the shell as a sub-process.
Escape curly braces: if you want a literal curly brace, double it like
{{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1"
"""
logger.debug("started")
pypyr.steps.cmd.run_step(context)
logger.debug("done") | python | def run_step(context):
"""Run command, program or executable.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute the command string in the shell as a sub-process.
Escape curly braces: if you want a literal curly brace, double it like
{{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1"
"""
logger.debug("started")
pypyr.steps.cmd.run_step(context)
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"pypyr",
".",
"steps",
".",
"cmd",
".",
"run_step",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run command, program or executable.
Context is a dictionary or dictionary-like.
Context must contain the following keys:
cmd: <<cmd string>> (command + args to execute.)
OR, as a dict
cmd:
run: str. mandatory. <<cmd string>> command + args to execute.
save: bool. defaults False. save output to cmdOut.
Will execute the command string in the shell as a sub-process.
Escape curly braces: if you want a literal curly brace, double it like
{{ or }}.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
context['cmd'] will interpolate anything in curly braces for values
found in context. So if your context looks like this:
key1: value1
key2: value2
cmd: mything --arg1 {key1}
The cmd passed to the shell will be "mything --arg value1" | [
"Run",
"command",
"program",
"or",
"executable",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/safeshell.py#L16-L55 | [
"context"
] | What does this function do? | [
"Run",
"command",
"program",
"or",
"executable",
"."
] |
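
A minimal sketch, assuming pypyr is installed; because this step just delegates to pypyr.steps.cmd, the context shape is the same 'cmd' input, and echo is assumed to be available as a program on the path.

from pypyr.context import Context
import pypyr.steps.safeshell

context = Context({'cmd': {'run': 'echo hello', 'save': True}})
pypyr.steps.safeshell.run_step(context)
print(context['cmdOut']['returncode'])  # 0 on success
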
pypyr/pypyr-cli | pypyr/steps/default.py | run_step | def run_step(context):
"""Set hierarchy into context with substitutions if it doesn't exist yet.
context is a dictionary or dictionary-like.
context['defaults'] must exist. It's a dictionary.
Will iterate context['defaults'] and add these as new values where
their keys don't already exist. While it's doing so, it will leave
all other values in the existing hierarchy untouched.
List merging is purely additive, with no checks for uniqueness or already
existing list items. E.g context [0,1,2] with contextMerge=[2,3,4]
will result in [0,1,2,2,3,4]
Keep this in mind especially where complex types like
dicts nest inside a list - a merge will always add a new dict list item,
not merge it into whatever dicts might exist on the list already.
For example, say input context is:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
defaults:
key2: 'aaa_{key1}_zzz'
key3:
k33: value33
key4: 'bbb_{key2}_yyy'
This will result in return context:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
k33: value33
key4: bbb_value2_yyy
"""
logger.debug("started")
context.assert_key_has_value(key='defaults', caller=__name__)
context.set_defaults(context['defaults'])
logger.info(f"set {len(context['defaults'])} context item defaults.")
logger.debug("done") | python | def run_step(context):
"""Set hierarchy into context with substitutions if it doesn't exist yet.
context is a dictionary or dictionary-like.
context['defaults'] must exist. It's a dictionary.
Will iterate context['defaults'] and add these as new values where
their keys don't already exist. While it's doing so, it will leave
all other values in the existing hierarchy untouched.
List merging is purely additive, with no checks for uniqueness or already
existing list items. E.g context [0,1,2] with contextMerge=[2,3,4]
will result in [0,1,2,2,3,4]
Keep this in mind especially where complex types like
dicts nest inside a list - a merge will always add a new dict list item,
not merge it into whatever dicts might exist on the list already.
For example, say input context is:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
defaults:
key2: 'aaa_{key1}_zzz'
key3:
k33: value33
key4: 'bbb_{key2}_yyy'
This will result in return context:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
k33: value33
key4: bbb_value2_yyy
"""
logger.debug("started")
context.assert_key_has_value(key='defaults', caller=__name__)
context.set_defaults(context['defaults'])
logger.info(f"set {len(context['defaults'])} context item defaults.")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'defaults'",
",",
"caller",
"=",
"__name__",
")",
"context",
".",
"set_defaults",
"(",
"context",
"[",
"'defaults'",
"]",
")",
"logger",
".",
"info",
"(",
"f\"set {len(context['defaults'])} context item defaults.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Set hierarchy into context with substitutions if it doesn't exist yet.
context is a dictionary or dictionary-like.
context['defaults'] must exist. It's a dictionary.
Will iterate context['defaults'] and add these as new values where
their keys don't already exist. While it's doing so, it will leave
all other values in the existing hierarchy untouched.
List merging is purely additive, with no checks for uniqueness or already
existing list items. E.g context [0,1,2] with contextMerge=[2,3,4]
will result in [0,1,2,2,3,4]
Keep this in mind especially where complex types like
dicts nest inside a list - a merge will always add a new dict list item,
not merge it into whatever dicts might exist on the list already.
For example, say input context is:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
defaults:
key2: 'aaa_{key1}_zzz'
key3:
k33: value33
key4: 'bbb_{key2}_yyy'
This will result in return context:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
k33: value33
key4: bbb_value2_yyy | [
"Set",
"hierarchy",
"into",
"context",
"with",
"substitutions",
"if",
"it",
"doesn",
"t",
"exist",
"yet",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/default.py#L38-L84 | [
"context"
] | What does this function do? | [
"Set",
"hierarchy",
"into",
"context",
"with",
"substitutions",
"if",
"it",
"doesn",
"t",
"exist",
"yet",
"."
] |
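
A minimal sketch mirroring the docstring example above, assuming pypyr is installed; existing keys win, missing keys get the (formatted) default.

from pypyr.context import Context
import pypyr.steps.default

context = Context({
    'key1': 'value1',
    'key2': 'value2',
    'defaults': {'key2': 'aaa_{key1}_zzz', 'key4': 'bbb_{key2}_yyy'},
})
pypyr.steps.default.run_step(context)
# key2 stays 'value2'; key4 is added as 'bbb_value2_yyy'.
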
pypyr/pypyr-cli | pypyr/stepsrunner.py | get_pipeline_steps | def get_pipeline_steps(pipeline, steps_group):
"""Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it.
"""
logger.debug("starting")
assert pipeline
assert steps_group
logger.debug(f"retrieving {steps_group} steps from pipeline")
if steps_group in pipeline:
steps = pipeline[steps_group]
if steps is None:
logger.warn(
f"{steps_group}: sequence has no elements. So it won't do "
"anything.")
logger.debug("done")
return None
steps_count = len(steps)
logger.debug(f"{steps_count} steps found under {steps_group} in "
"pipeline definition.")
logger.debug("done")
return steps
else:
logger.debug(
f"pipeline doesn't have a {steps_group} collection. Add a "
f"{steps_group}: sequence to the yaml if you want {steps_group} "
"actually to do something.")
logger.debug("done")
return None | python | def get_pipeline_steps(pipeline, steps_group):
"""Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it.
"""
logger.debug("starting")
assert pipeline
assert steps_group
logger.debug(f"retrieving {steps_group} steps from pipeline")
if steps_group in pipeline:
steps = pipeline[steps_group]
if steps is None:
logger.warn(
f"{steps_group}: sequence has no elements. So it won't do "
"anything.")
logger.debug("done")
return None
steps_count = len(steps)
logger.debug(f"{steps_count} steps found under {steps_group} in "
"pipeline definition.")
logger.debug("done")
return steps
else:
logger.debug(
f"pipeline doesn't have a {steps_group} collection. Add a "
f"{steps_group}: sequence to the yaml if you want {steps_group} "
"actually to do something.")
logger.debug("done")
return None | [
"def",
"get_pipeline_steps",
"(",
"pipeline",
",",
"steps_group",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"assert",
"pipeline",
"assert",
"steps_group",
"logger",
".",
"debug",
"(",
"f\"retrieving {steps_group} steps from pipeline\"",
")",
"if",
"steps_group",
"in",
"pipeline",
":",
"steps",
"=",
"pipeline",
"[",
"steps_group",
"]",
"if",
"steps",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"f\"{steps_group}: sequence has no elements. So it won't do \"",
"\"anything.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"None",
"steps_count",
"=",
"len",
"(",
"steps",
")",
"logger",
".",
"debug",
"(",
"f\"{steps_count} steps found under {steps_group} in \"",
"\"pipeline definition.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"steps",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"pipeline doesn't have a {steps_group} collection. Add a \"",
"f\"{steps_group}: sequence to the yaml if you want {steps_group} \"",
"\"actually to do something.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"None"
] | Get the steps attribute of module pipeline.
If there is no steps sequence on the pipeline, return None. Guess you
could theoretically want to run a pipeline with nothing in it. | [
"Get",
"the",
"steps",
"attribute",
"of",
"module",
"pipeline",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L13-L47 | [
"pipeline",
"steps_group"
] | What does this function do? | [
"Get",
"the",
"steps",
"attribute",
"of",
"module",
"pipeline",
"."
] |
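
A plain-dict sketch of the lookup behaviour described above; the step module name inside the list is only illustrative, since nothing gets executed here.

from pypyr.stepsrunner import get_pipeline_steps

pipeline = {'steps': [{'name': 'pypyr.steps.echo'}], 'on_failure': None}
get_pipeline_steps(pipeline, 'steps')          # returns the one-item step list
get_pipeline_steps(pipeline, 'on_failure')     # None, with a warning logged
get_pipeline_steps(pipeline, 'no_such_group')  # None, group absent
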
pypyr/pypyr-cli | pypyr/stepsrunner.py | run_failure_step_group | def run_failure_step_group(pipeline, context):
"""Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with.
"""
logger.debug("starting")
try:
assert pipeline
# if no on_failure exists, it'll do nothing.
run_step_group(pipeline_definition=pipeline,
step_group_name='on_failure',
context=context)
except Exception as exception:
logger.error("Failure handler also failed. Swallowing.")
logger.error(exception)
logger.debug("done") | python | def run_failure_step_group(pipeline, context):
"""Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with.
"""
logger.debug("starting")
try:
assert pipeline
# if no on_failure exists, it'll do nothing.
run_step_group(pipeline_definition=pipeline,
step_group_name='on_failure',
context=context)
except Exception as exception:
logger.error("Failure handler also failed. Swallowing.")
logger.error(exception)
logger.debug("done") | [
"def",
"run_failure_step_group",
"(",
"pipeline",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"try",
":",
"assert",
"pipeline",
"# if no on_failure exists, it'll do nothing.",
"run_step_group",
"(",
"pipeline_definition",
"=",
"pipeline",
",",
"step_group_name",
"=",
"'on_failure'",
",",
"context",
"=",
"context",
")",
"except",
"Exception",
"as",
"exception",
":",
"logger",
".",
"error",
"(",
"\"Failure handler also failed. Swallowing.\"",
")",
"logger",
".",
"error",
"(",
"exception",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with. | [
"Run",
"the",
"on_failure",
"step",
"group",
"if",
"it",
"exists",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L50-L67 | [
"pipeline",
"context"
] | What does this function do? | [
"Run",
"the",
"on_failure",
"step",
"group",
"if",
"it",
"exists",
"."
] |
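
A small sketch of the failure-handler shape this function looks for, assuming pypyr is installed and that pypyr.steps.echo is the built-in step that prints context['echoMe']; any error inside the handler is swallowed, per the docstring.

from pypyr.context import Context
from pypyr.stepsrunner import run_failure_step_group

pipeline = {'on_failure': [{'name': 'pypyr.steps.echo'}]}
run_failure_step_group(pipeline, Context({'echoMe': 'cleaning up after failure'}))
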
pypyr/pypyr-cli | pypyr/stepsrunner.py | run_pipeline_steps | def run_pipeline_steps(steps, context):
"""Run the run_step(context) method of each step in steps.
Args:
steps: list. Sequence of Steps to execute
context: pypyr.context.Context. The pypyr context. Will mutate.
"""
logger.debug("starting")
assert isinstance(
context, dict), "context must be a dictionary, even if empty {}."
if steps is None:
logger.debug("No steps found to execute.")
else:
step_count = 0
for step in steps:
step_instance = Step(step)
step_instance.run_step(context)
step_count += 1
logger.debug(f"executed {step_count} steps")
logger.debug("done") | python | def run_pipeline_steps(steps, context):
"""Run the run_step(context) method of each step in steps.
Args:
steps: list. Sequence of Steps to execute
context: pypyr.context.Context. The pypyr context. Will mutate.
"""
logger.debug("starting")
assert isinstance(
context, dict), "context must be a dictionary, even if empty {}."
if steps is None:
logger.debug("No steps found to execute.")
else:
step_count = 0
for step in steps:
step_instance = Step(step)
step_instance.run_step(context)
step_count += 1
logger.debug(f"executed {step_count} steps")
logger.debug("done") | [
"def",
"run_pipeline_steps",
"(",
"steps",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"assert",
"isinstance",
"(",
"context",
",",
"dict",
")",
",",
"\"context must be a dictionary, even if empty {}.\"",
"if",
"steps",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"No steps found to execute.\"",
")",
"else",
":",
"step_count",
"=",
"0",
"for",
"step",
"in",
"steps",
":",
"step_instance",
"=",
"Step",
"(",
"step",
")",
"step_instance",
".",
"run_step",
"(",
"context",
")",
"step_count",
"+=",
"1",
"logger",
".",
"debug",
"(",
"f\"executed {step_count} steps\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run the run_step(context) method of each step in steps.
Args:
steps: list. Sequence of Steps to execute
context: pypyr.context.Context. The pypyr context. Will mutate. | [
"Run",
"the",
"run_step",
"(",
"context",
")",
"method",
"of",
"each",
"step",
"in",
"steps",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L70-L93 | [
"steps",
"context"
] | What does this function do? | [
"Run",
"the",
"run_step",
"(",
"context",
")",
"method",
"of",
"each",
"step",
"in",
"steps",
"."
] |
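
A minimal sketch, assuming pypyr is installed and that pypyr.steps.echo is the built-in step reading context['echoMe']; each list entry becomes a Step and runs in order.

from pypyr.context import Context
from pypyr.stepsrunner import run_pipeline_steps

steps = [{'name': 'pypyr.steps.echo'}]
run_pipeline_steps(steps=steps, context=Context({'echoMe': 'hello from a step'}))
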
pypyr/pypyr-cli | pypyr/stepsrunner.py | run_step_group | def run_step_group(pipeline_definition, step_group_name, context):
"""Get the specified step group from the pipeline and run its steps."""
logger.debug(f"starting {step_group_name}")
assert step_group_name
steps = get_pipeline_steps(pipeline=pipeline_definition,
steps_group=step_group_name)
run_pipeline_steps(steps=steps, context=context)
logger.debug(f"done {step_group_name}") | python | def run_step_group(pipeline_definition, step_group_name, context):
"""Get the specified step group from the pipeline and run its steps."""
logger.debug(f"starting {step_group_name}")
assert step_group_name
steps = get_pipeline_steps(pipeline=pipeline_definition,
steps_group=step_group_name)
run_pipeline_steps(steps=steps, context=context)
logger.debug(f"done {step_group_name}") | [
"def",
"run_step_group",
"(",
"pipeline_definition",
",",
"step_group_name",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"f\"starting {step_group_name}\"",
")",
"assert",
"step_group_name",
"steps",
"=",
"get_pipeline_steps",
"(",
"pipeline",
"=",
"pipeline_definition",
",",
"steps_group",
"=",
"step_group_name",
")",
"run_pipeline_steps",
"(",
"steps",
"=",
"steps",
",",
"context",
"=",
"context",
")",
"logger",
".",
"debug",
"(",
"f\"done {step_group_name}\"",
")"
] | Get the specified step group from the pipeline and run its steps. | [
"Get",
"the",
"specified",
"step",
"group",
"from",
"the",
"pipeline",
"and",
"run",
"its",
"steps",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L96-L106 | [
"pipeline_definition",
"step_group_name",
"context"
] | What does this function do? | [
"Get",
"the",
"specified",
"step",
"group",
"from",
"the",
"pipeline",
"and",
"run",
"its",
"steps",
"."
] |
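Hedged sketch of calling run_step_group with an already-parsed pipeline dict; the group content below is an assumption for illustration only.

    from pypyr.context import Context
    import pypyr.stepsrunner

    pipeline_definition = {'steps': ['pypyr.steps.echo']}  # assumed minimal pipeline
    pypyr.stepsrunner.run_step_group(
        pipeline_definition=pipeline_definition,
        step_group_name='steps',
        context=Context({'echoMe': 'running the steps group'}))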
pypyr/pypyr-cli | pypyr/utils/filesystem.py | ensure_dir | def ensure_dir(path):
"""Create all parent directories of path if they don't exist.
Args:
path. Path-like object. Create parent dirs to this path.
Return:
None.
"""
os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True) | python | def ensure_dir(path):
"""Create all parent directories of path if they don't exist.
Args:
path. Path-like object. Create parent dirs to this path.
Return:
None.
"""
os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True) | [
"def",
"ensure_dir",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
")",
",",
"exist_ok",
"=",
"True",
")"
] | Create all parent directories of path if they don't exist.
Args:
path. Path-like object. Create parent dirs to this path.
Return:
None. | [
"Create",
"all",
"parent",
"directories",
"of",
"path",
"if",
"they",
"don",
"t",
"exist",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L394-L404 | [
"path"
] | What does this function do? | [
"Create",
"all",
"parent",
"directories",
"of",
"path",
"if",
"they",
"don",
"t",
"exist",
"."
] |
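Illustrative call with a hypothetical path: ensure_dir creates the parent directories of the given path, not the leaf file itself.

    from pypyr.utils.filesystem import ensure_dir

    # creates ./out/reports/2024 if missing; report.txt itself is not created
    ensure_dir('./out/reports/2024/report.txt')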
pypyr/pypyr-cli | pypyr/utils/filesystem.py | get_glob | def get_glob(path):
"""Process the input path, applying globbing and formatting.
Do note that this will return files AND directories that match the glob.
No tilde expansion is done, but *, ?, and character ranges expressed with
[] will be correctly matched.
Escape all special characters ('?', '*' and '['). For a literal match, wrap
the meta-characters in brackets. For example, '[?]' matches the character
'?'.
If passing in an iterable of paths, will expand matches for each path in
the iterable. The function will return all the matches for each path
glob expression combined into a single list.
Args:
path: Path-like string, or iterable (list or tuple ) of paths.
Returns:
Combined list of paths found for input glob.
"""
if isinstance(path, str):
return glob.glob(path, recursive=True)
if isinstance(path, os.PathLike):
# hilariously enough, glob doesn't like path-like. Gotta be str.
return glob.glob(str(path), recursive=True)
elif isinstance(path, (list, tuple)):
# each glob returns a list, so chain all the lists into one big list
return list(chain.from_iterable(
glob.glob(str(p), recursive=True) for p in path))
else:
raise TypeError("path should be string, path-like or a list. Instead, "
f"it's a {type(path)}") | python | def get_glob(path):
"""Process the input path, applying globbing and formatting.
Do note that this will return files AND directories that match the glob.
No tilde expansion is done, but *, ?, and character ranges expressed with
[] will be correctly matched.
Escape all special characters ('?', '*' and '['). For a literal match, wrap
the meta-characters in brackets. For example, '[?]' matches the character
'?'.
If passing in an iterable of paths, will expand matches for each path in
the iterable. The function will return all the matches for each path
glob expression combined into a single list.
Args:
path: Path-like string, or iterable (list or tuple ) of paths.
Returns:
Combined list of paths found for input glob.
"""
if isinstance(path, str):
return glob.glob(path, recursive=True)
if isinstance(path, os.PathLike):
# hilariously enough, glob doesn't like path-like. Gotta be str.
return glob.glob(str(path), recursive=True)
elif isinstance(path, (list, tuple)):
# each glob returns a list, so chain all the lists into one big list
return list(chain.from_iterable(
glob.glob(str(p), recursive=True) for p in path))
else:
raise TypeError("path should be string, path-like or a list. Instead, "
f"it's a {type(path)}") | [
"def",
"get_glob",
"(",
"path",
")",
":",
"if",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"return",
"glob",
".",
"glob",
"(",
"path",
",",
"recursive",
"=",
"True",
")",
"if",
"isinstance",
"(",
"path",
",",
"os",
".",
"PathLike",
")",
":",
"# hilariously enough, glob doesn't like path-like. Gotta be str.",
"return",
"glob",
".",
"glob",
"(",
"str",
"(",
"path",
")",
",",
"recursive",
"=",
"True",
")",
"elif",
"isinstance",
"(",
"path",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# each glob returns a list, so chain all the lists into one big list",
"return",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"glob",
".",
"glob",
"(",
"str",
"(",
"p",
")",
",",
"recursive",
"=",
"True",
")",
"for",
"p",
"in",
"path",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"path should be string, path-like or a list. Instead, \"",
"f\"it's a {type(path)}\"",
")"
] | Process the input path, applying globbing and formatting.
Do note that this will return files AND directories that match the glob.
No tilde expansion is done, but *, ?, and character ranges expressed with
[] will be correctly matched.
Escape all special characters ('?', '*' and '['). For a literal match, wrap
the meta-characters in brackets. For example, '[?]' matches the character
'?'.
If passing in an iterable of paths, will expand matches for each path in
the iterable. The function will return all the matches for each path
glob expression combined into a single list.
Args:
path: Path-like string, or iterable (list or tuple ) of paths.
Returns:
Combined list of paths found for input glob. | [
"Process",
"the",
"input",
"path",
"applying",
"globbing",
"and",
"formatting",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L407-L441 | [
"path"
] | What does this function do? | [
"Process",
"the",
"input",
"path",
"applying",
"globbing",
"and",
"formatting",
"."
] |
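Hedged usage sketch with made-up paths: a single glob returns one list of matches, and a list/tuple of globs returns the combined matches.

    from pypyr.utils.filesystem import get_glob

    py_files = get_glob('src/**/*.py')                     # single recursive glob
    combined = get_glob(['docs/*.md', 'tests/test_*.py'])  # matches of both globs in one list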
pypyr/pypyr-cli | pypyr/utils/filesystem.py | is_same_file | def is_same_file(path1, path2):
"""Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not.
"""
return (
path1 and path2
and os.path.isfile(path1) and os.path.isfile(path2)
and os.path.samefile(path1, path2)) | python | def is_same_file(path1, path2):
"""Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not.
"""
return (
path1 and path2
and os.path.isfile(path1) and os.path.isfile(path2)
and os.path.samefile(path1, path2)) | [
"def",
"is_same_file",
"(",
"path1",
",",
"path2",
")",
":",
"return",
"(",
"path1",
"and",
"path2",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"path1",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"path2",
")",
"and",
"os",
".",
"path",
".",
"samefile",
"(",
"path1",
",",
"path2",
")",
")"
] | Return True if path1 is the same file as path2.
The reason for this dance is that samefile throws if either file doesn't
exist.
Args:
path1: str or path-like.
path2: str or path-like.
Returns:
bool. True if the same file, False if not. | [
"Return",
"True",
"if",
"path1",
"is",
"the",
"same",
"file",
"as",
"path2",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L444-L461 | [
"path1",
"path2"
] | What does this function do? | [
"Return",
"True",
"if",
"path1",
"is",
"the",
"same",
"file",
"as",
"path2",
"."
] |
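Minimal sketch with hypothetical paths: a missing path yields False rather than an exception.

    from pypyr.utils.filesystem import is_same_file

    is_same_file('./config.yaml', './config.yaml')   # True if the file exists
    is_same_file('./config.yaml', './missing.yaml')  # False, no exception raised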
pypyr/pypyr-cli | pypyr/utils/filesystem.py | move_file | def move_file(src, dest):
"""Move source file to destination.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong.
"""
try:
os.replace(src, dest)
except Exception as ex_replace:
logger.error(f"error moving file {src} to "
f"{dest}. {ex_replace}")
raise | python | def move_file(src, dest):
"""Move source file to destination.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong.
"""
try:
os.replace(src, dest)
except Exception as ex_replace:
logger.error(f"error moving file {src} to "
f"{dest}. {ex_replace}")
raise | [
"def",
"move_file",
"(",
"src",
",",
"dest",
")",
":",
"try",
":",
"os",
".",
"replace",
"(",
"src",
",",
"dest",
")",
"except",
"Exception",
"as",
"ex_replace",
":",
"logger",
".",
"error",
"(",
"f\"error moving file {src} to \"",
"f\"{dest}. {ex_replace}\"",
")",
"raise"
] | Move source file to destination.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. | [
"Move",
"source",
"file",
"to",
"destination",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L464-L486 | [
"src",
"dest"
] | What does this function do? | [
"Move",
"source",
"file",
"to",
"destination",
"."
] |
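Illustrative call with hypothetical paths; because it uses os.replace, the move may fail across filesystems.

    from pypyr.utils.filesystem import move_file

    move_file('./build/tmp/output.txt', './build/output.txt')  # overwrites dest if it exists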
pypyr/pypyr-cli | pypyr/utils/filesystem.py | move_temp_file | def move_temp_file(src, dest):
"""Move src to dest. Delete src if something goes wrong.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. Does its best to clean up after
itself and remove temp files.
"""
try:
move_file(src, dest)
except Exception:
try:
os.remove(src)
except Exception as ex_clean:
# at this point, something's deeply wrong, so log error.
# raising the original error, though, not this error in the
# error handler, as the 1st was the initial cause of all of
# this.
logger.error(f"error removing temp file {src}. "
f"{ex_clean}")
raise | python | def move_temp_file(src, dest):
"""Move src to dest. Delete src if something goes wrong.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. Does its best to clean up after
itself and remove temp files.
"""
try:
move_file(src, dest)
except Exception:
try:
os.remove(src)
except Exception as ex_clean:
# at this point, something's deeply wrong, so log error.
# raising the original error, though, not this error in the
# error handler, as the 1st was the initial cause of all of
# this.
logger.error(f"error removing temp file {src}. "
f"{ex_clean}")
raise | [
"def",
"move_temp_file",
"(",
"src",
",",
"dest",
")",
":",
"try",
":",
"move_file",
"(",
"src",
",",
"dest",
")",
"except",
"Exception",
":",
"try",
":",
"os",
".",
"remove",
"(",
"src",
")",
"except",
"Exception",
"as",
"ex_clean",
":",
"# at this point, something's deeply wrong, so log error.",
"# raising the original error, though, not this error in the",
"# error handler, as the 1st was the initial cause of all of",
"# this.",
"logger",
".",
"error",
"(",
"f\"error removing temp file {src}. \"",
"f\"{ex_clean}\"",
")",
"raise"
] | Move src to dest. Delete src if something goes wrong.
Overwrites dest.
Args:
src: str or path-like. source file
dest: str or path-like. destination file
Returns:
None.
Raises:
FileNotFoundError: out path parent doesn't exist.
OSError: if any IO operations go wrong. Does its best to clean up after
itself and remove temp files. | [
"Move",
"src",
"to",
"dest",
".",
"Delete",
"src",
"if",
"something",
"goes",
"wrong",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L489-L520 | [
"src",
"dest"
] | What does this function do? | [
"Move",
"src",
"to",
"dest",
".",
"Delete",
"src",
"if",
"something",
"goes",
"wrong",
"."
] |
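Minimal sketch with hypothetical paths: promotes a temp file written next to its destination, and the temp file is removed if the move fails.

    from pypyr.utils.filesystem import move_temp_file

    move_temp_file('./out/.report.txt.tmp', './out/report.txt')  # overwrites ./out/report.txt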
pypyr/pypyr-cli | pypyr/utils/filesystem.py | FileRewriter.files_in_to_out | def files_in_to_out(self, in_path, out_path=None):
"""Write in files to out, calling the line_handler on each line.
Calls file_in_to_out under the hood to format the in_path payload. The
formatting processing is done by the self.formatter instance.
Args:
in_path: str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
out_path: str or path-like. Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
Top tip: Path-like objects strip the trailing slash. If
you want to pass in a dir that does not exist yet as
out-path with a trailing /, you should be passing it as a
str to preserve the /.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
in_paths = get_glob(in_path)
in_count = len(in_paths)
if in_count == 0:
logger.debug(f'in path found {in_count} paths.')
else:
logger.debug(f'in path found {in_count} paths:')
for path in in_paths:
logger.debug(f'{path}')
logger.debug(
'herewith ends the paths. will now process each file.')
if in_paths:
# derive the destination directory, ensure it's ready for writing
basedir_out = None
is_outfile_name_known = False
if out_path:
# outpath could be a file, or a dir
pathlib_out = Path(out_path)
# yep, Path() strips trailing /, hence check original string
if isinstance(out_path, str) and out_path.endswith(os.sep):
# ensure dir - mimic posix mkdir -p
pathlib_out.mkdir(parents=True, exist_ok=True)
basedir_out = pathlib_out
elif pathlib_out.is_dir():
basedir_out = pathlib_out
else:
if len(in_paths) > 1:
raise Error(
f'{in_path} resolves to {len(in_paths)} files, '
'but you specified only a single file as out '
f'{out_path}. If the outpath is meant to be a '
'directory, put a / at the end.')
# at this point it must be a file (not dir) path
# make sure that the parent dir exists
basedir_out = pathlib_out.parent
basedir_out.parent.mkdir(parents=True, exist_ok=True)
is_outfile_name_known = True
# loop through all the in files and write them to the out dir
file_counter = 0
is_edit = False
for path in in_paths:
actual_in = Path(path)
# recursive glob returns dirs too, only interested in files
if actual_in.is_file():
if basedir_out:
if is_outfile_name_known:
actual_out = pathlib_out
else:
# default to original src file name if only out dir
# specified without an out file name
actual_out = basedir_out.joinpath(actual_in.name)
logger.debug(f"writing {path} to {actual_out}")
self.in_to_out(in_path=actual_in, out_path=actual_out)
else:
logger.debug(f"editing {path}")
self.in_to_out(in_path=actual_in)
is_edit = True
file_counter += 1
if is_edit:
logger.info(
f"edited & wrote {file_counter} file(s) at {in_path}")
else:
logger.info(
f"read {in_path}, formatted and wrote {file_counter} "
f"file(s) to {out_path}")
else:
logger.info(f"{in_path} found no files") | python | def files_in_to_out(self, in_path, out_path=None):
"""Write in files to out, calling the line_handler on each line.
Calls file_in_to_out under the hood to format the in_path payload. The
formatting processing is done by the self.formatter instance.
Args:
in_path: str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
out_path: str or path-like. Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
Top tip: Path-like objects strip the trailing slash. If
you want to pass in a dir that does not exist yet as
out-path with a trailing /, you should be passing it as a
str to preserve the /.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
in_paths = get_glob(in_path)
in_count = len(in_paths)
if in_count == 0:
logger.debug(f'in path found {in_count} paths.')
else:
logger.debug(f'in path found {in_count} paths:')
for path in in_paths:
logger.debug(f'{path}')
logger.debug(
'herewith ends the paths. will now process each file.')
if in_paths:
# derive the destination directory, ensure it's ready for writing
basedir_out = None
is_outfile_name_known = False
if out_path:
# outpath could be a file, or a dir
pathlib_out = Path(out_path)
# yep, Path() strips trailing /, hence check original string
if isinstance(out_path, str) and out_path.endswith(os.sep):
# ensure dir - mimic posix mkdir -p
pathlib_out.mkdir(parents=True, exist_ok=True)
basedir_out = pathlib_out
elif pathlib_out.is_dir():
basedir_out = pathlib_out
else:
if len(in_paths) > 1:
raise Error(
f'{in_path} resolves to {len(in_paths)} files, '
'but you specified only a single file as out '
f'{out_path}. If the outpath is meant to be a '
'directory, put a / at the end.')
# at this point it must be a file (not dir) path
# make sure that the parent dir exists
basedir_out = pathlib_out.parent
basedir_out.parent.mkdir(parents=True, exist_ok=True)
is_outfile_name_known = True
# loop through all the in files and write them to the out dir
file_counter = 0
is_edit = False
for path in in_paths:
actual_in = Path(path)
# recursive glob returns dirs too, only interested in files
if actual_in.is_file():
if basedir_out:
if is_outfile_name_known:
actual_out = pathlib_out
else:
# default to original src file name if only out dir
# specified without an out file name
actual_out = basedir_out.joinpath(actual_in.name)
logger.debug(f"writing {path} to {actual_out}")
self.in_to_out(in_path=actual_in, out_path=actual_out)
else:
logger.debug(f"editing {path}")
self.in_to_out(in_path=actual_in)
is_edit = True
file_counter += 1
if is_edit:
logger.info(
f"edited & wrote {file_counter} file(s) at {in_path}")
else:
logger.info(
f"read {in_path}, formatted and wrote {file_counter} "
f"file(s) to {out_path}")
else:
logger.info(f"{in_path} found no files") | [
"def",
"files_in_to_out",
"(",
"self",
",",
"in_path",
",",
"out_path",
"=",
"None",
")",
":",
"in_paths",
"=",
"get_glob",
"(",
"in_path",
")",
"in_count",
"=",
"len",
"(",
"in_paths",
")",
"if",
"in_count",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"f'in path found {in_count} paths.'",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f'in path found {in_count} paths:'",
")",
"for",
"path",
"in",
"in_paths",
":",
"logger",
".",
"debug",
"(",
"f'{path}'",
")",
"logger",
".",
"debug",
"(",
"'herewith ends the paths. will now process each file.'",
")",
"if",
"in_paths",
":",
"# derive the destination directory, ensure it's ready for writing",
"basedir_out",
"=",
"None",
"is_outfile_name_known",
"=",
"False",
"if",
"out_path",
":",
"# outpath could be a file, or a dir",
"pathlib_out",
"=",
"Path",
"(",
"out_path",
")",
"# yep, Path() strips trailing /, hence check original string",
"if",
"isinstance",
"(",
"out_path",
",",
"str",
")",
"and",
"out_path",
".",
"endswith",
"(",
"os",
".",
"sep",
")",
":",
"# ensure dir - mimic posix mkdir -p",
"pathlib_out",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"basedir_out",
"=",
"pathlib_out",
"elif",
"pathlib_out",
".",
"is_dir",
"(",
")",
":",
"basedir_out",
"=",
"pathlib_out",
"else",
":",
"if",
"len",
"(",
"in_paths",
")",
">",
"1",
":",
"raise",
"Error",
"(",
"f'{in_path} resolves to {len(in_paths)} files, '",
"'but you specified only a single file as out '",
"f'{out_path}. If the outpath is meant to be a '",
"'directory, put a / at the end.'",
")",
"# at this point it must be a file (not dir) path",
"# make sure that the parent dir exists",
"basedir_out",
"=",
"pathlib_out",
".",
"parent",
"basedir_out",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"is_outfile_name_known",
"=",
"True",
"# loop through all the in files and write them to the out dir",
"file_counter",
"=",
"0",
"is_edit",
"=",
"False",
"for",
"path",
"in",
"in_paths",
":",
"actual_in",
"=",
"Path",
"(",
"path",
")",
"# recursive glob returns dirs too, only interested in files",
"if",
"actual_in",
".",
"is_file",
"(",
")",
":",
"if",
"basedir_out",
":",
"if",
"is_outfile_name_known",
":",
"actual_out",
"=",
"pathlib_out",
"else",
":",
"# default to original src file name if only out dir",
"# specified without an out file name",
"actual_out",
"=",
"basedir_out",
".",
"joinpath",
"(",
"actual_in",
".",
"name",
")",
"logger",
".",
"debug",
"(",
"f\"writing {path} to {actual_out}\"",
")",
"self",
".",
"in_to_out",
"(",
"in_path",
"=",
"actual_in",
",",
"out_path",
"=",
"actual_out",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"editing {path}\"",
")",
"self",
".",
"in_to_out",
"(",
"in_path",
"=",
"actual_in",
")",
"is_edit",
"=",
"True",
"file_counter",
"+=",
"1",
"if",
"is_edit",
":",
"logger",
".",
"info",
"(",
"f\"edited & wrote {file_counter} file(s) at {in_path}\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"read {in_path}, formatted and wrote {file_counter} \"",
"f\"file(s) to {out_path}\"",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"{in_path} found no files\"",
")"
] | Write in files to out, calling the line_handler on each line.
Calls file_in_to_out under the hood to format the in_path payload. The
formatting processing is done by the self.formatter instance.
Args:
in_path: str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
out_path: str or path-like. Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
Top tip: Path-like objects strip the trailing slash. If
you want to pass in a dir that does not exist yet as
out-path with a trailing /, you should be passing it as a
str to preserve the /.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None. | [
"Write",
"in",
"files",
"to",
"out",
"calling",
"the",
"line_handler",
"on",
"each",
"line",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L56-L156 | [
"in_path",
"out_path"
] | What does this function do? | [
"Write",
"in",
"files",
"to",
"out",
"calling",
"the",
"line_handler",
"on",
"each",
"line",
"."
] |
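Hedged sketch: files_in_to_out is called on a concrete FileRewriter subclass instance constructed with a formatter (the constructor is not shown in this record, so the instance below is assumed to exist already). Paths are illustrative; note the trailing / marking out_path as a directory.

    # 'rewriter' is an assumed, already-constructed FileRewriter subclass instance
    rewriter.files_in_to_out(in_path='templates/**/*.yml', out_path='out/')  # write all matches into out/
    rewriter.files_in_to_out(in_path='config.yml')                           # no out_path: in-place edit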
pypyr/pypyr-cli | pypyr/utils/filesystem.py | ObjectRewriter.in_to_out | def in_to_out(self, in_path, out_path=None):
"""Load file into object, formats, writes object to out.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
if is_same_file(in_path, out_path):
logger.debug(
"in path and out path are the same file. writing to temp "
"file and then replacing in path with the temp file.")
out_path = None
logger.debug(f"opening source file: {in_path}")
with open(in_path) as infile:
obj = self.object_representer.load(infile)
if out_path:
logger.debug(
f"opening destination file for writing: {out_path}")
ensure_dir(out_path)
with open(out_path, 'w') as outfile:
self.object_representer.dump(outfile, self.formatter(obj))
return
else:
logger.debug("opening temp file for writing...")
with NamedTemporaryFile(mode='w+t',
dir=os.path.dirname(in_path),
delete=False) as outfile:
self.object_representer.dump(outfile, self.formatter(obj))
logger.debug(f"moving temp file to: {in_path}")
move_temp_file(outfile.name, infile.name) | python | def in_to_out(self, in_path, out_path=None):
"""Load file into object, formats, writes object to out.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
if is_same_file(in_path, out_path):
logger.debug(
"in path and out path are the same file. writing to temp "
"file and then replacing in path with the temp file.")
out_path = None
logger.debug(f"opening source file: {in_path}")
with open(in_path) as infile:
obj = self.object_representer.load(infile)
if out_path:
logger.debug(
f"opening destination file for writing: {out_path}")
ensure_dir(out_path)
with open(out_path, 'w') as outfile:
self.object_representer.dump(outfile, self.formatter(obj))
return
else:
logger.debug("opening temp file for writing...")
with NamedTemporaryFile(mode='w+t',
dir=os.path.dirname(in_path),
delete=False) as outfile:
self.object_representer.dump(outfile, self.formatter(obj))
logger.debug(f"moving temp file to: {in_path}")
move_temp_file(outfile.name, infile.name) | [
"def",
"in_to_out",
"(",
"self",
",",
"in_path",
",",
"out_path",
"=",
"None",
")",
":",
"if",
"is_same_file",
"(",
"in_path",
",",
"out_path",
")",
":",
"logger",
".",
"debug",
"(",
"\"in path and out path are the same file. writing to temp \"",
"\"file and then replacing in path with the temp file.\"",
")",
"out_path",
"=",
"None",
"logger",
".",
"debug",
"(",
"f\"opening source file: {in_path}\"",
")",
"with",
"open",
"(",
"in_path",
")",
"as",
"infile",
":",
"obj",
"=",
"self",
".",
"object_representer",
".",
"load",
"(",
"infile",
")",
"if",
"out_path",
":",
"logger",
".",
"debug",
"(",
"f\"opening destination file for writing: {out_path}\"",
")",
"ensure_dir",
"(",
"out_path",
")",
"with",
"open",
"(",
"out_path",
",",
"'w'",
")",
"as",
"outfile",
":",
"self",
".",
"object_representer",
".",
"dump",
"(",
"outfile",
",",
"self",
".",
"formatter",
"(",
"obj",
")",
")",
"return",
"else",
":",
"logger",
".",
"debug",
"(",
"\"opening temp file for writing...\"",
")",
"with",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"in_path",
")",
",",
"delete",
"=",
"False",
")",
"as",
"outfile",
":",
"self",
".",
"object_representer",
".",
"dump",
"(",
"outfile",
",",
"self",
".",
"formatter",
"(",
"obj",
")",
")",
"logger",
".",
"debug",
"(",
"f\"moving temp file to: {in_path}\"",
")",
"move_temp_file",
"(",
"outfile",
".",
"name",
",",
"infile",
".",
"name",
")"
] | Load file into object, formats, writes object to out.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None. | [
"Load",
"file",
"into",
"object",
"formats",
"writes",
"object",
"to",
"out",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L188-L233 | [
"in_path",
"out_path"
] | What does this function do? | [
"Load",
"file",
"into",
"object",
"formats",
"writes",
"object",
"to",
"out",
"."
] |
pypyr/pypyr-cli | pypyr/utils/filesystem.py | StreamRewriter.in_to_out | def in_to_out(self, in_path, out_path=None):
"""Write a single file in to out, running self.formatter on each line.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
is_in_place_edit = False
if is_same_file(in_path, out_path):
logger.debug(
"in path and out path are the same file. writing to temp "
"file and then replacing in path with the temp file.")
out_path = None
is_in_place_edit = True
logger.debug(f"opening source file: {in_path}")
with open(in_path) as infile:
if out_path:
logger.debug(
f"opening destination file for writing: {out_path}")
ensure_dir(out_path)
with open(out_path, 'w') as outfile:
outfile.writelines(self.formatter(infile))
return
else:
logger.debug("opening temp file for writing...")
with NamedTemporaryFile(mode='w+t',
dir=os.path.dirname(in_path),
delete=False) as outfile:
outfile.writelines(self.formatter(infile))
is_in_place_edit = True
# only replace infile AFTER it's closed, outside the with.
# pragma exclude because func actually returns on 287 in if out_path,
# and cov not smart enough to realize that !is_in_place_edit won't ever
# happen here (the function will have exited already)
if is_in_place_edit: # pragma: no branch
logger.debug(f"moving temp file to: {in_path}")
move_temp_file(outfile.name, infile.name) | python | def in_to_out(self, in_path, out_path=None):
"""Write a single file in to out, running self.formatter on each line.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
"""
is_in_place_edit = False
if is_same_file(in_path, out_path):
logger.debug(
"in path and out path are the same file. writing to temp "
"file and then replacing in path with the temp file.")
out_path = None
is_in_place_edit = True
logger.debug(f"opening source file: {in_path}")
with open(in_path) as infile:
if out_path:
logger.debug(
f"opening destination file for writing: {out_path}")
ensure_dir(out_path)
with open(out_path, 'w') as outfile:
outfile.writelines(self.formatter(infile))
return
else:
logger.debug("opening temp file for writing...")
with NamedTemporaryFile(mode='w+t',
dir=os.path.dirname(in_path),
delete=False) as outfile:
outfile.writelines(self.formatter(infile))
is_in_place_edit = True
# only replace infile AFTER it's closed, outside the with.
# pragma exclude because func actually returns on 287 in if out_path,
# and cov not smart enough to realize that !is_in_place_edit won't ever
# happen here (the function will have exited already)
if is_in_place_edit: # pragma: no branch
logger.debug(f"moving temp file to: {in_path}")
move_temp_file(outfile.name, infile.name) | [
"def",
"in_to_out",
"(",
"self",
",",
"in_path",
",",
"out_path",
"=",
"None",
")",
":",
"is_in_place_edit",
"=",
"False",
"if",
"is_same_file",
"(",
"in_path",
",",
"out_path",
")",
":",
"logger",
".",
"debug",
"(",
"\"in path and out path are the same file. writing to temp \"",
"\"file and then replacing in path with the temp file.\"",
")",
"out_path",
"=",
"None",
"is_in_place_edit",
"=",
"True",
"logger",
".",
"debug",
"(",
"f\"opening source file: {in_path}\"",
")",
"with",
"open",
"(",
"in_path",
")",
"as",
"infile",
":",
"if",
"out_path",
":",
"logger",
".",
"debug",
"(",
"f\"opening destination file for writing: {out_path}\"",
")",
"ensure_dir",
"(",
"out_path",
")",
"with",
"open",
"(",
"out_path",
",",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"writelines",
"(",
"self",
".",
"formatter",
"(",
"infile",
")",
")",
"return",
"else",
":",
"logger",
".",
"debug",
"(",
"\"opening temp file for writing...\"",
")",
"with",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+t'",
",",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"in_path",
")",
",",
"delete",
"=",
"False",
")",
"as",
"outfile",
":",
"outfile",
".",
"writelines",
"(",
"self",
".",
"formatter",
"(",
"infile",
")",
")",
"is_in_place_edit",
"=",
"True",
"# only replace infile AFTER it's closed, outside the with.",
"# pragma exclude because func actually returns on 287 in if out_path,",
"# and cov not smart enough to realize that !is_in_place_edit won't ever",
"# happen here (the function will have exited already)",
"if",
"is_in_place_edit",
":",
"# pragma: no branch",
"logger",
".",
"debug",
"(",
"f\"moving temp file to: {in_path}\"",
")",
"move_temp_file",
"(",
"outfile",
".",
"name",
",",
"infile",
".",
"name",
")"
] | Write a single file in to out, running self.formatter on each line.
If in_path and out_path point to the same thing it will in-place edit
and overwrite the in path. Even easier, if you do want to edit a file
in place, don't specify out_path, or set it to None.
Args:
in_path: str or path-like. Must refer to a single existing file.
out_path: str or path-like. Must refer to a single destination file
location. will create directory structure if it doesn't
exist.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None. | [
"Write",
"a",
"single",
"file",
"in",
"to",
"out",
"running",
"self",
".",
"formatter",
"on",
"each",
"line",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L252-L303 | [
"in_path",
"out_path"
] | What does this function do? | [
"Write",
"a",
"single",
"file",
"in",
"to",
"out",
"running",
"self",
".",
"formatter",
"on",
"each",
"line",
"."
] |
pypyr/pypyr-cli | pypyr/utils/filesystem.py | JsonRepresenter.dump | def dump(self, file, payload):
"""Dump json oject to open file output.
Writes json with 2 spaces indentation.
Args:
file: Open file-like object. Must be open for writing.
payload: The Json object to write to file.
Returns:
None.
"""
json.dump(payload, file, indent=2, ensure_ascii=False) | python | def dump(self, file, payload):
"""Dump json oject to open file output.
Writes json with 2 spaces indentation.
Args:
file: Open file-like object. Must be open for writing.
payload: The Json object to write to file.
Returns:
None.
"""
json.dump(payload, file, indent=2, ensure_ascii=False) | [
"def",
"dump",
"(",
"self",
",",
"file",
",",
"payload",
")",
":",
"json",
".",
"dump",
"(",
"payload",
",",
"file",
",",
"indent",
"=",
"2",
",",
"ensure_ascii",
"=",
"False",
")"
] | Dump json object to open file output.
Writes json with 2 spaces indentation.
Args:
file: Open file-like object. Must be open for writing.
payload: The Json object to write to file.
Returns:
None. | [
"Dump",
"json",
"oject",
"to",
"open",
"file",
"output",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L341-L354 | [
"file",
"payload"
] | What does this function do? | [
"Dump",
"json",
"oject",
"to",
"open",
"file",
"output",
"."
] |
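Minimal sketch with a hypothetical file and payload, assuming JsonRepresenter needs no constructor arguments (its __init__ is not shown in this record).

    representer = JsonRepresenter()
    with open('out.json', 'w') as f:
        representer.dump(f, {'key': 'value', 'items': [1, 2, 3]})  # written with 2-space indent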
pypyr/pypyr-cli | pypyr/steps/filereplace.py | run_step | def run_step(context):
"""Parse input file and replace a search string.
This also does string substitutions from context on the fileReplacePairs.
It does this before it searches & replaces the in file.
Be careful of order. If fileReplacePairs is not an ordered collection,
replacements could evaluate in any given order. If this is coming in from
pipeline yaml it will be an ordered dictionary, so life is good.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileReplace
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
- replacePairs. mandatory. Dictionary where items are:
'find_string': 'replace_string'
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: Any of the required keys missing in
context.
pypyr.errors.KeyInContextHasNoValueError: Any of the required keys
exists but is None.
"""
logger.debug("started")
deprecated(context)
StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step()
logger.debug("done") | python | def run_step(context):
"""Parse input file and replace a search string.
This also does string substitutions from context on the fileReplacePairs.
It does this before it searches & replaces the in file.
Be careful of order. If fileReplacePairs is not an ordered collection,
replacements could evaluate in any given order. If this is coming in from
pipeline yaml it will be an ordered dictionary, so life is good.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileReplace
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
- replacePairs. mandatory. Dictionary where items are:
'find_string': 'replace_string'
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: Any of the required keys missing in
context.
pypyr.errors.KeyInContextHasNoValueError: Any of the required keys
exists but is None.
"""
logger.debug("started")
deprecated(context)
StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step()
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"StreamReplacePairsRewriterStep",
"(",
"__name__",
",",
"'fileReplace'",
",",
"context",
")",
".",
"run_step",
"(",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Parse input file and replace a search string.
This also does string substitutions from context on the fileReplacePairs.
It does this before it searches & replaces the in file.
Be careful of order. If fileReplacePairs is not an ordered collection,
replacements could evaluate in any given order. If this is coming in from
pipeline yaml it will be an ordered dictionary, so life is good.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileReplace
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
- replacePairs. mandatory. Dictionary where items are:
'find_string': 'replace_string'
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: Any of the required keys missing in
context.
pypyr.errors.KeyInContextHasNoValueError: Any of the required keys
exists but is None. | [
"Parse",
"input",
"file",
"and",
"replace",
"a",
"search",
"string",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filereplace.py#L9-L56 | [
"context"
] | What does this function do? | [
"Parse",
"input",
"file",
"and",
"replace",
"a",
"search",
"string",
"."
] |
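Hedged sketch of the context shape this step expects per its docstring; the paths and replace pairs are invented for illustration.

    from pypyr.context import Context
    import pypyr.steps.filereplace

    context = Context({
        'fileReplace': {
            'in': 'templates/*.txt',
            'out': 'out/',                 # trailing / so it is read as a directory
            'replacePairs': {
                '{{host}}': 'localhost',
                '{{port}}': '8080',
            },
        }
    })
    pypyr.steps.filereplace.run_step(context)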
pypyr/pypyr-cli | pypyr/steps/fileformatjson.py | run_step | def run_step(context):
"""Parse input json file and substitute {tokens} from context.
Loads json into memory to do parsing, so be aware of big files.
Args:
context: pypyr.context.Context. Mandatory.
- fileFormatJson
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormatJson or
fileFormatJson['in'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileFormatJson or
fileFormatJson['in'] exists but is None.
"""
logger.debug("started")
deprecated(context)
ObjectRewriterStep(__name__, 'fileFormatJson', context).run_step(
JsonRepresenter())
logger.debug("done") | python | def run_step(context):
"""Parse input json file and substitute {tokens} from context.
Loads json into memory to do parsing, so be aware of big files.
Args:
context: pypyr.context.Context. Mandatory.
- fileFormatJson
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormatJson or
fileFormatJson['in'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileFormatJson or
fileFormatJson['in'] exists but is None.
"""
logger.debug("started")
deprecated(context)
ObjectRewriterStep(__name__, 'fileFormatJson', context).run_step(
JsonRepresenter())
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"ObjectRewriterStep",
"(",
"__name__",
",",
"'fileFormatJson'",
",",
"context",
")",
".",
"run_step",
"(",
"JsonRepresenter",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Parse input json file and substitute {tokens} from context.
Loads json into memory to do parsing, so be aware of big files.
Args:
context: pypyr.context.Context. Mandatory.
- fileFormatJson
- in. mandatory.
str, path-like, or an iterable (list/tuple) of
strings/paths. Each str/path can be a glob, relative or
absolute path.
- out. optional. path-like.
Can refer to a file or a directory.
will create directory structure if it doesn't exist. If
in-path refers to >1 file (e.g it's a glob or list), out
path can only be a directory - it doesn't make sense to
write >1 file to the same single file (this is not an
appender.) To ensure out_path is read as a directory and
not a file, be sure to have the path separator (/) at the
end.
If out_path is not specified or None, will in-place edit
and overwrite the in-files.
Returns:
None.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fileFormatJson or
fileFormatJson['in'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileFormatJson or
fileFormatJson['in'] exists but is None. | [
"Parse",
"input",
"json",
"file",
"and",
"substitute",
"{",
"tokens",
"}",
"from",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fileformatjson.py#L10-L52 | [
"context"
] | What does this function do? | [
"Parse",
"input",
"json",
"file",
"and",
"substitute",
"{",
"tokens",
"}",
"from",
"context",
"."
] |
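Hedged sketch of the expected context per the docstring; paths and the substitution token are assumptions. Files matched by 'in' are loaded as json, {token} values substituted from context, and the results written under 'out'.

    from pypyr.context import Context
    import pypyr.steps.fileformatjson

    context = Context({
        'version': '1.2.3',                # assumed token referenced as {version} in the json
        'fileFormatJson': {
            'in': 'config/*.json',
            'out': 'out/',                 # trailing / so it is treated as a directory
        }
    })
    pypyr.steps.fileformatjson.run_step(context)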
pypyr/pypyr-cli | pypyr/log/logger.py | set_logging_config | def set_logging_config(log_level, handlers):
"""Set python logging library config.
Run this ONCE at the start of your process. It formats the python logging
module's output.
Defaults logging level to INFO (20).
"""
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=log_level,
handlers=handlers) | python | def set_logging_config(log_level, handlers):
"""Set python logging library config.
Run this ONCE at the start of your process. It formats the python logging
module's output.
Defaults logging level to INFO (20).
"""
logging.basicConfig(
format='%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=log_level,
handlers=handlers) | [
"def",
"set_logging_config",
"(",
"log_level",
",",
"handlers",
")",
":",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"'%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s'",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
",",
"level",
"=",
"log_level",
",",
"handlers",
"=",
"handlers",
")"
] | Set python logging library config.
Run this ONCE at the start of your process. It formats the python logging
module's output.
Defaults logging level to INFO (20). | [
"Set",
"python",
"logging",
"library",
"config",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/log/logger.py#L8-L19 | [
"log_level",
"handlers"
] | What does this function do? | [
"Set",
"python",
"logging",
"library",
"config",
"."
] |
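Illustrative call: handlers is a list of standard library logging handlers; the log file path is an assumption.

    import logging
    from pypyr.log.logger import set_logging_config

    set_logging_config(logging.INFO,
                       handlers=[logging.StreamHandler(),
                                 logging.FileHandler('./pypyr.log')])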
pypyr/pypyr-cli | pypyr/log/logger.py | set_root_logger | def set_root_logger(root_log_level, log_path=None):
"""Set the root logger 'pypyr'. Do this before you do anything else.
Run once and only once at initialization.
"""
handlers = []
console_handler = logging.StreamHandler()
handlers.append(console_handler)
if log_path:
file_handler = logging.FileHandler(log_path)
handlers.append(file_handler)
set_logging_config(root_log_level, handlers=handlers)
root_logger = logging.getLogger("pypyr")
root_logger.debug(
f"Root logger {root_logger.name} configured with level "
f"{root_log_level}") | python | def set_root_logger(root_log_level, log_path=None):
"""Set the root logger 'pypyr'. Do this before you do anything else.
Run once and only once at initialization.
"""
handlers = []
console_handler = logging.StreamHandler()
handlers.append(console_handler)
if log_path:
file_handler = logging.FileHandler(log_path)
handlers.append(file_handler)
set_logging_config(root_log_level, handlers=handlers)
root_logger = logging.getLogger("pypyr")
root_logger.debug(
f"Root logger {root_logger.name} configured with level "
f"{root_log_level}") | [
"def",
"set_root_logger",
"(",
"root_log_level",
",",
"log_path",
"=",
"None",
")",
":",
"handlers",
"=",
"[",
"]",
"console_handler",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"handlers",
".",
"append",
"(",
"console_handler",
")",
"if",
"log_path",
":",
"file_handler",
"=",
"logging",
".",
"FileHandler",
"(",
"log_path",
")",
"handlers",
".",
"append",
"(",
"file_handler",
")",
"set_logging_config",
"(",
"root_log_level",
",",
"handlers",
"=",
"handlers",
")",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"pypyr\"",
")",
"root_logger",
".",
"debug",
"(",
"f\"Root logger {root_logger.name} configured with level \"",
"f\"{root_log_level}\"",
")"
] | Set the root logger 'pypyr'. Do this before you do anything else.
Run once and only once at initialization. | [
"Set",
"the",
"root",
"logger",
"pypyr",
".",
"Do",
"this",
"before",
"you",
"do",
"anything",
"else",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/log/logger.py#L22-L39 | [
"root_log_level",
"log_path"
] | What does this function do? | [
"Set",
"the",
"root",
"logger",
"pypyr",
".",
"Do",
"this",
"before",
"you",
"do",
"anything",
"else",
"."
] |
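Illustrative call with an assumed log path: run once at process start, before anything else logs.

    import logging
    from pypyr.log.logger import set_root_logger

    set_root_logger(logging.INFO, log_path='./pypyr.log')  # console handler plus file handler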
pypyr/pypyr-cli | pypyr/parser/json.py | get_parsed_context | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this json parser you're looking for something "
"like: "
"pypyr pipelinename '{\"key1\":\"value1\","
"\"key2\":\"value2\"}'")
return None
logger.debug("starting")
# deserialize the input context string into json
return json.loads(context_arg) | python | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this json parser you're looking for something "
"like: "
"pypyr pipelinename '{\"key1\":\"value1\","
"\"key2\":\"value2\"}'")
return None
logger.debug("starting")
# deserialize the input context string into json
return json.loads(context_arg) | [
"def",
"get_parsed_context",
"(",
"context_arg",
")",
":",
"if",
"not",
"context_arg",
":",
"logger",
".",
"debug",
"(",
"\"pipeline invoked without context arg set. For \"",
"\"this json parser you're looking for something \"",
"\"like: \"",
"\"pypyr pipelinename '{\\\"key1\\\":\\\"value1\\\",\"",
"\"\\\"key2\\\":\\\"value2\\\"}'\"",
")",
"return",
"None",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# deserialize the input context string into json",
"return",
"json",
".",
"loads",
"(",
"context_arg",
")"
] | Parse input context string and returns context as dictionary. | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/parser/json.py#L10-L22 | [
"context_arg"
] | What does this function do? | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] |
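Minimal sketch matching the debug hint in the parser: the argument is a JSON string and the return value is the deserialized dict, or None for empty input.

    from pypyr.parser.json import get_parsed_context

    context_dict = get_parsed_context('{"key1": "value1", "key2": "value2"}')
    # context_dict == {'key1': 'value1', 'key2': 'value2'}
    assert get_parsed_context('') is None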
pypyr/pypyr-cli | pypyr/pipelinerunner.py | get_parsed_context | def get_parsed_context(pipeline, context_in_string):
"""Execute get_parsed_context handler if specified.
Dynamically load the module specified by the context_parser key in pipeline
dict and execute the get_parsed_context function on that module.
Args:
pipeline: dict. Pipeline object.
context_in_string: string. Argument string used to initialize context.
Returns:
pypyr.context.Context() instance.
Raises:
AttributeError: parser specified on pipeline missing get_parsed_context
function.
"""
logger.debug("starting")
if 'context_parser' in pipeline:
parser_module_name = pipeline['context_parser']
logger.debug(f"context parser found: {parser_module_name}")
parser_module = pypyr.moduleloader.get_module(parser_module_name)
try:
logger.debug(f"running parser {parser_module_name}")
result_context = parser_module.get_parsed_context(
context_in_string)
logger.debug(f"step {parser_module_name} done")
# Downstream steps likely to expect context not to be None, hence
# empty rather than None.
if result_context is None:
logger.debug(f"{parser_module_name} returned None. Using "
"empty context instead")
return pypyr.context.Context()
else:
return pypyr.context.Context(result_context)
except AttributeError:
logger.error(f"The parser {parser_module_name} doesn't have a "
"get_parsed_context(context) function.")
raise
else:
logger.debug("pipeline does not have custom context parser. Using "
"empty context.")
logger.debug("done")
# initialize to an empty dictionary because you want to be able to run
# with no context.
return pypyr.context.Context() | python | def get_parsed_context(pipeline, context_in_string):
"""Execute get_parsed_context handler if specified.
Dynamically load the module specified by the context_parser key in pipeline
dict and execute the get_parsed_context function on that module.
Args:
pipeline: dict. Pipeline object.
context_in_string: string. Argument string used to initialize context.
Returns:
pypyr.context.Context() instance.
Raises:
AttributeError: parser specified on pipeline missing get_parsed_context
function.
"""
logger.debug("starting")
if 'context_parser' in pipeline:
parser_module_name = pipeline['context_parser']
logger.debug(f"context parser found: {parser_module_name}")
parser_module = pypyr.moduleloader.get_module(parser_module_name)
try:
logger.debug(f"running parser {parser_module_name}")
result_context = parser_module.get_parsed_context(
context_in_string)
logger.debug(f"step {parser_module_name} done")
# Downstream steps likely to expect context not to be None, hence
# empty rather than None.
if result_context is None:
logger.debug(f"{parser_module_name} returned None. Using "
"empty context instead")
return pypyr.context.Context()
else:
return pypyr.context.Context(result_context)
except AttributeError:
logger.error(f"The parser {parser_module_name} doesn't have a "
"get_parsed_context(context) function.")
raise
else:
logger.debug("pipeline does not have custom context parser. Using "
"empty context.")
logger.debug("done")
# initialize to an empty dictionary because you want to be able to run
# with no context.
return pypyr.context.Context() | [
"def",
"get_parsed_context",
"(",
"pipeline",
",",
"context_in_string",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"if",
"'context_parser'",
"in",
"pipeline",
":",
"parser_module_name",
"=",
"pipeline",
"[",
"'context_parser'",
"]",
"logger",
".",
"debug",
"(",
"f\"context parser found: {parser_module_name}\"",
")",
"parser_module",
"=",
"pypyr",
".",
"moduleloader",
".",
"get_module",
"(",
"parser_module_name",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"f\"running parser {parser_module_name}\"",
")",
"result_context",
"=",
"parser_module",
".",
"get_parsed_context",
"(",
"context_in_string",
")",
"logger",
".",
"debug",
"(",
"f\"step {parser_module_name} done\"",
")",
"# Downstream steps likely to expect context not to be None, hence",
"# empty rather than None.",
"if",
"result_context",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"f\"{parser_module_name} returned None. Using \"",
"\"empty context instead\"",
")",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
")",
"else",
":",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
"result_context",
")",
"except",
"AttributeError",
":",
"logger",
".",
"error",
"(",
"f\"The parser {parser_module_name} doesn't have a \"",
"\"get_parsed_context(context) function.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"\"pipeline does not have custom context parser. Using \"",
"\"empty context.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"# initialize to an empty dictionary because you want to be able to run",
"# with no context.",
"return",
"pypyr",
".",
"context",
".",
"Context",
"(",
")"
] | Execute get_parsed_context handler if specified.
Dynamically load the module specified by the context_parser key in pipeline
dict and execute the get_parsed_context function on that module.
Args:
pipeline: dict. Pipeline object.
context_in_string: string. Argument string used to initialize context.
Returns:
pypyr.context.Context() instance.
Raises:
AttributeError: parser specified on pipeline missing get_parsed_context
function. | ["Execute", "get_parsed_context", "handler", "if", "specified", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L17-L65 | ["pipeline", "context_in_string"] | What does this function do? | [
"Execute",
"get_parsed_context",
"handler",
"if",
"specified",
"."
] |
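A minimal sketch of calling get_parsed_context directly, assuming pypyr is importable; the pipeline dict is made up and, because it has no context_parser key, the call falls through to the empty-Context branch shown above.

import pypyr.pipelinerunner as pipelinerunner

# No 'context_parser' key in the pipeline, so an empty Context comes back.
pipeline = {'steps': ['pypyr.steps.echo']}
context = pipelinerunner.get_parsed_context(pipeline, 'echoMe=hi')
print(len(context))  # 0 - nothing parsed, empty dict-like Context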
pypyr/pypyr-cli | pypyr/pipelinerunner.py | main | def main(
pipeline_name,
pipeline_context_input,
working_dir,
log_level,
log_path,
):
"""Entry point for pypyr pipeline runner.
Call this once per pypyr run. Call me if you want to run a pypyr pipeline
from your own code. This function does some one-off 1st time initialization
before running the actual pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline, sans .yaml at end.
pipeline_context_input: string. Initialize the pypyr context with this
string.
working_dir: path. looks for ./pipelines and modules in this directory.
log_level: int. Standard python log level enumerated value.
log_path: os.path. Append log to this path.
Returns:
None
"""
pypyr.log.logger.set_root_logger(log_level, log_path)
logger.debug("starting pypyr")
# pipelines specify steps in python modules that load dynamically.
# make it easy for the operator so that the cwd is automatically included
# without needing to pip install a package 1st.
pypyr.moduleloader.set_working_directory(working_dir)
load_and_run_pipeline(pipeline_name=pipeline_name,
pipeline_context_input=pipeline_context_input,
working_dir=working_dir)
logger.debug("pypyr done") | python | def main(
pipeline_name,
pipeline_context_input,
working_dir,
log_level,
log_path,
):
"""Entry point for pypyr pipeline runner.
Call this once per pypyr run. Call me if you want to run a pypyr pipeline
from your own code. This function does some one-off 1st time initialization
before running the actual pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline, sans .yaml at end.
pipeline_context_input: string. Initialize the pypyr context with this
string.
working_dir: path. looks for ./pipelines and modules in this directory.
log_level: int. Standard python log level enumerated value.
log_path: os.path. Append log to this path.
Returns:
None
"""
pypyr.log.logger.set_root_logger(log_level, log_path)
logger.debug("starting pypyr")
# pipelines specify steps in python modules that load dynamically.
# make it easy for the operator so that the cwd is automatically included
# without needing to pip install a package 1st.
pypyr.moduleloader.set_working_directory(working_dir)
load_and_run_pipeline(pipeline_name=pipeline_name,
pipeline_context_input=pipeline_context_input,
working_dir=working_dir)
logger.debug("pypyr done") | [
"def",
"main",
"(",
"pipeline_name",
",",
"pipeline_context_input",
",",
"working_dir",
",",
"log_level",
",",
"log_path",
",",
")",
":",
"pypyr",
".",
"log",
".",
"logger",
".",
"set_root_logger",
"(",
"log_level",
",",
"log_path",
")",
"logger",
".",
"debug",
"(",
"\"starting pypyr\"",
")",
"# pipelines specify steps in python modules that load dynamically.",
"# make it easy for the operator so that the cwd is automatically included",
"# without needing to pip install a package 1st.",
"pypyr",
".",
"moduleloader",
".",
"set_working_directory",
"(",
"working_dir",
")",
"load_and_run_pipeline",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"pipeline_context_input",
",",
"working_dir",
"=",
"working_dir",
")",
"logger",
".",
"debug",
"(",
"\"pypyr done\"",
")"
] | Entry point for pypyr pipeline runner.
Call this once per pypyr run. Call me if you want to run a pypyr pipeline
from your own code. This function does some one-off 1st time initialization
before running the actual pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline, sans .yaml at end.
pipeline_context_input: string. Initialize the pypyr context with this
string.
working_dir: path. looks for ./pipelines and modules in this directory.
log_level: int. Standard python log level enumerated value.
log_path: os.path. Append log to this path.
Returns:
None | ["Entry", "point", "for", "pypyr", "pipeline", "runner", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L68-L108 | ["pipeline_name", "pipeline_context_input", "working_dir", "log_level", "log_path", ""] | What does this function do? | [
"Entry",
"point",
"for",
"pypyr",
"pipeline",
"runner",
"."
] |
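A hedged sketch of invoking the runner from python code; the pipeline name and working directory are assumptions (./pipelines/mypipeline.yaml must exist under working_dir), and passing log_path=None is assumed here to mean console-only logging.

import logging
import pypyr.pipelinerunner as pipelinerunner

pipelinerunner.main(
    pipeline_name='mypipeline',          # loads ./pipelines/mypipeline.yaml
    pipeline_context_input='echoMe=hi',  # handed to the pipeline's context parser
    working_dir='.',                     # also added to sys.path for dynamic imports
    log_level=logging.INFO,
    log_path=None)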
pypyr/pypyr-cli | pypyr/pipelinerunner.py | prepare_context | def prepare_context(pipeline, context_in_string, context):
"""Prepare context for pipeline run.
Args:
pipeline: dict. Dictionary representing the pipeline.
context_in_string: string. Argument string used to initialize context.
context: pypyr.context.Context. Merge any new context generated from
context_in_string into this context instance.
Returns:
None. The context instance to use for the pipeline run is contained
in the context arg, it's not passed back as a function return.
"""
logger.debug("starting")
parsed_context = get_parsed_context(
pipeline=pipeline,
context_in_string=context_in_string)
context.update(parsed_context)
logger.debug("done") | python | def prepare_context(pipeline, context_in_string, context):
"""Prepare context for pipeline run.
Args:
pipeline: dict. Dictionary representing the pipeline.
context_in_string: string. Argument string used to initialize context.
context: pypyr.context.Context. Merge any new context generated from
context_in_string into this context instance.
Returns:
None. The context instance to use for the pipeline run is contained
in the context arg, it's not passed back as a function return.
"""
logger.debug("starting")
parsed_context = get_parsed_context(
pipeline=pipeline,
context_in_string=context_in_string)
context.update(parsed_context)
logger.debug("done") | [
"def",
"prepare_context",
"(",
"pipeline",
",",
"context_in_string",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"parsed_context",
"=",
"get_parsed_context",
"(",
"pipeline",
"=",
"pipeline",
",",
"context_in_string",
"=",
"context_in_string",
")",
"context",
".",
"update",
"(",
"parsed_context",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Prepare context for pipeline run.
Args:
pipeline: dict. Dictionary representing the pipeline.
context_in_string: string. Argument string used to initialize context.
context: pypyr.context.Context. Merge any new context generated from
context_in_string into this context instance.
Returns:
None. The context instance to use for the pipeline run is contained
in the context arg, it's not passed back as a function return. | ["Prepare", "context", "for", "pipeline", "run", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L111-L133 | ["pipeline", "context_in_string", "context"] | What does this function do? | [
"Prepare",
"context",
"for",
"pipeline",
"run",
"."
] |
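A small sketch, assuming pypyr is importable: with a pipeline that has no context_parser, get_parsed_context returns an empty Context, so the update leaves the existing keys untouched.

import pypyr.context
import pypyr.pipelinerunner as pipelinerunner

context = pypyr.context.Context({'existing': 'value'})
pipelinerunner.prepare_context(pipeline={}, context_in_string=None,
                               context=context)
print(context)  # {'existing': 'value'} - merged with an empty parsed context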
pypyr/pypyr-cli | pypyr/pipelinerunner.py | load_and_run_pipeline | def load_and_run_pipeline(pipeline_name,
pipeline_context_input=None,
working_dir=None,
context=None,
parse_input=True,
loader=None):
"""Load and run the specified pypyr pipeline.
This function runs the actual pipeline by name. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
By default pypyr uses file loader. This means that pipeline_name.yaml
should be in the working_dir/pipelines/ directory.
Args:
pipeline_name (str): Name of pipeline, sans .yaml at end.
pipeline_context_input (str): Initialize the pypyr context with this
string.
working_dir (path): Look for pipelines and modules in this directory.
If context arg passed, will use context.working_dir and
ignore this argument. If context is None, working_dir
must be specified.
context (pypyr.context.Context): Use if you already have a
Context object, such as if you are running a pipeline from
within a pipeline and you want to re-use the same context
object for the child pipeline. Any mutations of the context by
the pipeline will be against this instance of it.
parse_input (bool): run context_parser in pipeline.
loader (str): str. optional. Absolute name of pipeline loader module.
If not specified will use pypyr.pypeloaders.fileloader.
Returns:
None
"""
logger.debug(f"you asked to run pipeline: {pipeline_name}")
if loader:
logger.debug(f"you set the pype loader to: {loader}")
else:
loader = 'pypyr.pypeloaders.fileloader'
logger.debug(f"use default pype loader: {loader}")
logger.debug(f"you set the initial context to: {pipeline_context_input}")
if context is None:
context = pypyr.context.Context()
context.working_dir = working_dir
else:
working_dir = context.working_dir
# pipeline loading deliberately outside of try catch. The try catch will
# try to run a failure-handler from the pipeline, but if the pipeline
# doesn't exist there is no failure handler that can possibly run so this
# is very much a fatal stop error.
loader_module = pypyr.moduleloader.get_module(loader)
try:
get_pipeline_definition = getattr(
loader_module, 'get_pipeline_definition'
)
except AttributeError:
logger.error(
f"The pipeline loader {loader_module} doesn't have a "
"get_pipeline_definition(pipeline_name, working_dir) function.")
raise
logger.debug(f"loading the pipeline definition with {loader_module}")
pipeline_definition = get_pipeline_definition(
pipeline_name=pipeline_name,
working_dir=working_dir
)
logger.debug(f"{loader_module} done")
run_pipeline(
pipeline=pipeline_definition,
pipeline_context_input=pipeline_context_input,
context=context,
parse_input=parse_input
) | python | def load_and_run_pipeline(pipeline_name,
pipeline_context_input=None,
working_dir=None,
context=None,
parse_input=True,
loader=None):
"""Load and run the specified pypyr pipeline.
This function runs the actual pipeline by name. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
By default pypyr uses file loader. This means that pipeline_name.yaml
should be in the working_dir/pipelines/ directory.
Args:
pipeline_name (str): Name of pipeline, sans .yaml at end.
pipeline_context_input (str): Initialize the pypyr context with this
string.
working_dir (path): Look for pipelines and modules in this directory.
If context arg passed, will use context.working_dir and
ignore this argument. If context is None, working_dir
must be specified.
context (pypyr.context.Context): Use if you already have a
Context object, such as if you are running a pipeline from
within a pipeline and you want to re-use the same context
object for the child pipeline. Any mutations of the context by
the pipeline will be against this instance of it.
parse_input (bool): run context_parser in pipeline.
loader (str): str. optional. Absolute name of pipeline loader module.
If not specified will use pypyr.pypeloaders.fileloader.
Returns:
None
"""
logger.debug(f"you asked to run pipeline: {pipeline_name}")
if loader:
logger.debug(f"you set the pype loader to: {loader}")
else:
loader = 'pypyr.pypeloaders.fileloader'
logger.debug(f"use default pype loader: {loader}")
logger.debug(f"you set the initial context to: {pipeline_context_input}")
if context is None:
context = pypyr.context.Context()
context.working_dir = working_dir
else:
working_dir = context.working_dir
# pipeline loading deliberately outside of try catch. The try catch will
# try to run a failure-handler from the pipeline, but if the pipeline
# doesn't exist there is no failure handler that can possibly run so this
# is very much a fatal stop error.
loader_module = pypyr.moduleloader.get_module(loader)
try:
get_pipeline_definition = getattr(
loader_module, 'get_pipeline_definition'
)
except AttributeError:
logger.error(
f"The pipeline loader {loader_module} doesn't have a "
"get_pipeline_definition(pipeline_name, working_dir) function.")
raise
logger.debug(f"loading the pipeline definition with {loader_module}")
pipeline_definition = get_pipeline_definition(
pipeline_name=pipeline_name,
working_dir=working_dir
)
logger.debug(f"{loader_module} done")
run_pipeline(
pipeline=pipeline_definition,
pipeline_context_input=pipeline_context_input,
context=context,
parse_input=parse_input
) | [
"def",
"load_and_run_pipeline",
"(",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"None",
",",
"working_dir",
"=",
"None",
",",
"context",
"=",
"None",
",",
"parse_input",
"=",
"True",
",",
"loader",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"f\"you asked to run pipeline: {pipeline_name}\"",
")",
"if",
"loader",
":",
"logger",
".",
"debug",
"(",
"f\"you set the pype loader to: {loader}\"",
")",
"else",
":",
"loader",
"=",
"'pypyr.pypeloaders.fileloader'",
"logger",
".",
"debug",
"(",
"f\"use default pype loader: {loader}\"",
")",
"logger",
".",
"debug",
"(",
"f\"you set the initial context to: {pipeline_context_input}\"",
")",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"pypyr",
".",
"context",
".",
"Context",
"(",
")",
"context",
".",
"working_dir",
"=",
"working_dir",
"else",
":",
"working_dir",
"=",
"context",
".",
"working_dir",
"# pipeline loading deliberately outside of try catch. The try catch will",
"# try to run a failure-handler from the pipeline, but if the pipeline",
"# doesn't exist there is no failure handler that can possibly run so this",
"# is very much a fatal stop error.",
"loader_module",
"=",
"pypyr",
".",
"moduleloader",
".",
"get_module",
"(",
"loader",
")",
"try",
":",
"get_pipeline_definition",
"=",
"getattr",
"(",
"loader_module",
",",
"'get_pipeline_definition'",
")",
"except",
"AttributeError",
":",
"logger",
".",
"error",
"(",
"f\"The pipeline loader {loader_module} doesn't have a \"",
"\"get_pipeline_definition(pipeline_name, working_dir) function.\"",
")",
"raise",
"logger",
".",
"debug",
"(",
"f\"loading the pipeline definition with {loader_module}\"",
")",
"pipeline_definition",
"=",
"get_pipeline_definition",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"working_dir",
"=",
"working_dir",
")",
"logger",
".",
"debug",
"(",
"f\"{loader_module} done\"",
")",
"run_pipeline",
"(",
"pipeline",
"=",
"pipeline_definition",
",",
"pipeline_context_input",
"=",
"pipeline_context_input",
",",
"context",
"=",
"context",
",",
"parse_input",
"=",
"parse_input",
")"
] | Load and run the specified pypyr pipeline.
This function runs the actual pipeline by name. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
By default pypyr uses file loader. This means that pipeline_name.yaml
should be in the working_dir/pipelines/ directory.
Args:
pipeline_name (str): Name of pipeline, sans .yaml at end.
pipeline_context_input (str): Initialize the pypyr context with this
string.
working_dir (path): Look for pipelines and modules in this directory.
If context arg passed, will use context.working_dir and
ignore this argument. If context is None, working_dir
must be specified.
context (pypyr.context.Context): Use if you already have a
Context object, such as if you are running a pipeline from
within a pipeline and you want to re-use the same context
object for the child pipeline. Any mutations of the context by
the pipeline will be against this instance of it.
parse_input (bool): run context_parser in pipeline.
loader (str): str. optional. Absolute name of pipeline loader module.
If not specified will use pypyr.pypeloaders.fileloader.
Returns:
None | ["Load", "and", "run", "the", "specified", "pypyr", "pipeline", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L136-L215 | ["pipeline_name", "pipeline_context_input", "working_dir", "context", "parse_input", "loader"] | What does this function do? | [
"Load",
"and",
"run",
"the",
"specified",
"pypyr",
"pipeline",
"."
] |
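A sketch of the pipeline-calling-pipeline case the docstring describes; the step module and child pipeline name are made up, and context.working_dir is assumed to have been set by the parent run, as the code above does.

import pypyr.pipelinerunner as pipelinerunner

def run_step(context):
    # Re-use the parent pipeline's context and skip the child's context_parser,
    # since the context is already populated. Uses the default file loader, so
    # ./pipelines/child-pipe.yaml must exist under context.working_dir.
    pipelinerunner.load_and_run_pipeline(pipeline_name='child-pipe',
                                         context=context,
                                         parse_input=False)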
pypyr/pypyr-cli | pypyr/pipelinerunner.py | run_pipeline | def run_pipeline(pipeline,
context,
pipeline_context_input=None,
parse_input=True):
"""Run the specified pypyr pipeline.
This function runs the actual pipeline. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
Pipeline and context should be already loaded.
Args:
pipeline (dict): Dictionary representing the pipeline.
context (pypyr.context.Context): Reusable context object.
pipeline_context_input (str): Initialize the pypyr context with this
string.
parse_input (bool): run context_parser in pipeline.
Returns:
None
"""
logger.debug("starting")
try:
if parse_input:
logger.debug("executing context_parser")
prepare_context(pipeline=pipeline,
context_in_string=pipeline_context_input,
context=context)
else:
logger.debug("skipping context_parser")
# run main steps
pypyr.stepsrunner.run_step_group(
pipeline_definition=pipeline,
step_group_name='steps',
context=context)
# if nothing went wrong, run on_success
logger.debug("pipeline steps complete. Running on_success steps now.")
pypyr.stepsrunner.run_step_group(
pipeline_definition=pipeline,
step_group_name='on_success',
context=context)
except Exception:
# yes, yes, don't catch Exception. Have to, though, to run the failure
# handler. Also, it does raise it back up.
logger.error("Something went wrong. Will now try to run on_failure.")
# failure_step_group will log but swallow any errors
pypyr.stepsrunner.run_failure_step_group(
pipeline=pipeline,
context=context)
logger.debug("Raising original exception to caller.")
raise
logger.debug("done") | python | def run_pipeline(pipeline,
context,
pipeline_context_input=None,
parse_input=True):
"""Run the specified pypyr pipeline.
This function runs the actual pipeline. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
Pipeline and context should be already loaded.
Args:
pipeline (dict): Dictionary representing the pipeline.
context (pypyr.context.Context): Reusable context object.
pipeline_context_input (str): Initialize the pypyr context with this
string.
parse_input (bool): run context_parser in pipeline.
Returns:
None
"""
logger.debug("starting")
try:
if parse_input:
logger.debug("executing context_parser")
prepare_context(pipeline=pipeline,
context_in_string=pipeline_context_input,
context=context)
else:
logger.debug("skipping context_parser")
# run main steps
pypyr.stepsrunner.run_step_group(
pipeline_definition=pipeline,
step_group_name='steps',
context=context)
# if nothing went wrong, run on_success
logger.debug("pipeline steps complete. Running on_success steps now.")
pypyr.stepsrunner.run_step_group(
pipeline_definition=pipeline,
step_group_name='on_success',
context=context)
except Exception:
# yes, yes, don't catch Exception. Have to, though, to run the failure
# handler. Also, it does raise it back up.
logger.error("Something went wrong. Will now try to run on_failure.")
# failure_step_group will log but swallow any errors
pypyr.stepsrunner.run_failure_step_group(
pipeline=pipeline,
context=context)
logger.debug("Raising original exception to caller.")
raise
logger.debug("done") | [
"def",
"run_pipeline",
"(",
"pipeline",
",",
"context",
",",
"pipeline_context_input",
"=",
"None",
",",
"parse_input",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"try",
":",
"if",
"parse_input",
":",
"logger",
".",
"debug",
"(",
"\"executing context_parser\"",
")",
"prepare_context",
"(",
"pipeline",
"=",
"pipeline",
",",
"context_in_string",
"=",
"pipeline_context_input",
",",
"context",
"=",
"context",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"skipping context_parser\"",
")",
"# run main steps",
"pypyr",
".",
"stepsrunner",
".",
"run_step_group",
"(",
"pipeline_definition",
"=",
"pipeline",
",",
"step_group_name",
"=",
"'steps'",
",",
"context",
"=",
"context",
")",
"# if nothing went wrong, run on_success",
"logger",
".",
"debug",
"(",
"\"pipeline steps complete. Running on_success steps now.\"",
")",
"pypyr",
".",
"stepsrunner",
".",
"run_step_group",
"(",
"pipeline_definition",
"=",
"pipeline",
",",
"step_group_name",
"=",
"'on_success'",
",",
"context",
"=",
"context",
")",
"except",
"Exception",
":",
"# yes, yes, don't catch Exception. Have to, though, to run the failure",
"# handler. Also, it does raise it back up.",
"logger",
".",
"error",
"(",
"\"Something went wrong. Will now try to run on_failure.\"",
")",
"# failure_step_group will log but swallow any errors",
"pypyr",
".",
"stepsrunner",
".",
"run_failure_step_group",
"(",
"pipeline",
"=",
"pipeline",
",",
"context",
"=",
"context",
")",
"logger",
".",
"debug",
"(",
"\"Raising original exception to caller.\"",
")",
"raise",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run the specified pypyr pipeline.
This function runs the actual pipeline. If you are running another
pipeline from within a pipeline, call this, not main(). Do call main()
instead for your 1st pipeline if there are pipelines calling pipelines.
Pipeline and context should be already loaded.
Args:
pipeline (dict): Dictionary representing the pipeline.
context (pypyr.context.Context): Reusable context object.
pipeline_context_input (str): Initialize the pypyr context with this
string.
parse_input (bool): run context_parser in pipeline.
Returns:
None | ["Run", "the", "specified", "pypyr", "pipeline", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L218-L276 | ["pipeline", "context", "pipeline_context_input", "parse_input"] | What does this function do? | [
"Run",
"the",
"specified",
"pypyr",
"pipeline",
"."
] |
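A sketch with an in-memory pipeline definition instead of a loaded yaml file (hence parse_input=False); it assumes the steps runner accepts the bare simple-step string form and quietly skips the absent on_success group, as it does for pipelines that omit it.

import logging
import pypyr.context
import pypyr.pipelinerunner as pipelinerunner

logging.basicConfig(level=logging.INFO)

pipeline = {'steps': ['pypyr.steps.echo']}  # built in code, not loaded from yaml
context = pypyr.context.Context({'echoMe': 'hello from run_pipeline'})
pipelinerunner.run_pipeline(pipeline=pipeline, context=context,
                            parse_input=False)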
pypyr/pypyr-cli | pypyr/steps/filewriteyaml.py | run_step | def run_step(context):
"""Write payload out to yaml file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteYaml
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteYaml or
fileWriteYaml['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or
fileWriteYaml['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteYaml', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteYaml']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteYaml']
yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context()
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteYaml']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
yaml_writer.dump(formatted_iterable, outfile)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | python | def run_step(context):
"""Write payload out to yaml file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteYaml
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteYaml or
fileWriteYaml['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or
fileWriteYaml['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteYaml', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteYaml']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteYaml']
yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context()
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteYaml']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
yaml_writer.dump(formatted_iterable, outfile)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_child_key_has_value",
"(",
"'fileWriteYaml'",
",",
"'path'",
",",
"__name__",
")",
"out_path",
"=",
"context",
".",
"get_formatted_string",
"(",
"context",
"[",
"'fileWriteYaml'",
"]",
"[",
"'path'",
"]",
")",
"# doing it like this to safeguard against accidentally dumping all context",
"# with potentially sensitive values in it to disk if payload exists but is",
"# None.",
"is_payload_specified",
"=",
"'payload'",
"in",
"context",
"[",
"'fileWriteYaml'",
"]",
"yaml_writer",
"=",
"pypyr",
".",
"yaml",
".",
"get_yaml_parser_roundtrip_for_context",
"(",
")",
"logger",
".",
"debug",
"(",
"f\"opening destination file for writing: {out_path}\"",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_path",
")",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"out_path",
",",
"'w'",
")",
"as",
"outfile",
":",
"if",
"is_payload_specified",
":",
"payload",
"=",
"context",
"[",
"'fileWriteYaml'",
"]",
"[",
"'payload'",
"]",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"payload",
")",
"else",
":",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"context",
")",
"yaml_writer",
".",
"dump",
"(",
"formatted_iterable",
",",
"outfile",
")",
"logger",
".",
"info",
"(",
"f\"formatted context content and wrote to {out_path}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Write payload out to yaml file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteYaml
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteYaml or
fileWriteYaml['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or
fileWriteYaml['path'] exists but is None. | ["Write", "payload", "out", "to", "yaml", "file", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filewriteyaml.py#L10-L55 | ["context"] | What does this function do? | [
"Write",
"payload",
"out",
"to",
"yaml",
"file",
"."
] |
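A sketch of driving the step directly from python (pypyr installed, paths made up); the {env} placeholder in the payload is formatted against the context on write, per get_formatted_iterable in the code above.

import pypyr.context
import pypyr.steps.filewriteyaml as filewriteyaml

context = pypyr.context.Context({
    'env': 'prod',
    'fileWriteYaml': {
        'path': 'out/settings.yaml',           # out/ gets created if missing
        'payload': {'environment': '{env}'},   # written as environment: prod
    }})
filewriteyaml.run_step(context)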
pypyr/pypyr-cli | pypyr/steps/debug.py | run_step | def run_step(context):
"""Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump.
"""
logger.debug("started")
debug = context.get('debug', None)
if debug:
keys = debug.get('keys', None)
format = debug.get('format', False)
if keys:
logger.debug(f"Writing to output: {keys}")
if isinstance(keys, str):
payload = {keys: context[keys]}
else:
payload = {k: context[k] for k in keys}
else:
logger.debug(
"No keys specified. Writing entire context to output.")
payload = context
if format:
payload = context.get_formatted_iterable(payload)
else:
payload = context
logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}')
logger.debug("done") | python | def run_step(context):
"""Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump.
"""
logger.debug("started")
debug = context.get('debug', None)
if debug:
keys = debug.get('keys', None)
format = debug.get('format', False)
if keys:
logger.debug(f"Writing to output: {keys}")
if isinstance(keys, str):
payload = {keys: context[keys]}
else:
payload = {k: context[k] for k in keys}
else:
logger.debug(
"No keys specified. Writing entire context to output.")
payload = context
if format:
payload = context.get_formatted_iterable(payload)
else:
payload = context
logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}')
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"debug",
"=",
"context",
".",
"get",
"(",
"'debug'",
",",
"None",
")",
"if",
"debug",
":",
"keys",
"=",
"debug",
".",
"get",
"(",
"'keys'",
",",
"None",
")",
"format",
"=",
"debug",
".",
"get",
"(",
"'format'",
",",
"False",
")",
"if",
"keys",
":",
"logger",
".",
"debug",
"(",
"f\"Writing to output: {keys}\"",
")",
"if",
"isinstance",
"(",
"keys",
",",
"str",
")",
":",
"payload",
"=",
"{",
"keys",
":",
"context",
"[",
"keys",
"]",
"}",
"else",
":",
"payload",
"=",
"{",
"k",
":",
"context",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"}",
"else",
":",
"logger",
".",
"debug",
"(",
"\"No keys specified. Writing entire context to output.\"",
")",
"payload",
"=",
"context",
"if",
"format",
":",
"payload",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"payload",
")",
"else",
":",
"payload",
"=",
"context",
"logger",
".",
"info",
"(",
"f'\\n{json.dumps(payload, indent=2, ensure_ascii=False)}'",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Print debug info to console.
context is a dictionary or dictionary-like.
If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the
debug input context), it will just dump the entire context to stdout.
Configure the debug step with the following optional context item:
debug:
keys: str (for single key) or list (of str keys). Only dump the
specified keys.
format: bool. Defaults False. Applies formatting expressions on
dump. | ["Print", "debug", "info", "to", "console", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/debug.py#L23-L64 | ["context"] | What does this function do? | [
"Print",
"debug",
"info",
"to",
"console",
"."
] |
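A sketch of calling the step directly; the key names are made up, and logging must be configured so the INFO output is visible.

import logging
import pypyr.context
import pypyr.steps.debug as debug

logging.basicConfig(level=logging.INFO)
context = pypyr.context.Context({
    'arbkey': 'arb value',
    'otherkey': 123,
    'debug': {'keys': 'arbkey'}})  # drop 'debug' entirely to dump everything
debug.run_step(context)  # logs {"arbkey": "arb value"} as indented json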
pypyr/pypyr-cli | pypyr/steps/echo.py | run_step | def run_step(context):
"""Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
"""
logger.debug("started")
assert context, ("context must be set for echo. Did you set "
"'echoMe=text here'?")
context.assert_key_exists('echoMe', __name__)
if isinstance(context['echoMe'], str):
val = context.get_formatted('echoMe')
else:
val = context['echoMe']
logger.info(val)
logger.debug("done") | python | def run_step(context):
"""Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser.
"""
logger.debug("started")
assert context, ("context must be set for echo. Did you set "
"'echoMe=text here'?")
context.assert_key_exists('echoMe', __name__)
if isinstance(context['echoMe'], str):
val = context.get_formatted('echoMe')
else:
val = context['echoMe']
logger.info(val)
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"assert",
"context",
",",
"(",
"\"context must be set for echo. Did you set \"",
"\"'echoMe=text here'?\"",
")",
"context",
".",
"assert_key_exists",
"(",
"'echoMe'",
",",
"__name__",
")",
"if",
"isinstance",
"(",
"context",
"[",
"'echoMe'",
"]",
",",
"str",
")",
":",
"val",
"=",
"context",
".",
"get_formatted",
"(",
"'echoMe'",
")",
"else",
":",
"val",
"=",
"context",
"[",
"'echoMe'",
"]",
"logger",
".",
"info",
"(",
"val",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Simple echo. Outputs context['echoMe'].
Args:
context: dictionary-like. context is mandatory.
context must contain key 'echoMe'
context['echoMe'] will echo the value to logger.
This logger could well be stdout.
When you execute the pipeline, it should look something like this:
pypyr [name here] 'echoMe=test', assuming a keyvaluepair context parser. | ["Simple", "echo", ".", "Outputs", "context", "[", "echoMe", "]", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/echo.py#L8-L34 | ["context"] | What does this function do? | [
"Simple",
"echo",
".",
"Outputs",
"context",
"[",
"echoMe",
"]",
"."
] |
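The smallest possible direct call, assuming pypyr is installed and logging is configured so the INFO line shows up.

import logging
import pypyr.context
import pypyr.steps.echo as echo

logging.basicConfig(level=logging.INFO)
context = pypyr.context.Context({'echoMe': 'testing the echo step'})
echo.run_step(context)  # logs: testing the echo step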
pypyr/pypyr-cli | pypyr/errors.py | get_error_name | def get_error_name(error):
"""Return canonical error name as string.
For builtin errors like ValueError or Exception, will return the bare
name, like ValueError or Exception.
For all other exceptions, will return modulename.errorname, such as
arbpackage.mod.myerror
Args:
error: Exception object.
Returns:
str. Canonical error name.
"""
error_type = type(error)
if error_type.__module__ in ['__main__', 'builtins']:
return error_type.__name__
else:
return f'{error_type.__module__}.{error_type.__name__}' | python | def get_error_name(error):
"""Return canonical error name as string.
For builtin errors like ValueError or Exception, will return the bare
name, like ValueError or Exception.
For all other exceptions, will return modulename.errorname, such as
arbpackage.mod.myerror
Args:
error: Exception object.
Returns:
str. Canonical error name.
"""
error_type = type(error)
if error_type.__module__ in ['__main__', 'builtins']:
return error_type.__name__
else:
return f'{error_type.__module__}.{error_type.__name__}' | [
"def",
"get_error_name",
"(",
"error",
")",
":",
"error_type",
"=",
"type",
"(",
"error",
")",
"if",
"error_type",
".",
"__module__",
"in",
"[",
"'__main__'",
",",
"'builtins'",
"]",
":",
"return",
"error_type",
".",
"__name__",
"else",
":",
"return",
"f'{error_type.__module__}.{error_type.__name__}'"
] | Return canonical error name as string.
For builtin errors like ValueError or Exception, will return the bare
name, like ValueError or Exception.
For all other exceptions, will return modulename.errorname, such as
arbpackage.mod.myerror
Args:
error: Exception object.
Returns:
str. Canonical error name. | ["Return", "canonical", "error", "name", "as", "string", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/errors.py#L7-L27 | ["error"] | What does this function do? | [
"Return",
"canonical",
"error",
"name",
"as",
"string",
"."
] |
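A quick sketch of both branches; the custom-error module path in the comment is illustrative only.

from pypyr.errors import get_error_name

print(get_error_name(ValueError('nope')))  # ValueError - builtin, bare name
print(get_error_name(Exception()))         # Exception
# A non-builtin error comes back qualified, e.g. mypackage.errors.MyError.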
pypyr/pypyr-cli | pypyr/moduleloader.py | get_module | def get_module(module_abs_import):
"""Use importlib to get the module dynamically.
Get instance of the module specified by the module_abs_import.
This means that module_abs_import must be resolvable from this package.
Args:
module_abs_import: string. Absolute name of module to import.
Raises:
PyModuleNotFoundError: if module not found.
"""
logger.debug("starting")
logger.debug(f"loading module {module_abs_import}")
try:
imported_module = importlib.import_module(module_abs_import)
logger.debug("done")
return imported_module
except ModuleNotFoundError as err:
msg = ("The module doesn't exist. Looking for a file like this: "
f"{module_abs_import}")
extended_msg = (f"{module_abs_import}.py should be in your working "
"dir or it should be installed to the python path."
"\nIf you have 'package.sub.mod' your current working "
"dir should contain ./package/sub/mod.py\n"
"If you specified 'mymodulename', your current "
"working dir should contain ./mymodulename.py\n"
"If the module is not in your current working dir, it "
"must exist in your current python path - so you "
"should have run pip install or setup.py")
logger.error(msg)
raise PyModuleNotFoundError(extended_msg) from err | python | def get_module(module_abs_import):
"""Use importlib to get the module dynamically.
Get instance of the module specified by the module_abs_import.
This means that module_abs_import must be resolvable from this package.
Args:
module_abs_import: string. Absolute name of module to import.
Raises:
PyModuleNotFoundError: if module not found.
"""
logger.debug("starting")
logger.debug(f"loading module {module_abs_import}")
try:
imported_module = importlib.import_module(module_abs_import)
logger.debug("done")
return imported_module
except ModuleNotFoundError as err:
msg = ("The module doesn't exist. Looking for a file like this: "
f"{module_abs_import}")
extended_msg = (f"{module_abs_import}.py should be in your working "
"dir or it should be installed to the python path."
"\nIf you have 'package.sub.mod' your current working "
"dir should contain ./package/sub/mod.py\n"
"If you specified 'mymodulename', your current "
"working dir should contain ./mymodulename.py\n"
"If the module is not in your current working dir, it "
"must exist in your current python path - so you "
"should have run pip install or setup.py")
logger.error(msg)
raise PyModuleNotFoundError(extended_msg) from err | [
"def",
"get_module",
"(",
"module_abs_import",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"logger",
".",
"debug",
"(",
"f\"loading module {module_abs_import}\"",
")",
"try",
":",
"imported_module",
"=",
"importlib",
".",
"import_module",
"(",
"module_abs_import",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"imported_module",
"except",
"ModuleNotFoundError",
"as",
"err",
":",
"msg",
"=",
"(",
"\"The module doesn't exist. Looking for a file like this: \"",
"f\"{module_abs_import}\"",
")",
"extended_msg",
"=",
"(",
"f\"{module_abs_import}.py should be in your working \"",
"\"dir or it should be installed to the python path.\"",
"\"\\nIf you have 'package.sub.mod' your current working \"",
"\"dir should contain ./package/sub/mod.py\\n\"",
"\"If you specified 'mymodulename', your current \"",
"\"working dir should contain ./mymodulename.py\\n\"",
"\"If the module is not in your current working dir, it \"",
"\"must exist in your current python path - so you \"",
"\"should have run pip install or setup.py\"",
")",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"PyModuleNotFoundError",
"(",
"extended_msg",
")",
"from",
"err"
] | Use importlib to get the module dynamically.
Get instance of the module specified by the module_abs_import.
This means that module_abs_import must be resolvable from this package.
Args:
module_abs_import: string. Absolute name of module to import.
Raises:
PyModuleNotFoundError: if module not found. | ["Use", "importlib", "to", "get", "the", "module", "dynamically", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/moduleloader.py#L15-L48 | ["module_abs_import"] | What does this function do? | [
"Use",
"importlib",
"to",
"get",
"the",
"module",
"dynamically",
"."
] |
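A sketch using a stdlib module name so it runs anywhere pypyr is installed; any absolute module name resolvable on sys.path behaves the same way.

import pypyr.moduleloader

json_module = pypyr.moduleloader.get_module('json')
print(json_module.dumps({'k': 'v'}))  # the dynamically loaded module is usable
# A name that can't be imported raises PyModuleNotFoundError instead.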
pypyr/pypyr-cli | pypyr/moduleloader.py | set_working_directory | def set_working_directory(working_directory):
"""Add working_directory to sys.paths.
This allows dynamic loading of arbitrary python modules in cwd.
Args:
working_directory: string. path to add to sys.paths
"""
logger.debug("starting")
logger.debug(f"adding {working_directory} to sys.paths")
sys.path.append(working_directory)
logger.debug("done") | python | def set_working_directory(working_directory):
"""Add working_directory to sys.paths.
This allows dynamic loading of arbitrary python modules in cwd.
Args:
working_directory: string. path to add to sys.paths
"""
logger.debug("starting")
logger.debug(f"adding {working_directory} to sys.paths")
sys.path.append(working_directory)
logger.debug("done") | [
"def",
"set_working_directory",
"(",
"working_directory",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"logger",
".",
"debug",
"(",
"f\"adding {working_directory} to sys.paths\"",
")",
"sys",
".",
"path",
".",
"append",
"(",
"working_directory",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Add working_directory to sys.paths.
This allows dynamic loading of arbitrary python modules in cwd.
Args:
working_directory: string. path to add to sys.paths | ["Add", "working_directory", "to", "sys", ".", "paths", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/moduleloader.py#L51-L65 | ["working_directory"] | What does this function do? | [
"Add",
"working_directory",
"to",
"sys",
".",
"paths",
"."
] |
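A trivial sketch; the directory is made up, and the effect is simply that modules under it become importable for the loaders above.

import sys
import pypyr.moduleloader

pypyr.moduleloader.set_working_directory('/tmp/my-pipelines')
print('/tmp/my-pipelines' in sys.path)  # True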
pypyr/pypyr-cli | pypyr/context.py | Context.assert_child_key_has_value | def assert_child_key_has_value(self, parent, child, caller):
"""Assert that context contains key that has child which has a value.
Args:
parent: parent key
child: validate this sub-key of parent exists AND isn't None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None
"""
assert parent, ("parent parameter must be specified.")
assert child, ("child parameter must be specified.")
self.assert_key_has_value(parent, caller)
try:
child_exists = child in self[parent]
except TypeError as err:
# This happens if parent isn't iterable
raise ContextError(
f"context['{parent}'] must be iterable and contain '{child}' "
f"for {caller}. {err}") from err
if child_exists:
if self[parent][child] is None:
raise KeyInContextHasNoValueError(
f"context['{parent}']['{child}'] must have a value for "
f"{caller}.")
else:
raise KeyNotInContextError(
f"context['{parent}']['{child}'] doesn't "
f"exist. It must exist for {caller}.") | python | def assert_child_key_has_value(self, parent, child, caller):
"""Assert that context contains key that has child which has a value.
Args:
parent: parent key
child: validate this sub-key of parent exists AND isn't None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None
"""
assert parent, ("parent parameter must be specified.")
assert child, ("child parameter must be specified.")
self.assert_key_has_value(parent, caller)
try:
child_exists = child in self[parent]
except TypeError as err:
# This happens if parent isn't iterable
raise ContextError(
f"context['{parent}'] must be iterable and contain '{child}' "
f"for {caller}. {err}") from err
if child_exists:
if self[parent][child] is None:
raise KeyInContextHasNoValueError(
f"context['{parent}']['{child}'] must have a value for "
f"{caller}.")
else:
raise KeyNotInContextError(
f"context['{parent}']['{child}'] doesn't "
f"exist. It must exist for {caller}.") | [
"def",
"assert_child_key_has_value",
"(",
"self",
",",
"parent",
",",
"child",
",",
"caller",
")",
":",
"assert",
"parent",
",",
"(",
"\"parent parameter must be specified.\"",
")",
"assert",
"child",
",",
"(",
"\"child parameter must be specified.\"",
")",
"self",
".",
"assert_key_has_value",
"(",
"parent",
",",
"caller",
")",
"try",
":",
"child_exists",
"=",
"child",
"in",
"self",
"[",
"parent",
"]",
"except",
"TypeError",
"as",
"err",
":",
"# This happens if parent isn't iterable",
"raise",
"ContextError",
"(",
"f\"context['{parent}'] must be iterable and contain '{child}' \"",
"f\"for {caller}. {err}\"",
")",
"from",
"err",
"if",
"child_exists",
":",
"if",
"self",
"[",
"parent",
"]",
"[",
"child",
"]",
"is",
"None",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"f\"context['{parent}']['{child}'] must have a value for \"",
"f\"{caller}.\"",
")",
"else",
":",
"raise",
"KeyNotInContextError",
"(",
"f\"context['{parent}']['{child}'] doesn't \"",
"f\"exist. It must exist for {caller}.\"",
")"
] | Assert that context contains key that has child which has a value.
Args:
parent: parent key
child: validate this sub-key of parent exists AND isn't None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None | ["Assert", "that", "context", "contains", "key", "that", "has", "child", "which", "has", "a", "value", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L48-L83 | ["parent", "child", "caller"] | What does this function do? | [
"Assert",
"that",
"context",
"contains",
"key",
"that",
"has",
"child",
"which",
"has",
"a",
"value",
"."
] |
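A sketch of the pass and fail paths, with key names borrowed from the filewriteyaml step above; it assumes the exception classes are exposed by pypyr.errors, as the step docstrings indicate.

import pypyr.context
from pypyr.errors import KeyNotInContextError

context = pypyr.context.Context({'fileWriteYaml': {'path': 'out.yaml'}})
context.assert_child_key_has_value('fileWriteYaml', 'path', 'mystep')  # passes
try:
    context.assert_child_key_has_value('fileWriteYaml', 'payload', 'mystep')
except KeyNotInContextError as err:
    print(err)  # ...['payload'] doesn't exist. It must exist for mystep.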
pypyr/pypyr-cli | pypyr/context.py | Context.assert_key_has_value | def assert_key_has_value(self, key, caller):
"""Assert that context contains key which also has a value.
Args:
key: validate this key exists in context AND has a value that isn't
None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None
"""
assert key, ("key parameter must be specified.")
self.assert_key_exists(key, caller)
if self[key] is None:
raise KeyInContextHasNoValueError(
f"context['{key}'] must have a value for {caller}.") | python | def assert_key_has_value(self, key, caller):
"""Assert that context contains key which also has a value.
Args:
key: validate this key exists in context AND has a value that isn't
None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None
"""
assert key, ("key parameter must be specified.")
self.assert_key_exists(key, caller)
if self[key] is None:
raise KeyInContextHasNoValueError(
f"context['{key}'] must have a value for {caller}.") | [
"def",
"assert_key_has_value",
"(",
"self",
",",
"key",
",",
"caller",
")",
":",
"assert",
"key",
",",
"(",
"\"key parameter must be specified.\"",
")",
"self",
".",
"assert_key_exists",
"(",
"key",
",",
"caller",
")",
"if",
"self",
"[",
"key",
"]",
"is",
"None",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"f\"context['{key}'] must have a value for {caller}.\"",
")"
] | Assert that context contains key which also has a value.
Args:
key: validate this key exists in context AND has a value that isn't
None.
caller: string. calling function name - this used to construct
error messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if key is None | ["Assert", "that", "context", "contains", "key", "which", "also", "has", "a", "value", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L102-L122 | ["key", "caller"] | What does this function do? | [
"Assert",
"that",
"context",
"contains",
"key",
"which",
"also",
"has",
"a",
"value",
"."
] |
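A sketch of the exists-but-None case this method guards against; the import again assumes the exception lives in pypyr.errors.

import pypyr.context
from pypyr.errors import KeyInContextHasNoValueError

context = pypyr.context.Context({'ok': 'value', 'empty': None})
context.assert_key_has_value('ok', 'mystep')  # passes quietly
try:
    context.assert_key_has_value('empty', 'mystep')
except KeyInContextHasNoValueError as err:
    print(err)  # context['empty'] must have a value for mystep.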
pypyr/pypyr-cli | pypyr/context.py | Context.assert_key_type_value | def assert_key_type_value(self,
context_item,
caller,
extra_error_text=''):
"""Assert that keys exist of right type and has a value.
Args:
context_item: ContextItemInfo tuple
caller: string. calling function name - this used to construct
error messages
extra_error_text: append to end of error message.
Raises:
AssertionError: if context_item None.
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None or the wrong
type.
"""
assert context_item, ("context_item parameter must be specified.")
if extra_error_text is None or extra_error_text == '':
append_error_text = ''
else:
append_error_text = f' {extra_error_text}'
if not context_item.key_in_context:
raise KeyNotInContextError(f'{caller} couldn\'t find '
f'{context_item.key} in context.'
f'{append_error_text}')
if not context_item.has_value:
raise KeyInContextHasNoValueError(
f'{caller} found {context_item.key} in '
f'context but it doesn\'t have a value.'
f'{append_error_text}')
if not context_item.is_expected_type:
raise KeyInContextHasNoValueError(
f'{caller} found {context_item.key} in context, but it\'s '
f'not a {context_item.expected_type}.'
f'{append_error_text}') | python | def assert_key_type_value(self,
context_item,
caller,
extra_error_text=''):
"""Assert that keys exist of right type and has a value.
Args:
context_item: ContextItemInfo tuple
caller: string. calling function name - this used to construct
error messages
extra_error_text: append to end of error message.
Raises:
AssertionError: if context_item None.
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None or the wrong
type.
"""
assert context_item, ("context_item parameter must be specified.")
if extra_error_text is None or extra_error_text == '':
append_error_text = ''
else:
append_error_text = f' {extra_error_text}'
if not context_item.key_in_context:
raise KeyNotInContextError(f'{caller} couldn\'t find '
f'{context_item.key} in context.'
f'{append_error_text}')
if not context_item.has_value:
raise KeyInContextHasNoValueError(
f'{caller} found {context_item.key} in '
f'context but it doesn\'t have a value.'
f'{append_error_text}')
if not context_item.is_expected_type:
raise KeyInContextHasNoValueError(
f'{caller} found {context_item.key} in context, but it\'s '
f'not a {context_item.expected_type}.'
f'{append_error_text}') | [
"def",
"assert_key_type_value",
"(",
"self",
",",
"context_item",
",",
"caller",
",",
"extra_error_text",
"=",
"''",
")",
":",
"assert",
"context_item",
",",
"(",
"\"context_item parameter must be specified.\"",
")",
"if",
"extra_error_text",
"is",
"None",
"or",
"extra_error_text",
"==",
"''",
":",
"append_error_text",
"=",
"''",
"else",
":",
"append_error_text",
"=",
"f' {extra_error_text}'",
"if",
"not",
"context_item",
".",
"key_in_context",
":",
"raise",
"KeyNotInContextError",
"(",
"f'{caller} couldn\\'t find '",
"f'{context_item.key} in context.'",
"f'{append_error_text}'",
")",
"if",
"not",
"context_item",
".",
"has_value",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"f'{caller} found {context_item.key} in '",
"f'context but it doesn\\'t have a value.'",
"f'{append_error_text}'",
")",
"if",
"not",
"context_item",
".",
"is_expected_type",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"f'{caller} found {context_item.key} in context, but it\\'s '",
"f'not a {context_item.expected_type}.'",
"f'{append_error_text}'",
")"
] | Assert that keys exist of right type and has a value.
Args:
context_item: ContextItemInfo tuple
caller: string. calling function name - this used to construct
error messages
extra_error_text: append to end of error message.
Raises:
AssertionError: if context_item None.
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None or the wrong
type. | ["Assert", "that", "keys", "exist", "of", "right", "type", "and", "has", "a", "value", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L124-L165 | ["context_item", "caller", "extra_error_text"] | What does this function do? | [
"Assert",
"that",
"keys",
"exist",
"of",
"right",
"type",
"and",
"has",
"a",
"value",
"."
] |
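The method only duck-types five attributes off the tuple it receives, so the namedtuple below is a stand-in for the real ContextItemInfo that ships with pypyr; its name and field order here are assumptions.

from collections import namedtuple
import pypyr.context

ItemInfo = namedtuple('ItemInfo', ['key', 'key_in_context', 'expected_type',
                                   'is_expected_type', 'has_value'])

context = pypyr.context.Context({'port': 8080})
info = ItemInfo(key='port', key_in_context=True, expected_type=int,
                is_expected_type=True, has_value=True)
context.assert_key_type_value(info, 'mystep')  # every check passes, no exception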
pypyr/pypyr-cli | pypyr/context.py | Context.assert_keys_exist | def assert_keys_exist(self, caller, *keys):
"""Assert that context contains keys.
Args:
keys: validates that these keys exists in context
caller: string. calling function or module name - this used to
construct error messages
Raises:
KeyNotInContextError: When key doesn't exist in context.
"""
assert keys, ("*keys parameter must be specified.")
for key in keys:
self.assert_key_exists(key, caller) | python | def assert_keys_exist(self, caller, *keys):
"""Assert that context contains keys.
Args:
keys: validates that these keys exists in context
caller: string. calling function or module name - this used to
construct error messages
Raises:
KeyNotInContextError: When key doesn't exist in context.
"""
assert keys, ("*keys parameter must be specified.")
for key in keys:
self.assert_key_exists(key, caller) | [
"def",
"assert_keys_exist",
"(",
"self",
",",
"caller",
",",
"*",
"keys",
")",
":",
"assert",
"keys",
",",
"(",
"\"*keys parameter must be specified.\"",
")",
"for",
"key",
"in",
"keys",
":",
"self",
".",
"assert_key_exists",
"(",
"key",
",",
"caller",
")"
] | Assert that context contains keys.
Args:
keys: validates that these keys exists in context
caller: string. calling function or module name - this used to
construct error messages
Raises:
KeyNotInContextError: When key doesn't exist in context. | ["Assert", "that", "context", "contains", "keys", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L167-L181 | ["caller", "keys"] | What does this function do? | [
"Assert",
"that",
"context",
"contains",
"keys",
"."
] |
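A short sketch of checking several keys at once; the step name is arbitrary.

import pypyr.context
from pypyr.errors import KeyNotInContextError

context = pypyr.context.Context({'key1': 'a', 'key2': 'b'})
context.assert_keys_exist('mystep', 'key1', 'key2')  # passes
try:
    context.assert_keys_exist('mystep', 'key1', 'missing')
except KeyNotInContextError as err:
    print(err)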
pypyr/pypyr-cli | pypyr/context.py | Context.assert_keys_have_values | def assert_keys_have_values(self, caller, *keys):
"""Check that keys list are all in context and all have values.
Args:
*keys: Will check each of these keys in context
caller: string. Calling function name - just used for informational
messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if *keys is None
"""
for key in keys:
self.assert_key_has_value(key, caller) | python | def assert_keys_have_values(self, caller, *keys):
"""Check that keys list are all in context and all have values.
Args:
*keys: Will check each of these keys in context
caller: string. Calling function name - just used for informational
messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if *keys is None
"""
for key in keys:
self.assert_key_has_value(key, caller) | [
"def",
"assert_keys_have_values",
"(",
"self",
",",
"caller",
",",
"*",
"keys",
")",
":",
"for",
"key",
"in",
"keys",
":",
"self",
".",
"assert_key_has_value",
"(",
"key",
",",
"caller",
")"
] | Check that keys list are all in context and all have values.
Args:
*keys: Will check each of these keys in context
caller: string. Calling function name - just used for informational
messages
Raises:
KeyNotInContextError: Key doesn't exist
KeyInContextHasNoValueError: context[key] is None
AssertionError: if *keys is None | ["Check", "that", "keys", "list", "are", "all", "in", "context", "and", "all", "have", "values", "."] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L183-L198 | ["caller", "keys"] | What does this function do? | [
"Check",
"that",
"keys",
"list",
"are",
"all",
"in",
"context",
"and",
"all",
"have",
"values",
"."
] |
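Same idea as the single-key variant above, applied to several keys; key2 exists but is None, which is exactly what trips the check.

import pypyr.context
from pypyr.errors import KeyInContextHasNoValueError

context = pypyr.context.Context({'key1': 'a', 'key2': None})
context.assert_keys_have_values('mystep', 'key1')  # passes
try:
    context.assert_keys_have_values('mystep', 'key1', 'key2')
except KeyInContextHasNoValueError as err:
    print(err)  # context['key2'] must have a value for mystep.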
pypyr/pypyr-cli | pypyr/context.py | Context.assert_keys_type_value | python | train | parameters: caller, extra_error_text, context_items
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L200-L223
What does this function do? Assert that keys exist of right type and has a value.

def assert_keys_type_value(self,
                           caller,
                           extra_error_text,
                           *context_items):
    """Assert that keys exist of right type and has a value.

    Args:
        caller: string. calling function name - this used to construct
            error messages
        extra_error_text: append to end of error message. This can happily
            be None or ''.
        *context_items: ContextItemInfo tuples

    Raises:
        AssertionError: if context_items None.
        KeyNotInContextError: Key doesn't exist
        KeyInContextHasNoValueError: context[key] is None or the wrong
            type.

    """
    assert context_items, ("context_items parameter must be specified.")
    for context_item in context_items:
        self.assert_key_type_value(context_item, caller, extra_error_text)

pypyr/pypyr-cli | pypyr/context.py | Context.get_formatted | python | train | parameters: key
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L249-L296
What does this function do? Return formatted value for context[key].

def get_formatted(self, key):
    """Return formatted value for context[key].

    If context[key] is a type string, will just format and return the
    string.

    If context[key] is a special literal type, like a py string or sic
    string, will run the formatting implemented by the custom tag
    representer.

    If context[key] is not a string, specifically an iterable type like a
    dict, list, tuple, set, it will use get_formatted_iterable under the
    covers to loop through and handle the entire structure contained in
    context[key].

    Returns a string interpolated from the context dictionary.

    If context[key]='Piping {key1} the {key2} wild'
    And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
    Then this will return string: "Piping down the valleys wild"

    Args:
        key: dictionary key to retrieve.

    Returns:
        Formatted string.

    Raises:
        KeyNotInContextError: context[key] value contains {somekey} where
            somekey does not exist in context dictionary.

    """
    val = self[key]

    if isinstance(val, str):
        try:
            return self.get_processed_string(val)
        except KeyNotInContextError as err:
            # Wrapping the KeyError into a less cryptic error for end-user
            # friendliness
            raise KeyNotInContextError(
                f'Unable to format \'{val}\' at context[\'{key}\'], '
                f'because {err}'
            ) from err
    elif isinstance(val, SpecialTagDirective):
        return val.get_value(self)
    else:
        # any sort of complex type will work with get_formatted_iterable.
        return self.get_formatted_iterable(val)

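Illustrative calls against the method above, assuming a plain Context built from a dict with made-up values:

from pypyr.context import Context

context = Context({'user': 'ops',
                   'greeting': 'hello {user}',
                   'numbers': [1, '{user}']})

context.get_formatted('greeting')  # 'hello ops'
context.get_formatted('numbers')   # [1, 'ops']: non-strings route through get_formatted_iterable
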
pypyr/pypyr-cli | pypyr/context.py | Context.get_formatted_iterable | python | train | parameters: obj, memo
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L298-L361
What does this function do? Recursively loop through obj, formatting as it goes.

def get_formatted_iterable(self, obj, memo=None):
    """Recursively loop through obj, formatting as it goes.

    Interpolates strings from the context dictionary.

    This is not a full on deepcopy, and it's on purpose not a full on
    deepcopy. It will handle dict, list, set, tuple for iteration, without
    any especial cuteness for other types or types not derived from these.

    For lists: if value is a string, format it.
    For dicts: format key. If value str, format it.
    For sets/tuples: if type str, format it.

    This is what formatting or interpolating a string means:
    So where a string like this 'Piping {key1} the {key2} wild'
    And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
    Then this will return string: "Piping down the valleys wild"

    Args:
        obj: iterable. Recurse through and format strings found in
            dicts, lists, tuples. Does not mutate the input
            iterable.
        memo: dict. Don't use. Used internally on recursion to optimize
            recursive loops.

    Returns:
        Iterable identical in structure to the input iterable.

    """
    if memo is None:
        memo = {}

    obj_id = id(obj)

    already_done = memo.get(obj_id, None)
    if already_done is not None:
        return already_done

    if isinstance(obj, str):
        new = self.get_formatted_string(obj)
    elif isinstance(obj, SpecialTagDirective):
        new = obj.get_value(self)
    elif isinstance(obj, (bytes, bytearray)):
        new = obj
    elif isinstance(obj, Mapping):
        # dicts
        new = obj.__class__()
        for k, v in obj.items():
            new[self.get_formatted_string(
                k)] = self.get_formatted_iterable(v, memo)
    elif isinstance(obj, (Sequence, Set)):
        # list, set, tuple. Bytes and str won't fall into this branch coz
        # they're expicitly checked further up in the if.
        new = obj.__class__(self.get_formatted_iterable(v, memo)
                            for v in obj)
    else:
        # int, float, bool, function, et.
        return obj

    # If is its own copy, don't memoize.
    if new is not obj:
        memo[obj_id] = new

    return new

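A sketch of formatting a nested structure with this method; the context and payload values are hypothetical:

from pypyr.context import Context

context = Context({'env': 'prod', 'region': 'eu'})
payload = {'cluster': '{env}-{region}',
           'tags': ['{env}', 'deploy'],
           'replicas': 3}

formatted = context.get_formatted_iterable(payload)
# formatted == {'cluster': 'prod-eu', 'tags': ['prod', 'deploy'], 'replicas': 3}
# payload itself is not mutated.
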
pypyr/pypyr-cli | pypyr/context.py | Context.get_formatted_string | python | train | parameters: input_string
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L363-L403
What does this function do? Return formatted value for input_string.

def get_formatted_string(self, input_string):
    """Return formatted value for input_string.

    get_formatted gets a context[key] value.
    get_formatted_string is for any arbitrary string that is not in the
    context.

    Only valid if input_string is a type string.
    Return a string interpolated from the context dictionary.

    If input_string='Piping {key1} the {key2} wild'
    And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
    Then this will return string: "Piping down the valleys wild"

    Args:
        input_string: string to parse for substitutions.

    Returns:
        Formatted string.

    Raises:
        KeyNotInContextError: context[key] has {somekey} where somekey does
            not exist in context dictionary.
        TypeError: Attempt operation on a non-string type.

    """
    if isinstance(input_string, str):
        try:
            return self.get_processed_string(input_string)
        except KeyNotInContextError as err:
            # Wrapping the KeyError into a less cryptic error for end-user
            # friendliness
            raise KeyNotInContextError(
                f'Unable to format \'{input_string}\' because {err}'
            ) from err
    elif isinstance(input_string, SpecialTagDirective):
        return input_string.get_value(self)
    else:
        raise TypeError(f"can only format on strings. {input_string} is a "
                        f"{type(input_string)} instead.")

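Quick sketch: formatting an arbitrary string that is not itself stored in the context (values assumed for illustration):

from pypyr.context import Context

context = Context({'dir': '/tmp', 'file': 'out.txt'})
path = context.get_formatted_string('{dir}/{file}')  # '/tmp/out.txt'
# passing a non-string such as 123 raises TypeError, per the code above.
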
pypyr/pypyr-cli | pypyr/context.py | Context.get_formatted_as_type | python | train | parameters: value, default, out_type
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L405-L441
What does this function do? Return formatted value for input value, returns as out_type.

def get_formatted_as_type(self, value, default=None, out_type=str):
    """Return formatted value for input value, returns as out_type.

    Caveat emptor: if out_type is bool and value a string,
    return will be True if str is 'True'. It will be False for all other
    cases.

    Args:
        value: the value to format
        default: if value is None, set to this
        out_type: cast return as this type

    Returns:
        Formatted value of type out_type

    """
    if value is None:
        value = default

    if isinstance(value, SpecialTagDirective):
        result = value.get_value(self)
        return types.cast_to_type(result, out_type)

    if isinstance(value, str):
        result = self.get_formatted_string(value)

        result_type = type(result)
        if out_type is result_type:
            # get_formatted_string result is already a string
            return result
        elif out_type is bool and result_type is str:
            # casting a str to bool is always True, hence special case. If
            # the str value is 'False'/'false', presumably user can
            # reasonably expect a bool False response.
            return result.lower() in ['true', '1', '1.0']
        else:
            return out_type(result)
    else:
        return out_type(value)

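A sketch of the casting behaviour, including the bool special case the docstring warns about; the sample values are invented:

from pypyr.context import Context

context = Context({'retries': '3', 'verbose': 'False'})

context.get_formatted_as_type('{retries}', out_type=int)         # 3
context.get_formatted_as_type('{verbose}', out_type=bool)        # False: 'false' is not in ['true', '1', '1.0']
context.get_formatted_as_type(None, default='10', out_type=int)  # 10: the default kicks in for None
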
pypyr/pypyr-cli | pypyr/context.py | Context.get_processed_string | python | train | parameters: input_string
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L443-L523
What does this function do? Run token substitution on input_string against context.

def get_processed_string(self, input_string):
    """Run token substitution on input_string against context.

    You probably don't want to call this directly yourself - rather use
    get_formatted, get_formatted_iterable, or get_formatted_string because
    these contain more friendly error handling plumbing and context logic.

    If you do want to call it yourself, go for it, it doesn't touch state.

    If input_string='Piping {key1} the {key2} wild'
    And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}

    An input string with a single formatting expression and nothing else
    will return the object at that context path: input_string='{key1}'.
    This means that the return obj will be the same type as the source
    object. This return object in itself has token substitions run on it
    iteratively.

    By comparison, multiple formatting expressions and/or the inclusion of
    literal text will result in a string return type:
    input_string='{key1} literal text {key2}'

    Then this will return string: "Piping down the valleys wild"

    Args:
        input_string: string to Parse

    Returns:
        any given type: Formatted string with {substitutions} made from
        context. If it's a !sic string, x from !sic x, with no
        substitutions made on x. If input_string was a single expression
        (e.g '{field}'), then returns the object with {substitutions} made
        for its attributes.

    Raises:
        KeyNotInContextError: input_string is not a sic string and has
            {somekey} where somekey does not exist in
            context dictionary.

    """
    # arguably, this doesn't really belong here, or at least it makes a
    # nonsense of the function name. given how py and strings
    # look and feel pretty much like strings from user's perspective, and
    # given legacy code back when sic strings were in fact just strings,
    # keep in here for backwards compatibility.
    if isinstance(input_string, SpecialTagDirective):
        return input_string.get_value(self)
    else:
        # is this a special one field formatstring? i.e "{field}", with
        # nothing else?
        out = None
        is_out_set = False
        expr_count = 0
        # parse finds field format expressions and/or literals in input
        for expression in formatter.parse(input_string):
            # parse tuple:
            # (literal_text, field_name, format_spec, conversion)
            # it's a single '{field}' if no literal_text but field_name
            # no literal, field name exists, and no previous expr found
            if (not expression[0] and expression[1] and not expr_count):
                # get_field tuple: (obj, used_key)
                out = formatter.get_field(expression[1], None, self)[0]
                # second flag necessary because a literal with no format
                # expression will still result in expr_count == 1
                is_out_set = True
            expr_count += 1
            # this is a little bit clumsy, but you have to consume the
            # iterator to get the count. Interested in 1 and only 1 field
            # expressions with no literal text: have to loop to see if
            # there is >1.
            if expr_count > 1:
                break

        if is_out_set and expr_count == 1:
            # found 1 and only 1. but this could be an iterable obj
            # that needs formatting rules run on it in itself
            return self.get_formatted_iterable(out)
        else:
            return input_string.format_map(self)

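Sketch of the single-expression behaviour the docstring describes; the sample context is made up:

from pypyr.context import Context

context = Context({'numbers': [1, 2, 3], 'greeting': 'hi'})

context.get_processed_string('{numbers}')         # returns the list [1, 2, 3], not a str
context.get_processed_string('{greeting} there')  # 'hi there': literal text forces a str result
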
pypyr/pypyr-cli | pypyr/context.py | Context.keys_of_type_exist | python | train | parameters: keys
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L560-L593
What does this function do? Check if keys exist in context and if types are as expected.

def keys_of_type_exist(self, *keys):
    """Check if keys exist in context and if types are as expected.

    Args:
        *keys: *args for keys to check in context.
               Each arg is a tuple (str, type)

    Returns:
        Tuple of namedtuple ContextItemInfo, same order as *keys.
        ContextItemInfo(key,
                        key_in_context,
                        expected_type,
                        is_expected_type)

        Remember if there is only one key in keys, the return assignment
        needs an extra comma to remind python that it's a tuple:
        # one
        a, = context.keys_of_type_exist('a')
        # > 1
        a, b = context.keys_of_type_exist('a', 'b')

    """
    # k[0] = key name, k[1] = exists, k2 = expected type
    keys_exist = [(key, key in self.keys(), expected_type)
                  for key, expected_type in keys]

    return tuple(ContextItemInfo(
        key=k[0],
        key_in_context=k[1],
        expected_type=k[2],
        is_expected_type=isinstance(self[k[0]], k[2])
        if k[1] else None,
        has_value=k[1] and not self[k[0]] is None
    ) for k in keys_exist)

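Sketch of the tuple-of-ContextItemInfo return shape; the keys and types below are made up:

from pypyr.context import Context

context = Context({'span': 42, 'name': 'pypyr'})

name_info, = context.keys_of_type_exist(('name', str))          # note the trailing comma
span_info, other_info = context.keys_of_type_exist(('span', int),
                                                   ('nope', dict))
# name_info.is_expected_type is True; other_info.key_in_context is False.
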
pypyr/pypyr-cli | pypyr/context.py | Context.merge | python | train | parameters: add_me
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L595-L675
What does this function do? Merge add_me into context and applies interpolation.

def merge(self, add_me):
    """Merge add_me into context and applies interpolation.

    Bottom-up merge where add_me merges into context. Applies string
    interpolation where the type is a string. Where a key exists in
    context already, add_me's value will overwrite what's in context
    already.

    Supports nested hierarchy. add_me can contains dicts/lists/enumerables
    that contain other enumerables et. It doesn't restrict levels of
    nesting, so if you really want to go crazy with the levels you can, but
    you might blow your stack.

    If something from add_me exists in context already, but add_me's value
    is of a different type, add_me will overwrite context. Do note this.
    i.e if you had context['int_key'] == 1 and
    add_me['int_key'] == 'clearly not a number', the end result would be
    context['int_key'] == 'clearly not a number'

    If add_me contains lists/sets/tuples, this merges these
    additively, meaning it appends values from add_me to the existing
    sequence.

    Args:
        add_me: dict. Merge this dict into context.

    Returns:
        None. All operations mutate this instance of context.

    """
    def merge_recurse(current, add_me):
        """Walk the current context tree in recursive inner function.

        On 1st iteration, current = self (i.e root of context)
        On subsequent recursive iterations, current is wherever you're at
        in the nested context hierarchy.

        Args:
            current: dict. Destination of merge.
            add_me: dict. Merge this to current.

        """
        for k, v in add_me.items():
            # key supports interpolation
            k = self.get_formatted_string(k)

            # str not mergable, so it doesn't matter if it exists in dest
            if isinstance(v, str):
                # just overwrite dest - str adds/edits indiscriminately
                current[k] = self.get_formatted_string(v)
            elif isinstance(v, (bytes, bytearray)):
                # bytes aren't mergable or formattable
                # only here to prevent the elif on enumerables catching it
                current[k] = v
            # deal with things that are mergable - exists already in dest
            elif k in current:
                if types.are_all_this_type(Mapping, current[k], v):
                    # it's dict-y, thus recurse through it to merge since
                    # it exists in dest
                    merge_recurse(current[k], v)
                elif types.are_all_this_type(list, current[k], v):
                    # it's list-y. Extend mutates existing list since it
                    # exists in dest
                    current[k].extend(
                        self.get_formatted_iterable(v))
                elif types.are_all_this_type(tuple, current[k], v):
                    # concatenate tuples
                    current[k] = (
                        current[k] + self.get_formatted_iterable(v))
                elif types.are_all_this_type(Set, current[k], v):
                    # join sets
                    current[k] = (
                        current[k] | self.get_formatted_iterable(v))
                else:
                    # at this point it's not mergable nor a known iterable
                    current[k] = v
            else:
                # at this point it's not mergable, nor in context
                current[k] = self.get_formatted_iterable(v)

    # first iteration starts at context dict root
    merge_recurse(self, add_me)

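Sketch of the merge semantics for nested dicts, lists and new formatted keys; the values are invented for illustration:

from pypyr.context import Context

context = Context({'config': {'region': 'eu'}, 'steps': ['fetch'], 'owner': 'ops'})
context.merge({'config': {'bucket': 'logs'},  # nested dict: merged key by key
               'steps': ['deploy'],           # list: extended, not replaced
               'label': 'run-{owner}'})       # new string key: interpolated on the way in

# context['config'] == {'region': 'eu', 'bucket': 'logs'}
# context['steps'] == ['fetch', 'deploy']
# context['label'] == 'run-ops'
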
pypyr/pypyr-cli | pypyr/context.py | Context.set_defaults | python | train | parameters: defaults
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L677-L737
What does this function do? Set defaults in context if keys do not exist already.

def set_defaults(self, defaults):
    """Set defaults in context if keys do not exist already.

    Adds the input dict (defaults) into the context, only where keys in
    defaults do not already exist in context. Supports nested hierarchies.

    Example:
        Given a context like this:
            key1: value1
            key2:
                key2.1: value2.1
            key3: None

        And defaults input like this:
            key1: 'updated value here won't overwrite since it already exists'
            key2:
                key2.2: value2.2
            key3: 'key 3 exists so I won't overwrite

        Will result in context:
            key1: value1
            key2:
                key2.1: value2.1
                key2.2: value2.2
            key3: None

    Args:
        defaults: dict. Add this dict into context.

    Returns:
        None. All operations mutate this instance of context.

    """
    def defaults_recurse(current, defaults):
        """Walk the current context tree in recursive inner function.

        On 1st iteration, current = self (i.e root of context)
        On subsequent recursive iterations, current is wherever you're at
        in the nested context hierarchy.

        Args:
            current: dict. Destination of merge.
            defaults: dict. Add this to current if keys don't exist
                      already.

        """
        for k, v in defaults.items():
            # key supports interpolation
            k = self.get_formatted_string(k)

            if k in current:
                if types.are_all_this_type(Mapping, current[k], v):
                    # it's dict-y, thus recurse through it to check if it
                    # contains child items that don't exist in dest
                    defaults_recurse(current[k], v)
            else:
                # since it's not in context already, add the default
                current[k] = self.get_formatted_iterable(v)

    # first iteration starts at context dict root
    defaults_recurse(self, defaults)

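Sketch mirroring the docstring example, with assumed values:

from pypyr.context import Context

context = Context({'mode': 'quick', 'options': {'retries': 3}})
context.set_defaults({'mode': 'full',                  # exists: left alone
                      'options': {'timeout': 30},      # missing child: added
                      'log_path': '/tmp/{mode}.log'})  # missing: added, formatted

# context['mode'] == 'quick'
# context['options'] == {'retries': 3, 'timeout': 30}
# context['log_path'] == '/tmp/quick.log'
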
pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | FileInRewriterStep.run_step | python | train | parameters: rewriter
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L57-L68
What does this function do? Do the file in to out rewrite.

def run_step(self, rewriter):
    """Do the file in to out rewrite.

    Doesn't do anything more crazy than call files_in_to_out on the
    rewriter.

    Args:
        rewriter: pypyr.filesystem.FileRewriter instance.

    """
    assert rewriter, ("FileRewriter instance required to run "
                      "FileInRewriterStep.")
    rewriter.files_in_to_out(in_path=self.path_in, out_path=self.path_out)

pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | ObjectRewriterStep.run_step | python | train | parameters: representer
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L74-L85
What does this function do? Do the object in-out rewrite.

def run_step(self, representer):
    """Do the object in-out rewrite.

    Args:
        representer: A pypyr.filesystem.ObjectRepresenter instance.

    """
    assert representer, ("ObjectRepresenter instance required to run "
                         "ObjectRewriterStep.")
    rewriter = ObjectRewriter(self.context.get_formatted_iterable,
                              representer)
    super().run_step(rewriter)

pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | StreamRewriterStep.run_step | python | train | parameters: (none)
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L100-L103
What does this function do? Do the file in-out rewrite.

def run_step(self):
    """Do the file in-out rewrite."""
    rewriter = StreamRewriter(self.context.iter_formatted_strings)
    super().run_step(rewriter)

pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | StreamReplacePairsRewriterStep.run_step | python | train | parameters: (none)
source: https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L133-L141
What does this function do? Write in to out, replacing strings per the replace_pairs.

def run_step(self):
    """Write in to out, replacing strings per the replace_pairs."""
    formatted_replacements = self.context.get_formatted_iterable(
        self.replace_pairs)

    iter = StreamReplacePairsRewriterStep.iter_replace_strings(
        formatted_replacements)
    rewriter = StreamRewriter(iter)
    super().run_step(rewriter)

pypyr/pypyr-cli | pypyr/steps/dsl/fileinoutrewriter.py | StreamReplacePairsRewriterStep.iter_replace_strings | def iter_replace_strings(replacements):
"""Create a function that uses replacement pairs to process a string.
The returned function takes an iterator and yields on each processed
line.
Args:
replacements: Dict containing 'find_string': 'replace_string' pairs
Returns:
function with signature: iterator of strings = function(iterable)
"""
def function_iter_replace_strings(iterable_strings):
"""Yield a formatted string from iterable_strings using a generator.
Args:
iterable_strings: Iterable containing strings. E.g a file-like
object.
Returns:
Yields formatted line.
"""
for string in iterable_strings:
yield reduce((lambda s, kv: s.replace(*kv)),
replacements.items(),
string)
return function_iter_replace_strings | python | def iter_replace_strings(replacements):
"""Create a function that uses replacement pairs to process a string.
The returned function takes an iterator and yields on each processed
line.
Args:
replacements: Dict containing 'find_string': 'replace_string' pairs
Returns:
function with signature: iterator of strings = function(iterable)
"""
def function_iter_replace_strings(iterable_strings):
"""Yield a formatted string from iterable_strings using a generator.
Args:
iterable_strings: Iterable containing strings. E.g a file-like
object.
Returns:
Yields formatted line.
"""
for string in iterable_strings:
yield reduce((lambda s, kv: s.replace(*kv)),
replacements.items(),
string)
return function_iter_replace_strings | [
"def",
"iter_replace_strings",
"(",
"replacements",
")",
":",
"def",
"function_iter_replace_strings",
"(",
"iterable_strings",
")",
":",
"\"\"\"Yield a formatted string from iterable_strings using a generator.\n\n Args:\n iterable_strings: Iterable containing strings. E.g a file-like\n object.\n\n Returns:\n Yields formatted line.\n\n \"\"\"",
"for",
"string",
"in",
"iterable_strings",
":",
"yield",
"reduce",
"(",
"(",
"lambda",
"s",
",",
"kv",
":",
"s",
".",
"replace",
"(",
"*",
"kv",
")",
")",
",",
"replacements",
".",
"items",
"(",
")",
",",
"string",
")",
"return",
"function_iter_replace_strings"
] | Create a function that uses replacement pairs to process a string.
The returned function takes an iterator and yields on each processed
line.
Args:
replacements: Dict containing 'find_string': 'replace_string' pairs
Returns:
function with signature: iterator of strings = function(iterable) | [
"Create",
"a",
"function",
"that",
"uses",
"replacement",
"pairs",
"to",
"process",
"a",
"string",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L144-L173 | [
"replacements"
] | What does this function do? | [
"Create",
"a",
"function",
"that",
"uses",
"replacement",
"pairs",
"to",
"process",
"a",
"string",
"."
] |
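The factory above returns a generator function closed over the replacement mapping, with functools.reduce threading each line through str.replace once per pair. A minimal standalone version of the same idea (function and variable names here are illustrative, not pypyr API), runnable on its own:

from functools import reduce

def make_replacer(replacements):
    # return a generator function that applies every find -> replace pair to each line
    def replace_lines(lines):
        for line in lines:
            # reduce threads the line through str.replace once per (find, replace) pair
            yield reduce(lambda s, kv: s.replace(*kv), replacements.items(), line)
    return replace_lines

replacer = make_replacer({'{name}': 'world'})
print(list(replacer(['hello {name}\n', 'goodbye {name}\n'])))
# ['hello world\n', 'goodbye world\n']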
pypyr/pypyr-cli | pypyr/steps/contextsetf.py | run_step | def run_step(context):
"""Set new context keys from formatting expressions with substitutions.
Context is a dictionary or dictionary-like.
context['contextSetf'] must exist. It's a dictionary.
Will iterate context['contextSetf'] and save the values as new keys to the
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
contextSetf:
key2: 'aaa_{key1}_zzz'
key4: 'bbb_{key3}_yyy'
This will result in return context:
key1: value1
key2: aaa_value1_zzz
key3: bbb_value3_yyy
key4: value3
"""
logger.debug("started")
context.assert_key_has_value(key='contextSetf', caller=__name__)
for k, v in context['contextSetf'].items():
logger.debug(f"setting context {k} to value from context {v}")
context[context.get_formatted_iterable(
k)] = context.get_formatted_iterable(v)
logger.info(f"Set {len(context['contextSetf'])} context items.")
logger.debug("done") | python | def run_step(context):
"""Set new context keys from formatting expressions with substitutions.
Context is a dictionary or dictionary-like.
context['contextSetf'] must exist. It's a dictionary.
Will iterate context['contextSetf'] and save the values as new keys to the
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
contextSetf:
key2: 'aaa_{key1}_zzz'
key4: 'bbb_{key3}_yyy'
This will result in return context:
key1: value1
key2: aaa_value1_zzz
key3: bbb_value3_yyy
key4: value3
"""
logger.debug("started")
context.assert_key_has_value(key='contextSetf', caller=__name__)
for k, v in context['contextSetf'].items():
logger.debug(f"setting context {k} to value from context {v}")
context[context.get_formatted_iterable(
k)] = context.get_formatted_iterable(v)
logger.info(f"Set {len(context['contextSetf'])} context items.")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'contextSetf'",
",",
"caller",
"=",
"__name__",
")",
"for",
"k",
",",
"v",
"in",
"context",
"[",
"'contextSetf'",
"]",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"f\"setting context {k} to value from context {v}\"",
")",
"context",
"[",
"context",
".",
"get_formatted_iterable",
"(",
"k",
")",
"]",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"v",
")",
"logger",
".",
"info",
"(",
"f\"Set {len(context['contextSetf'])} context items.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Set new context keys from formatting expressions with substitutions.
Context is a dictionary or dictionary-like.
context['contextSetf'] must exist. It's a dictionary.
Will iterate context['contextSetf'] and save the values as new keys to the
context.
For example, say input context is:
key1: value1
key2: value2
key3: value3
contextSetf:
key2: 'aaa_{key1}_zzz'
key4: 'bbb_{key3}_yyy'
This will result in return context:
key1: value1
key2: aaa_value1_zzz
key3: bbb_value3_yyy
key4: value3 | [
"Set",
"new",
"context",
"keys",
"from",
"formatting",
"expressions",
"with",
"substitutions",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextsetf.py#L13-L45 | [
"context"
] | What does this function do? | [
"Set",
"new",
"context",
"keys",
"from",
"formatting",
"expressions",
"with",
"substitutions",
"."
] |
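The contextSetf behaviour reduces to formatting each value against the existing context and writing it back under the (also formatted) key. A rough standalone approximation using plain str.format on a dict; this is not pypyr's Context class, which also formats nested structures recursively:

context = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
context_setf = {'key2': 'aaa_{key1}_zzz', 'key4': 'bbb_{key3}_yyy'}

for k, v in context_setf.items():
    # format both the key and the value against whatever is already in the context
    context[k.format(**context)] = v.format(**context)

print(context)
# {'key1': 'value1', 'key2': 'aaa_value1_zzz', 'key3': 'value3', 'key4': 'bbb_value3_yyy'}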
pypyr/pypyr-cli | pypyr/utils/types.py | cast_to_type | def cast_to_type(obj, out_type):
"""Cast obj to out_type if it's not out_type already.
If the obj happens to be out_type already, it just returns obj as is.
Args:
obj: input object
out_type: type.
Returns:
obj cast to out_type. Usual python conversion / casting rules apply.
"""
in_type = type(obj)
if out_type is in_type:
# no need to cast.
return obj
else:
return out_type(obj) | python | def cast_to_type(obj, out_type):
"""Cast obj to out_type if it's not out_type already.
If the obj happens to be out_type already, it just returns obj as is.
Args:
obj: input object
out_type: type.
Returns:
obj cast to out_type. Usual python conversion / casting rules apply.
"""
in_type = type(obj)
if out_type is in_type:
# no need to cast.
return obj
else:
return out_type(obj) | [
"def",
"cast_to_type",
"(",
"obj",
",",
"out_type",
")",
":",
"in_type",
"=",
"type",
"(",
"obj",
")",
"if",
"out_type",
"is",
"in_type",
":",
"# no need to cast.",
"return",
"obj",
"else",
":",
"return",
"out_type",
"(",
"obj",
")"
] | Cast obj to out_type if it's not out_type already.
If the obj happens to be out_type already, it just returns obj as is.
Args:
obj: input object
out_type: type.
Returns:
obj cast to out_type. Usual python conversion / casting rules apply. | [
"Cast",
"obj",
"to",
"out_type",
"if",
"it",
"s",
"not",
"out_type",
"already",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/types.py#L20-L38 | [
"obj",
"out_type"
] | What does this function do? | [
"Cast",
"obj",
"to",
"out_type",
"if",
"it",
"s",
"not",
"out_type",
"already",
"."
] |
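The behaviour is identity-or-constructor: return the object untouched when it is already the requested type, otherwise call the type on it. A quick self-contained check, re-declaring the helper so the snippet runs on its own:

def cast_to_type(obj, out_type):
    # same logic as above: only construct a new object when the type differs
    return obj if type(obj) is out_type else out_type(obj)

assert cast_to_type('123', int) == 123          # str cast via int('123')
assert cast_to_type(1, bool) is True            # int 1 is not bool, so bool(1) is applied
existing = [1, 2]
assert cast_to_type(existing, list) is existing  # already a list: returned as-is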
pypyr/pypyr-cli | pypyr/yaml.py | get_pipeline_yaml | def get_pipeline_yaml(file):
"""Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
"""
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition | python | def get_pipeline_yaml(file):
"""Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
"""
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition | [
"def",
"get_pipeline_yaml",
"(",
"file",
")",
":",
"tag_representers",
"=",
"[",
"PyString",
",",
"SicString",
"]",
"yaml_loader",
"=",
"get_yaml_parser_safe",
"(",
")",
"for",
"representer",
"in",
"tag_representers",
":",
"yaml_loader",
".",
"register_class",
"(",
"representer",
")",
"pipeline_definition",
"=",
"yaml_loader",
".",
"load",
"(",
"file",
")",
"return",
"pipeline_definition"
] | Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml. | [
"Return",
"pipeline",
"yaml",
"from",
"open",
"file",
"object",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L7-L31 | [
"file"
] | What does this function do? | [
"Return",
"pipeline",
"yaml",
"from",
"open",
"file",
"object",
"."
] |
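Loading a pipeline through this function only needs an open file-like object; the custom !py and !sic tags resolve to PyString and SicString instances via register_class. A small usage sketch, assuming pypyr is importable; the pipeline text itself is made up for illustration:

import io
from pypyr.yaml import get_pipeline_yaml

pipeline_text = io.StringIO(
    'steps:\n'
    '  - name: pypyr.steps.echo\n'
    '    in:\n'
    '      echoMe: hello\n'
)
pipeline = get_pipeline_yaml(pipeline_text)
print(pipeline['steps'][0]['name'])   # pypyr.steps.echo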
pypyr/pypyr-cli | pypyr/yaml.py | get_yaml_parser_roundtrip | def get_yaml_parser_roundtrip():
"""Create the yaml parser object with this factory method.
The round-trip parser preserves:
- comments
- block style and key ordering are kept, so you can diff the round-tripped
source
- flow style sequences ( ‘a: b, c, d’) (based on request and test by
Anthony Sottile)
- anchor names that are hand-crafted (i.e. not of the form``idNNN``)
- merges in dictionaries are preserved
Returns:
ruamel.yaml.YAML object with round-trip loader
"""
yaml_writer = yamler.YAML(typ='rt', pure=True)
# if this isn't here the yaml doesn't format nicely indented for humans
yaml_writer.indent(mapping=2, sequence=4, offset=2)
return yaml_writer | python | def get_yaml_parser_roundtrip():
"""Create the yaml parser object with this factory method.
The round-trip parser preserves:
- comments
- block style and key ordering are kept, so you can diff the round-tripped
source
- flow style sequences ( ‘a: b, c, d’) (based on request and test by
Anthony Sottile)
- anchor names that are hand-crafted (i.e. not of the form``idNNN``)
- merges in dictionaries are preserved
Returns:
ruamel.yaml.YAML object with round-trip loader
"""
yaml_writer = yamler.YAML(typ='rt', pure=True)
# if this isn't here the yaml doesn't format nicely indented for humans
yaml_writer.indent(mapping=2, sequence=4, offset=2)
return yaml_writer | [
"def",
"get_yaml_parser_roundtrip",
"(",
")",
":",
"yaml_writer",
"=",
"yamler",
".",
"YAML",
"(",
"typ",
"=",
"'rt'",
",",
"pure",
"=",
"True",
")",
"# if this isn't here the yaml doesn't format nicely indented for humans",
"yaml_writer",
".",
"indent",
"(",
"mapping",
"=",
"2",
",",
"sequence",
"=",
"4",
",",
"offset",
"=",
"2",
")",
"return",
"yaml_writer"
] | Create the yaml parser object with this factory method.
The round-trip parser preserves:
- comments
- block style and key ordering are kept, so you can diff the round-tripped
source
- flow style sequences ( ‘a: b, c, d’) (based on request and test by
Anthony Sottile)
- anchor names that are hand-crafted (i.e. not of the form``idNNN``)
- merges in dictionaries are preserved
Returns:
ruamel.yaml.YAML object with round-trip loader | [
"Create",
"the",
"yaml",
"parser",
"object",
"with",
"this",
"factory",
"method",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L46-L65 | [] | What does this function do? | [
"Create",
"the",
"yaml",
"parser",
"object",
"with",
"this",
"factory",
"method",
"."
] |
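The round-trip behaviour is easiest to see with ruamel.yaml directly, using the same typ and indent settings as the factory above; this is a sketch against ruamel.yaml's documented API rather than pypyr itself:

import sys
import ruamel.yaml

yaml = ruamel.yaml.YAML(typ='rt', pure=True)
yaml.indent(mapping=2, sequence=4, offset=2)

doc = yaml.load(
    'key: value  # this comment survives the round-trip\n'
    'items:\n'
    '  - one\n'
    '  - two\n'
)
doc['key'] = 'changed'
yaml.dump(doc, sys.stdout)   # comment, key ordering and block style are preserved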
pypyr/pypyr-cli | pypyr/yaml.py | get_yaml_parser_roundtrip_for_context | def get_yaml_parser_roundtrip_for_context():
"""Create a yaml parser that can serialize the pypyr Context.
Create yaml parser with get_yaml_parser_roundtrip, adding Context.
This allows the yaml parser to serialize the pypyr Context.
"""
yaml_writer = get_yaml_parser_roundtrip()
# Context is a dict data structure, so can just use a dict representer
yaml_writer.Representer.add_representer(
Context,
yamler.representer.RoundTripRepresenter.represent_dict)
return yaml_writer | python | def get_yaml_parser_roundtrip_for_context():
"""Create a yaml parser that can serialize the pypyr Context.
Create yaml parser with get_yaml_parser_roundtrip, adding Context.
This allows the yaml parser to serialize the pypyr Context.
"""
yaml_writer = get_yaml_parser_roundtrip()
# Context is a dict data structure, so can just use a dict representer
yaml_writer.Representer.add_representer(
Context,
yamler.representer.RoundTripRepresenter.represent_dict)
return yaml_writer | [
"def",
"get_yaml_parser_roundtrip_for_context",
"(",
")",
":",
"yaml_writer",
"=",
"get_yaml_parser_roundtrip",
"(",
")",
"# Context is a dict data structure, so can just use a dict representer",
"yaml_writer",
".",
"Representer",
".",
"add_representer",
"(",
"Context",
",",
"yamler",
".",
"representer",
".",
"RoundTripRepresenter",
".",
"represent_dict",
")",
"return",
"yaml_writer"
] | Create a yaml parser that can serialize the pypyr Context.
Create yaml parser with get_yaml_parser_roundtrip, adding Context.
This allows the yaml parser to serialize the pypyr Context. | [
"Create",
"a",
"yaml",
"parser",
"that",
"can",
"serialize",
"the",
"pypyr",
"Context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L68-L81 | [] | What does this function do? | [
"Create",
"a",
"yaml",
"parser",
"that",
"can",
"serialize",
"the",
"pypyr",
"Context",
"."
] |
pypyr/pypyr-cli | pypyr/parser/keyvaluepairs.py | get_parsed_context | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this keyvaluepairs parser you're looking for "
"something like: "
"pypyr pipelinename 'key1=value1,key2=value2'.")
return None
logger.debug("starting")
# for each comma-delimited element, project key=value
return dict(element.split('=') for element in context_arg.split(',')) | python | def get_parsed_context(context_arg):
"""Parse input context string and returns context as dictionary."""
if not context_arg:
logger.debug("pipeline invoked without context arg set. For "
"this keyvaluepairs parser you're looking for "
"something like: "
"pypyr pipelinename 'key1=value1,key2=value2'.")
return None
logger.debug("starting")
# for each comma-delimited element, project key=value
return dict(element.split('=') for element in context_arg.split(',')) | [
"def",
"get_parsed_context",
"(",
"context_arg",
")",
":",
"if",
"not",
"context_arg",
":",
"logger",
".",
"debug",
"(",
"\"pipeline invoked without context arg set. For \"",
"\"this keyvaluepairs parser you're looking for \"",
"\"something like: \"",
"\"pypyr pipelinename 'key1=value1,key2=value2'.\"",
")",
"return",
"None",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# for each comma-delimited element, project key=value",
"return",
"dict",
"(",
"element",
".",
"split",
"(",
"'='",
")",
"for",
"element",
"in",
"context_arg",
".",
"split",
"(",
"','",
")",
")"
] | Parse input context string and returns context as dictionary. | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/parser/keyvaluepairs.py#L18-L29 | [
"context_arg"
] | What does this function do? | [
"Parse",
"input",
"context",
"string",
"and",
"returns",
"context",
"as",
"dictionary",
"."
] |
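The parser is a one-liner over split; re-declared here so the behaviour (and its limits) can be checked standalone:

def get_parsed_context(context_arg):
    if not context_arg:
        return None
    # naive parse: values containing '=' or ',' are not supported by this parser
    return dict(element.split('=') for element in context_arg.split(','))

print(get_parsed_context('key1=value1,key2=value2'))
# {'key1': 'value1', 'key2': 'value2'}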
pypyr/pypyr-cli | pypyr/steps/fetchjson.py | run_step | def run_step(context):
"""Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e if file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports a passing path as string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None.
"""
logger.debug("started")
deprecated(context)
context.assert_key_has_value(key='fetchJson', caller=__name__)
fetch_json_input = context.get_formatted('fetchJson')
if isinstance(fetch_json_input, str):
file_path = fetch_json_input
destination_key_expression = None
else:
context.assert_child_key_has_value(parent='fetchJson',
child='path',
caller=__name__)
file_path = fetch_json_input['path']
destination_key_expression = fetch_json_input.get('key', None)
logger.debug(f"attempting to open file: {file_path}")
with open(file_path) as json_file:
payload = json.load(json_file)
if destination_key_expression:
destination_key = context.get_formatted_iterable(
destination_key_expression)
logger.debug(f"json file loaded. Writing to context {destination_key}")
context[destination_key] = payload
else:
if not isinstance(payload, MutableMapping):
raise TypeError(
'json input should describe an object at the top '
'level when fetchJsonKey isn\'t specified. You should have '
'something like {"key1": "value1", "key2": "value2"} '
'in the json top-level, not ["value1", "value2"]')
logger.debug("json file loaded. Merging into pypyr context. . .")
context.update(payload)
logger.info(f"json file written into pypyr context. Count: {len(payload)}")
logger.debug("done") | python | def run_step(context):
"""Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e if file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports a passing path as string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None.
"""
logger.debug("started")
deprecated(context)
context.assert_key_has_value(key='fetchJson', caller=__name__)
fetch_json_input = context.get_formatted('fetchJson')
if isinstance(fetch_json_input, str):
file_path = fetch_json_input
destination_key_expression = None
else:
context.assert_child_key_has_value(parent='fetchJson',
child='path',
caller=__name__)
file_path = fetch_json_input['path']
destination_key_expression = fetch_json_input.get('key', None)
logger.debug(f"attempting to open file: {file_path}")
with open(file_path) as json_file:
payload = json.load(json_file)
if destination_key_expression:
destination_key = context.get_formatted_iterable(
destination_key_expression)
logger.debug(f"json file loaded. Writing to context {destination_key}")
context[destination_key] = payload
else:
if not isinstance(payload, MutableMapping):
raise TypeError(
'json input should describe an object at the top '
'level when fetchJsonKey isn\'t specified. You should have '
'something like {"key1": "value1", "key2": "value2"} '
'in the json top-level, not ["value1", "value2"]')
logger.debug("json file loaded. Merging into pypyr context. . .")
context.update(payload)
logger.info(f"json file written into pypyr context. Count: {len(payload)}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"deprecated",
"(",
"context",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'fetchJson'",
",",
"caller",
"=",
"__name__",
")",
"fetch_json_input",
"=",
"context",
".",
"get_formatted",
"(",
"'fetchJson'",
")",
"if",
"isinstance",
"(",
"fetch_json_input",
",",
"str",
")",
":",
"file_path",
"=",
"fetch_json_input",
"destination_key_expression",
"=",
"None",
"else",
":",
"context",
".",
"assert_child_key_has_value",
"(",
"parent",
"=",
"'fetchJson'",
",",
"child",
"=",
"'path'",
",",
"caller",
"=",
"__name__",
")",
"file_path",
"=",
"fetch_json_input",
"[",
"'path'",
"]",
"destination_key_expression",
"=",
"fetch_json_input",
".",
"get",
"(",
"'key'",
",",
"None",
")",
"logger",
".",
"debug",
"(",
"f\"attempting to open file: {file_path}\"",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"json_file",
":",
"payload",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"if",
"destination_key_expression",
":",
"destination_key",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"destination_key_expression",
")",
"logger",
".",
"debug",
"(",
"f\"json file loaded. Writing to context {destination_key}\"",
")",
"context",
"[",
"destination_key",
"]",
"=",
"payload",
"else",
":",
"if",
"not",
"isinstance",
"(",
"payload",
",",
"MutableMapping",
")",
":",
"raise",
"TypeError",
"(",
"'json input should describe an object at the top '",
"'level when fetchJsonKey isn\\'t specified. You should have '",
"'something like {\"key1\": \"value1\", \"key2\": \"value2\"} '",
"'in the json top-level, not [\"value1\", \"value2\"]'",
")",
"logger",
".",
"debug",
"(",
"\"json file loaded. Merging into pypyr context. . .\"",
")",
"context",
".",
"update",
"(",
"payload",
")",
"logger",
".",
"info",
"(",
"f\"json file written into pypyr context. Count: {len(payload)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Load a json file into the pypyr context.
json parsed from the file will be merged into the pypyr context. This will
overwrite existing values if the same keys are already in there.
I.e if file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.
The json should not be an array [] on the top level, but rather an Object.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- fetchJson
- path. path-like. Path to file on disk.
- key. string. If exists, write json structure to this
context key. Else json writes to context root.
Also supports a passing path as string to fetchJson, but in this case you
won't be able to specify a key.
All inputs support formatting expressions.
Returns:
None. updates context arg.
Raises:
FileNotFoundError: take a guess
pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
None. | [
"Load",
"a",
"json",
"file",
"into",
"the",
"pypyr",
"context",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchjson.py#L10-L82 | [
"context"
] | What does this function do? | [
"Load",
"a",
"json",
"file",
"into",
"the",
"pypyr",
"context",
"."
] |
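End to end, the step reads a json file and merges it into the context, or writes it under a single key when fetchJson.key is given. A hedged usage sketch, assuming pypyr is importable; the temp file and the 'breakfast' destination key are made up for illustration:

import json
import tempfile

from pypyr.context import Context
import pypyr.steps.fetchjson as fetchjson

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'eggs': 'boiled', 'count': 3}, f)
    path = f.name

context = Context({'fetchJson': {'path': path, 'key': 'breakfast'}})
fetchjson.run_step(context)
print(context['breakfast'])   # {'eggs': 'boiled', 'count': 3}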
pypyr/pypyr-cli | pypyr/steps/fetchjson.py | deprecated | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fetchJsonPath' in context:
context.assert_key_has_value(key='fetchJsonPath', caller=__name__)
context['fetchJson'] = {'path': context['fetchJsonPath']}
if 'fetchJsonKey' in context:
context['fetchJson']['key'] = context.get('fetchJsonKey', None)
logger.warning("fetchJsonPath and fetchJsonKey "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fetchJson "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fetchJson key for you "
"under the hood.") | python | def deprecated(context):
"""Create new style in params from deprecated."""
if 'fetchJsonPath' in context:
context.assert_key_has_value(key='fetchJsonPath', caller=__name__)
context['fetchJson'] = {'path': context['fetchJsonPath']}
if 'fetchJsonKey' in context:
context['fetchJson']['key'] = context.get('fetchJsonKey', None)
logger.warning("fetchJsonPath and fetchJsonKey "
"are deprecated. They will stop working upon the next "
"major release. Use the new context key fetchJson "
"instead. It's a lot better, promise! For the moment "
"pypyr is creating the new fetchJson key for you "
"under the hood.") | [
"def",
"deprecated",
"(",
"context",
")",
":",
"if",
"'fetchJsonPath'",
"in",
"context",
":",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'fetchJsonPath'",
",",
"caller",
"=",
"__name__",
")",
"context",
"[",
"'fetchJson'",
"]",
"=",
"{",
"'path'",
":",
"context",
"[",
"'fetchJsonPath'",
"]",
"}",
"if",
"'fetchJsonKey'",
"in",
"context",
":",
"context",
"[",
"'fetchJson'",
"]",
"[",
"'key'",
"]",
"=",
"context",
".",
"get",
"(",
"'fetchJsonKey'",
",",
"None",
")",
"logger",
".",
"warning",
"(",
"\"fetchJsonPath and fetchJsonKey \"",
"\"are deprecated. They will stop working upon the next \"",
"\"major release. Use the new context key fetchJson \"",
"\"instead. It's a lot better, promise! For the moment \"",
"\"pypyr is creating the new fetchJson key for you \"",
"\"under the hood.\"",
")"
] | Create new style in params from deprecated. | [
"Create",
"new",
"style",
"in",
"params",
"from",
"deprecated",
"."
] | train | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchjson.py#L85-L100 | [
"context"
] | What does this function do? | [
"Create",
"new",
"style",
"in",
"params",
"from",
"deprecated",
"."
] |
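The shim just repackages the two old keys into the new fetchJson shape and logs a deprecation warning. A quick check, assuming pypyr is importable; the path value is a placeholder:

from pypyr.context import Context
import pypyr.steps.fetchjson as fetchjson

context = Context({'fetchJsonPath': '/tmp/data.json', 'fetchJsonKey': 'payload'})
fetchjson.deprecated(context)
print(context['fetchJson'])   # {'path': '/tmp/data.json', 'key': 'payload'}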
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._ignore_request | def _ignore_request(self, path):
"""Check to see if we should ignore the request."""
return any([
re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS']
]) | python | def _ignore_request(self, path):
"""Check to see if we should ignore the request."""
return any([
re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS']
]) | [
"def",
"_ignore_request",
"(",
"self",
",",
"path",
")",
":",
"return",
"any",
"(",
"[",
"re",
".",
"match",
"(",
"pattern",
",",
"path",
")",
"for",
"pattern",
"in",
"QC_SETTINGS",
"[",
"'IGNORE_REQUEST_PATTERNS'",
"]",
"]",
")"
] | Check to see if we should ignore the request. | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"request",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L83-L87 | [
"path"
] | What does this function do? | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"request",
"."
] |
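The check is just any() over re.match against the configured IGNORE_REQUEST_PATTERNS. A standalone sketch of the same matching logic with made-up patterns, independent of Django settings:

import re

ignore_patterns = [r'^/admin/', r'^/static/']   # illustrative patterns

def should_ignore(path):
    # re.match anchors at the start of the path, as in _ignore_request above
    return any(re.match(pattern, path) for pattern in ignore_patterns)

print(should_ignore('/admin/login/'))   # True
print(should_ignore('/api/users/'))     # False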
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._ignore_sql | def _ignore_sql(self, query):
"""Check to see if we should ignore the sql query."""
return any([
re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
]) | python | def _ignore_sql(self, query):
"""Check to see if we should ignore the sql query."""
return any([
re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
]) | [
"def",
"_ignore_sql",
"(",
"self",
",",
"query",
")",
":",
"return",
"any",
"(",
"[",
"re",
".",
"search",
"(",
"pattern",
",",
"query",
".",
"get",
"(",
"'sql'",
")",
")",
"for",
"pattern",
"in",
"QC_SETTINGS",
"[",
"'IGNORE_SQL_PATTERNS'",
"]",
"]",
")"
] | Check to see if we should ignore the sql query. | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"sql",
"query",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L89-L93 | [
"query"
] | What does this function do? | [
"Check",
"to",
"see",
"if",
"we",
"should",
"ignore",
"the",
"sql",
"query",
"."
] |
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._duplicate_queries | def _duplicate_queries(self, output):
"""Appends the most common duplicate queries to the given output."""
if QC_SETTINGS['DISPLAY_DUPLICATES']:
for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']):
lines = ['\nRepeated {0} times.'.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output | python | def _duplicate_queries(self, output):
"""Appends the most common duplicate queries to the given output."""
if QC_SETTINGS['DISPLAY_DUPLICATES']:
for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']):
lines = ['\nRepeated {0} times.'.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output | [
"def",
"_duplicate_queries",
"(",
"self",
",",
"output",
")",
":",
"if",
"QC_SETTINGS",
"[",
"'DISPLAY_DUPLICATES'",
"]",
":",
"for",
"query",
",",
"count",
"in",
"self",
".",
"queries",
".",
"most_common",
"(",
"QC_SETTINGS",
"[",
"'DISPLAY_DUPLICATES'",
"]",
")",
":",
"lines",
"=",
"[",
"'\\nRepeated {0} times.'",
".",
"format",
"(",
"count",
")",
"]",
"lines",
"+=",
"wrap",
"(",
"query",
")",
"lines",
"=",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
"+",
"\"\\n\"",
"output",
"+=",
"self",
".",
"_colorize",
"(",
"lines",
",",
"count",
")",
"return",
"output"
] | Appends the most common duplicate queries to the given output. | [
"Appends",
"the",
"most",
"common",
"duplicate",
"queries",
"to",
"the",
"given",
"output",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L142-L150 | [
"output"
] | What does this function do? | [
"Appends",
"the",
"most",
"common",
"duplicate",
"queries",
"to",
"the",
"given",
"output",
"."
] |
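Stripped of the ANSI colouring, the duplicate report is Counter.most_common plus textwrap.wrap. A standalone sketch with made-up query strings and counts:

from collections import Counter
from textwrap import wrap

queries = Counter()
queries['SELECT * FROM auth_user WHERE id = %s'] += 3
queries['SELECT * FROM django_session WHERE session_key = %s'] += 1

output = ''
for query, count in queries.most_common(5):
    # one block per duplicated query: a count line followed by the wrapped SQL
    lines = ['\nRepeated {0} times.'.format(count)] + wrap(query)
    output += '\n'.join(lines) + '\n'

print(output)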
bradmontgomery/django-querycount | querycount/middleware.py | QueryCountMiddleware._calculate_num_queries | def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2] | python | def _calculate_num_queries(self):
"""
Calculate the total number of request and response queries.
Used for count header and count table.
"""
request_totals = self._totals("request")
response_totals = self._totals("response")
return request_totals[2] + response_totals[2] | [
"def",
"_calculate_num_queries",
"(",
"self",
")",
":",
"request_totals",
"=",
"self",
".",
"_totals",
"(",
"\"request\"",
")",
"response_totals",
"=",
"self",
".",
"_totals",
"(",
"\"response\"",
")",
"return",
"request_totals",
"[",
"2",
"]",
"+",
"response_totals",
"[",
"2",
"]"
] | Calculate the total number of request and response queries.
Used for count header and count table. | [
"Calculate",
"the",
"total",
"number",
"of",
"request",
"and",
"response",
"queries",
".",
"Used",
"for",
"count",
"header",
"and",
"count",
"table",
"."
] | train | https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L193-L201 | [] | What does this function do? | [
"Calculate",
"the",
"total",
"number",
"of",
"request",
"and",
"response",
"queries",
".",
"Used",
"for",
"count",
"header",
"and",
"count",
"table",
"."
] |