Dataset columns (name, dtype, value/length range):

    id                int32          0 - 252k
    repo              stringlengths  7 - 55
    path              stringlengths  4 - 127
    func_name         stringlengths  1 - 88
    original_string   stringlengths  75 - 19.8k
    language          stringclasses  1 value
    code              stringlengths  51 - 19.8k
    code_tokens       sequence
    docstring         stringlengths  3 - 17.3k
    docstring_tokens  sequence
    sha               stringlengths  40 - 40
    url               stringlengths  87 - 242

Each row below lists its fields in this column order.
100
pypyr/pypyr-cli
pypyr/dsl.py
Step.invoke_step
def invoke_step(self, context):
    """Invoke 'run_step' in the dynamically loaded step module.

    Don't invoke this from outside the Step class. Use
    pypyr.dsl.Step.run_step instead.

    invoke_step just does the bare module step invocation, it does not
    evaluate any of the decorator logic surrounding the step. So unless
    you really know what you're doing, use run_step if you intend on
    executing the step the same way pypyr does.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.

    """
    logger.debug("starting")

    logger.debug(f"running step {self.module}")

    self.run_step_function(context)

    logger.debug(f"step {self.module} done")
python
def invoke_step(self, context):
    logger.debug("starting")

    logger.debug(f"running step {self.module}")

    self.run_step_function(context)

    logger.debug(f"step {self.module} done")
[ "def", "invoke_step", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "logger", ".", "debug", "(", "f\"running step {self.module}\"", ")", "self", ".", "run_step_function", "(", "context", ")", "logger", ".", "debug", "(", "f\"step {self.module} done\"", ")" ]
Invoke 'run_step' in the dynamically loaded step module.

Don't invoke this from outside the Step class. Use pypyr.dsl.Step.run_step instead.

invoke_step just does the bare module step invocation, it does not evaluate any of the decorator logic surrounding the step. So unless you really know what you're doing, use run_step if you intend on executing the step the same way pypyr does.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate.
[ "Invoke", "run_step", "in", "the", "dynamically", "loaded", "step", "module", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L285-L305
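A minimal stand-in sketch of the bare-vs-decorated split this docstring describes; StandInStep is hypothetical and only mirrors the call shape, not pypyr's real Step class.

    class StandInStep:
        def invoke_step(self, context):
            # bare module invocation only -- no decorator logic
            context['ran'] = True

        def run_step(self, context):
            # run/skip/retry/while decorator handling would wrap this call
            self.invoke_step(context)

    context = {}
    StandInStep().run_step(context)
    print(context)  # {'ran': True}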
101
pypyr/pypyr-cli
pypyr/dsl.py
Step.run_conditional_decorators
def run_conditional_decorators(self, context):
    """Evaluate the step decorators to decide whether to run step or not.

    Use pypyr.dsl.Step.run_step if you intend on executing the step the
    same way pypyr does.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.

    """
    logger.debug("starting")

    # The decorator attributes might contain formatting expressions that
    # change whether they evaluate True or False, thus apply formatting at
    # last possible instant.
    run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
    skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
    swallow_me = context.get_formatted_as_type(self.swallow_me,
                                               out_type=bool)

    if run_me:
        if not skip_me:
            try:
                if self.retry_decorator:
                    self.retry_decorator.retry_loop(context,
                                                    self.invoke_step)
                else:
                    self.invoke_step(context=context)
            except Exception as ex_info:
                if swallow_me:
                    logger.error(
                        f"{self.name} Ignoring error because swallow "
                        "is True for this step.\n"
                        f"{type(ex_info).__name__}: {ex_info}")
                else:
                    raise
        else:
            logger.info(
                f"{self.name} not running because skip is True.")
    else:
        logger.info(f"{self.name} not running because run is False.")

    logger.debug("done")
python
def run_conditional_decorators(self, context):
    logger.debug("starting")

    # The decorator attributes might contain formatting expressions that
    # change whether they evaluate True or False, thus apply formatting at
    # last possible instant.
    run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
    skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
    swallow_me = context.get_formatted_as_type(self.swallow_me,
                                               out_type=bool)

    if run_me:
        if not skip_me:
            try:
                if self.retry_decorator:
                    self.retry_decorator.retry_loop(context,
                                                    self.invoke_step)
                else:
                    self.invoke_step(context=context)
            except Exception as ex_info:
                if swallow_me:
                    logger.error(
                        f"{self.name} Ignoring error because swallow "
                        "is True for this step.\n"
                        f"{type(ex_info).__name__}: {ex_info}")
                else:
                    raise
        else:
            logger.info(
                f"{self.name} not running because skip is True.")
    else:
        logger.info(f"{self.name} not running because run is False.")

    logger.debug("done")
[ "def", "run_conditional_decorators", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "# The decorator attributes might contain formatting expressions that", "# change whether they evaluate True or False, thus apply formatting at", "# last possible instant.", "run_me", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "run_me", ",", "out_type", "=", "bool", ")", "skip_me", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "skip_me", ",", "out_type", "=", "bool", ")", "swallow_me", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "swallow_me", ",", "out_type", "=", "bool", ")", "if", "run_me", ":", "if", "not", "skip_me", ":", "try", ":", "if", "self", ".", "retry_decorator", ":", "self", ".", "retry_decorator", ".", "retry_loop", "(", "context", ",", "self", ".", "invoke_step", ")", "else", ":", "self", ".", "invoke_step", "(", "context", "=", "context", ")", "except", "Exception", "as", "ex_info", ":", "if", "swallow_me", ":", "logger", ".", "error", "(", "f\"{self.name} Ignoring error because swallow \"", "\"is True for this step.\\n\"", "f\"{type(ex_info).__name__}: {ex_info}\"", ")", "else", ":", "raise", "else", ":", "logger", ".", "info", "(", "f\"{self.name} not running because skip is True.\"", ")", "else", ":", "logger", ".", "info", "(", "f\"{self.name} not running because run is False.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Evaluate the step decorators to decide whether to run step or not.

Use pypyr.dsl.Step.run_step if you intend on executing the step the same way pypyr does.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate.
[ "Evaluate", "the", "step", "decorators", "to", "decide", "whether", "to", "run", "step", "or", "not", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L307-L349
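A plain-Python sketch of the run/skip/swallow gating above; literal bools stand in for the context.get_formatted_as_type(...) results and the step is any callable.

    def gated_run(step, run_me=True, skip_me=False, swallow_me=False):
        # mirrors the nesting above: run gates skip, skip gates execution,
        # swallow decides whether a step error propagates
        if not run_me:
            print("not running because run is False.")
            return
        if skip_me:
            print("not running because skip is True.")
            return
        try:
            step()
        except Exception as ex_info:
            if not swallow_me:
                raise
            print(f"Ignoring error because swallow is True. {ex_info}")

    def failing_step():
        raise ValueError("boom")

    gated_run(failing_step, swallow_me=True)  # error reported, not raised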
102
pypyr/pypyr-cli
pypyr/dsl.py
Step.run_foreach_or_conditional
def run_foreach_or_conditional(self, context):
    """Run the foreach sequence or the conditional evaluation.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.

    """
    logger.debug("starting")
    # friendly reminder [] list obj (i.e empty) evals False
    if self.foreach_items:
        self.foreach_loop(context)
    else:
        # since no looping required, don't pollute output with looping info
        self.run_conditional_decorators(context)

    logger.debug("done")
python
def run_foreach_or_conditional(self, context):
    logger.debug("starting")
    # friendly reminder [] list obj (i.e empty) evals False
    if self.foreach_items:
        self.foreach_loop(context)
    else:
        # since no looping required, don't pollute output with looping info
        self.run_conditional_decorators(context)

    logger.debug("done")
[ "def", "run_foreach_or_conditional", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "# friendly reminder [] list obj (i.e empty) evals False", "if", "self", ".", "foreach_items", ":", "self", ".", "foreach_loop", "(", "context", ")", "else", ":", "# since no looping required, don't pollute output with looping info", "self", ".", "run_conditional_decorators", "(", "context", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run the foreach sequence or the conditional evaluation.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate.
[ "Run", "the", "foreach", "sequence", "or", "the", "conditional", "evaluation", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L351-L366
103
pypyr/pypyr-cli
pypyr/dsl.py
Step.run_step
def run_step(self, context):
    """Run a single pipeline step.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.

    """
    logger.debug("starting")

    # the in params should be added to context before step execution.
    self.set_step_input_context(context)

    if self.while_decorator:
        self.while_decorator.while_loop(context,
                                        self.run_foreach_or_conditional)
    else:
        self.run_foreach_or_conditional(context)

    logger.debug("done")
python
def run_step(self, context):
    logger.debug("starting")

    # the in params should be added to context before step execution.
    self.set_step_input_context(context)

    if self.while_decorator:
        self.while_decorator.while_loop(context,
                                        self.run_foreach_or_conditional)
    else:
        self.run_foreach_or_conditional(context)

    logger.debug("done")
[ "def", "run_step", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "# the in params should be added to context before step execution.", "self", ".", "set_step_input_context", "(", "context", ")", "if", "self", ".", "while_decorator", ":", "self", ".", "while_decorator", ".", "while_loop", "(", "context", ",", "self", ".", "run_foreach_or_conditional", ")", "else", ":", "self", ".", "run_foreach_or_conditional", "(", "context", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run a single pipeline step.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate.
[ "Run", "a", "single", "pipeline", "step", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L368-L385
104
pypyr/pypyr-cli
pypyr/dsl.py
Step.set_step_input_context
def set_step_input_context(self, context):
    """Append step's 'in' parameters to context, if they exist.

    Append the[in] dictionary to the context. This will overwrite
    existing values if the same keys are already in there. I.e if
    in_parameters has {'eggs': 'boiled'} and key 'eggs' already
    exists in context, context['eggs'] hereafter will be 'boiled'.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate - after method execution will contain the new
                 updated context.

    """
    logger.debug("starting")
    if self.in_parameters is not None:
        parameter_count = len(self.in_parameters)
        if parameter_count > 0:
            logger.debug(
                f"Updating context with {parameter_count} 'in' "
                "parameters.")
            context.update(self.in_parameters)

    logger.debug("done")
python
def set_step_input_context(self, context):
    logger.debug("starting")
    if self.in_parameters is not None:
        parameter_count = len(self.in_parameters)
        if parameter_count > 0:
            logger.debug(
                f"Updating context with {parameter_count} 'in' "
                "parameters.")
            context.update(self.in_parameters)

    logger.debug("done")
[ "def", "set_step_input_context", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "if", "self", ".", "in_parameters", "is", "not", "None", ":", "parameter_count", "=", "len", "(", "self", ".", "in_parameters", ")", "if", "parameter_count", ">", "0", ":", "logger", ".", "debug", "(", "f\"Updating context with {parameter_count} 'in' \"", "\"parameters.\"", ")", "context", ".", "update", "(", "self", ".", "in_parameters", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Append step's 'in' parameters to context, if they exist.

Append the[in] dictionary to the context. This will overwrite existing values if the same keys are already in there. I.e if in_parameters has {'eggs': 'boiled'} and key 'eggs' already exists in context, context['eggs'] hereafter will be 'boiled'.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context.
[ "Append", "step", "s", "in", "parameters", "to", "context", "if", "they", "exist", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L387-L409
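The overwrite semantics the docstring describes, shown with a plain dict standing in for the dict-like pypyr.context.Context:

    # 'in' parameters win over pre-existing context keys on update
    context = {'eggs': 'fried', 'bread': 'toasted'}
    in_parameters = {'eggs': 'boiled'}
    context.update(in_parameters)
    assert context == {'eggs': 'boiled', 'bread': 'toasted'}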
105
pypyr/pypyr-cli
pypyr/dsl.py
RetryDecorator.exec_iteration
def exec_iteration(self, counter, context, step_method):
    """Run a single retry iteration.

    This method abides by the signature invoked by poll.while_until_true,
    which is to say (counter, *args, **kwargs). In a normal execution
    chain, this method's args passed by self.retry_loop where context
    and step_method set. while_until_true injects counter as a 1st arg.

    Args:
        counter. int. loop counter, which number of iteration is this.
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate - after method execution will contain the new
                 updated context.
        step_method: (method/function) This is the method/function that
                     will execute on every loop iteration. Signature is:
                     function(context)

    Returns:
        bool. True if step execution completed without error.
              False if error occured during step execution.

    """
    logger.debug("starting")
    context['retryCounter'] = counter

    logger.info(f"retry: running step with counter {counter}")
    try:
        step_method(context)
        result = True
    except Exception as ex_info:
        if self.max:
            if counter == self.max:
                logger.debug(f"retry: max {counter} retries exhausted. "
                             "raising error.")
                # arguably shouldn't be using errs for control of flow.
                # but would lose the err info if not, so lesser of 2 evils.
                raise

        if self.stop_on or self.retry_on:
            error_name = get_error_name(ex_info)
            if self.stop_on:
                formatted_stop_list = context.get_formatted_iterable(
                    self.stop_on)
                if error_name in formatted_stop_list:
                    logger.error(f"{error_name} in stopOn. Raising error "
                                 "and exiting retry.")
                    raise
                else:
                    logger.debug(f"{error_name} not in stopOn. Continue.")

            if self.retry_on:
                formatted_retry_list = context.get_formatted_iterable(
                    self.retry_on)
                if error_name not in formatted_retry_list:
                    logger.error(f"{error_name} not in retryOn. Raising "
                                 "error and exiting retry.")
                    raise
                else:
                    logger.debug(f"{error_name} in retryOn. Retry again.")

        result = False
        logger.error(f"retry: ignoring error because retryCounter < max.\n"
                     f"{type(ex_info).__name__}: {ex_info}")

    logger.debug(f"retry: done step with counter {counter}")

    logger.debug("done")
    return result
python
def exec_iteration(self, counter, context, step_method):
    logger.debug("starting")
    context['retryCounter'] = counter

    logger.info(f"retry: running step with counter {counter}")
    try:
        step_method(context)
        result = True
    except Exception as ex_info:
        if self.max:
            if counter == self.max:
                logger.debug(f"retry: max {counter} retries exhausted. "
                             "raising error.")
                # arguably shouldn't be using errs for control of flow.
                # but would lose the err info if not, so lesser of 2 evils.
                raise

        if self.stop_on or self.retry_on:
            error_name = get_error_name(ex_info)
            if self.stop_on:
                formatted_stop_list = context.get_formatted_iterable(
                    self.stop_on)
                if error_name in formatted_stop_list:
                    logger.error(f"{error_name} in stopOn. Raising error "
                                 "and exiting retry.")
                    raise
                else:
                    logger.debug(f"{error_name} not in stopOn. Continue.")

            if self.retry_on:
                formatted_retry_list = context.get_formatted_iterable(
                    self.retry_on)
                if error_name not in formatted_retry_list:
                    logger.error(f"{error_name} not in retryOn. Raising "
                                 "error and exiting retry.")
                    raise
                else:
                    logger.debug(f"{error_name} in retryOn. Retry again.")

        result = False
        logger.error(f"retry: ignoring error because retryCounter < max.\n"
                     f"{type(ex_info).__name__}: {ex_info}")

    logger.debug(f"retry: done step with counter {counter}")

    logger.debug("done")
    return result
[ "def", "exec_iteration", "(", "self", ",", "counter", ",", "context", ",", "step_method", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "context", "[", "'retryCounter'", "]", "=", "counter", "logger", ".", "info", "(", "f\"retry: running step with counter {counter}\"", ")", "try", ":", "step_method", "(", "context", ")", "result", "=", "True", "except", "Exception", "as", "ex_info", ":", "if", "self", ".", "max", ":", "if", "counter", "==", "self", ".", "max", ":", "logger", ".", "debug", "(", "f\"retry: max {counter} retries exhausted. \"", "\"raising error.\"", ")", "# arguably shouldn't be using errs for control of flow.", "# but would lose the err info if not, so lesser of 2 evils.", "raise", "if", "self", ".", "stop_on", "or", "self", ".", "retry_on", ":", "error_name", "=", "get_error_name", "(", "ex_info", ")", "if", "self", ".", "stop_on", ":", "formatted_stop_list", "=", "context", ".", "get_formatted_iterable", "(", "self", ".", "stop_on", ")", "if", "error_name", "in", "formatted_stop_list", ":", "logger", ".", "error", "(", "f\"{error_name} in stopOn. Raising error \"", "\"and exiting retry.\"", ")", "raise", "else", ":", "logger", ".", "debug", "(", "f\"{error_name} not in stopOn. Continue.\"", ")", "if", "self", ".", "retry_on", ":", "formatted_retry_list", "=", "context", ".", "get_formatted_iterable", "(", "self", ".", "retry_on", ")", "if", "error_name", "not", "in", "formatted_retry_list", ":", "logger", ".", "error", "(", "f\"{error_name} not in retryOn. Raising \"", "\"error and exiting retry.\"", ")", "raise", "else", ":", "logger", ".", "debug", "(", "f\"{error_name} in retryOn. Retry again.\"", ")", "result", "=", "False", "logger", ".", "error", "(", "f\"retry: ignoring error because retryCounter < max.\\n\"", "f\"{type(ex_info).__name__}: {ex_info}\"", ")", "logger", ".", "debug", "(", "f\"retry: done step with counter {counter}\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "result" ]
Run a single retry iteration.

This method abides by the signature invoked by poll.while_until_true, which is to say (counter, *args, **kwargs). In a normal execution chain, this method's args passed by self.retry_loop where context and step_method set. while_until_true injects counter as a 1st arg.

Args:
    counter. int. loop counter, which number of iteration is this.
    context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context.
    step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)

Returns:
    bool. True if step execution completed without error. False if error occured during step execution.
[ "Run", "a", "single", "retry", "iteration", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L469-L536
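A sketch of the stopOn/retryOn decision order exec_iteration applies to a caught error; the error names and lists here are illustrative.

    def should_reraise(error_name, stop_on=None, retry_on=None):
        # stopOn is checked first: a match always re-raises
        if stop_on and error_name in stop_on:
            return True
        # when retryOn is set, anything not listed also re-raises
        if retry_on and error_name not in retry_on:
            return True
        # otherwise swallow the error so the loop can retry
        return False

    assert should_reraise('ValueError', stop_on=['ValueError'])
    assert should_reraise('KeyError', retry_on=['TimeoutError'])
    assert not should_reraise('TimeoutError', retry_on=['TimeoutError'])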
106
pypyr/pypyr-cli
pypyr/dsl.py
RetryDecorator.retry_loop
def retry_loop(self, context, step_method):
    """Run step inside a retry loop.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate - after method execution will contain the new
                 updated context.
        step_method: (method/function) This is the method/function that
                     will execute on every loop iteration. Signature is:
                     function(context)

    """
    logger.debug("starting")
    context['retryCounter'] = 0

    sleep = context.get_formatted_as_type(self.sleep, out_type=float)
    if self.max:
        max = context.get_formatted_as_type(self.max, out_type=int)

        logger.info(f"retry decorator will try {max} times at {sleep}s "
                    "intervals.")
    else:
        max = None
        logger.info(f"retry decorator will try indefinitely at {sleep}s "
                    "intervals.")

    # this will never be false. because on counter == max,
    # exec_iteration raises an exception, breaking out of the loop.
    # pragma because cov doesn't know the implied else is impossible.
    # unit test cov is 100%, though.
    if poll.while_until_true(interval=sleep,
                             max_attempts=max)(
            self.exec_iteration)(context=context,
                                 step_method=step_method
                                 ):  # pragma: no cover
        logger.debug("retry loop complete, reporting success.")

    logger.debug("retry loop done")

    logger.debug("done")
python
def retry_loop(self, context, step_method):
    logger.debug("starting")
    context['retryCounter'] = 0

    sleep = context.get_formatted_as_type(self.sleep, out_type=float)
    if self.max:
        max = context.get_formatted_as_type(self.max, out_type=int)

        logger.info(f"retry decorator will try {max} times at {sleep}s "
                    "intervals.")
    else:
        max = None
        logger.info(f"retry decorator will try indefinitely at {sleep}s "
                    "intervals.")

    # this will never be false. because on counter == max,
    # exec_iteration raises an exception, breaking out of the loop.
    # pragma because cov doesn't know the implied else is impossible.
    # unit test cov is 100%, though.
    if poll.while_until_true(interval=sleep,
                             max_attempts=max)(
            self.exec_iteration)(context=context,
                                 step_method=step_method
                                 ):  # pragma: no cover
        logger.debug("retry loop complete, reporting success.")

    logger.debug("retry loop done")

    logger.debug("done")
[ "def", "retry_loop", "(", "self", ",", "context", ",", "step_method", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "context", "[", "'retryCounter'", "]", "=", "0", "sleep", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "sleep", ",", "out_type", "=", "float", ")", "if", "self", ".", "max", ":", "max", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "max", ",", "out_type", "=", "int", ")", "logger", ".", "info", "(", "f\"retry decorator will try {max} times at {sleep}s \"", "\"intervals.\"", ")", "else", ":", "max", "=", "None", "logger", ".", "info", "(", "f\"retry decorator will try indefinitely at {sleep}s \"", "\"intervals.\"", ")", "# this will never be false. because on counter == max,", "# exec_iteration raises an exception, breaking out of the loop.", "# pragma because cov doesn't know the implied else is impossible.", "# unit test cov is 100%, though.", "if", "poll", ".", "while_until_true", "(", "interval", "=", "sleep", ",", "max_attempts", "=", "max", ")", "(", "self", ".", "exec_iteration", ")", "(", "context", "=", "context", ",", "step_method", "=", "step_method", ")", ":", "# pragma: no cover", "logger", ".", "debug", "(", "\"retry loop complete, reporting success.\"", ")", "logger", ".", "debug", "(", "\"retry loop done\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run step inside a retry loop.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context.
    step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)
[ "Run", "step", "inside", "a", "retry", "loop", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L538-L578
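A toy while_until_true matching the call shape used above -- a decorator factory whose wrapped function gets counter injected as the first arg. The interval/max_attempts semantics are inferred from this file, not from pypyr's real pypyr.utils.poll implementation.

    import time

    def while_until_true(interval, max_attempts):
        def decorator(fn):
            def wrapper(*args, **kwargs):
                counter = 0
                while max_attempts is None or counter < max_attempts:
                    counter += 1
                    # fn returning True ends the loop
                    if fn(counter, *args, **kwargs):
                        return True
                    time.sleep(interval)
                return False
            return wrapper
        return decorator

    # succeeds on the 3rd attempt
    print(while_until_true(interval=0, max_attempts=5)(
        lambda counter: counter == 3)())  # True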
107
pypyr/pypyr-cli
pypyr/dsl.py
WhileDecorator.exec_iteration
def exec_iteration(self, counter, context, step_method):
    """Run a single loop iteration.

    This method abides by the signature invoked by poll.while_until_true,
    which is to say (counter, *args, **kwargs). In a normal execution
    chain, this method's args passed by self.while_loop where context
    and step_method set. while_until_true injects counter as a 1st arg.

    Args:
        counter. int. loop counter, which number of iteration is this.
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate - after method execution will contain the new
                 updated context.
        step_method: (method/function) This is the method/function that
                     will execute on every loop iteration. Signature is:
                     function(context)

    Returns:
        bool. True if self.stop evaluates to True after step execution,
              False otherwise.

    """
    logger.debug("starting")
    context['whileCounter'] = counter

    logger.info(f"while: running step with counter {counter}")
    step_method(context)
    logger.debug(f"while: done step {counter}")

    result = False
    # if no stop, just iterating to max)
    if self.stop:
        # dynamically evaluate stop after step execution, since the step
        # might have changed True/False status for stop.
        result = context.get_formatted_as_type(self.stop, out_type=bool)

    logger.debug("done")
    return result
python
def exec_iteration(self, counter, context, step_method):
    logger.debug("starting")
    context['whileCounter'] = counter

    logger.info(f"while: running step with counter {counter}")
    step_method(context)
    logger.debug(f"while: done step {counter}")

    result = False
    # if no stop, just iterating to max)
    if self.stop:
        # dynamically evaluate stop after step execution, since the step
        # might have changed True/False status for stop.
        result = context.get_formatted_as_type(self.stop, out_type=bool)

    logger.debug("done")
    return result
[ "def", "exec_iteration", "(", "self", ",", "counter", ",", "context", ",", "step_method", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "context", "[", "'whileCounter'", "]", "=", "counter", "logger", ".", "info", "(", "f\"while: running step with counter {counter}\"", ")", "step_method", "(", "context", ")", "logger", ".", "debug", "(", "f\"while: done step {counter}\"", ")", "result", "=", "False", "# if no stop, just iterating to max)", "if", "self", ".", "stop", ":", "# dynamically evaluate stop after step execution, since the step", "# might have changed True/False status for stop.", "result", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "stop", ",", "out_type", "=", "bool", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "result" ]
Run a single loop iteration.

This method abides by the signature invoked by poll.while_until_true, which is to say (counter, *args, **kwargs). In a normal execution chain, this method's args passed by self.while_loop where context and step_method set. while_until_true injects counter as a 1st arg.

Args:
    counter. int. loop counter, which number of iteration is this.
    context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context.
    step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)

Returns:
    bool. True if self.stop evaluates to True after step execution, False otherwise.
[ "Run", "a", "single", "loop", "iteration", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L645-L682
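A sketch of the "evaluate stop only after the step runs" ordering above, since the step may have just changed the value stop depends on; here a plain dict key stands in for the formatted stop expression.

    def exec_iteration(counter, context, step_method, stop_key=None):
        context['whileCounter'] = counter
        step_method(context)
        # stop is checked after step execution, per the comment above
        return bool(context.get(stop_key)) if stop_key else False

    context = {}
    print(exec_iteration(1, context,
                         lambda ctx: ctx.update(done=True),
                         stop_key='done'))  # True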
108
pypyr/pypyr-cli
pypyr/dsl.py
WhileDecorator.while_loop
def while_loop(self, context, step_method):
    """Run step inside a while loop.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate - after method execution will contain the new
                 updated context.
        step_method: (method/function) This is the method/function that
                     will execute on every loop iteration. Signature is:
                     function(context)

    """
    logger.debug("starting")

    context['whileCounter'] = 0

    if self.stop is None and self.max is None:
        # the ctor already does this check, but guess theoretically
        # consumer could have messed with the props since ctor
        logger.error(f"while decorator missing both max and stop.")
        raise PipelineDefinitionError("the while decorator must have "
                                      "either max or stop, or both. "
                                      "But not neither.")

    error_on_max = context.get_formatted_as_type(
        self.error_on_max, out_type=bool)
    sleep = context.get_formatted_as_type(self.sleep, out_type=float)
    if self.max is None:
        max = None

        logger.info(f"while decorator will loop until {self.stop} "
                    f"evaluates to True at {sleep}s intervals.")
    else:
        max = context.get_formatted_as_type(self.max, out_type=int)

        if max < 1:
            logger.info(
                f"max {self.max} is {max}. while only runs when max > 0.")
            logger.debug("done")
            return

        if self.stop is None:
            logger.info(f"while decorator will loop {max} times at "
                        f"{sleep}s intervals.")
        else:
            logger.info(f"while decorator will loop {max} times, or "
                        f"until {self.stop} evaluates to True at "
                        f"{sleep}s intervals.")

    if not poll.while_until_true(interval=sleep,
                                 max_attempts=max)(
            self.exec_iteration)(context=context,
                                 step_method=step_method):
        # False means loop exhausted and stop never eval-ed True.
        if error_on_max:
            logger.error(f"exhausted {max} iterations of while loop, "
                         "and errorOnMax is True.")
            if self.stop and max:
                raise LoopMaxExhaustedError("while loop reached "
                                            f"{max} and {self.stop} "
                                            "never evaluated to True.")
            else:
                raise LoopMaxExhaustedError(f"while loop reached {max}.")
        else:
            if self.stop and max:
                logger.info(
                    f"while decorator looped {max} times, "
                    f"and {self.stop} never evaluated to True.")

            logger.debug("while loop done")
    else:
        logger.info(f"while loop done, stop condition {self.stop} "
                    "evaluated True.")

    logger.debug("done")
python
def while_loop(self, context, step_method):
    logger.debug("starting")

    context['whileCounter'] = 0

    if self.stop is None and self.max is None:
        # the ctor already does this check, but guess theoretically
        # consumer could have messed with the props since ctor
        logger.error(f"while decorator missing both max and stop.")
        raise PipelineDefinitionError("the while decorator must have "
                                      "either max or stop, or both. "
                                      "But not neither.")

    error_on_max = context.get_formatted_as_type(
        self.error_on_max, out_type=bool)
    sleep = context.get_formatted_as_type(self.sleep, out_type=float)
    if self.max is None:
        max = None

        logger.info(f"while decorator will loop until {self.stop} "
                    f"evaluates to True at {sleep}s intervals.")
    else:
        max = context.get_formatted_as_type(self.max, out_type=int)

        if max < 1:
            logger.info(
                f"max {self.max} is {max}. while only runs when max > 0.")
            logger.debug("done")
            return

        if self.stop is None:
            logger.info(f"while decorator will loop {max} times at "
                        f"{sleep}s intervals.")
        else:
            logger.info(f"while decorator will loop {max} times, or "
                        f"until {self.stop} evaluates to True at "
                        f"{sleep}s intervals.")

    if not poll.while_until_true(interval=sleep,
                                 max_attempts=max)(
            self.exec_iteration)(context=context,
                                 step_method=step_method):
        # False means loop exhausted and stop never eval-ed True.
        if error_on_max:
            logger.error(f"exhausted {max} iterations of while loop, "
                         "and errorOnMax is True.")
            if self.stop and max:
                raise LoopMaxExhaustedError("while loop reached "
                                            f"{max} and {self.stop} "
                                            "never evaluated to True.")
            else:
                raise LoopMaxExhaustedError(f"while loop reached {max}.")
        else:
            if self.stop and max:
                logger.info(
                    f"while decorator looped {max} times, "
                    f"and {self.stop} never evaluated to True.")

            logger.debug("while loop done")
    else:
        logger.info(f"while loop done, stop condition {self.stop} "
                    "evaluated True.")

    logger.debug("done")
[ "def", "while_loop", "(", "self", ",", "context", ",", "step_method", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "context", "[", "'whileCounter'", "]", "=", "0", "if", "self", ".", "stop", "is", "None", "and", "self", ".", "max", "is", "None", ":", "# the ctor already does this check, but guess theoretically", "# consumer could have messed with the props since ctor", "logger", ".", "error", "(", "f\"while decorator missing both max and stop.\"", ")", "raise", "PipelineDefinitionError", "(", "\"the while decorator must have \"", "\"either max or stop, or both. \"", "\"But not neither.\"", ")", "error_on_max", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "error_on_max", ",", "out_type", "=", "bool", ")", "sleep", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "sleep", ",", "out_type", "=", "float", ")", "if", "self", ".", "max", "is", "None", ":", "max", "=", "None", "logger", ".", "info", "(", "f\"while decorator will loop until {self.stop} \"", "f\"evaluates to True at {sleep}s intervals.\"", ")", "else", ":", "max", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "max", ",", "out_type", "=", "int", ")", "if", "max", "<", "1", ":", "logger", ".", "info", "(", "f\"max {self.max} is {max}. while only runs when max > 0.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "if", "self", ".", "stop", "is", "None", ":", "logger", ".", "info", "(", "f\"while decorator will loop {max} times at \"", "f\"{sleep}s intervals.\"", ")", "else", ":", "logger", ".", "info", "(", "f\"while decorator will loop {max} times, or \"", "f\"until {self.stop} evaluates to True at \"", "f\"{sleep}s intervals.\"", ")", "if", "not", "poll", ".", "while_until_true", "(", "interval", "=", "sleep", ",", "max_attempts", "=", "max", ")", "(", "self", ".", "exec_iteration", ")", "(", "context", "=", "context", ",", "step_method", "=", "step_method", ")", ":", "# False means loop exhausted and stop never eval-ed True.", "if", "error_on_max", ":", "logger", ".", "error", "(", "f\"exhausted {max} iterations of while loop, \"", "\"and errorOnMax is True.\"", ")", "if", "self", ".", "stop", "and", "max", ":", "raise", "LoopMaxExhaustedError", "(", "\"while loop reached \"", "f\"{max} and {self.stop} \"", "\"never evaluated to True.\"", ")", "else", ":", "raise", "LoopMaxExhaustedError", "(", "f\"while loop reached {max}.\"", ")", "else", ":", "if", "self", ".", "stop", "and", "max", ":", "logger", ".", "info", "(", "f\"while decorator looped {max} times, \"", "f\"and {self.stop} never evaluated to True.\"", ")", "logger", ".", "debug", "(", "\"while loop done\"", ")", "else", ":", "logger", ".", "info", "(", "f\"while loop done, stop condition {self.stop} \"", "\"evaluated True.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run step inside a while loop.

Args:
    context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context.
    step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)
[ "Run", "step", "inside", "a", "while", "loop", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L684-L757
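A sketch of the exhaustion outcomes above: the loop returning False means max was reached without stop ever evaluating True, and errorOnMax decides whether that raises. Values are illustrative.

    class LoopMaxExhaustedError(Exception):
        pass

    def finish_while(loop_succeeded, error_on_max, max_attempts, stop):
        if loop_succeeded:
            print(f"while loop done, stop condition {stop} evaluated True.")
        elif error_on_max:
            raise LoopMaxExhaustedError(f"while loop reached {max_attempts}.")
        else:
            print(f"while decorator looped {max_attempts} times.")

    finish_while(False, error_on_max=False, max_attempts=3, stop='{done}')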
109
pypyr/pypyr-cli
pypyr/steps/fetchyaml.py
run_step
def run_step(context):
    """Load a yaml file into the pypyr context.

    Yaml parsed from the file will be merged into the pypyr context. This
    will overwrite existing values if the same keys are already in there.
    I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
    already exists, returned context['eggs'] will be 'boiled'.

    Args:
        context: pypyr.context.Context. Mandatory.
                 The following context key must exist
                 - fetchYaml
                     - path. path-like. Path to file on disk.
                     - key. string. If exists, write yaml to this context
                       key. Else yaml writes to context root.

    All inputs support formatting expressions.

    Also supports a passing path as string to fetchYaml, but in this case
    you won't be able to specify a key.

    Returns:
        None. updates context arg.

    Raises:
        FileNotFoundError: take a guess
        pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context.
        pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is
                                                  None.

    """
    logger.debug("started")
    deprecated(context)

    context.assert_key_has_value(key='fetchYaml', caller=__name__)

    fetch_yaml_input = context.get_formatted('fetchYaml')

    if isinstance(fetch_yaml_input, str):
        file_path = fetch_yaml_input
        destination_key_expression = None
    else:
        context.assert_child_key_has_value(parent='fetchYaml',
                                           child='path',
                                           caller=__name__)
        file_path = fetch_yaml_input['path']
        destination_key_expression = fetch_yaml_input.get('key', None)

    logger.debug(f"attempting to open file: {file_path}")
    with open(file_path) as yaml_file:
        yaml_loader = yaml.YAML(typ='safe', pure=True)
        payload = yaml_loader.load(yaml_file)

    if destination_key_expression:
        destination_key = context.get_formatted_iterable(
            destination_key_expression)
        logger.debug(f"yaml file loaded. Writing to context {destination_key}")
        context[destination_key] = payload
    else:
        if not isinstance(payload, MutableMapping):
            raise TypeError(
                "yaml input should describe a dictionary at the top "
                "level when fetchYamlKey isn't specified. You should have "
                "something like \n'key1: value1'\n key2: value2'\n"
                "in the yaml top-level, not \n'- value1\n - value2'")

        logger.debug("yaml file loaded. Merging into pypyr context. . .")
        context.update(payload)

    logger.info(f"yaml file written into pypyr context. Count: {len(payload)}")
    logger.debug("done")
python
def run_step(context):
    logger.debug("started")
    deprecated(context)

    context.assert_key_has_value(key='fetchYaml', caller=__name__)

    fetch_yaml_input = context.get_formatted('fetchYaml')

    if isinstance(fetch_yaml_input, str):
        file_path = fetch_yaml_input
        destination_key_expression = None
    else:
        context.assert_child_key_has_value(parent='fetchYaml',
                                           child='path',
                                           caller=__name__)
        file_path = fetch_yaml_input['path']
        destination_key_expression = fetch_yaml_input.get('key', None)

    logger.debug(f"attempting to open file: {file_path}")
    with open(file_path) as yaml_file:
        yaml_loader = yaml.YAML(typ='safe', pure=True)
        payload = yaml_loader.load(yaml_file)

    if destination_key_expression:
        destination_key = context.get_formatted_iterable(
            destination_key_expression)
        logger.debug(f"yaml file loaded. Writing to context {destination_key}")
        context[destination_key] = payload
    else:
        if not isinstance(payload, MutableMapping):
            raise TypeError(
                "yaml input should describe a dictionary at the top "
                "level when fetchYamlKey isn't specified. You should have "
                "something like \n'key1: value1'\n key2: value2'\n"
                "in the yaml top-level, not \n'- value1\n - value2'")

        logger.debug("yaml file loaded. Merging into pypyr context. . .")
        context.update(payload)

    logger.info(f"yaml file written into pypyr context. Count: {len(payload)}")
    logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "deprecated", "(", "context", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'fetchYaml'", ",", "caller", "=", "__name__", ")", "fetch_yaml_input", "=", "context", ".", "get_formatted", "(", "'fetchYaml'", ")", "if", "isinstance", "(", "fetch_yaml_input", ",", "str", ")", ":", "file_path", "=", "fetch_yaml_input", "destination_key_expression", "=", "None", "else", ":", "context", ".", "assert_child_key_has_value", "(", "parent", "=", "'fetchYaml'", ",", "child", "=", "'path'", ",", "caller", "=", "__name__", ")", "file_path", "=", "fetch_yaml_input", "[", "'path'", "]", "destination_key_expression", "=", "fetch_yaml_input", ".", "get", "(", "'key'", ",", "None", ")", "logger", ".", "debug", "(", "f\"attempting to open file: {file_path}\"", ")", "with", "open", "(", "file_path", ")", "as", "yaml_file", ":", "yaml_loader", "=", "yaml", ".", "YAML", "(", "typ", "=", "'safe'", ",", "pure", "=", "True", ")", "payload", "=", "yaml_loader", ".", "load", "(", "yaml_file", ")", "if", "destination_key_expression", ":", "destination_key", "=", "context", ".", "get_formatted_iterable", "(", "destination_key_expression", ")", "logger", ".", "debug", "(", "f\"yaml file loaded. Writing to context {destination_key}\"", ")", "context", "[", "destination_key", "]", "=", "payload", "else", ":", "if", "not", "isinstance", "(", "payload", ",", "MutableMapping", ")", ":", "raise", "TypeError", "(", "\"yaml input should describe a dictionary at the top \"", "\"level when fetchYamlKey isn't specified. You should have \"", "\"something like \\n'key1: value1'\\n key2: value2'\\n\"", "\"in the yaml top-level, not \\n'- value1\\n - value2'\"", ")", "logger", ".", "debug", "(", "\"yaml file loaded. Merging into pypyr context. . .\"", ")", "context", ".", "update", "(", "payload", ")", "logger", ".", "info", "(", "f\"yaml file written into pypyr context. Count: {len(payload)}\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Load a yaml file into the pypyr context.

Yaml parsed from the file will be merged into the pypyr context. This will overwrite existing values if the same keys are already in there. I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'} already exists, returned context['eggs'] will be 'boiled'.

Args:
    context: pypyr.context.Context. Mandatory.
             The following context key must exist
             - fetchYaml
                 - path. path-like. Path to file on disk.
                 - key. string. If exists, write yaml to this context key. Else yaml writes to context root.

All inputs support formatting expressions.

Also supports a passing path as string to fetchYaml, but in this case you won't be able to specify a key.

Returns:
    None. updates context arg.

Raises:
    FileNotFoundError: take a guess
    pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context.
    pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is None.
[ "Load", "a", "yaml", "file", "into", "the", "pypyr", "context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchyaml.py#L10-L81
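A minimal sketch of the load-and-merge path above, using ruamel.yaml the way the step does (yaml.YAML(typ='safe', pure=True)); the file name is hypothetical.

    from ruamel.yaml import YAML

    yaml_loader = YAML(typ='safe', pure=True)
    with open('config.yaml') as yaml_file:  # hypothetical file
        payload = yaml_loader.load(yaml_file)

    # file keys overwrite pre-existing context keys, per the docstring
    context = {'eggs': 'fried'}
    context.update(payload)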
110
pypyr/pypyr-cli
pypyr/steps/nowutc.py
run_step
def run_step(context):
    """pypyr step saves current utc datetime to context.

    Args:
        context: pypyr.context.Context. Mandatory.
                 The following context key is optional:
                 - nowUtcIn. str. Datetime formatting expression. For full
                   list of possible expressions, check here:
                   https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior

    All inputs support pypyr formatting expressions.

    This step creates now in context, containing a string representation
    of the timestamp. If input formatting not specified, defaults to
    ISO8601. Default is:
    YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0,
    YYYY-MM-DDTHH:MM:SS

    Returns:
        None. updates context arg.

    """
    logger.debug("started")

    format_expression = context.get('nowUtcIn', None)

    if format_expression:
        formatted_expression = context.get_formatted_string(format_expression)
        context['nowUtc'] = datetime.now(
            timezone.utc).strftime(formatted_expression)
    else:
        context['nowUtc'] = datetime.now(timezone.utc).isoformat()

    logger.info(f"timestamp {context['nowUtc']} saved to context nowUtc")
    logger.debug("done")
python
def run_step(context):
    logger.debug("started")

    format_expression = context.get('nowUtcIn', None)

    if format_expression:
        formatted_expression = context.get_formatted_string(format_expression)
        context['nowUtc'] = datetime.now(
            timezone.utc).strftime(formatted_expression)
    else:
        context['nowUtc'] = datetime.now(timezone.utc).isoformat()

    logger.info(f"timestamp {context['nowUtc']} saved to context nowUtc")
    logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "format_expression", "=", "context", ".", "get", "(", "'nowUtcIn'", ",", "None", ")", "if", "format_expression", ":", "formatted_expression", "=", "context", ".", "get_formatted_string", "(", "format_expression", ")", "context", "[", "'nowUtc'", "]", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", ".", "strftime", "(", "formatted_expression", ")", "else", ":", "context", "[", "'nowUtc'", "]", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", ".", "isoformat", "(", ")", "logger", ".", "info", "(", "f\"timestamp {context['nowUtc']} saved to context nowUtc\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
pypyr step saves current utc datetime to context.

Args:
    context: pypyr.context.Context. Mandatory.
             The following context key is optional:
             - nowUtcIn. str. Datetime formatting expression. For full list of possible expressions, check here: https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior

All inputs support pypyr formatting expressions.

This step creates now in context, containing a string representation of the timestamp. If input formatting not specified, defaults to ISO8601. Default is: YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0, YYYY-MM-DDTHH:MM:SS

Returns:
    None. updates context arg.
[ "pypyr", "step", "saves", "current", "utc", "datetime", "to", "context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/nowutc.py#L9-L44
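The two output shapes the docstring describes -- strftime formatting when nowUtcIn is set, ISO 8601 otherwise; the format string here is illustrative.

    from datetime import datetime, timezone

    print(datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M'))  # custom
    print(datetime.now(timezone.utc).isoformat())  # default ISO 8601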
111
pypyr/pypyr-cli
pypyr/steps/assert.py
run_step
def run_step(context):
    """Assert that something is True or equal to something else.

    Args:
        context: dictionary-like pypyr.context.Context. context is
                 mandatory. Uses the following context keys in context:
                 - assert
                     - this. mandatory. Any type. If assert['equals'] not
                       specified, evals as boolean.
                     - equals. optional. Any type.

    If assert['this'] evaluates to False raises error.
    If assert['equals'] is specified, raises error if
    assert.this != assert.equals.

    assert['this'] & assert['equals'] both support string substitutions.

    Returns:
        None

    Raises:
        ContextError: if assert evaluates to False.

    """
    logger.debug("started")
    assert context, f"context must have value for {__name__}"

    deprecated(context)

    context.assert_key_has_value('assert', __name__)

    assert_this = context['assert']['this']

    is_equals_there = 'equals' in context['assert']
    if is_equals_there:
        assert_equals = context['assert']['equals']
        # compare assertThis to assertEquals
        logger.debug("comparing assert['this'] to assert['equals'].")
        assert_result = (context.get_formatted_iterable(assert_this) ==
                         context.get_formatted_iterable(assert_equals))
    else:
        # nothing to compare means treat assertThis as a bool.
        logger.debug("evaluating assert['this'] as a boolean.")
        assert_result = context.get_formatted_as_type(assert_this,
                                                      out_type=bool)

    logger.info(f"assert evaluated to {assert_result}")

    if not assert_result:
        if is_equals_there:
            # emit type to help user, but not the actual field contents.
            type_this = (
                type(context.get_formatted_iterable(assert_this)).__name__)
            type_equals = (
                type(context.get_formatted_iterable(assert_equals)).__name__)
            error_text = (
                f"assert assert['this'] is of type {type_this} "
                f"and does not equal assert['equals'] of type {type_equals}.")
        else:
            # if it's a bool it's presumably not a sensitive value.
            error_text = (
                f"assert {assert_this} evaluated to False.")
        raise ContextError(error_text)

    logger.debug("done")
python
def run_step(context):
    logger.debug("started")
    assert context, f"context must have value for {__name__}"

    deprecated(context)

    context.assert_key_has_value('assert', __name__)

    assert_this = context['assert']['this']

    is_equals_there = 'equals' in context['assert']
    if is_equals_there:
        assert_equals = context['assert']['equals']
        # compare assertThis to assertEquals
        logger.debug("comparing assert['this'] to assert['equals'].")
        assert_result = (context.get_formatted_iterable(assert_this) ==
                         context.get_formatted_iterable(assert_equals))
    else:
        # nothing to compare means treat assertThis as a bool.
        logger.debug("evaluating assert['this'] as a boolean.")
        assert_result = context.get_formatted_as_type(assert_this,
                                                      out_type=bool)

    logger.info(f"assert evaluated to {assert_result}")

    if not assert_result:
        if is_equals_there:
            # emit type to help user, but not the actual field contents.
            type_this = (
                type(context.get_formatted_iterable(assert_this)).__name__)
            type_equals = (
                type(context.get_formatted_iterable(assert_equals)).__name__)
            error_text = (
                f"assert assert['this'] is of type {type_this} "
                f"and does not equal assert['equals'] of type {type_equals}.")
        else:
            # if it's a bool it's presumably not a sensitive value.
            error_text = (
                f"assert {assert_this} evaluated to False.")
        raise ContextError(error_text)

    logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "assert", "context", ",", "f\"context must have value for {__name__}\"", "deprecated", "(", "context", ")", "context", ".", "assert_key_has_value", "(", "'assert'", ",", "__name__", ")", "assert_this", "=", "context", "[", "'assert'", "]", "[", "'this'", "]", "is_equals_there", "=", "'equals'", "in", "context", "[", "'assert'", "]", "if", "is_equals_there", ":", "assert_equals", "=", "context", "[", "'assert'", "]", "[", "'equals'", "]", "# compare assertThis to assertEquals", "logger", ".", "debug", "(", "\"comparing assert['this'] to assert['equals'].\"", ")", "assert_result", "=", "(", "context", ".", "get_formatted_iterable", "(", "assert_this", ")", "==", "context", ".", "get_formatted_iterable", "(", "assert_equals", ")", ")", "else", ":", "# nothing to compare means treat assertThis as a bool.", "logger", ".", "debug", "(", "\"evaluating assert['this'] as a boolean.\"", ")", "assert_result", "=", "context", ".", "get_formatted_as_type", "(", "assert_this", ",", "out_type", "=", "bool", ")", "logger", ".", "info", "(", "f\"assert evaluated to {assert_result}\"", ")", "if", "not", "assert_result", ":", "if", "is_equals_there", ":", "# emit type to help user, but not the actual field contents.", "type_this", "=", "(", "type", "(", "context", ".", "get_formatted_iterable", "(", "assert_this", ")", ")", ".", "__name__", ")", "type_equals", "=", "(", "type", "(", "context", ".", "get_formatted_iterable", "(", "assert_equals", ")", ")", ".", "__name__", ")", "error_text", "=", "(", "f\"assert assert['this'] is of type {type_this} \"", "f\"and does not equal assert['equals'] of type {type_equals}.\"", ")", "else", ":", "# if it's a bool it's presumably not a sensitive value.", "error_text", "=", "(", "f\"assert {assert_this} evaluated to False.\"", ")", "raise", "ContextError", "(", "error_text", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Assert that something is True or equal to something else.

Args:
    context: dictionary-like pypyr.context.Context. context is mandatory. Uses the following context keys in context:
    - assert
        - this. mandatory. Any type. If assert['equals'] not specified, evals as boolean.
        - equals. optional. Any type.

If assert['this'] evaluates to False raises error. If assert['equals'] is specified, raises error if assert.this != assert.equals.

assert['this'] & assert['equals'] both support string substitutions.

Returns:
    None

Raises:
    ContextError: if assert evaluates to False.
[ "Assert", "that", "something", "is", "True", "or", "equal", "to", "something", "else", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/assert.py#L9-L71
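The two evaluation modes above, with plain values standing in for formatted context expressions:

    assert_input = {'this': 'boiled', 'equals': 'boiled'}
    if 'equals' in assert_input:
        # equality mode: compare this to equals
        assert_result = assert_input['this'] == assert_input['equals']
    else:
        # boolean mode: evaluate this as a bool
        assert_result = bool(assert_input['this'])
    print(assert_result)  # True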
112
pypyr/pypyr-cli
pypyr/steps/tar.py
tar_archive
def tar_archive(context):
    """Archive specified path to a tar archive.

    Args:
        context: dictionary-like. context is mandatory.
                 context['tar']['archive'] must exist. It's a dictionary.
                 keys are the paths to archive.
                 values are the destination output paths.

    Example:
        tar:
            archive:
                - in: path/to/dir
                  out: path/to/destination.tar.xs
                - in: another/my.file
                  out: ./my.tar.xs

        This will archive directory path/to/dir to
        path/to/destination.tar.xs, and also archive file another/my.file
        to ./my.tar.xs

    """
    logger.debug("start")

    mode = get_file_mode_for_writing(context)

    for item in context['tar']['archive']:
        # value is the destination tar. Allow string interpolation.
        destination = context.get_formatted_string(item['out'])
        # key is the source to archive
        source = context.get_formatted_string(item['in'])
        with tarfile.open(destination, mode) as archive_me:
            logger.debug(f"Archiving '{source}' to '{destination}'")

            archive_me.add(source, arcname='.')
            logger.info(f"Archived '{source}' to '{destination}'")

    logger.debug("end")
python
def tar_archive(context):
    logger.debug("start")

    mode = get_file_mode_for_writing(context)

    for item in context['tar']['archive']:
        # value is the destination tar. Allow string interpolation.
        destination = context.get_formatted_string(item['out'])
        # key is the source to archive
        source = context.get_formatted_string(item['in'])
        with tarfile.open(destination, mode) as archive_me:
            logger.debug(f"Archiving '{source}' to '{destination}'")

            archive_me.add(source, arcname='.')
            logger.info(f"Archived '{source}' to '{destination}'")

    logger.debug("end")
[ "def", "tar_archive", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"start\"", ")", "mode", "=", "get_file_mode_for_writing", "(", "context", ")", "for", "item", "in", "context", "[", "'tar'", "]", "[", "'archive'", "]", ":", "# value is the destination tar. Allow string interpolation.", "destination", "=", "context", ".", "get_formatted_string", "(", "item", "[", "'out'", "]", ")", "# key is the source to archive", "source", "=", "context", ".", "get_formatted_string", "(", "item", "[", "'in'", "]", ")", "with", "tarfile", ".", "open", "(", "destination", ",", "mode", ")", "as", "archive_me", ":", "logger", ".", "debug", "(", "f\"Archiving '{source}' to '{destination}'\"", ")", "archive_me", ".", "add", "(", "source", ",", "arcname", "=", "'.'", ")", "logger", ".", "info", "(", "f\"Archived '{source}' to '{destination}'\"", ")", "logger", ".", "debug", "(", "\"end\"", ")" ]
Archive specified path to a tar archive.

Args:
    context: dictionary-like. context is mandatory.
             context['tar']['archive'] must exist. It's a dictionary. keys are the paths to archive. values are the destination output paths.

Example:
    tar:
        archive:
            - in: path/to/dir
              out: path/to/destination.tar.xs
            - in: another/my.file
              out: ./my.tar.xs

    This will archive directory path/to/dir to path/to/destination.tar.xs, and also archive file another/my.file to ./my.tar.xs
[ "Archive", "specified", "path", "to", "a", "tar", "archive", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L105-L140
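The bare tarfile equivalent of one archive item; the paths and the 'w:xz' mode are illustrative (the step derives its mode via get_file_mode_for_writing).

    import tarfile

    with tarfile.open('path/to/destination.tar.xz', 'w:xz') as archive_me:
        # arcname='.' stores members relative to the archive root
        archive_me.add('path/to/dir', arcname='.')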
113
pypyr/pypyr-cli
pypyr/steps/tar.py
tar_extract
def tar_extract(context):
    """Extract all members of tar archive to specified path.

    Args:
        context: dictionary-like. context is mandatory.
                 context['tar']['extract'] must exist. It's a dictionary.
                 keys are the path to the tar to extract.
                 values are the destination paths.

    Example:
        tar:
            extract:
                - in: path/to/my.tar.xs
                  out: /path/extract/here
                - in: another/tar.xs
                  out: .

        This will extract path/to/my.tar.xs to /path/extract/here, and
        also extract another/tar.xs to $PWD.

    """
    logger.debug("start")

    mode = get_file_mode_for_reading(context)

    for item in context['tar']['extract']:
        # in is the path to the tar to extract. Allows string interpolation.
        source = context.get_formatted_string(item['in'])
        # out is the outdir, dhur. Allows string interpolation.
        destination = context.get_formatted_string(item['out'])
        with tarfile.open(source, mode) as extract_me:
            logger.debug(f"Extracting '{source}' to '{destination}'")

            extract_me.extractall(destination)
            logger.info(f"Extracted '{source}' to '{destination}'")

    logger.debug("end")
python
def tar_extract(context):
    logger.debug("start")

    mode = get_file_mode_for_reading(context)

    for item in context['tar']['extract']:
        # in is the path to the tar to extract. Allows string interpolation.
        source = context.get_formatted_string(item['in'])
        # out is the outdir, dhur. Allows string interpolation.
        destination = context.get_formatted_string(item['out'])
        with tarfile.open(source, mode) as extract_me:
            logger.debug(f"Extracting '{source}' to '{destination}'")

            extract_me.extractall(destination)
            logger.info(f"Extracted '{source}' to '{destination}'")

    logger.debug("end")
[ "def", "tar_extract", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"start\"", ")", "mode", "=", "get_file_mode_for_reading", "(", "context", ")", "for", "item", "in", "context", "[", "'tar'", "]", "[", "'extract'", "]", ":", "# in is the path to the tar to extract. Allows string interpolation.", "source", "=", "context", ".", "get_formatted_string", "(", "item", "[", "'in'", "]", ")", "# out is the outdir, dhur. Allows string interpolation.", "destination", "=", "context", ".", "get_formatted_string", "(", "item", "[", "'out'", "]", ")", "with", "tarfile", ".", "open", "(", "source", ",", "mode", ")", "as", "extract_me", ":", "logger", ".", "debug", "(", "f\"Extracting '{source}' to '{destination}'\"", ")", "extract_me", ".", "extractall", "(", "destination", ")", "logger", ".", "info", "(", "f\"Extracted '{source}' to '{destination}'\"", ")", "logger", ".", "debug", "(", "\"end\"", ")" ]
Extract all members of tar archive to specified path.

Args:
    context: dictionary-like. context is mandatory.
             context['tar']['extract'] must exist. It's a dictionary. keys are the path to the tar to extract. values are the destination paths.

Example:
    tar:
        extract:
            - in: path/to/my.tar.xs
              out: /path/extract/here
            - in: another/tar.xs
              out: .

    This will extract path/to/my.tar.xs to /path/extract/here, and also extract another/tar.xs to $PWD.
[ "Extract", "all", "members", "of", "tar", "archive", "to", "specified", "path", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/tar.py#L143-L178
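The bare tarfile equivalent of one extract item; paths are illustrative, and 'r:*' lets tarfile detect the compression (the step derives its mode via get_file_mode_for_reading).

    import tarfile

    with tarfile.open('path/to/my.tar.xz', 'r:*') as extract_me:
        extract_me.extractall('/path/extract/here')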
114
pypyr/pypyr-cli
pypyr/steps/shell.py
run_step
def run_step(context):
    """Run shell command without shell interpolation.

    Context is a dictionary or dictionary-like.

    Context must contain the following keys:
        cmd: <<cmd string>> (command + args to execute.)

        OR, as a dict
        cmd:
            run: str. mandatory. <<cmd string>> command + args to execute.
            save: bool. defaults False. save output to cmdOut.

    Will execute command string in the shell as a sub-process.
    The shell defaults to /bin/sh.
    The context['cmd'] string must be formatted exactly as it would be when
    typed at the shell prompt. This includes, for example, quoting or
    backslash escaping filenames with spaces in them.
    There is an exception to this: Escape curly braces: if you want a
    literal curly brace, double it like {{ or }}.

    If save is True, will save the output to context as follows:
        cmdOut:
            returncode: 0
            stdout: 'stdout str here. None if empty.'
            stderr: 'stderr str here. None if empty.'

    cmdOut.returncode is the exit status of the called process. Typically
    0 means OK. A negative value -N indicates that the child was
    terminated by signal N (POSIX only).

    context['cmd'] will interpolate anything in curly braces for values
    found in context. So if your context looks like this:
        key1: value1
        key2: value2
        cmd: mything --arg1 {key1}

    The cmd passed to the shell will be "mything --arg value1"
    """
    logger.debug("started")

    CmdStep(name=__name__, context=context).run_step(is_shell=True)

    logger.debug("done")
python
def run_step(context): logger.debug("started") CmdStep(name=__name__, context=context).run_step(is_shell=True) logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "CmdStep", "(", "name", "=", "__name__", ",", "context", "=", "context", ")", ".", "run_step", "(", "is_shell", "=", "True", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run shell command with shell interpolation. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute command string in the shell as a sub-process. The shell defaults to /bin/sh. The context['cmd'] string must be formatted exactly as it would be when typed at the shell prompt. This includes, for example, quoting or backslash escaping filenames with spaces in them. There is an exception to this: Escape curly braces: if you want a literal curly brace, double it like {{ or }}. If save is True, will save the output to context as follows: cmdOut: returncode: 0 stdout: 'stdout str here. None if empty.' stderr: 'stderr str here. None if empty.' cmdOut.returncode is the exit status of the called process. Typically 0 means OK. A negative value -N indicates that the child was terminated by signal N (POSIX only). context['cmd'] will interpolate anything in curly braces for values found in context. So if your context looks like this: key1: value1 key2: value2 cmd: mything --arg1 {key1} The cmd passed to the shell will be "mything --arg1 value1"
[ "Run", "shell", "command", "with", "shell", "interpolation", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/shell.py#L14-L57
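A hedged sketch of driving this step directly from python rather than from pipeline yaml; the greeting key is made up, and the cmdOut dict shape is taken from the docstring above:

from pypyr.context import Context

import pypyr.steps.shell

context = Context({
    'greeting': 'hello',
    'cmd': {
        'run': 'echo {greeting}',  # {greeting} interpolates from context
        'save': True,              # persist output to cmdOut
    },
})
pypyr.steps.shell.run_step(context)

# per the docstring, save=True populates returncode/stdout/stderr
print(context['cmdOut']['returncode'])  # 0 on success
print(context['cmdOut']['stdout'])      # e.g. 'hello'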
115
pypyr/pypyr-cli
pypyr/steps/envget.py
get_args
def get_args(get_item): """Parse env, key, default out of input dict. Args: get_item: dict. contains keys env/key/default Returns: (env, key, has_default, default) tuple, where env: str. env var name. key: str. save env value to this context key. has_default: bool. True if default specified. default: the value of default, if specified. Raises: ContextError: envGet is not a list of dicts. KeyNotInContextError: If env or key not found in get_item. """ if not isinstance(get_item, dict): raise ContextError('envGet must contain a list of dicts.') env = get_item.get('env', None) if not env: raise KeyNotInContextError( 'context envGet[env] must exist in context for envGet.') key = get_item.get('key', None) if not key: raise KeyNotInContextError( 'context envGet[key] must exist in context for envGet.') if 'default' in get_item: has_default = True default = get_item['default'] else: has_default = False default = None return (env, key, has_default, default)
python
def get_args(get_item): if not isinstance(get_item, dict): raise ContextError('envGet must contain a list of dicts.') env = get_item.get('env', None) if not env: raise KeyNotInContextError( 'context envGet[env] must exist in context for envGet.') key = get_item.get('key', None) if not key: raise KeyNotInContextError( 'context envGet[key] must exist in context for envGet.') if 'default' in get_item: has_default = True default = get_item['default'] else: has_default = False default = None return (env, key, has_default, default)
[ "def", "get_args", "(", "get_item", ")", ":", "if", "not", "isinstance", "(", "get_item", ",", "dict", ")", ":", "raise", "ContextError", "(", "'envGet must contain a list of dicts.'", ")", "env", "=", "get_item", ".", "get", "(", "'env'", ",", "None", ")", "if", "not", "env", ":", "raise", "KeyNotInContextError", "(", "'context envGet[env] must exist in context for envGet.'", ")", "key", "=", "get_item", ".", "get", "(", "'key'", ",", "None", ")", "if", "not", "key", ":", "raise", "KeyNotInContextError", "(", "'context envGet[key] must exist in context for envGet.'", ")", "if", "'default'", "in", "get_item", ":", "has_default", "=", "True", "default", "=", "get_item", "[", "'default'", "]", "else", ":", "has_default", "=", "False", "default", "=", "None", "return", "(", "env", ",", "key", ",", "has_default", ",", "default", ")" ]
Parse env, key, default out of input dict. Args: get_item: dict. contains keys env/key/default Returns: (env, key, has_default, default) tuple, where env: str. env var name. key: str. save env value to this context key. has_default: bool. True if default specified. default: the value of default, if specified. Raises: ContextError: envGet is not a list of dicts. KeyNotInContextError: If env or key not found in get_item.
[ "Parse", "env", "key", "default", "out", "of", "input", "dict", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/envget.py#L80-L120
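A small sketch of get_args on well-formed input, assuming it is imported from pypyr.steps.envget; the env var and key names are made up:

from pypyr.steps.envget import get_args

# default present
env, key, has_default, default = get_args(
    {'env': 'HOME', 'key': 'homeDir', 'default': '/tmp'})
# -> ('HOME', 'homeDir', True, '/tmp')

# no default: has_default is False and default is None
env, key, has_default, default = get_args({'env': 'USER', 'key': 'userName'})
# -> ('USER', 'userName', False, None)

# a non-dict item raises ContextError; a missing env or key raises
# KeyNotInContextError, as the Raises section above describes.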
116
pypyr/pypyr-cli
pypyr/steps/py.py
run_step
def run_step(context): """Execute dynamic python code. Context is a dictionary or dictionary-like. Context must contain key 'pycode'. Will exec context['pycode'] as dynamically interpreted python statements. context is mandatory. When you execute the pipeline, it should look something like this: pipeline-runner [name here] 'pycode=print(1+1)'. """ logger.debug("started") context.assert_key_has_value(key='pycode', caller=__name__) logger.debug(f"Executing python string: {context['pycode']}") locals_dictionary = locals() exec(context['pycode'], globals(), locals_dictionary) # It looks like this dance might be unnecessary in python 3.6 logger.debug("looking for context update in exec") exec_context = locals_dictionary['context'] context.update(exec_context) logger.debug("exec output context merged with pipeline context") logger.debug("done")
python
def run_step(context): logger.debug("started") context.assert_key_has_value(key='pycode', caller=__name__) logger.debug(f"Executing python string: {context['pycode']}") locals_dictionary = locals() exec(context['pycode'], globals(), locals_dictionary) # It looks like this dance might be unnecessary in python 3.6 logger.debug("looking for context update in exec") exec_context = locals_dictionary['context'] context.update(exec_context) logger.debug("exec output context merged with pipeline context") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'pycode'", ",", "caller", "=", "__name__", ")", "logger", ".", "debug", "(", "f\"Executing python string: {context['pycode']}\"", ")", "locals_dictionary", "=", "locals", "(", ")", "exec", "(", "context", "[", "'pycode'", "]", ",", "globals", "(", ")", ",", "locals_dictionary", ")", "# It looks like this dance might be unnecessary in python 3.6", "logger", ".", "debug", "(", "\"looking for context update in exec\"", ")", "exec_context", "=", "locals_dictionary", "[", "'context'", "]", "context", ".", "update", "(", "exec_context", ")", "logger", ".", "debug", "(", "\"exec output context merged with pipeline context\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Execute dynamic python code. Context is a dictionary or dictionary-like. Context must contain key 'pycode'. Will exec context['pycode'] as dynamically interpreted python statements. context is mandatory. When you execute the pipeline, it should look something like this: pipeline-runner [name here] 'pycode=print(1+1)'.
[ "Execute", "dynamic", "python", "code", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/py.py#L11-L35
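A sketch showing how the exec'd code and the pipeline context interact; the sum key is made up:

from pypyr.context import Context

import pypyr.steps.py

context = Context({'pycode': "context['sum'] = 1 + 1"})
pypyr.steps.py.run_step(context)

# the exec'd snippet saw 'context' in its locals, so its mutation is
# visible after the step completes
assert context['sum'] == 2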
117
pypyr/pypyr-cli
pypyr/cli.py
get_parser
def get_parser(): """Return ArgumentParser for pypyr cli.""" parser = argparse.ArgumentParser( allow_abbrev=True, description='pypyr pipeline runner') parser.add_argument('pipeline_name', help='Name of pipeline to run. It should exist in the ' './pipelines directory.') parser.add_argument(dest='pipeline_context', nargs='?', help='String for context values. Parsed by the ' 'pipeline\'s context_parser function.') parser.add_argument('--dir', dest='working_dir', default=os.getcwd(), help='Working directory. Use if your pipelines ' 'directory is elsewhere. Defaults to cwd.') parser.add_argument('--log', '--loglevel', dest='log_level', type=int, default=20, help='Integer log level. Defaults to 20 (INFO). ' '10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL' '.\n Log Level < 10 gives full traceback on errors.') parser.add_argument('--logpath', dest='log_path', help='Log-file path. Append log output to this path') parser.add_argument('--version', action='version', help='Echo version number.', version=f'{pypyr.version.get_version()}') return parser
python
def get_parser(): parser = argparse.ArgumentParser( allow_abbrev=True, description='pypyr pipeline runner') parser.add_argument('pipeline_name', help='Name of pipeline to run. It should exist in the ' './pipelines directory.') parser.add_argument(dest='pipeline_context', nargs='?', help='String for context values. Parsed by the ' 'pipeline\'s context_parser function.') parser.add_argument('--dir', dest='working_dir', default=os.getcwd(), help='Working directory. Use if your pipelines ' 'directory is elsewhere. Defaults to cwd.') parser.add_argument('--log', '--loglevel', dest='log_level', type=int, default=20, help='Integer log level. Defaults to 20 (INFO). ' '10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL' '.\n Log Level < 10 gives full traceback on errors.') parser.add_argument('--logpath', dest='log_path', help='Log-file path. Append log output to this path') parser.add_argument('--version', action='version', help='Echo version number.', version=f'{pypyr.version.get_version()}') return parser
[ "def", "get_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "allow_abbrev", "=", "True", ",", "description", "=", "'pypyr pipeline runner'", ")", "parser", ".", "add_argument", "(", "'pipeline_name'", ",", "help", "=", "'Name of pipeline to run. It should exist in the '", "'./pipelines directory.'", ")", "parser", ".", "add_argument", "(", "dest", "=", "'pipeline_context'", ",", "nargs", "=", "'?'", ",", "help", "=", "'String for context values. Parsed by the '", "'pipeline\\'s context_parser function.'", ")", "parser", ".", "add_argument", "(", "'--dir'", ",", "dest", "=", "'working_dir'", ",", "default", "=", "os", ".", "getcwd", "(", ")", ",", "help", "=", "'Working directory. Use if your pipelines '", "'directory is elsewhere. Defaults to cwd.'", ")", "parser", ".", "add_argument", "(", "'--log'", ",", "'--loglevel'", ",", "dest", "=", "'log_level'", ",", "type", "=", "int", ",", "default", "=", "20", ",", "help", "=", "'Integer log level. Defaults to 20 (INFO). '", "'10=DEBUG\\n20=INFO\\n30=WARNING\\n40=ERROR\\n50=CRITICAL'", "'.\\n Log Level < 10 gives full traceback on errors.'", ")", "parser", ".", "add_argument", "(", "'--logpath'", ",", "dest", "=", "'log_path'", ",", "help", "=", "'Log-file path. Append log output to this path'", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "help", "=", "'Echo version number.'", ",", "version", "=", "f'{pypyr.version.get_version()}'", ")", "return", "parser" ]
Return ArgumentParser for pypyr cli.
[ "Return", "ArgumentParser", "for", "pypyr", "cli", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/cli.py#L19-L44
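A sketch of what the parser above yields for a typical command line; the pipeline name and context string are hypothetical:

from pypyr.cli import get_parser

parser = get_parser()
args = parser.parse_args(
    ['mypipeline', 'key1=value1', '--log', '10', '--dir', '/tmp'])

# args.pipeline_name    == 'mypipeline'
# args.pipeline_context == 'key1=value1'
# args.log_level        == 10
# args.working_dir      == '/tmp'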
118
pypyr/pypyr-cli
pypyr/cli.py
main
def main(args=None): """Entry point for pypyr cli. The setup.py entry_point wraps this in sys.exit already so this effectively becomes sys.exit(main()). The __main__ entry point similarly wraps sys.exit(). """ if args is None: args = sys.argv[1:] parsed_args = get_args(args) try: return pypyr.pipelinerunner.main( pipeline_name=parsed_args.pipeline_name, pipeline_context_input=parsed_args.pipeline_context, working_dir=parsed_args.working_dir, log_level=parsed_args.log_level, log_path=parsed_args.log_path) except KeyboardInterrupt: # Shell standard is 128 + signum = 130 (SIGINT = 2) sys.stdout.write("\n") return 128 + signal.SIGINT except Exception as e: # stderr and exit code 255 sys.stderr.write("\n") sys.stderr.write(f"\033[91m{type(e).__name__}: {str(e)}\033[0;0m") sys.stderr.write("\n") # at this point, you're guaranteed to have args and thus log_level if parsed_args.log_level < 10: # traceback prints to stderr by default traceback.print_exc() return 255
python
def main(args=None): if args is None: args = sys.argv[1:] parsed_args = get_args(args) try: return pypyr.pipelinerunner.main( pipeline_name=parsed_args.pipeline_name, pipeline_context_input=parsed_args.pipeline_context, working_dir=parsed_args.working_dir, log_level=parsed_args.log_level, log_path=parsed_args.log_path) except KeyboardInterrupt: # Shell standard is 128 + signum = 130 (SIGINT = 2) sys.stdout.write("\n") return 128 + signal.SIGINT except Exception as e: # stderr and exit code 255 sys.stderr.write("\n") sys.stderr.write(f"\033[91m{type(e).__name__}: {str(e)}\033[0;0m") sys.stderr.write("\n") # at this point, you're guaranteed to have args and thus log_level if parsed_args.log_level < 10: # traceback prints to stderr by default traceback.print_exc() return 255
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "parsed_args", "=", "get_args", "(", "args", ")", "try", ":", "return", "pypyr", ".", "pipelinerunner", ".", "main", "(", "pipeline_name", "=", "parsed_args", ".", "pipeline_name", ",", "pipeline_context_input", "=", "parsed_args", ".", "pipeline_context", ",", "working_dir", "=", "parsed_args", ".", "working_dir", ",", "log_level", "=", "parsed_args", ".", "log_level", ",", "log_path", "=", "parsed_args", ".", "log_path", ")", "except", "KeyboardInterrupt", ":", "# Shell standard is 128 + signum = 130 (SIGINT = 2)", "sys", ".", "stdout", ".", "write", "(", "\"\\n\"", ")", "return", "128", "+", "signal", ".", "SIGINT", "except", "Exception", "as", "e", ":", "# stderr and exit code 255", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "f\"\\033[91m{type(e).__name__}: {str(e)}\\033[0;0m\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", ")", "# at this point, you're guaranteed to have args and thus log_level", "if", "parsed_args", ".", "log_level", "<", "10", ":", "# traceback prints to stderr by default", "traceback", ".", "print_exc", "(", ")", "return", "255" ]
Entry point for pypyr cli. The setup.py entry_point wraps this in sys.exit already so this effectively becomes sys.exit(main()). The __main__ entry point similarly wraps sys.exit().
[ "Entry", "point", "for", "pypyr", "cli", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/cli.py#L47-L80
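A sketch of invoking main programmatically and honoring its exit-code contract; 'mypipeline' is a hypothetical pipeline name:

import sys

from pypyr.cli import main

# 0 on success, 255 on unhandled errors, 130 (128 + SIGINT) on Ctrl+C
exit_code = main(['mypipeline', 'key1=value1', '--log', '40'])
sys.exit(exit_code)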
119
pypyr/pypyr-cli
pypyr/steps/contextclear.py
run_step
def run_step(context): """Remove specified keys from context. Args: Context is a dictionary or dictionary-like. context['contextClear'] must exist. It's a list of key names. Will iterate context['contextClear'] and remove those keys from context. For example, say input context is: key1: value1 key2: value2 key3: value3 key4: value4 contextClear: - key2 - key4 - contextClear This will result in return context: key1: value1 key3: value3 """ logger.debug("started") context.assert_key_has_value(key='contextClear', caller=__name__) for k in context['contextClear']: logger.debug(f"removing {k} from context") # slightly unorthodox pop returning None means you don't get a KeyError # if key doesn't exist context.pop(k, None) logger.info(f"removed {k} from context") logger.debug("done")
python
def run_step(context): logger.debug("started") context.assert_key_has_value(key='contextClear', caller=__name__) for k in context['contextClear']: logger.debug(f"removing {k} from context") # slightly unorthodox pop returning None means you don't get a KeyError # if key doesn't exist context.pop(k, None) logger.info(f"removed {k} from context") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'contextClear'", ",", "caller", "=", "__name__", ")", "for", "k", "in", "context", "[", "'contextClear'", "]", ":", "logger", ".", "debug", "(", "f\"removing {k} from context\"", ")", "# slightly unorthodox pop returning None means you don't get a KeyError", "# if key doesn't exist", "context", ".", "pop", "(", "k", ",", "None", ")", "logger", ".", "info", "(", "f\"removed {k} from context\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Remove specified keys from context. Args: Context is a dictionary or dictionary-like. context['contextClear'] must exist. It's a list of key names. Will iterate context['contextClear'] and remove those keys from context. For example, say input context is: key1: value1 key2: value2 key3: value3 key4: value4 contextClear: - key2 - key4 - contextClear This will result in return context: key1: value1 key3: value3
[ "Remove", "specified", "keys", "from", "context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextclear.py#L13-L46
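A sketch of the step removing keys in place, including the self-removal trick from the docstring example:

from pypyr.context import Context

import pypyr.steps.contextclear

context = Context({
    'key1': 'value1',
    'key2': 'value2',
    'contextClear': ['key2', 'contextClear'],  # a list of key names
})
pypyr.steps.contextclear.run_step(context)

# key2 and contextClear itself are gone; because of pop(k, None), listing
# a key that doesn't exist would not raise
assert context == {'key1': 'value1'}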
120
pypyr/pypyr-cli
pypyr/steps/safeshell.py
run_step
def run_step(context): """Run command, program or executable. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute the command string as a sub-process without shell interpolation. Escape curly braces: if you want a literal curly brace, double it like {{ or }}. If save is True, will save the output to context as follows: cmdOut: returncode: 0 stdout: 'stdout str here. None if empty.' stderr: 'stderr str here. None if empty.' cmdOut.returncode is the exit status of the called process. Typically 0 means OK. A negative value -N indicates that the child was terminated by signal N (POSIX only). context['cmd'] will interpolate anything in curly braces for values found in context. So if your context looks like this: key1: value1 key2: value2 cmd: mything --arg1 {key1} The cmd that runs will be "mything --arg1 value1" """ logger.debug("started") pypyr.steps.cmd.run_step(context) logger.debug("done")
python
def run_step(context): logger.debug("started") pypyr.steps.cmd.run_step(context) logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "pypyr", ".", "steps", ".", "cmd", ".", "run_step", "(", "context", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run command, program or executable. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute the command string as a sub-process without shell interpolation. Escape curly braces: if you want a literal curly brace, double it like {{ or }}. If save is True, will save the output to context as follows: cmdOut: returncode: 0 stdout: 'stdout str here. None if empty.' stderr: 'stderr str here. None if empty.' cmdOut.returncode is the exit status of the called process. Typically 0 means OK. A negative value -N indicates that the child was terminated by signal N (POSIX only). context['cmd'] will interpolate anything in curly braces for values found in context. So if your context looks like this: key1: value1 key2: value2 cmd: mything --arg1 {key1} The cmd that runs will be "mything --arg1 value1"
[ "Run", "command", "program", "or", "executable", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/safeshell.py#L16-L55
121
pypyr/pypyr-cli
pypyr/steps/default.py
run_step
def run_step(context): """Set hierarchy into context with substitutions if it doesn't exist yet. context is a dictionary or dictionary-like. context['defaults'] must exist. It's a dictionary. Will iterate context['defaults'] and add these as new values where their keys don't already exist. While it's doing so, it will leave all other values in the existing hierarchy untouched. List merging is purely additive, with no checks for uniqueness or already existing list items. E.g. context [0,1,2] with defaults=[2,3,4] will result in [0,1,2,2,3,4] Keep this in mind especially where complex types like dicts nest inside a list - a merge will always add a new dict list item, not merge it into whatever dicts might exist on the list already. For example, say input context is: key1: value1 key2: value2 key3: k31: value31 k32: value32 defaults: key2: 'aaa_{key1}_zzz' key3: k33: value33 key4: 'bbb_{key2}_yyy' This will result in return context: key1: value1 key2: value2 key3: k31: value31 k32: value32 k33: value33 key4: bbb_value2_yyy """ logger.debug("started") context.assert_key_has_value(key='defaults', caller=__name__) context.set_defaults(context['defaults']) logger.info(f"set {len(context['defaults'])} context item defaults.") logger.debug("done")
python
def run_step(context): logger.debug("started") context.assert_key_has_value(key='defaults', caller=__name__) context.set_defaults(context['defaults']) logger.info(f"set {len(context['defaults'])} context item defaults.") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'defaults'", ",", "caller", "=", "__name__", ")", "context", ".", "set_defaults", "(", "context", "[", "'defaults'", "]", ")", "logger", ".", "info", "(", "f\"set {len(context['defaults'])} context item defaults.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Set hierarchy into context with substitutions if it doesn't exist yet. context is a dictionary or dictionary-like. context['defaults'] must exist. It's a dictionary. Will iterate context['defaults'] and add these as new values where their keys don't already exist. While it's doing so, it will leave all other values in the existing hierarchy untouched. List merging is purely additive, with no checks for uniqueness or already existing list items. E.g. context [0,1,2] with defaults=[2,3,4] will result in [0,1,2,2,3,4] Keep this in mind especially where complex types like dicts nest inside a list - a merge will always add a new dict list item, not merge it into whatever dicts might exist on the list already. For example, say input context is: key1: value1 key2: value2 key3: k31: value31 k32: value32 defaults: key2: 'aaa_{key1}_zzz' key3: k33: value33 key4: 'bbb_{key2}_yyy' This will result in return context: key1: value1 key2: value2 key3: k31: value31 k32: value32 k33: value33 key4: bbb_value2_yyy
[ "Set", "hierarchy", "into", "context", "with", "substitutions", "if", "it", "doesn", "t", "exist", "yet", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/default.py#L38-L84
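A sketch of defaults only filling gaps, with the substitution behavior the docstring describes; the key names are made up:

from pypyr.context import Context

import pypyr.steps.default

context = Context({
    'key1': 'value1',
    'defaults': {
        'key1': 'ignored - key1 already exists',
        'key2': 'aaa_{key1}_zzz',  # substitutions apply to new values
    },
})
pypyr.steps.default.run_step(context)

# key1 untouched; key2 added with {key1} formatted in (per the docstring)
# context['key1'] == 'value1'
# context['key2'] == 'aaa_value1_zzz'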
122
pypyr/pypyr-cli
pypyr/stepsrunner.py
get_pipeline_steps
def get_pipeline_steps(pipeline, steps_group): """Get the steps attribute of module pipeline. If there is no steps sequence on the pipeline, return None. Guess you could theoretically want to run a pipeline with nothing in it. """ logger.debug("starting") assert pipeline assert steps_group logger.debug(f"retrieving {steps_group} steps from pipeline") if steps_group in pipeline: steps = pipeline[steps_group] if steps is None: logger.warn( f"{steps_group}: sequence has no elements. So it won't do " "anything.") logger.debug("done") return None steps_count = len(steps) logger.debug(f"{steps_count} steps found under {steps_group} in " "pipeline definition.") logger.debug("done") return steps else: logger.debug( f"pipeline doesn't have a {steps_group} collection. Add a " f"{steps_group}: sequence to the yaml if you want {steps_group} " "actually to do something.") logger.debug("done") return None
python
def get_pipeline_steps(pipeline, steps_group): logger.debug("starting") assert pipeline assert steps_group logger.debug(f"retrieving {steps_group} steps from pipeline") if steps_group in pipeline: steps = pipeline[steps_group] if steps is None: logger.warn( f"{steps_group}: sequence has no elements. So it won't do " "anything.") logger.debug("done") return None steps_count = len(steps) logger.debug(f"{steps_count} steps found under {steps_group} in " "pipeline definition.") logger.debug("done") return steps else: logger.debug( f"pipeline doesn't have a {steps_group} collection. Add a " f"{steps_group}: sequence to the yaml if you want {steps_group} " "actually to do something.") logger.debug("done") return None
[ "def", "get_pipeline_steps", "(", "pipeline", ",", "steps_group", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "assert", "pipeline", "assert", "steps_group", "logger", ".", "debug", "(", "f\"retrieving {steps_group} steps from pipeline\"", ")", "if", "steps_group", "in", "pipeline", ":", "steps", "=", "pipeline", "[", "steps_group", "]", "if", "steps", "is", "None", ":", "logger", ".", "warn", "(", "f\"{steps_group}: sequence has no elements. So it won't do \"", "\"anything.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "None", "steps_count", "=", "len", "(", "steps", ")", "logger", ".", "debug", "(", "f\"{steps_count} steps found under {steps_group} in \"", "\"pipeline definition.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "steps", "else", ":", "logger", ".", "debug", "(", "f\"pipeline doesn't have a {steps_group} collection. Add a \"", "f\"{steps_group}: sequence to the yaml if you want {steps_group} \"", "\"actually to do something.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "None" ]
Get the steps attribute of module pipeline. If there is no steps sequence on the pipeline, return None. Guess you could theoretically want to run a pipeline with nothing in it.
[ "Get", "the", "steps", "attribute", "of", "module", "pipeline", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L13-L47
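A sketch of the three outcomes - steps found, empty group, missing group - against a minimal pipeline dict:

from pypyr.stepsrunner import get_pipeline_steps

pipeline = {
    'steps': ['pypyr.steps.echo'],  # simple step: just the module name
    'on_failure': None,             # group exists but has no elements
}

assert get_pipeline_steps(pipeline, 'steps') == ['pypyr.steps.echo']
assert get_pipeline_steps(pipeline, 'on_failure') is None  # logs a warning
assert get_pipeline_steps(pipeline, 'on_success') is None  # group not defined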
123
pypyr/pypyr-cli
pypyr/stepsrunner.py
run_failure_step_group
def run_failure_step_group(pipeline, context): """Run the on_failure step group if it exists. This function will swallow all errors, to prevent obfuscating the error condition that got it here to begin with. """ logger.debug("starting") try: assert pipeline # if no on_failure exists, it'll do nothing. run_step_group(pipeline_definition=pipeline, step_group_name='on_failure', context=context) except Exception as exception: logger.error("Failure handler also failed. Swallowing.") logger.error(exception) logger.debug("done")
python
def run_failure_step_group(pipeline, context): logger.debug("starting") try: assert pipeline # if no on_failure exists, it'll do nothing. run_step_group(pipeline_definition=pipeline, step_group_name='on_failure', context=context) except Exception as exception: logger.error("Failure handler also failed. Swallowing.") logger.error(exception) logger.debug("done")
[ "def", "run_failure_step_group", "(", "pipeline", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "try", ":", "assert", "pipeline", "# if no on_failure exists, it'll do nothing.", "run_step_group", "(", "pipeline_definition", "=", "pipeline", ",", "step_group_name", "=", "'on_failure'", ",", "context", "=", "context", ")", "except", "Exception", "as", "exception", ":", "logger", ".", "error", "(", "\"Failure handler also failed. Swallowing.\"", ")", "logger", ".", "error", "(", "exception", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run the on_failure step group if it exists. This function will swallow all errors, to prevent obfuscating the error condition that got it here to begin with.
[ "Run", "the", "on_failure", "step", "group", "if", "it", "exists", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L50-L67
124
pypyr/pypyr-cli
pypyr/stepsrunner.py
run_step_group
def run_step_group(pipeline_definition, step_group_name, context): """Get the specified step group from the pipeline and run its steps.""" logger.debug(f"starting {step_group_name}") assert step_group_name steps = get_pipeline_steps(pipeline=pipeline_definition, steps_group=step_group_name) run_pipeline_steps(steps=steps, context=context) logger.debug(f"done {step_group_name}")
python
def run_step_group(pipeline_definition, step_group_name, context): logger.debug(f"starting {step_group_name}") assert step_group_name steps = get_pipeline_steps(pipeline=pipeline_definition, steps_group=step_group_name) run_pipeline_steps(steps=steps, context=context) logger.debug(f"done {step_group_name}")
[ "def", "run_step_group", "(", "pipeline_definition", ",", "step_group_name", ",", "context", ")", ":", "logger", ".", "debug", "(", "f\"starting {step_group_name}\"", ")", "assert", "step_group_name", "steps", "=", "get_pipeline_steps", "(", "pipeline", "=", "pipeline_definition", ",", "steps_group", "=", "step_group_name", ")", "run_pipeline_steps", "(", "steps", "=", "steps", ",", "context", "=", "context", ")", "logger", ".", "debug", "(", "f\"done {step_group_name}\"", ")" ]
Get the specified step group from the pipeline and run its steps.
[ "Get", "the", "specified", "step", "group", "from", "the", "pipeline", "and", "run", "its", "steps", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/stepsrunner.py#L96-L106
125
pypyr/pypyr-cli
pypyr/utils/filesystem.py
ensure_dir
def ensure_dir(path): """Create all parent directories of path if they don't exist. Args: path. Path-like object. Create parent dirs to this path. Return: None. """ os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True)
python
def ensure_dir(path): os.makedirs(os.path.abspath(os.path.dirname(path)), exist_ok=True)
[ "def", "ensure_dir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", ",", "exist_ok", "=", "True", ")" ]
Create all parent directories of path if they don't exist. Args: path. Path-like object. Create parent dirs to this path. Return: None.
[ "Create", "all", "parent", "directories", "of", "path", "if", "they", "don", "t", "exist", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L394-L404
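Note the subtlety: the path argument is treated as a file path, and only its parents are created. A sketch with a hypothetical path:

from pypyr.utils.filesystem import ensure_dir

# creates /tmp/a/b/c if needed; does NOT create or touch file.txt itself
ensure_dir('/tmp/a/b/c/file.txt')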
126
pypyr/pypyr-cli
pypyr/utils/filesystem.py
get_glob
def get_glob(path): """Process the input path, applying globbing and formatting. Do note that this returns files AND directories that match the glob. No tilde expansion is done, but *, ?, and character ranges expressed with [] will be correctly matched. Escape all special characters ('?', '*' and '['). For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. If passing in an iterable of paths, will expand matches for each path in the iterable. The function will return all the matches for each path glob expression combined into a single list. Args: path: Path-like string, or iterable (list or tuple) of paths. Returns: Combined list of paths found for input glob. """ if isinstance(path, str): return glob.glob(path, recursive=True) if isinstance(path, os.PathLike): # hilariously enough, glob doesn't like path-like. Gotta be str. return glob.glob(str(path), recursive=True) elif isinstance(path, (list, tuple)): # each glob returns a list, so chain all the lists into one big list return list(chain.from_iterable( glob.glob(str(p), recursive=True) for p in path)) else: raise TypeError("path should be string, path-like or a list. Instead, " f"it's a {type(path)}")
python
def get_glob(path): if isinstance(path, str): return glob.glob(path, recursive=True) if isinstance(path, os.PathLike): # hilariously enough, glob doesn't like path-like. Gotta be str. return glob.glob(str(path), recursive=True) elif isinstance(path, (list, tuple)): # each glob returns a list, so chain all the lists into one big list return list(chain.from_iterable( glob.glob(str(p), recursive=True) for p in path)) else: raise TypeError("path should be string, path-like or a list. Instead, " f"it's a {type(path)}")
[ "def", "get_glob", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "str", ")", ":", "return", "glob", ".", "glob", "(", "path", ",", "recursive", "=", "True", ")", "if", "isinstance", "(", "path", ",", "os", ".", "PathLike", ")", ":", "# hilariously enough, glob doesn't like path-like. Gotta be str.", "return", "glob", ".", "glob", "(", "str", "(", "path", ")", ",", "recursive", "=", "True", ")", "elif", "isinstance", "(", "path", ",", "(", "list", ",", "tuple", ")", ")", ":", "# each glob returns a list, so chain all the lists into one big list", "return", "list", "(", "chain", ".", "from_iterable", "(", "glob", ".", "glob", "(", "str", "(", "p", ")", ",", "recursive", "=", "True", ")", "for", "p", "in", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path should be string, path-like or a list. Instead, \"", "f\"it's a {type(path)}\"", ")" ]
Process the input path, applying globbing and formatting. Do note that this returns files AND directories that match the glob. No tilde expansion is done, but *, ?, and character ranges expressed with [] will be correctly matched. Escape all special characters ('?', '*' and '['). For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. If passing in an iterable of paths, will expand matches for each path in the iterable. The function will return all the matches for each path glob expression combined into a single list. Args: path: Path-like string, or iterable (list or tuple) of paths. Returns: Combined list of paths found for input glob.
[ "Process", "the", "input", "path", "applying", "globbing", "and", "formatting", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L407-L441
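A sketch of the three accepted input shapes; the glob patterns are made up:

from pathlib import Path

from pypyr.utils.filesystem import get_glob

get_glob('src/**/*.py')                 # str: single recursive glob
get_glob(Path('src') / '*.py')          # path-like: coerced to str internally
get_glob(['docs/*.md', 'src/**/*.py'])  # list: per-glob results chained
                                        # into one combined list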
127
pypyr/pypyr-cli
pypyr/utils/filesystem.py
is_same_file
def is_same_file(path1, path2): """Return True if path1 is the same file as path2. The reason for this dance is that samefile throws if either file doesn't exist. Args: path1: str or path-like. path2: str or path-like. Returns: bool. True if the same file, False if not. """ return ( path1 and path2 and os.path.isfile(path1) and os.path.isfile(path2) and os.path.samefile(path1, path2))
python
def is_same_file(path1, path2): return ( path1 and path2 and os.path.isfile(path1) and os.path.isfile(path2) and os.path.samefile(path1, path2))
[ "def", "is_same_file", "(", "path1", ",", "path2", ")", ":", "return", "(", "path1", "and", "path2", "and", "os", ".", "path", ".", "isfile", "(", "path1", ")", "and", "os", ".", "path", ".", "isfile", "(", "path2", ")", "and", "os", ".", "path", ".", "samefile", "(", "path1", ",", "path2", ")", ")" ]
Return True if path1 is the same file as path2. The reason for this dance is that samefile throws if either file doesn't exist. Args: path1: str or path-like. path2: str or path-like. Returns: bool. True if the same file, False if not.
[ "Return", "True", "if", "path1", "is", "the", "same", "file", "as", "path2", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L444-L461
128
pypyr/pypyr-cli
pypyr/utils/filesystem.py
move_file
def move_file(src, dest): """Move source file to destination. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong. """ try: os.replace(src, dest) except Exception as ex_replace: logger.error(f"error moving file {src} to " f"{dest}. {ex_replace}") raise
python
def move_file(src, dest): try: os.replace(src, dest) except Exception as ex_replace: logger.error(f"error moving file {src} to " f"{dest}. {ex_replace}") raise
[ "def", "move_file", "(", "src", ",", "dest", ")", ":", "try", ":", "os", ".", "replace", "(", "src", ",", "dest", ")", "except", "Exception", "as", "ex_replace", ":", "logger", ".", "error", "(", "f\"error moving file {src} to \"", "f\"{dest}. {ex_replace}\"", ")", "raise" ]
Move source file to destination. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong.
[ "Move", "source", "file", "to", "destination", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L464-L486
129
pypyr/pypyr-cli
pypyr/utils/filesystem.py
move_temp_file
def move_temp_file(src, dest): """Move src to dest. Delete src if something goes wrong. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong. Does its best to clean up after itself and remove temp files. """ try: move_file(src, dest) except Exception: try: os.remove(src) except Exception as ex_clean: # at this point, something's deeply wrong, so log error. # raising the original error, though, not this error in the # error handler, as the 1st was the initial cause of all of # this. logger.error(f"error removing temp file {src}. " f"{ex_clean}") raise
python
def move_temp_file(src, dest): try: move_file(src, dest) except Exception: try: os.remove(src) except Exception as ex_clean: # at this point, something's deeply wrong, so log error. # raising the original error, though, not this error in the # error handler, as the 1st was the initial cause of all of # this. logger.error(f"error removing temp file {src}. " f"{ex_clean}") raise
[ "def", "move_temp_file", "(", "src", ",", "dest", ")", ":", "try", ":", "move_file", "(", "src", ",", "dest", ")", "except", "Exception", ":", "try", ":", "os", ".", "remove", "(", "src", ")", "except", "Exception", "as", "ex_clean", ":", "# at this point, something's deeply wrong, so log error.", "# raising the original error, though, not this error in the", "# error handler, as the 1st was the initial cause of all of", "# this.", "logger", ".", "error", "(", "f\"error removing temp file {src}. \"", "f\"{ex_clean}\"", ")", "raise" ]
Move src to dest. Delete src if something goes wrong. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong. Does its best to clean up after itself and remove temp files.
[ "Move", "src", "to", "dest", ".", "Delete", "src", "if", "something", "goes", "wrong", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L489-L520
130
pypyr/pypyr-cli
pypyr/utils/filesystem.py
FileRewriter.files_in_to_out
def files_in_to_out(self, in_path, out_path=None): """Write in files to out, calling the formatter on each line. Calls in_to_out under the hood to format the in_path payload. The formatting processing is done by the self.formatter instance. Args: in_path: str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. out_path: str or path-like. Can refer to a file or a directory. will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g. it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. Top tip: Path-like objects strip the trailing slash. If you want to pass in a dir that does not exist yet as out-path with a trailing /, you should be passing it as a str to preserve the /. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ in_paths = get_glob(in_path) in_count = len(in_paths) if in_count == 0: logger.debug(f'in path found {in_count} paths.') else: logger.debug(f'in path found {in_count} paths:') for path in in_paths: logger.debug(f'{path}') logger.debug( 'herewith ends the paths. will now process each file.') if in_paths: # derive the destination directory, ensure it's ready for writing basedir_out = None is_outfile_name_known = False if out_path: # outpath could be a file, or a dir pathlib_out = Path(out_path) # yep, Path() strips trailing /, hence check original string if isinstance(out_path, str) and out_path.endswith(os.sep): # ensure dir - mimic posix mkdir -p pathlib_out.mkdir(parents=True, exist_ok=True) basedir_out = pathlib_out elif pathlib_out.is_dir(): basedir_out = pathlib_out else: if len(in_paths) > 1: raise Error( f'{in_path} resolves to {len(in_paths)} files, ' 'but you specified only a single file as out ' f'{out_path}. If the outpath is meant to be a ' 'directory, put a / at the end.') # at this point it must be a file (not dir) path # make sure that the parent dir exists basedir_out = pathlib_out.parent basedir_out.parent.mkdir(parents=True, exist_ok=True) is_outfile_name_known = True # loop through all the in files and write them to the out dir file_counter = 0 is_edit = False for path in in_paths: actual_in = Path(path) # recursive glob returns dirs too, only interested in files if actual_in.is_file(): if basedir_out: if is_outfile_name_known: actual_out = pathlib_out else: # default to original src file name if only out dir # specified without an out file name actual_out = basedir_out.joinpath(actual_in.name) logger.debug(f"writing {path} to {actual_out}") self.in_to_out(in_path=actual_in, out_path=actual_out) else: logger.debug(f"editing {path}") self.in_to_out(in_path=actual_in) is_edit = True file_counter += 1 if is_edit: logger.info( f"edited & wrote {file_counter} file(s) at {in_path}") else: logger.info( f"read {in_path}, formatted and wrote {file_counter} " f"file(s) to {out_path}") else: logger.info(f"{in_path} found no files")
python
def files_in_to_out(self, in_path, out_path=None): in_paths = get_glob(in_path) in_count = len(in_paths) if in_count == 0: logger.debug(f'in path found {in_count} paths.') else: logger.debug(f'in path found {in_count} paths:') for path in in_paths: logger.debug(f'{path}') logger.debug( 'herewith ends the paths. will now process each file.') if in_paths: # derive the destination directory, ensure it's ready for writing basedir_out = None is_outfile_name_known = False if out_path: # outpath could be a file, or a dir pathlib_out = Path(out_path) # yep, Path() strips trailing /, hence check original string if isinstance(out_path, str) and out_path.endswith(os.sep): # ensure dir - mimic posix mkdir -p pathlib_out.mkdir(parents=True, exist_ok=True) basedir_out = pathlib_out elif pathlib_out.is_dir(): basedir_out = pathlib_out else: if len(in_paths) > 1: raise Error( f'{in_path} resolves to {len(in_paths)} files, ' 'but you specified only a single file as out ' f'{out_path}. If the outpath is meant to be a ' 'directory, put a / at the end.') # at this point it must be a file (not dir) path # make sure that the parent dir exists basedir_out = pathlib_out.parent basedir_out.parent.mkdir(parents=True, exist_ok=True) is_outfile_name_known = True # loop through all the in files and write them to the out dir file_counter = 0 is_edit = False for path in in_paths: actual_in = Path(path) # recursive glob returns dirs too, only interested in files if actual_in.is_file(): if basedir_out: if is_outfile_name_known: actual_out = pathlib_out else: # default to original src file name if only out dir # specified without an out file name actual_out = basedir_out.joinpath(actual_in.name) logger.debug(f"writing {path} to {actual_out}") self.in_to_out(in_path=actual_in, out_path=actual_out) else: logger.debug(f"editing {path}") self.in_to_out(in_path=actual_in) is_edit = True file_counter += 1 if is_edit: logger.info( f"edited & wrote {file_counter} file(s) at {in_path}") else: logger.info( f"read {in_path}, formatted and wrote {file_counter} " f"file(s) to {out_path}") else: logger.info(f"{in_path} found no files")
[ "def", "files_in_to_out", "(", "self", ",", "in_path", ",", "out_path", "=", "None", ")", ":", "in_paths", "=", "get_glob", "(", "in_path", ")", "in_count", "=", "len", "(", "in_paths", ")", "if", "in_count", "==", "0", ":", "logger", ".", "debug", "(", "f'in path found {in_count} paths.'", ")", "else", ":", "logger", ".", "debug", "(", "f'in path found {in_count} paths:'", ")", "for", "path", "in", "in_paths", ":", "logger", ".", "debug", "(", "f'{path}'", ")", "logger", ".", "debug", "(", "'herewith ends the paths. will now process each file.'", ")", "if", "in_paths", ":", "# derive the destination directory, ensure it's ready for writing", "basedir_out", "=", "None", "is_outfile_name_known", "=", "False", "if", "out_path", ":", "# outpath could be a file, or a dir", "pathlib_out", "=", "Path", "(", "out_path", ")", "# yep, Path() strips trailing /, hence check original string", "if", "isinstance", "(", "out_path", ",", "str", ")", "and", "out_path", ".", "endswith", "(", "os", ".", "sep", ")", ":", "# ensure dir - mimic posix mkdir -p", "pathlib_out", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "basedir_out", "=", "pathlib_out", "elif", "pathlib_out", ".", "is_dir", "(", ")", ":", "basedir_out", "=", "pathlib_out", "else", ":", "if", "len", "(", "in_paths", ")", ">", "1", ":", "raise", "Error", "(", "f'{in_path} resolves to {len(in_paths)} files, '", "'but you specified only a single file as out '", "f'{out_path}. If the outpath is meant to be a '", "'directory, put a / at the end.'", ")", "# at this point it must be a file (not dir) path", "# make sure that the parent dir exists", "basedir_out", "=", "pathlib_out", ".", "parent", "basedir_out", ".", "parent", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "is_outfile_name_known", "=", "True", "# loop through all the in files and write them to the out dir", "file_counter", "=", "0", "is_edit", "=", "False", "for", "path", "in", "in_paths", ":", "actual_in", "=", "Path", "(", "path", ")", "# recursive glob returns dirs too, only interested in files", "if", "actual_in", ".", "is_file", "(", ")", ":", "if", "basedir_out", ":", "if", "is_outfile_name_known", ":", "actual_out", "=", "pathlib_out", "else", ":", "# default to original src file name if only out dir", "# specified without an out file name", "actual_out", "=", "basedir_out", ".", "joinpath", "(", "actual_in", ".", "name", ")", "logger", ".", "debug", "(", "f\"writing {path} to {actual_out}\"", ")", "self", ".", "in_to_out", "(", "in_path", "=", "actual_in", ",", "out_path", "=", "actual_out", ")", "else", ":", "logger", ".", "debug", "(", "f\"editing {path}\"", ")", "self", ".", "in_to_out", "(", "in_path", "=", "actual_in", ")", "is_edit", "=", "True", "file_counter", "+=", "1", "if", "is_edit", ":", "logger", ".", "info", "(", "f\"edited & wrote {file_counter} file(s) at {in_path}\"", ")", "else", ":", "logger", ".", "info", "(", "f\"read {in_path}, formatted and wrote {file_counter} \"", "f\"file(s) to {out_path}\"", ")", "else", ":", "logger", ".", "info", "(", "f\"{in_path} found no files\"", ")" ]
Write in files to out, calling the formatter on each line. Calls in_to_out under the hood to format the in_path payload. The formatting processing is done by the self.formatter instance. Args: in_path: str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. out_path: str or path-like. Can refer to a file or a directory. will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g. it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. Top tip: Path-like objects strip the trailing slash. If you want to pass in a dir that does not exist yet as out-path with a trailing /, you should be passing it as a str to preserve the /. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None.
[ "Write", "in", "files", "to", "out", "calling", "the", "formatter", "on", "each", "line", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L56-L156
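A hedged sketch of driving files_in_to_out via the StreamRewriter subclass defined further down this file, assuming the rewriter constructor takes the formatter callable; the upper-casing formatter and the paths are made up:

from pypyr.utils.filesystem import StreamRewriter

def shout(lines):
    """Yield each input line upper-cased."""
    for line in lines:
        yield line.upper()

rewriter = StreamRewriter(formatter=shout)

# trailing / marks out_path as a directory, per the docstring tip
rewriter.files_in_to_out(in_path='in/**/*.txt', out_path='out/')

# no out_path: in-place edit of every matched file
rewriter.files_in_to_out(in_path='in/**/*.txt')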
131
pypyr/pypyr-cli
pypyr/utils/filesystem.py
ObjectRewriter.in_to_out
def in_to_out(self, in_path, out_path=None): """Load file into object, formats, writes object to out. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: obj = self.object_representer.load(infile) if out_path: logger.debug( f"opening destination file for writing: {out_path}") ensure_dir(out_path) with open(out_path, 'w') as outfile: self.object_representer.dump(outfile, self.formatter(obj)) return else: logger.debug("opening temp file for writing...") with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: self.object_representer.dump(outfile, self.formatter(obj)) logger.debug(f"moving temp file to: {in_path}") move_temp_file(outfile.name, infile.name)
python
def in_to_out(self, in_path, out_path=None): if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: obj = self.object_representer.load(infile) if out_path: logger.debug( f"opening destination file for writing: {out_path}") ensure_dir(out_path) with open(out_path, 'w') as outfile: self.object_representer.dump(outfile, self.formatter(obj)) return else: logger.debug("opening temp file for writing...") with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: self.object_representer.dump(outfile, self.formatter(obj)) logger.debug(f"moving temp file to: {in_path}") move_temp_file(outfile.name, infile.name)
[ "def", "in_to_out", "(", "self", ",", "in_path", ",", "out_path", "=", "None", ")", ":", "if", "is_same_file", "(", "in_path", ",", "out_path", ")", ":", "logger", ".", "debug", "(", "\"in path and out path are the same file. writing to temp \"", "\"file and then replacing in path with the temp file.\"", ")", "out_path", "=", "None", "logger", ".", "debug", "(", "f\"opening source file: {in_path}\"", ")", "with", "open", "(", "in_path", ")", "as", "infile", ":", "obj", "=", "self", ".", "object_representer", ".", "load", "(", "infile", ")", "if", "out_path", ":", "logger", ".", "debug", "(", "f\"opening destination file for writing: {out_path}\"", ")", "ensure_dir", "(", "out_path", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "outfile", ":", "self", ".", "object_representer", ".", "dump", "(", "outfile", ",", "self", ".", "formatter", "(", "obj", ")", ")", "return", "else", ":", "logger", ".", "debug", "(", "\"opening temp file for writing...\"", ")", "with", "NamedTemporaryFile", "(", "mode", "=", "'w+t'", ",", "dir", "=", "os", ".", "path", ".", "dirname", "(", "in_path", ")", ",", "delete", "=", "False", ")", "as", "outfile", ":", "self", ".", "object_representer", ".", "dump", "(", "outfile", ",", "self", ".", "formatter", "(", "obj", ")", ")", "logger", ".", "debug", "(", "f\"moving temp file to: {in_path}\"", ")", "move_temp_file", "(", "outfile", ".", "name", ",", "infile", ".", "name", ")" ]
Load file into object, formats, writes object to out. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None.
[ "Load", "file", "into", "object", "formats", "writes", "object", "to", "out", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L188-L233
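A hedged sketch, assuming ObjectRewriter is constructed with the formatter and an object representer like the JsonRepresenter below; the key-sorting formatter and the paths are made up:

from pypyr.utils.filesystem import JsonRepresenter, ObjectRewriter

def sort_keys(obj):
    """Return the loaded object with its top-level keys sorted."""
    return dict(sorted(obj.items()))

rewriter = ObjectRewriter(formatter=sort_keys,
                          object_representer=JsonRepresenter())

rewriter.in_to_out('config.json', 'out/config.json')  # explicit out file
rewriter.in_to_out('config.json')                     # safe in-place edit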
132
pypyr/pypyr-cli
pypyr/utils/filesystem.py
StreamRewriter.in_to_out
def in_to_out(self, in_path, out_path=None): """Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ is_in_place_edit = False if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None is_in_place_edit = True logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: if out_path: logger.debug( f"opening destination file for writing: {out_path}") ensure_dir(out_path) with open(out_path, 'w') as outfile: outfile.writelines(self.formatter(infile)) return else: logger.debug("opening temp file for writing...") with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: outfile.writelines(self.formatter(infile)) is_in_place_edit = True # only replace infile AFTER it's closed, outside the with. # pragma exclude because func actually returns on 287 in if out_path, # and cov not smart enough to realize that !is_in_place_edit won't ever # happen here (the function will have exited already) if is_in_place_edit: # pragma: no branch logger.debug(f"moving temp file to: {in_path}") move_temp_file(outfile.name, infile.name)
python
def in_to_out(self, in_path, out_path=None): is_in_place_edit = False if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None is_in_place_edit = True logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: if out_path: logger.debug( f"opening destination file for writing: {out_path}") ensure_dir(out_path) with open(out_path, 'w') as outfile: outfile.writelines(self.formatter(infile)) return else: logger.debug("opening temp file for writing...") with NamedTemporaryFile(mode='w+t', dir=os.path.dirname(in_path), delete=False) as outfile: outfile.writelines(self.formatter(infile)) is_in_place_edit = True # only replace infile AFTER it's closed, outside the with. # pragma exclude because func actually returns on 287 in if out_path, # and cov not smart enough to realize that !is_in_place_edit won't ever # happen here (the function will have exited already) if is_in_place_edit: # pragma: no branch logger.debug(f"moving temp file to: {in_path}") move_temp_file(outfile.name, infile.name)
[ "def", "in_to_out", "(", "self", ",", "in_path", ",", "out_path", "=", "None", ")", ":", "is_in_place_edit", "=", "False", "if", "is_same_file", "(", "in_path", ",", "out_path", ")", ":", "logger", ".", "debug", "(", "\"in path and out path are the same file. writing to temp \"", "\"file and then replacing in path with the temp file.\"", ")", "out_path", "=", "None", "is_in_place_edit", "=", "True", "logger", ".", "debug", "(", "f\"opening source file: {in_path}\"", ")", "with", "open", "(", "in_path", ")", "as", "infile", ":", "if", "out_path", ":", "logger", ".", "debug", "(", "f\"opening destination file for writing: {out_path}\"", ")", "ensure_dir", "(", "out_path", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "writelines", "(", "self", ".", "formatter", "(", "infile", ")", ")", "return", "else", ":", "logger", ".", "debug", "(", "\"opening temp file for writing...\"", ")", "with", "NamedTemporaryFile", "(", "mode", "=", "'w+t'", ",", "dir", "=", "os", ".", "path", ".", "dirname", "(", "in_path", ")", ",", "delete", "=", "False", ")", "as", "outfile", ":", "outfile", ".", "writelines", "(", "self", ".", "formatter", "(", "infile", ")", ")", "is_in_place_edit", "=", "True", "# only replace infile AFTER it's closed, outside the with.", "# pragma exclude because func actually returns on 287 in if out_path,", "# and cov not smart enough to realize that !is_in_place_edit won't ever", "# happen here (the function will have exited already)", "if", "is_in_place_edit", ":", "# pragma: no branch", "logger", ".", "debug", "(", "f\"moving temp file to: {in_path}\"", ")", "move_temp_file", "(", "outfile", ".", "name", ",", "infile", ".", "name", ")" ]
Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. Will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in file. Returns: None.
[ "Write", "a", "single", "file", "in", "to", "out", "running", "self", ".", "formatter", "on", "each", "line", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L252-L303
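A minimal usage sketch for the rewriter above. It assumes StreamRewriter takes the line-formatter callable as a constructor argument (the constructor isn't shown in this record), and notes.txt is a hypothetical file:

from pypyr.utils.filesystem import StreamRewriter

def shout_formatter(lines):
    # the formatter receives an iterable of lines and yields rewritten lines
    for line in lines:
        yield line.upper()

rewriter = StreamRewriter(shout_formatter)  # constructor arg is an assumption
# no out_path given, so this in-place edits via a temp file + move
rewriter.in_to_out('notes.txt')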
133
pypyr/pypyr-cli
pypyr/utils/filesystem.py
JsonRepresenter.dump
def dump(self, file, payload): """Dump json object to open file output. Writes json with 2-space indentation. Args: file: Open file-like object. Must be open for writing. payload: The Json object to write to file. Returns: None. """ json.dump(payload, file, indent=2, ensure_ascii=False)
python
def dump(self, file, payload): json.dump(payload, file, indent=2, ensure_ascii=False)
[ "def", "dump", "(", "self", ",", "file", ",", "payload", ")", ":", "json", ".", "dump", "(", "payload", ",", "file", ",", "indent", "=", "2", ",", "ensure_ascii", "=", "False", ")" ]
Dump json object to open file output. Writes json with 2-space indentation. Args: file: Open file-like object. Must be open for writing. payload: The Json object to write to file. Returns: None.
[ "Dump", "json", "oject", "to", "open", "file", "output", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/filesystem.py#L341-L354
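A quick sketch of calling dump on an open handle; the no-arg constructor is an assumption, since only the method is shown in this record:

from pypyr.utils.filesystem import JsonRepresenter

representer = JsonRepresenter()  # no-arg constructor assumed
with open('out.json', 'w') as outfile:
    # writes {"key": "value"} with 2-space indentation, unicode left intact
    representer.dump(outfile, {'key': 'value'})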
134
pypyr/pypyr-cli
pypyr/steps/filereplace.py
run_step
def run_step(context): """Parse input file and replace a search string. This also does string substitutions from context on the fileReplacePairs. It does this before it searches & replaces the in file. Be careful of order. If fileReplacePairs is not an ordered collection, replacements could evaluate in any given order. If this is coming in from pipeline yaml it will be an ordered dictionary, so life is good. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileReplace - in. mandatory. str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. - out. optional. path-like. Can refer to a file or a directory. Will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g. it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. If out_path is not specified or None, will in-place edit and overwrite the in-files. - replacePairs. mandatory. Dictionary where items are: 'find_string': 'replace_string' Returns: None. Raises: FileNotFoundError: take a guess pypyr.errors.KeyNotInContextError: Any of the required keys missing in context. pypyr.errors.KeyInContextHasNoValueError: Any of the required keys exists but is None. """ logger.debug("started") deprecated(context) StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step() logger.debug("done")
python
def run_step(context): logger.debug("started") deprecated(context) StreamReplacePairsRewriterStep(__name__, 'fileReplace', context).run_step() logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "deprecated", "(", "context", ")", "StreamReplacePairsRewriterStep", "(", "__name__", ",", "'fileReplace'", ",", "context", ")", ".", "run_step", "(", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Parse input file and replace a search string. This also does string substitutions from context on the fileReplacePairs. It does this before it searches & replaces the in file. Be careful of order. If fileReplacePairs is not an ordered collection, replacements could evaluate in any given order. If this is coming in from pipeline yaml it will be an ordered dictionary, so life is good. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileReplace - in. mandatory. str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. - out. optional. path-like. Can refer to a file or a directory. Will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g. it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. If out_path is not specified or None, will in-place edit and overwrite the in-files. - replacePairs. mandatory. Dictionary where items are: 'find_string': 'replace_string' Returns: None. Raises: FileNotFoundError: take a guess pypyr.errors.KeyNotInContextError: Any of the required keys missing in context. pypyr.errors.KeyInContextHasNoValueError: Any of the required keys exists but is None.
[ "Parse", "input", "file", "and", "replace", "a", "search", "string", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filereplace.py#L9-L56
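A hedged sketch of driving this step from python rather than from pipeline yaml; the paths and replace pairs are hypothetical:

from pypyr.context import Context
import pypyr.steps.filereplace as filereplace

context = Context({
    'fileReplace': {
        'in': 'templates/config.txt',  # could also be a glob or a list
        'out': 'out/',                 # trailing / forces directory semantics
        'replacePairs': {'find me': 'replace with me'},
    },
})
filereplace.run_step(context)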
135
pypyr/pypyr-cli
pypyr/log/logger.py
set_logging_config
def set_logging_config(log_level, handlers): """Set python logging library config. Run this ONCE at the start of your process. It formats the python logging module's output. Defaults logging level to INFO (= 20). """ logging.basicConfig( format='%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=log_level, handlers=handlers)
python
def set_logging_config(log_level, handlers): logging.basicConfig( format='%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=log_level, handlers=handlers)
[ "def", "set_logging_config", "(", "log_level", ",", "handlers", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "'%(asctime)s %(levelname)s:%(name)s:%(funcName)s: %(message)s'", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ",", "level", "=", "log_level", ",", "handlers", "=", "handlers", ")" ]
Set python logging library config. Run this ONCE at the start of your process. It formats the python logging module's output. Defaults logging level to INFO (= 20).
[ "Set", "python", "logging", "library", "config", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/log/logger.py#L8-L19
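Calling it directly looks like this; the handler list mirrors what set_root_logger below assembles:

import logging
from pypyr.log.logger import set_logging_config

# INFO (20) to console only; append a FileHandler to also log to file
set_logging_config(logging.INFO, handlers=[logging.StreamHandler()])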
136
pypyr/pypyr-cli
pypyr/log/logger.py
set_root_logger
def set_root_logger(root_log_level, log_path=None): """Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization. """ handlers = [] console_handler = logging.StreamHandler() handlers.append(console_handler) if log_path: file_handler = logging.FileHandler(log_path) handlers.append(file_handler) set_logging_config(root_log_level, handlers=handlers) root_logger = logging.getLogger("pypyr") root_logger.debug( f"Root logger {root_logger.name} configured with level " f"{root_log_level}")
python
def set_root_logger(root_log_level, log_path=None): handlers = [] console_handler = logging.StreamHandler() handlers.append(console_handler) if log_path: file_handler = logging.FileHandler(log_path) handlers.append(file_handler) set_logging_config(root_log_level, handlers=handlers) root_logger = logging.getLogger("pypyr") root_logger.debug( f"Root logger {root_logger.name} configured with level " f"{root_log_level}")
[ "def", "set_root_logger", "(", "root_log_level", ",", "log_path", "=", "None", ")", ":", "handlers", "=", "[", "]", "console_handler", "=", "logging", ".", "StreamHandler", "(", ")", "handlers", ".", "append", "(", "console_handler", ")", "if", "log_path", ":", "file_handler", "=", "logging", ".", "FileHandler", "(", "log_path", ")", "handlers", ".", "append", "(", "file_handler", ")", "set_logging_config", "(", "root_log_level", ",", "handlers", "=", "handlers", ")", "root_logger", "=", "logging", ".", "getLogger", "(", "\"pypyr\"", ")", "root_logger", ".", "debug", "(", "f\"Root logger {root_logger.name} configured with level \"", "f\"{root_log_level}\"", ")" ]
Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization.
[ "Set", "the", "root", "logger", "pypyr", ".", "Do", "this", "before", "you", "do", "anything", "else", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/log/logger.py#L22-L39
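A sketch of the one-off initialization call; the log file name is arbitrary:

import logging
from pypyr.log.logger import set_root_logger

# console handler plus append to ./pypyr.log; log_path=None skips the file
set_root_logger(logging.DEBUG, log_path='pypyr.log')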
137
pypyr/pypyr-cli
pypyr/pipelinerunner.py
get_parsed_context
def get_parsed_context(pipeline, context_in_string): """Execute get_parsed_context handler if specified. Dynamically load the module specified by the context_parser key in pipeline dict and execute the get_parsed_context function on that module. Args: pipeline: dict. Pipeline object. context_in_string: string. Argument string used to initialize context. Returns: pypyr.context.Context() instance. Raises: AttributeError: parser specified on pipeline missing get_parsed_context function. """ logger.debug("starting") if 'context_parser' in pipeline: parser_module_name = pipeline['context_parser'] logger.debug(f"context parser found: {parser_module_name}") parser_module = pypyr.moduleloader.get_module(parser_module_name) try: logger.debug(f"running parser {parser_module_name}") result_context = parser_module.get_parsed_context( context_in_string) logger.debug(f"step {parser_module_name} done") # Downstream steps likely to expect context not to be None, hence # empty rather than None. if result_context is None: logger.debug(f"{parser_module_name} returned None. Using " "empty context instead") return pypyr.context.Context() else: return pypyr.context.Context(result_context) except AttributeError: logger.error(f"The parser {parser_module_name} doesn't have a " "get_parsed_context(context) function.") raise else: logger.debug("pipeline does not have custom context parser. Using " "empty context.") logger.debug("done") # initialize to an empty dictionary because you want to be able to run # with no context. return pypyr.context.Context()
python
def get_parsed_context(pipeline, context_in_string): logger.debug("starting") if 'context_parser' in pipeline: parser_module_name = pipeline['context_parser'] logger.debug(f"context parser found: {parser_module_name}") parser_module = pypyr.moduleloader.get_module(parser_module_name) try: logger.debug(f"running parser {parser_module_name}") result_context = parser_module.get_parsed_context( context_in_string) logger.debug(f"step {parser_module_name} done") # Downstream steps likely to expect context not to be None, hence # empty rather than None. if result_context is None: logger.debug(f"{parser_module_name} returned None. Using " "empty context instead") return pypyr.context.Context() else: return pypyr.context.Context(result_context) except AttributeError: logger.error(f"The parser {parser_module_name} doesn't have a " "get_parsed_context(context) function.") raise else: logger.debug("pipeline does not have custom context parser. Using " "empty context.") logger.debug("done") # initialize to an empty dictionary because you want to be able to run # with no context. return pypyr.context.Context()
[ "def", "get_parsed_context", "(", "pipeline", ",", "context_in_string", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "if", "'context_parser'", "in", "pipeline", ":", "parser_module_name", "=", "pipeline", "[", "'context_parser'", "]", "logger", ".", "debug", "(", "f\"context parser found: {parser_module_name}\"", ")", "parser_module", "=", "pypyr", ".", "moduleloader", ".", "get_module", "(", "parser_module_name", ")", "try", ":", "logger", ".", "debug", "(", "f\"running parser {parser_module_name}\"", ")", "result_context", "=", "parser_module", ".", "get_parsed_context", "(", "context_in_string", ")", "logger", ".", "debug", "(", "f\"step {parser_module_name} done\"", ")", "# Downstream steps likely to expect context not to be None, hence", "# empty rather than None.", "if", "result_context", "is", "None", ":", "logger", ".", "debug", "(", "f\"{parser_module_name} returned None. Using \"", "\"empty context instead\"", ")", "return", "pypyr", ".", "context", ".", "Context", "(", ")", "else", ":", "return", "pypyr", ".", "context", ".", "Context", "(", "result_context", ")", "except", "AttributeError", ":", "logger", ".", "error", "(", "f\"The parser {parser_module_name} doesn't have a \"", "\"get_parsed_context(context) function.\"", ")", "raise", "else", ":", "logger", ".", "debug", "(", "\"pipeline does not have custom context parser. Using \"", "\"empty context.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")", "# initialize to an empty dictionary because you want to be able to run", "# with no context.", "return", "pypyr", ".", "context", ".", "Context", "(", ")" ]
Execute get_parsed_context handler if specified. Dynamically load the module specified by the context_parser key in pipeline dict and execute the get_parsed_context function on that module. Args: pipeline: dict. Pipeline object. context_in_string: string. Argument string used to initialize context. Returns: pypyr.context.Context() instance. Raises: AttributeError: parser specified on pipeline missing get_parsed_context function.
[ "Execute", "get_parsed_context", "handler", "if", "specified", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L17-L65
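To make the handler contract concrete, here is a hypothetical custom parser module a pipeline could name in its context_parser key; the runner only requires that the module expose get_parsed_context(context_in_string):

# mycontextparser.py - hypothetical module, referenced from pipeline yaml as
#   context_parser: mycontextparser
def get_parsed_context(context_in_string):
    """Parse 'k1=v1,k2=v2' style input into a dict."""
    if not context_in_string:
        # returning None is fine: the runner substitutes an empty Context
        return None
    return dict(pair.split('=', 1) for pair in context_in_string.split(','))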
138
pypyr/pypyr-cli
pypyr/pipelinerunner.py
main
def main( pipeline_name, pipeline_context_input, working_dir, log_level, log_path, ): """Entry point for pypyr pipeline runner. Call this once per pypyr run. Call me if you want to run a pypyr pipeline from your own code. This function does some one-off 1st time initialization before running the actual pipeline. pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name: string. Name of pipeline, sans .yaml at end. pipeline_context_input: string. Initialize the pypyr context with this string. working_dir: path. Looks for ./pipelines and modules in this directory. log_level: int. Standard python log level enumerated value. log_path: os.path. Append log to this path. Returns: None """ pypyr.log.logger.set_root_logger(log_level, log_path) logger.debug("starting pypyr") # pipelines specify steps in python modules that load dynamically. # make it easy for the operator so that the cwd is automatically included # without needing to pip install a package 1st. pypyr.moduleloader.set_working_directory(working_dir) load_and_run_pipeline(pipeline_name=pipeline_name, pipeline_context_input=pipeline_context_input, working_dir=working_dir) logger.debug("pypyr done")
python
def main( pipeline_name, pipeline_context_input, working_dir, log_level, log_path, ): pypyr.log.logger.set_root_logger(log_level, log_path) logger.debug("starting pypyr") # pipelines specify steps in python modules that load dynamically. # make it easy for the operator so that the cwd is automatically included # without needing to pip install a package 1st. pypyr.moduleloader.set_working_directory(working_dir) load_and_run_pipeline(pipeline_name=pipeline_name, pipeline_context_input=pipeline_context_input, working_dir=working_dir) logger.debug("pypyr done")
[ "def", "main", "(", "pipeline_name", ",", "pipeline_context_input", ",", "working_dir", ",", "log_level", ",", "log_path", ",", ")", ":", "pypyr", ".", "log", ".", "logger", ".", "set_root_logger", "(", "log_level", ",", "log_path", ")", "logger", ".", "debug", "(", "\"starting pypyr\"", ")", "# pipelines specify steps in python modules that load dynamically.", "# make it easy for the operator so that the cwd is automatically included", "# without needing to pip install a package 1st.", "pypyr", ".", "moduleloader", ".", "set_working_directory", "(", "working_dir", ")", "load_and_run_pipeline", "(", "pipeline_name", "=", "pipeline_name", ",", "pipeline_context_input", "=", "pipeline_context_input", ",", "working_dir", "=", "working_dir", ")", "logger", ".", "debug", "(", "\"pypyr done\"", ")" ]
Entry point for pypyr pipeline runner. Call this once per pypyr run. Call me if you want to run a pypyr pipeline from your own code. This function does some one-off 1st time initialization before running the actual pipeline. pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name: string. Name of pipeline, sans .yaml at end. pipeline_context_input: string. Initialize the pypyr context with this string. working_dir: path. Looks for ./pipelines and modules in this directory. log_level: int. Standard python log level enumerated value. log_path: os.path. Append log to this path. Returns: None
[ "Entry", "point", "for", "pypyr", "pipeline", "runner", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L68-L108
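An invocation sketch; the pipeline name is hypothetical, and ./pipelines/mypipeline.yaml would have to exist relative to working_dir:

import logging
from pypyr.pipelinerunner import main

main(
    pipeline_name='mypipeline',          # loads ./pipelines/mypipeline.yaml
    pipeline_context_input='key=value',  # handed to the context_parser
    working_dir='.',
    log_level=logging.INFO,
    log_path=None,                       # console logging only
)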
139
pypyr/pypyr-cli
pypyr/pipelinerunner.py
prepare_context
def prepare_context(pipeline, context_in_string, context): """Prepare context for pipeline run. Args: pipeline: dict. Dictionary representing the pipeline. context_in_string: string. Argument string used to initialize context. context: pypyr.context.Context. Merge any new context generated from context_in_string into this context instance. Returns: None. The context instance to use for the pipeline run is contained in the context arg, it's not passed back as a function return. """ logger.debug("starting") parsed_context = get_parsed_context( pipeline=pipeline, context_in_string=context_in_string) context.update(parsed_context) logger.debug("done")
python
def prepare_context(pipeline, context_in_string, context): logger.debug("starting") parsed_context = get_parsed_context( pipeline=pipeline, context_in_string=context_in_string) context.update(parsed_context) logger.debug("done")
[ "def", "prepare_context", "(", "pipeline", ",", "context_in_string", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "parsed_context", "=", "get_parsed_context", "(", "pipeline", "=", "pipeline", ",", "context_in_string", "=", "context_in_string", ")", "context", ".", "update", "(", "parsed_context", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Prepare context for pipeline run. Args: pipeline: dict. Dictionary representing the pipeline. context_in_string: string. Argument string used to initialize context. context: pypyr.context.Context. Merge any new context generated from context_in_string into this context instance. Returns: None. The context instance to use for the pipeline run is contained in the context arg, it's not passed back as a function return.
[ "Prepare", "context", "for", "pipeline", "run", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L111-L133
140
pypyr/pypyr-cli
pypyr/pipelinerunner.py
load_and_run_pipeline
def load_and_run_pipeline(pipeline_name, pipeline_context_input=None, working_dir=None, context=None, parse_input=True, loader=None): """Load and run the specified pypyr pipeline. This function runs the actual pipeline by name. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. By default pypyr uses file loader. This means that pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name (str): Name of pipeline, sans .yaml at end. pipeline_context_input (str): Initialize the pypyr context with this string. working_dir (path): Look for pipelines and modules in this directory. If context arg passed, will use context.working_dir and ignore this argument. If context is None, working_dir must be specified. context (pypyr.context.Context): Use if you already have a Context object, such as if you are running a pipeline from within a pipeline and you want to re-use the same context object for the child pipeline. Any mutations of the context by the pipeline will be against this instance of it. parse_input (bool): run context_parser in pipeline. loader (str): str. optional. Absolute name of pipeline loader module. If not specified will use pypyr.pypeloaders.fileloader. Returns: None """ logger.debug(f"you asked to run pipeline: {pipeline_name}") if loader: logger.debug(f"you set the pype loader to: {loader}") else: loader = 'pypyr.pypeloaders.fileloader' logger.debug(f"use default pype loader: {loader}") logger.debug(f"you set the initial context to: {pipeline_context_input}") if context is None: context = pypyr.context.Context() context.working_dir = working_dir else: working_dir = context.working_dir # pipeline loading deliberately outside of try catch. The try catch will # try to run a failure-handler from the pipeline, but if the pipeline # doesn't exist there is no failure handler that can possibly run so this # is very much a fatal stop error. loader_module = pypyr.moduleloader.get_module(loader) try: get_pipeline_definition = getattr( loader_module, 'get_pipeline_definition' ) except AttributeError: logger.error( f"The pipeline loader {loader_module} doesn't have a " "get_pipeline_definition(pipeline_name, working_dir) function.") raise logger.debug(f"loading the pipeline definition with {loader_module}") pipeline_definition = get_pipeline_definition( pipeline_name=pipeline_name, working_dir=working_dir ) logger.debug(f"{loader_module} done") run_pipeline( pipeline=pipeline_definition, pipeline_context_input=pipeline_context_input, context=context, parse_input=parse_input )
python
def load_and_run_pipeline(pipeline_name, pipeline_context_input=None, working_dir=None, context=None, parse_input=True, loader=None): logger.debug(f"you asked to run pipeline: {pipeline_name}") if loader: logger.debug(f"you set the pype loader to: {loader}") else: loader = 'pypyr.pypeloaders.fileloader' logger.debug(f"use default pype loader: {loader}") logger.debug(f"you set the initial context to: {pipeline_context_input}") if context is None: context = pypyr.context.Context() context.working_dir = working_dir else: working_dir = context.working_dir # pipeline loading deliberately outside of try catch. The try catch will # try to run a failure-handler from the pipeline, but if the pipeline # doesn't exist there is no failure handler that can possibly run so this # is very much a fatal stop error. loader_module = pypyr.moduleloader.get_module(loader) try: get_pipeline_definition = getattr( loader_module, 'get_pipeline_definition' ) except AttributeError: logger.error( f"The pipeline loader {loader_module} doesn't have a " "get_pipeline_definition(pipeline_name, working_dir) function.") raise logger.debug(f"loading the pipeline definition with {loader_module}") pipeline_definition = get_pipeline_definition( pipeline_name=pipeline_name, working_dir=working_dir ) logger.debug(f"{loader_module} done") run_pipeline( pipeline=pipeline_definition, pipeline_context_input=pipeline_context_input, context=context, parse_input=parse_input )
[ "def", "load_and_run_pipeline", "(", "pipeline_name", ",", "pipeline_context_input", "=", "None", ",", "working_dir", "=", "None", ",", "context", "=", "None", ",", "parse_input", "=", "True", ",", "loader", "=", "None", ")", ":", "logger", ".", "debug", "(", "f\"you asked to run pipeline: {pipeline_name}\"", ")", "if", "loader", ":", "logger", ".", "debug", "(", "f\"you set the pype loader to: {loader}\"", ")", "else", ":", "loader", "=", "'pypyr.pypeloaders.fileloader'", "logger", ".", "debug", "(", "f\"use default pype loader: {loader}\"", ")", "logger", ".", "debug", "(", "f\"you set the initial context to: {pipeline_context_input}\"", ")", "if", "context", "is", "None", ":", "context", "=", "pypyr", ".", "context", ".", "Context", "(", ")", "context", ".", "working_dir", "=", "working_dir", "else", ":", "working_dir", "=", "context", ".", "working_dir", "# pipeline loading deliberately outside of try catch. The try catch will", "# try to run a failure-handler from the pipeline, but if the pipeline", "# doesn't exist there is no failure handler that can possibly run so this", "# is very much a fatal stop error.", "loader_module", "=", "pypyr", ".", "moduleloader", ".", "get_module", "(", "loader", ")", "try", ":", "get_pipeline_definition", "=", "getattr", "(", "loader_module", ",", "'get_pipeline_definition'", ")", "except", "AttributeError", ":", "logger", ".", "error", "(", "f\"The pipeline loader {loader_module} doesn't have a \"", "\"get_pipeline_definition(pipeline_name, working_dir) function.\"", ")", "raise", "logger", ".", "debug", "(", "f\"loading the pipeline definition with {loader_module}\"", ")", "pipeline_definition", "=", "get_pipeline_definition", "(", "pipeline_name", "=", "pipeline_name", ",", "working_dir", "=", "working_dir", ")", "logger", ".", "debug", "(", "f\"{loader_module} done\"", ")", "run_pipeline", "(", "pipeline", "=", "pipeline_definition", ",", "pipeline_context_input", "=", "pipeline_context_input", ",", "context", "=", "context", ",", "parse_input", "=", "parse_input", ")" ]
Load and run the specified pypyr pipeline. This function runs the actual pipeline by name. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. By default pypyr uses file loader. This means that pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name (str): Name of pipeline, sans .yaml at end. pipeline_context_input (str): Initialize the pypyr context with this string. working_dir (path): Look for pipelines and modules in this directory. If context arg passed, will use context.working_dir and ignore this argument. If context is None, working_dir must be specified. context (pypyr.context.Context): Use if you already have a Context object, such as if you are running a pipeline from within a pipeline and you want to re-use the same context object for the child pipeline. Any mutations of the context by the pipeline will be against this instance of it. parse_input (bool): run context_parser in pipeline. loader (str): str. optional. Absolute name of pipeline loader module. If not specified will use pypyr.pypeloaders.fileloader. Returns: None
[ "Load", "and", "run", "the", "specified", "pypyr", "pipeline", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L136-L215
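A sketch of the pipeline-calls-pipeline case the docstring describes: from inside a custom step, pass the live context so the child pipeline mutates the same instance. The child pipeline name is hypothetical:

from pypyr.pipelinerunner import load_and_run_pipeline

def run_step(context):
    # context already carries working_dir, so it isn't passed again here
    load_and_run_pipeline(
        pipeline_name='child-pipeline',  # hypothetical child pipeline
        context=context,
        parse_input=False,  # don't re-run the context_parser for the child
    )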
141
pypyr/pypyr-cli
pypyr/pipelinerunner.py
run_pipeline
def run_pipeline(pipeline, context, pipeline_context_input=None, parse_input=True): """Run the specified pypyr pipeline. This function runs the actual pipeline. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. Pipeline and context should be already loaded. Args: pipeline (dict): Dictionary representing the pipeline. context (pypyr.context.Context): Reusable context object. pipeline_context_input (str): Initialize the pypyr context with this string. parse_input (bool): run context_parser in pipeline. Returns: None """ logger.debug("starting") try: if parse_input: logger.debug("executing context_parser") prepare_context(pipeline=pipeline, context_in_string=pipeline_context_input, context=context) else: logger.debug("skipping context_parser") # run main steps pypyr.stepsrunner.run_step_group( pipeline_definition=pipeline, step_group_name='steps', context=context) # if nothing went wrong, run on_success logger.debug("pipeline steps complete. Running on_success steps now.") pypyr.stepsrunner.run_step_group( pipeline_definition=pipeline, step_group_name='on_success', context=context) except Exception: # yes, yes, don't catch Exception. Have to, though, to run the failure # handler. Also, it does raise it back up. logger.error("Something went wrong. Will now try to run on_failure.") # failure_step_group will log but swallow any errors pypyr.stepsrunner.run_failure_step_group( pipeline=pipeline, context=context) logger.debug("Raising original exception to caller.") raise logger.debug("done")
python
def run_pipeline(pipeline, context, pipeline_context_input=None, parse_input=True): logger.debug("starting") try: if parse_input: logger.debug("executing context_parser") prepare_context(pipeline=pipeline, context_in_string=pipeline_context_input, context=context) else: logger.debug("skipping context_parser") # run main steps pypyr.stepsrunner.run_step_group( pipeline_definition=pipeline, step_group_name='steps', context=context) # if nothing went wrong, run on_success logger.debug("pipeline steps complete. Running on_success steps now.") pypyr.stepsrunner.run_step_group( pipeline_definition=pipeline, step_group_name='on_success', context=context) except Exception: # yes, yes, don't catch Exception. Have to, though, to run the failure # handler. Also, it does raise it back up. logger.error("Something went wrong. Will now try to run on_failure.") # failure_step_group will log but swallow any errors pypyr.stepsrunner.run_failure_step_group( pipeline=pipeline, context=context) logger.debug("Raising original exception to caller.") raise logger.debug("done")
[ "def", "run_pipeline", "(", "pipeline", ",", "context", ",", "pipeline_context_input", "=", "None", ",", "parse_input", "=", "True", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "try", ":", "if", "parse_input", ":", "logger", ".", "debug", "(", "\"executing context_parser\"", ")", "prepare_context", "(", "pipeline", "=", "pipeline", ",", "context_in_string", "=", "pipeline_context_input", ",", "context", "=", "context", ")", "else", ":", "logger", ".", "debug", "(", "\"skipping context_parser\"", ")", "# run main steps", "pypyr", ".", "stepsrunner", ".", "run_step_group", "(", "pipeline_definition", "=", "pipeline", ",", "step_group_name", "=", "'steps'", ",", "context", "=", "context", ")", "# if nothing went wrong, run on_success", "logger", ".", "debug", "(", "\"pipeline steps complete. Running on_success steps now.\"", ")", "pypyr", ".", "stepsrunner", ".", "run_step_group", "(", "pipeline_definition", "=", "pipeline", ",", "step_group_name", "=", "'on_success'", ",", "context", "=", "context", ")", "except", "Exception", ":", "# yes, yes, don't catch Exception. Have to, though, to run the failure", "# handler. Also, it does raise it back up.", "logger", ".", "error", "(", "\"Something went wrong. Will now try to run on_failure.\"", ")", "# failure_step_group will log but swallow any errors", "pypyr", ".", "stepsrunner", ".", "run_failure_step_group", "(", "pipeline", "=", "pipeline", ",", "context", "=", "context", ")", "logger", ".", "debug", "(", "\"Raising original exception to caller.\"", ")", "raise", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run the specified pypyr pipeline. This function runs the actual pipeline. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. Pipeline and context should be already loaded. Args: pipeline (dict): Dictionary representing the pipeline. context (pypyr.context.Context): Reusable context object. pipeline_context_input (str): Initialize the pypyr context with this string. parse_input (bool): run context_parser in pipeline. Returns: None
[ "Run", "the", "specified", "pypyr", "pipeline", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pipelinerunner.py#L218-L276
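If the pipeline dict is already in hand from any loader, run_pipeline can be driven directly. A minimal sketch, assuming pypyr.steps.echo reads an echoMe context key:

from pypyr.context import Context
from pypyr.pipelinerunner import run_pipeline

pipeline = {
    'steps': [
        {'name': 'pypyr.steps.echo', 'in': {'echoMe': 'hello pypyr'}},
    ],
}
run_pipeline(pipeline=pipeline, context=Context(), parse_input=False)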
142
pypyr/pypyr-cli
pypyr/steps/filewriteyaml.py
run_step
def run_step(context): """Write payload out to yaml file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteYaml - path. mandatory. path-like. Write output file to here. Will create directories in path for you. - payload. optional. Write this to output file. If not specified, output entire context. Returns: None. Raises: pypyr.errors.KeyNotInContextError: fileWriteYaml or fileWriteYaml['path'] missing in context. pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or fileWriteYaml['path'] exists but is None. """ logger.debug("started") context.assert_child_key_has_value('fileWriteYaml', 'path', __name__) out_path = context.get_formatted_string(context['fileWriteYaml']['path']) # doing it like this to safeguard against accidentally dumping all context # with potentially sensitive values in it to disk if payload exists but is # None. is_payload_specified = 'payload' in context['fileWriteYaml'] yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context() logger.debug(f"opening destination file for writing: {out_path}") os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True) with open(out_path, 'w') as outfile: if is_payload_specified: payload = context['fileWriteYaml']['payload'] formatted_iterable = context.get_formatted_iterable(payload) else: formatted_iterable = context.get_formatted_iterable(context) yaml_writer.dump(formatted_iterable, outfile) logger.info(f"formatted context content and wrote to {out_path}") logger.debug("done")
python
def run_step(context): logger.debug("started") context.assert_child_key_has_value('fileWriteYaml', 'path', __name__) out_path = context.get_formatted_string(context['fileWriteYaml']['path']) # doing it like this to safeguard against accidentally dumping all context # with potentially sensitive values in it to disk if payload exists but is # None. is_payload_specified = 'payload' in context['fileWriteYaml'] yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context() logger.debug(f"opening destination file for writing: {out_path}") os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True) with open(out_path, 'w') as outfile: if is_payload_specified: payload = context['fileWriteYaml']['payload'] formatted_iterable = context.get_formatted_iterable(payload) else: formatted_iterable = context.get_formatted_iterable(context) yaml_writer.dump(formatted_iterable, outfile) logger.info(f"formatted context content and wrote to {out_path}") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_child_key_has_value", "(", "'fileWriteYaml'", ",", "'path'", ",", "__name__", ")", "out_path", "=", "context", ".", "get_formatted_string", "(", "context", "[", "'fileWriteYaml'", "]", "[", "'path'", "]", ")", "# doing it like this to safeguard against accidentally dumping all context", "# with potentially sensitive values in it to disk if payload exists but is", "# None.", "is_payload_specified", "=", "'payload'", "in", "context", "[", "'fileWriteYaml'", "]", "yaml_writer", "=", "pypyr", ".", "yaml", ".", "get_yaml_parser_roundtrip_for_context", "(", ")", "logger", ".", "debug", "(", "f\"opening destination file for writing: {out_path}\"", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "out_path", ")", ")", ",", "exist_ok", "=", "True", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "outfile", ":", "if", "is_payload_specified", ":", "payload", "=", "context", "[", "'fileWriteYaml'", "]", "[", "'payload'", "]", "formatted_iterable", "=", "context", ".", "get_formatted_iterable", "(", "payload", ")", "else", ":", "formatted_iterable", "=", "context", ".", "get_formatted_iterable", "(", "context", ")", "yaml_writer", ".", "dump", "(", "formatted_iterable", ",", "outfile", ")", "logger", ".", "info", "(", "f\"formatted context content and wrote to {out_path}\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Write payload out to yaml file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteYaml - path. mandatory. path-like. Write output file to here. Will create directories in path for you. - payload. optional. Write this to output file. If not specified, output entire context. Returns: None. Raises: pypyr.errors.KeyNotInContextError: fileWriteYaml or fileWriteYaml['path'] missing in context. pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or fileWriteYaml['path'] exists but is None.
[ "Write", "payload", "out", "to", "yaml", "file", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filewriteyaml.py#L10-L55
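Driving the step from python; the path and payload are hypothetical, and the {greeting} expression formats from context at write time:

from pypyr.context import Context
import pypyr.steps.filewriteyaml as filewriteyaml

context = Context({
    'greeting': 'hello',
    'fileWriteYaml': {
        'path': 'out/settings.yaml',           # directories created as needed
        'payload': {'message': '{greeting}'},  # writes message: hello
    },
})
filewriteyaml.run_step(context)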
143
pypyr/pypyr-cli
pypyr/steps/debug.py
run_step
def run_step(context): """Print debug info to console. context is a dictionary or dictionary-like. If you use pypyr.steps.debug as a simple step (i.e. you do NOT specify the debug input context), it will just dump the entire context to stdout. Configure the debug step with the following optional context item: debug: keys: str (for single key) or list (of str keys). Only dump the specified keys. format: bool. Defaults False. Applies formatting expressions on dump. """ logger.debug("started") debug = context.get('debug', None) if debug: keys = debug.get('keys', None) format = debug.get('format', False) if keys: logger.debug(f"Writing to output: {keys}") if isinstance(keys, str): payload = {keys: context[keys]} else: payload = {k: context[k] for k in keys} else: logger.debug( "No keys specified. Writing entire context to output.") payload = context if format: payload = context.get_formatted_iterable(payload) else: payload = context logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}') logger.debug("done")
python
def run_step(context): logger.debug("started") debug = context.get('debug', None) if debug: keys = debug.get('keys', None) format = debug.get('format', False) if keys: logger.debug(f"Writing to output: {keys}") if isinstance(keys, str): payload = {keys: context[keys]} else: payload = {k: context[k] for k in keys} else: logger.debug( "No keys specified. Writing entire context to output.") payload = context if format: payload = context.get_formatted_iterable(payload) else: payload = context logger.info(f'\n{json.dumps(payload, indent=2, ensure_ascii=False)}') logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "debug", "=", "context", ".", "get", "(", "'debug'", ",", "None", ")", "if", "debug", ":", "keys", "=", "debug", ".", "get", "(", "'keys'", ",", "None", ")", "format", "=", "debug", ".", "get", "(", "'format'", ",", "False", ")", "if", "keys", ":", "logger", ".", "debug", "(", "f\"Writing to output: {keys}\"", ")", "if", "isinstance", "(", "keys", ",", "str", ")", ":", "payload", "=", "{", "keys", ":", "context", "[", "keys", "]", "}", "else", ":", "payload", "=", "{", "k", ":", "context", "[", "k", "]", "for", "k", "in", "keys", "}", "else", ":", "logger", ".", "debug", "(", "\"No keys specified. Writing entire context to output.\"", ")", "payload", "=", "context", "if", "format", ":", "payload", "=", "context", ".", "get_formatted_iterable", "(", "payload", ")", "else", ":", "payload", "=", "context", "logger", ".", "info", "(", "f'\\n{json.dumps(payload, indent=2, ensure_ascii=False)}'", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Print debug info to console. context is a dictionary or dictionary-like. If you use pypyr.steps.debug as a simple step (i.e. you do NOT specify the debug input context), it will just dump the entire context to stdout. Configure the debug step with the following optional context item: debug: keys: str (for single key) or list (of str keys). Only dump the specified keys. format: bool. Defaults False. Applies formatting expressions on dump.
[ "Print", "debug", "info", "to", "console", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/debug.py#L23-L64
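A sketch of the keyed mode; output goes through logger.info, so logging must be configured (e.g. via set_root_logger above) for anything to show:

from pypyr.context import Context
import pypyr.steps.debug as debug

context = Context({'k1': 'down', 'k2': '{k1} the valleys',
                   'debug': {'keys': 'k2', 'format': True}})
debug.run_step(context)  # logs {"k2": "down the valleys"}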
144
pypyr/pypyr-cli
pypyr/errors.py
get_error_name
def get_error_name(error): """Return canonical error name as string. For builtin errors like ValueError or Exception, will return the bare name, like ValueError or Exception. For all other exceptions, will return modulename.errorname, such as arbpackage.mod.myerror Args: error: Exception object. Returns: str. Canonical error name. """ error_type = type(error) if error_type.__module__ in ['__main__', 'builtins']: return error_type.__name__ else: return f'{error_type.__module__}.{error_type.__name__}'
python
def get_error_name(error): error_type = type(error) if error_type.__module__ in ['__main__', 'builtins']: return error_type.__name__ else: return f'{error_type.__module__}.{error_type.__name__}'
[ "def", "get_error_name", "(", "error", ")", ":", "error_type", "=", "type", "(", "error", ")", "if", "error_type", ".", "__module__", "in", "[", "'__main__'", ",", "'builtins'", "]", ":", "return", "error_type", ".", "__name__", "else", ":", "return", "f'{error_type.__module__}.{error_type.__name__}'" ]
Return canonical error name as string. For builtin errors like ValueError or Exception, will return the bare name, like ValueError or Exception. For all other exceptions, will return modulename.errorname, such as arbpackage.mod.myerror Args: error: Exception object. Returns: str. Canonical error name.
[ "Return", "canonical", "error", "name", "as", "string", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/errors.py#L7-L27
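Quick examples of both branches; PyModuleNotFoundError living in pypyr.errors is consistent with get_module below, but treat the exact class as an assumption:

from pypyr.errors import get_error_name
import pypyr.errors

print(get_error_name(ValueError('nope')))
# ValueError - builtin, so bare name
print(get_error_name(pypyr.errors.PyModuleNotFoundError('gone')))
# pypyr.errors.PyModuleNotFoundError - module-qualified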
145
pypyr/pypyr-cli
pypyr/moduleloader.py
get_module
def get_module(module_abs_import): """Use importlib to get the module dynamically. Get instance of the module specified by the module_abs_import. This means that module_abs_import must be resolvable from this package. Args: module_abs_import: string. Absolute name of module to import. Raises: PyModuleNotFoundError: if module not found. """ logger.debug("starting") logger.debug(f"loading module {module_abs_import}") try: imported_module = importlib.import_module(module_abs_import) logger.debug("done") return imported_module except ModuleNotFoundError as err: msg = ("The module doesn't exist. Looking for a file like this: " f"{module_abs_import}") extended_msg = (f"{module_abs_import}.py should be in your working " "dir or it should be installed to the python path." "\nIf you have 'package.sub.mod' your current working " "dir should contain ./package/sub/mod.py\n" "If you specified 'mymodulename', your current " "working dir should contain ./mymodulename.py\n" "If the module is not in your current working dir, it " "must exist in your current python path - so you " "should have run pip install or setup.py") logger.error(msg) raise PyModuleNotFoundError(extended_msg) from err
python
def get_module(module_abs_import): logger.debug("starting") logger.debug(f"loading module {module_abs_import}") try: imported_module = importlib.import_module(module_abs_import) logger.debug("done") return imported_module except ModuleNotFoundError as err: msg = ("The module doesn't exist. Looking for a file like this: " f"{module_abs_import}") extended_msg = (f"{module_abs_import}.py should be in your working " "dir or it should be installed to the python path." "\nIf you have 'package.sub.mod' your current working " "dir should contain ./package/sub/mod.py\n" "If you specified 'mymodulename', your current " "working dir should contain ./mymodulename.py\n" "If the module is not in your current working dir, it " "must exist in your current python path - so you " "should have run pip install or setup.py") logger.error(msg) raise PyModuleNotFoundError(extended_msg) from err
[ "def", "get_module", "(", "module_abs_import", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "logger", ".", "debug", "(", "f\"loading module {module_abs_import}\"", ")", "try", ":", "imported_module", "=", "importlib", ".", "import_module", "(", "module_abs_import", ")", "logger", ".", "debug", "(", "\"done\"", ")", "return", "imported_module", "except", "ModuleNotFoundError", "as", "err", ":", "msg", "=", "(", "\"The module doesn't exist. Looking for a file like this: \"", "f\"{module_abs_import}\"", ")", "extended_msg", "=", "(", "f\"{module_abs_import}.py should be in your working \"", "\"dir or it should be installed to the python path.\"", "\"\\nIf you have 'package.sub.mod' your current working \"", "\"dir should contain ./package/sub/mod.py\\n\"", "\"If you specified 'mymodulename', your current \"", "\"working dir should contain ./mymodulename.py\\n\"", "\"If the module is not in your current working dir, it \"", "\"must exist in your current python path - so you \"", "\"should have run pip install or setup.py\"", ")", "logger", ".", "error", "(", "msg", ")", "raise", "PyModuleNotFoundError", "(", "extended_msg", ")", "from", "err" ]
Use importlib to get the module dynamically. Get instance of the module specified by the module_abs_import. This means that module_abs_import must be resolvable from this package. Args: module_abs_import: string. Absolute name of module to import. Raises: PyModuleNotFoundError: if module not found.
[ "Use", "importlib", "to", "get", "the", "module", "dynamically", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/moduleloader.py#L15-L48
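Usage sketch; any absolute module name resolvable on sys.path works, pypyr.steps.echo being one that ships with pypyr:

from pypyr.moduleloader import get_module

echo_step = get_module('pypyr.steps.echo')
echo_step.run_step  # dynamically loaded attribute, ready to call
# an unresolvable name raises PyModuleNotFoundError with sys.path guidance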
146
pypyr/pypyr-cli
pypyr/moduleloader.py
set_working_directory
def set_working_directory(working_directory): """Add working_directory to sys.path. This allows dynamic loading of arbitrary python modules in cwd. Args: working_directory: string. Path to add to sys.path """ logger.debug("starting") logger.debug(f"adding {working_directory} to sys.path") sys.path.append(working_directory) logger.debug("done")
python
def set_working_directory(working_directory): logger.debug("starting") logger.debug(f"adding {working_directory} to sys.path") sys.path.append(working_directory) logger.debug("done")
[ "def", "set_working_directory", "(", "working_directory", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "logger", ".", "debug", "(", "f\"adding {working_directory} to sys.paths\"", ")", "sys", ".", "path", ".", "append", "(", "working_directory", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Add working_directory to sys.path. This allows dynamic loading of arbitrary python modules in cwd. Args: working_directory: string. Path to add to sys.path
[ "Add", "working_directory", "to", "sys", ".", "paths", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/moduleloader.py#L51-L65
147
pypyr/pypyr-cli
pypyr/context.py
Context.assert_child_key_has_value
def assert_child_key_has_value(self, parent, child, caller): """Assert that context contains key that has child which has a value. Args: parent: parent key child: validate this sub-key of parent exists AND isn't None. caller: string. Calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None """ assert parent, ("parent parameter must be specified.") assert child, ("child parameter must be specified.") self.assert_key_has_value(parent, caller) try: child_exists = child in self[parent] except TypeError as err: # This happens if parent isn't iterable raise ContextError( f"context['{parent}'] must be iterable and contain '{child}' " f"for {caller}. {err}") from err if child_exists: if self[parent][child] is None: raise KeyInContextHasNoValueError( f"context['{parent}']['{child}'] must have a value for " f"{caller}.") else: raise KeyNotInContextError( f"context['{parent}']['{child}'] doesn't " f"exist. It must exist for {caller}.")
python
def assert_child_key_has_value(self, parent, child, caller): assert parent, ("parent parameter must be specified.") assert child, ("child parameter must be specified.") self.assert_key_has_value(parent, caller) try: child_exists = child in self[parent] except TypeError as err: # This happens if parent isn't iterable raise ContextError( f"context['{parent}'] must be iterable and contain '{child}' " f"for {caller}. {err}") from err if child_exists: if self[parent][child] is None: raise KeyInContextHasNoValueError( f"context['{parent}']['{child}'] must have a value for " f"{caller}.") else: raise KeyNotInContextError( f"context['{parent}']['{child}'] doesn't " f"exist. It must exist for {caller}.")
[ "def", "assert_child_key_has_value", "(", "self", ",", "parent", ",", "child", ",", "caller", ")", ":", "assert", "parent", ",", "(", "\"parent parameter must be specified.\"", ")", "assert", "child", ",", "(", "\"child parameter must be specified.\"", ")", "self", ".", "assert_key_has_value", "(", "parent", ",", "caller", ")", "try", ":", "child_exists", "=", "child", "in", "self", "[", "parent", "]", "except", "TypeError", "as", "err", ":", "# This happens if parent isn't iterable", "raise", "ContextError", "(", "f\"context['{parent}'] must be iterable and contain '{child}' \"", "f\"for {caller}. {err}\"", ")", "from", "err", "if", "child_exists", ":", "if", "self", "[", "parent", "]", "[", "child", "]", "is", "None", ":", "raise", "KeyInContextHasNoValueError", "(", "f\"context['{parent}']['{child}'] must have a value for \"", "f\"{caller}.\"", ")", "else", ":", "raise", "KeyNotInContextError", "(", "f\"context['{parent}']['{child}'] doesn't \"", "f\"exist. It must exist for {caller}.\"", ")" ]
Assert that context contains key that has child which has a value. Args: parent: parent key child: validate this sub-key of parent exists AND isn't None. caller: string. Calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
[ "Assert", "that", "context", "contains", "key", "that", "has", "child", "which", "has", "a", "value", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L48-L83
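The assert_* family that follows all behaves like this sketch; the key names are hypothetical:

from pypyr.context import Context
from pypyr.errors import KeyInContextHasNoValueError

context = Context({'fetch': {'url': None}})
try:
    context.assert_child_key_has_value('fetch', 'url', caller='mystep')
except KeyInContextHasNoValueError as err:
    print(err)  # context['fetch']['url'] must have a value for mystep.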
148
pypyr/pypyr-cli
pypyr/context.py
Context.assert_key_has_value
def assert_key_has_value(self, key, caller): """Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. Calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None """ assert key, ("key parameter must be specified.") self.assert_key_exists(key, caller) if self[key] is None: raise KeyInContextHasNoValueError( f"context['{key}'] must have a value for {caller}.")
python
def assert_key_has_value(self, key, caller): assert key, ("key parameter must be specified.") self.assert_key_exists(key, caller) if self[key] is None: raise KeyInContextHasNoValueError( f"context['{key}'] must have a value for {caller}.")
[ "def", "assert_key_has_value", "(", "self", ",", "key", ",", "caller", ")", ":", "assert", "key", ",", "(", "\"key parameter must be specified.\"", ")", "self", ".", "assert_key_exists", "(", "key", ",", "caller", ")", "if", "self", "[", "key", "]", "is", "None", ":", "raise", "KeyInContextHasNoValueError", "(", "f\"context['{key}'] must have a value for {caller}.\"", ")" ]
Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. Calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
[ "Assert", "that", "context", "contains", "key", "which", "also", "has", "a", "value", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L102-L122
149
pypyr/pypyr-cli
pypyr/context.py
Context.assert_keys_exist
def assert_keys_exist(self, caller, *keys): """Assert that context contains keys. Args: keys: validates that these keys exist in context caller: string. Calling function or module name - this is used to construct error messages Raises: KeyNotInContextError: When key doesn't exist in context. """ assert keys, ("*keys parameter must be specified.") for key in keys: self.assert_key_exists(key, caller)
python
def assert_keys_exist(self, caller, *keys): assert keys, ("*keys parameter must be specified.") for key in keys: self.assert_key_exists(key, caller)
[ "def", "assert_keys_exist", "(", "self", ",", "caller", ",", "*", "keys", ")", ":", "assert", "keys", ",", "(", "\"*keys parameter must be specified.\"", ")", "for", "key", "in", "keys", ":", "self", ".", "assert_key_exists", "(", "key", ",", "caller", ")" ]
Assert that context contains keys. Args: keys: validates that these keys exist in context caller: string. Calling function or module name - this is used to construct error messages Raises: KeyNotInContextError: When key doesn't exist in context.
[ "Assert", "that", "context", "contains", "keys", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L167-L181
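The variadic form takes caller first, then any number of keys:

from pypyr.context import Context

context = Context({'a': 1, 'b': 2})
context.assert_keys_exist('mystep', 'a', 'b')  # passes silently
# a missing key, e.g. 'c', would raise KeyNotInContextError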
150
pypyr/pypyr-cli
pypyr/context.py
Context.assert_keys_have_values
def assert_keys_have_values(self, caller, *keys): """Check that all keys in the list are in context and all have values. Args: *keys: Will check each of these keys in context caller: string. Calling function name - just used for informational messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if *keys is None """ for key in keys: self.assert_key_has_value(key, caller)
python
def assert_keys_have_values(self, caller, *keys): for key in keys: self.assert_key_has_value(key, caller)
[ "def", "assert_keys_have_values", "(", "self", ",", "caller", ",", "*", "keys", ")", ":", "for", "key", "in", "keys", ":", "self", ".", "assert_key_has_value", "(", "key", ",", "caller", ")" ]
Check that all keys in the list are in context and all have values. Args: *keys: Will check each of these keys in context caller: string. Calling function name - just used for informational messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if *keys is None
[ "Check", "that", "keys", "list", "are", "all", "in", "context", "and", "all", "have", "values", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L183-L198
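For illustration, a minimal sketch of how the three assertion helpers above behave, assuming a Context built from a plain dict; the 'my_step' caller name is just an illustrative label:

from pypyr.context import Context
from pypyr.errors import KeyInContextHasNoValueError

context = Context({'eggs': 'fried', 'bacon': None})

# both keys exist, so this passes silently
context.assert_keys_exist('my_step', 'eggs', 'bacon')

# 'eggs' exists and is not None, so this passes too
context.assert_key_has_value('eggs', 'my_step')

# 'bacon' exists but is None, so the value assertion raises
try:
    context.assert_keys_have_values('my_step', 'eggs', 'bacon')
except KeyInContextHasNoValueError as err:
    print(err)  # context['bacon'] must have a value for my_step.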
151
pypyr/pypyr-cli
pypyr/context.py
Context.get_formatted_iterable
def get_formatted_iterable(self, obj, memo=None): """Recursively loop through obj, formatting as it goes. Interpolates strings from the context dictionary. This is not a full on deepcopy, and it's on purpose not a full on deepcopy. It will handle dict, list, set, tuple for iteration, without any especial cuteness for other types or types not derived from these. For lists: if value is a string, format it. For dicts: format key. If value str, format it. For sets/tuples: if type str, format it. This is what formatting or interpolating a string means: So where a string like this 'Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: obj: iterable. Recurse through and format strings found in dicts, lists, tuples. Does not mutate the input iterable. memo: dict. Don't use. Used internally on recursion to optimize recursive loops. Returns: Iterable identical in structure to the input iterable. """ if memo is None: memo = {} obj_id = id(obj) already_done = memo.get(obj_id, None) if already_done is not None: return already_done if isinstance(obj, str): new = self.get_formatted_string(obj) elif isinstance(obj, SpecialTagDirective): new = obj.get_value(self) elif isinstance(obj, (bytes, bytearray)): new = obj elif isinstance(obj, Mapping): # dicts new = obj.__class__() for k, v in obj.items(): new[self.get_formatted_string( k)] = self.get_formatted_iterable(v, memo) elif isinstance(obj, (Sequence, Set)): # list, set, tuple. Bytes and str won't fall into this branch coz # they're explicitly checked further up in the if. new = obj.__class__(self.get_formatted_iterable(v, memo) for v in obj) else: # int, float, bool, function, etc. return obj # If obj was returned as-is (no copy made), don't memoize. if new is not obj: memo[obj_id] = new return new
python
def get_formatted_iterable(self, obj, memo=None): if memo is None: memo = {} obj_id = id(obj) already_done = memo.get(obj_id, None) if already_done is not None: return already_done if isinstance(obj, str): new = self.get_formatted_string(obj) elif isinstance(obj, SpecialTagDirective): new = obj.get_value(self) elif isinstance(obj, (bytes, bytearray)): new = obj elif isinstance(obj, Mapping): # dicts new = obj.__class__() for k, v in obj.items(): new[self.get_formatted_string( k)] = self.get_formatted_iterable(v, memo) elif isinstance(obj, (Sequence, Set)): # list, set, tuple. Bytes and str won't fall into this branch coz # they're explicitly checked further up in the if. new = obj.__class__(self.get_formatted_iterable(v, memo) for v in obj) else: # int, float, bool, function, etc. return obj # If obj was returned as-is (no copy made), don't memoize. if new is not obj: memo[obj_id] = new return new
[ "def", "get_formatted_iterable", "(", "self", ",", "obj", ",", "memo", "=", "None", ")", ":", "if", "memo", "is", "None", ":", "memo", "=", "{", "}", "obj_id", "=", "id", "(", "obj", ")", "already_done", "=", "memo", ".", "get", "(", "obj_id", ",", "None", ")", "if", "already_done", "is", "not", "None", ":", "return", "already_done", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "new", "=", "self", ".", "get_formatted_string", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "SpecialTagDirective", ")", ":", "new", "=", "obj", ".", "get_value", "(", "self", ")", "elif", "isinstance", "(", "obj", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "new", "=", "obj", "elif", "isinstance", "(", "obj", ",", "Mapping", ")", ":", "# dicts", "new", "=", "obj", ".", "__class__", "(", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", ":", "new", "[", "self", ".", "get_formatted_string", "(", "k", ")", "]", "=", "self", ".", "get_formatted_iterable", "(", "v", ",", "memo", ")", "elif", "isinstance", "(", "obj", ",", "(", "Sequence", ",", "Set", ")", ")", ":", "# list, set, tuple. Bytes and str won't fall into this branch coz", "# they're expicitly checked further up in the if.", "new", "=", "obj", ".", "__class__", "(", "self", ".", "get_formatted_iterable", "(", "v", ",", "memo", ")", "for", "v", "in", "obj", ")", "else", ":", "# int, float, bool, function, et.", "return", "obj", "# If is its own copy, don't memoize.", "if", "new", "is", "not", "obj", ":", "memo", "[", "obj_id", "]", "=", "new", "return", "new" ]
Recursively loop through obj, formatting as it goes. Interpolates strings from the context dictionary. This is not a full on deepcopy, and it's on purpose not a full on deepcopy. It will handle dict, list, set, tuple for iteration, without any especial cuteness for other types or types not derived from these. For lists: if value is a string, format it. For dicts: format key. If value str, format it. For sets/tuples: if type str, format it. This is what formatting or interpolating a string means: So where a string like this 'Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: obj: iterable. Recurse through and format strings found in dicts, lists, tuples. Does not mutate the input iterable. memo: dict. Don't use. Used internally on recursion to optimize recursive loops. Returns: Iterable identical in structure to the input iterable.
[ "Recursively", "loop", "through", "obj", "formatting", "as", "it", "goes", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L298-L361
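A hedged sketch of get_formatted_iterable on a nested payload; the keys and values here are invented for illustration:

from pypyr.context import Context

context = Context({'key1': 'down', 'key2': 'valleys'})
payload = {'song': 'Piping {key1} the {key2} wild',
           'lines': ['{key1}', 42]}

formatted = context.get_formatted_iterable(payload)
# formatted == {'song': 'Piping down the valleys wild',
#               'lines': ['down', 42]}
# the input payload itself is not mutated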
152
pypyr/pypyr-cli
pypyr/context.py
Context.get_formatted_string
def get_formatted_string(self, input_string): """Return formatted value for input_string. get_formatted gets a context[key] value. get_formatted_string is for any arbitrary string that is not in the context. Only valid if input_string is a type string. Return a string interpolated from the context dictionary. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse for substitutions. Returns: Formatted string. Raises: KeyNotInContextError: context[key] has {somekey} where somekey does not exist in context dictionary. TypeError: Attempt operation on a non-string type. """ if isinstance(input_string, str): try: return self.get_processed_string(input_string) except KeyNotInContextError as err: # Wrapping the KeyError into a less cryptic error for end-user # friendliness raise KeyNotInContextError( f'Unable to format \'{input_string}\' because {err}' ) from err elif isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: raise TypeError(f"can only format on strings. {input_string} is a " f"{type(input_string)} instead.")
python
def get_formatted_string(self, input_string): if isinstance(input_string, str): try: return self.get_processed_string(input_string) except KeyNotInContextError as err: # Wrapping the KeyError into a less cryptic error for end-user # friendliness raise KeyNotInContextError( f'Unable to format \'{input_string}\' because {err}' ) from err elif isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: raise TypeError(f"can only format on strings. {input_string} is a " f"{type(input_string)} instead.")
[ "def", "get_formatted_string", "(", "self", ",", "input_string", ")", ":", "if", "isinstance", "(", "input_string", ",", "str", ")", ":", "try", ":", "return", "self", ".", "get_processed_string", "(", "input_string", ")", "except", "KeyNotInContextError", "as", "err", ":", "# Wrapping the KeyError into a less cryptic error for end-user", "# friendliness", "raise", "KeyNotInContextError", "(", "f'Unable to format \\'{input_string}\\' because {err}'", ")", "from", "err", "elif", "isinstance", "(", "input_string", ",", "SpecialTagDirective", ")", ":", "return", "input_string", ".", "get_value", "(", "self", ")", "else", ":", "raise", "TypeError", "(", "f\"can only format on strings. {input_string} is a \"", "f\"{type(input_string)} instead.\"", ")" ]
Return formatted value for input_string. get_formatted gets a context[key] value. get_formatted_string is for any arbitrary string that is not in the context. Only valid if input_string is a type string. Return a string interpolated from the context dictionary. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse for substitutions. Returns: Formatted string. Raises: KeyNotInContextError: context[key] has {somekey} where somekey does not exist in context dictionary. TypeError: Attempt operation on a non-string type.
[ "Return", "formatted", "value", "for", "input_string", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L363-L403
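A quick usage sketch of get_formatted_string, reusing the docstring's own example values:

from pypyr.context import Context

context = Context({'key1': 'down', 'key2': 'valleys', 'key3': 'value3'})
print(context.get_formatted_string('Piping {key1} the {key2} wild'))
# Piping down the valleys wild

try:
    context.get_formatted_string(42)  # only str (or special tag) input allowed
except TypeError as err:
    print(err)  # can only format on strings. 42 is a <class 'int'> instead.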
153
pypyr/pypyr-cli
pypyr/context.py
Context.get_formatted_as_type
def get_formatted_as_type(self, value, default=None, out_type=str): """Return formatted value for input value, returns as out_type. Caveat emptor: if out_type is bool and value is a string, return will be True if the string is 'True', '1' or '1.0' (case-insensitive). It will be False for all other cases. Args: value: the value to format default: if value is None, set to this out_type: cast return as this type Returns: Formatted value of type out_type """ if value is None: value = default if isinstance(value, SpecialTagDirective): result = value.get_value(self) return types.cast_to_type(result, out_type) if isinstance(value, str): result = self.get_formatted_string(value) result_type = type(result) if out_type is result_type: # get_formatted_string result is already a string return result elif out_type is bool and result_type is str: # casting a str to bool is always True, hence special case. If # the str value is 'False'/'false', presumably user can # reasonably expect a bool False response. return result.lower() in ['true', '1', '1.0'] else: return out_type(result) else: return out_type(value)
python
def get_formatted_as_type(self, value, default=None, out_type=str): if value is None: value = default if isinstance(value, SpecialTagDirective): result = value.get_value(self) return types.cast_to_type(result, out_type) if isinstance(value, str): result = self.get_formatted_string(value) result_type = type(result) if out_type is result_type: # get_formatted_string result is already a string return result elif out_type is bool and result_type is str: # casting a str to bool is always True, hence special case. If # the str value is 'False'/'false', presumably user can # reasonably expect a bool False response. return result.lower() in ['true', '1', '1.0'] else: return out_type(result) else: return out_type(value)
[ "def", "get_formatted_as_type", "(", "self", ",", "value", ",", "default", "=", "None", ",", "out_type", "=", "str", ")", ":", "if", "value", "is", "None", ":", "value", "=", "default", "if", "isinstance", "(", "value", ",", "SpecialTagDirective", ")", ":", "result", "=", "value", ".", "get_value", "(", "self", ")", "return", "types", ".", "cast_to_type", "(", "result", ",", "out_type", ")", "if", "isinstance", "(", "value", ",", "str", ")", ":", "result", "=", "self", ".", "get_formatted_string", "(", "value", ")", "result_type", "=", "type", "(", "result", ")", "if", "out_type", "is", "result_type", ":", "# get_formatted_string result is already a string", "return", "result", "elif", "out_type", "is", "bool", "and", "result_type", "is", "str", ":", "# casting a str to bool is always True, hence special case. If", "# the str value is 'False'/'false', presumably user can", "# reasonably expect a bool False response.", "return", "result", ".", "lower", "(", ")", "in", "[", "'true'", ",", "'1'", ",", "'1.0'", "]", "else", ":", "return", "out_type", "(", "result", ")", "else", ":", "return", "out_type", "(", "value", ")" ]
Return formatted value for input value, returns as out_type. Caveat emptor: if out_type is bool and value is a string, return will be True if the string is 'True', '1' or '1.0' (case-insensitive). It will be False for all other cases. Args: value: the value to format default: if value is None, set to this out_type: cast return as this type Returns: Formatted value of type out_type
[ "Return", "formatted", "value", "for", "input", "value", "returns", "as", "out_type", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L405-L441
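A sketch of the bool special-casing described above; the context keys are invented. Note the string 'False' comes back as bool False rather than a truthy non-empty str:

from pypyr.context import Context

context = Context({'isVerbose': 'False', 'retries': '3'})

context.get_formatted_as_type('{isVerbose}', out_type=bool)   # False
context.get_formatted_as_type('true', out_type=bool)          # True
context.get_formatted_as_type('{retries}', out_type=int)      # 3
context.get_formatted_as_type(None, default=5, out_type=int)  # 5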
154
pypyr/pypyr-cli
pypyr/context.py
Context.get_processed_string
def get_processed_string(self, input_string): """Run token substitution on input_string against context. You probably don't want to call this directly yourself - rather use get_formatted, get_formatted_iterable, or get_formatted_string because these contain more friendly error handling plumbing and context logic. If you do want to call it yourself, go for it, it doesn't touch state. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} An input string with a single formatting expression and nothing else will return the object at that context path: input_string='{key1}'. This means that the return obj will be the same type as the source object. This return object in itself has token substitutions run on it iteratively. By comparison, multiple formatting expressions and/or the inclusion of literal text will result in a string return type: input_string='{key1} literal text {key2}' Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse Returns: any given type: Formatted string with {substitutions} made from context. If it's a !sic string, x from !sic x, with no substitutions made on x. If input_string was a single expression (e.g '{field}'), then returns the object with {substitutions} made for its attributes. Raises: KeyNotInContextError: input_string is not a sic string and has {somekey} where somekey does not exist in context dictionary. """ # arguably, this doesn't really belong here, or at least it makes a # nonsense of the function name. given how py and strings # look and feel pretty much like strings from user's perspective, and # given legacy code back when sic strings were in fact just strings, # keep in here for backwards compatibility. if isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: # is this a special one field formatstring? i.e "{field}", with # nothing else? out = None is_out_set = False expr_count = 0 # parse finds field format expressions and/or literals in input for expression in formatter.parse(input_string): # parse tuple: # (literal_text, field_name, format_spec, conversion) # it's a single '{field}' if no literal_text but field_name # no literal, field name exists, and no previous expr found if (not expression[0] and expression[1] and not expr_count): # get_field tuple: (obj, used_key) out = formatter.get_field(expression[1], None, self)[0] # second flag necessary because a literal with no format # expression will still result in expr_count == 1 is_out_set = True expr_count += 1 # this is a little bit clumsy, but you have to consume the # iterator to get the count. Interested in 1 and only 1 field # expressions with no literal text: have to loop to see if # there is >1. if expr_count > 1: break if is_out_set and expr_count == 1: # found 1 and only 1. but this could be an iterable obj # that needs formatting rules run on it in itself return self.get_formatted_iterable(out) else: return input_string.format_map(self)
python
def get_processed_string(self, input_string): # arguably, this doesn't really belong here, or at least it makes a # nonsense of the function name. given how py and strings # look and feel pretty much like strings from user's perspective, and # given legacy code back when sic strings were in fact just strings, # keep in here for backwards compatibility. if isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: # is this a special one field formatstring? i.e "{field}", with # nothing else? out = None is_out_set = False expr_count = 0 # parse finds field format expressions and/or literals in input for expression in formatter.parse(input_string): # parse tuple: # (literal_text, field_name, format_spec, conversion) # it's a single '{field}' if no literal_text but field_name # no literal, field name exists, and no previous expr found if (not expression[0] and expression[1] and not expr_count): # get_field tuple: (obj, used_key) out = formatter.get_field(expression[1], None, self)[0] # second flag necessary because a literal with no format # expression will still result in expr_count == 1 is_out_set = True expr_count += 1 # this is a little bit clumsy, but you have to consume the # iterator to get the count. Interested in 1 and only 1 field # expressions with no literal text: have to loop to see if # there is >1. if expr_count > 1: break if is_out_set and expr_count == 1: # found 1 and only 1. but this could be an iterable obj # that needs formatting rules run on it in itself return self.get_formatted_iterable(out) else: return input_string.format_map(self)
[ "def", "get_processed_string", "(", "self", ",", "input_string", ")", ":", "# arguably, this doesn't really belong here, or at least it makes a", "# nonsense of the function name. given how py and strings", "# look and feel pretty much like strings from user's perspective, and", "# given legacy code back when sic strings were in fact just strings,", "# keep in here for backwards compatibility.", "if", "isinstance", "(", "input_string", ",", "SpecialTagDirective", ")", ":", "return", "input_string", ".", "get_value", "(", "self", ")", "else", ":", "# is this a special one field formatstring? i.e \"{field}\", with", "# nothing else?", "out", "=", "None", "is_out_set", "=", "False", "expr_count", "=", "0", "# parse finds field format expressions and/or literals in input", "for", "expression", "in", "formatter", ".", "parse", "(", "input_string", ")", ":", "# parse tuple:", "# (literal_text, field_name, format_spec, conversion)", "# it's a single '{field}' if no literal_text but field_name", "# no literal, field name exists, and no previous expr found", "if", "(", "not", "expression", "[", "0", "]", "and", "expression", "[", "1", "]", "and", "not", "expr_count", ")", ":", "# get_field tuple: (obj, used_key)", "out", "=", "formatter", ".", "get_field", "(", "expression", "[", "1", "]", ",", "None", ",", "self", ")", "[", "0", "]", "# second flag necessary because a literal with no format", "# expression will still result in expr_count == 1", "is_out_set", "=", "True", "expr_count", "+=", "1", "# this is a little bit clumsy, but you have to consume the", "# iterator to get the count. Interested in 1 and only 1 field", "# expressions with no literal text: have to loop to see if", "# there is >1.", "if", "expr_count", ">", "1", ":", "break", "if", "is_out_set", "and", "expr_count", "==", "1", ":", "# found 1 and only 1. but this could be an iterable obj", "# that needs formatting rules run on it in itself", "return", "self", ".", "get_formatted_iterable", "(", "out", ")", "else", ":", "return", "input_string", ".", "format_map", "(", "self", ")" ]
Run token substitution on input_string against context. You probably don't want to call this directly yourself - rather use get_formatted, get_formatted_iterable, or get_formatted_string because these contain more friendly error handling plumbing and context logic. If you do want to call it yourself, go for it, it doesn't touch state. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} An input string with a single formatting expression and nothing else will return the object at that context path: input_string='{key1}'. This means that the return obj will be the same type as the source object. This return object in itself has token substitutions run on it iteratively. By comparison, multiple formatting expressions and/or the inclusion of literal text will result in a string return type: input_string='{key1} literal text {key2}' Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse Returns: any given type: Formatted string with {substitutions} made from context. If it's a !sic string, x from !sic x, with no substitutions made on x. If input_string was a single expression (e.g '{field}'), then returns the object with {substitutions} made for its attributes. Raises: KeyNotInContextError: input_string is not a sic string and has {somekey} where somekey does not exist in context dictionary.
[ "Run", "token", "substitution", "on", "input_string", "against", "context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L443-L523
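To make the single-expression behavior concrete, a sketch with invented context values showing the type-preserving return vs the plain string return:

from pypyr.context import Context

context = Context({'ports': [8080, 8081], 'env': 'prod'})

# single expression, nothing else: returns the object itself, type intact
context.get_processed_string('{ports}')       # [8080, 8081] (a list)

# literal text mixed in: falls through to format_map, returns a str
context.get_processed_string('env is {env}')  # 'env is prod'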
155
pypyr/pypyr-cli
pypyr/context.py
Context.keys_of_type_exist
def keys_of_type_exist(self, *keys): """Check if keys exist in context and if types are as expected. Args: *keys: *args for keys to check in context. Each arg is a tuple (str, type) Returns: Tuple of namedtuple ContextItemInfo, same order as *keys. ContextItemInfo(key, key_in_context, expected_type, is_expected_type) Remember if there is only one key in keys, the return assignment needs an extra comma to remind python that it's a tuple: # one a, = context.keys_of_type_exist(('a', str)) # > 1 a, b = context.keys_of_type_exist(('a', str), ('b', dict)) """ # k[0] = key name, k[1] = exists, k[2] = expected type keys_exist = [(key, key in self.keys(), expected_type) for key, expected_type in keys] return tuple(ContextItemInfo( key=k[0], key_in_context=k[1], expected_type=k[2], is_expected_type=isinstance(self[k[0]], k[2]) if k[1] else None, has_value=k[1] and not self[k[0]] is None ) for k in keys_exist)
python
def keys_of_type_exist(self, *keys): # k[0] = key name, k[1] = exists, k[2] = expected type keys_exist = [(key, key in self.keys(), expected_type) for key, expected_type in keys] return tuple(ContextItemInfo( key=k[0], key_in_context=k[1], expected_type=k[2], is_expected_type=isinstance(self[k[0]], k[2]) if k[1] else None, has_value=k[1] and not self[k[0]] is None ) for k in keys_exist)
[ "def", "keys_of_type_exist", "(", "self", ",", "*", "keys", ")", ":", "# k[0] = key name, k[1] = exists, k2 = expected type", "keys_exist", "=", "[", "(", "key", ",", "key", "in", "self", ".", "keys", "(", ")", ",", "expected_type", ")", "for", "key", ",", "expected_type", "in", "keys", "]", "return", "tuple", "(", "ContextItemInfo", "(", "key", "=", "k", "[", "0", "]", ",", "key_in_context", "=", "k", "[", "1", "]", ",", "expected_type", "=", "k", "[", "2", "]", ",", "is_expected_type", "=", "isinstance", "(", "self", "[", "k", "[", "0", "]", "]", ",", "k", "[", "2", "]", ")", "if", "k", "[", "1", "]", "else", "None", ",", "has_value", "=", "k", "[", "1", "]", "and", "not", "self", "[", "k", "[", "0", "]", "]", "is", "None", ")", "for", "k", "in", "keys_exist", ")" ]
Check if keys exist in context and if types are as expected. Args: *keys: *args for keys to check in context. Each arg is a tuple (str, type) Returns: Tuple of namedtuple ContextItemInfo, same order as *keys. ContextItemInfo(key, key_in_context, expected_type, is_expected_type) Remember if there is only one key in keys, the return assignment needs an extra comma to remind python that it's a tuple: # one a, = context.keys_of_type_exist(('a', str)) # > 1 a, b = context.keys_of_type_exist(('a', str), ('b', dict))
[ "Check", "if", "keys", "exist", "in", "context", "and", "if", "types", "are", "as", "expected", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L560-L593
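A sketch of reading the returned ContextItemInfo namedtuples; context keys are invented for illustration:

from pypyr.context import Context

context = Context({'port': 8080})

# note the trailing comma to unpack the 1-tuple
info, = context.keys_of_type_exist(('port', int))
# info.key_in_context is True, info.is_expected_type is True,
# info.has_value is True

port_info, proxy_info = context.keys_of_type_exist(
    ('port', int), ('proxy', str))
# proxy_info.key_in_context is False; its is_expected_type is None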
156
pypyr/pypyr-cli
pypyr/context.py
Context.merge
def merge(self, add_me): """Merge add_me into context and applies interpolation. Bottom-up merge where add_me merges into context. Applies string interpolation where the type is a string. Where a key exists in context already, add_me's value will overwrite what's in context already. Supports nested hierarchy. add_me can contain dicts/lists/enumerables that contain other enumerables etc. It doesn't restrict levels of nesting, so if you really want to go crazy with the levels you can, but you might blow your stack. If something from add_me exists in context already, but add_me's value is of a different type, add_me will overwrite context. Do note this. i.e. if you had context['int_key'] == 1 and add_me['int_key'] == 'clearly not a number', the end result would be context['int_key'] == 'clearly not a number' If add_me contains lists/sets/tuples, this merges these additively, meaning it appends values from add_me to the existing sequence. Args: add_me: dict. Merge this dict into context. Returns: None. All operations mutate this instance of context. """ def merge_recurse(current, add_me): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. add_me: dict. Merge this to current. """ for k, v in add_me.items(): # key supports interpolation k = self.get_formatted_string(k) # str not mergable, so it doesn't matter if it exists in dest if isinstance(v, str): # just overwrite dest - str adds/edits indiscriminately current[k] = self.get_formatted_string(v) elif isinstance(v, (bytes, bytearray)): # bytes aren't mergable or formattable # only here to prevent the elif on enumerables catching it current[k] = v # deal with things that are mergable - exists already in dest elif k in current: if types.are_all_this_type(Mapping, current[k], v): # it's dict-y, thus recurse through it to merge since # it exists in dest merge_recurse(current[k], v) elif types.are_all_this_type(list, current[k], v): # it's list-y. Extend mutates existing list since it # exists in dest current[k].extend( self.get_formatted_iterable(v)) elif types.are_all_this_type(tuple, current[k], v): # concatenate tuples current[k] = ( current[k] + self.get_formatted_iterable(v)) elif types.are_all_this_type(Set, current[k], v): # join sets current[k] = ( current[k] | self.get_formatted_iterable(v)) else: # at this point it's not mergable nor a known iterable current[k] = v else: # at this point it's not mergable, nor in context current[k] = self.get_formatted_iterable(v) # first iteration starts at context dict root merge_recurse(self, add_me)
python
def merge(self, add_me): def merge_recurse(current, add_me): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. add_me: dict. Merge this to current. """ for k, v in add_me.items(): # key supports interpolation k = self.get_formatted_string(k) # str not mergable, so it doesn't matter if it exists in dest if isinstance(v, str): # just overwrite dest - str adds/edits indiscriminately current[k] = self.get_formatted_string(v) elif isinstance(v, (bytes, bytearray)): # bytes aren't mergable or formattable # only here to prevent the elif on enumerables catching it current[k] = v # deal with things that are mergable - exists already in dest elif k in current: if types.are_all_this_type(Mapping, current[k], v): # it's dict-y, thus recurse through it to merge since # it exists in dest merge_recurse(current[k], v) elif types.are_all_this_type(list, current[k], v): # it's list-y. Extend mutates existing list since it # exists in dest current[k].extend( self.get_formatted_iterable(v)) elif types.are_all_this_type(tuple, current[k], v): # concatenate tuples current[k] = ( current[k] + self.get_formatted_iterable(v)) elif types.are_all_this_type(Set, current[k], v): # join sets current[k] = ( current[k] | self.get_formatted_iterable(v)) else: # at this point it's not mergable nor a known iterable current[k] = v else: # at this point it's not mergable, nor in context current[k] = self.get_formatted_iterable(v) # first iteration starts at context dict root merge_recurse(self, add_me)
[ "def", "merge", "(", "self", ",", "add_me", ")", ":", "def", "merge_recurse", "(", "current", ",", "add_me", ")", ":", "\"\"\"Walk the current context tree in recursive inner function.\n\n On 1st iteration, current = self (i.e root of context)\n On subsequent recursive iterations, current is wherever you're at\n in the nested context hierarchy.\n\n Args:\n current: dict. Destination of merge.\n add_me: dict. Merge this to current.\n \"\"\"", "for", "k", ",", "v", "in", "add_me", ".", "items", "(", ")", ":", "# key supports interpolation", "k", "=", "self", ".", "get_formatted_string", "(", "k", ")", "# str not mergable, so it doesn't matter if it exists in dest", "if", "isinstance", "(", "v", ",", "str", ")", ":", "# just overwrite dest - str adds/edits indiscriminately", "current", "[", "k", "]", "=", "self", ".", "get_formatted_string", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "(", "bytes", ",", "bytearray", ")", ")", ":", "# bytes aren't mergable or formattable", "# only here to prevent the elif on enumerables catching it", "current", "[", "k", "]", "=", "v", "# deal with things that are mergable - exists already in dest", "elif", "k", "in", "current", ":", "if", "types", ".", "are_all_this_type", "(", "Mapping", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# it's dict-y, thus recurse through it to merge since", "# it exists in dest", "merge_recurse", "(", "current", "[", "k", "]", ",", "v", ")", "elif", "types", ".", "are_all_this_type", "(", "list", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# it's list-y. Extend mutates existing list since it", "# exists in dest", "current", "[", "k", "]", ".", "extend", "(", "self", ".", "get_formatted_iterable", "(", "v", ")", ")", "elif", "types", ".", "are_all_this_type", "(", "tuple", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# concatenate tuples", "current", "[", "k", "]", "=", "(", "current", "[", "k", "]", "+", "self", ".", "get_formatted_iterable", "(", "v", ")", ")", "elif", "types", ".", "are_all_this_type", "(", "Set", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# join sets", "current", "[", "k", "]", "=", "(", "current", "[", "k", "]", "|", "self", ".", "get_formatted_iterable", "(", "v", ")", ")", "else", ":", "# at this point it's not mergable nor a known iterable", "current", "[", "k", "]", "=", "v", "else", ":", "# at this point it's not mergable, nor in context", "current", "[", "k", "]", "=", "self", ".", "get_formatted_iterable", "(", "v", ")", "# first iteration starts at context dict root", "merge_recurse", "(", "self", ",", "add_me", ")" ]
Merge add_me into context and applies interpolation. Bottom-up merge where add_me merges into context. Applies string interpolation where the type is a string. Where a key exists in context already, add_me's value will overwrite what's in context already. Supports nested hierarchy. add_me can contain dicts/lists/enumerables that contain other enumerables etc. It doesn't restrict levels of nesting, so if you really want to go crazy with the levels you can, but you might blow your stack. If something from add_me exists in context already, but add_me's value is of a different type, add_me will overwrite context. Do note this. i.e. if you had context['int_key'] == 1 and add_me['int_key'] == 'clearly not a number', the end result would be context['int_key'] == 'clearly not a number' If add_me contains lists/sets/tuples, this merges these additively, meaning it appends values from add_me to the existing sequence. Args: add_me: dict. Merge this dict into context. Returns: None. All operations mutate this instance of context.
[ "Merge", "add_me", "into", "context", "and", "applies", "interpolation", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L595-L675
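A sketch of the additive sequence handling and nested dict merge; the keys and values are invented for illustration:

from pypyr.context import Context

context = Context({'db': {'host': 'localhost'}, 'tags': ['a']})
context.merge({'db': {'port': 5432}, 'tags': ['b', 'c']})

# nested dicts merged, lists extended additively:
# context == {'db': {'host': 'localhost', 'port': 5432},
#             'tags': ['a', 'b', 'c']}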
157
pypyr/pypyr-cli
pypyr/context.py
Context.set_defaults
def set_defaults(self, defaults): """Set defaults in context if keys do not exist already. Adds the input dict (defaults) into the context, only where keys in defaults do not already exist in context. Supports nested hierarchies. Example: Given a context like this: key1: value1 key2: key2.1: value2.1 key3: None And defaults input like this: key1: 'updated value here won't overwrite since it already exists' key2: key2.2: value2.2 key3: 'key 3 exists so I won't overwrite' Will result in context: key1: value1 key2: key2.1: value2.1 key2.2: value2.2 key3: None Args: defaults: dict. Add this dict into context. Returns: None. All operations mutate this instance of context. """ def defaults_recurse(current, defaults): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. defaults: dict. Add this to current if keys don't exist already. """ for k, v in defaults.items(): # key supports interpolation k = self.get_formatted_string(k) if k in current: if types.are_all_this_type(Mapping, current[k], v): # it's dict-y, thus recurse through it to check if it # contains child items that don't exist in dest defaults_recurse(current[k], v) else: # since it's not in context already, add the default current[k] = self.get_formatted_iterable(v) # first iteration starts at context dict root defaults_recurse(self, defaults)
python
def set_defaults(self, defaults): def defaults_recurse(current, defaults): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. defaults: dict. Add this to current if keys don't exist already. """ for k, v in defaults.items(): # key supports interpolation k = self.get_formatted_string(k) if k in current: if types.are_all_this_type(Mapping, current[k], v): # it's dict-y, thus recurse through it to check if it # contains child items that don't exist in dest defaults_recurse(current[k], v) else: # since it's not in context already, add the default current[k] = self.get_formatted_iterable(v) # first iteration starts at context dict root defaults_recurse(self, defaults)
[ "def", "set_defaults", "(", "self", ",", "defaults", ")", ":", "def", "defaults_recurse", "(", "current", ",", "defaults", ")", ":", "\"\"\"Walk the current context tree in recursive inner function.\n\n On 1st iteration, current = self (i.e root of context)\n On subsequent recursive iterations, current is wherever you're at\n in the nested context hierarchy.\n\n Args:\n current: dict. Destination of merge.\n defaults: dict. Add this to current if keys don't exist\n already.\n\n \"\"\"", "for", "k", ",", "v", "in", "defaults", ".", "items", "(", ")", ":", "# key supports interpolation", "k", "=", "self", ".", "get_formatted_string", "(", "k", ")", "if", "k", "in", "current", ":", "if", "types", ".", "are_all_this_type", "(", "Mapping", ",", "current", "[", "k", "]", ",", "v", ")", ":", "# it's dict-y, thus recurse through it to check if it", "# contains child items that don't exist in dest", "defaults_recurse", "(", "current", "[", "k", "]", ",", "v", ")", "else", ":", "# since it's not in context already, add the default", "current", "[", "k", "]", "=", "self", ".", "get_formatted_iterable", "(", "v", ")", "# first iteration starts at context dict root", "defaults_recurse", "(", "self", ",", "defaults", ")" ]
Set defaults in context if keys do not exist already. Adds the input dict (defaults) into the context, only where keys in defaults do not already exist in context. Supports nested hierarchies. Example: Given a context like this: key1: value1 key2: key2.1: value2.1 key3: None And defaults input like this: key1: 'updated value here won't overwrite since it already exists' key2: key2.2: value2.2 key3: 'key 3 exists so I won't overwrite' Will result in context: key1: value1 key2: key2.1: value2.1 key2.2: value2.2 key3: None Args: defaults: dict. Add this dict into context. Returns: None. All operations mutate this instance of context.
[ "Set", "defaults", "in", "context", "if", "keys", "do", "not", "exist", "already", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/context.py#L677-L737
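A sketch mirroring the docstring's own example: defaults only land where the key is missing.

from pypyr.context import Context

context = Context({'key1': 'value1', 'key2': {'key2.1': 'value2.1'}})
context.set_defaults({'key1': 'ignored, key already exists',
                      'key2': {'key2.2': 'value2.2'},
                      'key3': 'added, key was missing'})

# context == {'key1': 'value1',
#             'key2': {'key2.1': 'value2.1', 'key2.2': 'value2.2'},
#             'key3': 'added, key was missing'}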
158
pypyr/pypyr-cli
pypyr/steps/dsl/fileinoutrewriter.py
FileInRewriterStep.run_step
def run_step(self, rewriter): """Do the file in to out rewrite. Doesn't do anything more crazy than call files_in_to_out on the rewriter. Args: rewriter: pypyr.filesystem.FileRewriter instance. """ assert rewriter, ("FileRewriter instance required to run " "FileInRewriterStep.") rewriter.files_in_to_out(in_path=self.path_in, out_path=self.path_out)
python
def run_step(self, rewriter): assert rewriter, ("FileRewriter instance required to run " "FileInRewriterStep.") rewriter.files_in_to_out(in_path=self.path_in, out_path=self.path_out)
[ "def", "run_step", "(", "self", ",", "rewriter", ")", ":", "assert", "rewriter", ",", "(", "\"FileRewriter instance required to run \"", "\"FileInRewriterStep.\"", ")", "rewriter", ".", "files_in_to_out", "(", "in_path", "=", "self", ".", "path_in", ",", "out_path", "=", "self", ".", "path_out", ")" ]
Do the file in to out rewrite. Doesn't do anything more crazy than call files_in_to_out on the rewriter. Args: rewriter: pypyr.filesystem.FileRewriter instance.
[ "Do", "the", "file", "in", "to", "out", "rewrite", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L57-L68
159
pypyr/pypyr-cli
pypyr/steps/dsl/fileinoutrewriter.py
ObjectRewriterStep.run_step
def run_step(self, representer): """Do the object in-out rewrite. Args: representer: A pypyr.filesystem.ObjectRepresenter instance. """ assert representer, ("ObjectRepresenter instance required to run " "ObjectRewriterStep.") rewriter = ObjectRewriter(self.context.get_formatted_iterable, representer) super().run_step(rewriter)
python
def run_step(self, representer): assert representer, ("ObjectRepresenter instance required to run " "ObjectRewriterStep.") rewriter = ObjectRewriter(self.context.get_formatted_iterable, representer) super().run_step(rewriter)
[ "def", "run_step", "(", "self", ",", "representer", ")", ":", "assert", "representer", ",", "(", "\"ObjectRepresenter instance required to run \"", "\"ObjectRewriterStep.\"", ")", "rewriter", "=", "ObjectRewriter", "(", "self", ".", "context", ".", "get_formatted_iterable", ",", "representer", ")", "super", "(", ")", ".", "run_step", "(", "rewriter", ")" ]
Do the object in-out rewrite. Args: representer: A pypyr.filesystem.ObjectRepresenter instance.
[ "Do", "the", "object", "in", "-", "out", "rewrite", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L74-L85
160
pypyr/pypyr-cli
pypyr/steps/dsl/fileinoutrewriter.py
StreamRewriterStep.run_step
def run_step(self): """Do the file in-out rewrite.""" rewriter = StreamRewriter(self.context.iter_formatted_strings) super().run_step(rewriter)
python
def run_step(self): rewriter = StreamRewriter(self.context.iter_formatted_strings) super().run_step(rewriter)
[ "def", "run_step", "(", "self", ")", ":", "rewriter", "=", "StreamRewriter", "(", "self", ".", "context", ".", "iter_formatted_strings", ")", "super", "(", ")", ".", "run_step", "(", "rewriter", ")" ]
Do the file in-out rewrite.
[ "Do", "the", "file", "in", "-", "out", "rewrite", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L100-L103
161
pypyr/pypyr-cli
pypyr/steps/dsl/fileinoutrewriter.py
StreamReplacePairsRewriterStep.run_step
def run_step(self): """Write in to out, replacing strings per the replace_pairs.""" formatted_replacements = self.context.get_formatted_iterable( self.replace_pairs) iter = StreamReplacePairsRewriterStep.iter_replace_strings( formatted_replacements) rewriter = StreamRewriter(iter) super().run_step(rewriter)
python
def run_step(self): formatted_replacements = self.context.get_formatted_iterable( self.replace_pairs) iter = StreamReplacePairsRewriterStep.iter_replace_strings( formatted_replacements) rewriter = StreamRewriter(iter) super().run_step(rewriter)
[ "def", "run_step", "(", "self", ")", ":", "formatted_replacements", "=", "self", ".", "context", ".", "get_formatted_iterable", "(", "self", ".", "replace_pairs", ")", "iter", "=", "StreamReplacePairsRewriterStep", ".", "iter_replace_strings", "(", "formatted_replacements", ")", "rewriter", "=", "StreamRewriter", "(", "iter", ")", "super", "(", ")", ".", "run_step", "(", "rewriter", ")" ]
Write in to out, replacing strings per the replace_pairs.
[ "Write", "in", "to", "out", "replacing", "strings", "per", "the", "replace_pairs", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L133-L141
162
pypyr/pypyr-cli
pypyr/steps/dsl/fileinoutrewriter.py
StreamReplacePairsRewriterStep.iter_replace_strings
def iter_replace_strings(replacements): """Create a function that uses replacement pairs to process a string. The returned function takes an iterator and yields on each processed line. Args: replacements: Dict containing 'find_string': 'replace_string' pairs Returns: function with signature: iterator of strings = function(iterable) """ def function_iter_replace_strings(iterable_strings): """Yield a formatted string from iterable_strings using a generator. Args: iterable_strings: Iterable containing strings. E.g a file-like object. Returns: Yields formatted line. """ for string in iterable_strings: yield reduce((lambda s, kv: s.replace(*kv)), replacements.items(), string) return function_iter_replace_strings
python
def iter_replace_strings(replacements): def function_iter_replace_strings(iterable_strings): """Yield a formatted string from iterable_strings using a generator. Args: iterable_strings: Iterable containing strings. E.g a file-like object. Returns: Yields formatted line. """ for string in iterable_strings: yield reduce((lambda s, kv: s.replace(*kv)), replacements.items(), string) return function_iter_replace_strings
[ "def", "iter_replace_strings", "(", "replacements", ")", ":", "def", "function_iter_replace_strings", "(", "iterable_strings", ")", ":", "\"\"\"Yield a formatted string from iterable_strings using a generator.\n\n Args:\n iterable_strings: Iterable containing strings. E.g a file-like\n object.\n\n Returns:\n Yields formatted line.\n\n \"\"\"", "for", "string", "in", "iterable_strings", ":", "yield", "reduce", "(", "(", "lambda", "s", ",", "kv", ":", "s", ".", "replace", "(", "*", "kv", ")", ")", ",", "replacements", ".", "items", "(", ")", ",", "string", ")", "return", "function_iter_replace_strings" ]
Create a function that uses replacement pairs to process a string. The returned function takes an iterator and yields on each processed line. Args: replacements: Dict containing 'find_string': 'replace_string' pairs Returns: function with signature: iterator of strings = function(iterable)
[ "Create", "a", "function", "that", "uses", "replacement", "pairs", "to", "process", "a", "string", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/dsl/fileinoutrewriter.py#L144-L173
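A sketch of the generator factory in isolation, fed a plain list instead of a file-like object; the replacement pairs are invented:

from pypyr.steps.dsl.fileinoutrewriter import StreamReplacePairsRewriterStep

replacer = StreamReplacePairsRewriterStep.iter_replace_strings(
    {'{{token}}': 'value', 'dev': 'prod'})

# the returned function applies every find/replace pair to each line
for line in replacer(['env: dev', 'secret: {{token}}']):
    print(line)
# env: prod
# secret: value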
163
pypyr/pypyr-cli
pypyr/steps/contextsetf.py
run_step
def run_step(context): """Set new context keys from formatting expressions with substitutions. Context is a dictionary or dictionary-like. context['contextSetf'] must exist. It's a dictionary. Will iterate context['contextSetf'] and save the values as new keys to the context. For example, say input context is: key1: value1 key2: value2 key3: value3 contextSetf: key2: 'aaa_{key1}_zzz' key4: 'bbb_{key3}_yyy' This will result in the returned context: key1: value1 key2: aaa_value1_zzz key3: value3 key4: bbb_value3_yyy """ logger.debug("started") context.assert_key_has_value(key='contextSetf', caller=__name__) for k, v in context['contextSetf'].items(): logger.debug(f"setting context {k} to value from context {v}") context[context.get_formatted_iterable( k)] = context.get_formatted_iterable(v) logger.info(f"Set {len(context['contextSetf'])} context items.") logger.debug("done")
python
def run_step(context): logger.debug("started") context.assert_key_has_value(key='contextSetf', caller=__name__) for k, v in context['contextSetf'].items(): logger.debug(f"setting context {k} to value from context {v}") context[context.get_formatted_iterable( k)] = context.get_formatted_iterable(v) logger.info(f"Set {len(context['contextSetf'])} context items.") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'contextSetf'", ",", "caller", "=", "__name__", ")", "for", "k", ",", "v", "in", "context", "[", "'contextSetf'", "]", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "f\"setting context {k} to value from context {v}\"", ")", "context", "[", "context", ".", "get_formatted_iterable", "(", "k", ")", "]", "=", "context", ".", "get_formatted_iterable", "(", "v", ")", "logger", ".", "info", "(", "f\"Set {len(context['contextSetf'])} context items.\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Set new context keys from formatting expressions with substitutions. Context is a dictionary or dictionary-like. context['contextSetf'] must exist. It's a dictionary. Will iterate context['contextSetf'] and save the values as new keys to the context. For example, say input context is: key1: value1 key2: value2 key3: value3 contextSetf: key2: 'aaa_{key1}_zzz' key4: 'bbb_{key3}_yyy' This will result in the returned context: key1: value1 key2: aaa_value1_zzz key3: value3 key4: bbb_value3_yyy
[ "Set", "new", "context", "keys", "from", "formatting", "expressions", "with", "substitutions", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextsetf.py#L13-L45
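Calling the step directly from Python, using the docstring's example data; in a pipeline you'd normally reach it via the pypyr.steps.contextsetf step name:

from pypyr.context import Context
import pypyr.steps.contextsetf as contextsetf

context = Context({'key1': 'value1', 'key3': 'value3',
                   'contextSetf': {'key2': 'aaa_{key1}_zzz',
                                   'key4': 'bbb_{key3}_yyy'}})
contextsetf.run_step(context)

# context['key2'] == 'aaa_value1_zzz'
# context['key4'] == 'bbb_value3_yyy'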
164
pypyr/pypyr-cli
pypyr/utils/types.py
cast_to_type
def cast_to_type(obj, out_type): """Cast obj to out_type if it's not out_type already. If the obj happens to be out_type already, it just returns obj as is. Args: obj: input object out_type: type. Returns: obj cast to out_type. Usual python conversion / casting rules apply. """ in_type = type(obj) if out_type is in_type: # no need to cast. return obj else: return out_type(obj)
python
def cast_to_type(obj, out_type): in_type = type(obj) if out_type is in_type: # no need to cast. return obj else: return out_type(obj)
[ "def", "cast_to_type", "(", "obj", ",", "out_type", ")", ":", "in_type", "=", "type", "(", "obj", ")", "if", "out_type", "is", "in_type", ":", "# no need to cast.", "return", "obj", "else", ":", "return", "out_type", "(", "obj", ")" ]
Cast obj to out_type if it's not out_type already. If the obj happens to be out_type already, it just returns obj as is. Args: obj: input object out_type: type. Returns: obj cast to out_type. Usual python conversion / casting rules apply.
[ "Cast", "obj", "to", "out_type", "if", "it", "s", "not", "out_type", "already", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/utils/types.py#L20-L38
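Usage is straightforward; a short sketch:

from pypyr.utils import types

types.cast_to_type('42', int)  # 42
types.cast_to_type(42, int)    # 42, returned as-is with no cast
types.cast_to_type(0, bool)    # False, usual python casting rules apply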
165
pypyr/pypyr-cli
pypyr/yaml.py
get_pipeline_yaml
def get_pipeline_yaml(file): """Return pipeline yaml from open file object. Use specific custom representers to model the custom pypyr pipeline yaml format, to load in special literal types like py and sic strings. If looking to extend the pypyr pipeline syntax with special types, add these to the tag_representers list. Args: file: open file-like object. Returns: dict-like representation of loaded yaml. """ tag_representers = [PyString, SicString] yaml_loader = get_yaml_parser_safe() for representer in tag_representers: yaml_loader.register_class(representer) pipeline_definition = yaml_loader.load(file) return pipeline_definition
python
def get_pipeline_yaml(file): tag_representers = [PyString, SicString] yaml_loader = get_yaml_parser_safe() for representer in tag_representers: yaml_loader.register_class(representer) pipeline_definition = yaml_loader.load(file) return pipeline_definition
[ "def", "get_pipeline_yaml", "(", "file", ")", ":", "tag_representers", "=", "[", "PyString", ",", "SicString", "]", "yaml_loader", "=", "get_yaml_parser_safe", "(", ")", "for", "representer", "in", "tag_representers", ":", "yaml_loader", ".", "register_class", "(", "representer", ")", "pipeline_definition", "=", "yaml_loader", ".", "load", "(", "file", ")", "return", "pipeline_definition" ]
Return pipeline yaml from open file object. Use specific custom representers to model the custom pypyr pipeline yaml format, to load in special literal types like py and sic strings. If looking to extend the pypyr pipeline syntax with special types, add these to the tag_representers list. Args: file: open file-like object. Returns: dict-like representation of loaded yaml.
[ "Return", "pipeline", "yaml", "from", "open", "file", "object", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L7-L31
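A sketch of loading a pipeline from disk, assuming a pipeline.yaml exists in the working directory:

from pypyr.yaml import get_pipeline_yaml

with open('pipeline.yaml') as pipeline_file:
    pipeline = get_pipeline_yaml(pipeline_file)

# dict-like: step group names keyed at the top level
print(list(pipeline.keys()))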
166
pypyr/pypyr-cli
pypyr/yaml.py
get_yaml_parser_roundtrip
def get_yaml_parser_roundtrip(): """Create the yaml parser object with this factory method. The round-trip parser preserves: - comments - block style and key ordering are kept, so you can diff the round-tripped source - flow style sequences ('a: b, c, d') (based on request and test by Anthony Sottile) - anchor names that are hand-crafted (i.e. not of the form ``idNNN``) - merges in dictionaries are preserved Returns: ruamel.yaml.YAML object with round-trip loader """ yaml_writer = yamler.YAML(typ='rt', pure=True) # if this isn't here the yaml doesn't format nicely indented for humans yaml_writer.indent(mapping=2, sequence=4, offset=2) return yaml_writer
python
def get_yaml_parser_roundtrip(): yaml_writer = yamler.YAML(typ='rt', pure=True) # if this isn't here the yaml doesn't format nicely indented for humans yaml_writer.indent(mapping=2, sequence=4, offset=2) return yaml_writer
[ "def", "get_yaml_parser_roundtrip", "(", ")", ":", "yaml_writer", "=", "yamler", ".", "YAML", "(", "typ", "=", "'rt'", ",", "pure", "=", "True", ")", "# if this isn't here the yaml doesn't format nicely indented for humans", "yaml_writer", ".", "indent", "(", "mapping", "=", "2", ",", "sequence", "=", "4", ",", "offset", "=", "2", ")", "return", "yaml_writer" ]
Create the yaml parser object with this factory method. The round-trip parser preserves: - comments - block style and key ordering are kept, so you can diff the round-tripped source - flow style sequences ('a: b, c, d') (based on request and test by Anthony Sottile) - anchor names that are hand-crafted (i.e. not of the form ``idNNN``) - merges in dictionaries are preserved Returns: ruamel.yaml.YAML object with round-trip loader
[ "Create", "the", "yaml", "parser", "object", "with", "this", "factory", "method", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L46-L65
167
pypyr/pypyr-cli
pypyr/yaml.py
get_yaml_parser_roundtrip_for_context
def get_yaml_parser_roundtrip_for_context(): """Create a yaml parser that can serialize the pypyr Context. Create yaml parser with get_yaml_parser_roundtrip, adding Context. This allows the yaml parser to serialize the pypyr Context. """ yaml_writer = get_yaml_parser_roundtrip() # Context is a dict data structure, so can just use a dict representer yaml_writer.Representer.add_representer( Context, yamler.representer.RoundTripRepresenter.represent_dict) return yaml_writer
python
def get_yaml_parser_roundtrip_for_context(): yaml_writer = get_yaml_parser_roundtrip() # Context is a dict data structure, so can just use a dict representer yaml_writer.Representer.add_representer( Context, yamler.representer.RoundTripRepresenter.represent_dict) return yaml_writer
[ "def", "get_yaml_parser_roundtrip_for_context", "(", ")", ":", "yaml_writer", "=", "get_yaml_parser_roundtrip", "(", ")", "# Context is a dict data structure, so can just use a dict representer", "yaml_writer", ".", "Representer", ".", "add_representer", "(", "Context", ",", "yamler", ".", "representer", ".", "RoundTripRepresenter", ".", "represent_dict", ")", "return", "yaml_writer" ]
Create a yaml parser that can serialize the pypyr Context. Create yaml parser with get_yaml_parser_roundtrip, adding Context. This allows the yaml parser to serialize the pypyr Context.
[ "Create", "a", "yaml", "parser", "that", "can", "serialize", "the", "pypyr", "Context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/yaml.py#L68-L81
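A sketch tying the two factory methods together: dump a Context back out as human-readable yaml, with values invented for illustration:

import sys
from pypyr.context import Context
from pypyr.yaml import get_yaml_parser_roundtrip_for_context

yaml_writer = get_yaml_parser_roundtrip_for_context()
context = Context({'key1': 'value1', 'key2': ['a', 'b']})

# Context serializes like a plain dict thanks to the added representer
yaml_writer.dump(context, sys.stdout)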
168
pypyr/pypyr-cli
pypyr/steps/fetchjson.py
run_step
def run_step(context): """Load a json file into the pypyr context. json parsed from the file will be merged into the pypyr context. This will overwrite existing values if the same keys are already in there. I.e. if file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'} already exists, returned context['eggs'] will be 'boiled'. The json should not be an array [] on the top level, but rather an Object. Args: context: pypyr.context.Context. Mandatory. The following context key must exist - fetchJson - path. path-like. Path to file on disk. - key. string. If exists, write json structure to this context key. Else json writes to context root. Also supports passing a path as a string to fetchJson, but in this case you won't be able to specify a key. All inputs support formatting expressions. Returns: None. updates context arg. Raises: FileNotFoundError: file specified in fetchJson.path doesn't exist. pypyr.errors.KeyNotInContextError: fetchJson.path missing in context. pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is None. """ logger.debug("started") deprecated(context) context.assert_key_has_value(key='fetchJson', caller=__name__) fetch_json_input = context.get_formatted('fetchJson') if isinstance(fetch_json_input, str): file_path = fetch_json_input destination_key_expression = None else: context.assert_child_key_has_value(parent='fetchJson', child='path', caller=__name__) file_path = fetch_json_input['path'] destination_key_expression = fetch_json_input.get('key', None) logger.debug(f"attempting to open file: {file_path}") with open(file_path) as json_file: payload = json.load(json_file) if destination_key_expression: destination_key = context.get_formatted_iterable( destination_key_expression) logger.debug(f"json file loaded. Writing to context {destination_key}") context[destination_key] = payload else: if not isinstance(payload, MutableMapping): raise TypeError( 'json input should describe an object at the top ' 'level when fetchJsonKey isn\'t specified. You should have ' 'something like {"key1": "value1", "key2": "value2"} ' 'in the json top-level, not ["value1", "value2"]') logger.debug("json file loaded. Merging into pypyr context. . .") context.update(payload) logger.info(f"json file written into pypyr context. Count: {len(payload)}") logger.debug("done")
python
def run_step(context): logger.debug("started") deprecated(context) context.assert_key_has_value(key='fetchJson', caller=__name__) fetch_json_input = context.get_formatted('fetchJson') if isinstance(fetch_json_input, str): file_path = fetch_json_input destination_key_expression = None else: context.assert_child_key_has_value(parent='fetchJson', child='path', caller=__name__) file_path = fetch_json_input['path'] destination_key_expression = fetch_json_input.get('key', None) logger.debug(f"attempting to open file: {file_path}") with open(file_path) as json_file: payload = json.load(json_file) if destination_key_expression: destination_key = context.get_formatted_iterable( destination_key_expression) logger.debug(f"json file loaded. Writing to context {destination_key}") context[destination_key] = payload else: if not isinstance(payload, MutableMapping): raise TypeError( 'json input should describe an object at the top ' 'level when fetchJsonKey isn\'t specified. You should have ' 'something like {"key1": "value1", "key2": "value2"} ' 'in the json top-level, not ["value1", "value2"]') logger.debug("json file loaded. Merging into pypyr context. . .") context.update(payload) logger.info(f"json file written into pypyr context. Count: {len(payload)}") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "deprecated", "(", "context", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'fetchJson'", ",", "caller", "=", "__name__", ")", "fetch_json_input", "=", "context", ".", "get_formatted", "(", "'fetchJson'", ")", "if", "isinstance", "(", "fetch_json_input", ",", "str", ")", ":", "file_path", "=", "fetch_json_input", "destination_key_expression", "=", "None", "else", ":", "context", ".", "assert_child_key_has_value", "(", "parent", "=", "'fetchJson'", ",", "child", "=", "'path'", ",", "caller", "=", "__name__", ")", "file_path", "=", "fetch_json_input", "[", "'path'", "]", "destination_key_expression", "=", "fetch_json_input", ".", "get", "(", "'key'", ",", "None", ")", "logger", ".", "debug", "(", "f\"attempting to open file: {file_path}\"", ")", "with", "open", "(", "file_path", ")", "as", "json_file", ":", "payload", "=", "json", ".", "load", "(", "json_file", ")", "if", "destination_key_expression", ":", "destination_key", "=", "context", ".", "get_formatted_iterable", "(", "destination_key_expression", ")", "logger", ".", "debug", "(", "f\"json file loaded. Writing to context {destination_key}\"", ")", "context", "[", "destination_key", "]", "=", "payload", "else", ":", "if", "not", "isinstance", "(", "payload", ",", "MutableMapping", ")", ":", "raise", "TypeError", "(", "'json input should describe an object at the top '", "'level when fetchJsonKey isn\\'t specified. You should have '", "'something like {\"key1\": \"value1\", \"key2\": \"value2\"} '", "'in the json top-level, not [\"value1\", \"value2\"]'", ")", "logger", ".", "debug", "(", "\"json file loaded. Merging into pypyr context. . .\"", ")", "context", ".", "update", "(", "payload", ")", "logger", ".", "info", "(", "f\"json file written into pypyr context. Count: {len(payload)}\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Load a json file into the pypyr context.

json parsed from the file will be merged into the pypyr context. This
will overwrite existing values if the same keys are already in there.
I.e. if the json file has {'eggs' : 'boiled'} and context {'eggs': 'fried'}
already exists, returned context['eggs'] will be 'boiled'.

The json should not be an array [] on the top level, but rather an Object.

Args:
    context: pypyr.context.Context. Mandatory.
             The following context key must exist
            - fetchJson
                - path. path-like. Path to file on disk.
                - key. string. If exists, write json structure to this
                  context key. Else json writes to context root.

Also supports passing the path as a string to fetchJson, but in this case
you won't be able to specify a key.

All inputs support formatting expressions.

Returns:
    None. updates context arg.

Raises:
    FileNotFoundError: take a guess
    pypyr.errors.KeyNotInContextError: fetchJson.path missing in context.
    pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
                                              None.
[ "Load", "a", "json", "file", "into", "the", "pypyr", "context", "." ]
4003f999cd5eb030b4c7407317de728f5115a80f
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/fetchjson.py#L10-L82
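A minimal sketch of driving this step straight from Python, assuming pypyr's Context class; 'eggs.json' and the 'breakfast' key are purely illustrative, and in normal use the step is referenced from a pipeline yaml instead:

from pypyr.context import Context
import pypyr.steps.fetchjson as fetchjson

# hypothetical file and destination key, for illustration only
context = Context({'fetchJson': {'path': 'eggs.json', 'key': 'breakfast'}})
fetchjson.run_step(context)
# the parsed json now lives at context['breakfast']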
169
bradmontgomery/django-querycount
querycount/middleware.py
QueryCountMiddleware._ignore_request
def _ignore_request(self, path): """Check to see if we should ignore the request.""" return any([ re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS'] ])
python
def _ignore_request(self, path): return any([ re.match(pattern, path) for pattern in QC_SETTINGS['IGNORE_REQUEST_PATTERNS'] ])
[ "def", "_ignore_request", "(", "self", ",", "path", ")", ":", "return", "any", "(", "[", "re", ".", "match", "(", "pattern", ",", "path", ")", "for", "pattern", "in", "QC_SETTINGS", "[", "'IGNORE_REQUEST_PATTERNS'", "]", "]", ")" ]
Check to see if we should ignore the request.
[ "Check", "to", "see", "if", "we", "should", "ignore", "the", "request", "." ]
61a380d98bc55e926c011367ecc2031102c3484c
https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L83-L87
170
bradmontgomery/django-querycount
querycount/middleware.py
QueryCountMiddleware._ignore_sql
def _ignore_sql(self, query): """Check to see if we should ignore the sql query.""" return any([ re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS'] ])
python
def _ignore_sql(self, query): return any([ re.search(pattern, query.get('sql')) for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS'] ])
[ "def", "_ignore_sql", "(", "self", ",", "query", ")", ":", "return", "any", "(", "[", "re", ".", "search", "(", "pattern", ",", "query", ".", "get", "(", "'sql'", ")", ")", "for", "pattern", "in", "QC_SETTINGS", "[", "'IGNORE_SQL_PATTERNS'", "]", "]", ")" ]
Check to see if we should ignore the sql query.
[ "Check", "to", "see", "if", "we", "should", "ignore", "the", "sql", "query", "." ]
61a380d98bc55e926c011367ecc2031102c3484c
https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L89-L93
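Note the deliberate asymmetry between the two predicates above: _ignore_request uses re.match, which only anchors at the start of the path, while _ignore_sql uses re.search, which matches anywhere in the query. A small sketch with illustrative patterns:

import re

bool(re.match(r'/admin/', '/admin/users'))      # True: pattern found at the start
bool(re.match(r'/admin/', '/app/admin/users'))  # False: match is anchored at the start
bool(re.search(r'SAVEPOINT', 'RELEASE SAVEPOINT "s1"'))  # True: search looks anywhere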
171
bradmontgomery/django-querycount
querycount/middleware.py
QueryCountMiddleware._duplicate_queries
def _duplicate_queries(self, output): """Appends the most common duplicate queries to the given output.""" if QC_SETTINGS['DISPLAY_DUPLICATES']: for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']): lines = ['\nRepeated {0} times.'.format(count)] lines += wrap(query) lines = "\n".join(lines) + "\n" output += self._colorize(lines, count) return output
python
def _duplicate_queries(self, output): if QC_SETTINGS['DISPLAY_DUPLICATES']: for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']): lines = ['\nRepeated {0} times.'.format(count)] lines += wrap(query) lines = "\n".join(lines) + "\n" output += self._colorize(lines, count) return output
[ "def", "_duplicate_queries", "(", "self", ",", "output", ")", ":", "if", "QC_SETTINGS", "[", "'DISPLAY_DUPLICATES'", "]", ":", "for", "query", ",", "count", "in", "self", ".", "queries", ".", "most_common", "(", "QC_SETTINGS", "[", "'DISPLAY_DUPLICATES'", "]", ")", ":", "lines", "=", "[", "'\\nRepeated {0} times.'", ".", "format", "(", "count", ")", "]", "lines", "+=", "wrap", "(", "query", ")", "lines", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "+", "\"\\n\"", "output", "+=", "self", ".", "_colorize", "(", "lines", ",", "count", ")", "return", "output" ]
Appends the most common duplicate queries to the given output.
[ "Appends", "the", "most", "common", "duplicate", "queries", "to", "the", "given", "output", "." ]
61a380d98bc55e926c011367ecc2031102c3484c
https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L142-L150
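A sketch of the bookkeeping this method reads, assuming self.queries is a collections.Counter keyed by the raw SQL text (an assumption inferred from the most_common call above):

from collections import Counter

queries = Counter()
for sql in ('SELECT 1', 'SELECT 1', 'SELECT 2'):
    queries[sql] += 1  # one increment per executed query

queries.most_common(1)  # -> [('SELECT 1', 2)], most repeated query first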
172
bradmontgomery/django-querycount
querycount/middleware.py
QueryCountMiddleware._calculate_num_queries
def _calculate_num_queries(self): """ Calculate the total number of request and response queries. Used for count header and count table. """ request_totals = self._totals("request") response_totals = self._totals("response") return request_totals[2] + response_totals[2]
python
def _calculate_num_queries(self): request_totals = self._totals("request") response_totals = self._totals("response") return request_totals[2] + response_totals[2]
[ "def", "_calculate_num_queries", "(", "self", ")", ":", "request_totals", "=", "self", ".", "_totals", "(", "\"request\"", ")", "response_totals", "=", "self", ".", "_totals", "(", "\"response\"", ")", "return", "request_totals", "[", "2", "]", "+", "response_totals", "[", "2", "]" ]
Calculate the total number of request and response queries. Used for count header and count table.
[ "Calculate", "the", "total", "number", "of", "request", "and", "response", "queries", ".", "Used", "for", "count", "header", "and", "count", "table", "." ]
61a380d98bc55e926c011367ecc2031102c3484c
https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/middleware.py#L193-L201
173
bradmontgomery/django-querycount
querycount/qc_settings.py
_process_settings
def _process_settings(**kwargs): """ Apply user supplied settings. """ # If we are in this method due to a signal, only reload for our settings setting_name = kwargs.get('setting', None) if setting_name is not None and setting_name != 'QUERYCOUNT': return # Support the old-style settings if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False): QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS # Apply new-style settings if not getattr(settings, 'QUERYCOUNT', False): return # Duplicate display is a special case, configure it specifically if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT: duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES'] if duplicate_settings is not None: duplicate_settings = int(duplicate_settings) QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings # Apply the rest of the setting overrides for key in ['THRESHOLDS', 'IGNORE_REQUEST_PATTERNS', 'IGNORE_SQL_PATTERNS', 'IGNORE_PATTERNS', 'RESPONSE_HEADER']: if key in settings.QUERYCOUNT: QC_SETTINGS[key] = settings.QUERYCOUNT[key]
python
def _process_settings(**kwargs): # If we are in this method due to a signal, only reload for our settings setting_name = kwargs.get('setting', None) if setting_name is not None and setting_name != 'QUERYCOUNT': return # Support the old-style settings if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False): QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS # Apply new-style settings if not getattr(settings, 'QUERYCOUNT', False): return # Duplicate display is a special case, configure it specifically if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT: duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES'] if duplicate_settings is not None: duplicate_settings = int(duplicate_settings) QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings # Apply the rest of the setting overrides for key in ['THRESHOLDS', 'IGNORE_REQUEST_PATTERNS', 'IGNORE_SQL_PATTERNS', 'IGNORE_PATTERNS', 'RESPONSE_HEADER']: if key in settings.QUERYCOUNT: QC_SETTINGS[key] = settings.QUERYCOUNT[key]
[ "def", "_process_settings", "(", "*", "*", "kwargs", ")", ":", "# If we are in this method due to a signal, only reload for our settings", "setting_name", "=", "kwargs", ".", "get", "(", "'setting'", ",", "None", ")", "if", "setting_name", "is", "not", "None", "and", "setting_name", "!=", "'QUERYCOUNT'", ":", "return", "# Support the old-style settings", "if", "getattr", "(", "settings", ",", "'QUERYCOUNT_THRESHOLDS'", ",", "False", ")", ":", "QC_SETTINGS", "[", "'THRESHOLDS'", "]", "=", "settings", ".", "QUERYCOUNT_THRESHOLDS", "# Apply new-style settings", "if", "not", "getattr", "(", "settings", ",", "'QUERYCOUNT'", ",", "False", ")", ":", "return", "# Duplicate display is a special case, configure it specifically", "if", "'DISPLAY_DUPLICATES'", "in", "settings", ".", "QUERYCOUNT", ":", "duplicate_settings", "=", "settings", ".", "QUERYCOUNT", "[", "'DISPLAY_DUPLICATES'", "]", "if", "duplicate_settings", "is", "not", "None", ":", "duplicate_settings", "=", "int", "(", "duplicate_settings", ")", "QC_SETTINGS", "[", "'DISPLAY_DUPLICATES'", "]", "=", "duplicate_settings", "# Apply the rest of the setting overrides", "for", "key", "in", "[", "'THRESHOLDS'", ",", "'IGNORE_REQUEST_PATTERNS'", ",", "'IGNORE_SQL_PATTERNS'", ",", "'IGNORE_PATTERNS'", ",", "'RESPONSE_HEADER'", "]", ":", "if", "key", "in", "settings", ".", "QUERYCOUNT", ":", "QC_SETTINGS", "[", "key", "]", "=", "settings", ".", "QUERYCOUNT", "[", "key", "]" ]
Apply user supplied settings.
[ "Apply", "user", "supplied", "settings", "." ]
61a380d98bc55e926c011367ecc2031102c3484c
https://github.com/bradmontgomery/django-querycount/blob/61a380d98bc55e926c011367ecc2031102c3484c/querycount/qc_settings.py#L23-L55
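A hypothetical Django settings snippet exercising every key the loop above recognises; the THRESHOLDS sub-keys and the header name shown here are illustrative assumptions, not verified defaults:

# settings.py (illustrative)
QUERYCOUNT = {
    'THRESHOLDS': {'MEDIUM': 50, 'HIGH': 200},   # assumed sub-keys
    'IGNORE_REQUEST_PATTERNS': [r'^/admin/'],
    'IGNORE_SQL_PATTERNS': [r'SAVEPOINT'],
    'IGNORE_PATTERNS': [],
    'DISPLAY_DUPLICATES': 5,                     # int, or None to disable
    'RESPONSE_HEADER': 'X-DjangoQueryCount-Count',
}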
174
xiyouMc/ncmbot
ncmbot/core.py
NCloudBot._get_webapi_requests
def _get_webapi_requests(self): """Update headers of webapi for Requests.""" headers = { 'Accept': '*/*', 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4', 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'http://music.163.com', 'Host': 'music.163.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36' } NCloudBot.req.headers.update(headers) return NCloudBot.req
python
def _get_webapi_requests(self): headers = { 'Accept': '*/*', 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4', 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'http://music.163.com', 'Host': 'music.163.com', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36' } NCloudBot.req.headers.update(headers) return NCloudBot.req
[ "def", "_get_webapi_requests", "(", "self", ")", ":", "headers", "=", "{", "'Accept'", ":", "'*/*'", ",", "'Accept-Language'", ":", "'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4'", ",", "'Connection'", ":", "'keep-alive'", ",", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", ",", "'Referer'", ":", "'http://music.163.com'", ",", "'Host'", ":", "'music.163.com'", ",", "'User-Agent'", ":", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36'", "}", "NCloudBot", ".", "req", ".", "headers", ".", "update", "(", "headers", ")", "return", "NCloudBot", ".", "req" ]
Update headers of webapi for Requests.
[ "Update", "headers", "of", "webapi", "for", "Requests", "." ]
c4832f3ee7630ba104a89559f09c1fc366d1547b
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L104-L124
175
xiyouMc/ncmbot
ncmbot/core.py
NCloudBot._build_response
def _build_response(self, resp): """Build internal Response object from given response.""" # rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username) self.response.content = resp.content self.response.status_code = resp.status_code self.response.headers = resp.headers
python
def _build_response(self, resp): # rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username) self.response.content = resp.content self.response.status_code = resp.status_code self.response.headers = resp.headers
[ "def", "_build_response", "(", "self", ",", "resp", ")", ":", "# rememberLogin", "# if self.method is 'LOGIN' and resp.json().get('code') == 200:", "# cookiesJar.save_cookies(resp, NCloudBot.username)", "self", ".", "response", ".", "content", "=", "resp", ".", "content", "self", ".", "response", ".", "status_code", "=", "resp", ".", "status_code", "self", ".", "response", ".", "headers", "=", "resp", ".", "headers" ]
Build internal Response object from given response.
[ "Build", "internal", "Response", "object", "from", "given", "response", "." ]
c4832f3ee7630ba104a89559f09c1fc366d1547b
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L141-L148
176
xiyouMc/ncmbot
ncmbot/core.py
NCloudBot.send
def send(self):
    """Sends the request."""
    success = False
    if self.method is None:
        raise ParamsError()
    try:
        if self.method == 'SEARCH':
            req = self._get_requests()
            _url = self.__NETEAST_HOST + self._METHODS[self.method]
            resp = req.post(_url, data=self.data)
            self._build_response(resp)
            self.response.ok = True
        else:
            if isinstance(self.data, dict):
                data = encrypted_request(self.data)
            req = self._get_webapi_requests()
            _url = self.__NETEAST_HOST + self._METHODS[self.method]

            if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'):
                _url = _url % self.params['uid']
            if self.method in ('LYRIC', 'MUSIC_COMMENT'):
                _url = _url % self.params['id']
            # GET
            if self.method in ('LYRIC'):
                resp = req.get(_url)
            else:
                resp = req.post(_url, data=data)
            self._build_response(resp)
            self.response.ok = True
    except Exception as why:
        traceback.print_exc()
        print 'Requests Exception', why
        # self._build_response(why)
        self.response.error = why
python
def send(self): success = False if self.method is None: raise ParamsError() try: if self.method == 'SEARCH': req = self._get_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] resp = req.post(_url, data=self.data) self._build_response(resp) self.response.ok = True else: if isinstance(self.data, dict): data = encrypted_request(self.data) req = self._get_webapi_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'): _url = _url % self.params['uid'] if self.method in ('LYRIC', 'MUSIC_COMMENT'): _url = _url % self.params['id'] # GET if self.method in ('LYRIC'): resp = req.get(_url) else: resp = req.post(_url, data=data) self._build_response(resp) self.response.ok = True except Exception as why: traceback.print_exc() print 'Requests Exception', why # self._build_response(why) self.response.error = why
[ "def", "send", "(", "self", ")", ":", "success", "=", "False", "if", "self", ".", "method", "is", "None", ":", "raise", "ParamsError", "(", ")", "try", ":", "if", "self", ".", "method", "==", "'SEARCH'", ":", "req", "=", "self", ".", "_get_requests", "(", ")", "_url", "=", "self", ".", "__NETEAST_HOST", "+", "self", ".", "_METHODS", "[", "self", ".", "method", "]", "resp", "=", "req", ".", "post", "(", "_url", ",", "data", "=", "self", ".", "data", ")", "self", ".", "_build_response", "(", "resp", ")", "self", ".", "response", ".", "ok", "=", "True", "else", ":", "if", "isinstance", "(", "self", ".", "data", ",", "dict", ")", ":", "data", "=", "encrypted_request", "(", "self", ".", "data", ")", "req", "=", "self", ".", "_get_webapi_requests", "(", ")", "_url", "=", "self", ".", "__NETEAST_HOST", "+", "self", ".", "_METHODS", "[", "self", ".", "method", "]", "if", "self", ".", "method", "in", "(", "'USER_DJ'", ",", "'USER_FOLLOWS'", ",", "'USER_EVENT'", ")", ":", "_url", "=", "_url", "%", "self", ".", "params", "[", "'uid'", "]", "if", "self", ".", "method", "in", "(", "'LYRIC'", ",", "'MUSIC_COMMENT'", ")", ":", "_url", "=", "_url", "%", "self", ".", "params", "[", "'id'", "]", "# GET", "if", "self", ".", "method", "in", "(", "'LYRIC'", ")", ":", "resp", "=", "req", ".", "get", "(", "_url", ")", "else", ":", "resp", "=", "req", ".", "post", "(", "_url", ",", "data", "=", "data", ")", "self", ".", "_build_response", "(", "resp", ")", "self", ".", "response", ".", "ok", "=", "True", "except", "Exception", "as", "why", ":", "traceback", ".", "print_exc", "(", ")", "print", "'Requests Exception'", ",", "why", "# self._build_response(why)", "self", ".", "response", ".", "error", "=", "why" ]
Sends the request.
[ "Sends", "the", "request", "." ]
c4832f3ee7630ba104a89559f09c1fc366d1547b
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L150-L185
177
has2k1/plydata
plydata/options.py
set_option
def set_option(name, value): """ Set plydata option Parameters ---------- name : str Name of the option value : object New value of the option Returns ------- old : object Old value of the option See also -------- :class:`options` """ old = get_option(name) globals()[name] = value return old
python
def set_option(name, value): old = get_option(name) globals()[name] = value return old
[ "def", "set_option", "(", "name", ",", "value", ")", ":", "old", "=", "get_option", "(", "name", ")", "globals", "(", ")", "[", "name", "]", "=", "value", "return", "old" ]
Set plydata option Parameters ---------- name : str Name of the option value : object New value of the option Returns ------- old : object Old value of the option See also -------- :class:`options`
[ "Set", "plydata", "option" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/options.py#L45-L67
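Usage sketch: because set_option returns the previous value, a caller can restore state when done. 'modify_input_data' is used here as an example option name; the authoritative names are whichever module-level globals plydata.options actually defines:

from plydata.options import set_option

old = set_option('modify_input_data', True)  # example option name
# ... run verbs that may mutate their input ...
set_option('modify_input_data', old)         # restore the previous value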
178
has2k1/plydata
plydata/types.py
GroupedDataFrame.group_indices
def group_indices(self): """ Return group indices """ # No groups if not self.plydata_groups: return np.ones(len(self), dtype=int) grouper = self.groupby() indices = np.empty(len(self), dtype=int) for i, (_, idx) in enumerate(sorted(grouper.indices.items())): indices[idx] = i return indices
python
def group_indices(self): # No groups if not self.plydata_groups: return np.ones(len(self), dtype=int) grouper = self.groupby() indices = np.empty(len(self), dtype=int) for i, (_, idx) in enumerate(sorted(grouper.indices.items())): indices[idx] = i return indices
[ "def", "group_indices", "(", "self", ")", ":", "# No groups", "if", "not", "self", ".", "plydata_groups", ":", "return", "np", ".", "ones", "(", "len", "(", "self", ")", ",", "dtype", "=", "int", ")", "grouper", "=", "self", ".", "groupby", "(", ")", "indices", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "int", ")", "for", "i", ",", "(", "_", ",", "idx", ")", "in", "enumerate", "(", "sorted", "(", "grouper", ".", "indices", ".", "items", "(", ")", ")", ")", ":", "indices", "[", "idx", "]", "=", "i", "return", "indices" ]
Return group indices
[ "Return", "group", "indices" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/types.py#L49-L61
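A small sketch of the mapping this computes, reusing the GroupedDataFrame constructor shown later in this file and calling group_indices as the bare def above suggests; groups are numbered in sorted-key order:

gdf = GroupedDataFrame({'g': list('baba'), 'x': range(4)}, groups=['g'])
gdf.group_indices()
# 'a' -> 0 and 'b' -> 1 after sorting, so the result is [1, 0, 1, 0]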
179
has2k1/plydata
plydata/dataframe/helpers.py
_make_verb_helper
def _make_verb_helper(verb_func, add_groups=False):
    """
    Create function that prepares verb for the verb function

    The created functions add the expressions to be evaluated to
    the verb, then call the core verb function

    Parameters
    ----------
    verb_func : function
        Core verb function. This is the function called after
        expressions are created and added to the verb. The core
        function should be one of those that implement verbs that
        evaluate expressions.
    add_groups : bool
        If True, a groups attribute is added to the verb. The
        groups are the columns created after evaluating the
        expressions.

    Returns
    -------
    out : function
        A function that implements a helper verb.
    """
    @wraps(verb_func)
    def _verb_func(verb):
        verb.expressions, new_columns = build_expressions(verb)
        if add_groups:
            verb.groups = new_columns
        return verb_func(verb)

    return _verb_func
python
def _make_verb_helper(verb_func, add_groups=False): @wraps(verb_func) def _verb_func(verb): verb.expressions, new_columns = build_expressions(verb) if add_groups: verb.groups = new_columns return verb_func(verb) return _verb_func
[ "def", "_make_verb_helper", "(", "verb_func", ",", "add_groups", "=", "False", ")", ":", "@", "wraps", "(", "verb_func", ")", "def", "_verb_func", "(", "verb", ")", ":", "verb", ".", "expressions", ",", "new_columns", "=", "build_expressions", "(", "verb", ")", "if", "add_groups", ":", "verb", ".", "groups", "=", "new_columns", "return", "verb_func", "(", "verb", ")", "return", "_verb_func" ]
Create function that prepares verb for the verb function

The created functions add the expressions to be evaluated to
the verb, then call the core verb function

Parameters
----------
verb_func : function
    Core verb function. This is the function called after
    expressions are created and added to the verb. The core
    function should be one of those that implement verbs that
    evaluate expressions.
add_groups : bool
    If True, a groups attribute is added to the verb. The
    groups are the columns created after evaluating the
    expressions.

Returns
-------
out : function
    A function that implements a helper verb.
[ "Create", "function", "that", "prepares", "verb", "for", "the", "verb", "function" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/helpers.py#L156-L188
180
has2k1/plydata
plydata/dataframe/common.py
_get_base_dataframe
def _get_base_dataframe(df): """ Remove all columns other than those grouped on """ if isinstance(df, GroupedDataFrame): base_df = GroupedDataFrame( df.loc[:, df.plydata_groups], df.plydata_groups, copy=True) else: base_df = pd.DataFrame(index=df.index) return base_df
python
def _get_base_dataframe(df): if isinstance(df, GroupedDataFrame): base_df = GroupedDataFrame( df.loc[:, df.plydata_groups], df.plydata_groups, copy=True) else: base_df = pd.DataFrame(index=df.index) return base_df
[ "def", "_get_base_dataframe", "(", "df", ")", ":", "if", "isinstance", "(", "df", ",", "GroupedDataFrame", ")", ":", "base_df", "=", "GroupedDataFrame", "(", "df", ".", "loc", "[", ":", ",", "df", ".", "plydata_groups", "]", ",", "df", ".", "plydata_groups", ",", "copy", "=", "True", ")", "else", ":", "base_df", "=", "pd", ".", "DataFrame", "(", "index", "=", "df", ".", "index", ")", "return", "base_df" ]
Remove all columns other than those grouped on
[ "Remove", "all", "columns", "other", "than", "those", "grouped", "on" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L27-L37
181
has2k1/plydata
plydata/dataframe/common.py
_add_group_columns
def _add_group_columns(data, gdf): """ Add group columns to data with a value from the grouped dataframe It is assumed that the grouped dataframe contains a single group >>> data = pd.DataFrame({ ... 'x': [5, 6, 7]}) >>> gdf = GroupedDataFrame({ ... 'g': list('aaa'), ... 'x': range(3)}, groups=['g']) >>> _add_group_columns(data, gdf) g x 0 a 5 1 a 6 2 a 7 """ n = len(data) if isinstance(gdf, GroupedDataFrame): for i, col in enumerate(gdf.plydata_groups): if col not in data: group_values = [gdf[col].iloc[0]] * n # Need to be careful and maintain the dtypes # of the group columns if pdtypes.is_categorical_dtype(gdf[col]): col_values = pd.Categorical( group_values, categories=gdf[col].cat.categories, ordered=gdf[col].cat.ordered ) else: col_values = pd.Series( group_values, index=data.index, dtype=gdf[col].dtype ) # Group columns come first data.insert(i, col, col_values) return data
python
def _add_group_columns(data, gdf): n = len(data) if isinstance(gdf, GroupedDataFrame): for i, col in enumerate(gdf.plydata_groups): if col not in data: group_values = [gdf[col].iloc[0]] * n # Need to be careful and maintain the dtypes # of the group columns if pdtypes.is_categorical_dtype(gdf[col]): col_values = pd.Categorical( group_values, categories=gdf[col].cat.categories, ordered=gdf[col].cat.ordered ) else: col_values = pd.Series( group_values, index=data.index, dtype=gdf[col].dtype ) # Group columns come first data.insert(i, col, col_values) return data
[ "def", "_add_group_columns", "(", "data", ",", "gdf", ")", ":", "n", "=", "len", "(", "data", ")", "if", "isinstance", "(", "gdf", ",", "GroupedDataFrame", ")", ":", "for", "i", ",", "col", "in", "enumerate", "(", "gdf", ".", "plydata_groups", ")", ":", "if", "col", "not", "in", "data", ":", "group_values", "=", "[", "gdf", "[", "col", "]", ".", "iloc", "[", "0", "]", "]", "*", "n", "# Need to be careful and maintain the dtypes", "# of the group columns", "if", "pdtypes", ".", "is_categorical_dtype", "(", "gdf", "[", "col", "]", ")", ":", "col_values", "=", "pd", ".", "Categorical", "(", "group_values", ",", "categories", "=", "gdf", "[", "col", "]", ".", "cat", ".", "categories", ",", "ordered", "=", "gdf", "[", "col", "]", ".", "cat", ".", "ordered", ")", "else", ":", "col_values", "=", "pd", ".", "Series", "(", "group_values", ",", "index", "=", "data", ".", "index", ",", "dtype", "=", "gdf", "[", "col", "]", ".", "dtype", ")", "# Group columns come first", "data", ".", "insert", "(", "i", ",", "col", ",", "col_values", ")", "return", "data" ]
Add group columns to data with a value from the grouped dataframe It is assumed that the grouped dataframe contains a single group >>> data = pd.DataFrame({ ... 'x': [5, 6, 7]}) >>> gdf = GroupedDataFrame({ ... 'g': list('aaa'), ... 'x': range(3)}, groups=['g']) >>> _add_group_columns(data, gdf) g x 0 a 5 1 a 6 2 a 7
[ "Add", "group", "columns", "to", "data", "with", "a", "value", "from", "the", "grouped", "dataframe" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L40-L78
182
has2k1/plydata
plydata/dataframe/common.py
_create_column
def _create_column(data, col, value):
    """
    Create column in dataframe

    Helper method meant to deal with problematic column values.
    e.g. when the series index does not match that of the data.

    Parameters
    ----------
    data : pandas.DataFrame
        dataframe in which to insert value
    col : column label
        Column name
    value : object
        Value to assign to column

    Returns
    -------
    data : pandas.DataFrame
        Modified original dataframe

    >>> df = pd.DataFrame({'x': [1, 2, 3]})
    >>> y = pd.Series([11, 12, 13], index=[21, 22, 23])

    Data index and value index do not match

    >>> _create_column(df, 'y', y)
       x   y
    0  1  11
    1  2  12
    2  3  13

    Non-empty dataframe, scalar value

    >>> _create_column(df, 'z', 3)
       x   y  z
    0  1  11  3
    1  2  12  3
    2  3  13  3

    Empty dataframe, scalar value

    >>> df = pd.DataFrame()
    >>> _create_column(df, 'w', 3)
       w
    0  3
    >>> _create_column(df, 'z', 'abc')
       w    z
    0  3  abc
    """
    with suppress(AttributeError):
        # If the index of a series and the dataframe
        # in which the series will be assigned to a
        # column do not match, missing values/NaNs
        # are created. We do not want that.
        if not value.index.equals(data.index):
            if len(value) == len(data):
                value.index = data.index
            else:
                value.reset_index(drop=True, inplace=True)

    # You cannot assign a scalar value to a dataframe
    # without an index. You need an iterable value.
    if data.index.empty:
        try:
            len(value)
        except TypeError:
            scalar = True
        else:
            scalar = isinstance(value, str)

        if scalar:
            value = [value]
    data[col] = value
    return data
python
def _create_column(data, col, value):
    with suppress(AttributeError):
        # If the index of a series and the dataframe
        # in which the series will be assigned to a
        # column do not match, missing values/NaNs
        # are created. We do not want that.
        if not value.index.equals(data.index):
            if len(value) == len(data):
                value.index = data.index
            else:
                value.reset_index(drop=True, inplace=True)

    # You cannot assign a scalar value to a dataframe
    # without an index. You need an iterable value.
    if data.index.empty:
        try:
            len(value)
        except TypeError:
            scalar = True
        else:
            scalar = isinstance(value, str)

        if scalar:
            value = [value]
    data[col] = value
    return data
[ "def", "_create_column", "(", "data", ",", "col", ",", "value", ")", ":", "with", "suppress", "(", "AttributeError", ")", ":", "# If the index of a series and the dataframe", "# in which the series will be assigned to a", "# column do not match, missing values/NaNs", "# are created. We do not want that.", "if", "not", "value", ".", "index", ".", "equals", "(", "data", ".", "index", ")", ":", "if", "len", "(", "value", ")", "==", "len", "(", "data", ")", ":", "value", ".", "index", "=", "data", ".", "index", "else", ":", "value", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "# You cannot assign a scalar value to a dataframe", "# without an index. You need an iterable value.", "if", "data", ".", "index", ".", "empty", ":", "try", ":", "len", "(", "value", ")", "except", "TypeError", ":", "scalar", "=", "True", "else", ":", "scalar", "=", "isinstance", "(", "value", ",", "str", ")", "if", "scalar", ":", "value", "=", "[", "value", "]", "data", "[", "col", "]", "=", "value", "return", "data" ]
Create column in dataframe

Helper method meant to deal with problematic column values.
e.g. when the series index does not match that of the data.

Parameters
----------
data : pandas.DataFrame
    dataframe in which to insert value
col : column label
    Column name
value : object
    Value to assign to column

Returns
-------
data : pandas.DataFrame
    Modified original dataframe

>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> y = pd.Series([11, 12, 13], index=[21, 22, 23])

Data index and value index do not match

>>> _create_column(df, 'y', y)
   x   y
0  1  11
1  2  12
2  3  13

Non-empty dataframe, scalar value

>>> _create_column(df, 'z', 3)
   x   y  z
0  1  11  3
1  2  12  3
2  3  13  3

Empty dataframe, scalar value

>>> df = pd.DataFrame()
>>> _create_column(df, 'w', 3)
   w
0  3
>>> _create_column(df, 'z', 'abc')
   w    z
0  3  abc
[ "Create", "column", "in", "dataframe" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L81-L157
183
has2k1/plydata
plydata/dataframe/common.py
build_expressions
def build_expressions(verb):
    """
    Build expressions for helper verbs

    Parameters
    ----------
    verb : verb
        A verb with a *functions* attribute.

    Returns
    -------
    out : tuple
        (List of Expressions, New columns). The expressions and the
        new columns in which the results of those expressions will be
        stored. Even when a result will be stored in a column with an
        existing label, that column is still considered new, i.e. an
        expression ``x='x+1'`` will create a new_column `x` to replace
        an old column `x`.
    """
    def partial(func, col, *args, **kwargs):
        """
        Make a function that acts on a column in a dataframe

        Parameters
        ----------
        func : callable
            Function
        col : str
            Column
        args : tuple
            Arguments to pass to func
        kwargs : dict
            Keyword arguments to func

        Results
        -------
        new_func : callable
            Function that takes a dataframe, and calls the
            original function on a column in the dataframe.
        """
        def new_func(gdf):
            return func(gdf[col], *args, **kwargs)
        return new_func

    def make_statement(func, col):
        """
        A statement of function called on a column in a dataframe

        Parameters
        ----------
        func : str or callable
            Function to call on a dataframe column
        col : str
            Column
        """
        if isinstance(func, str):
            expr = '{}({})'.format(func, col)
        elif callable(func):
            expr = partial(func, col, *verb.args, **verb.kwargs)
        else:
            raise TypeError("{} is not a function".format(func))
        return expr

    def func_name(func):
        """
        Return name of a function.

        If the function is `np.sin`, we return `sin`.
        """
        if isinstance(func, str):
            return func
        try:
            return func.__name__
        except AttributeError:
            return ''

    # Generate function names. They act as identifiers (postfixed
    # to the original columns) in the new_column names.
    if isinstance(verb.functions, (tuple, list)):
        names = (func_name(func) for func in verb.functions)
        names_and_functions = zip(names, verb.functions)
    else:
        names_and_functions = verb.functions.items()

    # Create statements for the expressions
    # and postfix identifiers
    columns = Selector.get(verb)  # columns to act on
    postfixes = []
    stmts = []
    for name, func in names_and_functions:
        postfixes.append(name)
        for col in columns:
            stmts.append(make_statement(func, col))

    if not stmts:
        stmts = columns

    # Names of the new columns
    # e.g col1_mean, col2_mean, col1_std, col2_std
    add_postfix = (isinstance(verb.functions, dict) or
                   len(verb.functions) > 1)
    if add_postfix:
        fmt = '{}_{}'.format
        new_columns = [fmt(c, p) for p in postfixes for c in columns]
    else:
        new_columns = columns

    expressions = [Expression(stmt, col)
                   for stmt, col in zip(stmts, new_columns)]

    return expressions, new_columns
python
def build_expressions(verb): def partial(func, col, *args, **kwargs): """ Make a function that acts on a column in a dataframe Parameters ---------- func : callable Function col : str Column args : tuple Arguments to pass to func kwargs : dict Keyword arguments to func Results ------- new_func : callable Function that takes a dataframe, and calls the original function on a column in the dataframe. """ def new_func(gdf): return func(gdf[col], *args, **kwargs) return new_func def make_statement(func, col): """ A statement of function called on a column in a dataframe Parameters ---------- func : str or callable Function to call on a dataframe column col : str Column """ if isinstance(func, str): expr = '{}({})'.format(func, col) elif callable(func): expr = partial(func, col, *verb.args, **verb.kwargs) else: raise TypeError("{} is not a function".format(func)) return expr def func_name(func): """ Return name of a function. If the function is `np.sin`, we return `sin`. """ if isinstance(func, str): return func try: return func.__name__ except AttributeError: return '' # Generate function names. They act as identifiers (postfixed # to the original columns) in the new_column names. if isinstance(verb.functions, (tuple, list)): names = (func_name(func) for func in verb.functions) names_and_functions = zip(names, verb.functions) else: names_and_functions = verb.functions.items() # Create statements for the expressions # and postfix identifiers columns = Selector.get(verb) # columns to act on postfixes = [] stmts = [] for name, func in names_and_functions: postfixes.append(name) for col in columns: stmts.append(make_statement(func, col)) if not stmts: stmts = columns # Names of the new columns # e.g col1_mean, col2_mean, col1_std, col2_std add_postfix = (isinstance(verb.functions, dict) or len(verb.functions) > 1) if add_postfix: fmt = '{}_{}'.format new_columns = [fmt(c, p) for p in postfixes for c in columns] else: new_columns = columns expressions = [Expression(stmt, col) for stmt, col in zip(stmts, new_columns)] return expressions, new_columns
[ "def", "build_expressions", "(", "verb", ")", ":", "def", "partial", "(", "func", ",", "col", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Make a function that acts on a column in a dataframe\n\n Parameters\n ----------\n func : callable\n Function\n col : str\n Column\n args : tuple\n Arguments to pass to func\n kwargs : dict\n Keyword arguments to func\n\n Results\n -------\n new_func : callable\n Function that takes a dataframe, and calls the\n original function on a column in the dataframe.\n \"\"\"", "def", "new_func", "(", "gdf", ")", ":", "return", "func", "(", "gdf", "[", "col", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_func", "def", "make_statement", "(", "func", ",", "col", ")", ":", "\"\"\"\n A statement of function called on a column in a dataframe\n\n Parameters\n ----------\n func : str or callable\n Function to call on a dataframe column\n col : str\n Column\n \"\"\"", "if", "isinstance", "(", "func", ",", "str", ")", ":", "expr", "=", "'{}({})'", ".", "format", "(", "func", ",", "col", ")", "elif", "callable", "(", "func", ")", ":", "expr", "=", "partial", "(", "func", ",", "col", ",", "*", "verb", ".", "args", ",", "*", "*", "verb", ".", "kwargs", ")", "else", ":", "raise", "TypeError", "(", "\"{} is not a function\"", ".", "format", "(", "func", ")", ")", "return", "expr", "def", "func_name", "(", "func", ")", ":", "\"\"\"\n Return name of a function.\n\n If the function is `np.sin`, we return `sin`.\n \"\"\"", "if", "isinstance", "(", "func", ",", "str", ")", ":", "return", "func", "try", ":", "return", "func", ".", "__name__", "except", "AttributeError", ":", "return", "''", "# Generate function names. They act as identifiers (postfixed", "# to the original columns) in the new_column names.", "if", "isinstance", "(", "verb", ".", "functions", ",", "(", "tuple", ",", "list", ")", ")", ":", "names", "=", "(", "func_name", "(", "func", ")", "for", "func", "in", "verb", ".", "functions", ")", "names_and_functions", "=", "zip", "(", "names", ",", "verb", ".", "functions", ")", "else", ":", "names_and_functions", "=", "verb", ".", "functions", ".", "items", "(", ")", "# Create statements for the expressions", "# and postfix identifiers", "columns", "=", "Selector", ".", "get", "(", "verb", ")", "# columns to act on", "postfixes", "=", "[", "]", "stmts", "=", "[", "]", "for", "name", ",", "func", "in", "names_and_functions", ":", "postfixes", ".", "append", "(", "name", ")", "for", "col", "in", "columns", ":", "stmts", ".", "append", "(", "make_statement", "(", "func", ",", "col", ")", ")", "if", "not", "stmts", ":", "stmts", "=", "columns", "# Names of the new columns", "# e.g col1_mean, col2_mean, col1_std, col2_std", "add_postfix", "=", "(", "isinstance", "(", "verb", ".", "functions", ",", "dict", ")", "or", "len", "(", "verb", ".", "functions", ")", ">", "1", ")", "if", "add_postfix", ":", "fmt", "=", "'{}_{}'", ".", "format", "new_columns", "=", "[", "fmt", "(", "c", ",", "p", ")", "for", "p", "in", "postfixes", "for", "c", "in", "columns", "]", "else", ":", "new_columns", "=", "columns", "expressions", "=", "[", "Expression", "(", "stmt", ",", "col", ")", "for", "stmt", ",", "col", "in", "zip", "(", "stmts", ",", "new_columns", ")", "]", "return", "expressions", ",", "new_columns" ]
Build expressions for helper verbs

Parameters
----------
verb : verb
    A verb with a *functions* attribute.

Returns
-------
out : tuple
    (List of Expressions, New columns). The expressions and the
    new columns in which the results of those expressions will be
    stored. Even when a result will be stored in a column with an
    existing label, that column is still considered new, i.e. an
    expression ``x='x+1'`` will create a new_column `x` to replace
    an old column `x`.
[ "Build", "expressions", "for", "helper", "verbs" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L502-L613
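The naming rule at the end deserves a worked illustration: with more than one function (or a dict of functions), every result column gets a function-name postfix, and the comprehension varies functions slowest:

columns = ['x', 'y']          # columns the verb acts on
postfixes = ['mean', 'std']   # derived from the function names
['{}_{}'.format(c, p) for p in postfixes for c in columns]
# -> ['x_mean', 'y_mean', 'x_std', 'y_std']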
184
has2k1/plydata
plydata/dataframe/common.py
Evaluator.process
def process(self):
    """
    Run the expressions

    Returns
    -------
    out : pandas.DataFrame
        Resulting data
    """
    # Short cut
    if self._all_expressions_evaluated():
        if self.drop:
            # Drop extra columns. They do not correspond to
            # any expressions.
            columns = [expr.column for expr in self.expressions]
            self.data = self.data.loc[:, columns]
        return self.data

    # group_by
    # evaluate expressions
    # combine columns
    # concat evaluated group data and clean up index and group
    gdfs = self._get_group_dataframes()
    egdfs = self._evaluate_expressions(gdfs)
    edata = self._concat(egdfs)
    return edata
python
def process(self):
    # Short cut
    if self._all_expressions_evaluated():
        if self.drop:
            # Drop extra columns. They do not correspond to
            # any expressions.
            columns = [expr.column for expr in self.expressions]
            self.data = self.data.loc[:, columns]
        return self.data

    # group_by
    # evaluate expressions
    # combine columns
    # concat evaluated group data and clean up index and group
    gdfs = self._get_group_dataframes()
    egdfs = self._evaluate_expressions(gdfs)
    edata = self._concat(egdfs)
    return edata
[ "def", "process", "(", "self", ")", ":", "# Short cut", "if", "self", ".", "_all_expressions_evaluated", "(", ")", ":", "if", "self", ".", "drop", ":", "# Drop extra columns. They do not correspond to", "# any expressions.", "columns", "=", "[", "expr", ".", "column", "for", "expr", "in", "self", ".", "expressions", "]", "self", ".", "data", "=", "self", ".", "data", ".", "loc", "[", ":", ",", "columns", "]", "return", "self", ".", "data", "# group_by", "# evaluate expressions", "# combine columns", "# concat evaluated group data and clean up index and group", "gdfs", "=", "self", ".", "_get_group_dataframes", "(", ")", "egdfs", "=", "self", ".", "_evaluate_expressions", "(", "gdfs", ")", "edata", "=", "self", ".", "_concat", "(", "egdfs", ")", "return", "edata" ]
Run the expressions Returns ------- out : pandas.DataFrame Resulting data
[ "Run", "the", "expressions" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L195-L220
185
has2k1/plydata
plydata/dataframe/common.py
Evaluator._all_expressions_evaluated
def _all_expressions_evaluated(self):
    """
    Return True if all expressions match the columns

    Saves some processor cycles
    """
    def present(expr):
        return expr.stmt == expr.column and expr.column in self.data
    return all(present(expr) for expr in self.expressions)
python
def _all_expressions_evaluated(self): def present(expr): return expr.stmt == expr.column and expr.column in self.data return all(present(expr) for expr in self.expressions)
[ "def", "_all_expressions_evaluated", "(", "self", ")", ":", "def", "present", "(", "expr", ")", ":", "return", "expr", ".", "stmt", "==", "expr", ".", "column", "and", "expr", ".", "column", "in", "self", ".", "data", "return", "all", "(", "present", "(", "expr", ")", "for", "expr", "in", "self", ".", "expressions", ")" ]
Return True if all expressions match the columns

Saves some processor cycles
[ "Return", "True", "if", "all", "expressions", "match", "the", "columns" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L222-L230
186
has2k1/plydata
plydata/dataframe/common.py
Evaluator._get_group_dataframes
def _get_group_dataframes(self): """ Get group dataframes Returns ------- out : tuple or generator Group dataframes """ if isinstance(self.data, GroupedDataFrame): grouper = self.data.groupby() # groupby on categorical columns uses the categories # even if they are not present in the data. This # leads to empty groups. We exclude them. return (gdf for _, gdf in grouper if not gdf.empty) else: return (self.data, )
python
def _get_group_dataframes(self): if isinstance(self.data, GroupedDataFrame): grouper = self.data.groupby() # groupby on categorical columns uses the categories # even if they are not present in the data. This # leads to empty groups. We exclude them. return (gdf for _, gdf in grouper if not gdf.empty) else: return (self.data, )
[ "def", "_get_group_dataframes", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "data", ",", "GroupedDataFrame", ")", ":", "grouper", "=", "self", ".", "data", ".", "groupby", "(", ")", "# groupby on categorical columns uses the categories", "# even if they are not present in the data. This", "# leads to empty groups. We exclude them.", "return", "(", "gdf", "for", "_", ",", "gdf", "in", "grouper", "if", "not", "gdf", ".", "empty", ")", "else", ":", "return", "(", "self", ".", "data", ",", ")" ]
Get group dataframes Returns ------- out : tuple or generator Group dataframes
[ "Get", "group", "dataframes" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L232-L248
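Why the generator above filters out empty groups: with pandas' longstanding observed=False default, grouping on a categorical column yields a (possibly empty) group for every category, used or not. A sketch:

import pandas as pd

g = pd.Categorical(['a', 'a'], categories=['a', 'b'])
df = pd.DataFrame({'g': g, 'x': [1, 2]})
[(name, len(gdf)) for name, gdf in df.groupby('g')]
# -> [('a', 2), ('b', 0)]; the empty 'b' group is what gets excluded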
187
has2k1/plydata
plydata/dataframe/common.py
Evaluator._evaluate_group_dataframe
def _evaluate_group_dataframe(self, gdf): """ Evaluate a single group dataframe Parameters ---------- gdf : pandas.DataFrame Input group dataframe Returns ------- out : pandas.DataFrame Result data """ gdf._is_copy = None result_index = gdf.index if self.keep_index else [] data = pd.DataFrame(index=result_index) for expr in self.expressions: value = expr.evaluate(gdf, self.env) if isinstance(value, pd.DataFrame): data = value break else: _create_column(data, expr.column, value) data = _add_group_columns(data, gdf) return data
python
def _evaluate_group_dataframe(self, gdf): gdf._is_copy = None result_index = gdf.index if self.keep_index else [] data = pd.DataFrame(index=result_index) for expr in self.expressions: value = expr.evaluate(gdf, self.env) if isinstance(value, pd.DataFrame): data = value break else: _create_column(data, expr.column, value) data = _add_group_columns(data, gdf) return data
[ "def", "_evaluate_group_dataframe", "(", "self", ",", "gdf", ")", ":", "gdf", ".", "_is_copy", "=", "None", "result_index", "=", "gdf", ".", "index", "if", "self", ".", "keep_index", "else", "[", "]", "data", "=", "pd", ".", "DataFrame", "(", "index", "=", "result_index", ")", "for", "expr", "in", "self", ".", "expressions", ":", "value", "=", "expr", ".", "evaluate", "(", "gdf", ",", "self", ".", "env", ")", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ")", ":", "data", "=", "value", "break", "else", ":", "_create_column", "(", "data", ",", "expr", ".", "column", ",", "value", ")", "data", "=", "_add_group_columns", "(", "data", ",", "gdf", ")", "return", "data" ]
Evaluate a single group dataframe Parameters ---------- gdf : pandas.DataFrame Input group dataframe Returns ------- out : pandas.DataFrame Result data
[ "Evaluate", "a", "single", "group", "dataframe" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L266-L291
188
has2k1/plydata
plydata/dataframe/common.py
Evaluator._concat
def _concat(self, egdfs): """ Concatenate evaluated group dataframes Parameters ---------- egdfs : iterable Evaluated dataframes Returns ------- edata : pandas.DataFrame Evaluated data """ egdfs = list(egdfs) edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False) # groupby can mixup the rows. We try to maintain the original # order, but we can only do that if the result has a one to # one relationship with the original one2one = ( self.keep_index and not any(edata.index.duplicated()) and len(edata.index) == len(self.data.index)) if one2one: edata = edata.sort_index() else: edata.reset_index(drop=True, inplace=True) # Maybe this should happen in the verb functions if self.keep_groups and self.groups: edata = GroupedDataFrame(edata, groups=self.groups) return edata
python
def _concat(self, egdfs): egdfs = list(egdfs) edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False) # groupby can mixup the rows. We try to maintain the original # order, but we can only do that if the result has a one to # one relationship with the original one2one = ( self.keep_index and not any(edata.index.duplicated()) and len(edata.index) == len(self.data.index)) if one2one: edata = edata.sort_index() else: edata.reset_index(drop=True, inplace=True) # Maybe this should happen in the verb functions if self.keep_groups and self.groups: edata = GroupedDataFrame(edata, groups=self.groups) return edata
[ "def", "_concat", "(", "self", ",", "egdfs", ")", ":", "egdfs", "=", "list", "(", "egdfs", ")", "edata", "=", "pd", ".", "concat", "(", "egdfs", ",", "axis", "=", "0", ",", "ignore_index", "=", "False", ",", "copy", "=", "False", ")", "# groupby can mixup the rows. We try to maintain the original", "# order, but we can only do that if the result has a one to", "# one relationship with the original", "one2one", "=", "(", "self", ".", "keep_index", "and", "not", "any", "(", "edata", ".", "index", ".", "duplicated", "(", ")", ")", "and", "len", "(", "edata", ".", "index", ")", "==", "len", "(", "self", ".", "data", ".", "index", ")", ")", "if", "one2one", ":", "edata", "=", "edata", ".", "sort_index", "(", ")", "else", ":", "edata", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "# Maybe this should happen in the verb functions", "if", "self", ".", "keep_groups", "and", "self", ".", "groups", ":", "edata", "=", "GroupedDataFrame", "(", "edata", ",", "groups", "=", "self", ".", "groups", ")", "return", "edata" ]
Concatenate evaluated group dataframes Parameters ---------- egdfs : iterable Evaluated dataframes Returns ------- edata : pandas.DataFrame Evaluated data
[ "Concatenate", "evaluated", "group", "dataframes" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L293-L325
189
has2k1/plydata
plydata/dataframe/common.py
Selector._resolve_slices
def _resolve_slices(data_columns, names): """ Convert any slices into column names Parameters ---------- data_columns : pandas.Index Dataframe columns names : tuple Names (including slices) of columns in the dataframe. Returns ------- out : tuple Names of columns in the dataframe. Has no slices. """ def _get_slice_cols(sc): """ Convert slice to list of names """ # Just like pandas.DataFrame.loc the stop # column is included idx_start = data_columns.get_loc(sc.start) idx_stop = data_columns.get_loc(sc.stop) + 1 return data_columns[idx_start:idx_stop:sc.step] result = [] for col in names: if isinstance(col, slice): result.extend(_get_slice_cols(col)) else: result.append(col) return tuple(result)
python
def _resolve_slices(data_columns, names): def _get_slice_cols(sc): """ Convert slice to list of names """ # Just like pandas.DataFrame.loc the stop # column is included idx_start = data_columns.get_loc(sc.start) idx_stop = data_columns.get_loc(sc.stop) + 1 return data_columns[idx_start:idx_stop:sc.step] result = [] for col in names: if isinstance(col, slice): result.extend(_get_slice_cols(col)) else: result.append(col) return tuple(result)
[ "def", "_resolve_slices", "(", "data_columns", ",", "names", ")", ":", "def", "_get_slice_cols", "(", "sc", ")", ":", "\"\"\"\n Convert slice to list of names\n \"\"\"", "# Just like pandas.DataFrame.loc the stop", "# column is included", "idx_start", "=", "data_columns", ".", "get_loc", "(", "sc", ".", "start", ")", "idx_stop", "=", "data_columns", ".", "get_loc", "(", "sc", ".", "stop", ")", "+", "1", "return", "data_columns", "[", "idx_start", ":", "idx_stop", ":", "sc", ".", "step", "]", "result", "=", "[", "]", "for", "col", "in", "names", ":", "if", "isinstance", "(", "col", ",", "slice", ")", ":", "result", ".", "extend", "(", "_get_slice_cols", "(", "col", ")", ")", "else", ":", "result", ".", "append", "(", "col", ")", "return", "tuple", "(", "result", ")" ]
Convert any slices into column names Parameters ---------- data_columns : pandas.Index Dataframe columns names : tuple Names (including slices) of columns in the dataframe. Returns ------- out : tuple Names of columns in the dataframe. Has no slices.
[ "Convert", "any", "slices", "into", "column", "names" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L333-L367
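A sketch of the inclusive-stop behaviour, calling _resolve_slices directly (its bare two-argument signature suggests it is a staticmethod on Selector):

import pandas as pd

columns = pd.Index(['a', 'b', 'c', 'd'])
Selector._resolve_slices(columns, (slice('b', 'd'), 'a'))
# -> ('b', 'c', 'd', 'a'); like df.loc, the stop column 'd' is included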
190
has2k1/plydata
plydata/dataframe/common.py
Selector.select
def select(cls, verb): """ Return selected columns for the select verb Parameters ---------- verb : object verb with the column selection attributes: - names - startswith - endswith - contains - matches """ columns = verb.data.columns contains = verb.contains matches = verb.matches groups = _get_groups(verb) names = cls._resolve_slices(columns, verb.names) names_set = set(names) groups_set = set(groups) lst = [[]] if names or groups: # group variable missing from the selection are prepended missing = [g for g in groups if g not in names_set] missing_set = set(missing) c1 = missing + [x for x in names if x not in missing_set] lst.append(c1) if verb.startswith: c2 = [x for x in columns if isinstance(x, str) and x.startswith(verb.startswith)] lst.append(c2) if verb.endswith: c3 = [x for x in columns if isinstance(x, str) and x.endswith(verb.endswith)] lst.append(c3) if contains: c4 = [] for col in columns: if (isinstance(col, str) and any(s in col for s in contains)): c4.append(col) lst.append(c4) if matches: c5 = [] patterns = [x if hasattr(x, 'match') else re.compile(x) for x in matches] for col in columns: if isinstance(col, str): if any(bool(p.match(col)) for p in patterns): c5.append(col) lst.append(c5) selected = unique(list(itertools.chain(*lst))) if verb.drop: to_drop = [col for col in selected if col not in groups_set] selected = [col for col in columns if col not in to_drop] return selected
python
def select(cls, verb): columns = verb.data.columns contains = verb.contains matches = verb.matches groups = _get_groups(verb) names = cls._resolve_slices(columns, verb.names) names_set = set(names) groups_set = set(groups) lst = [[]] if names or groups: # group variable missing from the selection are prepended missing = [g for g in groups if g not in names_set] missing_set = set(missing) c1 = missing + [x for x in names if x not in missing_set] lst.append(c1) if verb.startswith: c2 = [x for x in columns if isinstance(x, str) and x.startswith(verb.startswith)] lst.append(c2) if verb.endswith: c3 = [x for x in columns if isinstance(x, str) and x.endswith(verb.endswith)] lst.append(c3) if contains: c4 = [] for col in columns: if (isinstance(col, str) and any(s in col for s in contains)): c4.append(col) lst.append(c4) if matches: c5 = [] patterns = [x if hasattr(x, 'match') else re.compile(x) for x in matches] for col in columns: if isinstance(col, str): if any(bool(p.match(col)) for p in patterns): c5.append(col) lst.append(c5) selected = unique(list(itertools.chain(*lst))) if verb.drop: to_drop = [col for col in selected if col not in groups_set] selected = [col for col in columns if col not in to_drop] return selected
[ "def", "select", "(", "cls", ",", "verb", ")", ":", "columns", "=", "verb", ".", "data", ".", "columns", "contains", "=", "verb", ".", "contains", "matches", "=", "verb", ".", "matches", "groups", "=", "_get_groups", "(", "verb", ")", "names", "=", "cls", ".", "_resolve_slices", "(", "columns", ",", "verb", ".", "names", ")", "names_set", "=", "set", "(", "names", ")", "groups_set", "=", "set", "(", "groups", ")", "lst", "=", "[", "[", "]", "]", "if", "names", "or", "groups", ":", "# group variable missing from the selection are prepended", "missing", "=", "[", "g", "for", "g", "in", "groups", "if", "g", "not", "in", "names_set", "]", "missing_set", "=", "set", "(", "missing", ")", "c1", "=", "missing", "+", "[", "x", "for", "x", "in", "names", "if", "x", "not", "in", "missing_set", "]", "lst", ".", "append", "(", "c1", ")", "if", "verb", ".", "startswith", ":", "c2", "=", "[", "x", "for", "x", "in", "columns", "if", "isinstance", "(", "x", ",", "str", ")", "and", "x", ".", "startswith", "(", "verb", ".", "startswith", ")", "]", "lst", ".", "append", "(", "c2", ")", "if", "verb", ".", "endswith", ":", "c3", "=", "[", "x", "for", "x", "in", "columns", "if", "isinstance", "(", "x", ",", "str", ")", "and", "x", ".", "endswith", "(", "verb", ".", "endswith", ")", "]", "lst", ".", "append", "(", "c3", ")", "if", "contains", ":", "c4", "=", "[", "]", "for", "col", "in", "columns", ":", "if", "(", "isinstance", "(", "col", ",", "str", ")", "and", "any", "(", "s", "in", "col", "for", "s", "in", "contains", ")", ")", ":", "c4", ".", "append", "(", "col", ")", "lst", ".", "append", "(", "c4", ")", "if", "matches", ":", "c5", "=", "[", "]", "patterns", "=", "[", "x", "if", "hasattr", "(", "x", ",", "'match'", ")", "else", "re", ".", "compile", "(", "x", ")", "for", "x", "in", "matches", "]", "for", "col", "in", "columns", ":", "if", "isinstance", "(", "col", ",", "str", ")", ":", "if", "any", "(", "bool", "(", "p", ".", "match", "(", "col", ")", ")", "for", "p", "in", "patterns", ")", ":", "c5", ".", "append", "(", "col", ")", "lst", ".", "append", "(", "c5", ")", "selected", "=", "unique", "(", "list", "(", "itertools", ".", "chain", "(", "*", "lst", ")", ")", ")", "if", "verb", ".", "drop", ":", "to_drop", "=", "[", "col", "for", "col", "in", "selected", "if", "col", "not", "in", "groups_set", "]", "selected", "=", "[", "col", "for", "col", "in", "columns", "if", "col", "not", "in", "to_drop", "]", "return", "selected" ]
Return selected columns for the select verb

Parameters
----------
verb : object
    verb with the column selection attributes:

        - names
        - startswith
        - endswith
        - contains
        - matches
[ "Return", "selected", "columns", "for", "the", "select", "verb" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L370-L437
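For orientation, a minimal sketch of how this selection logic surfaces through plydata's ``select`` verb; the frame and column names below are illustrative, and the resulting columns are noted in comments:

import pandas as pd
from plydata import select

df = pd.DataFrame({'alpha': [1, 2], 'beta': [3, 4], 'bet_max': [5, 6]})

# named columns plus a text match: alpha, beta, bet_max
df >> select('alpha', startswith='bet')

# drop=True inverts the selection: beta and bet_max remain
df >> select('alpha', drop=True)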
191
has2k1/plydata
plydata/dataframe/common.py
Selector._at
def _at(cls, verb):
    """
    A verb with a select text match
    """
    # Named (listed) columns are always included
    columns = cls.select(verb)
    final_columns_set = set(cls.select(verb))
    groups_set = set(_get_groups(verb))
    final_columns_set -= groups_set - set(verb.names)

    def pred(col):
        if col not in verb.data:
            raise KeyError(
                "Unknown column name, {!r}".format(col))
        return col in final_columns_set

    return [col for col in columns if pred(col)]
python
def _at(cls, verb):
    # Named (listed) columns are always included
    columns = cls.select(verb)
    final_columns_set = set(cls.select(verb))
    groups_set = set(_get_groups(verb))
    final_columns_set -= groups_set - set(verb.names)

    def pred(col):
        if col not in verb.data:
            raise KeyError(
                "Unknown column name, {!r}".format(col))
        return col in final_columns_set

    return [col for col in columns if pred(col)]
[ "def", "_at", "(", "cls", ",", "verb", ")", ":", "# Named (listed) columns are always included", "columns", "=", "cls", ".", "select", "(", "verb", ")", "final_columns_set", "=", "set", "(", "cls", ".", "select", "(", "verb", ")", ")", "groups_set", "=", "set", "(", "_get_groups", "(", "verb", ")", ")", "final_columns_set", "-=", "groups_set", "-", "set", "(", "verb", ".", "names", ")", "def", "pred", "(", "col", ")", ":", "if", "col", "not", "in", "verb", ".", "data", ":", "raise", "KeyError", "(", "\"Unknown column name, {!r}\"", ".", "format", "(", "col", ")", ")", "return", "col", "in", "final_columns_set", "return", "[", "col", "for", "col", "in", "columns", "if", "pred", "(", "col", ")", "]" ]
A verb with a select text match
[ "A", "verb", "with", "a", "select", "text", "match" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L448-L464
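``Selector._at`` backs the ``*_at`` family of verbs, which take a column selection plus functions to apply. A hedged sketch, assuming the dict form of the selection documented for those verbs (the data and names are illustrative):

import numpy as np
import pandas as pd
from plydata import mutate_at

df = pd.DataFrame({'x1': [1, -2], 'x2': [-3, 4], 'label': ['a', 'b']})

# select columns whose names start with 'x', then apply np.abs to each
df >> mutate_at(dict(startswith='x'), np.abs)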
192
has2k1/plydata
plydata/dataframe/common.py
Selector._if
def _if(cls, verb):
    """
    A verb with a predicate function
    """
    pred = verb.predicate
    data = verb.data
    groups = set(_get_groups(verb))

    # force predicate
    if isinstance(pred, str):
        if not pred.endswith('_dtype'):
            pred = '{}_dtype'.format(pred)
        pred = getattr(pdtypes, pred)
    elif pdtypes.is_bool_dtype(np.array(pred)):
        # Turn boolean array into a predicate function
        it = iter(pred)

        def pred(col):
            return next(it)

    return [col for col in data
            if pred(data[col]) and col not in groups]
python
def _if(cls, verb):
    pred = verb.predicate
    data = verb.data
    groups = set(_get_groups(verb))

    # force predicate
    if isinstance(pred, str):
        if not pred.endswith('_dtype'):
            pred = '{}_dtype'.format(pred)
        pred = getattr(pdtypes, pred)
    elif pdtypes.is_bool_dtype(np.array(pred)):
        # Turn boolean array into a predicate function
        it = iter(pred)

        def pred(col):
            return next(it)

    return [col for col in data
            if pred(data[col]) and col not in groups]
[ "def", "_if", "(", "cls", ",", "verb", ")", ":", "pred", "=", "verb", ".", "predicate", "data", "=", "verb", ".", "data", "groups", "=", "set", "(", "_get_groups", "(", "verb", ")", ")", "# force predicate", "if", "isinstance", "(", "pred", ",", "str", ")", ":", "if", "not", "pred", ".", "endswith", "(", "'_dtype'", ")", ":", "pred", "=", "'{}_dtype'", ".", "format", "(", "pred", ")", "pred", "=", "getattr", "(", "pdtypes", ",", "pred", ")", "elif", "pdtypes", ".", "is_bool_dtype", "(", "np", ".", "array", "(", "pred", ")", ")", ":", "# Turn boolean array into a predicate function", "it", "=", "iter", "(", "pred", ")", "def", "pred", "(", "col", ")", ":", "return", "next", "(", "it", ")", "return", "[", "col", "for", "col", "in", "data", "if", "pred", "(", "data", "[", "col", "]", ")", "and", "col", "not", "in", "groups", "]" ]
A verb with a predicate function
[ "A", "verb", "with", "a", "predicate", "function" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/dataframe/common.py#L467-L488
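``Selector._if`` backs the ``*_if`` verbs. Note from the code above that a string predicate is expanded to a ``pandas.api.types`` dtype check, so ``'is_numeric'`` becomes ``pdtypes.is_numeric_dtype``. A hedged sketch assuming the ``summarize_if`` verb:

import numpy as np
import pandas as pd
from plydata import summarize_if

df = pd.DataFrame({'x': [1, 2, 3], 'y': [1.5, 2.5, 3.5], 's': list('abc')})

# 'is_numeric' -> pdtypes.is_numeric_dtype; only x and y are summarized
df >> summarize_if('is_numeric', np.mean)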
193
has2k1/plydata
plydata/operators.py
get_verb_function
def get_verb_function(data, verb):
    """
    Return function that implements the verb for given data type
    """
    try:
        module = type_lookup[type(data)]
    except KeyError:
        # Some guess work for subclasses
        for type_, mod in type_lookup.items():
            if isinstance(data, type_):
                module = mod
                break
    try:
        return getattr(module, verb)
    except (NameError, AttributeError):
        msg = "Data source of type '{}' is not supported."
        raise TypeError(msg.format(type(data)))
python
def get_verb_function(data, verb):
    try:
        module = type_lookup[type(data)]
    except KeyError:
        # Some guess work for subclasses
        for type_, mod in type_lookup.items():
            if isinstance(data, type_):
                module = mod
                break
    try:
        return getattr(module, verb)
    except (NameError, AttributeError):
        msg = "Data source of type '{}' is not supported."
        raise TypeError(msg.format(type(data)))
[ "def", "get_verb_function", "(", "data", ",", "verb", ")", ":", "try", ":", "module", "=", "type_lookup", "[", "type", "(", "data", ")", "]", "except", "KeyError", ":", "# Some guess work for subclasses", "for", "type_", ",", "mod", "in", "type_lookup", ".", "items", "(", ")", ":", "if", "isinstance", "(", "data", ",", "type_", ")", ":", "module", "=", "mod", "break", "try", ":", "return", "getattr", "(", "module", ",", "verb", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "msg", "=", "\"Data source of type '{}' is not supported.\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "type", "(", "data", ")", ")", ")" ]
Return function that implements the verb for given data type
[ "Return", "function", "that", "implements", "the", "verb", "for", "given", "data", "type" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/operators.py#L23-L39
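The lookup falls back to ``isinstance`` checks so that subclasses of registered types still dispatch correctly. A self-contained toy that mirrors the same two-step resolution (the registry and module here are stand-ins, not plydata's real ``type_lookup``):

import types
import pandas as pd

# stand-in for plydata.operators.type_lookup
toy_module = types.SimpleNamespace(define=lambda *a, **kw: 'define impl')
toy_lookup = {pd.DataFrame: toy_module}

def lookup(data, verb):
    module = toy_lookup.get(type(data))
    if module is None:
        # same guess work for subclasses as above
        for type_, mod in toy_lookup.items():
            if isinstance(data, type_):
                module = mod
                break
    return getattr(module, verb)

class MyFrame(pd.DataFrame):
    pass

# exact-type lookup misses MyFrame; the isinstance fallback resolves it
func = lookup(MyFrame({'x': [1]}), 'define')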
194
has2k1/plydata
plydata/expressions.py
Expression
def Expression(*args, **kwargs):
    """
    Return an appropriate Expression given the arguments

    Parameters
    ----------
    args : tuple
        Positional arguments passed to the Expression class
    kwargs : dict
        Keyword arguments passed to the Expression class
    """
    # dispatch
    if not hasattr(args[0], '_Expression'):
        return BaseExpression(*args, **kwargs)
    else:
        return args[0]._Expression(*args, **kwargs)
python
def Expression(*args, **kwargs):
    # dispatch
    if not hasattr(args[0], '_Expression'):
        return BaseExpression(*args, **kwargs)
    else:
        return args[0]._Expression(*args, **kwargs)
[ "def", "Expression", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# dispatch", "if", "not", "hasattr", "(", "args", "[", "0", "]", ",", "'_Expression'", ")", ":", "return", "BaseExpression", "(", "*", "args", ",", "*", "kwargs", ")", "else", ":", "return", "args", "[", "0", "]", ".", "_Expression", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return an appropriate Expression given the arguments

Parameters
----------
args : tuple
    Positional arguments passed to the Expression class
kwargs : dict
    Keyword arguments passed to the Expression class
[ "Return", "an", "appropriate", "Expression", "given", "the", "arguments" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/expressions.py#L176-L191
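The factory dispatches on the first positional argument: an object that carries a ``_Expression`` attribute gets its own expression class, everything else falls through to ``BaseExpression``. A hedged sketch with a made-up carrier class (``CustomStat`` and its fields are illustrative):

from plydata.expressions import Expression

class CustomStat:
    class _Expression:
        def __init__(self, stat, column):
            self.stat = stat
            self.column = column

# args[0] has _Expression, so construction is routed to CustomStat._Expression
expr = Expression(CustomStat(), 'price')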
195
has2k1/plydata
plydata/eval.py
EvalEnvironment.with_outer_namespace
def with_outer_namespace(self, outer_namespace):
    """Return a new EvalEnvironment with an extra namespace added.

    This namespace will be used only for variables that are not found
    in any existing namespace, i.e., it is "outside" them all."""
    return self.__class__(self._namespaces + [outer_namespace],
                          self.flags)
python
def with_outer_namespace(self, outer_namespace):
    return self.__class__(self._namespaces + [outer_namespace],
                          self.flags)
[ "def", "with_outer_namespace", "(", "self", ",", "outer_namespace", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "_namespaces", "+", "[", "outer_namespace", "]", ",", "self", ".", "flags", ")" ]
Return a new EvalEnvironment with an extra namespace added. This namespace will be used only for variables that are not found in any existing namespace, i.e., it is "outside" them all.
[ "Return", "a", "new", "EvalEnvironment", "with", "an", "extra", "namespace", "added", ".", "This", "namespace", "will", "be", "used", "only", "for", "variables", "that", "are", "not", "found", "in", "any", "existing", "namespace", "i", ".", "e", ".", "it", "is", "outside", "them", "all", "." ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L81-L86
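A sketch of the fallback behaviour, using only the ``capture`` and ``namespace`` accessors that ``Q`` (further below) also relies on; names earlier in ``_namespaces`` shadow the outer namespace:

from plydata.eval import EvalEnvironment

x = 1
env = EvalEnvironment.capture()
env2 = env.with_outer_namespace({'x': 99, 'y': 2})

env2.namespace['x']   # 1 -- existing namespaces win
env2.namespace['y']   # 2 -- only missing names fall through to the outer one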
196
has2k1/plydata
plydata/eval.py
EvalEnvironment.subset
def subset(self, names):
    """Creates a new, flat EvalEnvironment that contains only
    the variables specified."""
    vld = VarLookupDict(self._namespaces)
    new_ns = dict((name, vld[name]) for name in names)
    return EvalEnvironment([new_ns], self.flags)
python
def subset(self, names):
    vld = VarLookupDict(self._namespaces)
    new_ns = dict((name, vld[name]) for name in names)
    return EvalEnvironment([new_ns], self.flags)
[ "def", "subset", "(", "self", ",", "names", ")", ":", "vld", "=", "VarLookupDict", "(", "self", ".", "_namespaces", ")", "new_ns", "=", "dict", "(", "(", "name", ",", "vld", "[", "name", "]", ")", "for", "name", "in", "names", ")", "return", "EvalEnvironment", "(", "[", "new_ns", "]", ",", "self", ".", "flags", ")" ]
Creates a new, flat EvalEnvironment that contains only the variables specified.
[ "Creates", "a", "new", "flat", "EvalEnvironment", "that", "contains", "only", "the", "variables", "specified", "." ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/eval.py#L159-L164
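A sketch of ``subset``: only the requested names survive into the new, flat environment (the variables here are illustrative):

from plydata.eval import EvalEnvironment

a, b, c = 1, 2, 3
env = EvalEnvironment.capture()
small = env.subset(['a', 'b'])

small.namespace['a']   # 1
small.namespace['c']   # raises KeyError: 'c' was not carried over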
197
has2k1/plydata
plydata/utils.py
Q
def Q(name):
    """
    Quote a variable name

    A way to 'quote' variable names, especially ones that do not
    otherwise meet Python's variable name rules.

    Parameters
    ----------
    name : str
        Name of variable

    Returns
    -------
    value : object
        Value of variable

    Examples
    --------
    >>> import pandas as pd
    >>> from plydata import define
    >>> df = pd.DataFrame({'class': [10, 20, 30]})

    Since ``class`` is a reserved python keyword it cannot be a variable
    name, and therefore cannot be used in an expression without quoting it.

    >>> df >> define(y='class+1')
    Traceback (most recent call last):
      File "<string>", line 1
        class+1
            ^
    SyntaxError: invalid syntax

    >>> df >> define(y='Q("class")+1')
       class   y
    0     10  11
    1     20  21
    2     30  31

    Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``.
    As in the above example, you do not need to ``import`` ``Q`` before
    you can use it.
    """
    env = EvalEnvironment.capture(1)
    try:
        return env.namespace[name]
    except KeyError:
        raise NameError("No data named {!r} found".format(name))
python
def Q(name):
    env = EvalEnvironment.capture(1)
    try:
        return env.namespace[name]
    except KeyError:
        raise NameError("No data named {!r} found".format(name))
[ "def", "Q", "(", "name", ")", ":", "env", "=", "EvalEnvironment", ".", "capture", "(", "1", ")", "try", ":", "return", "env", ".", "namespace", "[", "name", "]", "except", "KeyError", ":", "raise", "NameError", "(", "\"No data named {!r} found\"", ".", "format", "(", "name", ")", ")" ]
Quote a variable name

A way to 'quote' variable names, especially ones that do not
otherwise meet Python's variable name rules.

Parameters
----------
name : str
    Name of variable

Returns
-------
value : object
    Value of variable

Examples
--------
>>> import pandas as pd
>>> from plydata import define
>>> df = pd.DataFrame({'class': [10, 20, 30]})

Since ``class`` is a reserved python keyword it cannot be a variable
name, and therefore cannot be used in an expression without quoting it.

>>> df >> define(y='class+1')
Traceback (most recent call last):
  File "<string>", line 1
    class+1
        ^
SyntaxError: invalid syntax

>>> df >> define(y='Q("class")+1')
   class   y
0     10  11
1     20  21
2     30  31

Note that it is ``'Q("some name")'`` and not ``'Q(some name)'``.
As in the above example, you do not need to ``import`` ``Q`` before
you can use it.
[ "Quote", "a", "variable", "name" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L72-L119
198
has2k1/plydata
plydata/utils.py
regular_index
def regular_index(*dfs):
    """
    Change & restore the indices of dataframes

    Dataframes with duplicate index values can be hard to work with.
    When split and recombined, you cannot restore the row order. This
    can be the case even if the index values are unique but
    irregular/unordered.

    This contextmanager resets the unordered indices of any dataframe
    passed to it; on exit it restores the original index.

    A regular index is of the form::

        RangeIndex(start=0, stop=n, step=1)

    Parameters
    ----------
    dfs : tuple
        Dataframes

    Yields
    ------
    dfs : tuple
        Dataframes

    Examples
    --------
    Create dataframes with different indices

    >>> df1 = pd.DataFrame([4, 3, 2, 1])
    >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
    >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])

    Within the contextmanager all frames have nice range indices

    >>> with regular_index(df1, df2, df3):
    ...     print(df1.index)
    ...     print(df2.index)
    ...     print(df3.index)
    RangeIndex(start=0, stop=4, step=1)
    RangeIndex(start=0, stop=3, step=1)
    RangeIndex(start=0, stop=3, step=1)

    Indices restored

    >>> df1.index
    RangeIndex(start=0, stop=4, step=1)
    >>> df2.index
    Int64Index([3, 0, 0], dtype='int64')
    >>> df3.index
    Int64Index([11, 12, 13], dtype='int64')
    """
    original_index = [df.index for df in dfs]
    have_bad_index = [not isinstance(df.index, pd.RangeIndex)
                      for df in dfs]

    for df, bad in zip(dfs, have_bad_index):
        if bad:
            df.reset_index(drop=True, inplace=True)

    try:
        yield dfs
    finally:
        for df, bad, idx in zip(dfs, have_bad_index, original_index):
            if bad and len(df.index) == len(idx):
                df.index = idx
python
def regular_index(*dfs):
    original_index = [df.index for df in dfs]
    have_bad_index = [not isinstance(df.index, pd.RangeIndex)
                      for df in dfs]

    for df, bad in zip(dfs, have_bad_index):
        if bad:
            df.reset_index(drop=True, inplace=True)

    try:
        yield dfs
    finally:
        for df, bad, idx in zip(dfs, have_bad_index, original_index):
            if bad and len(df.index) == len(idx):
                df.index = idx
[ "def", "regular_index", "(", "*", "dfs", ")", ":", "original_index", "=", "[", "df", ".", "index", "for", "df", "in", "dfs", "]", "have_bad_index", "=", "[", "not", "isinstance", "(", "df", ".", "index", ",", "pd", ".", "RangeIndex", ")", "for", "df", "in", "dfs", "]", "for", "df", ",", "bad", "in", "zip", "(", "dfs", ",", "have_bad_index", ")", ":", "if", "bad", ":", "df", ".", "reset_index", "(", "drop", "=", "True", ",", "inplace", "=", "True", ")", "try", ":", "yield", "dfs", "finally", ":", "for", "df", ",", "bad", ",", "idx", "in", "zip", "(", "dfs", ",", "have_bad_index", ",", "original_index", ")", ":", "if", "bad", "and", "len", "(", "df", ".", "index", ")", "==", "len", "(", "idx", ")", ":", "df", ".", "index", "=", "idx" ]
Change & restore the indices of dataframes Dataframe with duplicate values can be hard to work with. When split and recombined, you cannot restore the row order. This can be the case even if the index has unique but irregular/unordered. This contextmanager resets the unordered indices of any dataframe passed to it, on exit it restores the original index. A regular index is of the form:: RangeIndex(start=0, stop=n, step=1) Parameters ---------- dfs : tuple Dataframes Yields ------ dfs : tuple Dataframe Examples -------- Create dataframes with different indices >>> df1 = pd.DataFrame([4, 3, 2, 1]) >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0]) >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13]) Within the contexmanager all frames have nice range indices >>> with regular_index(df1, df2, df3): ... print(df1.index) ... print(df2.index) ... print(df3.index) RangeIndex(start=0, stop=4, step=1) RangeIndex(start=0, stop=3, step=1) RangeIndex(start=0, stop=3, step=1) Indices restored >>> df1.index RangeIndex(start=0, stop=4, step=1) >>> df2.index Int64Index([3, 0, 0], dtype='int64') >>> df3.index Int64Index([11, 12, 13], dtype='int64')
[ "Change", "&", "restore", "the", "indices", "of", "dataframes" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L147-L212
199
has2k1/plydata
plydata/utils.py
unique
def unique(lst):
    """
    Return unique elements

    :class:`pandas.unique` and :class:`numpy.unique` cast
    mixed type lists to the same type. They are faster, but
    sometimes we want to maintain the type.

    Parameters
    ----------
    lst : list-like
        List of items

    Returns
    -------
    out : list
        Unique items in the order that they appear in the input.

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> lst = ['one', 'two', 123, 'three']

    >>> pd.unique(lst)
    array(['one', 'two', '123', 'three'], dtype=object)

    >>> np.unique(lst)
    array(['123', 'one', 'three', 'two'], dtype='<U5')

    >>> unique(lst)
    ['one', 'two', 123, 'three']

    pandas and numpy cast 123 to a string, and numpy does not
    even maintain the order.
    """
    seen = set()

    def make_seen(x):
        seen.add(x)
        return x

    return [make_seen(x) for x in lst if x not in seen]
python
def unique(lst):
    seen = set()

    def make_seen(x):
        seen.add(x)
        return x

    return [make_seen(x) for x in lst if x not in seen]
[ "def", "unique", "(", "lst", ")", ":", "seen", "=", "set", "(", ")", "def", "make_seen", "(", "x", ")", ":", "seen", ".", "add", "(", "x", ")", "return", "x", "return", "[", "make_seen", "(", "x", ")", "for", "x", "in", "lst", "if", "x", "not", "in", "seen", "]" ]
Return unique elements

:class:`pandas.unique` and :class:`numpy.unique` cast
mixed type lists to the same type. They are faster, but
sometimes we want to maintain the type.

Parameters
----------
lst : list-like
    List of items

Returns
-------
out : list
    Unique items in the order that they appear in the input.

Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> lst = ['one', 'two', 123, 'three']

>>> pd.unique(lst)
array(['one', 'two', '123', 'three'], dtype=object)

>>> np.unique(lst)
array(['123', 'one', 'three', 'two'], dtype='<U5')

>>> unique(lst)
['one', 'two', 123, 'three']

pandas and numpy cast 123 to a string, and numpy does not
even maintain the order.
[ "Return", "unique", "elements" ]
d8ca85ff70eee621e96f7c74034e90fec16e8b61
https://github.com/has2k1/plydata/blob/d8ca85ff70eee621e96f7c74034e90fec16e8b61/plydata/utils.py#L215-L256