Dataset schema:
- _id: string (2 to 7 chars)
- title: string (1 to 88 chars)
- partition: string (3 classes)
- text: string (31 to 13.1k chars)
- language: string (1 class)
- meta_information: dict
q100
Step.invoke_step
train
def invoke_step(self, context): """Invoke 'run_step' in the dynamically loaded step module. Don't invoke this from outside the Step class. Use pypyr.dsl.Step.run_step instead. invoke_step just does the bare module step invocation, it does not evaluate any of the decorator logic surrounding the step. So
python
{ "resource": "" }
q101
Step.run_conditional_decorators
train
def run_conditional_decorators(self, context): """Evaluate the step decorators to decide whether to run step or not. Use pypyr.dsl.Step.run_step if you intend on executing the step the same way pypyr does. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate. """ logger.debug("starting") # The decorator attributes might contain formatting expressions that # change whether they evaluate True or False, thus apply formatting at # last possible instant. run_me = context.get_formatted_as_type(self.run_me, out_type=bool) skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool) swallow_me = context.get_formatted_as_type(self.swallow_me, out_type=bool) if run_me: if not skip_me: try: if self.retry_decorator: self.retry_decorator.retry_loop(context, self.invoke_step) else: self.invoke_step(context=context) except Exception as ex_info:
python
{ "resource": "" }
q102
Step.run_foreach_or_conditional
train
def run_foreach_or_conditional(self, context): """Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate. """ logger.debug("starting") # friendly reminder [] list obj (i.e empty) evals False
python
{ "resource": "" }
q103
Step.run_step
train
def run_step(self, context): """Run a single pipeline step. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate. """ logger.debug("starting") # the in params should be added to context before step execution. self.set_step_input_context(context) if self.while_decorator:
python
{ "resource": "" }
q104
Step.set_step_input_context
train
def set_step_input_context(self, context): """Append step's 'in' parameters to context, if they exist. Append the [in] dictionary to the context. This will overwrite existing values if the same keys are already in there. I.e. if in_parameters has {'eggs': 'boiled'} and key 'eggs' already exists in context, context['eggs'] hereafter will be 'boiled'. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new
python
{ "resource": "" }
q105
RetryDecorator.exec_iteration
train
def exec_iteration(self, counter, context, step_method): """Run a single retry iteration. This method abides by the signature invoked by poll.while_until_true, which is to say (counter, *args, **kwargs). In a normal execution chain, this method's args are passed by self.retry_loop, where context and step_method are set. while_until_true injects counter as a 1st arg. Args: counter: int. loop counter, indicating which iteration this is. context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) Returns: bool. True if step execution completed without error. False if error occurred during step execution. """ logger.debug("starting") context['retryCounter'] = counter logger.info(f"retry: running step with counter {counter}") try: step_method(context) result = True except Exception as ex_info: if self.max: if counter == self.max: logger.debug(f"retry: max {counter} retries exhausted. " "raising error.") # arguably shouldn't be using errs for control of flow. # but would lose the err info if not, so lesser of 2 evils. raise if self.stop_on or self.retry_on: error_name = get_error_name(ex_info) if self.stop_on: formatted_stop_list = context.get_formatted_iterable(
python
{ "resource": "" }
q106
RetryDecorator.retry_loop
train
def retry_loop(self, context, step_method): """Run step inside a retry loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) """ logger.debug("starting") context['retryCounter'] = 0 sleep = context.get_formatted_as_type(self.sleep, out_type=float) if self.max: max = context.get_formatted_as_type(self.max, out_type=int) logger.info(f"retry decorator will try {max} times at {sleep}s " "intervals.") else: max = None logger.info(f"retry decorator will try indefinitely at {sleep}s " "intervals.") # this will never be false. because on counter
python
{ "resource": "" }
q107
WhileDecorator.exec_iteration
train
def exec_iteration(self, counter, context, step_method): """Run a single loop iteration. This method abides by the signature invoked by poll.while_until_true, which is to say (counter, *args, **kwargs). In a normal execution chain, this method's args are passed by self.while_loop, where context and step_method are set. while_until_true injects counter as a 1st arg. Args: counter: int. loop counter, indicating which iteration this is. context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) Returns: bool. True if self.stop evaluates to True after step execution,
python
{ "resource": "" }
q108
WhileDecorator.while_loop
train
def while_loop(self, context, step_method): """Run step inside a while loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) """ logger.debug("starting") context['whileCounter'] = 0 if self.stop is None and self.max is None: # the ctor already does this check, but guess theoretically # consumer could have messed with the props since ctor logger.error(f"while decorator missing both max and stop.") raise PipelineDefinitionError("the while decorator must have " "either max or stop, or both. " "But not neither.") error_on_max = context.get_formatted_as_type( self.error_on_max, out_type=bool) sleep = context.get_formatted_as_type(self.sleep, out_type=float) if self.max is None: max = None logger.info(f"while decorator will loop until {self.stop} " f"evaluates to True at {sleep}s intervals.") else: max = context.get_formatted_as_type(self.max, out_type=int) if max < 1: logger.info( f"max {self.max} is {max}. while only runs when max > 0.") logger.debug("done") return if self.stop is None: logger.info(f"while decorator will loop {max} times at " f"{sleep}s intervals.") else: logger.info(f"while decorator will loop {max} times, or "
python
{ "resource": "" }
q109
run_step
train
def run_step(context): """Load a yaml file into the pypyr context. Yaml parsed from the file will be merged into the pypyr context. This will overwrite existing values if the same keys are already in there. I.e if file yaml has {'eggs' : 'boiled'} and context {'eggs': 'fried'} already exists, returned context['eggs'] will be 'boiled'. Args: context: pypyr.context.Context. Mandatory. The following context key must exist - fetchYaml - path. path-like. Path to file on disk. - key. string. If exists, write yaml to this context key. Else yaml writes to context root. All inputs support formatting expressions. Also supports a passing path as string to fetchYaml, but in this case you won't be able to specify a key. Returns: None. updates context arg. Raises: FileNotFoundError: take a guess pypyr.errors.KeyNotInContextError: fetchYamlPath missing in context. pypyr.errors.KeyInContextHasNoValueError: fetchYamlPath exists but is
python
{ "resource": "" }
q110
run_step
train
def run_step(context): """pypyr step saves current utc datetime to context. Args: context: pypyr.context.Context. Mandatory. The following context key is optional: - nowUtcIn. str. Datetime formatting expression. For full list of possible expressions, check here: https://docs.python.org/3.7/library/datetime.html#strftime-and-strptime-behavior All inputs support pypyr formatting expressions. This step creates now in context, containing a string representation of the timestamp. If input formatting not specified, defaults to ISO8601. Default is: YYYY-MM-DDTHH:MM:SS.ffffff+00:00, or, if microsecond is 0, YYYY-MM-DDTHH:MM:SS Returns: None. updates context arg. """ logger.debug("started")
python
{ "resource": "" }
q111
run_step
train
def run_step(context): """Assert that something is True or equal to something else. Args: context: dictionary-like pypyr.context.Context. context is mandatory. Uses the following context keys in context: - assert - this. mandatory. Any type. If assert['equals'] not specified, evals as boolean. - equals. optional. Any type. If assert['this'] evaluates to False raises error. If assert['equals'] is specified, raises error if assert.this != assert.equals. assert['this'] & assert['equals'] both support string substitutions. Returns: None Raises: ContextError: if assert evaluates to False. """ logger.debug("started") assert context, f"context must have value for {__name__}" deprecated(context) context.assert_key_has_value('assert', __name__) assert_this = context['assert']['this'] is_equals_there = 'equals' in context['assert'] if is_equals_there: assert_equals = context['assert']['equals'] # compare assertThis to assertEquals logger.debug("comparing assert['this'] to assert['equals'].") assert_result = (context.get_formatted_iterable(assert_this) == context.get_formatted_iterable(assert_equals)) else: # nothing to compare means treat assertThis as a bool. logger.debug("evaluating assert['this'] as a boolean.") assert_result = context.get_formatted_as_type(assert_this,
python
{ "resource": "" }
q112
tar_archive
train
def tar_archive(context): """Archive specified path to a tar archive. Args: context: dictionary-like. context is mandatory. context['tar']['archive'] must exist. It's a dictionary. keys are the paths to archive. values are the destination output paths. Example: tar: archive: - in: path/to/dir out: path/to/destination.tar.xz - in: another/my.file out: ./my.tar.xz This will archive directory path/to/dir to path/to/destination.tar.xz, and also archive file another/my.file to ./my.tar.xz """ logger.debug("start") mode = get_file_mode_for_writing(context) for item in context['tar']['archive']: # value is the destination tar. Allow
python
{ "resource": "" }
q113
tar_extract
train
def tar_extract(context): """Extract all members of tar archive to specified path. Args: context: dictionary-like. context is mandatory. context['tar']['extract'] must exist. It's a dictionary. keys are the path to the tar to extract. values are the destination paths. Example: tar: extract: - in: path/to/my.tar.xz out: /path/extract/here - in: another/tar.xz out: . This will extract path/to/my.tar.xz to /path/extract/here, and also extract another/tar.xz to $PWD. """ logger.debug("start") mode = get_file_mode_for_reading(context) for item in context['tar']['extract']: # in is the path to the tar to
python
{ "resource": "" }
q114
run_step
train
def run_step(context): """Run shell command without shell interpolation. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute command string in the shell as a sub-process. The shell defaults to /bin/sh. The context['cmd'] string must be formatted exactly as it would be when typed at the shell prompt. This includes, for example, quoting or backslash escaping filenames with spaces in them.
python
{ "resource": "" }
q115
get_args
train
def get_args(get_item): """Parse env, key, default out of input dict. Args: get_item: dict. contains keys env/key/default Returns: (env, key, has_default, default) tuple, where env: str. env var name. key: str. save env value to this context key. has_default: bool. True if default specified. default: the value of default, if specified. Raises: ContextError: envGet is not a list of dicts. KeyNotInContextError: If env or key not found in get_config. """ if not isinstance(get_item, dict): raise ContextError('envGet must contain a list of dicts.') env = get_item.get('env', None) if not env:
python
{ "resource": "" }
q116
run_step
train
def run_step(context): """Executes dynamic python code. Context is a dictionary or dictionary-like. Context must contain key 'pycode' Will exec context['pycode'] as dynamically interpreted python statements. context is mandatory. When you execute the pipeline, it should look something like this: pipeline-runner [name here] 'pycode=print(1+1)'. """ logger.debug("started") context.assert_key_has_value(key='pycode', caller=__name__) logger.debug(f"Executing python string: {context['pycode']}") locals_dictionary = locals()
python
{ "resource": "" }
q117
get_parser
train
def get_parser(): """Return ArgumentParser for pypyr cli.""" parser = argparse.ArgumentParser( allow_abbrev=True, description='pypyr pipeline runner') parser.add_argument('pipeline_name', help='Name of pipeline to run. It should exist in the ' './pipelines directory.') parser.add_argument(dest='pipeline_context', nargs='?', help='String for context values. Parsed by the ' 'pipeline\'s context_parser function.') parser.add_argument('--dir', dest='working_dir', default=os.getcwd(), help='Working directory. Use if your pipelines ' 'directory is elsewhere. Defaults to cwd.') parser.add_argument('--log', '--loglevel', dest='log_level', type=int, default=20, help='Integer log level. Defaults to 20 (INFO). '
python
{ "resource": "" }
q118
main
train
def main(args=None): """Entry point for pypyr cli. The setup_py entry_point wraps this in sys.exit already so this effectively becomes sys.exit(main()). The __main__ entry point similarly wraps sys.exit(). """ if args is None: args = sys.argv[1:] parsed_args = get_args(args) try: return pypyr.pipelinerunner.main( pipeline_name=parsed_args.pipeline_name, pipeline_context_input=parsed_args.pipeline_context, working_dir=parsed_args.working_dir, log_level=parsed_args.log_level, log_path=parsed_args.log_path) except KeyboardInterrupt: # Shell standard is 128 + signum = 130 (SIGINT = 2)
python
{ "resource": "" }
q119
run_step
train
def run_step(context): """Remove specified keys from context. Args: Context is a dictionary or dictionary-like. context['contextClear'] must exist. It's a dictionary. Will iterate context['contextClear'] and remove those keys from context. For example, say input context is: key1: value1 key2: value2 key3: value3 key4: value4 contextClear: - key2 - key4 - contextClear This will result in return context: key1: value1
python
{ "resource": "" }
q120
run_step
train
def run_step(context): """Run command, program or executable. Context is a dictionary or dictionary-like. Context must contain the following keys: cmd: <<cmd string>> (command + args to execute.) OR, as a dict cmd: run: str. mandatory. <<cmd string>> command + args to execute. save: bool. defaults False. save output to cmdOut. Will execute the command string in the shell as a sub-process. Escape curly braces: if you want a literal curly brace, double it like {{ or }}. If save is True, will save the output to context as follows: cmdOut: returncode: 0 stdout: 'stdout str here. None if empty.'
python
{ "resource": "" }
q121
run_step
train
def run_step(context): """Set hierarchy into context with substitutions if it doesn't exist yet. context is a dictionary or dictionary-like. context['defaults'] must exist. It's a dictionary. Will iterate context['defaults'] and add these as new values where their keys don't already exist. While it's doing so, it will leave all other values in the existing hierarchy untouched. List merging is purely additive, with no checks for uniqueness or already existing list items. E.g context [0,1,2] with contextMerge=[2,3,4] will result in [0,1,2,2,3,4] Keep this in mind especially where complex types like dicts nest inside a list - a merge will always add a new dict list item, not merge it into whatever dicts might exist on the list already. For example, say input context is: key1: value1 key2: value2 key3: k31: value31 k32: value32 defaults: key2: 'aaa_{key1}_zzz' key3:
python
{ "resource": "" }
q122
get_pipeline_steps
train
def get_pipeline_steps(pipeline, steps_group): """Get the steps attribute of module pipeline. If there is no steps sequence on the pipeline, return None. Guess you could theoretically want to run a pipeline with nothing in it. """ logger.debug("starting") assert pipeline assert steps_group logger.debug(f"retrieving {steps_group} steps from pipeline") if steps_group in pipeline: steps = pipeline[steps_group] if
python
{ "resource": "" }
q123
run_failure_step_group
train
def run_failure_step_group(pipeline, context): """Run the on_failure step group if it exists. This function will swallow all errors, to prevent obfuscating the error condition that got it here to begin with. """ logger.debug("starting") try:
python
{ "resource": "" }
q124
run_step_group
train
def run_step_group(pipeline_definition, step_group_name, context): """Get the specified step group from the pipeline and run its steps.""" logger.debug(f"starting {step_group_name}") assert step_group_name steps = get_pipeline_steps(pipeline=pipeline_definition,
python
{ "resource": "" }
q125
ensure_dir
train
def ensure_dir(path): """Create all parent directories of path if they don't exist. Args: path. Path-like object. Create parent dirs to this path. Return:
python
{ "resource": "" }
q126
get_glob
train
def get_glob(path): """Process the input path, applying globbing and formatting. Do note that this will returns files AND directories that match the glob. No tilde expansion is done, but *, ?, and character ranges expressed with [] will be correctly matched. Escape all special characters ('?', '*' and '['). For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'. If passing in an iterable of paths, will expand matches for each path in the iterable. The function will return all the matches for each path glob expression combined into a single list. Args: path: Path-like string, or iterable (list or tuple ) of paths. Returns: Combined list of paths found for input glob. """
python
{ "resource": "" }
q127
is_same_file
train
def is_same_file(path1, path2): """Return True if path1 is the same file as path2. The reason for this dance is that samefile throws if either file doesn't exist. Args: path1: str or path-like. path2: str or path-like. Returns:
python
{ "resource": "" }
q128
move_file
train
def move_file(src, dest): """Move source file to destination. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong.
python
{ "resource": "" }
q129
move_temp_file
train
def move_temp_file(src, dest): """Move src to dest. Delete src if something goes wrong. Overwrites dest. Args: src: str or path-like. source file dest: str or path-like. destination file Returns: None. Raises: FileNotFoundError: out path parent doesn't exist. OSError: if any IO operations go wrong. Does its best to clean up after itself and remove temp files. """ try: move_file(src, dest) except Exception: try:
python
{ "resource": "" }
q130
FileRewriter.files_in_to_out
train
def files_in_to_out(self, in_path, out_path=None): """Write in files to out, calling the line_handler on each line. Calls file_in_to_out under the hood to format the in_path payload. The formatting processing is done by the self.formatter instance. Args: in_path: str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. out_path: str or path-like. Can refer to a file or a directory. Will create directory structure if it doesn't exist. If in-path refers to >1 file (e.g. it's a glob or list), out path can only be a directory - it doesn't make sense to write >1 file to the same single file (this is not an appender.) To ensure out_path is read as a directory and not a file, be sure to have the path separator (/) at the end. Top tip: Path-like objects strip the trailing slash. If you want to pass in a dir that does not exist yet as out-path with a trailing /, you should be passing it as a str to preserve the /. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ in_paths = get_glob(in_path) in_count = len(in_paths) if in_count == 0: logger.debug(f'in path found {in_count} paths.') else: logger.debug(f'in path found {in_count} paths:') for path in in_paths: logger.debug(f'{path}') logger.debug( 'herewith ends the paths. will now process each file.') if in_paths: # derive the destination directory, ensure it's ready for writing basedir_out = None is_outfile_name_known = False if out_path: # outpath could be a file, or a dir pathlib_out = Path(out_path) # yep, Path() strips trailing /, hence check original string if isinstance(out_path, str) and out_path.endswith(os.sep): # ensure dir - mimic posix mkdir -p pathlib_out.mkdir(parents=True, exist_ok=True) basedir_out = pathlib_out elif pathlib_out.is_dir(): basedir_out = pathlib_out else: if len(in_paths) > 1: raise Error( f'{in_path} resolves to {len(in_paths)} files, ' 'but you specified only a single file as out ' f'{out_path}. If the outpath is meant to be a ' 'directory, put a / at the end.') # at this point it must be a file (not dir) path # make sure that the parent dir exists basedir_out = pathlib_out.parent
python
{ "resource": "" }
q131
ObjectRewriter.in_to_out
train
def in_to_out(self, in_path, out_path=None): """Load file into object, formats, writes object to out. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp " "file and then replacing in path with the temp file.") out_path = None logger.debug(f"opening source file: {in_path}") with open(in_path) as infile: obj = self.object_representer.load(infile)
python
{ "resource": "" }
q132
StreamRewriter.in_to_out
train
def in_to_out(self, in_path, out_path=None): """Write a single file in to out, running self.formatter on each line. If in_path and out_path point to the same thing it will in-place edit and overwrite the in path. Even easier, if you do want to edit a file in place, don't specify out_path, or set it to None. Args: in_path: str or path-like. Must refer to a single existing file. out_path: str or path-like. Must refer to a single destination file location. will create directory structure if it doesn't exist. If out_path is not specified or None, will in-place edit and overwrite the in-files. Returns: None. """ is_in_place_edit = False if is_same_file(in_path, out_path): logger.debug( "in path and out path are the same file. writing to temp "
python
{ "resource": "" }
q133
JsonRepresenter.dump
train
def dump(self, file, payload): """Dump json object to open file output. Writes json with 2 spaces indentation. Args:
python
{ "resource": "" }
q134
run_step
train
def run_step(context): """Parse input file and replace a search string. This also does string substitutions from context on the fileReplacePairs. It does this before it search & replaces the in file. Be careful of order. If fileReplacePairs is not an ordered collection, replacements could evaluate in any given order. If this is coming in from pipeline yaml it will be an ordered dictionary, so life is good. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileReplace - in. mandatory. str, path-like, or an iterable (list/tuple) of strings/paths. Each str/path can be a glob, relative or absolute path. - out. optional. path-like. Can refer to a file or a directory.
python
{ "resource": "" }
q135
set_logging_config
train
def set_logging_config(log_level, handlers): """Set python logging library config. Run this ONCE at the start of your process. It formats the python logging module's output.
python
{ "resource": "" }
q136
set_root_logger
train
def set_root_logger(root_log_level, log_path=None): """Set the root logger 'pypyr'. Do this before you do anything else. Run once and only once at initialization. """ handlers = [] console_handler = logging.StreamHandler() handlers.append(console_handler) if log_path: file_handler = logging.FileHandler(log_path) handlers.append(file_handler)
python
{ "resource": "" }
q137
get_parsed_context
train
def get_parsed_context(pipeline, context_in_string): """Execute get_parsed_context handler if specified. Dynamically load the module specified by the context_parser key in pipeline dict and execute the get_parsed_context function on that module. Args: pipeline: dict. Pipeline object. context_in_string: string. Argument string used to initialize context. Returns: pypyr.context.Context() instance. Raises: AttributeError: parser specified on pipeline missing get_parsed_context function. """ logger.debug("starting") if 'context_parser' in pipeline: parser_module_name = pipeline['context_parser'] logger.debug(f"context parser found: {parser_module_name}") parser_module = pypyr.moduleloader.get_module(parser_module_name) try: logger.debug(f"running parser {parser_module_name}") result_context = parser_module.get_parsed_context( context_in_string) logger.debug(f"step {parser_module_name} done") # Downstream steps likely to expect context not to be None, hence # empty rather than None. if result_context is None:
python
{ "resource": "" }
q138
main
train
def main( pipeline_name, pipeline_context_input, working_dir, log_level, log_path, ): """Entry point for pypyr pipeline runner. Call this once per pypyr run. Call me if you want to run a pypyr pipeline from your own code. This function does some one-off 1st time initialization before running the actual pipeline. pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name: string. Name of pipeline, sans .yaml at end. pipeline_context_input: string. Initialize the pypyr context with this
python
{ "resource": "" }
q139
prepare_context
train
def prepare_context(pipeline, context_in_string, context): """Prepare context for pipeline run. Args: pipeline: dict. Dictionary representing the pipeline. context_in_string: string. Argument string used to initialize context. context: pypyr.context.Context. Merge any new context generated from context_in_string into this context instance. Returns: None. The context instance to use for the pipeline run is contained in the context arg, it's not passed back as
python
{ "resource": "" }
q140
load_and_run_pipeline
train
def load_and_run_pipeline(pipeline_name, pipeline_context_input=None, working_dir=None, context=None, parse_input=True, loader=None): """Load and run the specified pypyr pipeline. This function runs the actual pipeline by name. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. By default pypyr uses file loader. This means that pipeline_name.yaml should be in the working_dir/pipelines/ directory. Args: pipeline_name (str): Name of pipeline, sans .yaml at end. pipeline_context_input (str): Initialize the pypyr context with this string. working_dir (path): Look for pipelines and modules in this directory. If context arg passed, will use context.working_dir and ignore this argument. If context is None, working_dir must be specified. context (pypyr.context.Context): Use if you already have a Context object, such as if you are running a pipeline from within a pipeline and you want to re-use the same context
python
{ "resource": "" }
q141
run_pipeline
train
def run_pipeline(pipeline, context, pipeline_context_input=None, parse_input=True): """Run the specified pypyr pipeline. This function runs the actual pipeline. If you are running another pipeline from within a pipeline, call this, not main(). Do call main() instead for your 1st pipeline if there are pipelines calling pipelines. Pipeline and context should be already loaded. Args: pipeline (dict): Dictionary representing the pipeline. context (pypyr.context.Context): Reusable context object. pipeline_context_input (str): Initialize the pypyr context with this string. parse_input (bool): run context_parser in pipeline. Returns: None """ logger.debug("starting") try: if parse_input: logger.debug("executing context_parser") prepare_context(pipeline=pipeline, context_in_string=pipeline_context_input, context=context) else: logger.debug("skipping context_parser") # run main steps pypyr.stepsrunner.run_step_group(
python
{ "resource": "" }
q142
run_step
train
def run_step(context): """Write payload out to yaml file. Args: context: pypyr.context.Context. Mandatory. The following context keys expected: - fileWriteYaml - path. mandatory. path-like. Write output file to here. Will create directories in path for you. - payload. optional. Write this to output file. If not specified, output entire context. Returns: None. Raises: pypyr.errors.KeyNotInContextError: fileWriteYaml or fileWriteYaml['path'] missing in context. pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or fileWriteYaml['path'] exists but is None. """ logger.debug("started") context.assert_child_key_has_value('fileWriteYaml', 'path', __name__) out_path = context.get_formatted_string(context['fileWriteYaml']['path']) # doing it like this to safeguard against accidentally dumping all context # with potentially sensitive values in it to disk if payload exists but is # None.
python
{ "resource": "" }
q143
run_step
train
def run_step(context): """Print debug info to console. context is a dictionary or dictionary-like. If you use pypyr.steps.debug as a simple step (i.e you do NOT specify the debug input context), it will just dump the entire context to stdout. Configure the debug step with the following optional context item: debug: keys: str (for single key) or list (of str keys). Only dump the specified keys. format: bool. Defaults False. Applies formatting expressions on dump. """ logger.debug("started") debug = context.get('debug', None) if debug: keys = debug.get('keys', None) format = debug.get('format', False) if keys: logger.debug(f"Writing to output: {keys}") if isinstance(keys, str): payload = {keys: context[keys]}
python
{ "resource": "" }
q144
get_error_name
train
def get_error_name(error): """Return canonical error name as string. For builtin errors like ValueError or Exception, will return the bare name, like ValueError or Exception. For all other exceptions, will return modulename.errorname, such as arbpackage.mod.myerror Args: error: Exception object. Returns:
python
{ "resource": "" }
q145
get_module
train
def get_module(module_abs_import): """Use importlib to get the module dynamically. Get instance of the module specified by the module_abs_import. This means that module_abs_import must be resolvable from this package. Args: module_abs_import: string. Absolute name of module to import. Raises: PyModuleNotFoundError: if module not found. """ logger.debug("starting") logger.debug(f"loading module {module_abs_import}") try: imported_module = importlib.import_module(module_abs_import) logger.debug("done") return imported_module except ModuleNotFoundError as err: msg = ("The module doesn't exist. Looking for a file like this: " f"{module_abs_import}") extended_msg = (f"{module_abs_import}.py should be in your working " "dir or it should be installed to the python path." "\nIf you have 'package.sub.mod' your current working "
python
{ "resource": "" }
q146
set_working_directory
train
def set_working_directory(working_directory): """Add working_directory to sys.paths. This allows dynamic loading of arbitrary python modules in cwd. Args: working_directory: string. path to add to sys.paths """ logger.debug("starting")
python
{ "resource": "" }
q147
Context.assert_child_key_has_value
train
def assert_child_key_has_value(self, parent, child, caller): """Assert that context contains key that has child which has a value. Args: parent: parent key child: validate this sub-key of parent exists AND isn't None. caller: string. calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None """ assert parent, ("parent parameter must be specified.")
python
{ "resource": "" }
q148
Context.assert_key_has_value
train
def assert_key_has_value(self, key, caller): """Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. calling function name - this is used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if
python
{ "resource": "" }
q149
Context.assert_keys_exist
train
def assert_keys_exist(self, caller, *keys): """Assert that context contains keys. Args: keys: validates that these keys exist in context caller: string. calling function or module name - this is used to construct error messages
python
{ "resource": "" }
q150
Context.assert_keys_have_values
train
def assert_keys_have_values(self, caller, *keys): """Check that keys list are all in context and all have values. Args: *keys: Will check each of these keys in context caller: string. Calling function name - just
python
{ "resource": "" }
q151
Context.get_formatted_iterable
train
def get_formatted_iterable(self, obj, memo=None): """Recursively loop through obj, formatting as it goes. Interpolates strings from the context dictionary. This is not a full on deepcopy, and it's on purpose not a full on deepcopy. It will handle dict, list, set, tuple for iteration, without any especial cuteness for other types or types not derived from these. For lists: if value is a string, format it. For dicts: format key. If value str, format it. For sets/tuples: if type str, format it. This is what formatting or interpolating a string means: So where a string like this 'Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: obj: iterable. Recurse through and format strings found in dicts, lists, tuples. Does not mutate the input
python
{ "resource": "" }
q152
Context.get_formatted_string
train
def get_formatted_string(self, input_string): """Return formatted value for input_string. get_formatted gets a context[key] value. get_formatted_string is for any arbitrary string that is not in the context. Only valid if input_string is a type string. Return a string interpolated from the context dictionary. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse for substitutions. Returns: Formatted string. Raises: KeyNotInContextError: context[key] has {somekey} where somekey does not exist in context dictionary. TypeError: Attempt operation on a non-string type. """ if isinstance(input_string, str): try:
python
{ "resource": "" }
q153
Context.get_formatted_as_type
train
def get_formatted_as_type(self, value, default=None, out_type=str): """Return formatted value for input value, returns as out_type. Caveat emptor: if out_type is bool and value a string, return will be True if str is 'True'. It will be False for all other cases. Args: value: the value to format default: if value is None, set to this out_type: cast return as this type Returns: Formatted value of type out_type """ if value is None: value = default if isinstance(value, SpecialTagDirective): result = value.get_value(self) return types.cast_to_type(result, out_type) if isinstance(value, str): result = self.get_formatted_string(value) result_type = type(result) if out_type is result_type: # get_formatted_string result is already a string return result
python
{ "resource": "" }
q154
Context.get_processed_string
train
def get_processed_string(self, input_string): """Run token substitution on input_string against context. You probably don't want to call this directly yourself - rather use get_formatted, get_formatted_iterable, or get_formatted_string because these contain more friendly error handling plumbing and context logic. If you do want to call it yourself, go for it, it doesn't touch state. If input_string='Piping {key1} the {key2} wild' And context={'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} An input string with a single formatting expression and nothing else will return the object at that context path: input_string='{key1}'. This means that the return obj will be the same type as the source object. This return object in itself has token substitutions run on it iteratively. By comparison, multiple formatting expressions and/or the inclusion of literal text will result in a string return type: input_string='{key1} literal text {key2}' Then this will return string: "Piping down the valleys wild" Args: input_string: string to parse Returns: any given type: Formatted string with {substitutions} made from context. If it's a !sic string, x from !sic x, with no substitutions made on x. If input_string was a single expression (e.g. '{field}'), then returns the object with {substitutions} made for its attributes. Raises: KeyNotInContextError: input_string is not a sic string and has {somekey} where somekey does not exist in context dictionary. """ # arguably, this doesn't really belong here, or at least it makes a # nonsense of the function name. given how py and sic strings # look and feel pretty much like strings from user's perspective, and # given legacy code back when sic strings were in fact just strings, # keep in here for backwards compatibility. if isinstance(input_string, SpecialTagDirective): return input_string.get_value(self) else: # is this a special one field formatstring? i.e. "{field}", with # nothing else?
python
{ "resource": "" }
q155
Context.keys_of_type_exist
train
def keys_of_type_exist(self, *keys): """Check if keys exist in context and if types are as expected. Args: *keys: *args for keys to check in context. Each arg is a tuple (str, type) Returns: Tuple of namedtuple ContextItemInfo, same order as *keys. ContextItemInfo(key, key_in_context, expected_type, is_expected_type) Remember if there is only one key in keys,
python
{ "resource": "" }
q156
Context.merge
train
def merge(self, add_me): """Merge add_me into context and apply interpolation. Bottom-up merge where add_me merges into context. Applies string interpolation where the type is a string. Where a key exists in context already, add_me's value will overwrite what's in context already. Supports nested hierarchy. add_me can contain dicts/lists/enumerables that contain other enumerables etc. It doesn't restrict levels of nesting, so if you really want to go crazy with the levels you can, but you might blow your stack. If something from add_me exists in context already, but add_me's value is of a different type, add_me will overwrite context. Do note this. I.e. if you had context['int_key'] == 1 and add_me['int_key'] == 'clearly not a number', the end result would be context['int_key'] == 'clearly not a number' If add_me contains lists/sets/tuples, this merges these additively, meaning it appends values from add_me to the existing sequence. Args: add_me: dict. Merge this dict into context. Returns: None. All operations mutate this instance of context. """ def merge_recurse(current, add_me): """Walk the current context tree in recursive inner function. On 1st iteration, current = self (i.e. root of context) On subsequent recursive iterations, current is wherever you're at in the nested context hierarchy. Args: current: dict. Destination of merge. add_me: dict. Merge this to current. """ for k, v in add_me.items(): # key supports interpolation k = self.get_formatted_string(k) # str not mergable, so it doesn't matter if it exists in dest if isinstance(v, str): # just overwrite dest - str adds/edits indiscriminately current[k] = self.get_formatted_string(v) elif isinstance(v, (bytes, bytearray)): # bytes aren't mergable or formattable
python
{ "resource": "" }
q157
Context.set_defaults
train
def set_defaults(self, defaults): """Set defaults in context if keys do not exist already. Adds the input dict (defaults) into the context, only where keys in defaults do not already exist in context. Supports nested hierarchies. Example: Given a context like this: key1: value1 key2: key2.1: value2.1 key3: None And defaults input like this: key1: 'updated value here won't overwrite since it already exists' key2: key2.2: value2.2 key3: 'key 3 exists so I won't overwrite' Will result in context: key1: value1 key2: key2.1: value2.1 key2.2: value2.2 key3: None Args: defaults: dict. Add this dict into context. Returns: None. All
python
{ "resource": "" }
q158
FileInRewriterStep.run_step
train
def run_step(self, rewriter): """Do the file in to out rewrite. Doesn't do anything more crazy than call files_in_to_out on the rewriter. Args: rewriter: pypyr.filesystem.FileRewriter instance. """ assert rewriter, ("FileRewriter instance required
python
{ "resource": "" }
q159
ObjectRewriterStep.run_step
train
def run_step(self, representer): """Do the object in-out rewrite. Args: representer: A pypyr.filesystem.ObjectRepresenter instance. """ assert representer, ("ObjectRepresenter instance required to run "
python
{ "resource": "" }
q160
StreamRewriterStep.run_step
train
def run_step(self): """Do the file in-out rewrite.""" rewriter
python
{ "resource": "" }
q161
StreamReplacePairsRewriterStep.run_step
train
def run_step(self): """Write in to out, replacing strings per the replace_pairs.""" formatted_replacements = self.context.get_formatted_iterable( self.replace_pairs)
python
{ "resource": "" }
q162
StreamReplacePairsRewriterStep.iter_replace_strings
train
def iter_replace_strings(replacements): """Create a function that uses replacement pairs to process a string. The returned function takes an iterator and yields on each processed line. Args: replacements: Dict containing 'find_string': 'replace_string' pairs Returns: function with signature: iterator of strings = function(iterable) """ def function_iter_replace_strings(iterable_strings): """Yield a formatted string from iterable_strings using a generator. Args: iterable_strings: Iterable containing strings. E.g a
python
{ "resource": "" }
q163
run_step
train
def run_step(context): """Set new context keys from formatting expressions with substitutions. Context is a dictionary or dictionary-like. context['contextSetf'] must exist. It's a dictionary. Will iterate context['contextSetf'] and save the values as new keys to the context. For example, say input context is: key1: value1 key2: value2 key3: value3 contextSetf: key2: 'aaa_{key1}_zzz' key4: 'bbb_{key3}_yyy' This will result in return context: key1: value1 key2: aaa_value1_zzz key3: bbb_value3_yyy
python
{ "resource": "" }
q164
cast_to_type
train
def cast_to_type(obj, out_type): """Cast obj to out_type if it's not out_type already. If the obj happens to be out_type already, it just returns obj as is. Args: obj: input object out_type: type. Returns: obj cast to out_type. Usual python conversion / casting rules
python
{ "resource": "" }
q165
get_pipeline_yaml
train
def get_pipeline_yaml(file): """Return pipeline yaml from open file object. Use specific custom representers to model the custom pypyr pipeline yaml format, to load in special literal types like py and sic strings. If looking to extend the pypyr pipeline syntax with special types, add these to the tag_representers list. Args: file: open file-like object. Returns:
python
{ "resource": "" }
q166
get_yaml_parser_roundtrip
train
def get_yaml_parser_roundtrip(): """Create the yaml parser object with this factory method. The round-trip parser preserves: - comments - block style and key ordering are kept, so you can diff the round-tripped source - flow style sequences ( ‘a: b, c, d’) (based on request and test by Anthony Sottile) - anchor names that are hand-crafted (i.e. not of the form``idNNN``) - merges in dictionaries are preserved
python
{ "resource": "" }
q167
get_yaml_parser_roundtrip_for_context
train
def get_yaml_parser_roundtrip_for_context(): """Create a yaml parser that can serialize the pypyr Context. Create yaml parser with get_yaml_parser_roundtrip, adding Context. This allows the yaml parser to serialize
python
{ "resource": "" }
q168
run_step
train
def run_step(context): """Load a json file into the pypyr context. json parsed from the file will be merged into the pypyr context. This will overwrite existing values if the same keys are already in there. I.e if file json has {'eggs' : 'boiled'} and context {'eggs': 'fried'} already exists, returned context['eggs'] will be 'boiled'. The json should not be an array [] on the top level, but rather an Object. Args: context: pypyr.context.Context. Mandatory. The following context key must exist - fetchJson - path. path-like. Path to file on disk. - key. string. If exists, write json structure to this context key. Else json writes to context root. Also supports a passing path as string to fetchJson, but in this case you won't be able to specify a key. All inputs support formatting expressions. Returns: None. updates context arg. Raises: FileNotFoundError: take a guess pypyr.errors.KeyNotInContextError: fetchJson.path missing in context. pypyr.errors.KeyInContextHasNoValueError: fetchJson.path exists but is
python
{ "resource": "" }
q169
QueryCountMiddleware._ignore_request
train
def _ignore_request(self, path): """Check to see if we should ignore the request.""" return any([
python
{ "resource": "" }
q170
QueryCountMiddleware._ignore_sql
train
def _ignore_sql(self, query): """Check to see if we should ignore the sql query.""" return any([
python
{ "resource": "" }
q171
QueryCountMiddleware._duplicate_queries
train
def _duplicate_queries(self, output): """Appends the most common duplicate queries to the given output.""" if QC_SETTINGS['DISPLAY_DUPLICATES']: for query, count in self.queries.most_common(QC_SETTINGS['DISPLAY_DUPLICATES']): lines = ['\nRepeated {0} times.'.format(count)]
python
{ "resource": "" }
q172
QueryCountMiddleware._calculate_num_queries
train
def _calculate_num_queries(self): """ Calculate the total number of request and response queries. Used for count header
python
{ "resource": "" }
q173
_process_settings
train
def _process_settings(**kwargs): """ Apply user supplied settings. """ # If we are in this method due to a signal, only reload for our settings setting_name = kwargs.get('setting', None) if setting_name is not None and setting_name != 'QUERYCOUNT': return # Support the old-style settings if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False): QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS # Apply new-style settings if not getattr(settings, 'QUERYCOUNT', False): return # Duplicate display is a special case, configure it specifically if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT: duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES'] if duplicate_settings is not None: duplicate_settings =
python
{ "resource": "" }
q174
NCloudBot._get_webapi_requests
train
def _get_webapi_requests(self): """Update headers of webapi for Requests.""" headers = { 'Accept': '*/*', 'Accept-Language': 'zh-CN,zh;q=0.8,gl;q=0.6,zh-TW;q=0.4', 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': 'http://music.163.com', 'Host':
python
{ "resource": "" }
q175
NCloudBot._build_response
train
def _build_response(self, resp): """Build internal Response object from given response.""" # rememberLogin # if self.method is 'LOGIN' and resp.json().get('code') == 200: # cookiesJar.save_cookies(resp, NCloudBot.username)
python
{ "resource": "" }
q176
NCloudBot.send
train
def send(self): """Send the request.""" success = False if self.method is None: raise ParamsError() try: if self.method == 'SEARCH': req = self._get_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] resp = req.post(_url, data=self.data) self._build_response(resp) self.response.ok = True else: if isinstance(self.data, dict): data = encrypted_request(self.data) req = self._get_webapi_requests() _url = self.__NETEAST_HOST + self._METHODS[self.method] if self.method in ('USER_DJ', 'USER_FOLLOWS', 'USER_EVENT'): _url = _url % self.params['uid'] if self.method in ('LYRIC', 'MUSIC_COMMENT'): _url = _url % self.params['id']
python
{ "resource": "" }
q177
set_option
train
def set_option(name, value): """ Set plydata option Parameters ---------- name : str Name of the option value : object New value of the option Returns ------- old : object
python
{ "resource": "" }
q178
GroupedDataFrame.group_indices
train
def group_indices(self): """ Return group indices """ # No groups if not self.plydata_groups: return np.ones(len(self), dtype=int)
python
{ "resource": "" }
q179
_make_verb_helper
train
def _make_verb_helper(verb_func, add_groups=False): """ Create function that prepares verb for the verb function The functions created add expressions to be evaluated to the verb, then call the core verb function Parameters ---------- verb_func : function Core verb function. This is the function called after expressions created and added to the verb. The core function should be one of those that implement verbs that evaluate expressions. add_groups : bool If True, a groups attribute is added to the verb. The groups are the columns created after evaluating the
python
{ "resource": "" }
q180
_get_base_dataframe
train
def _get_base_dataframe(df): """ Remove all columns other than those grouped on """ if isinstance(df, GroupedDataFrame): base_df = GroupedDataFrame(
python
{ "resource": "" }
q181
_add_group_columns
train
def _add_group_columns(data, gdf): """ Add group columns to data with a value from the grouped dataframe It is assumed that the grouped dataframe contains a single group >>> data = pd.DataFrame({ ... 'x': [5, 6, 7]}) >>> gdf = GroupedDataFrame({ ... 'g': list('aaa'), ... 'x': range(3)}, groups=['g']) >>> _add_group_columns(data, gdf) g x 0 a 5 1 a 6 2 a 7 """ n = len(data) if isinstance(gdf, GroupedDataFrame): for i, col in enumerate(gdf.plydata_groups): if col not in data: group_values = [gdf[col].iloc[0]] * n # Need to be careful and maintain the dtypes # of the group columns if pdtypes.is_categorical_dtype(gdf[col]): col_values = pd.Categorical(
python
{ "resource": "" }
q182
_create_column
train
def _create_column(data, col, value): """ Create column in dataframe Helper method meant to deal with problematic column values. e.g When the series index does not match that of the data. Parameters ---------- data : pandas.DataFrame dataframe in which to insert value col : column label Column name value : object Value to assign to column Returns ------- data : pandas.DataFrame Modified original dataframe >>> df = pd.DataFrame({'x': [1, 2, 3]}) >>> y = pd.Series([11, 12, 13], index=[21, 22, 23]) Data index and value index do not match >>> _create_column(df, 'y', y) x y 0 1 11 1 2 12 2 3 13 Non-empty dataframe, scalar value >>> _create_column(df, 'z', 3) x y z 0 1 11 3 1 2 12 3 2 3 13 3 Empty dataframe, scalar value >>> df = pd.DataFrame() >>> _create_column(df, 'w', 3) w 0 3 >>> _create_column(df, 'z', 'abc') w z 0 3 abc """ with suppress(AttributeError):
python
{ "resource": "" }
q183
build_expressions
train
def build_expressions(verb): """ Build expressions for helper verbs Parameters ---------- verb : verb A verb with a *functions* attribute. Returns ------- out : tuple (List of Expressions, New columns). The expressions and the new columns in which the results of those expressions will be stored. Even when a result will be stored in a column with an existing label, that column is still considered new, i.e. An expression ``x='x+1'``, will create a new_column `x` to replace an old column `x`. """ def partial(func, col, *args, **kwargs): """ Make a function that acts on a column in a dataframe Parameters ---------- func : callable Function col : str Column args : tuple Arguments to pass to func kwargs : dict Keyword arguments to func Returns ------- new_func : callable Function that takes a dataframe, and calls the original function on a column in the dataframe. """ def new_func(gdf): return func(gdf[col], *args, **kwargs) return new_func def make_statement(func, col): """ A statement of function called on a column in a dataframe Parameters ---------- func : str or callable Function to call on a dataframe column col : str Column """ if isinstance(func, str): expr = '{}({})'.format(func, col) elif callable(func): expr = partial(func, col, *verb.args, **verb.kwargs) else: raise TypeError("{} is not a function".format(func)) return expr def func_name(func): """ Return name of a function. If the function is `np.sin`, we return `sin`. """ if isinstance(func, str): return func try: return func.__name__
python
{ "resource": "" }
q184
Evaluator.process
train
def process(self): """ Run the expressions Returns ------- out : pandas.DataFrame Resulting data """ # Short cut if self._all_expressions_evaluated(): if self.drop: # Drop extra columns. They do not correspond to # any expressions. columns = [expr.column for expr in self.expressions] self.data = self.data.loc[:, columns] return
python
{ "resource": "" }
q185
Evaluator._all_expressions_evaluated
train
def _all_expressions_evaluated(self): """ Return True if all expressions match with the columns Saves some processor cycles """ def present(expr): return expr.stmt ==
python
{ "resource": "" }
q186
Evaluator._get_group_dataframes
train
def _get_group_dataframes(self): """ Get group dataframes Returns ------- out : tuple or generator Group dataframes """ if isinstance(self.data, GroupedDataFrame): grouper = self.data.groupby() # groupby on categorical columns uses the categories
python
{ "resource": "" }
q187
Evaluator._evaluate_group_dataframe
train
def _evaluate_group_dataframe(self, gdf): """ Evaluate a single group dataframe Parameters ---------- gdf : pandas.DataFrame Input group dataframe Returns ------- out : pandas.DataFrame Result data """ gdf._is_copy
python
{ "resource": "" }
q188
Evaluator._concat
train
def _concat(self, egdfs): """ Concatenate evaluated group dataframes Parameters ---------- egdfs : iterable Evaluated dataframes Returns ------- edata : pandas.DataFrame Evaluated data """ egdfs = list(egdfs) edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False) # groupby can mixup the rows. We try to maintain the original # order, but we can only do that if the result has a one to # one relationship with the original one2one = ( self.keep_index and
python
{ "resource": "" }
q189
Selector._resolve_slices
train
def _resolve_slices(data_columns, names): """ Convert any slices into column names Parameters ---------- data_columns : pandas.Index Dataframe columns names : tuple Names (including slices) of columns in the dataframe. Returns ------- out : tuple Names of columns in the dataframe. Has no
python
{ "resource": "" }
q190
Selector.select
train
def select(cls, verb): """ Return selected columns for the select verb Parameters ---------- verb : object verb with the column selection attributes: - names - startswith - endswith - contains - matches """ columns = verb.data.columns contains = verb.contains matches = verb.matches groups = _get_groups(verb) names = cls._resolve_slices(columns, verb.names) names_set = set(names) groups_set = set(groups) lst = [[]] if names or groups: # group variable missing from the selection are prepended missing = [g for g in groups if g not in names_set] missing_set = set(missing) c1 = missing + [x for x in names if x not in missing_set]
python
{ "resource": "" }
q191
Selector._at
train
def _at(cls, verb): """ A verb with a select text match """ # Named (listed) columns are always included columns = cls.select(verb) final_columns_set = set(cls.select(verb)) groups_set = set(_get_groups(verb)) final_columns_set -= groups_set - set(verb.names) def pred(col):
python
{ "resource": "" }
q192
Selector._if
train
def _if(cls, verb): """ A verb with a predicate function """ pred = verb.predicate data = verb.data groups = set(_get_groups(verb)) # force predicate if isinstance(pred, str): if not pred.endswith('_dtype'):
python
{ "resource": "" }
q193
get_verb_function
train
def get_verb_function(data, verb): """ Return function that implements the verb for given data type """ try: module = type_lookup[type(data)] except KeyError: # Some guess work for subclasses for type_, mod in type_lookup.items(): if isinstance(data, type_): module = mod break try:
python
{ "resource": "" }
q194
Expression
train
def Expression(*args, **kwargs): """ Return an appropriate Expression given the arguments Parameters ---------- args : tuple Positional arguments passed to the Expression class kwargs : dict Keyword arguments passed to the Expression class """ # dispatch
python
{ "resource": "" }
q195
EvalEnvironment.with_outer_namespace
train
def with_outer_namespace(self, outer_namespace): """Return a new EvalEnvironment with an extra namespace added. This namespace will be used only for variables that are not found in any
python
{ "resource": "" }
q196
EvalEnvironment.subset
train
def subset(self, names): """Creates a new, flat EvalEnvironment that contains only the variables specified.""" vld = VarLookupDict(self._namespaces)
python
{ "resource": "" }
q197
Q
train
def Q(name): """ Quote a variable name A way to 'quote' variable names, especially ones that do not otherwise meet Python's variable name rules. Parameters ---------- name : str Name of variable Returns ------- value : object Value of variable Examples -------- >>> import pandas as pd >>> from plydata import define >>> df = pd.DataFrame({'class': [10, 20, 30]}) Since ``class`` is a reserved python keyword it cannot be a variable name, and therefore cannot be used in an expression without quoting it. >>> df >> define(y='class+1') Traceback (most recent call last): File "<string>", line 1 class+1 ^
python
{ "resource": "" }
q198
regular_index
train
def regular_index(*dfs): """ Change & restore the indices of dataframes Dataframe with duplicate values can be hard to work with. When split and recombined, you cannot restore the row order. This can be the case even if the index is unique but irregular/unordered values. This contextmanager resets the unordered indices of any dataframe passed to it, on exit it restores the original index. A regular index is of the form:: RangeIndex(start=0, stop=n, step=1) Parameters ---------- dfs : tuple Dataframes Yields ------ dfs : tuple Dataframe Examples -------- Create dataframes with different indices >>> df1 = pd.DataFrame([4, 3, 2, 1]) >>> df2 =
python
{ "resource": "" }
q199
unique
train
def unique(lst): """ Return unique elements :class:`pandas.unique` and :class:`numpy.unique` cast mixed type lists to the same type. They are faster, but some times we want to maintain the type. Parameters ---------- lst : list-like List of items Returns ------- out : list Unique items in the order that they appear in the input. Examples -------- >>> import pandas as pd >>> import numpy as np >>> lst = ['one', 'two', 123, 'three'] >>> pd.unique(lst) array(['one', 'two', '123', 'three'], dtype=object) >>> np.unique(lst) array(['123', 'one', 'three', 'two'],
python
{ "resource": "" }