Columns: id (int64, 0 to 190k), prompt (string, 21 to 13.4M chars), docstring (string, 1 to 12k chars)
100
import importlib import re from absl import app from absl import flags import gin import seqio import tensorflow.compat.v1 as tf The provided code snippet includes necessary dependencies for implementing the `sequence_length` function. Write a Python function `def sequence_length(value=512)` to solve the following problem: Sequence length used when tokenizing. Args: value: an integer or dictionary Returns: a dictionary Here is the function: def sequence_length(value=512): """Sequence length used when tokenizing. Args: value: an integer or dictionary Returns: a dictionary """ if isinstance(value, int): return {"inputs": value, "targets": value} else: return value
Sequence length used when tokenizing. Args: value: an integer or dictionary Returns: a dictionary
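A minimal, self-contained check of the behavior described above; the function body is copied from the row and the assertions are only illustrative:

def sequence_length(value=512):
    # An int is broadcast to both features; a dict passes through unchanged.
    if isinstance(value, int):
        return {"inputs": value, "targets": value}
    else:
        return value

assert sequence_length(512) == {"inputs": 512, "targets": 512}
assert sequence_length({"inputs": 1024, "targets": 256}) == {"inputs": 1024, "targets": 256}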
101
import importlib import re from absl import app from absl import flags import gin import seqio import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS The provided code snippet includes necessary dependencies for implementing the `pretty` function. Write a Python function `def pretty(value)` to solve the following problem: Optional pretty printing helper for detokenized inputs. Makes any text delimiter regex specified in `--delimiters` bold in textual output. Args: value: string representing the detokenized output Returns: a string with appropriate styling applied Here is the function: def pretty(value): """Optional pretty printing helper for detokenized inputs. Makes any text delimiter regex specified in `--delimiters` bold in textual output. Args: value: string representing the detokenized output Returns: a string with appropriate styling applied """ if not FLAGS.pretty or not FLAGS.detokenize: return value combined_matcher = re.compile(f"({'|'.join(FLAGS.delimiters)})") return combined_matcher.sub(u"\u001b[1m\\1\u001b[0m", value)
Optional pretty printing helper for detokenized inputs. Makes any text delimiter regex specified in `--delimiters` bold in textual output. Args: value: string representing the detokenized output Returns: a string with appropriate styling applied
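A small illustration of the ANSI-bold substitution performed by `pretty`, with the absl flag lookups replaced by plain variables (the delimiter patterns below are hypothetical stand-ins):

import re

delimiters = [r"\[CLS\]", r"\[SEP\]"]  # hypothetical delimiter regexes
combined_matcher = re.compile(f"({'|'.join(delimiters)})")

text = "[CLS] a question [SEP] an answer"
# Wrap each delimiter match in ANSI bold escape codes, as pretty() does.
print(combined_matcher.sub("\u001b[1m\\1\u001b[0m", text))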
102
import importlib import re from absl import app from absl import flags import gin import seqio import tensorflow.compat.v1 as tf def import_modules(modules): for module in modules: importlib.import_module(module)
null
103
import functools import os import re from typing import Any, Callable, Iterable, Mapping, MutableSequence, Optional, Sequence, Tuple, Union from absl import logging import gin import numpy as np import seqio import t5.data import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import typing_extensions _MODEL_FEATURES = [ "inputs", "inputs_position", "inputs_segmentation", "targets", "targets_position", "targets_segmentation", "targets_subsegmentation" ] The provided code snippet includes necessary dependencies for implementing the `filter_features` function. Write a Python function `def filter_features(ex)` to solve the following problem: Filters example features, keeping only valid model features. Here is the function: def filter_features(ex): """Filters example features, keeping only valid model features.""" return {k: v for k, v in ex.items() if k in _MODEL_FEATURES}
Filters example features, keeping only valid model features.
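A short example of the filtering: keys outside _MODEL_FEATURES (e.g. pretokenized text) are dropped; the example dict is made up:

_MODEL_FEATURES = [
    "inputs", "inputs_position", "inputs_segmentation", "targets",
    "targets_position", "targets_segmentation", "targets_subsegmentation"
]

def filter_features(ex):
    return {k: v for k, v in ex.items() if k in _MODEL_FEATURES}

example = {"inputs": [1, 2, 3], "targets": [4, 5], "inputs_pretokenized": b"hello"}
assert filter_features(example) == {"inputs": [1, 2, 3], "targets": [4, 5]}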
104
import functools import os import re from typing import Any, Callable, Iterable, Mapping, MutableSequence, Optional, Sequence, Tuple, Union from absl import logging import gin import numpy as np import seqio import t5.data import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import typing_extensions def get_step_from_checkpoint_path(checkpoint_path): """Returns the global step for the checkpoint at `checkpoint_path`. **Note**: This function works for checkpoints that are saved in the TF format only. Assumes `checkpoint_path` corresponds to a file which contains the substring model.ckpt-{global_step} Args: checkpoint_path: str of path to a checkpoint file. Returns: int of the global step corresponding to the checkpoint file. Raises: ValueError if checkpoint_path does not correspond to a model checkpoint file which contains the global_step in its filename. """ match = re.match(r".*model\.ckpt\-(\d+).*", checkpoint_path) if match is None: raise ValueError("Invalid checkpoint path {}".format(checkpoint_path)) return int(match.group(1)) def get_latest_checkpoint_from_dir(model_dir): """Helper function to return the latest checkpoint number from a directory. Args: model_dir: str, Directory with checkpoint files. Returns: an int, latest checkpoint number. Raises: ValueError: if no checkpoints are found. """ ckpt = tf.train.latest_checkpoint(model_dir) if ckpt is None: raise ValueError("No checkpoints found in model directory: %s" % model_dir) return int(re.sub(".*ckpt-", "", ckpt)) The provided code snippet includes necessary dependencies for implementing the `get_checkpoints_iterator` function. Write a Python function `def get_checkpoints_iterator(checkpoint_steps, model_dir)` to solve the following problem: Get checkpoints from model directory. **Note**: This only works for models checkpoints saved using Tensorflow. Args: checkpoint_steps: list, int or str. If checkpoint_step is an int, find the checkpoint with the closest global step and return a singleton list. If checkpoint_step is a list of ints, replace each int with the path to the checkpoint with the closest global step. If checkpoint_step == "all", return the path of every checkpoint in model_dir, starting from the earliest checkpoint. if the checkpoint_steps is None, returns step from the tf.train.checkpoint_iterator for continuous eval. If -1, get the latest checkpoint from the model directory. model_dir: str, model directory. If model_dir is None, then checkpoint_steps must be an integer or list of integers. Returns: a iterator with the checkpoint steps (integers). Here is the function: def get_checkpoints_iterator(checkpoint_steps, model_dir): """Get checkpoints from model directory. **Note**: This only works for models checkpoints saved using Tensorflow. Args: checkpoint_steps: list, int or str. If checkpoint_step is an int, find the checkpoint with the closest global step and return a singleton list. If checkpoint_step is a list of ints, replace each int with the path to the checkpoint with the closest global step. If checkpoint_step == "all", return the path of every checkpoint in model_dir, starting from the earliest checkpoint. if the checkpoint_steps is None, returns step from the tf.train.checkpoint_iterator for continuous eval. If -1, get the latest checkpoint from the model directory. model_dir: str, model directory. If model_dir is None, then checkpoint_steps must be an integer or list of integers. Returns: a iterator with the checkpoint steps (integers). 
""" def _get_closest_checkpoint(target_checkpoint): """Returns checkpoint with closest global step to `target_checkpoint`.""" checkpoints = set() for f in tf.io.gfile.listdir(model_dir): try: checkpoints.add(int(get_step_from_checkpoint_path(f))) except ValueError: continue if not checkpoints: raise ValueError("No checkpoint files found in {}".format(model_dir)) closest = float("inf") for c in checkpoints: if abs(target_checkpoint - c) < abs(target_checkpoint - closest): closest = c if closest != target_checkpoint: logging.info( "Using checkpoint at step %d which is closest to requested step %d", closest, target_checkpoint, ) return closest if checkpoint_steps is None: if model_dir is None: raise ValueError("checkpoint_steps and model_dir both cannot be None.") def _generate_checkpoints(): for c in tf.train.checkpoints_iterator(model_dir): yield get_step_from_checkpoint_path(c) return _generate_checkpoints() elif checkpoint_steps == "all": if model_dir is None: raise ValueError( "model_dir cannot be None when checkpoint_steps={}".format( checkpoint_steps)) ckpt_paths = tf.gfile.Glob(os.path.join(model_dir, "model.ckpt*")) return [get_step_from_checkpoint_path(c) for c in ckpt_paths] elif isinstance(checkpoint_steps, int): if model_dir: if checkpoint_steps == -1: return [get_latest_checkpoint_from_dir(model_dir)] else: return [_get_closest_checkpoint(checkpoint_steps)] else: return [checkpoint_steps] else: if model_dir: closests = np.unique( [_get_closest_checkpoint(c) for c in checkpoint_steps]) return closests else: return checkpoint_steps
Get checkpoints from model directory. **Note**: This only works for model checkpoints saved using TensorFlow. Args: checkpoint_steps: list, int or str. If checkpoint_steps is an int, find the checkpoint with the closest global step and return a singleton list. If checkpoint_steps is a list of ints, replace each int with the path to the checkpoint with the closest global step. If checkpoint_steps == "all", return the path of every checkpoint in model_dir, starting from the earliest checkpoint. If checkpoint_steps is None, return steps from tf.train.checkpoints_iterator for continuous eval. If -1, get the latest checkpoint from the model directory. model_dir: str, model directory. If model_dir is None, then checkpoint_steps must be an integer or list of integers. Returns: an iterator with the checkpoint steps (integers).
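A hedged usage sketch for the three calling modes described above; the model directory path is a placeholder:

# Latest checkpoint only (returns a one-element list).
steps = get_checkpoints_iterator(-1, "/path/to/model_dir")

# Specific steps; each is snapped to the closest available checkpoint.
steps = get_checkpoints_iterator([10000, 20000], "/path/to/model_dir")

# Continuous eval: a generator that yields steps as new checkpoints appear.
for step in get_checkpoints_iterator(None, "/path/to/model_dir"):
    print("new checkpoint at step", step)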
105
import functools import os import re from typing import Any, Callable, Iterable, Mapping, MutableSequence, Optional, Sequence, Tuple, Union from absl import logging import gin import numpy as np import seqio import t5.data import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import typing_extensions def write_lines_to_file(lines, filename): """Write each line to filename, replacing the file if it exists.""" if tf.io.gfile.exists(filename): tf.io.gfile.remove(filename) with tf.io.gfile.GFile(filename, "w") as output_file: output_file.write("\n".join([str(l) for l in lines])) def write_targets_and_examples(summary_dir, targets, datasets): """Writes plaintext targets and inputs to the summary directory. Args: summary_dir: str, directory to store plaintext targets and examples targets: dict, task_name -> targets for each task. datasets: dict, task_name -> tf.data.Dataset for each task. """ if targets.keys() != datasets.keys(): raise ValueError("Targets and datasets must have the same tasks.") for task in targets.keys(): targets_filename = os.path.join( summary_dir, "{}_targets".format(task), ) write_lines_to_file(targets[task], targets_filename) inputs = [] for ex in tfds.as_numpy(datasets[task]): if "inputs_pretokenized" in ex: inputs.append(ex["inputs_pretokenized"]) else: inputs.append(ex["inputs"]) inputs_filename = os.path.join( summary_dir, "{}_inputs".format(task)) write_lines_to_file(inputs, inputs_filename) def get_vocabulary(mixture_or_task_name=None): """Return vocabulary from the mixture or task.""" if not mixture_or_task_name: # Attempt to extract the mixture/task name from the gin config. try: mixture_or_task_name = gin.query_parameter("%MIXTURE_NAME") except ValueError: logging.warning("Could not extract mixture/task name from gin config.") if mixture_or_task_name: provider = t5.data.get_mixture_or_task(mixture_or_task_name) features = provider.output_features if "inputs" in features and "targets" in features: return (features["inputs"].vocabulary, features["targets"].vocabulary) else: feature_values = list(features.values()) vocabulary = feature_values[0].vocabulary for feature in feature_values[1:]: if feature.vocabulary != vocabulary: logging.warning("No feature_name was provided to get_vocabulary, but " "output_features have different vocabularies.") vocabulary = None break if vocabulary: return vocabulary logging.warning("Using default vocabulary.") return t5.data.get_default_vocabulary() class PredictOrScoreFnCallable(typing_extensions.Protocol): """Signature for `predict_or_score_fn` passed to `run_eval`.""" def __call__( self, checkpoint_step: int, vocabulary: seqio.Vocabulary, tasks: Sequence[seqio.Task], datasets: Mapping[str, tf.data.Dataset], sequence_length: Union[None, Mapping[str, int]] ) -> MutableSequence[Union[str, float]]: ... class DatasetFnCallable(typing_extensions.Protocol): def __call__( self, task: seqio.Task, sequence_length: Mapping[str, int], split: str, ) -> tf.data.Dataset: ... def get_targets_and_examples( tasks: Sequence[seqio.Task], dataset_fn: Callable[[seqio.Task], tf.data.Dataset], sequence_dims: Mapping[str, int], num_examples: Optional[int] = None, use_memory_cache: bool = True, target_field_name: str = "targets" ) -> Tuple[Mapping[str, Any], Mapping[str, tf.data.Dataset], Mapping[str, int]]: """Get targets, cached datasets, and maximum sequence lengths per feature. Args: tasks: tasks objects to get targets and examples for. dataset_fn: function, returns the dataset from the task object. 
sequence_dims: dict of feature names to their sequence dimension. num_examples: an optional maximum number of examples to take from the beginning of each task dataset. use_memory_cache: whether to use tf.data.Dataset#cache. may cause memory issues for large datasets. target_field_name: Field name of the target in the input dataset examples. Returns: cached_targets: unpreprocessed targets for each task cached_task_datasets: cached datasets for each task, with cardinality set max_sequence_length: maximum sequence lengths for inputs and targets across all tasks. """ # Pre-load in all of the targets once before entering continuous eval loop cached_targets = {} cached_task_datasets = {} max_sequence_length = {k: 0 for k in tasks[0].output_features.keys()} for task in tasks: assert max_sequence_length.keys() == task.output_features.keys(), ( "all tasks must have the same features") for task in tasks: ds = dataset_fn(task) if num_examples: ds = ds.take(num_examples) if use_memory_cache: ds = ds.cache() targets = [] for ex in tfds.as_numpy(ds): for k in max_sequence_length: sequence_dim = sequence_dims.get(k, 0) sequence_length = ex[k].shape[sequence_dim] max_sequence_length[k] = max(max_sequence_length[k], sequence_length) # Create list of postprocessed targets pretokenized_target_field_name = target_field_name + "_pretokenized" if pretokenized_target_field_name in ex: target = ex[pretokenized_target_field_name] else: target = task.output_features[target_field_name].vocabulary.decode( list(ex[target_field_name])) if isinstance(target, bytes): target = target.decode("utf-8") targets.append(task.postprocess_fn(target, example=ex, is_target=True)) cached_targets[task.name] = targets cached_task_datasets[task.name] = ds.apply( tf.data.experimental.assert_cardinality(len(targets))) return cached_targets, cached_task_datasets, max_sequence_length The provided code snippet includes necessary dependencies for implementing the `run_eval` function. Write a Python function `def run_eval( mixture_or_task_name: str, predict_or_score_fn: PredictOrScoreFnCallable, checkpoint_steps: Iterable[int], dataset_fn: DatasetFnCallable, summary_dir: Optional[str] = None, split: Optional[str] = "validation", sequence_length: Optional[Mapping[str, int]] = None, batch_size: Optional[int] = None)` to solve the following problem: Run evaluation on the given mixture or task. Args: mixture_or_task_name: str, the name of the Mixture or Task to evaluate on. Must be pre-registered in the global `TaskRegistry` or `MixtureRegistry.` predict_or_score_fn: function, This function takes in the sequence length, checkpoint step, tasks to evaluate, an eval_dataset_fn, a dict mapping task names to cached examples, a dict mapping task names to datasets, and returns a list of outputs or a list of scores. checkpoint_steps: an iterator with integers for checkpoint steps to evaluate on. dataset_fn: function, This function takes a task and returns the dataset associated with it. summary_dir: str, path to write TensorBoard events file summaries for eval. If None, use model_dir/eval_{split}. split: str, the mixture/task split to evaluate on. sequence_length: an integer or a dict from feature-key to integer the sequence length to pad or truncate to, e.g. {"inputs": 512, "targets": 128}. If None, sequence length is automatically computed during eval. batch_size: integer, used only to check that expected padding matches the targets. If None, the check is skipped. 
Here is the function: def run_eval( mixture_or_task_name: str, predict_or_score_fn: PredictOrScoreFnCallable, checkpoint_steps: Iterable[int], dataset_fn: DatasetFnCallable, summary_dir: Optional[str] = None, split: Optional[str] = "validation", sequence_length: Optional[Mapping[str, int]] = None, batch_size: Optional[int] = None): """Run evaluation on the given mixture or task. Args: mixture_or_task_name: str, the name of the Mixture or Task to evaluate on. Must be pre-registered in the global `TaskRegistry` or `MixtureRegistry.` predict_or_score_fn: function, This function takes in the sequence length, checkpoint step, tasks to evaluate, an eval_dataset_fn, a dict mapping task names to cached examples, a dict mapping task names to datasets, and returns a list of outputs or a list of scores. checkpoint_steps: an iterator with integers for checkpoint steps to evaluate on. dataset_fn: function, This function takes a task and returns the dataset associated with it. summary_dir: str, path to write TensorBoard events file summaries for eval. If None, use model_dir/eval_{split}. split: str, the mixture/task split to evaluate on. sequence_length: an integer or a dict from feature-key to integer the sequence length to pad or truncate to, e.g. {"inputs": 512, "targets": 128}. If None, sequence length is automatically computed during eval. batch_size: integer, used only to check that expected padding matches the targets. If None, the check is skipped. """ vocabulary = get_vocabulary(mixture_or_task_name) tasks = t5.data.get_subtasks( t5.data.get_mixture_or_task(mixture_or_task_name)) tasks = seqio.evaluation.get_valid_eval_tasks(tasks, split) if not tasks: logging.info( "All provided tasks have metric_fns=[] or no matching splits; " "eval is not possible.") return summary_writer = None cached_targets, cached_datasets, max_sequence_length = ( get_targets_and_examples( tasks=tasks, dataset_fn=functools.partial( dataset_fn, split=split, sequence_length=None), sequence_dims={})) if summary_dir: write_targets_and_examples(summary_dir, cached_targets, cached_datasets) if sequence_length is None: logging.info("Setting sequence lengths to %s", max_sequence_length) sequence_length = max_sequence_length elif (sequence_length["inputs"] < max_sequence_length["inputs"] or sequence_length["targets"] < max_sequence_length["targets"]): logging.warning( "Given sequence lengths are insufficient for some evaluation inputs " "or targets. These sequences will be truncated to fit, likely " "leading to sub-optimal results. Consider passing `None` for " "sequence_length to have them be automatically computed.\n Got: %s, " "\n Max Lengths:%s", sequence_length, max_sequence_length) elif (sequence_length["inputs"] > max_sequence_length["inputs"] or sequence_length["targets"] > max_sequence_length["targets"]): logging.warning( "Given sequence lengths are longer than necessary for some " "evaluation inputs or targets, resulting in wasted computation. 
" "Consider passing `None` for sequence_length to have them be " "automatically computed.\n Got: %s,\n Max Lengths: %s", sequence_length, max_sequence_length) for step in checkpoint_steps: logging.info("Evaluating checkpoint step: %d", step) outputs = predict_or_score_fn( checkpoint_step=step, vocabulary=vocabulary, tasks=tasks, datasets=cached_datasets, sequence_length=sequence_length) for task in tasks: # Extract the portion of decodes corresponding to this dataset dataset = cached_datasets[task.name] dataset_size = len(cached_targets[task.name]) predictions = [ task.postprocess_fn(d, example=ex) for d, ex in zip(outputs[:dataset_size], tfds.as_numpy(dataset)) ] if summary_dir: outputs_filename = os.path.join( summary_dir, "{}_{}_outputs".format(task.name, step)) write_lines_to_file(outputs[:dataset_size], outputs_filename) predictions_filename = os.path.join( summary_dir, "{}_{}_predictions".format(task.name, step)) write_lines_to_file(predictions, predictions_filename) # Remove the used decodes. del outputs[:dataset_size] with tf.Graph().as_default(): if summary_dir: summary_writer = summary_writer or tf.summary.FileWriter( summary_dir) for metric_fn in task.metric_fns: if summary_dir: summary = tf.Summary() targets = cached_targets[task.name] metric_result = metric_fn(targets, predictions) for metric_name, metric_value in metric_result.items(): tag = "eval/{}/{}".format(task.name, metric_name) logging.info("%s at step %d: %.3f", tag, step, metric_value) if summary_dir: summary.value.add(tag=tag, simple_value=metric_value) summary_writer.add_summary(summary, step) # pytype: disable=attribute-error if summary_dir: summary_writer.flush() # pytype: disable=attribute-error # Only padding should remain. if batch_size: expected_pad = -sum(len(t) for t in cached_targets.values()) % batch_size if outputs and len(outputs) != expected_pad: raise ValueError("{} padded outputs, {} expected.".format( len(outputs), expected_pad))
Run evaluation on the given mixture or task. Args: mixture_or_task_name: str, the name of the Mixture or Task to evaluate on. Must be pre-registered in the global `TaskRegistry` or `MixtureRegistry.` predict_or_score_fn: function, This function takes in the sequence length, checkpoint step, tasks to evaluate, an eval_dataset_fn, a dict mapping task names to cached examples, a dict mapping task names to datasets, and returns a list of outputs or a list of scores. checkpoint_steps: an iterator with integers for checkpoint steps to evaluate on. dataset_fn: function, This function takes a task and returns the dataset associated with it. summary_dir: str, path to write TensorBoard events file summaries for eval. If None, use model_dir/eval_{split}. split: str, the mixture/task split to evaluate on. sequence_length: an integer or a dict from feature-key to integer the sequence length to pad or truncate to, e.g. {"inputs": 512, "targets": 128}. If None, sequence length is automatically computed during eval. batch_size: integer, used only to check that expected padding matches the targets. If None, the check is skipped.
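A rough, hedged sketch of wiring `run_eval` together; the task name, dataset_fn, and predict_or_score_fn bodies below are hypothetical placeholders that only illustrate the documented signatures:

def dataset_fn(task, sequence_length, split):
    return task.get_dataset(sequence_length, split=split, shuffle=False)

def predict_or_score_fn(checkpoint_step, vocabulary, tasks, datasets, sequence_length):
    outputs = []
    for task in tasks:
        for _ in datasets[task.name]:
            outputs.append("")  # one dummy prediction per example
    return outputs

run_eval(
    mixture_or_task_name="my_eval_mixture",  # assumed to be pre-registered
    predict_or_score_fn=predict_or_score_fn,
    checkpoint_steps=[100000],
    dataset_fn=dataset_fn,
    summary_dir="/tmp/eval_summaries",       # hypothetical output path
    split="validation")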
106
import importlib import os import sys from absl import app from absl import flags from absl import logging import gin from mesh_tensorflow.transformer import utils import pkg_resources import t5 from t5.models import mesh_transformer from t5.models import mtf_model import tensorflow.compat.v1 as tf def main(_): if FLAGS.module_import: for module in FLAGS.module_import: importlib.import_module(module) if FLAGS.t5_tfds_data_dir: t5.data.set_tfds_data_dir_override(FLAGS.t5_tfds_data_dir) # Add search path for gin files stored in package. assert pkg_resources.resource_filename is not None gin.add_config_file_search_path( pkg_resources.resource_filename(__name__, "gin")) try: suffix = 0 command_dir = os.path.join(FLAGS.model_dir, "commands") tf.io.gfile.makedirs(command_dir) command_filename = os.path.join(command_dir, "command") while tf.io.gfile.exists(command_filename): suffix += 1 command_filename = os.path.join(command_dir, "command.{}".format(suffix)) with tf.io.gfile.GFile(command_filename, "w") as f: f.write(" ".join(sys.argv)) except (tf.errors.PermissionDeniedError, tf.errors.InvalidArgumentError): logging.info( "No write access to model directory. Skipping command logging.") utils.parse_gin_defaults_and_flags( skip_unknown=(FLAGS.skip_all_gin_unknowns or ( mesh_transformer.DEPRECATED_GIN_REFERENCES + tuple(FLAGS.additional_deprecated_gin_references))), finalize_config=False) # We must overide this binding explicitly since it is set to a deprecated # function or class in many existing configs. gin.bind_parameter("run.vocabulary", mesh_transformer.get_vocabulary()) gin.finalize() # Set cache dir after loading gin to avoid unintentionally overriding it. t5.data.add_global_cache_dirs(FLAGS.additional_task_cache_dirs) if FLAGS.use_model_api: model = mtf_model.MtfModel( tpu_job_name=FLAGS.tpu_job_name, tpu=FLAGS.tpu, gcp_project=FLAGS.gcp_project, tpu_zone=FLAGS.tpu_zone, tpu_topology=FLAGS.tpu_topology, model_parallelism=FLAGS.model_parallelism, model_dir=FLAGS.model_dir, batch_size=FLAGS.batch_size, sequence_length={"inputs": FLAGS.input_sequence_length, "targets": FLAGS.target_sequence_length} ) if FLAGS.checkpoint_mode != "specific" and FLAGS.checkpoint_steps: raise ValueError("checkpoint_mode is set to %s and checkpoint_steps is " "also set. To use a particular checkpoint, please set " "checkpoint_mode to 'specific'. For other modes, please " "ensure that checkpoint_steps is not set." 
% FLAGS.checkpoint_mode) if FLAGS.checkpoint_mode == "latest": checkpoint_steps = -1 elif FLAGS.checkpoint_mode == "all": checkpoint_steps = "all" else: checkpoint_steps = [int(c) for c in FLAGS.checkpoint_steps] if FLAGS.mode == "train": model.train(mixture_or_task_name=FLAGS.mixture_or_task, steps=FLAGS.train_steps) elif FLAGS.mode == "eval": model.eval(mixture_or_task_name=FLAGS.mixture_or_task, checkpoint_steps=checkpoint_steps, summary_dir=FLAGS.eval_summary_dir, split=FLAGS.eval_split) elif FLAGS.mode == "finetune": if not (FLAGS.checkpoint_mode == "latest" or (FLAGS.checkpoint_mode == "specific" and len(FLAGS.checkpoint_steps) == 1)): raise ValueError( "Must specify a single checkpoint for finetuning a model.") if isinstance(checkpoint_steps, list): checkpoint_steps = checkpoint_steps[0] model.finetune( mixture_or_task_name=FLAGS.mixture_or_task, steps=FLAGS.train_steps, pretrained_model_dir=FLAGS.pretrained_model_dir, checkpoint_steps=checkpoint_steps) elif FLAGS.mode == "predict": model.predict( checkpoint_steps=checkpoint_steps, input_file=FLAGS.input_file, output_file=FLAGS.output_file, beam_size=FLAGS.beam_size, temperature=FLAGS.temperature, keep_top_k=FLAGS.keep_top_k,) elif FLAGS.mode == "score": model.score( FLAGS.input_file, FLAGS.target_file, scores_file=FLAGS.output_file, checkpoint_steps=checkpoint_steps) elif FLAGS.mode in ("export_predict", "export_score"): if not (FLAGS.checkpoint_mode == "latest" or (FLAGS.checkpoint_mode == "specific" and len(FLAGS.checkpoint_steps) == 1)): raise ValueError( "Must specify a single checkpoint for exporting a model.") if isinstance(checkpoint_steps, list): checkpoint_steps = checkpoint_steps[0] model.export( export_dir=FLAGS.export_dir, checkpoint_step=checkpoint_steps, beam_size=FLAGS.beam_size, temperature=FLAGS.temperature, keep_top_k=FLAGS.keep_top_k, eval_with_score=(FLAGS.mode == "export_score")) else: raise ValueError("--mode flag must be set when using Model API.") else: if FLAGS.mode: raise ValueError("--mode flag should only be set when using Model API.") if not FLAGS.tpu: with gin.unlock_config(): gin.bind_parameter("utils.get_variable_dtype.slice_dtype", "float32") gin.bind_parameter( "utils.get_variable_dtype.activation_dtype", "float32") utils.run( tpu_job_name=FLAGS.tpu_job_name, tpu=FLAGS.tpu, gcp_project=FLAGS.gcp_project, tpu_zone=FLAGS.tpu_zone, model_dir=FLAGS.model_dir) def console_entry_point(): tf.disable_v2_behavior() tf.logging.set_verbosity(tf.logging.INFO) app.run(main)
null
107
import functools import os import gin import gin.tf import mesh_tensorflow as mtf from mesh_tensorflow import optimize from mesh_tensorflow.transformer import dataset as transformer_dataset from mesh_tensorflow.transformer import learning_rate_schedules from mesh_tensorflow.transformer import utils as mtf_utils from t5.models import mesh_transformer from t5.models import utils from t5.models.t5_model import T5Model import tensorflow.compat.v1 as tf def _parse_operative_config(model_dir): with gin.unlock_config(): gin.parse_config_file( os.path.join(model_dir, "operative_config.gin"), skip_unknown=mesh_transformer.DEPRECATED_GIN_REFERENCES)
null
108
import functools from absl import logging import gin import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio from t5.models import utils as model_utils import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `mesh_train_dataset_fn` function. Write a Python function `def mesh_train_dataset_fn( mixture_or_task_name, sequence_length, vocabulary=None, dataset_split=tfds.Split.TRAIN, shuffle=True, seed=None, use_cached=False, pack=True)` to solve the following problem: Returns the tf.data.Dataset for training on a given mixture. This uses the format required for utils.run's `train_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. vocabulary: unused argument, maintains compatibility with other dataset_fns. dataset_split: string, which split of the dataset to load. In most cases this should be "train". shuffle: Whether or not to shuffle dataset. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data use_cached: bool, whether to load the cached version of this dataset. pack: bool, whether to pack the dataset. Returns: A tf.data.Dataset of preprocessed, tokenized, and batched examples. Here is the function: def mesh_train_dataset_fn( mixture_or_task_name, sequence_length, vocabulary=None, dataset_split=tfds.Split.TRAIN, shuffle=True, seed=None, use_cached=False, pack=True): """Returns the tf.data.Dataset for training on a given mixture. This uses the format required for utils.run's `train_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. vocabulary: unused argument, maintains compatibility with other dataset_fns. dataset_split: string, which split of the dataset to load. In most cases this should be "train". shuffle: Whether or not to shuffle dataset. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data use_cached: bool, whether to load the cached version of this dataset. pack: bool, whether to pack the dataset. Returns: A tf.data.Dataset of preprocessed, tokenized, and batched examples. """ del vocabulary mixture_or_task = seqio.get_mixture_or_task(mixture_or_task_name) ds = mixture_or_task.get_dataset( sequence_length, split=dataset_split, use_cached=use_cached, shuffle=shuffle, num_epochs=None, seed=seed) # Select just the output features which are present in the dataset. feature_keys = tuple(k for k in mixture_or_task.output_features if k in tf.data.get_output_shapes(ds)) # Filtering feature keys is done in pack_or_pad function. However, when # packing is turned off, input_features aren't filtered leading to training # problems due to strings showing up in the input example. Filtering features # ensures that we don't rely on pack_or_pad to filter features for training. 
def _filter_features(ex): return {k: ex[k] for k in feature_keys} ds = ds.map( _filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE) eos_keys = set( k for k, f in mixture_or_task.output_features.items() if f.add_eos) ds = transformer_dataset.pack_or_pad( ds, sequence_length, pack=pack, feature_keys=feature_keys, ensure_eos=eos_keys) return ds
Returns the tf.data.Dataset for training on a given mixture. This uses the format required for utils.run's `train_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the maximum int sequence length for that feature. vocabulary: unused argument, maintains compatibility with other dataset_fns. dataset_split: string, which split of the dataset to load. In most cases this should be "train". shuffle: whether or not to shuffle the dataset. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data. use_cached: bool, whether to load the cached version of this dataset. pack: bool, whether to pack the dataset. Returns: A tf.data.Dataset of preprocessed, tokenized, and batched examples.
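A brief, hedged usage sketch (assumes eager execution and a Task or Mixture registered under the placeholder name):

ds = mesh_train_dataset_fn(
    mixture_or_task_name="my_mixture",                # hypothetical registry name
    sequence_length={"inputs": 512, "targets": 512},
    dataset_split="train",
    pack=True)
for ex in tfds.as_numpy(ds.take(1)):
    print({k: v.shape for k, v in ex.items()})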
109
import functools from absl import logging import gin import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio from t5.models import utils as model_utils import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `mesh_inference_dataset_fn` function. Write a Python function `def mesh_inference_dataset_fn( mixture_or_task_name, sequence_length, dataset_split, shuffle=False, seed=None, vocabulary=None, num_inference_examples=-1, use_cached=False, priming_sequence_length=None)` to solve the following problem: Returns all tf.data.Datasets for LM inference on a given mixture. For Tasks without inputs (such as language modeling), the first `priming_sequence_length` tokens in the target are used as the "inputs" for inference. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. NOTE, this function does NOT receive the split specified in utils.run. It needs to be specified separately. shuffle: Whether or not to shuffle dataset. seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data. vocabulary: unused argument, maintains compatibility with other dataaset_fns num_inference_examples: maximum number of examples per task to do inference on. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. evals but should not be used for iterative decoding. priming_sequence_length: If the Task only has "targets", select the first this many tokens from each target sequence to use as "inputs". This is useful for decoder-only language models where you would like to use a portion of the targets as a priming sequence for generation. Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples. Here is the function: def mesh_inference_dataset_fn( mixture_or_task_name, sequence_length, dataset_split, shuffle=False, seed=None, vocabulary=None, num_inference_examples=-1, use_cached=False, priming_sequence_length=None): """Returns all tf.data.Datasets for LM inference on a given mixture. For Tasks without inputs (such as language modeling), the first `priming_sequence_length` tokens in the target are used as the "inputs" for inference. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. NOTE, this function does NOT receive the split specified in utils.run. It needs to be specified separately. shuffle: Whether or not to shuffle dataset. seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data. vocabulary: unused argument, maintains compatibility with other dataaset_fns num_inference_examples: maximum number of examples per task to do inference on. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. evals but should not be used for iterative decoding. 
priming_sequence_length: If the Task only has "targets", select the first this many tokens from each target sequence to use as "inputs". This is useful for decoder-only language models where you would like to use a portion of the targets as a priming sequence for generation. Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples. """ del vocabulary mixture_or_task = seqio.get_mixture_or_task(mixture_or_task_name) def _split_targets_for_primed_inference(ex): ex["inputs"] = ex["targets"][:priming_sequence_length] ex["targets"] = ex["targets"][priming_sequence_length:] ex["inputs"] = tf.pad( ex["inputs"], [[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]], "CONSTANT") ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,)) return ex def _prepare_for_unprimed_inference(ex): ex["inputs"] = tf.constant([], dtype=tf.int64) return ex def _get_dataset_for_single_task(task, sequence_length): """Get a tensorflow.data.Dataset for the provided task.""" ds = task.get_dataset( sequence_length, split=dataset_split, use_cached=use_cached, shuffle=shuffle, seed=seed) if "inputs" not in ds.element_spec: if not priming_sequence_length or priming_sequence_length <= 0: logging.warning("Priming sequence length not specified so priming " "with the empty string.") ds = ds.map(_prepare_for_unprimed_inference) else: logging.info("Using the first %d tokens of each target as input.", priming_sequence_length) ds = ds.map(_split_targets_for_primed_inference) elif priming_sequence_length is not None: raise ValueError( "Setting a priming sequence length only makes sense for decoder-only " "Tasks, which have `targets` but no `inputs`.") eos_keys = set( k for k, f in mixture_or_task.output_features.items() if f.add_eos) logging.info( "Padding '%s' with sequence lengths: %s", task.name, sequence_length) ds = transformer_dataset.pack_or_pad( ds, sequence_length, pack=False, feature_keys=tuple(task.output_features), ensure_eos=eos_keys) if num_inference_examples is not None and num_inference_examples >= 0: ds = ds.take(num_inference_examples) return ds outputs = [] for task in seqio.get_subtasks(mixture_or_task): if dataset_split not in task.splits: logging.info("Task %s has no '%s' split, skipping inference.", task.name, dataset_split) continue outputs.append( transformer_dataset.EvalDataset( task.name, functools.partial( _get_dataset_for_single_task, task=task, sequence_length=sequence_length), task.postprocess_fn, task.metric_fns, ) ) if not outputs: logging.warning("No %s data found for %s.", dataset_split, mixture_or_task_name) return outputs
Returns all tf.data.Datasets for LM inference on a given mixture. For Tasks without inputs (such as language modeling), the first `priming_sequence_length` tokens in the target are used as the "inputs" for inference. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the maximum int sequence length for that feature. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. NOTE: this function does NOT receive the split specified in utils.run; it needs to be specified separately. shuffle: whether or not to shuffle the dataset. seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data. vocabulary: unused argument, maintains compatibility with other dataset_fns. num_inference_examples: maximum number of examples per task to do inference on. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. priming_sequence_length: If the Task only has "targets", select the first `priming_sequence_length` tokens from each target sequence to use as "inputs". This is useful for decoder-only language models where you would like to use a portion of the targets as a priming sequence for generation. Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
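A hedged example for a decoder-only Task that has only "targets": prime generation with the first 64 target tokens. The task name is a placeholder:

eval_datasets = mesh_inference_dataset_fn(
    mixture_or_task_name="my_lm_task",                # hypothetical decoder-only task
    sequence_length={"inputs": 64, "targets": 448},
    dataset_split="validation",
    num_inference_examples=100,
    priming_sequence_length=64)
for eval_ds in eval_datasets:
    print(eval_ds[0])  # first tuple field is the task name, per the constructor call above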
110
import functools from absl import logging import gin import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio from t5.models import utils as model_utils import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `mesh_eval_dataset_fn` function. Write a Python function `def mesh_eval_dataset_fn( mixture_or_task_name, sequence_length, dataset_split, vocabulary=None, num_eval_examples=-1, use_cached=False, pack=False, shuffle_eval_examples=False, seed=None)` to solve the following problem: Returns all tf.data.Datasets for evaluation on a given mixture. This uses the format required for utils.run's `eval_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. vocabulary: unused argument, maintains compatibility with other dataaset_fns num_eval_examples: maximum number of examples per task to use for continuous eval. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. pack: a boolean, whether to pack examples. This is useful for perplexity evals but should not be used for iterative decoding. shuffle_eval_examples: boolean, whether to shuffle eval examples, applied only when num_eval_examples is not None. Intended to be able to eval on a different eval slice at every iteration. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples. Here is the function: def mesh_eval_dataset_fn( mixture_or_task_name, sequence_length, dataset_split, vocabulary=None, num_eval_examples=-1, use_cached=False, pack=False, shuffle_eval_examples=False, seed=None): """Returns all tf.data.Datasets for evaluation on a given mixture. This uses the format required for utils.run's `eval_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the int length for that feature the max sequence length. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. vocabulary: unused argument, maintains compatibility with other dataaset_fns num_eval_examples: maximum number of examples per task to use for continuous eval. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. pack: a boolean, whether to pack examples. This is useful for perplexity evals but should not be used for iterative decoding. shuffle_eval_examples: boolean, whether to shuffle eval examples, applied only when num_eval_examples is not None. Intended to be able to eval on a different eval slice at every iteration. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples. 
""" del vocabulary mixture_or_task = seqio.get_mixture_or_task(mixture_or_task_name) def _get_dataset_for_single_task(task, sequence_length): """Get a tensorflow.data.Dataset for the provided task.""" if shuffle_eval_examples and seed is None: logging.warning(("shuffle_seed_examples is true but no seed was ", "provided. Using a random seed.")) ds = task.get_dataset( sequence_length, split=dataset_split, use_cached=use_cached, shuffle=shuffle_eval_examples, seed=seed, ) eos_keys = set( k for k, f in mixture_or_task.output_features.items() if f.add_eos) if sequence_length is None: logging.info( "Skipping packing/padding for '%s' since sequence length is None.", task.name) else: logging.info( "%sing '%s' with sequence lengths: %s", "Pack" if pack else "Padd", task.name, sequence_length) ds = transformer_dataset.pack_or_pad( ds, sequence_length, pack=pack, feature_keys=tuple(task.output_features), ensure_eos=eos_keys) if num_eval_examples is not None and num_eval_examples >= 0: ds = ds.take(num_eval_examples) return ds outputs = [] for task in seqio.get_subtasks(mixture_or_task): if dataset_split not in task.splits: logging.info( "Task %s has no '%s' split, skipping eval.", task.name, dataset_split ) continue outputs.append( transformer_dataset.EvalDataset( task.name, functools.partial( _get_dataset_for_single_task, task=task, sequence_length=sequence_length), task.postprocess_fn, task.metric_fns, ) ) if not outputs: logging.warning("No %s data found for %s.", dataset_split, mixture_or_task_name) return outputs
Returns all tf.data.Datasets for evaluation on a given mixture. This uses the format required for utils.run's `eval_dataset_fn` argument in the Mesh TF transformer standalone. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. sequence_length: dict mapping feature key to the maximum int sequence length for that feature. If set to None, packing and padding will be disabled. dataset_split: string, which split of the dataset to load. vocabulary: unused argument, maintains compatibility with other dataset_fns. num_eval_examples: maximum number of examples per task to use for continuous eval. If None or less than 0, use all examples. use_cached: bool, whether to load the cached version of this dataset. pack: a boolean, whether to pack examples. This is useful for perplexity evals but should not be used for iterative decoding. shuffle_eval_examples: boolean, whether to shuffle eval examples, applied only when num_eval_examples is not None. Intended to allow evaluation on a different eval slice at every iteration. seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and shuffle seed for tf.data. Returns: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
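A hedged usage sketch for perplexity-style eval with packing enabled; the mixture name is a placeholder:

eval_datasets = mesh_eval_dataset_fn(
    mixture_or_task_name="my_mixture",                # hypothetical registry name
    sequence_length={"inputs": 512, "targets": 128},
    dataset_split="validation",
    num_eval_examples=500,
    pack=True,
    shuffle_eval_examples=False)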
111
import functools from absl import logging import gin import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio from t5.models import utils as model_utils import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `tsv_dataset_fn` function. Write a Python function `def tsv_dataset_fn( filename, sequence_length, dataset_split, vocabulary, shuffle_buffer_size=10000)` to solve the following problem: r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`. Here is the function: def tsv_dataset_fn( filename, sequence_length, dataset_split, vocabulary, shuffle_buffer_size=10000): r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`.""" # Currently `tf.gfile.glob` is broken on GCS, so we only read a file or # list of files. return transformer_dataset.packed_parallel_tsv_dataset( dataset=tf.data.TextLineDataset(filename).shuffle(shuffle_buffer_size), sequence_length=sequence_length, vocabulary=vocabulary, dataset_split=dataset_split, append_eos=True, eos_id=1)
r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`.
112
import functools from absl import logging import gin import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio from t5.models import utils as model_utils import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds The provided code snippet includes necessary dependencies for implementing the `get_vocabulary` function. Write a Python function `def get_vocabulary(mixture_or_task_name=None)` to solve the following problem: Get the appropriate value for the utils.run.vocabulary argument. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. Returns: Either a single seqio.vocabularies.Vocabulary or a tuple of seqio.vocabularies.Vocabulary for inputs and targets. Here is the function: def get_vocabulary(mixture_or_task_name=None): """Get the appropriate value for the utils.run.vocabulary argument. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. Returns: Either a single seqio.vocabularies.Vocabulary or a tuple of seqio.vocabularies.Vocabulary for inputs and targets. """ return model_utils.get_vocabulary(mixture_or_task_name)
Get the appropriate value for the utils.run.vocabulary argument. Args: mixture_or_task_name: string, an identifier for a Mixture or Task in the appropriate registry. Must be specified via gin. Returns: Either a single seqio.vocabularies.Vocabulary or a tuple of seqio.vocabularies.Vocabulary for inputs and targets.
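The return type depends on the registered features, so callers typically branch on it; the mixture name below is a placeholder:

vocab = get_vocabulary("my_mixture")  # hypothetical registry name
if isinstance(vocab, tuple):
    inputs_vocab, targets_vocab = vocab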
113
import functools import itertools import os import re import time from absl import logging import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio import t5.data from t5.models import utils from t5.models.t5_model import T5Model import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import torch import torch.utils.tensorboard The provided code snippet includes necessary dependencies for implementing the `tokens_to_batches` function. Write a Python function `def tokens_to_batches(dataset, sequence_length, batch_size, output_features, mixture_or_task=None)` to solve the following problem: Convert a dataset of token sequences to batches of padded/masked examples. Args: dataset: tf.data.Dataset containing examples with token sequences. sequence_length: dict of int, a dict mapping feature name to length. batch_size: int, the number of padded sequences in each batch. output_features: list of str, features to include in the dataset. mixture_or_task: a Task or Mixture object, used to correctly specify eos if provided. If none, eos is always added at the end of the sequence. Returns: A generator that produces batches of numpy examples. Here is the function: def tokens_to_batches(dataset, sequence_length, batch_size, output_features, mixture_or_task=None): """Convert a dataset of token sequences to batches of padded/masked examples. Args: dataset: tf.data.Dataset containing examples with token sequences. sequence_length: dict of int, a dict mapping feature name to length. batch_size: int, the number of padded sequences in each batch. output_features: list of str, features to include in the dataset. mixture_or_task: a Task or Mixture object, used to correctly specify eos if provided. If none, eos is always added at the end of the sequence. Returns: A generator that produces batches of numpy examples. """ if mixture_or_task: eos_keys = set( k for k, f in mixture_or_task.output_features.items() if f.add_eos) else: eos_keys = True dataset = transformer_dataset.pack_or_pad( dataset, sequence_length, pack=False, feature_keys=output_features, ensure_eos=eos_keys, ) def _map_fn(ex): for key in output_features: tensor = ex[key] mask = tf.cast(tf.greater(tensor, 0), tensor.dtype) ex[key + "_mask"] = mask return ex dataset = dataset.map( _map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE, ) dataset = dataset.batch(batch_size, drop_remainder=False) return tfds.as_numpy(dataset)
Convert a dataset of token sequences to batches of padded/masked examples. Args: dataset: tf.data.Dataset containing examples with token sequences. sequence_length: dict of int, a dict mapping feature name to length. batch_size: int, the number of padded sequences in each batch. output_features: list of str, features to include in the dataset. mixture_or_task: a Task or Mixture object, used to correctly specify eos if provided. If none, eos is always added at the end of the sequence. Returns: A generator that produces batches of numpy examples.
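A hedged sketch: tokenize a seqio Task dataset and turn it into padded, masked numpy batches. The task name is a placeholder and eager execution is assumed:

task = seqio.get_mixture_or_task("my_task")           # hypothetical registry name
ds = task.get_dataset({"inputs": 512, "targets": 128}, split="validation", shuffle=False)
batches = tokens_to_batches(
    ds,
    sequence_length={"inputs": 512, "targets": 128},
    batch_size=8,
    output_features=("inputs", "targets"),
    mixture_or_task=task)
for batch in batches:
    print(batch["inputs"].shape, batch["inputs_mask"].shape)
    break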
114
import functools import itertools import os import re import time from absl import logging import mesh_tensorflow.transformer.dataset as transformer_dataset import seqio import t5.data from t5.models import utils from t5.models.t5_model import T5Model import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import torch import torch.utils.tensorboard The provided code snippet includes necessary dependencies for implementing the `_get_dataset` function. Write a Python function `def _get_dataset(mixture_or_task_or_name, sequence_length, split, shuffle=True)` to solve the following problem: Get a tf.data.Dataset for a given Task or Mixture. Args: mixture_or_task_or_name: Task or Mixture or str, the name of the Mixture or Task to train on or the Tasks or Mixture object itself. Must be pre-registered in the global `t5.data.TaskRegistry` or `t5.data.MixtureRegistry.` sequence_length: dict of int, a dict mapping feature name to length. split: str or `tensorflow_datasets.Split`, the data split to load. shuffle: boolean, whether to shuffle the dataset. Returns: A generator that produces batches of numpy examples. Here is the function: def _get_dataset(mixture_or_task_or_name, sequence_length, split, shuffle=True): """Get a tf.data.Dataset for a given Task or Mixture. Args: mixture_or_task_or_name: Task or Mixture or str, the name of the Mixture or Task to train on or the Tasks or Mixture object itself. Must be pre-registered in the global `t5.data.TaskRegistry` or `t5.data.MixtureRegistry.` sequence_length: dict of int, a dict mapping feature name to length. split: str or `tensorflow_datasets.Split`, the data split to load. shuffle: boolean, whether to shuffle the dataset. Returns: A generator that produces batches of numpy examples. """ if isinstance(mixture_or_task_or_name, str): task = seqio.get_mixture_or_task(mixture_or_task_or_name) else: task = mixture_or_task_or_name return task.get_dataset(sequence_length, split, shuffle=shuffle)
Get a tf.data.Dataset for a given Task or Mixture. Args: mixture_or_task_or_name: Task or Mixture or str, the name of the Mixture or Task to train on or the Tasks or Mixture object itself. Must be pre-registered in the global `t5.data.TaskRegistry` or `t5.data.MixtureRegistry.` sequence_length: dict of int, a dict mapping feature name to length. split: str or `tensorflow_datasets.Split`, the data split to load. shuffle: boolean, whether to shuffle the dataset. Returns: A generator that produces batches of numpy examples.
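A short, hedged example; both a registered name and a Task/Mixture object are accepted, and the name below is a placeholder:

ds = _get_dataset("my_task",                          # hypothetical registry name
                  sequence_length={"inputs": 512, "targets": 128},
                  split="train",
                  shuffle=True)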
115
import argparse, os, shutil, time import cv2 import torch from torch import nn from torch.utils.data import DataLoader from torchvision.transforms import Compose, ToTensor, Resize from torchvision.transforms.functional import to_pil_image from threading import Thread, Lock from tqdm import tqdm from PIL import Image from dataset import VideoDataset from model import MattingBase, MattingRefine def cv2_frame_to_cuda(frame): frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).cuda()
null
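A hedged example of feeding one decoded frame through `cv2_frame_to_cuda`; the video path is a placeholder and a CUDA device is assumed:

cap = cv2.VideoCapture("/path/to/video.mp4")  # hypothetical input video
ok, frame = cap.read()
if ok:
    src = cv2_frame_to_cuda(frame)
    # ToTensor() scales to [0, 1] and unsqueeze_(0) adds a batch dim: 1 x 3 x H x W.
    print(src.shape, src.dtype, src.device)
cap.release()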
116
import argparse import kornia import torch import os import random from torch import nn from torch import distributed as dist from torch import multiprocessing as mp from torch.nn import functional as F from torch.cuda.amp import autocast, GradScaler from torch.utils.tensorboard import SummaryWriter from torch.utils.data import DataLoader, Subset from torch.optim import Adam from torchvision.utils import make_grid from tqdm import tqdm from torchvision import transforms as T from PIL import Image from data_path import DATA_PATH from dataset import ImagesDataset, ZipDataset, VideoDataset, SampleDataset from dataset import augmentation as A from model import MattingRefine from model.utils import load_matched_state_dict args = parser.parse_args() distributed_num_gpus = torch.cuda.device_count() assert args.batch_size % distributed_num_gpus == 0 def compute_loss(pred_pha_lg, pred_fgr_lg, pred_pha_sm, pred_fgr_sm, pred_err_sm, true_pha_lg, true_fgr_lg): def random_crop(*imgs): def valid(model, dataloader, writer, step): DATA_PATH = { 'videomatte240k': { 'train': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' }, 'valid': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' } }, 'photomatte13k': { 'train': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' }, 'valid': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' } }, 'distinction': { 'train': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR', }, 'valid': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' }, }, 'adobe': { 'train': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR', }, 'valid': { 'fgr': 'PATH_TO_IMAGES_DIR', 'pha': 'PATH_TO_IMAGES_DIR' }, }, 'backgrounds': { 'train': 'PATH_TO_IMAGES_DIR', 'valid': 'PATH_TO_IMAGES_DIR' }, } def load_matched_state_dict(model, state_dict, print_stats=True): def train_worker(rank, addr, port): # Distributed Setup os.environ['MASTER_ADDR'] = addr os.environ['MASTER_PORT'] = port dist.init_process_group("nccl", rank=rank, world_size=distributed_num_gpus) # Training DataLoader dataset_train = ZipDataset([ ZipDataset([ ImagesDataset(DATA_PATH[args.dataset_name]['train']['pha'], mode='L'), ImagesDataset(DATA_PATH[args.dataset_name]['train']['fgr'], mode='RGB'), ], transforms=A.PairCompose([ A.PairRandomAffineAndResize((2048, 2048), degrees=(-5, 5), translate=(0.1, 0.1), scale=(0.3, 1), shear=(-5, 5)), A.PairRandomHorizontalFlip(), A.PairRandomBoxBlur(0.1, 5), A.PairRandomSharpen(0.1), A.PairApplyOnlyAtIndices([1], T.ColorJitter(0.15, 0.15, 0.15, 0.05)), A.PairApply(T.ToTensor()) ]), assert_equal_length=True), ImagesDataset(DATA_PATH['backgrounds']['train'], mode='RGB', transforms=T.Compose([ A.RandomAffineAndResize((2048, 2048), degrees=(-5, 5), translate=(0.1, 0.1), scale=(1, 2), shear=(-5, 5)), T.RandomHorizontalFlip(), A.RandomBoxBlur(0.1, 5), A.RandomSharpen(0.1), T.ColorJitter(0.15, 0.15, 0.15, 0.05), T.ToTensor() ])), ]) dataset_train_len_per_gpu_worker = int(len(dataset_train) / distributed_num_gpus) dataset_train = Subset(dataset_train, range(rank * dataset_train_len_per_gpu_worker, (rank + 1) * dataset_train_len_per_gpu_worker)) dataloader_train = DataLoader(dataset_train, shuffle=True, pin_memory=True, drop_last=True, batch_size=args.batch_size // distributed_num_gpus, num_workers=args.num_workers // distributed_num_gpus) # Validation DataLoader if rank == 0: dataset_valid = ZipDataset([ ZipDataset([ ImagesDataset(DATA_PATH[args.dataset_name]['valid']['pha'], mode='L'), ImagesDataset(DATA_PATH[args.dataset_name]['valid']['fgr'], 
mode='RGB') ], transforms=A.PairCompose([ A.PairRandomAffineAndResize((2048, 2048), degrees=(-5, 5), translate=(0.1, 0.1), scale=(0.3, 1), shear=(-5, 5)), A.PairApply(T.ToTensor()) ]), assert_equal_length=True), ImagesDataset(DATA_PATH['backgrounds']['valid'], mode='RGB', transforms=T.Compose([ A.RandomAffineAndResize((2048, 2048), degrees=(-5, 5), translate=(0.1, 0.1), scale=(1, 1.2), shear=(-5, 5)), T.ToTensor() ])), ]) dataset_valid = SampleDataset(dataset_valid, 50) dataloader_valid = DataLoader(dataset_valid, pin_memory=True, drop_last=True, batch_size=args.batch_size // distributed_num_gpus, num_workers=args.num_workers // distributed_num_gpus) # Model model = MattingRefine(args.model_backbone, args.model_backbone_scale, args.model_refine_mode, args.model_refine_sample_pixels, args.model_refine_thresholding, args.model_refine_kernel_size).to(rank) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) model_distributed = nn.parallel.DistributedDataParallel(model, device_ids=[rank]) if args.model_last_checkpoint is not None: load_matched_state_dict(model, torch.load(args.model_last_checkpoint)) optimizer = Adam([ {'params': model.backbone.parameters(), 'lr': 5e-5}, {'params': model.aspp.parameters(), 'lr': 5e-5}, {'params': model.decoder.parameters(), 'lr': 1e-4}, {'params': model.refiner.parameters(), 'lr': 3e-4}, ]) scaler = GradScaler() # Logging and checkpoints if rank == 0: if not os.path.exists(f'checkpoint/{args.model_name}'): os.makedirs(f'checkpoint/{args.model_name}') writer = SummaryWriter(f'log/{args.model_name}') # Run loop for epoch in range(args.epoch_start, args.epoch_end): for i, ((true_pha, true_fgr), true_bgr) in enumerate(tqdm(dataloader_train)): step = epoch * len(dataloader_train) + i true_pha = true_pha.to(rank, non_blocking=True) true_fgr = true_fgr.to(rank, non_blocking=True) true_bgr = true_bgr.to(rank, non_blocking=True) true_pha, true_fgr, true_bgr = random_crop(true_pha, true_fgr, true_bgr) true_src = true_bgr.clone() # Augment with shadow aug_shadow_idx = torch.rand(len(true_src)) < 0.3 if aug_shadow_idx.any(): aug_shadow = true_pha[aug_shadow_idx].mul(0.3 * random.random()) aug_shadow = T.RandomAffine(degrees=(-5, 5), translate=(0.2, 0.2), scale=(0.5, 1.5), shear=(-5, 5))(aug_shadow) aug_shadow = kornia.filters.box_blur(aug_shadow, (random.choice(range(20, 40)),) * 2) true_src[aug_shadow_idx] = true_src[aug_shadow_idx].sub_(aug_shadow).clamp_(0, 1) del aug_shadow del aug_shadow_idx # Composite foreground onto source true_src = true_fgr * true_pha + true_src * (1 - true_pha) # Augment with noise aug_noise_idx = torch.rand(len(true_src)) < 0.4 if aug_noise_idx.any(): true_src[aug_noise_idx] = true_src[aug_noise_idx].add_(torch.randn_like(true_src[aug_noise_idx]).mul_(0.03 * random.random())).clamp_(0, 1) true_bgr[aug_noise_idx] = true_bgr[aug_noise_idx].add_(torch.randn_like(true_bgr[aug_noise_idx]).mul_(0.03 * random.random())).clamp_(0, 1) del aug_noise_idx # Augment background with jitter aug_jitter_idx = torch.rand(len(true_src)) < 0.8 if aug_jitter_idx.any(): true_bgr[aug_jitter_idx] = kornia.augmentation.ColorJitter(0.18, 0.18, 0.18, 0.1)(true_bgr[aug_jitter_idx]) del aug_jitter_idx # Augment background with affine aug_affine_idx = torch.rand(len(true_bgr)) < 0.3 if aug_affine_idx.any(): true_bgr[aug_affine_idx] = T.RandomAffine(degrees=(-1, 1), translate=(0.01, 0.01))(true_bgr[aug_affine_idx]) del aug_affine_idx with autocast(): pred_pha, pred_fgr, pred_pha_sm, pred_fgr_sm, pred_err_sm, _ = model_distributed(true_src, true_bgr) loss = 
compute_loss(pred_pha, pred_fgr, pred_pha_sm, pred_fgr_sm, pred_err_sm, true_pha, true_fgr) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() optimizer.zero_grad() if rank == 0: if (i + 1) % args.log_train_loss_interval == 0: writer.add_scalar('loss', loss, step) if (i + 1) % args.log_train_images_interval == 0: writer.add_image('train_pred_pha', make_grid(pred_pha, nrow=5), step) writer.add_image('train_pred_fgr', make_grid(pred_fgr, nrow=5), step) writer.add_image('train_pred_com', make_grid(pred_fgr * pred_pha, nrow=5), step) writer.add_image('train_pred_err', make_grid(pred_err_sm, nrow=5), step) writer.add_image('train_true_src', make_grid(true_src, nrow=5), step) del true_pha, true_fgr, true_src, true_bgr del pred_pha, pred_fgr, pred_pha_sm, pred_fgr_sm, pred_err_sm if (i + 1) % args.log_valid_interval == 0: valid(model, dataloader_valid, writer, step) if (step + 1) % args.checkpoint_interval == 0: torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}-iter-{step}.pth') if rank == 0: torch.save(model.state_dict(), f'checkpoint/{args.model_name}/epoch-{epoch}.pth') # Clean up dist.destroy_process_group()
null
117
import argparse import torch import os import shutil from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader from torchvision import transforms as T from torchvision.transforms.functional import to_pil_image from threading import Thread from tqdm import tqdm from dataset import ImagesDataset, ZipDataset from dataset import augmentation as A from model import MattingBase, MattingRefine from inference_utils import HomographicAlignment def writer(img, path): img = to_pil_image(img[0].cpu()) img.save(path)
null
118
import os import re from setuptools import setup from setuptools import find_packages with open("README.md", "r") as fh: long_description = fh.read() with open("requirements.txt", encoding="utf8") as f: requirements = f.readlines() def find_version(*filepath): here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, *filepath)) as fp: version_match = re.search( r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M ) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.")
null
119
import warnings import os import numpy as np from gradient_free_optimizers import ( HillClimbingOptimizer, StochasticHillClimbingOptimizer, RepulsingHillClimbingOptimizer, SimulatedAnnealingOptimizer, DownhillSimplexOptimizer, RandomSearchOptimizer, GridSearchOptimizer, RandomRestartHillClimbingOptimizer, PowellsMethod, PatternSearch, LipschitzOptimizer, DirectAlgorithm, RandomAnnealingOptimizer, ParallelTemperingOptimizer, ParticleSwarmOptimizer, SpiralOptimization, EvolutionStrategyOptimizer, BayesianOptimizer, TreeStructuredParzenEstimators, ForestOptimizer, ) from surfaces.test_functions import SphereFunction, AckleyFunction from search_path_gif import search_path_gif def warn(*args, **kwargs): pass
null
120
import warnings import os import gc import glob import numpy as np import pandas as pd from tqdm import tqdm import matplotlib as mpl import matplotlib.pyplot as plt from gradient_free_optimizers.optimizers.core_optimizer.converter import Converter def warn(*args, **kwargs): pass
null
121
import warnings import os import gc import glob import numpy as np import pandas as pd from tqdm import tqdm import matplotlib as mpl import matplotlib.pyplot as plt from gradient_free_optimizers.optimizers.core_optimizer.converter import Converter def plot_search_paths( path, optimizer, opt_para, n_iter_max, objective_function, search_space, initialize, random_state, title, ): if opt_para == {}: show_opt_para = False else: show_opt_para = True opt = optimizer( search_space, initialize=initialize, random_state=random_state, **opt_para ) opt.search( objective_function, n_iter=n_iter_max, # memory=False, verbosity=False, ) conv = Converter(search_space) for n_iter in tqdm(range(1, n_iter_max + 1)): def objective_function_np(args): params = {} for i, para_name in enumerate(search_space): params[para_name] = args[i] return objective_function(params) plt.figure(figsize=(7, 7)) plt.set_cmap("jet_r") # jet_r x_all, y_all = search_space["x0"], search_space["x1"] xi, yi = np.meshgrid(x_all, y_all) zi = objective_function_np((xi, yi)) zi = np.rot90(zi, k=1) plt.imshow( zi, alpha=0.15, # interpolation="antialiased", # vmin=z.min(), # vmax=z.max(), # origin="lower", extent=[x_all.min(), x_all.max(), y_all.min(), y_all.max()], ) for n, opt_ in enumerate(opt.optimizers): n_optimizers = len(opt.optimizers) n_iter_tmp = int(n_iter / n_optimizers) n_iter_mod = n_iter % n_optimizers if n_iter_mod > n: n_iter_tmp += 1 if n_iter_tmp == 0: continue pos_list = np.array(opt_.pos_new_list) score_list = np.array(opt_.score_new_list) # print("\n pos_list \n", pos_list, "\n") # print("\n score_list \n", score_list, "\n") if len(pos_list) == 0: continue values_list = conv.positions2values(pos_list) values_list = np.array(values_list) plt.plot( values_list[:n_iter_tmp, 0], values_list[:n_iter_tmp, 1], linestyle="--", marker=",", color="black", alpha=0.33, label=n, linewidth=0.5, ) plt.scatter( values_list[:n_iter_tmp, 0], values_list[:n_iter_tmp, 1], c=score_list[:n_iter_tmp], marker="H", s=15, vmin=np.amin(score_list[:n_iter_tmp]), vmax=np.amax(score_list[:n_iter_tmp]), label=n, edgecolors="black", linewidth=0.3, ) plt.xlabel("x") plt.ylabel("y") nth_iteration = "\n\nnth Iteration: " + str(n_iter) opt_para_name = "" opt_para_value = "\n\n" if show_opt_para: opt_para_name += "\n Parameter:" for para_name, para_value in opt_para.items(): opt_para_name += "\n " + " " + para_name + ": " opt_para_value += "\n " + str(para_value) + " " if title == True: title_name = opt.name + "\n" + opt_para_name plt.title(title_name, loc="left") plt.title(opt_para_value, loc="center") elif isinstance(title, str): plt.title(title, loc="left") plt.title(nth_iteration, loc="right", fontsize=8) # plt.xlim((-101, 201)) # plt.ylim((-101, 201)) clb = plt.colorbar() clb.set_label("score", labelpad=-50, y=1.03, rotation=0) # plt.legend(loc="upper left", bbox_to_anchor=(-0.10, 1.2)) # plt.axis("off") if show_opt_para: plt.subplots_adjust(top=0.75) plt.tight_layout() # plt.margins(0, 0) plt.savefig( path + "/_plots/" + opt._name_ + "_" + "{0:0=3d}".format(n_iter) + ".jpg", dpi=150, pad_inches=0, # bbox_inches="tight", ) plt.ioff() # Clear the current axes. plt.cla() # Clear the current figure. plt.clf() # Closes all the figure windows. 
plt.close("all") gc.collect() def search_path_gif( path, optimizer, opt_para, name, n_iter, objective_function, search_space, initialize, random_state=0, title=True, ): path = os.path.join(os.getcwd(), path) print("\n\nname", name) plots_dir = path + "/_plots/" print("plots_dir", plots_dir) os.makedirs(plots_dir, exist_ok=True) plot_search_paths( path=path, optimizer=optimizer, opt_para=opt_para, n_iter_max=n_iter, objective_function=objective_function, search_space=search_space, initialize=initialize, random_state=random_state, title=title, ) ### ffmpeg framerate = str(n_iter / 10) # framerate = str(10) _framerate = " -framerate " + framerate + " " _opt_ = optimizer(search_space) _input = " -i " + path + "/_plots/" + str(_opt_._name_) + "_" + "%03d.jpg " _scale = " -vf scale=1200:-1:flags=lanczos " _output = os.path.join(path, name) ffmpeg_command = ( "ffmpeg -hide_banner -loglevel error -y" + _framerate + _input + _scale + _output ) print("\n -----> ffmpeg_command \n", ffmpeg_command, "\n") print("create " + name) os.system(ffmpeg_command) ### remove _plots rm_files = glob.glob(path + "/_plots/*.jpg") for f in rm_files: os.remove(f) os.rmdir(plots_dir)
null
122
import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

def convex_function(pos_new):
    score = -(pos_new["x1"] * pos_new["x1"] + pos_new["x2"] * pos_new["x2"])
    return score
null
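A minimal end-to-end sketch of how this objective is typically optimized, following the search-space / `opt.search(objective_function, n_iter=...)` API that appears in the plotting utilities earlier in this collection; the parameter ranges and iteration count are illustrative.

import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

def convex_function(pos_new):
    score = -(pos_new["x1"] * pos_new["x1"] + pos_new["x2"] * pos_new["x2"])
    return score

# Candidate values for each dimension (illustrative ranges).
search_space = {
    "x1": np.arange(-100, 101, 0.1),
    "x2": np.arange(-100, 101, 0.1),
}

opt = HillClimbingOptimizer(search_space)
opt.search(convex_function, n_iter=300)
# Results are exposed as attributes such as opt.best_para / opt.best_score.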
123
import numpy as np
from gradient_free_optimizers import GridSearchOptimizer

def convex_function(pos_new):
    score = -(pos_new["x1"] * pos_new["x1"] + pos_new["x2"] * pos_new["x2"])
    return score
null
124
import numpy as np
from gradient_free_optimizers import RandomSearchOptimizer

def ackley_function(pos_new):
    x = pos_new["x1"]
    y = pos_new["x2"]
    a1 = -20 * np.exp(-0.2 * np.sqrt(0.5 * (x * x + y * y)))
    a2 = -np.exp(0.5 * (np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y)))
    score = a1 + a2 + 20
    return -score
null
125
import numpy as np
from gradient_free_optimizers import RandomSearchOptimizer

def convex_function(pos_new):
    score = -(pos_new["x1"] * pos_new["x1"] + pos_new["x2"] * pos_new["x2"])
    return score
null
126
import numpy as np
from gradient_free_optimizers import RandomSearchOptimizer

def constraint_1(para):
    # only values in 'x1' higher than -5 are valid
    return para["x1"] > -5
null
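A hedged usage sketch for the constraint above: it assumes the optimizer constructor accepts a `constraints` list of predicate functions (as in the library's constrained-optimization examples); the objective and ranges are illustrative.

import numpy as np
from gradient_free_optimizers import RandomSearchOptimizer

def convex_function(para):
    return -(para["x1"] * para["x1"] + para["x2"] * para["x2"])

def constraint_1(para):
    # only values in 'x1' higher than -5 are valid
    return para["x1"] > -5

search_space = {
    "x1": np.arange(-10, 10, 0.1),
    "x2": np.arange(-10, 10, 0.1),
}

# Assumed API: constraints passed as a list of predicates at construction time.
opt = RandomSearchOptimizer(search_space, constraints=[constraint_1])
opt.search(convex_function, n_iter=50)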
127
from keras.models import Sequential from keras.layers import ( Dense, Conv2D, MaxPooling2D, Flatten, Dropout, Activation, ) from keras.datasets import cifar10 from keras.utils import to_categorical from gradient_free_optimizers import BayesianOptimizer import numpy as np (X_train, y_train), (X_test, y_test) = cifar10.load_data() y_train = to_categorical(y_train, 10) y_test = to_categorical(y_test, 10) X_train = X_train[0:1000] y_train = y_train[0:1000] X_test = X_test[0:1000] y_test = y_test[0:1000] def cnn(para): nn = Sequential() nn.add( Conv2D( para["filter.0"], (3, 3), padding="same", input_shape=X_train.shape[1:], ) ) nn.add(Activation("relu")) nn.add(Conv2D(para["filter.0"], (3, 3))) nn.add(Activation("relu")) nn.add(MaxPooling2D(pool_size=(2, 2))) nn.add(Dropout(0.25)) nn.add(Conv2D(para["filter.0"], (3, 3), padding="same")) nn.add(Activation("relu")) nn.add(Conv2D(para["filter.0"], (3, 3))) nn.add(Activation("relu")) nn.add(MaxPooling2D(pool_size=(2, 2))) nn.add(Dropout(0.25)) nn.add(Flatten()) nn.add(Dense(para["dense.0"])) nn.add(Activation("relu")) nn.add(Dropout(0.5)) nn.add(Dense(10)) nn.add(Activation("softmax")) nn.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) nn.fit(X_train, y_train, epochs=5, batch_size=256) _, score = nn.evaluate(x=X_test, y=y_test) return score
null
128
import numpy as np
import matplotlib.pyplot as plt
from gradient_free_optimizers import HillClimbingOptimizer

def gaussian_function(x, A, B, C):
    return A * np.exp(-((x - B) ** 2) / (2 * C**2))

x_range = np.arange(min_x, max_x, step_x)
y_gauss_hist = plt.hist(gauss_np1, density=True, bins=bins)[0]

def fit_gaussian(para):
    A, B, C = para["A"], para["B"], para["C"]
    y_gauss_func = gaussian_function(x_range, A, B, C)

    # compare results of function and hist samples
    diff = np.subtract(y_gauss_func, y_gauss_hist)

    # we want to minimize the difference
    score = -np.abs(diff).sum()
    return score
null
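The snippet above leaves `gauss_np1`, `min_x`, `max_x`, `step_x`, and `bins` undefined. The sketch below supplies illustrative stand-ins (they would need to be defined before the histogram is computed) and shows one way the fit could be run; all of these values are assumptions, not part of the original example.

import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

# Illustrative stand-ins for the undefined names.
gauss_np1 = np.random.normal(loc=2.0, scale=1.5, size=10_000)  # samples to fit
min_x, max_x, step_x = -10, 10, 0.1
bins = 100

# Illustrative ranges for A (amplitude), B (mean), C (standard deviation).
search_space = {
    "A": np.arange(0.01, 1.0, 0.01),
    "B": np.arange(-5, 5, 0.1),
    "C": np.arange(0.1, 5, 0.1),
}

opt = HillClimbingOptimizer(search_space)
opt.search(fit_gaussian, n_iter=500)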
129
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.datasets import load_wine
from gradient_free_optimizers import HillClimbingOptimizer

data = load_wine()  # `data` was undefined in the snippet; load_wine() is implied by the import
X, y = data.data, data.target

def model(para):
    gbc = GradientBoostingClassifier(
        n_estimators=para["n_estimators"],
        max_depth=para["max_depth"],
        min_samples_split=para["min_samples_split"],
        min_samples_leaf=para["min_samples_leaf"],
    )
    scores = cross_val_score(gbc, X, y, cv=3)
    return scores.mean()
null
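A minimal hyperparameter-search sketch for the `model` objective above; the ranges are illustrative and the search API follows the pattern used elsewhere in this collection.

import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

# Illustrative hyperparameter ranges.
search_space = {
    "n_estimators": np.arange(20, 200, 10),
    "max_depth": np.arange(2, 12),
    "min_samples_split": np.arange(2, 12),
    "min_samples_leaf": np.arange(1, 11),
}

opt = HillClimbingOptimizer(search_space)
opt.search(model, n_iter=25)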
130
import numpy as np
from gradient_free_optimizers import LipschitzOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
131
import numpy as np
from gradient_free_optimizers import EvolutionStrategyOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
132
import numpy as np
from gradient_free_optimizers import SimulatedAnnealingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
133
import numpy as np
from gradient_free_optimizers import GridSearchOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
134
import numpy as np
from gradient_free_optimizers import PatternSearch

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
135
import numpy as np
from gradient_free_optimizers import PowellsMethod

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
136
import numpy as np
from gradient_free_optimizers import SpiralOptimization

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
137
import numpy as np
from gradient_free_optimizers import HillClimbingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
138
import numpy as np
from gradient_free_optimizers import DownhillSimplexOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
139
import numpy as np
from gradient_free_optimizers import ParallelTemperingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
140
import numpy as np
from gradient_free_optimizers import StochasticHillClimbingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
141
import numpy as np
from gradient_free_optimizers import BayesianOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
142
import numpy as np
from gradient_free_optimizers import DirectAlgorithm

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
143
import numpy as np
from gradient_free_optimizers import RepulsingHillClimbingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
144
import numpy as np
from gradient_free_optimizers import TreeStructuredParzenEstimators

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
145
import numpy as np
from gradient_free_optimizers import ParticleSwarmOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
146
import numpy as np
from gradient_free_optimizers import RandomSearchOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
147
import numpy as np
from gradient_free_optimizers import RandomRestartHillClimbingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
148
import numpy as np
from gradient_free_optimizers import RandomAnnealingOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
149
import numpy as np
from gradient_free_optimizers import ForestOptimizer

def sphere_function(para):
    x = para["x"]
    y = para["y"]
    return -(x * x + y * y)
null
150
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_wine
from gradient_free_optimizers import HillClimbingOptimizer

data = load_wine()  # `data` was undefined in the snippet; load_wine() is implied by the import
X, y = data.data, data.target

def model(para):
    gbc = DecisionTreeClassifier(
        min_samples_split=para["min_samples_split"],
        min_samples_leaf=para["min_samples_leaf"],
    )
    scores = cross_val_score(gbc, X, y, cv=5)
    return scores.mean()
null
151
import random
import numpy as np

The provided code snippet includes necessary dependencies for implementing the `set_random_seed` function. Write a Python function `def set_random_seed(nth_process, random_state)` to solve the following problem:
Sets the random seed separately for each thread (to avoid getting the same results in each thread)
Here is the function:
def set_random_seed(nth_process, random_state):
    """
    Sets the random seed separately for each thread
    (to avoid getting the same results in each thread)
    """
    if nth_process is None:
        nth_process = 0
    if random_state is None:
        random_state = np.random.randint(0, high=2**31 - 2, dtype=np.int64).item()

    random.seed(random_state + nth_process)
    np.random.seed(random_state + nth_process)

    return random_state + nth_process
Sets the random seed separately for each thread (to avoid getting the same results in each thread)
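A short usage sketch for `set_random_seed` above (assuming the function is in scope): each worker derives its own deterministic seed from a shared base `random_state`.

# Four workers, one shared base seed.
seeds = [set_random_seed(nth_process=p, random_state=42) for p in range(4)]
# seeds == [42, 43, 44, 45]; random and numpy are reseeded on each call.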
152
import random
import numpy as np

def move_random(ss_positions):
    position = []
    for search_space_pos in ss_positions:
        pos_ = random.choice(search_space_pos)
        position.append(pos_)
    return np.array(position)
null
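A brief usage sketch for `move_random` above (assuming it is in scope): each entry of `ss_positions` holds the candidate positions for one dimension, and the result is a randomly chosen position vector.

import numpy as np

ss_positions = [np.arange(0, 10), np.arange(-5, 5)]
position = move_random(ss_positions)
# e.g. array([ 7, -2]) -- one random candidate per dimension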
153
import numpy as np from scipy.stats import norm from ..smb_opt.smbo import SMBO from ..smb_opt.surrogate_models import EnsembleRegressor from ..smb_opt.acquisition_function import ExpectedImprovement from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import GradientBoostingRegressor from sklearn.svm import SVR from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.neural_network import MLPRegressor def normalize(array): num = array - array.min() den = array.max() - array.min() if den == 0: return np.random.random_sample(array.shape) else: return ((num / den) + 0) / 1
null
154
import random
import numpy as np
from ..local_opt import HillClimbingOptimizer

def max_list_idx(list_):
    max_item = max(list_)
    max_item_idx = [i for i, j in enumerate(list_) if j == max_item]
    return max_item_idx[-1:][0]
null
155
import numpy as np
from collections import OrderedDict
from ..local_opt import HillClimbingOptimizer

def sort_list_idx(list_):
    list_np = np.array(list_)
    idx_sorted = list(list_np.argsort()[::-1])
    return idx_sorted
null
156
import numpy as np
from ..local_opt import HillClimbingOptimizer

def roation(n_dim, vector):
    if n_dim == 1:
        return -1  # not sure about that

    I = np.identity(n_dim - 1)
    R = np.pad(I, ((1, 0), (0, 1)), constant_values=(0, 0))
    R[0, n_dim - 1] = -1
    return np.matmul(R, vector)
null
157
import numpy as np
from .base_population_optimizer import BasePopulationOptimizer
from ._spiral import Spiral

def centeroid(array_list):
    centeroid = []
    for idx in range(array_list[0].shape[0]):
        center_dim_pos = []
        for array in array_list:
            center_dim_pos.append(array[idx])
        center_dim_mean = np.array(center_dim_pos).mean()
        centeroid.append(center_dim_mean)
    return centeroid
null
158
import math
import numpy as np
from ..core_optimizer import CoreOptimizer

def split(positions_l, population):
    div_int = math.ceil(len(positions_l) / population)
    dist_init_positions = []

    for nth_indiv in range(population):
        indiv_pos = []
        for nth_indiv_pos in range(div_int):
            idx = nth_indiv + nth_indiv_pos * population
            if idx < len(positions_l):
                indiv_pos.append(positions_l[idx])
        dist_init_positions.append(indiv_pos)

    return dist_init_positions
null
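A small usage sketch for `split` above (assuming it is in scope): initial positions are distributed round-robin across the population.

import numpy as np

positions = [np.array([i, i]) for i in range(7)]
groups = split(positions, population=3)
# groups[0] holds positions 0, 3, 6; groups[1] holds 1, 4; groups[2] holds 2, 5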
159
import numpy as np from sklearn.linear_model import BayesianRidge from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import Matern, WhiteKernel, RBF from sklearn.ensemble import ExtraTreesRegressor as _ExtraTreesRegressor_ from sklearn.ensemble import RandomForestRegressor as _RandomForestRegressor_ from sklearn.ensemble import ( GradientBoostingRegressor as _GradientBoostingRegressor_, ) The provided code snippet includes necessary dependencies for implementing the `_return_std` function. Write a Python function `def _return_std(X, trees, predictions, min_variance)` to solve the following problem: used from: https://github.com/scikit-optimize/scikit-optimize/blob/master/skopt/learning/forest.py Here is the function: def _return_std(X, trees, predictions, min_variance): """ used from: https://github.com/scikit-optimize/scikit-optimize/blob/master/skopt/learning/forest.py """ variance = np.zeros(len(X)) trees = list(trees) for tree in trees: if isinstance(tree, np.ndarray): tree = tree[0] var_tree = tree.tree_.impurity[tree.apply(X)] var_tree[var_tree < min_variance] = min_variance mean_tree = tree.predict(X) variance += var_tree ** 2 + mean_tree ** 2 variance /= len(trees) variance -= predictions ** 2.0 variance[variance < 0.0] = 0.0 std = variance ** 0.5 return std
used from: https://github.com/scikit-optimize/scikit-optimize/blob/master/skopt/learning/forest.py
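A hedged usage sketch for `_return_std` above: it assumes the function is in scope and that the trees come from a fitted scikit-learn forest (each estimator exposes `tree_` and `apply`); the data and model settings are illustrative.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
forest = RandomForestRegressor(n_estimators=25, random_state=0).fit(X, y)

predictions = forest.predict(X)
std = _return_std(X, forest.estimators_, predictions, min_variance=1e-6)
# `std` is a per-sample spread estimate derived from the individual trees.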
160
import numpy as np from scipy.stats import norm def normalize(array): num = array - array.min() den = array.max() - array.min() if den == 0: return np.random.random_sample(array.shape) else: return ((num / den) + 0) / 1
null
161
import numpy as np from scipy.stats import norm from .smbo import SMBO from .surrogate_models import ( GPR_linear, GPR, ) from .acquisition_function import ExpectedImprovement def normalize(array): num = array - array.min() den = array.max() - array.min() if den == 0: return np.random.random_sample(array.shape) else: return ((num / den) + 0) / 1
null
162
import numpy as np from scipy.stats import norm from .smbo import SMBO from .surrogate_models import ( RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor, ) from .acquisition_function import ExpectedImprovement def normalize(array): num = array - array.min() den = array.max() - array.min() if den == 0: return np.random.random_sample(array.shape) else: return ((num / den) + 0) / 1
null
163
import random
import numpy as np
from .hill_climbing_optimizer import HillClimbingOptimizer

def sort_list_idx(list_):
    list_np = np.array(list_)
    idx_sorted = list(list_np.argsort()[::-1])
    return idx_sorted
null
164
import random
import numpy as np
from .hill_climbing_optimizer import HillClimbingOptimizer

def centeroid(array_list):
    centeroid = []
    for idx in range(array_list[0].shape[0]):
        center_dim_pos = []
        for array in array_list:
            center_dim_pos.append(array[idx])
        center_dim_mean = np.array(center_dim_pos).mean()
        centeroid.append(center_dim_mean)
    return centeroid
null
165
import numpy as np
from ..base_optimizer import BaseOptimizer
from numpy.random import normal, laplace, logistic, gumbel

def max_list_idx(list_):
    max_item = max(list_)
    max_item_idx = [i for i, j in enumerate(list_) if j == max_item]
    return max_item_idx[-1:][0]
null
166
import numpy as np def _print_times(eval_time, iter_time, n_iter): opt_time = iter_time - eval_time iterPerSec = n_iter / iter_time print( indent, "Evaluation time :", eval_time, "sec", indent, "[{} %]".format(round(eval_time / iter_time * 100, 2)), ) print( indent, "Optimization time :", opt_time, "sec", indent, "[{} %]".format(round(opt_time / iter_time * 100, 2)), ) if iterPerSec >= 1: print( indent, "Iteration time :", iter_time, "sec", indent, "[{} iter/sec]".format(round(iterPerSec, 2)), ) else: secPerIter = iter_time / n_iter print( indent, "Iteration time :", iter_time, "sec", indent, "[{} sec/iter]".format(round(secPerIter, 2)), ) print(" ") def _print_results(objective_function, score_best, para_best, random_seed): print("\nResults: '{}'".format(objective_function.__name__), " ") if para_best is None: print(indent, "Best score:", score_best, " ") print(indent, "Best parameter:", para_best, " ") else: para_names = list(para_best.keys()) para_names_align = align_para_names(para_names) print(indent, "Best score:", score_best, " ") print(indent, "Best parameter:") for para_key in para_best.keys(): added_spaces = para_names_align[para_key] print( indent, indent, "'{}'".format(para_key), "{}:".format(added_spaces), para_best[para_key], " ", ) print(" ") print(indent, "Random seed:", random_seed, " ") print(" ") def print_info( verbosity, objective_function, score_best, para_best, eval_times, iter_times, n_iter, random_seed, ): eval_time = np.array(eval_times).sum() iter_time = np.array(iter_times).sum() if "print_results" in verbosity: _print_results(objective_function, score_best, para_best, random_seed) if "print_times" in verbosity: _print_times(eval_time, iter_time, n_iter)
null
167
import time
import numpy as np

def time_exceeded(start_time, max_time):
    run_time = time.time() - start_time
    return max_time and run_time > max_time
null
168
import time
import numpy as np

def score_exceeded(score_best, max_score):
    return max_score and score_best >= max_score
null
169
import time import numpy as np def no_change(score_new_list, early_stopping): if "n_iter_no_change" not in early_stopping: print( "Warning n_iter_no_change-parameter must be set in order for early stopping to work" ) return False n_iter_no_change = early_stopping["n_iter_no_change"] if len(score_new_list) <= n_iter_no_change: return False scores_np = np.array(score_new_list) max_score = max(score_new_list) max_index = np.argmax(scores_np) length_pos = len(score_new_list) diff = length_pos - max_index if diff > n_iter_no_change: return True first_n = length_pos - n_iter_no_change scores_first_n = score_new_list[:first_n] max_first_n = max(scores_first_n) if "tol_abs" in early_stopping and early_stopping["tol_abs"] is not None: tol_abs = early_stopping["tol_abs"] if abs(max_first_n - max_score) < tol_abs: return True if "tol_rel" in early_stopping and early_stopping["tol_rel"] is not None: tol_rel = early_stopping["tol_rel"] percent_imp = ((max_score - max_first_n) / abs(max_first_n)) * 100 if percent_imp < tol_rel: return True
null
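A short usage sketch for `no_change` above (assuming it is in scope), showing the early-stopping dictionary it expects.

scores = [0.20, 0.50, 0.80, 0.81, 0.81, 0.81, 0.81]
early_stopping = {"n_iter_no_change": 3, "tol_abs": None, "tol_rel": None}

stop = no_change(scores, early_stopping)
# True: the best score was reached more than 3 iterations ago.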
170
import traceback from dataclasses import dataclass from pathlib import Path from typing import Dict, List from readmeai.config.settings import ConfigLoader from readmeai.core.logger import Logger _logger = Logger(__name__) class QuickStart: """Information about using, running, and testing a repository.""" install_command: str run_command: str test_command: str prerequisites: str language_counts: Dict[str, int] language_key: str language_name: str = None def count_languages( summaries: List[str], config_loader: ConfigLoader ) -> Dict[str, int]: """ Counts the occurrences of each language in the summaries. """ parser_files = config_loader.parsers.get("parsers") language_counts = {} for file_path, _ in summaries: language = Path(file_path).suffix[1:] if str(file_path) in [ dependency_file for dependency_file in parser_files ]: continue if language and language not in config_loader.blacklist: language_counts[language] = language_counts.get(language, 0) + 1 return language_counts def get_top_language_setup( language_counts: Dict[str, int], config_loader: ConfigLoader ) -> QuickStart: """ Determines the top language and retrieves its setup commands. """ if not language_counts: return None languages = config_loader.languages.get("language_names") commands = config_loader.commands.get("quickstart_guide") language_key = get_top_language(language_counts) language_name = languages.get(language_key, languages.get("default")) quickstart_commands = commands.get(language_name, commands.get("default")) prerequisites = f"**{language_name}**: `version x.y.z`" return QuickStart( *quickstart_commands, prerequisites, language_counts, language_key, language_name, ) The provided code snippet includes necessary dependencies for implementing the `get_setup_data` function. Write a Python function `def get_setup_data( config_loader: ConfigLoader, summaries: List[str] ) -> QuickStart` to solve the following problem: Generates the 'Quick Start' section of the README file. Here is the function: def get_setup_data( config_loader: ConfigLoader, summaries: List[str] ) -> QuickStart: """ Generates the 'Quick Start' section of the README file. """ default_setup = QuickStart("", "", "", {}, "", "") try: language_counts = count_languages(summaries, config_loader) setup = ( get_top_language_setup(language_counts, config_loader) or default_setup ) except Exception as exc: _logger.debug(f"Exception: {exc}\n{traceback.format_exc()}") setup = default_setup _logger.info(f"Quickstart information: {setup}") return setup
Generates the 'Quick Start' section of the README file.
171
import re from typing import List EMOJI_PATTERN = re.compile( pattern="[" "\U0001f600-\U0001f64f" # emoticons "\U0001f300-\U0001f5ff" # symbols & pictographs "\U0001f680-\U0001f6ff" # transport & map symbols "\U0001f700-\U0001f77f" # alchemical symbols "\U0001f780-\U0001f7ff" # Geometric Shapes Extended "\U0001f800-\U0001f8ff" # Supplemental Arrows-C "\U0001f900-\U0001f9ff" # Supplemental Symbols and Pictographs "\U0001fa00-\U0001fa6f" # Chess Symbols "\U0001fa70-\U0001faff" # Symbols and Pictographs Extended-A "\U00002702-\U000027b0" # Dingbats "\U000024c2-\U0001f251" # flags (iOS) "]+", flags=re.UNICODE, ) The provided code snippet includes necessary dependencies for implementing the `remove_emojis` function. Write a Python function `def remove_emojis(md_content: List[str]) -> List[str]` to solve the following problem: Removes emojis from the content list. Here is the function: def remove_emojis(md_content: List[str]) -> List[str]: """Removes emojis from the content list.""" modified_content = [] for section in md_content: lines = section.split("\n") for index, line in enumerate(lines): if ( line.startswith("#") or "Table of Contents" in section or "Quick Links" in section ): lines[index] = EMOJI_PATTERN.sub("", line) modified_content.append("\n".join(lines)) return modified_content
Removes emojis from the content list.
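A brief usage sketch for `remove_emojis` above (assuming it is in scope): emojis are stripped from heading lines and from Table of Contents / Quick Links sections, while ordinary body text is left alone.

sections = [
    "# 🚀 Project Title\nA short introduction.",
    "## 📦 Table of Contents\n- [Overview](#-overview)",
]
cleaned = remove_emojis(sections)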
172
import re from typing import List The provided code snippet includes necessary dependencies for implementing the `split_markdown_headings` function. Write a Python function `def split_markdown_headings(markdown_text: str) -> dict` to solve the following problem: Splits a markdown document by level 2 headings into separate sections. Here is the function: def split_markdown_headings(markdown_text: str) -> dict: """ Splits a markdown document by level 2 headings into separate sections. """ sections = re.split(r"(?m)^## ", markdown_text) split_sections = {} for section in sections: if section.strip(): heading = section.split("\n", 1)[0].strip() file_name = heading.lower().replace(" ", "_") + ".md" content = "## " + section if split_sections else section content = content.rstrip("\n") split_sections[file_name] = content return split_sections
Splits a markdown document by level 2 headings into separate sections.
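A small usage sketch for `split_markdown_headings` above (assuming it is in scope); the input document is illustrative.

doc = (
    "# My Project\nShort intro.\n"
    "## Overview\nWhat the project does.\n"
    "## Getting Started\nHow to install and run it.\n"
)
sections = split_markdown_headings(doc)
# keys are derived from the headings, e.g. "overview.md", "getting_started.md"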
173
import re from typing import List The provided code snippet includes necessary dependencies for implementing the `update_heading_names` function. Write a Python function `def update_heading_names(md_contents: dict) -> dict` to solve the following problem: Updates dict keys by removing leading emojis, underscores, and spaces. Here is the function: def update_heading_names(md_contents: dict) -> dict: """ Updates dict keys by removing leading emojis, underscores, and spaces. """ updated_md_headings = {} for key in md_contents: new_key = re.sub(r"^[^\w]+", "", key).lstrip("_") if re.match(r"^<.*>.md$", new_key): new_key = "header.md" updated_md_headings[new_key] = md_contents[key] return updated_md_headings
Updates dict keys by removing leading emojis, underscores, and spaces.
174
from pathlib import Path from typing import List, Tuple from readmeai.services.git import fetch_git_file_url def is_valid_tuple_summary(summary: Tuple[str, str]) -> bool: """Checks if a summary is a valid tuple format.""" return isinstance(summary, tuple) and len(summary) == 2 The provided code snippet includes necessary dependencies for implementing the `format_code_summaries` function. Write a Python function `def format_code_summaries( placeholder: str, code_summaries: Tuple[str, str] ) -> List[Tuple[str, str]]` to solve the following problem: Converts the given code summaries into a formatted list. Here is the function: def format_code_summaries( placeholder: str, code_summaries: Tuple[str, str] ) -> List[Tuple[str, str]]: """Converts the given code summaries into a formatted list.""" formatted_summaries = [] for summary in code_summaries: if is_valid_tuple_summary(summary): module, summary_text = summary else: module, summary_text = summary, placeholder formatted_summaries.append((module, summary_text)) return formatted_summaries
Converts the given code summaries into a formatted list.
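A quick usage sketch for `format_code_summaries` above (assuming it is in scope): entries that are not valid (module, summary) tuples fall back to the placeholder text.

summaries = [
    ("src/main.py", "Entry point for the CLI."),
    "src/utils.py",  # not a (module, summary) tuple
]
formatted = format_code_summaries("Summary unavailable.", summaries)
# -> [("src/main.py", "Entry point for the CLI."),
#     ("src/utils.py", "Summary unavailable.")]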
175
from pathlib import Path from typing import List, Tuple from readmeai.services.git import fetch_git_file_url def construct_markdown_table( data: List[Tuple[str, str]], repo_url: str, full_name: str ) -> str: """Builds a Markdown table from the provided data.""" headers = ["File", "Summary"] table_rows = [headers, ["---", "---"]] for module, summary in data: file_name = str(Path(module).name) if "invalid" in full_name.lower(): return file_name host_url = fetch_git_file_url(module, full_name, repo_url) md_format_host_url = f"[{file_name}]({host_url})" table_rows.append([md_format_host_url, summary]) return format_as_markdown_table(table_rows) def group_summaries_by_folder(summaries: List[Tuple[str, str]]) -> dict: """Groups code summaries by their sub-directory.""" folder_map = {} for module, summary in summaries: folder_name = extract_folder_name(module) folder_map.setdefault(folder_name, []).append((module, summary)) return folder_map The provided code snippet includes necessary dependencies for implementing the `generate_markdown_tables` function. Write a Python function `def generate_markdown_tables( table_widget: str, summaries: List[Tuple[str, str]], project_name: str, repository_url: str, ) -> str` to solve the following problem: Produces Markdown tables for each project sub-directory. Here is the function: def generate_markdown_tables( table_widget: str, summaries: List[Tuple[str, str]], project_name: str, repository_url: str, ) -> str: """Produces Markdown tables for each project sub-directory.""" summaries_by_folder = group_summaries_by_folder(summaries) markdown_tables = [] for folder, entries in summaries_by_folder.items(): table_in_markdown = construct_markdown_table( entries, repository_url, project_name ) table_wrapper = table_widget.format(folder, table_in_markdown) markdown_tables.append(table_wrapper) return "\n".join(markdown_tables)
Produces Markdown tables for each project sub-directory.
176
from typing import Tuple from readmeai.cli.options import BadgeOptions from readmeai.config.settings import ConfigLoader from readmeai.services.git import GitHost from readmeai.utils.file_handler import FileHandler from readmeai.utils.file_resources import get_resource_path _package = "readmeai.generators" _submodule = "assets" class FileHandler: """File I/O factory class to read and write files.""" def __init__(self): """Initialize the file handler.""" self.file_actions: Dict[str, Dict[str, Callable[[str], Any]]] = { "json": {"read": self.read_json, "write": self.write_json}, "md": {"read": self.read_markdown, "write": self.write_markdown}, "toml": {"read": self.read_toml, "write": self.write_toml}, "txt": {"read": self.read_text, "write": self.write_text}, "yaml": {"read": self.read_yaml, "write": self.write_yaml}, } self.cache = {} self.read_json = functools.lru_cache(maxsize=100)(self.read_json) self.read_toml = functools.lru_cache(maxsize=100)(self.read_toml) def read(self, file_path: Union[str, Path]) -> Any: """Read the content of a file.""" if file_path in self.cache: return self.cache[file_path] try: file_extension = str(file_path).rsplit(".", maxsplit=1)[-1] reader = self.get_action(file_extension, "read") content = reader(file_path) self.cache[file_path] = content return content except Exception as exc: raise FileReadError(exc, file_path) from exc def write(self, file_path: Union[str, Path], content: Any) -> None: """Write the content to a file.""" try: file_extension = str(file_path).rsplit(".", maxsplit=1)[-1] writer = self.get_action(file_extension, "write") writer(file_path, content) except Exception as exc: raise FileWriteError(exc, file_path) from exc def get_action( self, file_extension: str, action_type: str ) -> Callable[[str], Any]: """Get method for the passed file extension and I/O operation.""" file_actions = self.file_actions.get(file_extension) if not file_actions: raise ValueError(f"Unsupported file type: {file_extension}") action = file_actions.get(action_type) if not action: raise ValueError(f"Unsupported action type: {action_type}") return action def read_json(file_path: Union[str, Path]) -> Dict[str, Any]: """Read the content of a JSON file.""" with open(file_path, encoding="utf-8") as file: return json.load(file) def read_markdown(file_path: Union[str, Path]) -> str: """Read the content of a Markdown file.""" with open(file_path, encoding="utf-8") as file: return file.read() def read_toml(file_path: Union[str, Path]) -> Dict[str, Any]: """Read the content of a TOML file.""" if sys.version_info < (3, 11): with open(file_path, encoding="utf-8") as file: data = toml.load(file) else: with open(file_path, "rb") as file: data = toml.load(file) return {key.lower(): value for key, value in data.items()} def read_text(file_path: Union[str, Path]) -> str: """Read the content of a TXT file.""" with open(file_path, encoding="utf-8") as file: return file.read() def read_yaml(file_path: Union[str, Path]) -> Dict[str, Any]: """Read the content of a YAML file.""" with open(file_path, encoding="utf-8") as file: return yaml.safe_load(file) def write_json( file_path: Union[str, Path], content: Dict[str, Any] ) -> None: """Write the content to a JSON file.""" with open(file_path, "w", encoding="utf-8") as file: json.dump(content, file, indent=4) def write_markdown(file_path: Union[str, Path], content: str) -> None: """Write the content to a Markdown file.""" with open(file_path, "w", encoding="utf-8") as file: file.write(content) def write_toml( file_path: Union[str, Path], 
content: Dict[str, Any] ) -> None: """Write the content to a TOML file.""" with open(file_path, "w", encoding="utf-8") as file: toml.dump(content, file) def write_text(file_path: Union[str, Path], content: str) -> None: """Write the content to a TXT file.""" with open(file_path, "w", encoding="utf-8") as file: file.write(content) def write_yaml( file_path: Union[str, Path], content: Dict[str, Any] ) -> None: """Write the content to a YAML file.""" with open(file_path, "w", encoding="utf-8") as file: yaml.safe_dump(content, file) def get_resource_path( file_path: str, package: str = "readmeai.config", sub_module: str = "settings", ) -> Path: """Retrieves the path to a resource file within the package. This function attempts to first use `importlib.resources` for preferred access to resources within the package. It falls back to `pkg_resources` for compatibility with older environments. Parameters ---------- file_path The path to the resource file relative to the package's submodule. package, optional The package name containing the resource file. - default: "readmeai.config" submodule, optional The submodule within the package where the resource is located. - default: "settings" Returns ------- The absolute path to the resource file. Raises ------ FileReadError FileReadError: If the resource file cannot be found or accessed. """ resource_path = None try: resource_path = resources.files(package).joinpath( sub_module, file_path ) except TypeError: # pragma: no cover try: import pkg_resources submodule = sub_module.replace(".", "/") resource_path = Path( pkg_resources.resource_filename( "readmeai", f"{submodule}/{file_path}" ) ).resolve() except Exception as exc: # pragma: no cover raise FileReadError( "Error loading resource file using pkg_resources", str(resource_path), ) from exc if not resource_path.exists(): raise FileReadError("Resource file not found", str(resource_path)) return resource_path The provided code snippet includes necessary dependencies for implementing the `skill_icons` function. Write a Python function `def skill_icons(conf: ConfigLoader, dependencies: list) -> str` to solve the following problem: Generates badges for the README using skill icons, from the repository - https://github.com/tandpfun/skill-icons. Here is the function: def skill_icons(conf: ConfigLoader, dependencies: list) -> str: """ Generates badges for the README using skill icons, from the repository - https://github.com/tandpfun/skill-icons. """ dependencies.extend(["md"]) icons_path = get_resource_path( conf.files.skill_icons, _package, _submodule ) icons_dict = FileHandler().read(icons_path) skill_icons = [ icon for icon in icons_dict["icons"]["names"] if icon in dependencies ] skill_icons = ",".join(skill_icons) skill_icons = icons_dict["url"]["base_url"] + skill_icons if conf.md.badge_style == "skills-light": skill_icons = f"{skill_icons}&theme=light" conf.md.skill_icons = conf.md.skill_icons.format(skill_icons) return conf.md.badge_icons.format( alignment=conf.md.alignment, badge_icons=conf.md.skill_icons )
Generates badges for the README using skill icons, from the repository - https://github.com/tandpfun/skill-icons.
177
from typing import Dict, Type from readmeai.core.parsers import BaseFileParser from readmeai.parsers.configuration.docker import ( DockerComposeParser, DockerfileParser, ) from readmeai.parsers.configuration.properties import PropertiesParser from readmeai.parsers.language.cpp import ( CMakeParser, ConfigureAcParser, MakefileAmParser, ) from readmeai.parsers.language.go import GoModParser from readmeai.parsers.language.python import ( RequirementsParser, TomlParser, YamlParser, ) from readmeai.parsers.language.rust import CargoTomlParser from readmeai.parsers.language.swift import SwiftPackageParser from readmeai.parsers.package.gradle import ( BuildGradleKtsParser, BuildGradleParser, ) from readmeai.parsers.package.maven import MavenParser from readmeai.parsers.package.npm import PackageJsonParser from readmeai.parsers.package.yarn import YarnLockParser PARSER_REGISTRY = { # Configuration ".properties": PropertiesParser, # Language/Framework # Python "Pipfile": TomlParser(), "pyproject.toml": TomlParser(), "requirements.in": RequirementsParser(), "requirements.txt": RequirementsParser(), "requirements-dev.txt": RequirementsParser(), "requirements-test.txt": RequirementsParser(), "requirements-prod.txt": RequirementsParser(), "dev-requirements.txt": RequirementsParser(), "environment.yml": YamlParser(), "environment.yaml": YamlParser(), # "setup.py": setup_py_parser, # "setup.cfg": setup_cfg_parser, # C/C++ "cmakeLists.txt": CMakeParser(), "configure.ac": ConfigureAcParser(), "Makefile.am": MakefileAmParser(), # JavaScript/Node.js "package.json": PackageJsonParser(), "yarn.lock": YarnLockParser(), # Kotlin and Kotlin DSL "build.gradle": BuildGradleParser(), "build.gradle.kts": BuildGradleKtsParser(), # Go "go.mod": GoModParser(), # Java "pom.xml": MavenParser(), # Rust "cargo.toml": CargoTomlParser(), # Swift "Package.swift": SwiftPackageParser(), "Dockerfile": DockerfileParser(), "docker-compose.yaml": DockerComposeParser(), # Package Managers # Monitoring and Logging } class BaseFileParser(ABC): """Abstract base class for dependency file parsers.""" def __init__(self) -> None: """Initializes the handler with given configuration.""" self._logger = Logger(__name__) def parse(self, content: str) -> List[str]: """Parses content of dependency file and returns list of dependencies.""" ... def log_error(self, message: str): """Logs error message when parsing fails.""" self._logger.error(f"Error parsing dependency file {message}") def handle_parsing_error(self, error: Exception) -> List[str]: """Standardized error handling for parsing exceptions.""" self.log_error(str(error)) return [] The provided code snippet includes necessary dependencies for implementing the `parser_handler` function. Write a Python function `def parser_handler() -> Dict[str, BaseFileParser]` to solve the following problem: Returns a dictionary of callable file parser methods. Here is the function: def parser_handler() -> Dict[str, BaseFileParser]: """Returns a dictionary of callable file parser methods.""" return PARSER_REGISTRY
Returns a dictionary of callable file parser methods.
178
import asyncio import tempfile import traceback from pathlib import Path from typing import Optional from readmeai._exceptions import ReadmeGeneratorError from readmeai.cli.options import ImageOptions, ModelOptions from readmeai.config.settings import ConfigLoader, GitSettings from readmeai.core.logger import Logger from readmeai.core.preprocess import preprocessor from readmeai.core.utils import get_environment from readmeai.generators.builder import MarkdownBuilder from readmeai.models.dalle import DalleHandler from readmeai.models.factory import ModelFactory from readmeai.services.git import clone_repository from readmeai.utils.file_handler import FileHandler _logger = Logger(__name__) async def readme_generator(conf: ConfigLoader, output_file: Path) -> None: """Orchestrates the README.md file generation process.""" with tempfile.TemporaryDirectory() as temp_dir: await clone_repository(conf.config.git.repository, temp_dir) ( dependencies, raw_files, ) = preprocessor(conf, temp_dir) _logger.info(f"Total files analyzed: {len(raw_files)}") _logger.info(f"Dependencies found: {dependencies}") async with ModelFactory.model_handler(conf).use_api() as llm: responses = await llm.batch_request(dependencies, raw_files) ( summaries, features, overview, slogan, ) = responses conf.config.md.features = conf.config.md.features.format(features) conf.config.md.overview = conf.config.md.overview.format(overview) conf.config.md.slogan = slogan if ( conf.config.md.image == ImageOptions.LLM.value and conf.config.llm.api != ModelOptions.OFFLINE.value ): conf.config.md.width = "60%" dalle = DalleHandler(conf) image_url = dalle.run() conf.config.md.image = dalle.download(image_url) elif ( conf.config.md.image == ImageOptions.LLM.value and conf.config.llm.api == ModelOptions.OFFLINE.value ): conf.config.md.image = ImageOptions.BLUE.value readme_md = MarkdownBuilder( conf, dependencies, summaries, temp_dir ).build() FileHandler().write(output_file, readme_md) _logger.info("README generation process completed successfully!") _logger.info(f"README.md file saved to: {output_file}") _logger.info("Share it @ github.com/eli64s/readme-ai/discussions") class ReadmeGeneratorError(ReadmeAIError): """Exceptions related to readme generation.""" def __init__(self, traceback, *args): self.traceback = traceback super().__init__(f"Error generating readme: {traceback}", *args) def get_environment(llm_api: str = "", llm_model: str = "") -> tuple: """Set LLM environment variables based on the specified LLM service.""" default_models = { llms.OPENAI.name: "gpt-3.5-turbo", llms.OLLAMA.name: "mistral", llms.GEMINI.name: "gemini-pro", } env_keys = { llms.OPENAI.name: SecretKey.OPENAI_API_KEY.value, llms.OLLAMA.name: SecretKey.OLLAMA_HOST.value, llms.GEMINI.name: SecretKey.GOOGLE_API_KEY.value, } if llm_api and llm_api not in env_keys: if llm_api == llms.OFFLINE.name: return _set_offline("\n\n\t\t\t\tOffline mode enabled by user") _logger.warning("Invalid LLM service provided to CLI.") return _set_offline( "\n\n\t\t...No LLM API settings found in environment..." ) # If OPENAI_API_KEY does not exist in env when --api OPENAI is set if ( llm_api == llms.OPENAI.name and SecretKey.OPENAI_API_KEY.value not in os.environ ): return _set_offline( "OPENAI_API_KEY not found in environment. Switching to offline mode." ) # If GOOGLE_API_KEY does not exist in env when --api gemini is set if ( llm_api == llms.GEMINI.name and SecretKey.GOOGLE_API_KEY.value not in os.environ ): return _set_offline( "GOOGLE_API_KEY not found in environment. 
Switching to offline mode." ) # If no specific API is provided or the provided API is valid for api_name, env_key in env_keys.items(): if llm_api == api_name or (not llm_api and env_key in os.environ): model = llm_model if llm_model else default_models[api_name] _logger.info(f"{api_name} settings FOUND in environment!") return api_name, model # If no environment variables are found or OFFLINE is explicitly set if llm_api == llms.OFFLINE.name: return _set_offline("Offline mode enabled by user via CLI.") return _set_offline( "\n\n\t\t...No LLM API settings found in environment..." ) The provided code snippet includes necessary dependencies for implementing the `readme_agent` function. Write a Python function `def readme_agent( alignment: Optional[str], api: Optional[str], badge_color: Optional[str], badge_style: Optional[str], base_url: Optional[str], context_window: Optional[int], emojis: Optional[bool], image: Optional[str], # language: Optional[str], model: Optional[str], output_file: Optional[str], rate_limit: Optional[int], repository: str, temperature: Optional[float], # template: Optional[str], tree_depth: Optional[int], top_p: Optional[float], ) -> None` to solve the following problem: Configures and runs the README file generator agent. Here is the function: def readme_agent( alignment: Optional[str], api: Optional[str], badge_color: Optional[str], badge_style: Optional[str], base_url: Optional[str], context_window: Optional[int], emojis: Optional[bool], image: Optional[str], # language: Optional[str], model: Optional[str], output_file: Optional[str], rate_limit: Optional[int], repository: str, temperature: Optional[float], # template: Optional[str], tree_depth: Optional[int], top_p: Optional[float], ) -> None: """Configures and runs the README file generator agent.""" try: conf = ConfigLoader() api, model = get_environment(api, model) conf.config.api.rate_limit = rate_limit conf.config.llm = conf.config.llm.copy( update={ "api": api, "base_url": base_url, "context_window": context_window, "model": model, "temperature": temperature, "top_p": top_p, } ) conf.config.md = conf.config.md.copy( update={ "alignment": alignment, "badge_color": badge_color, "badge_style": badge_style, "emojis": emojis, "image": image, "tree_depth": tree_depth, } ) conf.config.git = GitSettings(repository=repository) _logger.info(f"Repository validated: {conf.config.git}") _logger.info(f"LLM API settings: {conf.config.llm}") asyncio.run(readme_generator(conf, output_file)) except Exception as exc: raise ReadmeGeneratorError(exc, traceback.format_exc()) from exc
Configures and runs the README file generator agent.
179
from __future__ import annotations from enum import Enum from typing import Optional import click class ImageOptions(str, Enum): """ Enum for CLI options for README file header images. """ # Custom image options CUSTOM = "custom" LLM = "llm" # Default image options BLACK = "https://img.icons8.com/external-tal-revivo-regular-tal-revivo/96/external-readme-is-a-easy-to-build-a-developer-hub-that-adapts-to-the-user-logo-regular-tal-revivo.png" BLUE = "https://raw.githubusercontent.com/PKief/vscode-material-icon-theme/ec559a9f6bfd399b82bb44393651661b08aaf7ba/icons/folder-markdown-open.svg" CLOUD = "https://cdn-icons-png.flaticon.com/512/6295/6295417.png" GRADIENT = "https://img.icons8.com/?size=512&id=55494&format=png" GREY = "https://img.icons8.com/external-tal-revivo-filled-tal-revivo/96/external-markdown-a-lightweight-markup-language-with-plain-text-formatting-syntax-logo-filled-tal-revivo.png" PURPLE = "https://img.icons8.com/external-tal-revivo-duo-tal-revivo/100/external-markdown-a-lightweight-markup-language-with-plain-text-formatting-syntax-logo-duo-tal-revivo.png" The provided code snippet includes necessary dependencies for implementing the `prompt_for_image` function. Write a Python function `def prompt_for_image( context: Optional[click.Context], parameter: Optional[click.Parameter], value: Optional[str], ) -> str` to solve the following problem: Prompt the user for a custom image URL. Here is the function: def prompt_for_image( context: Optional[click.Context], parameter: Optional[click.Parameter], value: Optional[str], ) -> str: """Prompt the user for a custom image URL.""" if value == ImageOptions.CUSTOM.name: return click.prompt("Provide an image file path or URL") elif value == ImageOptions.LLM.name: return ImageOptions.LLM.value elif value in ImageOptions.__members__: return ImageOptions[value].value else: raise click.BadParameter(f"Invalid image provided: {value}")
Prompt the user for a custom image URL.
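A hedged sketch of how `prompt_for_image` is wired up as a click callback; the option name, choices, and default below are illustrative, and `ImageOptions` is assumed importable from the module above.

import click

@click.command()
@click.option(
    "--image",
    type=click.Choice([member.name for member in ImageOptions]),
    default=ImageOptions.BLUE.name,
    callback=prompt_for_image,
)
def cli(image: str) -> None:
    click.echo(f"Header image resolved to: {image}")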
180
import os import platform import shutil from enum import Enum from pathlib import Path from typing import Optional import git from readmeai._exceptions import GitCloneError from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `find_git_executable` function. Write a Python function `def find_git_executable() -> Optional[Path]` to solve the following problem: Find the path to the git executable, if available. Here is the function: def find_git_executable() -> Optional[Path]: """Find the path to the git executable, if available.""" try: git_exec_path = os.environ.get("GIT_PYTHON_GIT_EXECUTABLE") if git_exec_path: return Path(git_exec_path) # For Windows, set default location of git executable. if platform.system() == "Windows": default_windows_path = Path("C:\\Program Files\\Git\\cmd\\git.EXE") if default_windows_path.exists(): return default_windows_path # For other OS, set executable path from PATH environment variable. paths = os.environ["PATH"].split(os.pathsep) for path in paths: git_path = Path(path) / "git" if git_path.exists(): return git_path return None except Exception as exc: raise ValueError("Error finding Git executable") from exc
Find the path to the git executable, if available.
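A short usage sketch for `find_git_executable` above (assuming it is in scope).

git_path = find_git_executable()
if git_path is None:
    raise SystemExit("Git executable not found on this system.")
print(f"Using git executable at: {git_path}")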
181
import os import platform import shutil from enum import Enum from pathlib import Path from typing import Optional import git from readmeai._exceptions import GitCloneError from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `validate_file_permissions` function. Write a Python function `def validate_file_permissions(temp_dir: Path) -> None` to solve the following problem: Validates file permissions of the cloned repository. Here is the function: def validate_file_permissions(temp_dir: Path) -> None: """Validates file permissions of the cloned repository.""" try: if platform.system() != "Windows": permissions = temp_dir.stat().st_mode & 0o777 if permissions != 0o700: raise SystemExit( f"Invalid file permissions for {temp_dir}.\n" f"Expected 0o700, but found {oct(permissions)}." ) except Exception as exc: raise ValueError( f"Error validating file permissions: {str(exc)}" ) from exc
Validates file permissions of the cloned repository.
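A hedged example of exercising validate_file_permissions above. The check only runs on non-Windows platforms and expects the cloned directory to carry mode 0o700; the temporary directory below is illustrative rather than readme-ai's actual clone flow, and validate_file_permissions is assumed to be in scope.

import tempfile
from pathlib import Path

# Illustrative only: create a directory with the expected 0o700 mode, then validate.
with tempfile.TemporaryDirectory() as tmp:
    repo_dir = Path(tmp) / "cloned_repo"
    repo_dir.mkdir()
    repo_dir.chmod(0o700)                 # set the mode explicitly; mkdir modes can be masked by the umask
    validate_file_permissions(repo_dir)   # passes silently on POSIX systems
    repo_dir.chmod(0o755)
    # validate_file_permissions(repo_dir) would now raise SystemExit,
    # reporting that 0o700 was expected but 0o755 was found.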
182
import os import platform import shutil from enum import Enum from pathlib import Path from typing import Optional import git from readmeai._exceptions import GitCloneError from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `validate_git_executable` function. Write a Python function `def validate_git_executable(git_exec_path: str) -> None` to solve the following problem: Validate the path to the git executable. Here is the function: def validate_git_executable(git_exec_path: str) -> None: """Validate the path to the git executable.""" try: if not git_exec_path or not Path(git_exec_path).exists(): raise ValueError(f"Git executable not found at {git_exec_path}") except Exception as exc: raise ValueError("Error validating Git executable path") from exc
Validate the path to the git executable.
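The two git helpers above are naturally used together: locate the binary, then validate the resolved path before handing it to GitPython. A minimal sketch, assuming find_git_executable and validate_git_executable are both in scope:

# Illustrative only: locate and validate the local git executable.
git_path = find_git_executable()
if git_path is None:
    raise SystemExit("No git executable found; install Git or set GIT_PYTHON_GIT_EXECUTABLE.")

validate_git_executable(str(git_path))        # raises ValueError if the path does not exist
print(f"Using git executable at: {git_path}")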
183
from dataclasses import dataclass from typing import Any, Dict, List, Optional import aiohttp from readmeai.core.logger import Logger from readmeai.services.git import fetch_git_api_url _logger = Logger(__name__) class RepositoryMetadata: """Dataclass to store GitHub repository metadata.""" name: str full_name: str owner: str owner_url: Optional[str] description: Optional[str] # Repository statistics stars_count: int forks_count: int watchers_count: int open_issues_count: int # Repository details default_branch: str created_at: str updated_at: str pushed_at: str size_kb: int # Repository URLs clone_url_http: str clone_url_ssh: str contributors_url: Optional[str] languages_url: str issues_url: Optional[str] # Programming languages and topics language: Optional[str] languages: List[str] topics: List[str] # Additional repository settings has_wiki: bool has_issues: bool has_projects: bool is_private: bool homepage_url: Optional[str] # License information license_name: Optional[str] license_url: Optional[str] def _parse_repository_metadata(repo_data: dict) -> RepositoryMetadata: """Converts raw repository data from GitHub API into dataclass.""" languages = repo_data.get("languages", {}) license_info = repo_data.get("license", {}) or {} owner_info = repo_data.get("owner", {}) or {} return RepositoryMetadata( name=repo_data.get("name", ""), full_name=repo_data.get("full_name", ""), owner=owner_info.get("login", ""), owner_url=owner_info.get("html_url", ""), description=repo_data.get("description", ""), stars_count=repo_data.get("stargazers_count", 0), forks_count=repo_data.get("forks_count", 0), watchers_count=repo_data.get("watchers_count", 0), open_issues_count=repo_data.get("open_issues_count", 0), default_branch=repo_data.get("default_branch", ""), created_at=repo_data.get("created_at", ""), updated_at=repo_data.get("updated_at", ""), pushed_at=repo_data.get("pushed_at", ""), size_kb=repo_data.get("size", 0), clone_url_http=repo_data.get("clone_url", ""), clone_url_ssh=repo_data.get("ssh_url", ""), contributors_url=repo_data.get("contributors_url"), languages_url=repo_data.get("languages_url", ""), issues_url=repo_data.get("issues_url"), language=repo_data.get("language", ""), languages=list(languages.keys()) if languages else [], topics=repo_data.get("topics", []), has_wiki=repo_data.get("has_wiki", False), has_issues=repo_data.get("has_issues", False), has_projects=repo_data.get("has_projects", False), is_private=repo_data.get("private", False), homepage_url=repo_data.get("homepage", ""), license_name=license_info.get("name", ""), license_url=license_info.get("url", ""), ) async def _fetch_repository_metadata( session: aiohttp.ClientSession, url: str, **kwargs ) -> Dict[str, Any]: """Fetches repository metadata from the git host provider.""" async with session.get(url, **kwargs) as response: response.raise_for_status() if response.status != 200: raise aiohttp.ClientResponseError( request_info=response.request_info, history=response.history, status=response.status, ) return await response.json() async def fetch_git_api_url(repo_url: str) -> str: """Parses the repository URL and returns the API URL.""" try: parts = repo_url.rstrip("/").split("/") repo_name = f"{parts[-2]}/{parts[-1]}" for service in GitHost: if service in repo_url: api_url = f"{service.api_url}{repo_name}" _logger.info(f"{service.name.upper()} API URL: {api_url}") return api_url raise ValueError("Unsupported Git service.") except (IndexError, ValueError) as exc: raise ValueError(f"Invalid repository URL: {repo_url}") from exc 
The provided code snippet includes necessary dependencies for implementing the `fetch_git_repository_metadata` function. Write a Python function `async def fetch_git_repository_metadata( session: aiohttp.ClientSession, repository: str ) -> Optional[RepositoryMetadata]` to solve the following problem: Retrieves GitHub repository metadata and returns a dataclass. Here is the function: async def fetch_git_repository_metadata( session: aiohttp.ClientSession, repository: str ) -> Optional[RepositoryMetadata]: """Retrieves GitHub repository metadata and returns a dataclass.""" api_url = await fetch_git_api_url(repository) if not api_url: return None try: metadata = await _fetch_repository_metadata(session, api_url) return _parse_repository_metadata(metadata) if metadata else None except aiohttp.ClientError as exc: _logger.error( f"Client error while fetching repository metadata: {exc}" ) return None
Retrieves GitHub repository metadata and returns a dataclass.
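A hedged usage sketch for the metadata fetcher above. Two assumptions worth flagging: RepositoryMetadata is presumably meant to carry the @dataclass decorator (the dataclasses import and the keyword construction in _parse_repository_metadata only work if it does), and the GitHost referenced by fetch_git_api_url is an enum of supported providers defined elsewhere in readmeai.services.git; neither appears in the snippet. Any public repository URL works, and unauthenticated GitHub API calls are rate-limited.

import asyncio
import aiohttp

# Illustrative only: fetch metadata for a public GitHub repository and print a few fields.
async def main() -> None:
    async with aiohttp.ClientSession() as session:
        metadata = await fetch_git_repository_metadata(
            session, "https://github.com/eli64s/readme-ai"
        )
        if metadata is None:
            print("No repository metadata returned.")
            return
        print(metadata.full_name, metadata.language, metadata.stars_count)

asyncio.run(main())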
184
from typing import Dict, List, Union import readmeai.config.settings as Settings from readmeai.core.logger import Logger _logger = Logger(__name__) def get_prompt_template(prompts: dict, prompt_type: str) -> str: """Retrieves the template for the given prompt type.""" prompt_templates = { "features": prompts["prompts"]["features"], "overview": prompts["prompts"]["overview"], "slogan": prompts["prompts"]["slogan"], } return prompt_templates.get(prompt_type, "") def inject_prompt_context(template: str, context: dict) -> str: """Formats the template with the provided context.""" try: return template.format(*[context[key] for key in context]) except KeyError as exc: _logger.error(f"Missing context for prompt key: {exc}") return "" The provided code snippet includes necessary dependencies for implementing the `get_prompt_context` function. Write a Python function `def get_prompt_context(prompts: dict, prompt_type: str, context: dict) -> str` to solve the following problem: Generates a prompt for the LLM API. Here is the function: def get_prompt_context(prompts: dict, prompt_type: str, context: dict) -> str: """Generates a prompt for the LLM API.""" prompt_template = get_prompt_template(prompts, prompt_type) if not prompt_template: _logger.error(f"Prompt type '{prompt_type}' not found.") return "" return inject_prompt_context(prompt_template, context)
Generates a prompt for the LLM API.
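A small sketch of get_prompt_context with a hand-rolled prompts mapping. inject_prompt_context formats the template positionally from the context values in insertion order, so the toy template below uses positional placeholders; the real templates ship in readme-ai's TOML configuration and are not reproduced here.

# Illustrative only: a toy prompts dict standing in for the TOML-loaded config.
prompts = {
    "prompts": {
        "features": "List the key features of {0} given these dependencies: {1}",
        "overview": "Write an overview of {0}.",
        "slogan": "Write a slogan for {0}.",
    }
}
context = {"repo": "readme-ai", "dependencies": "aiohttp, click, pydantic"}

print(get_prompt_context(prompts, "features", context))
# -> "List the key features of readme-ai given these dependencies: aiohttp, click, pydantic"
print(get_prompt_context(prompts, "summary", context))
# -> "" (unknown prompt types are logged and return an empty string)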
185
from typing import Dict, List, Union import readmeai.config.settings as Settings from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `set_additional_contexts` function. Write a Python function `async def set_additional_contexts( config: Settings, dependencies: List[str], file_summaries: List[str], ) -> List[dict]` to solve the following problem: Generates additional prompts (features, overview, slogan) for LLM. Here is the function: async def set_additional_contexts( config: Settings, dependencies: List[str], file_summaries: List[str], ) -> List[dict]: """Generates additional prompts (features, overview, slogan) for LLM.""" return [ {"type": prompt_type, "context": context} for prompt_type, context in [ ( "features", { "repo": config.git.repository, "dependencies": dependencies, "file_summary": file_summaries, }, ), ( "overview", { "name": config.git.name, "file_summary": file_summaries, }, ), ( "slogan", { "name": config.git.name, "repo": config.git.repository, "file_summary": file_summaries, }, ), ] ]
Generates additional prompts (features, overview, slogan) for LLM.
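A sketch of how set_additional_contexts is driven and what it returns. A full readme-ai Settings object is out of scope here, so the config is faked with SimpleNamespace, exposing only the attributes the function reads (config.git.name and config.git.repository).

import asyncio
from types import SimpleNamespace

# Illustrative only: a stand-in config with just the attributes used above.
config = SimpleNamespace(
    git=SimpleNamespace(name="readme-ai", repository="https://github.com/eli64s/readme-ai")
)
dependencies = ["aiohttp", "click"]
summaries = ["cli.py: command-line entry point", "git.py: repository utilities"]

prompts = asyncio.run(set_additional_contexts(config, dependencies, summaries))
print([p["type"] for p in prompts])    # -> ['features', 'overview', 'slogan']
print(prompts[0]["context"]["repo"])   # -> the repository URL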
186
from typing import Dict, List, Union import readmeai.config.settings as Settings from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `set_summary_context` function. Write a Python function `async def set_summary_context( config: Settings, dependencies: List[str], file_summaries: List[str], ) -> List[Dict[str, Union[str, dict]]]` to solve the following problem: Generates the summary prompts to be used by the LLM API. Here is the function: async def set_summary_context( config: Settings, dependencies: List[str], file_summaries: List[str], ) -> List[Dict[str, Union[str, dict]]]: """Generates the summary prompts to be used by the LLM API.""" return [ {"type": prompt_type, "context": context} for prompt_type, context in [ ( "file_summary", { "tree": config.md.tree, "dependencies": dependencies, "file_summary": file_summaries, }, ), ] ]
Generates the summary prompts to be used by the LLM API.
187
from tiktoken import get_encoding from readmeai.config.settings import Settings from readmeai.core.logger import Logger _logger = Logger(__name__) def count_tokens(text: str, encoder: str) -> int: """Return the number of tokens in a text string.""" try: encoding = _set_encoding_cache(encoder) token_count = len(encoding.encode(text, disallowed_special=())) except (UnicodeEncodeError, ValueError) as exc: _logger.error( f"Error counting tokens for '{text}' with {encoder}: {exc}" ) token_count = 0 return token_count def truncate_tokens(encoder: str, text: str, max_count: int) -> str: """Truncate a text string to a maximum number of tokens.""" if not text: return text try: encoder = _set_encoding_cache(encoder) token_count = len(encoder.encode(text)) if token_count <= max_count: return text char_total = len(text) chars_per_token = char_total / token_count truncated_total = int(chars_per_token * max_count) return text[:truncated_total] except Exception as exc: _logger.error(f"Error truncating tokens for '{text}': {exc}") return text The provided code snippet includes necessary dependencies for implementing the `token_handler` function. Write a Python function `async def token_handler( config: Settings, index: str, prompt: str, tokens: int ) -> str` to solve the following problem: Handle token count for the prompt. Here is the function: async def token_handler( config: Settings, index: str, prompt: str, tokens: int ) -> str: """Handle token count for the prompt.""" encoder = config.llm.encoder max_count = config.llm.context_window token_count = count_tokens(prompt, encoder) if token_count > max_count: _logger.debug( f"Truncating '{index}' prompt: {token_count} > {max_count} tokens!" ) prompt = truncate_tokens(encoder, prompt, tokens) return prompt
Handle token count for the prompt.
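A hedged sketch around the token helpers above. count_tokens and truncate_tokens call a _set_encoding_cache helper that is not included in the snippet; it is presumably a cached wrapper around tiktoken's get_encoding, reconstructed below as an assumption and placed in the same module so the name resolves. The truncation is approximate: the token budget is converted back to a character budget via the average characters-per-token ratio, so a 1,200-token text trimmed to a 1,000-token budget at roughly 4 characters per token keeps about the first 4,000 characters.

from functools import lru_cache
from tiktoken import get_encoding

@lru_cache(maxsize=None)
def _set_encoding_cache(encoder: str):
    """Assumed implementation: cache the tiktoken encoding by encoder name."""
    return get_encoding(encoder)

# Illustrative only: requires the tiktoken encoding files (downloaded on first use).
text = "word " * 2000
print(count_tokens(text, "cl100k_base"))               # token count of the full text
print(len(truncate_tokens("cl100k_base", text, 100)))  # characters kept for a 100-token budget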
188
from tiktoken import get_encoding from readmeai.config.settings import Settings from readmeai.core.logger import Logger The provided code snippet includes necessary dependencies for implementing the `update_max_tokens` function. Write a Python function `def update_max_tokens( max_tokens: int, prompt: str, target: str = "Hello!" ) -> int` to solve the following problem: Adjust the maximum number of tokens based on the specific prompt. Here is the function: def update_max_tokens( max_tokens: int, prompt: str, target: str = "Hello!" ) -> int: """Adjust the maximum number of tokens based on the specific prompt.""" is_valid_prompt = prompt.strip().startswith(target.strip()) return max_tokens if is_valid_prompt else max_tokens // 2
Adjust the maximum number of tokens based on the specific prompt.
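A short worked example of update_max_tokens: prompts that start with the target greeting keep the full budget, everything else is halved.

# Illustrative only: assumes update_max_tokens (defined above) is in scope.
print(update_max_tokens(650, "Hello! Please summarize this repository."))  # -> 650
print(update_max_tokens(650, "Summarize this repository."))                # -> 325 (650 // 2)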
189
import re def clean_text(text: str) -> str: """Format and clean generated text from the LLM.""" # Dynamically remove all text before and including the first colon if any exist text = re.sub(r"^[^:]*:\s*", "", text) # Remove any text before and including "**:" text = re.sub(r"\*\*:\s*", "", text, flags=re.DOTALL) # Remove single and double quotes that are missing their closing counterpart text = re.sub(r"['\"](.*?)$", r"\1", text) text = re.sub(r"^(.*?)['\"]", r"\1", text) # Remove specific pattern and rephrase text = re.sub( r"\*\*Code Summary:\*\*\s*(.*?)\s*provides functions to", r"Provides functions to", text, flags=re.DOTALL, ) # Remove single and double quotes around any text text = re.sub(r"(?<!\w)['\"](.*?)['\"](?!\w)", r"\1", text) # Remove newlines and tabs text = text.replace("\n", "").replace("\t", "") # Remove non-letter characters from the beginning of the string text = re.sub(r"^[^a-zA-Z]*", "", text) # Remove extra white space around punctuation except for '(' text = re.sub(r"\s*([)'.!,?;:])(?!\.\s*\w)", r"\1", text) # Remove extra white space before opening parentheses text = re.sub(r"(\()\s*", r"\1", text) # Replace multiple consecutive spaces with a single space text = re.sub(r" +", " ", text) # Remove extra white space around hyphens text = re.sub(r"\s*-\s*", "-", text) # Specifically target and remove trailing special characters like asterisks text = re.sub(r"\*+$", "", text) text = text.strip() # Ensure the first letter is capitalized if it's alphabetic if text and not text[0].isupper() and text[0].isalpha(): text = text[0].upper() + text[1:] return text def format_md_table(text: str) -> str: """ Pattern to match a Markdown table. Looks for a header row with at least two columns, followed by a separator row, and then one or more data rows. This version is designed to be more robust in removing text around the markdown table. """ pattern = ( r"(?:.*\n)*(\|.*\|.*\n\|[-: ]+\|[-: ]+\|.*\n(?:\|.*\|.*\n)*)(?:.*\n)*" ) match = re.search(pattern, text, re.DOTALL) return match.group(1).strip() if match else "" The provided code snippet includes necessary dependencies for implementing the `clean_response` function. Write a Python function `def clean_response(prompt_type: str, response_text: str) -> str` to solve the following problem: Post-processes the response from the LLM. Here is the function: def clean_response(prompt_type: str, response_text: str) -> str: """Post-processes the response from the LLM.""" if prompt_type == "features": return format_md_table(response_text) elif prompt_type != "features": return clean_text(response_text) else: return response_text
Post-processes the response from the LLM.
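A sketch of clean_response on the two kinds of output it handles, with invented example strings. For the "features" prompt the raw text must already contain a Markdown table for format_md_table to find; for every other prompt type, clean_text strips leading labels, stray quotes, and extra whitespace.

# Illustrative only: assumes clean_response (defined above) is in scope.
raw_overview = "Overview: 'readme-ai generates README files from repository metadata.'"
print(clean_response("overview", raw_overview))
# -> "Readme-ai generates README files from repository metadata."

raw_features = (
    "Here is the table:\n"
    "| Feature | Description |\n"
    "|---------|-------------|\n"
    "| CLI | Generates README files |\n"
)
print(clean_response("features", raw_features))
# -> the Markdown table only, with the surrounding prose removed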
190
import re The provided code snippet includes necessary dependencies for implementing the `fix_md_table_rows` function. Write a Python function `def fix_md_table_rows(md_table: str) -> str` to solve the following problem: Format a Markdown table with feature and description columns. Here is the function: def fix_md_table_rows(md_table: str) -> str: """Format a Markdown table with feature and description columns.""" lines = md_table.split("||") formatted_md_table = ( "| Feature | Description |\n|---------|-------------|\n" ) for line in lines[2:]: clean_line = line.strip("|") parts = clean_line.split("|") if len(parts) >= 3: feature = parts[1].strip() description = parts[2].strip() formatted_row = f"| {feature} | {description} |\n" formatted_md_table += formatted_row return formatted_md_table
Format a Markdown table with feature and description columns.
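A worked example of fix_md_table_rows. The function expects a malformed single-string table whose rows are glued together with "||" and where each glued row still carries two interior pipes; the input below is constructed to fit that narrow shape. The first two segments (header and separator) are skipped and a fresh two-column table is emitted.

# Illustrative only: assumes fix_md_table_rows (defined above) is in scope.
glued = (
    "| Feature | Description |"
    "| |---|---| |"
    "| | CLI | Command-line README generation |"
    "| | LLM | Summaries via a language model |"
)
print(fix_md_table_rows(glued))
# | Feature | Description |
# |---------|-------------|
# | CLI | Command-line README generation |
# | LLM | Summaries via a language model |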
191
import nox def install(session, groups, root=True): """Install the package in the current session.""" if root: groups = ["main", *groups] session.run_always( "poetry", "install", "--no-root", "--sync", f"--only={','.join(groups)}", external=True, ) if root: session.install(".") The provided code snippet includes necessary dependencies for implementing the `tests` function. Write a Python function `def tests(session)` to solve the following problem: Run the test suite across Python versions Here is the function: def tests(session): """Run the test suite across Python versions""" session.install(".") session.install(".[test]") session.run( "pytest", "-vv", "-n", "auto", "--asyncio-mode=auto", "--cov=./", "--cov-branch", "--cov-report=xml", "--cov-report=term-missing", )
Run the test suite across Python versions
192
from typing import List, Optional from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `summary` function. Write a Python function `def summary(text: str, minlength: Optional[int] = None, maxlength: Optional[int] = None)` to solve the following problem: Runs a summarization model against a block of text. Args: text: text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: summary text Here is the function: def summary(text: str, minlength: Optional[int] = None, maxlength: Optional[int] = None): """ Runs a summarization model against a block of text. Args: text: text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: summary text """ return application.get().pipeline("summary", (text, minlength, maxlength))
Runs a summarization model against a block of text. Args: text: text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: summary text
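A hedged client-side sketch for the route above. In txtai the handler is registered as a GET endpoint through a router decorator that is not shown in the snippet, so the exact path and the locally running API server are assumptions here.

import requests

# Illustrative only: assumes a txtai API instance serving at localhost:8000
# with this handler mounted at GET /summary.
response = requests.get(
    "http://localhost:8000/summary",
    params={"text": "txtai builds embeddings databases for semantic search and LLM workflows.", "maxlength": 20},
)
print(response.json())   # -> the summary text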
193
from typing import List, Optional from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `batchsummary` function. Write a Python function `def batchsummary(texts: List[str] = Body(...), minlength: Optional[int] = Body(default=None), maxlength: Optional[int] = Body(default=None))` to solve the following problem: Runs a summarization model against a block of text. Args: texts: list of text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: list of summary text Here is the function: def batchsummary(texts: List[str] = Body(...), minlength: Optional[int] = Body(default=None), maxlength: Optional[int] = Body(default=None)): """ Runs a summarization model against a block of text. Args: texts: list of text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: list of summary text """ return application.get().pipeline("summary", (texts, minlength, maxlength))
Runs a summarization model against a block of text. Args: texts: list of text to summarize minlength: minimum length for summary maxlength: maximum length for summary Returns: list of summary text
194
from typing import List from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `tabular` function. Write a Python function `def tabular(file: str)` to solve the following problem: Splits tabular data into rows and columns. Args: file: file to process Returns: list of (id, text, tag) elements Here is the function: def tabular(file: str): """ Splits tabular data into rows and columns. Args: file: file to process Returns: list of (id, text, tag) elements """ return application.get().pipeline("tabular", (file,))
Splits tabular data into rows and columns. Args: file: file to process Returns: list of (id, text, tag) elements
195
from typing import List from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `batchtabular` function. Write a Python function `def batchtabular(files: List[str] = Body(...))` to solve the following problem: Splits tabular data into rows and columns. Args: files: list of files to process Returns: list of (id, text, tag) elements Here is the function: def batchtabular(files: List[str] = Body(...)): """ Splits tabular data into rows and columns. Args: files: list of files to process Returns: list of (id, text, tag) elements """ return application.get().pipeline("tabular", (files,))
Splits tabular data into rows and columns. Args: files: list of files to process Returns: list of (id, text, tag) elements
196
from typing import List, Optional from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `translate` function. Write a Python function `def translate(text: str, target: Optional[str] = "en", source: Optional[str] = None)` to solve the following problem: Translates text from source language into target language. Args: text: text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: translated text Here is the function: def translate(text: str, target: Optional[str] = "en", source: Optional[str] = None): """ Translates text from source language into target language. Args: text: text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: translated text """ return application.get().pipeline("translation", (text, target, source))
Translates text from source language into target language. Args: text: text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: translated text
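As with the other routes, a hedged example of calling the translation endpoint, assuming a local txtai API server and a GET /translate mount (the router decorator is not shown in the snippet).

import requests

# Illustrative only: translate a short string into French.
response = requests.get(
    "http://localhost:8000/translate",
    params={"text": "This is a test", "target": "fr"},
)
print(response.json())   # -> the translated string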
197
from typing import List, Optional from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `batchtranslate` function. Write a Python function `def batchtranslate(texts: List[str] = Body(...), target: Optional[str] = Body(default="en"), source: Optional[str] = Body(default=None))` to solve the following problem: Translates text from source language into target language. Args: texts: list of text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: list of translated text Here is the function: def batchtranslate(texts: List[str] = Body(...), target: Optional[str] = Body(default="en"), source: Optional[str] = Body(default=None)): """ Translates text from source language into target language. Args: texts: list of text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: list of translated text """ return application.get().pipeline("translation", (texts, target, source))
Translates text from source language into target language. Args: texts: list of text to translate target: target language code, defaults to "en" source: source language code, detects language if not provided Returns: list of translated text
198
from typing import List, Optional from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute The provided code snippet includes necessary dependencies for implementing the `extract` function. Write a Python function `def extract(queue: List[dict] = Body(...), texts: Optional[List[str]] = Body(default=None))` to solve the following problem: Extracts answers to input questions. Args: queue: list of {name: value, query: value, question: value, snippet: value} texts: optional list of text Returns: list of {name: value, answer: value} Here is the function: def extract(queue: List[dict] = Body(...), texts: Optional[List[str]] = Body(default=None)): """ Extracts answers to input questions. Args: queue: list of {name: value, query: value, question: value, snippet: value} texts: optional list of text Returns: list of {name: value, answer: value} """ return application.get().extract(queue, texts)
Extracts answers to input questions. Args: queue: list of {name: value, query: value, question: value, snippet: value} texts: optional list of text Returns: list of {name: value, answer: value}
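The extract route takes a JSON body rather than query parameters. A hedged sketch, assuming a POST /extract mount on a local txtai API server; the queue entries mirror the {name, query, question, snippet} shape described in the docstring.

import requests

# Illustrative only: ask one question against an ad-hoc text passage.
payload = {
    "queue": [
        {
            "name": "population",
            "query": "population",
            "question": "What is the population?",
            "snippet": False,
        }
    ],
    "texts": ["The population of the city is roughly 500,000 people."],
}
response = requests.post("http://localhost:8000/extract", json=payload)
print(response.json())   # -> [{"name": "population", "answer": "..."}]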
199
from typing import List from fastapi import APIRouter, Body from .. import application from ..route import EncodingAPIRoute def label(text: str = Body(...), labels: List[str] = Body(...)): """ Applies a zero shot classifier to text using a list of labels. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in labels. Args: text: input text labels: list of labels Returns: list of {id: value, score: value} per text element """ return application.get().label(text, labels) The provided code snippet includes necessary dependencies for implementing the `batchlabel` function. Write a Python function `def batchlabel(texts: List[str] = Body(...), labels: List[str] = Body(...))` to solve the following problem: Applies a zero shot classifier to list of text using a list of labels. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in labels per text element. Args: texts: list of text labels: list of labels Returns: list of {id: value score: value} per text element Here is the function: def batchlabel(texts: List[str] = Body(...), labels: List[str] = Body(...)): """ Applies a zero shot classifier to list of text using a list of labels. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in labels per text element. Args: texts: list of text labels: list of labels Returns: list of {id: value score: value} per text element """ return application.get().label(texts, labels)
Applies a zero shot classifier to list of text using a list of labels. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in labels per text element. Args: texts: list of text labels: list of labels Returns: list of {id: value, score: value} per text element