Dataset schema (one record per function; observed value ranges across the split):

  field                        type        observed lengths / classes
  repository_name              string      7 - 55 chars
  func_path_in_repository      string      4 - 223 chars
  func_name                    string      1 - 134 chars
  whole_func_string            string      75 - 104k chars
  language                     string      1 distinct value
  func_code_string             string      75 - 104k chars
  func_code_tokens             sequence    19 - 28.4k tokens
  func_documentation_string    string      1 - 46.9k chars
  func_documentation_tokens    sequence    1 - 1.97k tokens
  split_name                   string      1 distinct value
  func_code_url                string      87 - 315 chars
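Each row below is one record listing those eleven fields in order. As a reading aid, a single record has roughly the following shape as a Python dict; this is a minimal sketch with abbreviated stand-in values, not the output of any particular loader:

```python
# Illustrative shape of a single record; values are abbreviated stand-ins.
record = {
    'repository_name': 'thiagopbueno/rddl2tf',
    'func_path_in_repository': 'rddl2tf/compiler.py',
    'func_name': 'Compiler.state_scope',
    'whole_func_string': "def state_scope(self, state_fluents): ...",
    'language': 'python',
    'func_code_string': "def state_scope(self, state_fluents): ...",  # duplicates whole_func_string
    'func_code_tokens': ['def', 'state_scope', '(', 'self', ','],
    'func_documentation_string': 'Returns a partial scope with current state-fluents.',
    'func_documentation_tokens': ['Returns', 'a', 'partial', 'scope'],
    'split_name': 'train',
    'func_code_url': 'https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a.../compiler.py#L389-L398',
}
print(record['func_name'])  # Compiler.state_scope
```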
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_preconditions_checking
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is applicable in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
python
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is applicable in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
[ "def", "compile_action_preconditions_checking", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "tf", ".", "Tensor", ":", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "with", "tf", ".", "name_scope", "(", "'action_preconditions_checking'", ")", ":", "preconds", "=", "self", ".", "compile_action_preconditions", "(", "state", ",", "action", ")", "all_preconds", "=", "tf", ".", "stack", "(", "[", "p", ".", "tensor", "for", "p", "in", "preconds", "]", ",", "axis", "=", "1", ")", "checking", "=", "tf", ".", "reduce_all", "(", "all_preconds", ",", "axis", "=", "1", ")", "return", "checking" ]
Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is applicable in `state`.
[ "Combines", "the", "action", "preconditions", "into", "an", "applicability", "checking", "op", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L321-L338
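The checking op in this record stacks one boolean tensor per precondition along a new axis and reduces with logical AND, so each batch entry is applicable only if every precondition holds. The semantics are easy to verify with NumPy stand-ins for the graph ops (values invented for illustration):

```python
import numpy as np

# Two hypothetical preconditions evaluated over a batch of 3 states.
precond1 = np.array([True, True, False])
precond2 = np.array([True, False, True])

# Mirrors tf.stack([...], axis=1) followed by tf.reduce_all(..., axis=1).
all_preconds = np.stack([precond1, precond2], axis=1)  # shape (3, 2)
checking = np.all(all_preconds, axis=1)                # shape (3,)
print(checking)  # [ True False False]
```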
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_bound_constraints
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[str, Bounds]: '''Compiles all action bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds. ''' scope = self.action_precondition_scope(state) lower_bounds = self.rddl.domain.action_lower_bound_constraints upper_bounds = self.rddl.domain.action_upper_bound_constraints with self.graph.as_default(): with tf.name_scope('action_bound_constraints'): bounds = {} for name in self.rddl.domain.action_fluent_ordering: lower_expr = lower_bounds.get(name) lower = None if lower_expr is not None: with tf.name_scope('lower_bound'): lower = self._compile_expression(lower_expr, scope) upper_expr = upper_bounds.get(name) upper = None if upper_expr is not None: with tf.name_scope('upper_bound'): upper = self._compile_expression(upper_expr, scope) bounds[name] = (lower, upper) return bounds
python
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[str, Bounds]: '''Compiles all action bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds. ''' scope = self.action_precondition_scope(state) lower_bounds = self.rddl.domain.action_lower_bound_constraints upper_bounds = self.rddl.domain.action_upper_bound_constraints with self.graph.as_default(): with tf.name_scope('action_bound_constraints'): bounds = {} for name in self.rddl.domain.action_fluent_ordering: lower_expr = lower_bounds.get(name) lower = None if lower_expr is not None: with tf.name_scope('lower_bound'): lower = self._compile_expression(lower_expr, scope) upper_expr = upper_bounds.get(name) upper = None if upper_expr is not None: with tf.name_scope('upper_bound'): upper = self._compile_expression(upper_expr, scope) bounds[name] = (lower, upper) return bounds
[ "def", "compile_action_bound_constraints", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "Bounds", "]", ":", "scope", "=", "self", ".", "action_precondition_scope", "(", "state", ")", "lower_bounds", "=", "self", ".", "rddl", ".", "domain", ".", "action_lower_bound_constraints", "upper_bounds", "=", "self", ".", "rddl", ".", "domain", ".", "action_upper_bound_constraints", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "with", "tf", ".", "name_scope", "(", "'action_bound_constraints'", ")", ":", "bounds", "=", "{", "}", "for", "name", "in", "self", ".", "rddl", ".", "domain", ".", "action_fluent_ordering", ":", "lower_expr", "=", "lower_bounds", ".", "get", "(", "name", ")", "lower", "=", "None", "if", "lower_expr", "is", "not", "None", ":", "with", "tf", ".", "name_scope", "(", "'lower_bound'", ")", ":", "lower", "=", "self", ".", "_compile_expression", "(", "lower_expr", ",", "scope", ")", "upper_expr", "=", "upper_bounds", ".", "get", "(", "name", ")", "upper", "=", "None", "if", "upper_expr", "is", "not", "None", ":", "with", "tf", ".", "name_scope", "(", "'upper_bound'", ")", ":", "upper", "=", "self", ".", "_compile_expression", "(", "upper_expr", ",", "scope", ")", "bounds", "[", "name", "]", "=", "(", "lower", ",", "upper", ")", "return", "bounds" ]
Compiles all action bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds.
[ "Compiles", "all", "actions", "bounds", "for", "the", "given", "state", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L340-L377
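A caller might use the returned mapping to clip proposed action values, treating a missing bound as unconstrained. A minimal sketch under invented names (the real dict values are pairs of TensorFluent, possibly None, not scalars or NumPy arrays):

```python
import numpy as np

# Hypothetical (lower, upper) bounds per action fluent; None means unconstrained.
bounds = {'outflow': (0.0, 100.0), 'flip': (None, None)}
proposed = {'outflow': np.array([-5.0, 42.0, 250.0])}

lower, upper = bounds['outflow']
clipped = np.clip(proposed['outflow'],
                  lower if lower is not None else -np.inf,
                  upper if upper is not None else np.inf)
print(clipped)  # [  0.  42. 100.]
```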
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.non_fluents_scope
def non_fluents_scope(self) -> Dict[str, TensorFluent]: '''Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' if self.__dict__.get('non_fluents') is None: self._initialize_non_fluents() return dict(self.non_fluents)
python
def non_fluents_scope(self) -> Dict[str, TensorFluent]: '''Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' if self.__dict__.get('non_fluents') is None: self._initialize_non_fluents() return dict(self.non_fluents)
[ "def", "non_fluents_scope", "(", "self", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "if", "self", ".", "__dict__", ".", "get", "(", "'non_fluents'", ")", "is", "None", ":", "self", ".", "_initialize_non_fluents", "(", ")", "return", "dict", "(", "self", ".", "non_fluents", ")" ]
Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "non", "-", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L379-L387
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.state_scope
def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.state_fluent_ordering, state_fluents))
python
def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.state_fluent_ordering, state_fluents))
[ "def", "state_scope", "(", "self", ",", "state_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".", "state_fluent_ordering", ",", "state_fluents", ")", ")" ]
Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "state", "-", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L389-L398
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.action_scope
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))
python
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))
[ "def", "action_scope", "(", "self", ",", "action_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".", "action_fluent_ordering", ",", "action_fluents", ")", ")" ]
Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "action", "-", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L400-L409
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.next_state_scope
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with the next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))
python
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with the next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))
[ "def", "next_state_scope", "(", "self", ",", "next_state_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".", "next_state_fluent_ordering", ",", "next_state_fluents", ")", ")" ]
Returns a partial scope with the next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "next", "state", "-", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L411-L420
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.transition_scope
def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) return scope
python
def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) return scope
[ "def", "transition_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "scope", "=", "{", "}", "scope", ".", "update", "(", "self", ".", "non_fluents_scope", "(", ")", ")", "scope", ".", "update", "(", "self", ".", "state_scope", "(", "state", ")", ")", "scope", ".", "update", "(", "self", ".", "action_scope", "(", "action", ")", ")", "return", "scope" ]
Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "complete", "transition", "fluent", "scope", "for", "the", "current", "state", "and", "action", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L422-L439
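Because transition_scope builds its result with successive dict.update calls, a later group would shadow an earlier one on any key collision (in practice RDDL fluent names are distinct across the groups). A toy illustration with plain dicts standing in for the fluent maps:

```python
# Plain dicts standing in for the non-fluent, state, and action scopes.
scope = {}
scope.update({'MAX_LEVEL': 100.0})     # non-fluents
scope.update({'level': [3.0, 7.0]})    # state fluents
scope.update({'outflow': [1.0, 2.0]})  # action fluents (added last, wins ties)
print(sorted(scope))  # ['MAX_LEVEL', 'level', 'outflow']
```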
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.reward_scope
def reward_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) scope.update(self.next_state_scope(next_state)) return scope
python
def reward_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) scope.update(self.next_state_scope(next_state)) return scope
[ "def", "reward_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "next_state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "scope", "=", "{", "}", "scope", ".", "update", "(", "self", ".", "non_fluents_scope", "(", ")", ")", "scope", ".", "update", "(", "self", ".", "state_scope", "(", "state", ")", ")", "scope", ".", "update", "(", "self", ".", "action_scope", "(", "action", ")", ")", "scope", ".", "update", "(", "self", ".", "next_state_scope", "(", "next_state", ")", ")", "return", "scope" ]
Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "complete", "reward", "fluent", "scope", "for", "the", "current", "state", "action", "fluents", "and", "next_state", "fluents", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L441-L461
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.state_invariant_scope
def state_invariant_scope(self, state: Sequence[tf.Tensor]): '''Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) return scope
python
def state_invariant_scope(self, state: Sequence[tf.Tensor]): '''Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) return scope
[ "def", "state_invariant_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", ":", "scope", "=", "{", "}", "scope", ".", "update", "(", "self", ".", "non_fluents_scope", "(", ")", ")", "scope", ".", "update", "(", "self", ".", "state_scope", "(", "state", ")", ")", "return", "scope" ]
Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "state", "invariant", "fluent", "scope", "for", "the", "current", "state", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L463-L475
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_pvariables
def _initialize_pvariables(self, pvariables: Dict[str, PVariable], ordering: List[str], initializer: Optional[InitializerList] = None) -> List[Tuple[str, TensorFluent]]: '''Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor. ''' if initializer is not None: init = dict() for ((name, args), value) in initializer: arity = len(args) if args is not None else 0 name = '{}/{}'.format(name, arity) init[name] = init.get(name, []) init[name].append((args, value)) fluents = [] for name in ordering: pvar = pvariables[name] shape = self.rddl._param_types_to_shape(pvar.param_types) dtype = utils.range_type_to_dtype(pvar.range) fluent = np.full(shape, pvar.default) if initializer is not None: for args, val in init.get(name, []): if args is not None: idx = [] for ptype, arg in zip(pvar.param_types, args): idx.append(self.rddl.object_table[ptype]['idx'][arg]) idx = tuple(idx) fluent[idx] = val else: fluent = val with self.graph.as_default(): t = tf.constant(fluent, dtype=dtype, name=utils.identifier(name)) scope = [None] * len(t.shape) fluent = TensorFluent(t, scope, batch=False) fluent_pair = (name, fluent) fluents.append(fluent_pair) return fluents
python
def _initialize_pvariables(self, pvariables: Dict[str, PVariable], ordering: List[str], initializer: Optional[InitializerList] = None) -> List[Tuple[str, TensorFluent]]: '''Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor. ''' if initializer is not None: init = dict() for ((name, args), value) in initializer: arity = len(args) if args is not None else 0 name = '{}/{}'.format(name, arity) init[name] = init.get(name, []) init[name].append((args, value)) fluents = [] for name in ordering: pvar = pvariables[name] shape = self.rddl._param_types_to_shape(pvar.param_types) dtype = utils.range_type_to_dtype(pvar.range) fluent = np.full(shape, pvar.default) if initializer is not None: for args, val in init.get(name, []): if args is not None: idx = [] for ptype, arg in zip(pvar.param_types, args): idx.append(self.rddl.object_table[ptype]['idx'][arg]) idx = tuple(idx) fluent[idx] = val else: fluent = val with self.graph.as_default(): t = tf.constant(fluent, dtype=dtype, name=utils.identifier(name)) scope = [None] * len(t.shape) fluent = TensorFluent(t, scope, batch=False) fluent_pair = (name, fluent) fluents.append(fluent_pair) return fluents
[ "def", "_initialize_pvariables", "(", "self", ",", "pvariables", ":", "Dict", "[", "str", ",", "PVariable", "]", ",", "ordering", ":", "List", "[", "str", "]", ",", "initializer", ":", "Optional", "[", "InitializerList", "]", "=", "None", ")", "->", "List", "[", "Tuple", "[", "str", ",", "TensorFluent", "]", "]", ":", "if", "initializer", "is", "not", "None", ":", "init", "=", "dict", "(", ")", "for", "(", "(", "name", ",", "args", ")", ",", "value", ")", "in", "initializer", ":", "arity", "=", "len", "(", "args", ")", "if", "args", "is", "not", "None", "else", "0", "name", "=", "'{}/{}'", ".", "format", "(", "name", ",", "arity", ")", "init", "[", "name", "]", "=", "init", ".", "get", "(", "name", ",", "[", "]", ")", "init", "[", "name", "]", ".", "append", "(", "(", "args", ",", "value", ")", ")", "fluents", "=", "[", "]", "for", "name", "in", "ordering", ":", "pvar", "=", "pvariables", "[", "name", "]", "shape", "=", "self", ".", "rddl", ".", "_param_types_to_shape", "(", "pvar", ".", "param_types", ")", "dtype", "=", "utils", ".", "range_type_to_dtype", "(", "pvar", ".", "range", ")", "fluent", "=", "np", ".", "full", "(", "shape", ",", "pvar", ".", "default", ")", "if", "initializer", "is", "not", "None", ":", "for", "args", ",", "val", "in", "init", ".", "get", "(", "name", ",", "[", "]", ")", ":", "if", "args", "is", "not", "None", ":", "idx", "=", "[", "]", "for", "ptype", ",", "arg", "in", "zip", "(", "pvar", ".", "param_types", ",", "args", ")", ":", "idx", ".", "append", "(", "self", ".", "rddl", ".", "object_table", "[", "ptype", "]", "[", "'idx'", "]", "[", "arg", "]", ")", "idx", "=", "tuple", "(", "idx", ")", "fluent", "[", "idx", "]", "=", "val", "else", ":", "fluent", "=", "val", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "t", "=", "tf", ".", "constant", "(", "fluent", ",", "dtype", "=", "dtype", ",", "name", "=", "utils", ".", "identifier", "(", "name", ")", ")", "scope", "=", "[", "None", "]", "*", "len", "(", "t", ".", "shape", ")", "fluent", "=", "TensorFluent", "(", "t", ",", "scope", ",", "batch", "=", "False", ")", "fluent_pair", "=", "(", "name", ",", "fluent", ")", "fluents", ".", "append", "(", "fluent_pair", ")", "return", "fluents" ]
Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor.
[ "Instantiates", "pvariables", "given", "an", "initialization", "list", "and", "returns", "a", "list", "of", "TensorFluents", "in", "the", "given", "ordering", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L497-L541
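_initialize_pvariables makes two passes: it normalizes initializer keys to 'name/arity' and groups the (args, value) pairs, then writes each value into a default-filled array at the index looked up in the object table. Both passes can be traced with NumPy alone (the pvariable, objects, and values below are invented for illustration):

```python
import numpy as np

# Hypothetical initializer in the (('name', args), value) shape consumed above.
initializer = [(('level', ('t1',)), 75.0), (('level', ('t2',)), 50.0)]

init = {}
for (name, args), value in initializer:
    arity = len(args) if args is not None else 0
    key = '{}/{}'.format(name, arity)          # normalize to 'name/arity'
    init.setdefault(key, []).append((args, value))

object_idx = {'t1': 0, 't2': 1, 't3': 2}       # stand-in for rddl.object_table
fluent = np.full((3,), 0.0)                    # like np.full(shape, pvar.default)
for args, val in init['level/1']:
    fluent[(object_idx[args[0]],)] = val       # point update at the object index
print(fluent)  # [75. 50.  0.]
```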
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_non_fluents
def _initialize_non_fluents(self): '''Returns the non-fluents instantiated.''' non_fluents = self.rddl.domain.non_fluents initializer = self.rddl.non_fluents.init_non_fluent self.non_fluents = self._initialize_pvariables( non_fluents, self.rddl.domain.non_fluent_ordering, initializer) return self.non_fluents
python
def _initialize_non_fluents(self): '''Returns the non-fluents instantiated.''' non_fluents = self.rddl.domain.non_fluents initializer = self.rddl.non_fluents.init_non_fluent self.non_fluents = self._initialize_pvariables( non_fluents, self.rddl.domain.non_fluent_ordering, initializer) return self.non_fluents
[ "def", "_initialize_non_fluents", "(", "self", ")", ":", "non_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "non_fluents", "initializer", "=", "self", ".", "rddl", ".", "non_fluents", ".", "init_non_fluent", "self", ".", "non_fluents", "=", "self", ".", "_initialize_pvariables", "(", "non_fluents", ",", "self", ".", "rddl", ".", "domain", ".", "non_fluent_ordering", ",", "initializer", ")", "return", "self", ".", "non_fluents" ]
Returns the non-fluents instantiated.
[ "Returns", "the", "non", "-", "fluents", "instantiated", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L543-L551
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_initial_state_fluents
def _initialize_initial_state_fluents(self): '''Returns the initial state-fluents instantiated.''' state_fluents = self.rddl.domain.state_fluents initializer = self.rddl.instance.init_state self.initial_state_fluents = self._initialize_pvariables( state_fluents, self.rddl.domain.state_fluent_ordering, initializer) return self.initial_state_fluents
python
def _initialize_initial_state_fluents(self): '''Returns the initial state-fluents instantiated.''' state_fluents = self.rddl.domain.state_fluents initializer = self.rddl.instance.init_state self.initial_state_fluents = self._initialize_pvariables( state_fluents, self.rddl.domain.state_fluent_ordering, initializer) return self.initial_state_fluents
[ "def", "_initialize_initial_state_fluents", "(", "self", ")", ":", "state_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "state_fluents", "initializer", "=", "self", ".", "rddl", ".", "instance", ".", "init_state", "self", ".", "initial_state_fluents", "=", "self", ".", "_initialize_pvariables", "(", "state_fluents", ",", "self", ".", "rddl", ".", "domain", ".", "state_fluent_ordering", ",", "initializer", ")", "return", "self", ".", "initial_state_fluents" ]
Returns the initial state-fluents instantiated.
[ "Returns", "the", "initial", "state", "-", "fluents", "instantiated", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L553-L561
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_default_action_fluents
def _initialize_default_action_fluents(self): '''Returns the default action-fluents instantiated.''' action_fluents = self.rddl.domain.action_fluents self.default_action_fluents = self._initialize_pvariables( action_fluents, self.rddl.domain.action_fluent_ordering) return self.default_action_fluents
python
def _initialize_default_action_fluents(self): '''Returns the default action-fluents instantiated.''' action_fluents = self.rddl.domain.action_fluents self.default_action_fluents = self._initialize_pvariables( action_fluents, self.rddl.domain.action_fluent_ordering) return self.default_action_fluents
[ "def", "_initialize_default_action_fluents", "(", "self", ")", ":", "action_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "action_fluents", "self", ".", "default_action_fluents", "=", "self", ".", "_initialize_pvariables", "(", "action_fluents", ",", "self", ".", "rddl", ".", "domain", ".", "action_fluent_ordering", ")", "return", "self", ".", "default_action_fluents" ]
Returns the default action-fluents instantiated.
[ "Returns", "the", "default", "action", "-", "fluents", "instantiated", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L563-L569
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_batch_fluents
def _compile_batch_fluents(self, fluents: List[Tuple[str, TensorFluent]], batch_size: int) -> Sequence[tf.Tensor]: '''Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size. ''' batch_fluents = [] with self.graph.as_default(): for name, fluent in fluents: name_scope = utils.identifier(name) with tf.name_scope(name_scope): t = tf.stack([fluent.tensor] * batch_size) batch_fluents.append(t) return tuple(batch_fluents)
python
def _compile_batch_fluents(self, fluents: List[Tuple[str, TensorFluent]], batch_size: int) -> Sequence[tf.Tensor]: '''Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size. ''' batch_fluents = [] with self.graph.as_default(): for name, fluent in fluents: name_scope = utils.identifier(name) with tf.name_scope(name_scope): t = tf.stack([fluent.tensor] * batch_size) batch_fluents.append(t) return tuple(batch_fluents)
[ "def", "_compile_batch_fluents", "(", "self", ",", "fluents", ":", "List", "[", "Tuple", "[", "str", ",", "TensorFluent", "]", "]", ",", "batch_size", ":", "int", ")", "->", "Sequence", "[", "tf", ".", "Tensor", "]", ":", "batch_fluents", "=", "[", "]", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "for", "name", ",", "fluent", "in", "fluents", ":", "name_scope", "=", "utils", ".", "identifier", "(", "name", ")", "with", "tf", ".", "name_scope", "(", "name_scope", ")", ":", "t", "=", "tf", ".", "stack", "(", "[", "fluent", ".", "tensor", "]", "*", "batch_size", ")", "batch_fluents", ".", "append", "(", "t", ")", "return", "tuple", "(", "batch_fluents", ")" ]
Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size.
[ "Compiles", "fluents", "into", "tensors", "with", "given", "batch_size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L571-L587
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_expression
def _compile_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent. ''' etype2compiler = { 'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression } etype = expr.etype if etype[0] not in etype2compiler: raise ValueError('Expression type unknown: {}'.format(etype)) with self.graph.as_default(): compiler_fn = etype2compiler[etype[0]] return compiler_fn(expr, scope, batch_size, noise)
python
def _compile_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent. ''' etype2compiler = { 'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression } etype = expr.etype if etype[0] not in etype2compiler: raise ValueError('Expression type unknown: {}'.format(etype)) with self.graph.as_default(): compiler_fn = etype2compiler[etype[0]] return compiler_fn(expr, scope, batch_size, noise)
[ "def", "_compile_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype2compiler", "=", "{", "'constant'", ":", "self", ".", "_compile_constant_expression", ",", "'pvar'", ":", "self", ".", "_compile_pvariable_expression", ",", "'randomvar'", ":", "self", ".", "_compile_random_variable_expression", ",", "'arithmetic'", ":", "self", ".", "_compile_arithmetic_expression", ",", "'boolean'", ":", "self", ".", "_compile_boolean_expression", ",", "'relational'", ":", "self", ".", "_compile_relational_expression", ",", "'func'", ":", "self", ".", "_compile_function_expression", ",", "'control'", ":", "self", ".", "_compile_control_flow_expression", ",", "'aggregation'", ":", "self", ".", "_compile_aggregation_expression", "}", "etype", "=", "expr", ".", "etype", "if", "etype", "[", "0", "]", "not", "in", "etype2compiler", ":", "raise", "ValueError", "(", "'Expression type unknown: {}'", ".", "format", "(", "etype", ")", ")", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "compiler_fn", "=", "etype2compiler", "[", "etype", "[", "0", "]", "]", "return", "compiler_fn", "(", "expr", ",", "scope", ",", "batch_size", ",", "noise", ")" ]
Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.
[ "Compile", "the", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L589-L623
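_compile_expression is a dispatch table keyed on the head of expr.etype, with a fail-fast ValueError for unknown tags. The same pattern in miniature (the handler names here are invented, not the compiler's):

```python
# Minimal dispatch-table sketch; handlers are illustrative stand-ins.
def compile_constant(expr):
    return 'constant({})'.format(expr)

def compile_pvar(expr):
    return 'pvar({})'.format(expr)

etype2compiler = {'constant': compile_constant, 'pvar': compile_pvar}

def dispatch(etype, expr):
    if etype not in etype2compiler:
        raise ValueError('Expression type unknown: {}'.format(etype))
    return etype2compiler[etype](expr)

print(dispatch('constant', 3.14))  # constant(3.14)
```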
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_constant_expression
def _compile_constant_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args dtype = utils.python_type_to_dtype(etype[1]) fluent = TensorFluent.constant(args, dtype=dtype) return fluent
python
def _compile_constant_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args dtype = utils.python_type_to_dtype(etype[1]) fluent = TensorFluent.constant(args, dtype=dtype) return fluent
[ "def", "_compile_constant_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "dtype", "=", "utils", ".", "python_type_to_dtype", "(", "etype", "[", "1", "]", ")", "fluent", "=", "TensorFluent", ".", "constant", "(", "args", ",", "dtype", "=", "dtype", ")", "return", "fluent" ]
Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "constant", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L625-L645
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_pvariable_expression
def _compile_pvariable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args name = expr._pvar_to_name(args) if name not in scope: raise ValueError('Variable {} not in scope.'.format(name)) fluent = scope[name] scope = args[1] if args[1] is not None else [] if isinstance(fluent, TensorFluent): fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch) elif isinstance(fluent, tf.Tensor): fluent = TensorFluent(fluent, scope, batch=self.batch_mode) else: raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent)) return fluent
python
def _compile_pvariable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args name = expr._pvar_to_name(args) if name not in scope: raise ValueError('Variable {} not in scope.'.format(name)) fluent = scope[name] scope = args[1] if args[1] is not None else [] if isinstance(fluent, TensorFluent): fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch) elif isinstance(fluent, tf.Tensor): fluent = TensorFluent(fluent, scope, batch=self.batch_mode) else: raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent)) return fluent
[ "def", "_compile_pvariable_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "name", "=", "expr", ".", "_pvar_to_name", "(", "args", ")", "if", "name", "not", "in", "scope", ":", "raise", "ValueError", "(", "'Variable {} not in scope.'", ".", "format", "(", "name", ")", ")", "fluent", "=", "scope", "[", "name", "]", "scope", "=", "args", "[", "1", "]", "if", "args", "[", "1", "]", "is", "not", "None", "else", "[", "]", "if", "isinstance", "(", "fluent", ",", "TensorFluent", ")", ":", "fluent", "=", "TensorFluent", "(", "fluent", ".", "tensor", ",", "scope", ",", "batch", "=", "fluent", ".", "batch", ")", "elif", "isinstance", "(", "fluent", ",", "tf", ".", "Tensor", ")", ":", "fluent", "=", "TensorFluent", "(", "fluent", ",", "scope", ",", "batch", "=", "self", ".", "batch_mode", ")", "else", ":", "raise", "ValueError", "(", "'Variable in scope must be TensorFluent-like: {}'", ".", "format", "(", "fluent", ")", ")", "return", "fluent" ]
Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "pvariable", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L647-L676
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_random_variable_expression
def _compile_random_variable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If the `noise` list is given, Normal variables are sampled via the reparameterization `mean + sqrt(variance) * xi` using the externally supplied noise tensors. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'KronDelta': sample = self._compile_expression(args[0], scope, batch_size, noise) elif etype[1] == 'Bernoulli': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Bernoulli(mean, batch_size) elif etype[1] == 'Uniform': low = self._compile_expression(args[0], scope, batch_size, noise) high = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Uniform(low, high, batch_size) elif etype[1] == 'Normal': if noise is None: mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Normal(mean, variance, batch_size) else: xi = noise.pop() xi = TensorFluent(xi, scope=[], batch=True) mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) sample = mean + TensorFluent.sqrt(variance) * xi elif etype[1] == 'Laplace': mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Laplace(mean, variance, batch_size) elif etype[1] == 'Gamma': shape = self._compile_expression(args[0], scope, batch_size, noise) scale = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Gamma(shape, scale, batch_size) elif etype[1] == 'Exponential': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Exponential(mean, batch_size) else: raise ValueError('Invalid random variable expression:\n{}.'.format(expr)) return sample
python
def _compile_random_variable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If the `noise` list is given, Normal variables are sampled via the reparameterization `mean + sqrt(variance) * xi` using the externally supplied noise tensors. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'KronDelta': sample = self._compile_expression(args[0], scope, batch_size, noise) elif etype[1] == 'Bernoulli': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Bernoulli(mean, batch_size) elif etype[1] == 'Uniform': low = self._compile_expression(args[0], scope, batch_size, noise) high = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Uniform(low, high, batch_size) elif etype[1] == 'Normal': if noise is None: mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Normal(mean, variance, batch_size) else: xi = noise.pop() xi = TensorFluent(xi, scope=[], batch=True) mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) sample = mean + TensorFluent.sqrt(variance) * xi elif etype[1] == 'Laplace': mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Laplace(mean, variance, batch_size) elif etype[1] == 'Gamma': shape = self._compile_expression(args[0], scope, batch_size, noise) scale = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Gamma(shape, scale, batch_size) elif etype[1] == 'Exponential': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Exponential(mean, batch_size) else: raise ValueError('Invalid random variable expression:\n{}.'.format(expr)) return sample
[ "def", "_compile_random_variable_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "if", "etype", "[", "1", "]", "==", "'KronDelta'", ":", "sample", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "elif", "etype", "[", "1", "]", "==", "'Bernoulli'", ":", "mean", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Bernoulli", "(", "mean", ",", "batch_size", ")", "elif", "etype", "[", "1", "]", "==", "'Uniform'", ":", "low", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "high", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Uniform", "(", "low", ",", "high", ",", "batch_size", ")", "elif", "etype", "[", "1", "]", "==", "'Normal'", ":", "if", "noise", "is", "None", ":", "mean", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "variance", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Normal", "(", "mean", ",", "variance", ",", "batch_size", ")", "else", ":", "xi", "=", "noise", ".", "pop", "(", ")", "xi", "=", "TensorFluent", "(", "xi", ",", "scope", "=", "[", "]", ",", "batch", "=", "True", ")", "mean", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "variance", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "sample", "=", "mean", "+", "TensorFluent", ".", "sqrt", "(", "variance", ")", "*", "xi", "elif", "etype", "[", "1", "]", "==", "'Laplace'", ":", "mean", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "variance", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Laplace", "(", "mean", ",", "variance", ",", "batch_size", ")", "elif", "etype", "[", "1", "]", "==", "'Gamma'", ":", "shape", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "scale", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Gamma", "(", "shape", ",", "scale", ",", "batch_size", ")", "elif", "etype", "[", "1", "]", "==", "'Exponential'", ":", "mean", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "dist", ",", "sample", "=", "TensorFluent", ".", "Exponential", "(", "mean", ",", "batch_size", ")", "else", ":", "raise", "ValueError", "(", "'Invalid random variable expression:\\n{}.'", ".", "format", "(", "expr", ")", 
")", "return", "sample" ]
Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If the `noise` list is given, Normal variables are sampled via the reparameterization `mean + sqrt(variance) * xi` using the externally supplied noise tensors. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "random", "variable", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L678-L734
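The noise branch of the Normal case is the standard reparameterization trick: the noise xi is drawn externally as N(0, 1) and the sample is expressed as mean + sqrt(variance) * xi, which keeps the sample differentiable in mean and variance. A NumPy sketch of the identity (values invented for illustration):

```python
import numpy as np

rng = np.random.default_rng(0)

mean, variance = 2.0, 9.0
xi = rng.standard_normal(5)              # externally supplied N(0, 1) noise
sample = mean + np.sqrt(variance) * xi   # distributed as Normal(2, 9)
print(sample.round(2))
```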
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_arithmetic_expression
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2op = { '+': lambda x: x, '-': lambda x: -x } if etype[1] not in etype2op: raise ValueError('Invalid unary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2op = { '+': lambda x, y: x + y, '-': lambda x, y: x - y, '*': lambda x, y: x * y, '/': lambda x, y: x / y, } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
python
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2op = { '+': lambda x: x, '-': lambda x: -x } if etype[1] not in etype2op: raise ValueError('Invalid unary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2op = { '+': lambda x, y: x + y, '-': lambda x, y: x - y, '*': lambda x, y: x * y, '/': lambda x, y: x / y, } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
[ "def", "_compile_arithmetic_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "if", "len", "(", "args", ")", "==", "1", ":", "etype2op", "=", "{", "'+'", ":", "lambda", "x", ":", "x", ",", "'-'", ":", "lambda", "x", ":", "-", "x", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2op", ":", "raise", "ValueError", "(", "'Invalid binary arithmetic expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2op", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ")", "else", ":", "etype2op", "=", "{", "'+'", ":", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "'-'", ":", "lambda", "x", ",", "y", ":", "x", "-", "y", ",", "'*'", ":", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "'/'", ":", "lambda", "x", ",", "y", ":", "x", "/", "y", ",", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2op", ":", "raise", "ValueError", "(", "'Invalid binary arithmetic expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2op", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "y", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ",", "y", ")", "return", "fluent" ]
Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "an", "arithmetic", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L736-L784
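The unary/binary dispatch above is a plain table lookup on the expression's arity and operator symbol. A minimal self-contained sketch of the same pattern, using plain floats in place of rddl2tf's Expression/TensorFluent objects (all names below are illustrative, not part of the library):

def compile_arithmetic(etype, args):
    # Dispatch on arity first, then on the operator symbol.
    if len(args) == 1:
        ops = {'+': lambda x: x, '-': lambda x: -x}
    else:
        ops = {'+': lambda x, y: x + y, '-': lambda x, y: x - y,
               '*': lambda x, y: x * y, '/': lambda x, y: x / y}
    if etype not in ops:
        raise ValueError('Invalid arithmetic expression: {}'.format(etype))
    return ops[etype](*args)

assert compile_arithmetic('-', [3.0]) == -3.0
assert compile_arithmetic('*', [2.0, 4.0]) == 8.0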
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_function_expression
def _compile_function_expression(self,
                                 expr: Expression,
                                 scope: Dict[str, TensorFluent],
                                 batch_size: Optional[int] = None,
                                 noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile a function expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args

    if len(args) == 1:
        etype2func = {
            'abs': TensorFluent.abs,
            'exp': TensorFluent.exp,
            'log': TensorFluent.log,
            'sqrt': TensorFluent.sqrt,
            'cos': TensorFluent.cos,
            'sin': TensorFluent.sin,
            'tan': TensorFluent.tan,
            'acos': TensorFluent.acos,
            'arccos': TensorFluent.acos,
            'asin': TensorFluent.asin,
            'arcsin': TensorFluent.asin,
            'atan': TensorFluent.atan,
            'arctan': TensorFluent.atan,
            'round': TensorFluent.round,
            'ceil': TensorFluent.ceil,
            'floor': TensorFluent.floor
        }

        if etype[1] not in etype2func:
            raise ValueError('Invalid unary function expression:\n{}'.format(expr))

        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        fluent = op(x)
    else:
        etype2func = {
            'pow': TensorFluent.pow,
            'max': TensorFluent.max,
            'min': TensorFluent.min
        }

        if etype[1] not in etype2func:
            raise ValueError('Invalid binary function expression:\n{}'.format(expr))

        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        y = self._compile_expression(args[1], scope, batch_size, noise)
        fluent = op(x, y)

    return fluent
python
def _compile_function_expression(self,
                                 expr: Expression,
                                 scope: Dict[str, TensorFluent],
                                 batch_size: Optional[int] = None,
                                 noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile a function expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args

    if len(args) == 1:
        etype2func = {
            'abs': TensorFluent.abs,
            'exp': TensorFluent.exp,
            'log': TensorFluent.log,
            'sqrt': TensorFluent.sqrt,
            'cos': TensorFluent.cos,
            'sin': TensorFluent.sin,
            'tan': TensorFluent.tan,
            'acos': TensorFluent.acos,
            'arccos': TensorFluent.acos,
            'asin': TensorFluent.asin,
            'arcsin': TensorFluent.asin,
            'atan': TensorFluent.atan,
            'arctan': TensorFluent.atan,
            'round': TensorFluent.round,
            'ceil': TensorFluent.ceil,
            'floor': TensorFluent.floor
        }

        if etype[1] not in etype2func:
            raise ValueError('Invalid unary function expression:\n{}'.format(expr))

        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        fluent = op(x)
    else:
        etype2func = {
            'pow': TensorFluent.pow,
            'max': TensorFluent.max,
            'min': TensorFluent.min
        }

        if etype[1] not in etype2func:
            raise ValueError('Invalid binary function expression:\n{}'.format(expr))

        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        y = self._compile_expression(args[1], scope, batch_size, noise)
        fluent = op(x, y)

    return fluent
[ "def", "_compile_function_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "if", "len", "(", "args", ")", "==", "1", ":", "etype2func", "=", "{", "'abs'", ":", "TensorFluent", ".", "abs", ",", "'exp'", ":", "TensorFluent", ".", "exp", ",", "'log'", ":", "TensorFluent", ".", "log", ",", "'sqrt'", ":", "TensorFluent", ".", "sqrt", ",", "'cos'", ":", "TensorFluent", ".", "cos", ",", "'sin'", ":", "TensorFluent", ".", "sin", ",", "'tan'", ":", "TensorFluent", ".", "tan", ",", "'acos'", ":", "TensorFluent", ".", "acos", ",", "'arccos'", ":", "TensorFluent", ".", "acos", ",", "'asin'", ":", "TensorFluent", ".", "asin", ",", "'arcsin'", ":", "TensorFluent", ".", "asin", ",", "'atan'", ":", "TensorFluent", ".", "atan", ",", "'arctan'", ":", "TensorFluent", ".", "atan", ",", "'round'", ":", "TensorFluent", ".", "round", ",", "'ceil'", ":", "TensorFluent", ".", "ceil", ",", "'floor'", ":", "TensorFluent", ".", "floor", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2func", ":", "raise", "ValueError", "(", "'Invalid unary function expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2func", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ")", "else", ":", "etype2func", "=", "{", "'pow'", ":", "TensorFluent", ".", "pow", ",", "'max'", ":", "TensorFluent", ".", "max", ",", "'min'", ":", "TensorFluent", ".", "min", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2func", ":", "raise", "ValueError", "(", "'Invalid binary function expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "op", "=", "etype2func", "[", "etype", "[", "1", "]", "]", "x", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "y", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "op", "(", "x", ",", "y", ")", "return", "fluent" ]
Compile a function expression `expr` into a TensorFluent
in the given `scope` with optional batch size.

Args:
    expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression.
    scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
    batch_size (Optional[int]): The batch size.

Returns:
    :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "function", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L874-L936
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_control_flow_expression
def _compile_control_flow_expression(self,
                                     expr: Expression,
                                     scope: Dict[str, TensorFluent],
                                     batch_size: Optional[int] = None,
                                     noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile a control flow expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args
    if etype[1] == 'if':
        condition = self._compile_expression(args[0], scope, batch_size, noise)
        true_case = self._compile_expression(args[1], scope, batch_size, noise)
        false_case = self._compile_expression(args[2], scope, batch_size, noise)
        fluent = TensorFluent.if_then_else(condition, true_case, false_case)
    else:
        raise ValueError('Invalid control flow expression:\n{}'.format(expr))
    return fluent
python
def _compile_control_flow_expression(self,
                                     expr: Expression,
                                     scope: Dict[str, TensorFluent],
                                     batch_size: Optional[int] = None,
                                     noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile a control flow expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args
    if etype[1] == 'if':
        condition = self._compile_expression(args[0], scope, batch_size, noise)
        true_case = self._compile_expression(args[1], scope, batch_size, noise)
        false_case = self._compile_expression(args[2], scope, batch_size, noise)
        fluent = TensorFluent.if_then_else(condition, true_case, false_case)
    else:
        raise ValueError('Invalid control flow expression:\n{}'.format(expr))
    return fluent
[ "def", "_compile_control_flow_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "if", "etype", "[", "1", "]", "==", "'if'", ":", "condition", "=", "self", ".", "_compile_expression", "(", "args", "[", "0", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "true_case", "=", "self", ".", "_compile_expression", "(", "args", "[", "1", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "false_case", "=", "self", ".", "_compile_expression", "(", "args", "[", "2", "]", ",", "scope", ",", "batch_size", ",", "noise", ")", "fluent", "=", "TensorFluent", ".", "if_then_else", "(", "condition", ",", "true_case", ",", "false_case", ")", "else", ":", "raise", "ValueError", "(", "'Invalid control flow expression:\\n{}'", ".", "format", "(", "expr", ")", ")", "return", "fluent" ]
Compile a control flow expression `expr` into a TensorFluent
in the given `scope` with optional batch size.

Args:
    expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression.
    scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
    batch_size (Optional[int]): The batch size.

Returns:
    :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "control", "flow", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L938-L963
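The if-then-else case delegates to TensorFluent.if_then_else, whose implementation is not shown in these records; at the raw-tensor level the natural underlying operation is an elementwise select, as in this hedged TensorFlow sketch:

import tensorflow as tf

condition = tf.constant([True, False, True])
true_case = tf.constant([1.0, 1.0, 1.0])
false_case = tf.constant([0.0, 0.0, 0.0])
# Elementwise select: picks from true_case where condition holds.
result = tf.where(condition, true_case, false_case)  # -> [1.0, 0.0, 1.0]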
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_aggregation_expression
def _compile_aggregation_expression(self,
                                    expr: Expression,
                                    scope: Dict[str, TensorFluent],
                                    batch_size: Optional[int] = None,
                                    noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile an aggregation expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args

    typed_var_list = args[:-1]
    vars_list = [var for _, (var, _) in typed_var_list]
    expr = args[-1]

    x = self._compile_expression(expr, scope)

    etype2aggr = {
        'sum': x.sum,
        'prod': x.prod,
        'avg': x.avg,
        'maximum': x.maximum,
        'minimum': x.minimum,
        'exists': x.exists,
        'forall': x.forall
    }

    if etype[1] not in etype2aggr:
        raise ValueError('Invalid aggregation expression {}.'.format(expr))

    aggr = etype2aggr[etype[1]]
    fluent = aggr(vars_list=vars_list)
    return fluent
python
def _compile_aggregation_expression(self,
                                    expr: Expression,
                                    scope: Dict[str, TensorFluent],
                                    batch_size: Optional[int] = None,
                                    noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    '''Compile an aggregation expression `expr` into a TensorFluent
    in the given `scope` with optional batch size.

    Args:
        expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.
        scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
        batch_size (Optional[int]): The batch size.

    Returns:
        :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
    '''
    etype = expr.etype
    args = expr.args

    typed_var_list = args[:-1]
    vars_list = [var for _, (var, _) in typed_var_list]
    expr = args[-1]

    x = self._compile_expression(expr, scope)

    etype2aggr = {
        'sum': x.sum,
        'prod': x.prod,
        'avg': x.avg,
        'maximum': x.maximum,
        'minimum': x.minimum,
        'exists': x.exists,
        'forall': x.forall
    }

    if etype[1] not in etype2aggr:
        raise ValueError('Invalid aggregation expression {}.'.format(expr))

    aggr = etype2aggr[etype[1]]
    fluent = aggr(vars_list=vars_list)
    return fluent
[ "def", "_compile_aggregation_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[", "tf", ".", "Tensor", "]", "]", "=", "None", ")", "->", "TensorFluent", ":", "etype", "=", "expr", ".", "etype", "args", "=", "expr", ".", "args", "typed_var_list", "=", "args", "[", ":", "-", "1", "]", "vars_list", "=", "[", "var", "for", "_", ",", "(", "var", ",", "_", ")", "in", "typed_var_list", "]", "expr", "=", "args", "[", "-", "1", "]", "x", "=", "self", ".", "_compile_expression", "(", "expr", ",", "scope", ")", "etype2aggr", "=", "{", "'sum'", ":", "x", ".", "sum", ",", "'prod'", ":", "x", ".", "prod", ",", "'avg'", ":", "x", ".", "avg", ",", "'maximum'", ":", "x", ".", "maximum", ",", "'minimum'", ":", "x", ".", "minimum", ",", "'exists'", ":", "x", ".", "exists", ",", "'forall'", ":", "x", ".", "forall", "}", "if", "etype", "[", "1", "]", "not", "in", "etype2aggr", ":", "raise", "ValueError", "(", "'Invalid aggregation expression {}.'", ".", "format", "(", "expr", ")", ")", "aggr", "=", "etype2aggr", "[", "etype", "[", "1", "]", "]", "fluent", "=", "aggr", "(", "vars_list", "=", "vars_list", ")", "return", "fluent" ]
Compile an aggregation expression `expr` into a TensorFluent
in the given `scope` with optional batch size.

Args:
    expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression.
    scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
    batch_size (Optional[int]): The batch size.

Returns:
    :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "an", "aggregation", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L965-L1006
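Each aggregation keyword maps to a TensorFluent reduction over the axes bound by the quantified variables. A hedged sketch of what such reductions amount to at the raw-tensor level (the actual variable-to-axis bookkeeping lives inside TensorFluent and is not shown in these records):

import tensorflow as tf

# fluent(?x, ?y) laid out with one tensor axis per quantified RDDL variable.
fluent = tf.constant([[1.0, 2.0], [3.0, 4.0]])
sum_over_x = tf.reduce_sum(fluent, axis=0)   # 'sum' over ?x -> [4.0, 6.0]
max_over_y = tf.reduce_max(fluent, axis=1)   # 'maximum' over ?y -> [2.0, 4.0]
bools = tf.constant([[True, False], [True, True]])
forall_y = tf.reduce_all(bools, axis=1)      # 'forall' -> [False, True]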
yymao/easyquery
easyquery.py
Query.mask
def mask(self, table):
    """
    Use the current Query object to create a mask (a boolean array)
    for `table`, with True marking the entries that satisfy the query.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.

    Returns
    -------
    mask : numpy bool array
    """
    if self._operator is None:
        if self._operands is None:
            return np.ones(self._get_table_len(table), dtype=bool)
        else:
            return self._create_mask(table, self._operands)

    if self._operator == 'NOT':
        return ~self._operands.mask(table)

    if self._operator == 'AND':
        op_func = np.logical_and
    elif self._operator == 'OR':
        op_func = np.logical_or
    elif self._operator == 'XOR':
        op_func = np.logical_xor

    mask_this = self._operands[0].mask(table)
    for op in self._operands[1:]:
        mask_this = op_func(mask_this, op.mask(table), out=mask_this)
    return mask_this
python
def mask(self, table):
    """
    Use the current Query object to create a mask (a boolean array)
    for `table`, with True marking the entries that satisfy the query.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.

    Returns
    -------
    mask : numpy bool array
    """
    if self._operator is None:
        if self._operands is None:
            return np.ones(self._get_table_len(table), dtype=bool)
        else:
            return self._create_mask(table, self._operands)

    if self._operator == 'NOT':
        return ~self._operands.mask(table)

    if self._operator == 'AND':
        op_func = np.logical_and
    elif self._operator == 'OR':
        op_func = np.logical_or
    elif self._operator == 'XOR':
        op_func = np.logical_xor

    mask_this = self._operands[0].mask(table)
    for op in self._operands[1:]:
        mask_this = op_func(mask_this, op.mask(table), out=mask_this)
    return mask_this
[ "def", "mask", "(", "self", ",", "table", ")", ":", "if", "self", ".", "_operator", "is", "None", ":", "if", "self", ".", "_operands", "is", "None", ":", "return", "np", ".", "ones", "(", "self", ".", "_get_table_len", "(", "table", ")", ",", "dtype", "=", "np", ".", "bool", ")", "else", ":", "return", "self", ".", "_create_mask", "(", "table", ",", "self", ".", "_operands", ")", "if", "self", ".", "_operator", "==", "'NOT'", ":", "return", "~", "self", ".", "_operands", ".", "mask", "(", "table", ")", "if", "self", ".", "_operator", "==", "'AND'", ":", "op_func", "=", "np", ".", "logical_and", "elif", "self", ".", "_operator", "==", "'OR'", ":", "op_func", "=", "np", ".", "logical_or", "elif", "self", ".", "_operator", "==", "'XOR'", ":", "op_func", "=", "np", ".", "logical_xor", "mask_this", "=", "self", ".", "_operands", "[", "0", "]", ".", "mask", "(", "table", ")", "for", "op", "in", "self", ".", "_operands", "[", "1", ":", "]", ":", "mask_this", "=", "op_func", "(", "mask_this", ",", "op", ".", "mask", "(", "table", ")", ",", "out", "=", "mask_this", ")", "return", "mask_this" ]
Use the current Query object to create a mask (a boolean array)
for `table`, with True marking the entries that satisfy the query.

Parameters
----------
table : NumPy structured array, astropy Table, etc.

Returns
-------
mask : numpy bool array
[ "Use", "the", "current", "Query", "object", "to", "count", "the", "number", "of", "entries", "in", "table", "that", "satisfy", "queries", "." ]
train
https://github.com/yymao/easyquery/blob/cd94c100e26f59042cd9ffb26d0a7b61cdcd457d/easyquery.py#L201-L234
yymao/easyquery
easyquery.py
Query.filter
def filter(self, table, column_slice=None):
    """
    Use the current Query object to filter `table`, returning only the
    entries that satisfy the query.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.
    column_slice : Column to return. Default is None (return all columns).

    Returns
    -------
    table : filtered table
    """
    if self._operator is None and self._operands is None:
        return table if column_slice is None else self._get_table_column(table, column_slice)

    if self._operator == 'AND' and column_slice is None:
        for op in self._operands:
            table = op.filter(table)
        return table

    return self._mask_table(
        table if column_slice is None else self._get_table_column(table, column_slice),
        self.mask(table)
    )
python
def filter(self, table, column_slice=None):
    """
    Use the current Query object to filter `table`, returning only the
    entries that satisfy the query.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.
    column_slice : Column to return. Default is None (return all columns).

    Returns
    -------
    table : filtered table
    """
    if self._operator is None and self._operands is None:
        return table if column_slice is None else self._get_table_column(table, column_slice)

    if self._operator == 'AND' and column_slice is None:
        for op in self._operands:
            table = op.filter(table)
        return table

    return self._mask_table(
        table if column_slice is None else self._get_table_column(table, column_slice),
        self.mask(table)
    )
[ "def", "filter", "(", "self", ",", "table", ",", "column_slice", "=", "None", ")", ":", "if", "self", ".", "_operator", "is", "None", "and", "self", ".", "_operands", "is", "None", ":", "return", "table", "if", "column_slice", "is", "None", "else", "self", ".", "_get_table_column", "(", "table", ",", "column_slice", ")", "if", "self", ".", "_operator", "==", "'AND'", "and", "column_slice", "is", "None", ":", "for", "op", "in", "self", ".", "_operands", ":", "table", "=", "op", ".", "filter", "(", "table", ")", "return", "table", "return", "self", ".", "_mask_table", "(", "table", "if", "column_slice", "is", "None", "else", "self", ".", "_get_table_column", "(", "table", ",", "column_slice", ")", ",", "self", ".", "mask", "(", "table", ")", ")" ]
Use the current Query object to filter `table`, returning only the
entries that satisfy the query.

Parameters
----------
table : NumPy structured array, astropy Table, etc.
column_slice : Column to return. Default is None (return all columns).

Returns
-------
table : filtered table
[ "Use", "the", "current", "Query", "object", "to", "create", "a", "mask", "(", "a", "boolean", "array", ")", "for", "table", "." ]
train
https://github.com/yymao/easyquery/blob/cd94c100e26f59042cd9ffb26d0a7b61cdcd457d/easyquery.py#L237-L262
yymao/easyquery
easyquery.py
Query.count
def count(self, table):
    """
    Use the current Query object to count the number of entries
    in `table` that satisfy `queries`.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.

    Returns
    -------
    count : int
    """
    if self._operator is None and self._operands is None:
        return self._get_table_len(table)

    return np.count_nonzero(self.mask(table))
python
def count(self, table):
    """
    Use the current Query object to count the number of entries
    in `table` that satisfy `queries`.

    Parameters
    ----------
    table : NumPy structured array, astropy Table, etc.

    Returns
    -------
    count : int
    """
    if self._operator is None and self._operands is None:
        return self._get_table_len(table)

    return np.count_nonzero(self.mask(table))
[ "def", "count", "(", "self", ",", "table", ")", ":", "if", "self", ".", "_operator", "is", "None", "and", "self", ".", "_operands", "is", "None", ":", "return", "self", ".", "_get_table_len", "(", "table", ")", "return", "np", ".", "count_nonzero", "(", "self", ".", "mask", "(", "table", ")", ")" ]
Use the current Query object to count the number of entries
in `table` that satisfy `queries`.

Parameters
----------
table : NumPy structured array, astropy Table, etc.

Returns
-------
count : int
[ "Use", "the", "current", "Query", "object", "to", "count", "the", "number", "of", "entries", "in", "table", "that", "satisfy", "queries", "." ]
train
https://github.com/yymao/easyquery/blob/cd94c100e26f59042cd9ffb26d0a7b61cdcd457d/easyquery.py#L266-L282
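A hedged usage sketch tying `mask`, `filter`, and `count` together; it assumes easyquery's documented string-based query constructor and operator composition (`&` for AND), which are not shown in these records:

import numpy as np
from easyquery import Query

# Hypothetical table: a structured array with columns 'a' and 'b'.
table = np.array([(1, 5.0), (2, 1.0), (3, 2.5)],
                 dtype=[('a', np.int64), ('b', np.float64)])

q = Query('a > 1')
print(q.mask(table))    # -> [False  True  True]
print(q.count(table))   # -> 2
print((q & Query('b < 2.0')).filter(table))  # rows satisfying both queries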
yymao/easyquery
easyquery.py
Query.variable_names
def variable_names(self):
    """
    Get all variable names required for this query
    """
    if self._variable_names is None:
        if self._operator is None:
            if self._operands is None:
                self._variable_names = tuple()
            else:
                self._variable_names = self._get_variable_names(self._operands)

        elif self._operator == 'NOT':
            self._variable_names = self._operands.variable_names

        else:
            v = list()
            for op in self._operands:
                v.extend(op.variable_names)
            self._variable_names = tuple(set(v))

    return self._variable_names
python
def variable_names(self):
    """
    Get all variable names required for this query
    """
    if self._variable_names is None:
        if self._operator is None:
            if self._operands is None:
                self._variable_names = tuple()
            else:
                self._variable_names = self._get_variable_names(self._operands)

        elif self._operator == 'NOT':
            self._variable_names = self._operands.variable_names

        else:
            v = list()
            for op in self._operands:
                v.extend(op.variable_names)
            self._variable_names = tuple(set(v))

    return self._variable_names
[ "def", "variable_names", "(", "self", ")", ":", "if", "self", ".", "_variable_names", "is", "None", ":", "if", "self", ".", "_operator", "is", "None", ":", "if", "self", ".", "_operands", "is", "None", ":", "self", ".", "_variable_names", "=", "tuple", "(", ")", "else", ":", "self", ".", "_variable_names", "=", "self", ".", "_get_variable_names", "(", "self", ".", "_operands", ")", "elif", "self", ".", "_operator", "==", "'NOT'", ":", "self", ".", "_variable_names", "=", "self", ".", "_operands", ".", "variable_names", "else", ":", "v", "=", "list", "(", ")", "for", "op", "in", "self", ".", "_operands", ":", "v", ".", "extend", "(", "op", ".", "variable_names", ")", "self", ".", "_variable_names", "=", "tuple", "(", "set", "(", "v", ")", ")", "return", "self", ".", "_variable_names" ]
Get all variable names required for this query
[ "Get", "all", "variable", "names", "required", "for", "this", "query" ]
train
https://github.com/yymao/easyquery/blob/cd94c100e26f59042cd9ffb26d0a7b61cdcd457d/easyquery.py#L313-L334
non-Jedi/gyr
gyr/matrix_objects.py
Event.user
def user(self):
    """Creates a User object when requested."""
    try:
        return self._user
    except AttributeError:
        self._user = MatrixUser(self.mxid,
                                self.Api(identity=self.mxid))
        return self._user
python
def user(self):
    """Creates a User object when requested."""
    try:
        return self._user
    except AttributeError:
        self._user = MatrixUser(self.mxid,
                                self.Api(identity=self.mxid))
        return self._user
[ "def", "user", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_user", "except", "AttributeError", ":", "self", ".", "_user", "=", "MatrixUser", "(", "self", ".", "mxid", ",", "self", ".", "Api", "(", "identity", "=", "self", ".", "mxid", ")", ")", "return", "self", ".", "_user" ]
Creates a User object when requested.
[ "Creates", "a", "User", "object", "when", "requested", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L49-L55
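The try/except AttributeError in `user` is a lazy-initialization cache: the object is built on first access and reused afterwards. A generic standalone version of the idiom (all names illustrative):

class Lazy:
    @property
    def expensive(self):
        """Build the value on first access, then return the cached copy."""
        try:
            return self._expensive
        except AttributeError:
            self._expensive = object()  # stand-in for a costly constructor
            return self._expensive

obj = Lazy()
assert obj.expensive is obj.expensive  # second access hits the cache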
non-Jedi/gyr
gyr/matrix_objects.py
Event.room
def room(self):
    """Creates a Room object when requested."""
    try:
        return self._room
    except AttributeError:
        room_id = self.json["room_id"]
        self._room = MatrixRoom(room_id, self.Api)
        return self._room
python
def room(self):
    """Creates a Room object when requested."""
    try:
        return self._room
    except AttributeError:
        room_id = self.json["room_id"]
        self._room = MatrixRoom(room_id, self.Api)
        return self._room
[ "def", "room", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_room", "except", "AttributeError", ":", "room_id", "=", "self", ".", "json", "[", "\"room_id\"", "]", "self", ".", "_room", "=", "MatrixRoom", "(", "room_id", ",", "self", ".", "Api", ")", "return", "self", ".", "_room" ]
Creates a Room object when requested.
[ "Creates", "a", "Room", "object", "when", "requested", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L58-L65
non-Jedi/gyr
gyr/matrix_objects.py
MatrixUser.join
def join(self, room_str):
    """Joins room id or alias even if it must first be created."""
    response = self.user_api.join_room(room_str)
    return self._mkroom(response["room_id"])
python
def join(self, room_str):
    """Joins room id or alias even if it must first be created."""
    response = self.user_api.join_room(room_str)
    return self._mkroom(response["room_id"])
[ "def", "join", "(", "self", ",", "room_str", ")", ":", "response", "=", "self", ".", "user_api", ".", "join_room", "(", "room_str", ")", "return", "self", ".", "_mkroom", "(", "response", "[", "\"room_id\"", "]", ")" ]
Joins room id or alias even if it must first be created.
[ "Joins", "room", "id", "or", "alias", "even", "if", "it", "must", "first", "be", "created", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L234-L237
non-Jedi/gyr
gyr/matrix_objects.py
MatrixUser.refresh_rooms
def refresh_rooms(self):
    """Calls GET /joined_rooms to refresh rooms list."""
    for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
        self._rooms[room_id] = MatrixRoom(room_id, self.user_api)
python
def refresh_rooms(self):
    """Calls GET /joined_rooms to refresh rooms list."""
    for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
        self._rooms[room_id] = MatrixRoom(room_id, self.user_api)
[ "def", "refresh_rooms", "(", "self", ")", ":", "for", "room_id", "in", "self", ".", "user_api", ".", "get_joined_rooms", "(", ")", "[", "\"joined_rooms\"", "]", ":", "self", ".", "_rooms", "[", "room_id", "]", "=", "MatrixRoom", "(", "room_id", ",", "self", ".", "user_api", ")" ]
Calls GET /joined_rooms to refresh rooms list.
[ "Calls", "GET", "/", "joined_rooms", "to", "refresh", "rooms", "list", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L274-L277
alfredodeza/notario
notario/decorators.py
not_empty
def not_empty(_object):
    """
    Validates that the given input (which has to be a valid data structure)
    is not empty. Input *has* to be one of: `list`, `dict`, or `string`.

    It is especially useful when most of the validators being created are
    dealing with data structures that should not be empty.
    """
    if is_callable(_object):
        _validator = _object

        @wraps(_validator)
        @instance_of()
        def decorated(value):
            ensure(value, "%s is empty" % safe_repr(value))
            return _validator(value)
        return decorated

    try:
        ensure(len(_object), "%s is empty" % safe_repr(_object))
    except TypeError:
        raise AssertionError("not of any valid types: [list, dict, str]")
python
def not_empty(_object):
    """
    Validates that the given input (which has to be a valid data structure)
    is not empty. Input *has* to be one of: `list`, `dict`, or `string`.

    It is especially useful when most of the validators being created are
    dealing with data structures that should not be empty.
    """
    if is_callable(_object):
        _validator = _object

        @wraps(_validator)
        @instance_of()
        def decorated(value):
            ensure(value, "%s is empty" % safe_repr(value))
            return _validator(value)
        return decorated

    try:
        ensure(len(_object), "%s is empty" % safe_repr(_object))
    except TypeError:
        raise AssertionError("not of any valid types: [list, dict, str]")
[ "def", "not_empty", "(", "_object", ")", ":", "if", "is_callable", "(", "_object", ")", ":", "_validator", "=", "_object", "@", "wraps", "(", "_validator", ")", "@", "instance_of", "(", ")", "def", "decorated", "(", "value", ")", ":", "ensure", "(", "value", ",", "\"%s is empty\"", "%", "safe_repr", "(", "value", ")", ")", "return", "_validator", "(", "value", ")", "return", "decorated", "try", ":", "ensure", "(", "len", "(", "_object", ")", ",", "\"%s is empty\"", "%", "safe_repr", "(", "_object", ")", ")", "except", "TypeError", ":", "raise", "AssertionError", "(", "\"not of any valid types: [list, dict, str]\"", ")" ]
Validates that the given input (which has to be a valid data structure)
is not empty. Input *has* to be one of: `list`, `dict`, or `string`.

It is especially useful when most of the validators being created are
dealing with data structures that should not be empty.
[ "Validates", "the", "given", "input", "(", "has", "to", "be", "a", "valid", "data", "structure", ")", "is", "empty", ".", "Input", "*", "has", "*", "to", "be", "one", "of", ":", "list", "dict", "or", "string", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/decorators.py#L72-L92
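A hedged usage sketch of both calling conventions of `not_empty` (validating a value directly, and wrapping another validator); the import path is taken from the record above:

from notario.decorators import not_empty

not_empty([1, 2])        # passes silently: non-empty list
try:
    not_empty({})        # fails the emptiness check
except AssertionError as error:
    print(error)

@not_empty               # wraps a validator; it only runs on non-empty input
def all_ints(value):
    assert all(isinstance(item, int) for item in value)

all_ints([1, 2, 3])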
alfredodeza/notario
notario/decorators.py
optional
def optional(_object):
    """
    This decorator has a double functionality: it can wrap validators and
    make them optional, or it can wrap keys and make that entry optional.

    **Optional Validator:**
    Allows validators to work only when there is a value that contains some
    data; otherwise it will just not pass the information to the actual
    validator and will not fail as a result.

    As any normal decorator, it can be used correctly with the decorator
    syntax or in the actual schema.

    This is how it would look in a schema::

        ('key', optional(my_validator))

    Where ``my_validator`` can be any validator that accepts a single
    argument. In case a class based validator is being used (like
    ``recursive`` or ``iterables``) then it would look like::

        ('key', optional(class_validator(('key', 'value'))))

    Of course, the schema should vary depending on your needs; it is just
    the way of constructing the validator call that should be important.

    **Optional Keys:**
    Sometimes a given data structure may present optional entries. For
    example this data::

        data = {'required': 1, 'optional': 2}

    To represent this, you will need to declare the `optional` key in the
    schema, but by wrapping the key with this decorator you will basically
    tell the validation engine that if that key is present it should be
    validated; otherwise, it should be skipped. This is how the schema
    would look::

        schema = (('required', 1), (optional('optional'), 1))

    The above schema would allow data that is missing the ``optional`` key.
    The data below would pass validation without any issues::

        data = {'required': 1}
    """
    if is_callable(_object):
        validator = _object

        @wraps(validator)
        def decorated(value):
            if value:
                return validator(value)
            return
        return decorated
    else:
        def optional(*args):
            return _object
        optional.is_optional = True
        optional._object = _object
        return optional
python
def optional(_object):
    """
    This decorator has a double functionality: it can wrap validators and
    make them optional, or it can wrap keys and make that entry optional.

    **Optional Validator:**
    Allows validators to work only when there is a value that contains some
    data; otherwise it will just not pass the information to the actual
    validator and will not fail as a result.

    As any normal decorator, it can be used correctly with the decorator
    syntax or in the actual schema.

    This is how it would look in a schema::

        ('key', optional(my_validator))

    Where ``my_validator`` can be any validator that accepts a single
    argument. In case a class based validator is being used (like
    ``recursive`` or ``iterables``) then it would look like::

        ('key', optional(class_validator(('key', 'value'))))

    Of course, the schema should vary depending on your needs; it is just
    the way of constructing the validator call that should be important.

    **Optional Keys:**
    Sometimes a given data structure may present optional entries. For
    example this data::

        data = {'required': 1, 'optional': 2}

    To represent this, you will need to declare the `optional` key in the
    schema, but by wrapping the key with this decorator you will basically
    tell the validation engine that if that key is present it should be
    validated; otherwise, it should be skipped. This is how the schema
    would look::

        schema = (('required', 1), (optional('optional'), 1))

    The above schema would allow data that is missing the ``optional`` key.
    The data below would pass validation without any issues::

        data = {'required': 1}
    """
    if is_callable(_object):
        validator = _object

        @wraps(validator)
        def decorated(value):
            if value:
                return validator(value)
            return
        return decorated
    else:
        def optional(*args):
            return _object
        optional.is_optional = True
        optional._object = _object
        return optional
[ "def", "optional", "(", "_object", ")", ":", "if", "is_callable", "(", "_object", ")", ":", "validator", "=", "_object", "@", "wraps", "(", "validator", ")", "def", "decorated", "(", "value", ")", ":", "if", "value", ":", "return", "validator", "(", "value", ")", "return", "return", "decorated", "else", ":", "def", "optional", "(", "*", "args", ")", ":", "return", "_object", "optional", ".", "is_optional", "=", "True", "optional", ".", "_object", "=", "_object", "return", "optional" ]
This decorator has a double functionality: it can wrap validators and
make them optional, or it can wrap keys and make that entry optional.

**Optional Validator:**
Allows validators to work only when there is a value that contains some
data; otherwise it will just not pass the information to the actual
validator and will not fail as a result.

As any normal decorator, it can be used correctly with the decorator
syntax or in the actual schema.

This is how it would look in a schema::

    ('key', optional(my_validator))

Where ``my_validator`` can be any validator that accepts a single
argument. In case a class based validator is being used (like
``recursive`` or ``iterables``) then it would look like::

    ('key', optional(class_validator(('key', 'value'))))

Of course, the schema should vary depending on your needs; it is just
the way of constructing the validator call that should be important.

**Optional Keys:**
Sometimes a given data structure may present optional entries. For
example this data::

    data = {'required': 1, 'optional': 2}

To represent this, you will need to declare the `optional` key in the
schema, but by wrapping the key with this decorator you will basically
tell the validation engine that if that key is present it should be
validated; otherwise, it should be skipped. This is how the schema
would look::

    schema = (('required', 1), (optional('optional'), 1))

The above schema would allow data that is missing the ``optional`` key.
The data below would pass validation without any issues::

    data = {'required': 1}
[ "This", "decorator", "has", "a", "double", "functionality", "it", "can", "wrap", "validators", "and", "make", "them", "optional", "or", "it", "can", "wrap", "keys", "and", "make", "that", "entry", "optional", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/decorators.py#L95-L156
RowleyGroup/pyqueue
pyqueue/job.py
Job.set_walltime
def set_walltime(self, walltime):
    """
    Setting a walltime for the job

    >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30))

    :param walltime: Walltime of the job (an instance of timedelta)
    :returns: self
    :rtype: self
    """
    if not isinstance(walltime, timedelta):
        raise TypeError(
            'walltime must be an instance of datetime.timedelta. %s given'
            % type(walltime)
        )

    self._options['walltime'] = walltime
    return self
python
def set_walltime(self, walltime):
    """
    Setting a walltime for the job

    >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30))

    :param walltime: Walltime of the job (an instance of timedelta)
    :returns: self
    :rtype: self
    """
    if not isinstance(walltime, timedelta):
        raise TypeError(
            'walltime must be an instance of datetime.timedelta. %s given'
            % type(walltime)
        )

    self._options['walltime'] = walltime
    return self
[ "def", "set_walltime", "(", "self", ",", "walltime", ")", ":", "if", "not", "isinstance", "(", "walltime", ",", "timedelta", ")", ":", "raise", "TypeError", "(", "'walltime must be an instance of datetime.timedelta. %s given'", "%", "type", "(", "walltime", ")", ")", "self", ".", "_options", "[", "'walltime'", "]", "=", "walltime", "return", "self" ]
Setting a walltime for the job

>>> job.set_walltime(datetime.timedelta(hours=2, minutes=30))

:param walltime: Walltime of the job (an instance of timedelta)
:returns: self
:rtype: self
[ "Setting", "a", "walltime", "for", "the", "job" ]
train
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/job.py#L155-L172
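A hedged usage sketch based on the docstring's doctest; the `Job` constructor arguments, if any, are assumed away here, and the import path is inferred from the record above:

from datetime import timedelta
from pyqueue.job import Job

job = Job()
job.set_walltime(timedelta(hours=2, minutes=30))  # returns self, so chainable
try:
    job.set_walltime('02:30:00')  # anything but a timedelta is rejected
except TypeError as error:
    print(error)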
ocaballeror/LyricFetch
lyricfetch/scraping.py
get_url
def get_url(url, parser='html'):
    """
    Requests the specified url and returns a BeautifulSoup object with its
    contents.
    """
    url = request.quote(url, safe=':/?=&')
    logger.debug('URL: %s', url)
    req = request.Request(url, headers={'User-Agent': 'foobar'})
    try:
        response = request.urlopen(req)
    except HTTPError:
        raise
    except (ssl.SSLError, URLError):
        # Some websites (like metal-archives) use older TLS versions and can
        # make the ssl module throw a VERSION_TOO_LOW error. Here we try to
        # use the older TLSv1 to see if we can fix that
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        response = request.urlopen(req, context=context)

    response = response.read()
    if parser == 'html':
        return BeautifulSoup(response, 'html.parser', from_encoding='utf-8')
    elif parser == 'json':
        return json.loads(response)
    elif parser == 'raw':
        return response.decode()

    raise ValueError('Unrecognized parser')
python
def get_url(url, parser='html'):
    """
    Requests the specified url and returns a BeautifulSoup object with its
    contents.
    """
    url = request.quote(url, safe=':/?=&')
    logger.debug('URL: %s', url)
    req = request.Request(url, headers={'User-Agent': 'foobar'})
    try:
        response = request.urlopen(req)
    except HTTPError:
        raise
    except (ssl.SSLError, URLError):
        # Some websites (like metal-archives) use older TLS versions and can
        # make the ssl module throw a VERSION_TOO_LOW error. Here we try to
        # use the older TLSv1 to see if we can fix that
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        response = request.urlopen(req, context=context)

    response = response.read()
    if parser == 'html':
        return BeautifulSoup(response, 'html.parser', from_encoding='utf-8')
    elif parser == 'json':
        return json.loads(response)
    elif parser == 'raw':
        return response.decode()

    raise ValueError('Unrecognized parser')
[ "def", "get_url", "(", "url", ",", "parser", "=", "'html'", ")", ":", "url", "=", "request", ".", "quote", "(", "url", ",", "safe", "=", "':/?=&'", ")", "logger", ".", "debug", "(", "'URL: %s'", ",", "url", ")", "req", "=", "request", ".", "Request", "(", "url", ",", "headers", "=", "{", "'User-Agent'", ":", "'foobar'", "}", ")", "try", ":", "response", "=", "request", ".", "urlopen", "(", "req", ")", "except", "HTTPError", ":", "raise", "except", "(", "ssl", ".", "SSLError", ",", "URLError", ")", ":", "# Some websites (like metal-archives) use older TLS versions and can", "# make the ssl module trow a VERSION_TOO_LOW error. Here we try to use", "# the older TLSv1 to see if we can fix that", "context", "=", "ssl", ".", "SSLContext", "(", "ssl", ".", "PROTOCOL_TLSv1", ")", "response", "=", "request", ".", "urlopen", "(", "req", ",", "context", "=", "context", ")", "response", "=", "response", ".", "read", "(", ")", "if", "parser", "==", "'html'", ":", "return", "BeautifulSoup", "(", "response", ",", "'html.parser'", ",", "from_encoding", "=", "'utf-8'", ")", "elif", "parser", "==", "'json'", ":", "return", "json", ".", "loads", "(", "response", ")", "elif", "parser", "==", "'raw'", ":", "return", "response", ".", "decode", "(", ")", "raise", "ValueError", "(", "'Unrecognized parser'", ")" ]
Requests the specified url and returns a BeautifulSoup object with its contents.
[ "Requests", "the", "specified", "url", "and", "returns", "a", "BeautifulSoup", "object", "with", "its", "contents", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L17-L43
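A hedged usage sketch of the three parser modes (these lines perform live HTTP requests, so the URLs are illustrative):

soup = get_url('https://example.com')                      # BeautifulSoup doc
print(soup.find('title').get_text())
data = get_url('https://httpbin.org/json', parser='json')  # parsed into a dict
text = get_url('https://example.com', parser='raw')        # decoded str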
ocaballeror/LyricFetch
lyricfetch/scraping.py
get_lastfm
def get_lastfm(method, lastfm_key='', **kwargs):
    """
    Request the specified method from the lastfm api.
    """
    if not lastfm_key:
        if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']:
            logger.warning('No lastfm key configured')
            return ''
        else:
            lastfm_key = CONFIG['lastfm_key']

    url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json'
    url = url.format(method, lastfm_key)
    for key in kwargs:
        url += '&{}={}'.format(key, kwargs[key])

    response = get_url(url, parser='json')
    if 'error' in response:
        logger.error('Error number %d in lastfm query: %s',
                     response['error'], response['message'])
        return ''

    return response
python
def get_lastfm(method, lastfm_key='', **kwargs):
    """
    Request the specified method from the lastfm api.
    """
    if not lastfm_key:
        if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']:
            logger.warning('No lastfm key configured')
            return ''
        else:
            lastfm_key = CONFIG['lastfm_key']

    url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json'
    url = url.format(method, lastfm_key)
    for key in kwargs:
        url += '&{}={}'.format(key, kwargs[key])

    response = get_url(url, parser='json')
    if 'error' in response:
        logger.error('Error number %d in lastfm query: %s',
                     response['error'], response['message'])
        return ''

    return response
[ "def", "get_lastfm", "(", "method", ",", "lastfm_key", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "not", "lastfm_key", ":", "if", "'lastfm_key'", "not", "in", "CONFIG", "or", "not", "CONFIG", "[", "'lastfm_key'", "]", ":", "logger", ".", "warning", "(", "'No lastfm key configured'", ")", "return", "''", "else", ":", "lastfm_key", "=", "CONFIG", "[", "'lastfm_key'", "]", "url", "=", "'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json'", "url", "=", "url", ".", "format", "(", "method", ",", "lastfm_key", ")", "for", "key", "in", "kwargs", ":", "url", "+=", "'&{}={}'", ".", "format", "(", "key", ",", "kwargs", "[", "key", "]", ")", "response", "=", "get_url", "(", "url", ",", "parser", "=", "'json'", ")", "if", "'error'", "in", "response", ":", "logger", ".", "error", "(", "'Error number %d in lastfm query: %s'", ",", "response", "[", "'error'", "]", ",", "response", "[", "'message'", "]", ")", "return", "''", "return", "response" ]
Request the specified method from the lastfm api.
[ "Request", "the", "specified", "method", "from", "the", "lastfm", "api", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L46-L68
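A hedged usage sketch: it needs a valid last.fm API key (passed explicitly or configured in CONFIG['lastfm_key']), and 'track.getInfo' is a standard last.fm API method; the response layout is assumed from the public API docs:

info = get_lastfm('track.getInfo', artist='Cher', track='Believe')
if info:  # an empty string signals a missing key or an API error
    print(info.get('track', {}).get('name'))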
ocaballeror/LyricFetch
lyricfetch/scraping.py
normalize
def normalize(string, chars_to_remove=None, replacement=''):
    """
    Remove accented characters and such.

    The argument chars_to_remove is a dictionary that maps a string of chars
    to a single character. Every occurrence of every character in the first
    string will be replaced by that second character passed as value.

    If only one mapping is desired, chars_to_remove may be a single string,
    but a third parameter, replacement, must be provided to complete the
    translation.
    """
    ret = string.translate(str.maketrans({
        'á': 'a',
        'ä': 'a',
        'æ': 'ae',
        'é': 'e',
        'í': 'i',
        'ó': 'o',
        'ö': 'o',
        'ú': 'u',
        'ü': 'u',
        'ñ': 'n',
    }))

    if isinstance(chars_to_remove, dict):
        for chars, replace in chars_to_remove.items():
            reg = '[' + re.escape(chars) + ']'
            ret = re.sub(reg, replace, ret)
    elif isinstance(chars_to_remove, str):
        reg = '[' + re.escape(chars_to_remove) + ']'
        ret = re.sub(reg, replacement, ret)

    return ret
python
def normalize(string, chars_to_remove=None, replacement=''):
    """
    Remove accented characters and such.

    The argument chars_to_remove is a dictionary that maps a string of chars
    to a single character. Every occurrence of every character in the first
    string will be replaced by that second character passed as value.

    If only one mapping is desired, chars_to_remove may be a single string,
    but a third parameter, replacement, must be provided to complete the
    translation.
    """
    ret = string.translate(str.maketrans({
        'á': 'a',
        'ä': 'a',
        'æ': 'ae',
        'é': 'e',
        'í': 'i',
        'ó': 'o',
        'ö': 'o',
        'ú': 'u',
        'ü': 'u',
        'ñ': 'n',
    }))

    if isinstance(chars_to_remove, dict):
        for chars, replace in chars_to_remove.items():
            reg = '[' + re.escape(chars) + ']'
            ret = re.sub(reg, replace, ret)
    elif isinstance(chars_to_remove, str):
        reg = '[' + re.escape(chars_to_remove) + ']'
        ret = re.sub(reg, replacement, ret)

    return ret
[ "def", "normalize", "(", "string", ",", "chars_to_remove", "=", "None", ",", "replacement", "=", "''", ")", ":", "ret", "=", "string", ".", "translate", "(", "str", ".", "maketrans", "(", "{", "'á':", " ", "a',", "", "'ä':", " ", "a',", "", "'æ':", " ", "ae',", "", "'é':", " ", "e',", "", "'í':", " ", "i',", "", "'ó':", " ", "o',", "", "'ö':", " ", "o',", "", "'ú':", " ", "u',", "", "'ü':", " ", "u',", "", "'ñ':", " ", "n',", "", "}", ")", ")", "if", "isinstance", "(", "chars_to_remove", ",", "dict", ")", ":", "for", "chars", ",", "replace", "in", "chars_to_remove", ".", "items", "(", ")", ":", "reg", "=", "'['", "+", "re", ".", "escape", "(", "chars", ")", "+", "']'", "ret", "=", "re", ".", "sub", "(", "reg", ",", "replace", ",", "ret", ")", "elif", "isinstance", "(", "chars_to_remove", ",", "str", ")", ":", "reg", "=", "'['", "+", "re", ".", "escape", "(", "chars_to_remove", ")", "+", "']'", "ret", "=", "re", ".", "sub", "(", "reg", ",", "replacement", ",", "ret", ")", "return", "ret" ]
Remove accented characters and such.

The argument chars_to_remove is a dictionary that maps a string of chars
to a single character. Every occurrence of every character in the first
string will be replaced by that second character passed as value.

If only one mapping is desired, chars_to_remove may be a single string,
but a third parameter, replacement, must be provided to complete the
translation.
[ "Remove", "accented", "characters", "and", "such", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L71-L103
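A usage sketch for the three calling conventions of `normalize` (plain accent folding, a string of characters plus a replacement, and a dict of mappings), assuming the function is imported from lyricfetch.scraping:

print(normalize('Mötley Crüe'))                 # -> 'Motley Crue'
print(normalize('AC/DC Live!', '/!', ''))       # strips every '/' and '!'
print(normalize('a_b.c', {'_': ' ', '.': ''}))  # applies each mapping in turn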
ocaballeror/LyricFetch
lyricfetch/scraping.py
metrolyrics
def metrolyrics(song):
    """
    Returns the lyrics found in metrolyrics for the specified mp3 file or an
    empty string if not found.
    """
    translate = {URLESCAPE: '', ' ': '-'}
    title = song.title.lower()
    title = normalize(title, translate)
    title = re.sub(r'\-{2,}', '-', title)
    artist = song.artist.lower()
    artist = normalize(artist, translate)
    artist = re.sub(r'\-{2,}', '-', artist)

    url = 'http://www.metrolyrics.com/{}-lyrics-{}.html'.format(title, artist)
    soup = get_url(url)
    body = soup.find(id='lyrics-body-text')
    if body is None:
        return ''

    text = ''
    verses = body.find_all('p')
    for verse in verses:
        text += verse.get_text().strip()
        text += '\n\n'

    return text.strip()
python
def metrolyrics(song):
    """
    Returns the lyrics found in metrolyrics for the specified mp3 file or an
    empty string if not found.
    """
    translate = {URLESCAPE: '', ' ': '-'}
    title = song.title.lower()
    title = normalize(title, translate)
    title = re.sub(r'\-{2,}', '-', title)
    artist = song.artist.lower()
    artist = normalize(artist, translate)
    artist = re.sub(r'\-{2,}', '-', artist)

    url = 'http://www.metrolyrics.com/{}-lyrics-{}.html'.format(title, artist)
    soup = get_url(url)
    body = soup.find(id='lyrics-body-text')
    if body is None:
        return ''

    text = ''
    verses = body.find_all('p')
    for verse in verses:
        text += verse.get_text().strip()
        text += '\n\n'

    return text.strip()
[ "def", "metrolyrics", "(", "song", ")", ":", "translate", "=", "{", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "title", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "title", ")", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "artist", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "artist", ")", "url", "=", "'http://www.metrolyrics.com/{}-lyrics-{}.html'", ".", "format", "(", "title", ",", "artist", ")", "soup", "=", "get_url", "(", "url", ")", "body", "=", "soup", ".", "find", "(", "id", "=", "'lyrics-body-text'", ")", "if", "body", "is", "None", ":", "return", "''", "text", "=", "''", "verses", "=", "body", ".", "find_all", "(", "'p'", ")", "for", "verse", "in", "verses", ":", "text", "+=", "verse", ".", "get_text", "(", ")", ".", "strip", "(", ")", "text", "+=", "'\\n\\n'", "return", "text", ".", "strip", "(", ")" ]
Returns the lyrics found in metrolyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "metrolyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L106-L131
ocaballeror/LyricFetch
lyricfetch/scraping.py
darklyrics
def darklyrics(song):
    """
    Returns the lyrics found in darklyrics for the specified mp3 file or an
    empty string if not found.
    """
    # Darklyrics relies on the album name
    if not hasattr(song, 'album') or not song.album:
        song.fetch_album_name()
        if not hasattr(song, 'album') or not song.album:
            # If we don't have the name of the album, there's nothing we can
            # do on darklyrics
            return ''

    artist = song.artist.lower()
    artist = normalize(artist, URLESCAPES, '')
    album = song.album.lower()
    album = normalize(album, URLESCAPES, '')
    title = song.title

    url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album)
    soup = get_url(url)
    text = ''
    for header in soup.find_all('h3'):
        song = str(header.get_text())
        next_sibling = header.next_sibling
        if song.lower().find(title.lower()) != -1:
            while next_sibling is not None and\
                    (next_sibling.name is None or next_sibling.name != 'h3'):
                if next_sibling.name is None:
                    text += str(next_sibling)
                next_sibling = next_sibling.next_sibling

    return text.strip()
python
def darklyrics(song):
    """
    Returns the lyrics found in darklyrics for the specified mp3 file or an
    empty string if not found.
    """
    # Darklyrics relies on the album name
    if not hasattr(song, 'album') or not song.album:
        song.fetch_album_name()
        if not hasattr(song, 'album') or not song.album:
            # If we don't have the name of the album, there's nothing we can
            # do on darklyrics
            return ''

    artist = song.artist.lower()
    artist = normalize(artist, URLESCAPES, '')
    album = song.album.lower()
    album = normalize(album, URLESCAPES, '')
    title = song.title

    url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album)
    soup = get_url(url)
    text = ''
    for header in soup.find_all('h3'):
        song = str(header.get_text())
        next_sibling = header.next_sibling
        if song.lower().find(title.lower()) != -1:
            while next_sibling is not None and\
                    (next_sibling.name is None or next_sibling.name != 'h3'):
                if next_sibling.name is None:
                    text += str(next_sibling)
                next_sibling = next_sibling.next_sibling

    return text.strip()
[ "def", "darklyrics", "(", "song", ")", ":", "# Darklyrics relies on the album name", "if", "not", "hasattr", "(", "song", ",", "'album'", ")", "or", "not", "song", ".", "album", ":", "song", ".", "fetch_album_name", "(", ")", "if", "not", "hasattr", "(", "song", ",", "'album'", ")", "or", "not", "song", ".", "album", ":", "# If we don't have the name of the album, there's nothing we can do", "# on darklyrics", "return", "''", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "URLESCAPES", ",", "''", ")", "album", "=", "song", ".", "album", ".", "lower", "(", ")", "album", "=", "normalize", "(", "album", ",", "URLESCAPES", ",", "''", ")", "title", "=", "song", ".", "title", "url", "=", "'http://www.darklyrics.com/lyrics/{}/{}.html'", ".", "format", "(", "artist", ",", "album", ")", "soup", "=", "get_url", "(", "url", ")", "text", "=", "''", "for", "header", "in", "soup", ".", "find_all", "(", "'h3'", ")", ":", "song", "=", "str", "(", "header", ".", "get_text", "(", ")", ")", "next_sibling", "=", "header", ".", "next_sibling", "if", "song", ".", "lower", "(", ")", ".", "find", "(", "title", ".", "lower", "(", ")", ")", "!=", "-", "1", ":", "while", "next_sibling", "is", "not", "None", "and", "(", "next_sibling", ".", "name", "is", "None", "or", "next_sibling", ".", "name", "!=", "'h3'", ")", ":", "if", "next_sibling", ".", "name", "is", "None", ":", "text", "+=", "str", "(", "next_sibling", ")", "next_sibling", "=", "next_sibling", ".", "next_sibling", "return", "text", ".", "strip", "(", ")" ]
Returns the lyrics found in darklyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "darklyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L134-L167
ocaballeror/LyricFetch
lyricfetch/scraping.py
azlyrics
def azlyrics(song):
    """
    Returns the lyrics found in azlyrics for the specified mp3 file or an
    empty string if not found.
    """
    artist = song.artist.lower()
    if artist[0:2] == 'a ':
        artist = artist[2:]
    artist = normalize(artist, URLESCAPES, '')
    title = song.title.lower()
    title = normalize(title, URLESCAPES, '')

    url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title)
    soup = get_url(url)
    body = soup.find_all('div', class_='')[-1]
    return body.get_text().strip()
python
def azlyrics(song):
    """
    Returns the lyrics found in azlyrics for the specified mp3 file or an
    empty string if not found.
    """
    artist = song.artist.lower()
    if artist[0:2] == 'a ':
        artist = artist[2:]
    artist = normalize(artist, URLESCAPES, '')
    title = song.title.lower()
    title = normalize(title, URLESCAPES, '')

    url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title)
    soup = get_url(url)
    body = soup.find_all('div', class_='')[-1]
    return body.get_text().strip()
[ "def", "azlyrics", "(", "song", ")", ":", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "if", "artist", "[", "0", ":", "2", "]", "==", "'a '", ":", "artist", "=", "artist", "[", "2", ":", "]", "artist", "=", "normalize", "(", "artist", ",", "URLESCAPES", ",", "''", ")", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "URLESCAPES", ",", "''", ")", "url", "=", "'https://www.azlyrics.com/lyrics/{}/{}.html'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "body", "=", "soup", ".", "find_all", "(", "'div'", ",", "class_", "=", "''", ")", "[", "-", "1", "]", "return", "body", ".", "get_text", "(", ")", ".", "strip", "(", ")" ]
Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "azlyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L170-L185
ocaballeror/LyricFetch
lyricfetch/scraping.py
genius
def genius(song): """ Returns the lyrics found in genius.com for the specified mp3 file or an empty string if not found. """ translate = { '@': 'at', '&': 'and', URLESCAPE: '', ' ': '-' } artist = song.artist.capitalize() artist = normalize(artist, translate) title = song.title.capitalize() title = normalize(title, translate) url = 'https://www.genius.com/{}-{}-lyrics'.format(artist, title) soup = get_url(url) for content in soup.find_all('p'): if content: text = content.get_text().strip() if text: return text return ''
python
def genius(song): """ Returns the lyrics found in genius.com for the specified mp3 file or an empty string if not found. """ translate = { '@': 'at', '&': 'and', URLESCAPE: '', ' ': '-' } artist = song.artist.capitalize() artist = normalize(artist, translate) title = song.title.capitalize() title = normalize(title, translate) url = 'https://www.genius.com/{}-{}-lyrics'.format(artist, title) soup = get_url(url) for content in soup.find_all('p'): if content: text = content.get_text().strip() if text: return text return ''
[ "def", "genius", "(", "song", ")", ":", "translate", "=", "{", "'@'", ":", "'at'", ",", "'&'", ":", "'and'", ",", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "artist", "=", "song", ".", "artist", ".", "capitalize", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "title", "=", "song", ".", "title", ".", "capitalize", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "url", "=", "'https://www.genius.com/{}-{}-lyrics'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "for", "content", "in", "soup", ".", "find_all", "(", "'p'", ")", ":", "if", "content", ":", "text", "=", "content", ".", "get_text", "(", ")", ".", "strip", "(", ")", "if", "text", ":", "return", "text", "return", "''" ]
Returns the lyrics found in genius.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "genius", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L188-L212
ocaballeror/LyricFetch
lyricfetch/scraping.py
metalarchives
def metalarchives(song): """ Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found. """ artist = normalize(song.artist) title = normalize(song.title) url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs' url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1' soup = get_url(url, parser='json') if not soup: return '' song_id_re = re.compile(r'lyricsLink_([0-9]*)') ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub) if not ids: return '' if None in ids: ids.remove(None) ids = map(lambda a: a.group(1), ids) for song_id in ids: url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}' lyrics = get_url(url.format(song_id), parser='html') lyrics = lyrics.get_text().strip() if not re.search('lyrics not available', lyrics): return lyrics return ''
python
def metalarchives(song): """ Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found. """ artist = normalize(song.artist) title = normalize(song.title) url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs' url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1' soup = get_url(url, parser='json') if not soup: return '' song_id_re = re.compile(r'lyricsLink_([0-9]*)') ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub) if not ids: return '' if None in ids: ids.remove(None) ids = map(lambda a: a.group(1), ids) for song_id in ids: url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}' lyrics = get_url(url.format(song_id), parser='html') lyrics = lyrics.get_text().strip() if not re.search('lyrics not available', lyrics): return lyrics return ''
[ "def", "metalarchives", "(", "song", ")", ":", "artist", "=", "normalize", "(", "song", ".", "artist", ")", "title", "=", "normalize", "(", "song", ".", "title", ")", "url", "=", "'https://www.metal-archives.com/search/ajax-advanced/searching/songs'", "url", "+=", "f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1'", "soup", "=", "get_url", "(", "url", ",", "parser", "=", "'json'", ")", "if", "not", "soup", ":", "return", "''", "song_id_re", "=", "re", ".", "compile", "(", "r'lyricsLink_([0-9]*)'", ")", "ids", "=", "set", "(", "re", ".", "search", "(", "song_id_re", ",", "a", ")", "for", "sub", "in", "soup", "[", "'aaData'", "]", "for", "a", "in", "sub", ")", "if", "not", "ids", ":", "return", "''", "if", "None", "in", "ids", ":", "ids", ".", "remove", "(", "None", ")", "ids", "=", "map", "(", "lambda", "a", ":", "a", ".", "group", "(", "1", ")", ",", "ids", ")", "for", "song_id", "in", "ids", ":", "url", "=", "'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'", "lyrics", "=", "get_url", "(", "url", ".", "format", "(", "song_id", ")", ",", "parser", "=", "'html'", ")", "lyrics", "=", "lyrics", ".", "get_text", "(", ")", ".", "strip", "(", ")", "if", "not", "re", ".", "search", "(", "'lyrics not available'", ",", "lyrics", ")", ":", "return", "lyrics", "return", "''" ]
Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "MetalArchives", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L215-L244
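The key step in metalarchives() is pulling song ids out of the JSON search response with the lyricsLink_ pattern. A standalone check of that regex against a hypothetical 'aaData' cell (the markup below is illustrative, not copied from the site):

import re

song_id_re = re.compile(r'lyricsLink_([0-9]*)')
cell = '<a href="#" id="lyricsLink_123456" class="viewLyrics">show</a>'
match = song_id_re.search(cell)
print(match.group(1))  # 123456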
ocaballeror/LyricFetch
lyricfetch/scraping.py
lyricswikia
def lyricswikia(song): """ Returns the lyrics found in lyrics.wikia.com for the specified mp3 file or an empty string if not found. """ artist = song.artist.title() artist = normalize(artist, ' ', '_') title = song.title title = normalize(title, ' ', '_') url = 'https://lyrics.wikia.com/wiki/{}:{}'.format(artist, title) soup = get_url(url) text = '' content = soup.find('div', class_='lyricbox') if not content: return '' for unformat in content.findChildren(['i', 'b']): unformat.unwrap() for remove in content.findChildren(['div', 'span']): remove.decompose() nlcount = 0 for line in content.children: if line is None or line == '<br/>' or line == '\n': if nlcount == 2: text += '\n\n' nlcount = 0 else: nlcount += 1 else: nlcount = 0 text += str(line).replace('<br/>', '\n') return text.strip()
python
def lyricswikia(song): """ Returns the lyrics found in lyrics.wikia.com for the specified mp3 file or an empty string if not found. """ artist = song.artist.title() artist = normalize(artist, ' ', '_') title = song.title title = normalize(title, ' ', '_') url = 'https://lyrics.wikia.com/wiki/{}:{}'.format(artist, title) soup = get_url(url) text = '' content = soup.find('div', class_='lyricbox') if not content: return '' for unformat in content.findChildren(['i', 'b']): unformat.unwrap() for remove in content.findChildren(['div', 'span']): remove.decompose() nlcount = 0 for line in content.children: if line is None or line == '<br/>' or line == '\n': if nlcount == 2: text += '\n\n' nlcount = 0 else: nlcount += 1 else: nlcount = 0 text += str(line).replace('<br/>', '\n') return text.strip()
[ "def", "lyricswikia", "(", "song", ")", ":", "artist", "=", "song", ".", "artist", ".", "title", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "' '", ",", "'_'", ")", "title", "=", "song", ".", "title", "title", "=", "normalize", "(", "title", ",", "' '", ",", "'_'", ")", "url", "=", "'https://lyrics.wikia.com/wiki/{}:{}'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "text", "=", "''", "content", "=", "soup", ".", "find", "(", "'div'", ",", "class_", "=", "'lyricbox'", ")", "if", "not", "content", ":", "return", "''", "for", "unformat", "in", "content", ".", "findChildren", "(", "[", "'i'", ",", "'b'", "]", ")", ":", "unformat", ".", "unwrap", "(", ")", "for", "remove", "in", "content", ".", "findChildren", "(", "[", "'div'", ",", "'span'", "]", ")", ":", "remove", ".", "decompose", "(", ")", "nlcount", "=", "0", "for", "line", "in", "content", ".", "children", ":", "if", "line", "is", "None", "or", "line", "==", "'<br/>'", "or", "line", "==", "'\\n'", ":", "if", "nlcount", "==", "2", ":", "text", "+=", "'\\n\\n'", "nlcount", "=", "0", "else", ":", "nlcount", "+=", "1", "else", ":", "nlcount", "=", "0", "text", "+=", "str", "(", "line", ")", ".", "replace", "(", "'<br/>'", ",", "'\\n'", ")", "return", "text", ".", "strip", "(", ")" ]
Returns the lyrics found in lyrics.wikia.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "lyrics", ".", "wikia", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L247-L280
ocaballeror/LyricFetch
lyricfetch/scraping.py
musixmatch
def musixmatch(song): """ Returns the lyrics found in musixmatch for the specified mp3 file or an empty string if not found. """ escape = re.sub("'-¡¿", '', URLESCAPE) translate = { escape: '', ' ': '-' } artist = song.artist.title() artist = re.sub(r"( '|' )", '', artist) artist = re.sub(r"'", '-', artist) title = song.title title = re.sub(r"( '|' )", '', title) title = re.sub(r"'", '-', title) artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title) soup = get_url(url) text = '' contents = soup.find_all('p', class_='mxm-lyrics__content') for p in contents: text += p.get_text().strip() if p != contents[-1]: text += '\n\n' return text.strip()
python
def musixmatch(song): """ Returns the lyrics found in musixmatch for the specified mp3 file or an empty string if not found. """ escape = re.sub("'-¡¿", '', URLESCAPE) translate = { escape: '', ' ': '-' } artist = song.artist.title() artist = re.sub(r"( '|' )", '', artist) artist = re.sub(r"'", '-', artist) title = song.title title = re.sub(r"( '|' )", '', title) title = re.sub(r"'", '-', title) artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title) soup = get_url(url) text = '' contents = soup.find_all('p', class_='mxm-lyrics__content') for p in contents: text += p.get_text().strip() if p != contents[-1]: text += '\n\n' return text.strip()
[ "def", "musixmatch", "(", "song", ")", ":", "escape", "=", "re", ".", "sub", "(", "\"'-¡¿\", ", "'", ", ", "U", "LESCAPE)", "", "translate", "=", "{", "escape", ":", "''", ",", "' '", ":", "'-'", "}", "artist", "=", "song", ".", "artist", ".", "title", "(", ")", "artist", "=", "re", ".", "sub", "(", "r\"( '|' )\"", ",", "''", ",", "artist", ")", "artist", "=", "re", ".", "sub", "(", "r\"'\"", ",", "'-'", ",", "artist", ")", "title", "=", "song", ".", "title", "title", "=", "re", ".", "sub", "(", "r\"( '|' )\"", ",", "''", ",", "title", ")", "title", "=", "re", ".", "sub", "(", "r\"'\"", ",", "'-'", ",", "title", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "artist", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "artist", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "title", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "title", ")", "url", "=", "'https://www.musixmatch.com/lyrics/{}/{}'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "text", "=", "''", "contents", "=", "soup", ".", "find_all", "(", "'p'", ",", "class_", "=", "'mxm-lyrics__content'", ")", "for", "p", "in", "contents", ":", "text", "+=", "p", ".", "get_text", "(", ")", ".", "strip", "(", ")", "if", "p", "!=", "contents", "[", "-", "1", "]", ":", "text", "+=", "'\\n\\n'", "return", "text", ".", "strip", "(", ")" ]
Returns the lyrics found in musixmatch for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "musixmatch", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L283-L314
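musixmatch() finishes both slugs by collapsing any runs of hyphens left behind by the earlier substitutions. That collapse step on its own, with a made-up intermediate value:

import re

slug = 'Knights--Of--Cydonia'        # hypothetical intermediate value
print(re.sub(r'\-{2,}', '-', slug))  # Knights-Of-Cydonia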
ocaballeror/LyricFetch
lyricfetch/scraping.py
songlyrics
def songlyrics(song): """ Returns the lyrics found in songlyrics.com for the specified mp3 file or an empty string if not found. """ translate = { URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) artist = re.sub(r'\-{2,}', '-', artist) title = re.sub(r'\-{2,}', '-', title) url = 'http://www.songlyrics.com/{}/{}-lyrics'.format(artist, title) soup = get_url(url) text = soup.find(id='songLyricsDiv') if not text: return '' text = text.getText().strip() if not text or text.lower().startswith('we do not have the lyrics for'): return '' return text
python
def songlyrics(song): """ Returns the lyrics found in songlyrics.com for the specified mp3 file or an empty string if not found. """ translate = { URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) artist = re.sub(r'\-{2,}', '-', artist) title = re.sub(r'\-{2,}', '-', title) url = 'http://www.songlyrics.com/{}/{}-lyrics'.format(artist, title) soup = get_url(url) text = soup.find(id='songLyricsDiv') if not text: return '' text = text.getText().strip() if not text or text.lower().startswith('we do not have the lyrics for'): return '' return text
[ "def", "songlyrics", "(", "song", ")", ":", "translate", "=", "{", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "artist", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "artist", ")", "title", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "title", ")", "url", "=", "'http://www.songlyrics.com/{}/{}-lyrics'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "text", "=", "soup", ".", "find", "(", "id", "=", "'songLyricsDiv'", ")", "if", "not", "text", ":", "return", "''", "text", "=", "text", ".", "getText", "(", ")", ".", "strip", "(", ")", "if", "not", "text", "or", "text", ".", "lower", "(", ")", ".", "startswith", "(", "'we do not have the lyrics for'", ")", ":", "return", "''", "return", "text" ]
Returns the lyrics found in songlyrics.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "songlyrics", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L319-L346
ocaballeror/LyricFetch
lyricfetch/scraping.py
lyricscom
def lyricscom(song): """ Returns the lyrics found in lyrics.com for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() artist = normalize(artist, ' ', '+') title = song.title url = 'https://www.lyrics.com/artist/{}'.format(artist) soup = get_url(url) location = '' for a in soup.select('tr a'): if a.string.lower() == title.lower(): location = a['href'] break if location == '': return '' url = 'https://www.lyrics.com' + location soup = get_url(url) body = soup.find(id='lyric-body-text') if not body: return '' return body.get_text().strip()
python
def lyricscom(song): """ Returns the lyrics found in lyrics.com for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() artist = normalize(artist, ' ', '+') title = song.title url = 'https://www.lyrics.com/artist/{}'.format(artist) soup = get_url(url) location = '' for a in soup.select('tr a'): if a.string.lower() == title.lower(): location = a['href'] break if location == '': return '' url = 'https://www.lyrics.com' + location soup = get_url(url) body = soup.find(id='lyric-body-text') if not body: return '' return body.get_text().strip()
[ "def", "lyricscom", "(", "song", ")", ":", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "' '", ",", "'+'", ")", "title", "=", "song", ".", "title", "url", "=", "'https://www.lyrics.com/artist/{}'", ".", "format", "(", "artist", ")", "soup", "=", "get_url", "(", "url", ")", "location", "=", "''", "for", "a", "in", "soup", ".", "select", "(", "'tr a'", ")", ":", "if", "a", ".", "string", ".", "lower", "(", ")", "==", "title", ".", "lower", "(", ")", ":", "location", "=", "a", "[", "'href'", "]", "break", "if", "location", "==", "''", ":", "return", "''", "url", "=", "'https://www.lyrics.com'", "+", "location", "soup", "=", "get_url", "(", "url", ")", "body", "=", "soup", ".", "find", "(", "id", "=", "'lyric-body-text'", ")", "if", "not", "body", ":", "return", "''", "return", "body", ".", "get_text", "(", ")", ".", "strip", "(", ")" ]
Returns the lyrics found in lyrics.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "lyrics", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L349-L374
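lyricscom() needs two requests: it scans the artist page for a link whose text matches the title, then follows that link. The link-matching half in isolation, run against a tiny hypothetical fragment (assuming BeautifulSoup, which get_url() evidently returns):

from bs4 import BeautifulSoup

html = '<table><tr><td><a href="/lyric/1/Adele/Hello">Hello</a></td></tr></table>'
soup = BeautifulSoup(html, 'html.parser')
location = ''
for a in soup.select('tr a'):
    if a.string.lower() == 'hello':  # case-insensitive title match
        location = a['href']
        break
print(location)  # /lyric/1/Adele/Hello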
ocaballeror/LyricFetch
lyricfetch/scraping.py
vagalume
def vagalume(song): """ Returns the lyrics found in vagalume.com.br for the specified mp3 file or an empty string if not found. """ translate = { '@': 'a', URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) title = song.title.lower() title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) url = 'https://www.vagalume.com.br/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.select('div#lyrics') if body == []: return '' content = body[0] for br in content.find_all('br'): br.replace_with('\n') return content.get_text().strip()
python
def vagalume(song): """ Returns the lyrics found in vagalume.com.br for the specified mp3 file or an empty string if not found. """ translate = { '@': 'a', URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) title = song.title.lower() title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) url = 'https://www.vagalume.com.br/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.select('div#lyrics') if body == []: return '' content = body[0] for br in content.find_all('br'): br.replace_with('\n') return content.get_text().strip()
[ "def", "vagalume", "(", "song", ")", ":", "translate", "=", "{", "'@'", ":", "'a'", ",", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "artist", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "artist", ")", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "title", "=", "re", ".", "sub", "(", "r'\\-{2,}'", ",", "'-'", ",", "title", ")", "url", "=", "'https://www.vagalume.com.br/{}/{}.html'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "body", "=", "soup", ".", "select", "(", "'div#lyrics'", ")", "if", "body", "==", "[", "]", ":", "return", "''", "content", "=", "body", "[", "0", "]", "for", "br", "in", "content", ".", "find_all", "(", "'br'", ")", ":", "br", ".", "replace_with", "(", "'\\n'", ")", "return", "content", ".", "get_text", "(", ")", ".", "strip", "(", ")" ]
Returns the lyrics found in vagalume.com.br for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "vagalume", ".", "com", ".", "br", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L377-L404
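vagalume() uses a small BeautifulSoup trick worth isolating: every <br> tag is replaced with a real newline before get_text(), so line breaks survive extraction. On a hypothetical fragment:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div id="lyrics">first line<br/>second line</div>',
                     'html.parser')
for br in soup.find_all('br'):
    br.replace_with('\n')  # keep the break as text
print(soup.get_text())  # 'first line\nsecond line'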
ocaballeror/LyricFetch
lyricfetch/scraping.py
lyricsmode
def lyricsmode(song): """ Returns the lyrics found in lyricsmode.com for the specified mp3 file or an empty string if not found. """ translate = { URLESCAPE: '', ' ': '_' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) artist = re.sub(r'\_{2,}', '_', artist) title = re.sub(r'\_{2,}', '_', title) if artist[0:4].lower() == 'the ': artist = artist[4:] if artist[0:2].lower() == 'a ': prefix = artist[2] else: prefix = artist[0] url = 'http://www.lyricsmode.com/lyrics/{}/{}/{}.html' url = url.format(prefix, artist, title) soup = get_url(url) content = soup.find(id='lyrics_text') return content.get_text().strip()
python
def lyricsmode(song): """ Returns the lyrics found in lyricsmode.com for the specified mp3 file or an empty string if not found. """ translate = { URLESCAPE: '', ' ': '_' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) artist = re.sub(r'\_{2,}', '_', artist) title = re.sub(r'\_{2,}', '_', title) if artist[0:4].lower() == 'the ': artist = artist[4:] if artist[0:2].lower() == 'a ': prefix = artist[2] else: prefix = artist[0] url = 'http://www.lyricsmode.com/lyrics/{}/{}/{}.html' url = url.format(prefix, artist, title) soup = get_url(url) content = soup.find(id='lyrics_text') return content.get_text().strip()
[ "def", "lyricsmode", "(", "song", ")", ":", "translate", "=", "{", "URLESCAPE", ":", "''", ",", "' '", ":", "'_'", "}", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "artist", "=", "re", ".", "sub", "(", "r'\\_{2,}'", ",", "'_'", ",", "artist", ")", "title", "=", "re", ".", "sub", "(", "r'\\_{2,}'", ",", "'_'", ",", "title", ")", "if", "artist", "[", "0", ":", "4", "]", ".", "lower", "(", ")", "==", "'the '", ":", "artist", "=", "artist", "[", "4", ":", "]", "if", "artist", "[", "0", ":", "2", "]", ".", "lower", "(", ")", "==", "'a '", ":", "prefix", "=", "artist", "[", "2", "]", "else", ":", "prefix", "=", "artist", "[", "0", "]", "url", "=", "'http://www.lyricsmode.com/lyrics/{}/{}/{}.html'", "url", "=", "url", ".", "format", "(", "prefix", ",", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "content", "=", "soup", ".", "find", "(", "id", "=", "'lyrics_text'", ")", "return", "content", ".", "get_text", "(", ")", ".", "strip", "(", ")" ]
Returns the lyrics found in lyricsmode.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "lyricsmode", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L407-L437
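lyricsmode.com indexes artists under a one-letter prefix, dropping a leading 'the ' and, for names like 'A Perfect Circle', using the first letter after the article. Note that in the code above the 'the ' test runs after spaces have already been turned into underscores, so it can never match; the sketch below shows the rule as presumably intended, applied before normalization:

def url_prefix(artist):
    artist = artist.lower()
    if artist.startswith('the '):
        artist = artist[4:]
    # for 'a <name>' the site indexes under the first letter of <name>
    return artist[2] if artist.startswith('a ') else artist[0]

print(url_prefix('The Ocean'))         # o
print(url_prefix('A Perfect Circle'))  # p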
ocaballeror/LyricFetch
lyricfetch/scraping.py
letras
def letras(song): """ Returns the lyrics found in letras.com for the specified mp3 file or an empty string if not found. """ translate = { '&': 'a', URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) url = 'https://www.letras.com/{}/{}/'.format(artist, title) soup = get_url(url) if not soup: return '' found_title = soup.select_one('div.cnt-head_title h1') if not found_title: # The site didn't find lyrics and took us to the homepage return '' found_title = found_title.get_text() found_title = re.sub(r'[\W_]+', '', found_title.lower()) if found_title != re.sub(r'[\W_]+', '', song.title.lower()): # The site took us to the wrong song page return '' content = soup.find('article') if not content: return '' text = '' for br in content.find_all('br'): br.replace_with('\n') for p in content.find_all('p'): text += p.get_text() + '\n\n' return text.strip()
python
def letras(song): """ Returns the lyrics found in letras.com for the specified mp3 file or an empty string if not found. """ translate = { '&': 'a', URLESCAPE: '', ' ': '-' } artist = song.artist.lower() artist = normalize(artist, translate) title = song.title.lower() title = normalize(title, translate) url = 'https://www.letras.com/{}/{}/'.format(artist, title) soup = get_url(url) if not soup: return '' found_title = soup.select_one('div.cnt-head_title h1') if not found_title: # The site didn't find lyrics and took us to the homepage return '' found_title = found_title.get_text() found_title = re.sub(r'[\W_]+', '', found_title.lower()) if found_title != re.sub(r'[\W_]+', '', song.title.lower()): # The site took us to the wrong song page return '' content = soup.find('article') if not content: return '' text = '' for br in content.find_all('br'): br.replace_with('\n') for p in content.find_all('p'): text += p.get_text() + '\n\n' return text.strip()
[ "def", "letras", "(", "song", ")", ":", "translate", "=", "{", "'&'", ":", "'a'", ",", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "artist", "=", "normalize", "(", "artist", ",", "translate", ")", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "url", "=", "'https://www.letras.com/{}/{}/'", ".", "format", "(", "artist", ",", "title", ")", "soup", "=", "get_url", "(", "url", ")", "if", "not", "soup", ":", "return", "''", "found_title", "=", "soup", ".", "select_one", "(", "'div.cnt-head_title h1'", ")", "if", "not", "found_title", ":", "# The site didn't find lyrics and took us to the homepage", "return", "''", "found_title", "=", "found_title", ".", "get_text", "(", ")", "found_title", "=", "re", ".", "sub", "(", "r'[\\W_]+'", ",", "''", ",", "found_title", ".", "lower", "(", ")", ")", "if", "found_title", "!=", "re", ".", "sub", "(", "r'[\\W_]+'", ",", "''", ",", "song", ".", "title", ".", "lower", "(", ")", ")", ":", "# The site took us to the wrong song page", "return", "''", "content", "=", "soup", ".", "find", "(", "'article'", ")", "if", "not", "content", ":", "return", "''", "text", "=", "''", "for", "br", "in", "content", ".", "find_all", "(", "'br'", ")", ":", "br", ".", "replace_with", "(", "'\\n'", ")", "for", "p", "in", "content", ".", "find_all", "(", "'p'", ")", ":", "text", "+=", "p", ".", "get_text", "(", ")", "+", "'\\n\\n'", "return", "text", ".", "strip", "(", ")" ]
Returns the lyrics found in letras.com for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "letras", ".", "com", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L440-L482
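letras() guards against the site silently redirecting to the wrong song by comparing titles with every non-alphanumeric character stripped. That comparison in isolation:

import re

def squash(text):
    # same normalization as the found_title check above
    return re.sub(r'[\W_]+', '', text.lower())

print(squash("Don't Stop Me Now") == squash('dont stop me now!'))  # True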
ocaballeror/LyricFetch
lyricfetch/scraping.py
id_source
def id_source(source, full=False): """ Returns the name of a website-scraping function. """ if source not in source_ids: return '' if full: return source_ids[source][1] else: return source_ids[source][0]
python
def id_source(source, full=False): """ Returns the name of a website-scraping function. """ if source not in source_ids: return '' if full: return source_ids[source][1] else: return source_ids[source][0]
[ "def", "id_source", "(", "source", ",", "full", "=", "False", ")", ":", "if", "source", "not", "in", "source_ids", ":", "return", "''", "if", "full", ":", "return", "source_ids", "[", "source", "]", "[", "1", "]", "else", ":", "return", "source_ids", "[", "source", "]", "[", "0", "]" ]
Returns the name of a website-scraping function.
[ "Returns", "the", "name", "of", "a", "website", "-", "scrapping", "function", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L501-L511
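id_source() is a plain lookup into a module-level source_ids table that maps each scraper function to a (short id, full name) pair. A self-contained sketch with a placeholder entry (the real table and its values live elsewhere in scraping.py):

def fake_scraper(song):  # stand-in for one of the scrapers above
    return ''

source_ids = {fake_scraper: ('FAK', 'FakeLyrics.com')}  # hypothetical entry

def id_source(source, full=False):  # restated so the snippet runs standalone
    if source not in source_ids:
        return ''
    return source_ids[source][1] if full else source_ids[source][0]

print(id_source(fake_scraper))             # FAK
print(id_source(fake_scraper, full=True))  # FakeLyrics.com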
tipsi/tipsi_tools
tipsi_tools/mon_server/gitlab_runners.py
update_gitlab_loop
async def update_gitlab_loop(update_metrics, params): """ app = Sanic() mserver = MetricsServer(app) mserver.add_task(update_gitlab_loop, params={'url': GITLAB_URL, 'token': token}) """ gitlab_api = gitlab.Gitlab(url=params['url'], private_token=params['token'], api_version=4) while True: try: metrics = update_gitlab_runners(gitlab_api) update_metrics(metrics) except Exception: update_metrics({}) log.exception('During loop') await asyncio.sleep(LOOP)
python
async def update_gitlab_loop(update_metrics, params): """ app = Sanic() mserver = MetricsServer(app) mserver.add_task(update_gitlab_loop, params={'url': GITLAB_URL, 'token': token}) """ gitlab_api = gitlab.Gitlab(url=params['url'], private_token=params['token'], api_version=4) while True: try: metrics = update_gitlab_runners(gitlab_api) update_metrics(metrics) except Exception: update_metrics({}) log.exception('During loop') await asyncio.sleep(LOOP)
[ "async", "def", "update_gitlab_loop", "(", "update_metrics", ",", "params", ")", ":", "gitlab_api", "=", "gitlab", ".", "Gitlab", "(", "url", "=", "params", "[", "'url'", "]", ",", "private_token", "=", "params", "[", "'token'", "]", ",", "api_version", "=", "4", ")", "while", "True", ":", "try", ":", "metrics", "=", "update_gitlab_runners", "(", "gitlab_api", ")", "update_metrics", "(", "metrics", ")", "except", "Exception", ":", "update_metrics", "(", "{", "}", ")", "log", ".", "exception", "(", "'During loop'", ")", "await", "asyncio", ".", "sleep", "(", "LOOP", ")" ]
app = Sanic() mserver = MetricsServer(app) mserver.add_task(update_gitlab_loop, params={'url': GITLAB_URL, 'token': token})
[ "app", "=", "Sanic", "()", "mserver", "=", "MetricsServer", "(", "app", ")", "mserver", ".", "add_task", "(", "update_gitlab_loop", "params", "=", "{", "url", ":", "GITLAB_URL", "token", ":", "token", "}", ")" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/mon_server/gitlab_runners.py#L28-L42
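update_gitlab_loop() follows a resilient-polling shape that generalizes well: publish fresh metrics on every pass, and on any failure publish an empty dict so consumers stop reading stale values. A generic sketch of the same pattern, with LOOP standing in for the module's polling interval:

import asyncio
import logging

log = logging.getLogger(__name__)
LOOP = 30  # assumed interval in seconds; the real constant is defined elsewhere

async def poll_forever(fetch_metrics, update_metrics):
    while True:
        try:
            update_metrics(fetch_metrics())
        except Exception:
            update_metrics({})  # clear rather than serve stale data
            log.exception('During loop')
        await asyncio.sleep(LOOP)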
nion-software/nionswift-instrumentation-kit
nion/instrumentation/stem_controller.py
STEMController.set_probe_position
def set_probe_position(self, new_probe_position): """ Set the probe position, in normalized coordinates with origin at top left. """ if new_probe_position is not None: # convert the probe position to a FloatPoint and limit it to the 0.0 to 1.0 range in both axes. new_probe_position = Geometry.FloatPoint.make(new_probe_position) new_probe_position = Geometry.FloatPoint(y=max(min(new_probe_position.y, 1.0), 0.0), x=max(min(new_probe_position.x, 1.0), 0.0)) old_probe_position = self.__probe_position_value.value if ((old_probe_position is None) != (new_probe_position is None)) or (old_probe_position != new_probe_position): # this path is only taken if set_probe_position is not called as a result of the probe_position model # value changing. self.__probe_position_value.value = new_probe_position # update the probe position for listeners and also explicitly update for probe_graphic_connections. self.probe_state_changed_event.fire(self.probe_state, self.probe_position)
python
def set_probe_position(self, new_probe_position): """ Set the probe position, in normalized coordinates with origin at top left. """ if new_probe_position is not None: # convert the probe position to a FloatPoint and limit it to the 0.0 to 1.0 range in both axes. new_probe_position = Geometry.FloatPoint.make(new_probe_position) new_probe_position = Geometry.FloatPoint(y=max(min(new_probe_position.y, 1.0), 0.0), x=max(min(new_probe_position.x, 1.0), 0.0)) old_probe_position = self.__probe_position_value.value if ((old_probe_position is None) != (new_probe_position is None)) or (old_probe_position != new_probe_position): # this path is only taken if set_probe_position is not called as a result of the probe_position model # value changing. self.__probe_position_value.value = new_probe_position # update the probe position for listeners and also explicitly update for probe_graphic_connections. self.probe_state_changed_event.fire(self.probe_state, self.probe_position)
[ "def", "set_probe_position", "(", "self", ",", "new_probe_position", ")", ":", "if", "new_probe_position", "is", "not", "None", ":", "# convert the probe position to a FloatPoint and limit it to the 0.0 to 1.0 range in both axes.", "new_probe_position", "=", "Geometry", ".", "FloatPoint", ".", "make", "(", "new_probe_position", ")", "new_probe_position", "=", "Geometry", ".", "FloatPoint", "(", "y", "=", "max", "(", "min", "(", "new_probe_position", ".", "y", ",", "1.0", ")", ",", "0.0", ")", ",", "x", "=", "max", "(", "min", "(", "new_probe_position", ".", "x", ",", "1.0", ")", ",", "0.0", ")", ")", "old_probe_position", "=", "self", ".", "__probe_position_value", ".", "value", "if", "(", "(", "old_probe_position", "is", "None", ")", "!=", "(", "new_probe_position", "is", "None", ")", ")", "or", "(", "old_probe_position", "!=", "new_probe_position", ")", ":", "# this path is only taken if set_probe_position is not called as a result of the probe_position model", "# value changing.", "self", ".", "__probe_position_value", ".", "value", "=", "new_probe_position", "# update the probe position for listeners and also explicitly update for probe_graphic_connections.", "self", ".", "probe_state_changed_event", ".", "fire", "(", "self", ".", "probe_state", ",", "self", ".", "probe_position", ")" ]
Set the probe position, in normalized coordinates with origin at top left.
[ "Set", "the", "probe", "position", "in", "normalized", "coordinates", "with", "origin", "at", "top", "left", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/stem_controller.py#L157-L170
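The core of set_probe_position() is clamping both normalized coordinates into [0.0, 1.0] before the change is published. The same clamp without the Geometry.FloatPoint dependency:

def clamp_unit(x, y):
    # force both coordinates into the closed interval [0.0, 1.0]
    return max(min(x, 1.0), 0.0), max(min(y, 1.0), 0.0)

print(clamp_unit(1.2, -0.1))  # (1.0, 0.0)
print(clamp_unit(0.5, 0.5))   # (0.5, 0.5)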
nion-software/nionswift-instrumentation-kit
nion/instrumentation/stem_controller.py
STEMController.apply_metadata_groups
def apply_metadata_groups(self, properties: typing.Dict, metatdata_groups: typing.Tuple[typing.List[str], str]) -> None: """Apply metadata groups to properties. Metadata groups is a tuple with two elements. The first is a list of strings representing a dict-path in which to add the controls. The second is a control group from which to read a list of controls to be added as name value pairs to the dict-path. """ pass
python
def apply_metadata_groups(self, properties: typing.Dict, metatdata_groups: typing.Tuple[typing.List[str], str]) -> None: """Apply metadata groups to properties. Metadata groups is a tuple with two elements. The first is a list of strings representing a dict-path in which to add the controls. The second is a control group from which to read a list of controls to be added as name value pairs to the dict-path. """ pass
[ "def", "apply_metadata_groups", "(", "self", ",", "properties", ":", "typing", ".", "Dict", ",", "metatdata_groups", ":", "typing", ".", "Tuple", "[", "typing", ".", "List", "[", "str", "]", ",", "str", "]", ")", "->", "None", ":", "pass" ]
Apply metadata groups to properties. Metadata groups is a tuple with two elements. The first is a list of strings representing a dict-path in which to add the controls. The second is a control group from which to read a list of controls to be added as name value pairs to the dict-path.
[ "Apply", "metadata", "groups", "to", "properties", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/stem_controller.py#L222-L229
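apply_metadata_groups() is a stub here, but its docstring pins down the contract: walk a dict-path, then write control name/value pairs at the end of it. A sketch of what one group would do to a properties dict, using entirely hypothetical path and control names:

properties = {}
group = (['metadata', 'instrument', 'defocus_group'], 'DefocusControls')

path, control_group = group
target = properties
for key in path:
    target = target.setdefault(key, {})  # create the dict-path as needed
# a concrete implementation would read every control in control_group and
# store it as a name/value pair; one made-up example:
target['C10'] = 5e-7

print(properties)
# {'metadata': {'instrument': {'defocus_group': {'C10': 5e-07}}}}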
numan/py-analytics
analytics/__init__.py
create_analytic_backend
def create_analytic_backend(settings): """ Creates a new Analytics backend from the settings :param settings: Dictionary of settings for the analytics backend :returns: A backend object implementing the analytics api >>> >>> analytics = create_analytic_backend({ >>> 'backend': 'analytics.backends.redis.Redis', >>> 'settings': { >>> 'defaults': { >>> 'host': 'localhost', >>> 'port': 6379, >>> 'db': 0, >>> }, >>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}] >>> }, >>> }) """ backend = settings.get('backend') if isinstance(backend, basestring): backend = import_string(backend) elif backend: backend = backend else: raise KeyError('backend') return backend(settings.get("settings", {}))
python
def create_analytic_backend(settings): """ Creates a new Analytics backend from the settings :param settings: Dictionary of settings for the analytics backend :returns: A backend object implementing the analytics api >>> >>> analytics = create_analytic_backend({ >>> 'backend': 'analytics.backends.redis.Redis', >>> 'settings': { >>> 'defaults': { >>> 'host': 'localhost', >>> 'port': 6379, >>> 'db': 0, >>> }, >>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}] >>> }, >>> }) """ backend = settings.get('backend') if isinstance(backend, basestring): backend = import_string(backend) elif backend: backend = backend else: raise KeyError('backend') return backend(settings.get("settings", {}))
[ "def", "create_analytic_backend", "(", "settings", ")", ":", "backend", "=", "settings", ".", "get", "(", "'backend'", ")", "if", "isinstance", "(", "backend", ",", "basestring", ")", ":", "backend", "=", "import_string", "(", "backend", ")", "elif", "backend", ":", "backend", "=", "backend", "else", ":", "raise", "KeyError", "(", "'backend'", ")", "return", "backend", "(", "settings", ".", "get", "(", "\"settings\"", ",", "{", "}", ")", ")" ]
Creates a new Analytics backend from the settings :param settings: Dictionary of settings for the analytics backend :returns: A backend object implementing the analytics api >>> >>> analytics = create_analytic_backend({ >>> 'backend': 'analytics.backends.redis.Redis', >>> 'settings': { >>> 'defaults': { >>> 'host': 'localhost', >>> 'port': 6379, >>> 'db': 0, >>> }, >>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}] >>> }, >>> })
[ "Creates", "a", "new", "Analytics", "backend", "from", "the", "settings" ]
train
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/__init__.py#L27-L55
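When 'backend' is a dotted string, create_analytic_backend() defers to import_string to resolve it to a class. A minimal stand-in for that helper, assuming it behaves like the usual module-path importer (the real one is imported from elsewhere in the package):

from importlib import import_module

def import_string(dotted_path):
    # split 'pkg.module.Attr' into module path and attribute name
    module_name, _, attr = dotted_path.rpartition('.')
    return getattr(import_module(module_name), attr)

cls = import_string('collections.OrderedDict')  # illustrative target
print(cls().__class__.__name__)  # OrderedDict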
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py
ScanControlStateController.initialize_state
def initialize_state(self): """ Call this to initialize the state of the UI after everything has been connected. """ if self.__scan_hardware_source: self.__profile_changed_event_listener = self.__scan_hardware_source.profile_changed_event.listen(self.__update_profile_index) self.__frame_parameters_changed_event_listener = self.__scan_hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters) self.__data_item_states_changed_event_listener = self.__scan_hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed) self.__acquisition_state_changed_event_listener = self.__scan_hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed) self.__probe_state_changed_event_listener = self.__scan_hardware_source.probe_state_changed_event.listen(self.__probe_state_changed) self.__channel_state_changed_event_listener = self.__scan_hardware_source.channel_state_changed_event.listen(self.__channel_state_changed) subscan_state_model = self.__scan_hardware_source.subscan_state_model def subscan_state_changed(name): if callable(self.on_subscan_state_changed): self.on_subscan_state_changed(subscan_state_model.value) self.__subscan_state_changed_listener = subscan_state_model.property_changed_event.listen(subscan_state_changed) subscan_state_changed("value") if self.on_display_name_changed: self.on_display_name_changed(self.display_name) if self.on_subscan_state_changed: self.on_subscan_state_changed(self.__scan_hardware_source.subscan_state) channel_count = self.__scan_hardware_source.channel_count if self.on_channel_count_changed: self.on_channel_count_changed(channel_count) self.__channel_enabled = [False] * channel_count for channel_index in range(channel_count): channel_id, name, enabled = self.__scan_hardware_source.get_channel_state(channel_index) self.__channel_state_changed(channel_index, channel_id, name, enabled) self.__channel_enabled[channel_index] = enabled self.__update_buttons() if self.on_profiles_changed: profile_items = list(ScanControlStateController.profiles.items()) profile_items.sort(key=lambda k_v: k_v[1]) profiles = map(lambda k_v: k_v[0], profile_items) self.on_profiles_changed(profiles) self.__update_profile_index(self.__scan_hardware_source.selected_profile_index) if self.on_linked_changed: self.on_linked_changed(self.__linked) if self.on_simulate_button_state_changed: use_simulator = self.__scan_hardware_source.use_hardware_simulator self.on_simulate_button_state_changed(use_simulator, use_simulator) if self.on_data_item_states_changed: self.on_data_item_states_changed(list()) probe_state = self.__scan_hardware_source.probe_state probe_position = self.__scan_hardware_source.probe_position self.__probe_state_changed(probe_state, probe_position)
python
def initialize_state(self): """ Call this to initialize the state of the UI after everything has been connected. """ if self.__scan_hardware_source: self.__profile_changed_event_listener = self.__scan_hardware_source.profile_changed_event.listen(self.__update_profile_index) self.__frame_parameters_changed_event_listener = self.__scan_hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters) self.__data_item_states_changed_event_listener = self.__scan_hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed) self.__acquisition_state_changed_event_listener = self.__scan_hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed) self.__probe_state_changed_event_listener = self.__scan_hardware_source.probe_state_changed_event.listen(self.__probe_state_changed) self.__channel_state_changed_event_listener = self.__scan_hardware_source.channel_state_changed_event.listen(self.__channel_state_changed) subscan_state_model = self.__scan_hardware_source.subscan_state_model def subscan_state_changed(name): if callable(self.on_subscan_state_changed): self.on_subscan_state_changed(subscan_state_model.value) self.__subscan_state_changed_listener = subscan_state_model.property_changed_event.listen(subscan_state_changed) subscan_state_changed("value") if self.on_display_name_changed: self.on_display_name_changed(self.display_name) if self.on_subscan_state_changed: self.on_subscan_state_changed(self.__scan_hardware_source.subscan_state) channel_count = self.__scan_hardware_source.channel_count if self.on_channel_count_changed: self.on_channel_count_changed(channel_count) self.__channel_enabled = [False] * channel_count for channel_index in range(channel_count): channel_id, name, enabled = self.__scan_hardware_source.get_channel_state(channel_index) self.__channel_state_changed(channel_index, channel_id, name, enabled) self.__channel_enabled[channel_index] = enabled self.__update_buttons() if self.on_profiles_changed: profile_items = list(ScanControlStateController.profiles.items()) profile_items.sort(key=lambda k_v: k_v[1]) profiles = map(lambda k_v: k_v[0], profile_items) self.on_profiles_changed(profiles) self.__update_profile_index(self.__scan_hardware_source.selected_profile_index) if self.on_linked_changed: self.on_linked_changed(self.__linked) if self.on_simulate_button_state_changed: use_simulator = self.__scan_hardware_source.use_hardware_simulator self.on_simulate_button_state_changed(use_simulator, use_simulator) if self.on_data_item_states_changed: self.on_data_item_states_changed(list()) probe_state = self.__scan_hardware_source.probe_state probe_position = self.__scan_hardware_source.probe_position self.__probe_state_changed(probe_state, probe_position)
[ "def", "initialize_state", "(", "self", ")", ":", "if", "self", ".", "__scan_hardware_source", ":", "self", ".", "__profile_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "profile_changed_event", ".", "listen", "(", "self", ".", "__update_profile_index", ")", "self", ".", "__frame_parameters_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "frame_parameters_changed_event", ".", "listen", "(", "self", ".", "__update_frame_parameters", ")", "self", ".", "__data_item_states_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "data_item_states_changed_event", ".", "listen", "(", "self", ".", "__data_item_states_changed", ")", "self", ".", "__acquisition_state_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "acquisition_state_changed_event", ".", "listen", "(", "self", ".", "__acquisition_state_changed", ")", "self", ".", "__probe_state_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "probe_state_changed_event", ".", "listen", "(", "self", ".", "__probe_state_changed", ")", "self", ".", "__channel_state_changed_event_listener", "=", "self", ".", "__scan_hardware_source", ".", "channel_state_changed_event", ".", "listen", "(", "self", ".", "__channel_state_changed", ")", "subscan_state_model", "=", "self", ".", "__scan_hardware_source", ".", "subscan_state_model", "def", "subscan_state_changed", "(", "name", ")", ":", "if", "callable", "(", "self", ".", "on_subscan_state_changed", ")", ":", "self", ".", "on_subscan_state_changed", "(", "subscan_state_model", ".", "value", ")", "self", ".", "__subscan_state_changed_listener", "=", "subscan_state_model", ".", "property_changed_event", ".", "listen", "(", "subscan_state_changed", ")", "subscan_state_changed", "(", "\"value\"", ")", "if", "self", ".", "on_display_name_changed", ":", "self", ".", "on_display_name_changed", "(", "self", ".", "display_name", ")", "if", "self", ".", "on_subscan_state_changed", ":", "self", ".", "on_subscan_state_changed", "(", "self", ".", "__scan_hardware_source", ".", "subscan_state", ")", "channel_count", "=", "self", ".", "__scan_hardware_source", ".", "channel_count", "if", "self", ".", "on_channel_count_changed", ":", "self", ".", "on_channel_count_changed", "(", "channel_count", ")", "self", ".", "__channel_enabled", "=", "[", "False", "]", "*", "channel_count", "for", "channel_index", "in", "range", "(", "channel_count", ")", ":", "channel_id", ",", "name", ",", "enabled", "=", "self", ".", "__scan_hardware_source", ".", "get_channel_state", "(", "channel_index", ")", "self", ".", "__channel_state_changed", "(", "channel_index", ",", "channel_id", ",", "name", ",", "enabled", ")", "self", ".", "__channel_enabled", "[", "channel_index", "]", "=", "enabled", "self", ".", "__update_buttons", "(", ")", "if", "self", ".", "on_profiles_changed", ":", "profile_items", "=", "list", "(", "ScanControlStateController", ".", "profiles", ".", "items", "(", ")", ")", "profile_items", ".", "sort", "(", "key", "=", "lambda", "k_v", ":", "k_v", "[", "1", "]", ")", "profiles", "=", "map", "(", "lambda", "k_v", ":", "k_v", "[", "0", "]", ",", "profile_items", ")", "self", ".", "on_profiles_changed", "(", "profiles", ")", "self", ".", "__update_profile_index", "(", "self", ".", "__scan_hardware_source", ".", "selected_profile_index", ")", "if", "self", ".", "on_linked_changed", ":", "self", ".", "on_linked_changed", "(", "self", ".", "__linked", ")", "if", "self", ".", "on_simulate_button_state_changed", 
":", "use_simulator", "=", "self", ".", "__scan_hardware_source", ".", "use_hardware_simulator", "self", ".", "on_simulate_button_state_changed", "(", "use_simulator", ",", "use_simulator", ")", "if", "self", ".", "on_data_item_states_changed", ":", "self", ".", "on_data_item_states_changed", "(", "list", "(", ")", ")", "probe_state", "=", "self", ".", "__scan_hardware_source", ".", "probe_state", "probe_position", "=", "self", ".", "__scan_hardware_source", ".", "probe_position", "self", ".", "__probe_state_changed", "(", "probe_state", ",", "probe_position", ")" ]
Call this to initialize the state of the UI after everything has been connected.
[ "Call", "this", "to", "initialize", "the", "state", "of", "the", "UI", "after", "everything", "has", "been", "connected", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py#L262-L307
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py
ScanControlStateController.handle_play_pause_clicked
def handle_play_pause_clicked(self): """ Call this when the user clicks the play/pause button. """ if self.__scan_hardware_source: if self.is_playing: self.__scan_hardware_source.stop_playing() else: self.__scan_hardware_source.start_playing()
python
def handle_play_pause_clicked(self): """ Call this when the user clicks the play/pause button. """ if self.__scan_hardware_source: if self.is_playing: self.__scan_hardware_source.stop_playing() else: self.__scan_hardware_source.start_playing()
[ "def", "handle_play_pause_clicked", "(", "self", ")", ":", "if", "self", ".", "__scan_hardware_source", ":", "if", "self", ".", "is_playing", ":", "self", ".", "__scan_hardware_source", ".", "stop_playing", "(", ")", "else", ":", "self", ".", "__scan_hardware_source", ".", "start_playing", "(", ")" ]
Call this when the user clicks the play/pause button.
[ "Call", "this", "when", "the", "user", "clicks", "the", "play", "/", "pause", "button", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py#L315-L321
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py
ScanControlStateController.handle_record_clicked
def handle_record_clicked(self, callback_fn): """ Call this when the user clicks the record button. """ assert callable(callback_fn) if self.__scan_hardware_source: def finish_record(data_and_metadata_list): record_index = self.__scan_hardware_source.record_index for data_and_metadata in data_and_metadata_list: data_item = DataItem.DataItem() data_item.ensure_data_source() display_name = data_and_metadata.metadata.get("hardware_source", dict()).get("hardware_source_name") display_name = display_name if display_name else _("Record") channel_name = data_and_metadata.metadata.get("hardware_source", dict()).get("channel_name") title_base = "{} ({})".format(display_name, channel_name) if channel_name else display_name data_item.title = "{} {}".format(title_base, record_index) data_item.set_xdata(data_and_metadata) callback_fn(data_item) self.__scan_hardware_source.record_index += 1 self.__scan_hardware_source.record_async(finish_record)
python
def handle_record_clicked(self, callback_fn): """ Call this when the user clicks the record button. """ assert callable(callback_fn) if self.__scan_hardware_source: def finish_record(data_and_metadata_list): record_index = self.__scan_hardware_source.record_index for data_and_metadata in data_and_metadata_list: data_item = DataItem.DataItem() data_item.ensure_data_source() display_name = data_and_metadata.metadata.get("hardware_source", dict()).get("hardware_source_name") display_name = display_name if display_name else _("Record") channel_name = data_and_metadata.metadata.get("hardware_source", dict()).get("channel_name") title_base = "{} ({})".format(display_name, channel_name) if channel_name else display_name data_item.title = "{} {}".format(title_base, record_index) data_item.set_xdata(data_and_metadata) callback_fn(data_item) self.__scan_hardware_source.record_index += 1 self.__scan_hardware_source.record_async(finish_record)
[ "def", "handle_record_clicked", "(", "self", ",", "callback_fn", ")", ":", "assert", "callable", "(", "callback_fn", ")", "if", "self", ".", "__scan_hardware_source", ":", "def", "finish_record", "(", "data_and_metadata_list", ")", ":", "record_index", "=", "self", ".", "__scan_hardware_source", ".", "record_index", "for", "data_and_metadata", "in", "data_and_metadata_list", ":", "data_item", "=", "DataItem", ".", "DataItem", "(", ")", "data_item", ".", "ensure_data_source", "(", ")", "display_name", "=", "data_and_metadata", ".", "metadata", ".", "get", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", ".", "get", "(", "\"hardware_source_name\"", ")", "display_name", "=", "display_name", "if", "display_name", "else", "_", "(", "\"Record\"", ")", "channel_name", "=", "data_and_metadata", ".", "metadata", ".", "get", "(", "\"hardware_source\"", ",", "dict", "(", ")", ")", ".", "get", "(", "\"channel_name\"", ")", "title_base", "=", "\"{} ({})\"", ".", "format", "(", "display_name", ",", "channel_name", ")", "if", "channel_name", "else", "display_name", "data_item", ".", "title", "=", "\"{} {}\"", ".", "format", "(", "title_base", ",", "record_index", ")", "data_item", ".", "set_xdata", "(", "data_and_metadata", ")", "callback_fn", "(", "data_item", ")", "self", ".", "__scan_hardware_source", ".", "record_index", "+=", "1", "self", ".", "__scan_hardware_source", ".", "record_async", "(", "finish_record", ")" ]
Call this when the user clicks the record button.
[ "Call", "this", "when", "the", "user", "clicks", "the", "record", "button", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py#L330-L351
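The only fiddly part of handle_record_clicked() is how it assembles a title for each recorded channel. That formatting in isolation, with made-up values:

display_name, channel_name, record_index = 'Scan', 'HAADF', 3  # hypothetical
title_base = '{} ({})'.format(display_name, channel_name) if channel_name else display_name
print('{} {}'.format(title_base, record_index))  # Scan (HAADF) 3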
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py
IconCanvasItem.size_to_content
def size_to_content(self, horizontal_padding=None, vertical_padding=None): """ Size the canvas item to the text content. """ if horizontal_padding is None: horizontal_padding = 0 if vertical_padding is None: vertical_padding = 0 self.sizing.set_fixed_size(Geometry.IntSize(18 + 2 * horizontal_padding, 18 + 2 * vertical_padding))
python
def size_to_content(self, horizontal_padding=None, vertical_padding=None): """ Size the canvas item to the text content. """ if horizontal_padding is None: horizontal_padding = 0 if vertical_padding is None: vertical_padding = 0 self.sizing.set_fixed_size(Geometry.IntSize(18 + 2 * horizontal_padding, 18 + 2 * vertical_padding))
[ "def", "size_to_content", "(", "self", ",", "horizontal_padding", "=", "None", ",", "vertical_padding", "=", "None", ")", ":", "if", "horizontal_padding", "is", "None", ":", "horizontal_padding", "=", "0", "if", "vertical_padding", "is", "None", ":", "vertical_padding", "=", "0", "self", ".", "sizing", ".", "set_fixed_size", "(", "Geometry", ".", "IntSize", "(", "18", "+", "2", "*", "horizontal_padding", ",", "18", "+", "2", "*", "vertical_padding", ")", ")" ]
Size the canvas item to the text content.
[ "Size", "the", "canvas", "item", "to", "the", "text", "content", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/ScanControlPanel.py#L581-L590
Parsely/redis-fluster
fluster/utils.py
round_controlled
def round_controlled(cycled_iterable, rounds=1): """Return after <rounds> passes through a cycled iterable.""" round_start = None rounds_completed = 0 for item in cycled_iterable: if round_start is None: round_start = item elif item == round_start: rounds_completed += 1 if rounds_completed == rounds: return yield item
python
def round_controlled(cycled_iterable, rounds=1): """Return after <rounds> passes through a cycled iterable.""" round_start = None rounds_completed = 0 for item in cycled_iterable: if round_start is None: round_start = item elif item == round_start: rounds_completed += 1 if rounds_completed == rounds: return yield item
[ "def", "round_controlled", "(", "cycled_iterable", ",", "rounds", "=", "1", ")", ":", "round_start", "=", "None", "rounds_completed", "=", "0", "for", "item", "in", "cycled_iterable", ":", "if", "round_start", "is", "None", ":", "round_start", "=", "item", "elif", "item", "==", "round_start", ":", "rounds_completed", "+=", "1", "if", "rounds_completed", "==", "rounds", ":", "return", "yield", "item" ]
Return after <rounds> passes through a cycled iterable.
[ "Return", "after", "<rounds", ">", "passes", "through", "a", "cycled", "iterable", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/utils.py#L1-L15
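A quick usage sketch for round_controlled (hypothetical driver code, not part of the record above): pairing it with itertools.cycle bounds an otherwise infinite iterator to a fixed number of passes over the pool.

import itertools

clients = ["a", "b", "c"]
# The generator returns just before the first-seen item would start its
# (rounds + 1)-th pass, so two rounds yield each client exactly twice.
seen = list(round_controlled(itertools.cycle(clients), rounds=2))
assert seen == ["a", "b", "c", "a", "b", "c"]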
ocaballeror/LyricFetch
lyricfetch/stats.py
Record.success_rate
def success_rate(self): """ Returns a float with the rate of success from all the logged results. """ if self.successes + self.fails == 0: success_rate = 0 else: total_attempts = self.successes + self.fails success_rate = (self.successes * 100 / total_attempts) return success_rate
python
def success_rate(self): """ Returns a float with the rate of success from all the logged results. """ if self.successes + self.fails == 0: success_rate = 0 else: total_attempts = self.successes + self.fails success_rate = (self.successes * 100 / total_attempts) return success_rate
[ "def", "success_rate", "(", "self", ")", ":", "if", "self", ".", "successes", "+", "self", ".", "fails", "==", "0", ":", "success_rate", "=", "0", "else", ":", "total_attempts", "=", "self", ".", "successes", "+", "self", ".", "fails", "success_rate", "=", "(", "self", ".", "successes", "*", "100", "/", "total_attempts", ")", "return", "success_rate" ]
Returns a float with the rate of success from all the logged results.
[ "Returns", "a", "float", "with", "the", "rate", "of", "success", "from", "all", "the", "logged", "results", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/stats.py#L46-L56
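For clarity, the percentage arithmetic behind success_rate, shown standalone (the counter values here are made up):

successes, fails = 30, 10
total_attempts = successes + fails
# Guard against division by zero when nothing has been logged yet.
rate = successes * 100 / total_attempts if total_attempts else 0
assert rate == 75.0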
ocaballeror/LyricFetch
lyricfetch/stats.py
Stats.add_result
def add_result(self, source, found, runtime): """ Adds a new record to the statistics 'database'. This function is intended to be called after a website has been scraped. The arguments indicate the function that was called, the time taken to scrape the website and a boolean indicating if the lyrics were found or not. """ self.source_stats[source.__name__].add_runtime(runtime) if found: self.source_stats[source.__name__].successes += 1 else: self.source_stats[source.__name__].fails += 1
python
def add_result(self, source, found, runtime): """ Adds a new record to the statistics 'database'. This function is intended to be called after a website has been scraped. The arguments indicate the function that was called, the time taken to scrape the website and a boolean indicating if the lyrics were found or not. """ self.source_stats[source.__name__].add_runtime(runtime) if found: self.source_stats[source.__name__].successes += 1 else: self.source_stats[source.__name__].fails += 1
[ "def", "add_result", "(", "self", ",", "source", ",", "found", ",", "runtime", ")", ":", "self", ".", "source_stats", "[", "source", ".", "__name__", "]", ".", "add_runtime", "(", "runtime", ")", "if", "found", ":", "self", ".", "source_stats", "[", "source", ".", "__name__", "]", ".", "successes", "+=", "1", "else", ":", "self", ".", "source_stats", "[", "source", ".", "__name__", "]", ".", "fails", "+=", "1" ]
Adds a new record to the statistics 'database'. This function is intended to be called after a website has been scraped. The arguments indicate the function that was called, the time taken to scrape the website and a boolean indicating if the lyrics were found or not.
[ "Adds", "a", "new", "record", "to", "the", "statistics", "database", ".", "This", "function", "is", "intended", "to", "be", "called", "after", "a", "website", "has", "been", "scraped", ".", "The", "arguments", "indicate", "the", "function", "that", "was", "called", "the", "time", "taken", "to", "scrap", "the", "website", "and", "a", "boolean", "indicating", "if", "the", "lyrics", "were", "found", "or", "not", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/stats.py#L67-L78
ocaballeror/LyricFetch
lyricfetch/stats.py
Stats.avg_time
def avg_time(self, source=None): """ Returns the average time taken to scrape lyrics. If a string or a function is passed as source, return the average time taken to scrape lyrics from that source, otherwise return the total average. """ if source is None: runtimes = [] for rec in self.source_stats.values(): runtimes.extend([r for r in rec.runtimes if r != 0]) return avg(runtimes) else: if callable(source): return avg(self.source_stats[source.__name__].runtimes) else: return avg(self.source_stats[source].runtimes)
python
def avg_time(self, source=None): """ Returns the average time taken to scrape lyrics. If a string or a function is passed as source, return the average time taken to scrape lyrics from that source, otherwise return the total average. """ if source is None: runtimes = [] for rec in self.source_stats.values(): runtimes.extend([r for r in rec.runtimes if r != 0]) return avg(runtimes) else: if callable(source): return avg(self.source_stats[source.__name__].runtimes) else: return avg(self.source_stats[source].runtimes)
[ "def", "avg_time", "(", "self", ",", "source", "=", "None", ")", ":", "if", "source", "is", "None", ":", "runtimes", "=", "[", "]", "for", "rec", "in", "self", ".", "source_stats", ".", "values", "(", ")", ":", "runtimes", ".", "extend", "(", "[", "r", "for", "r", "in", "rec", ".", "runtimes", "if", "r", "!=", "0", "]", ")", "return", "avg", "(", "runtimes", ")", "else", ":", "if", "callable", "(", "source", ")", ":", "return", "avg", "(", "self", ".", "source_stats", "[", "source", ".", "__name__", "]", ".", "runtimes", ")", "else", ":", "return", "avg", "(", "self", ".", "source_stats", "[", "source", "]", ".", "runtimes", ")" ]
Returns the average time taken to scrape lyrics. If a string or a function is passed as source, return the average time taken to scrape lyrics from that source, otherwise return the total average.
[ "Returns", "the", "average", "time", "taken", "to", "scrape", "lyrics", ".", "If", "a", "string", "or", "a", "function", "is", "passed", "as", "source", "return", "the", "average", "time", "taken", "to", "scrape", "lyrics", "from", "that", "source", "otherwise", "return", "the", "total", "average", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/stats.py#L80-L95
ocaballeror/LyricFetch
lyricfetch/stats.py
Stats.calculate
def calculate(self): """ Calculate the overall counts of best, worst, fastest, slowest, total found, total not found and total runtime. Results are returned in a dictionary with the above parameters as keys. """ best, worst, fastest, slowest = (), (), (), () found = notfound = total_time = 0 for source, rec in self.source_stats.items(): if not best or rec.successes > best[1]: best = (source, rec.successes, rec.success_rate()) if not worst or rec.successes < worst[1]: worst = (source, rec.successes, rec.success_rate()) avg_time = self.avg_time(source) if not fastest or (avg_time != 0 and avg_time < fastest[1]): fastest = (source, avg_time) if not slowest or (avg_time != 0 and avg_time > slowest[1]): slowest = (source, avg_time) found += rec.successes notfound += rec.fails total_time += sum(rec.runtimes) return { 'best': best, 'worst': worst, 'fastest': fastest, 'slowest': slowest, 'found': found, 'notfound': notfound, 'total_time': total_time }
python
def calculate(self): """ Calculate the overall counts of best, worst, fastest, slowest, total found, total not found and total runtime. Results are returned in a dictionary with the above parameters as keys. """ best, worst, fastest, slowest = (), (), (), () found = notfound = total_time = 0 for source, rec in self.source_stats.items(): if not best or rec.successes > best[1]: best = (source, rec.successes, rec.success_rate()) if not worst or rec.successes < worst[1]: worst = (source, rec.successes, rec.success_rate()) avg_time = self.avg_time(source) if not fastest or (avg_time != 0 and avg_time < fastest[1]): fastest = (source, avg_time) if not slowest or (avg_time != 0 and avg_time > slowest[1]): slowest = (source, avg_time) found += rec.successes notfound += rec.fails total_time += sum(rec.runtimes) return { 'best': best, 'worst': worst, 'fastest': fastest, 'slowest': slowest, 'found': found, 'notfound': notfound, 'total_time': total_time }
[ "def", "calculate", "(", "self", ")", ":", "best", ",", "worst", ",", "fastest", ",", "slowest", "=", "(", ")", ",", "(", ")", ",", "(", ")", ",", "(", ")", "found", "=", "notfound", "=", "total_time", "=", "0", "for", "source", ",", "rec", "in", "self", ".", "source_stats", ".", "items", "(", ")", ":", "if", "not", "best", "or", "rec", ".", "successes", ">", "best", "[", "1", "]", ":", "best", "=", "(", "source", ",", "rec", ".", "successes", ",", "rec", ".", "success_rate", "(", ")", ")", "if", "not", "worst", "or", "rec", ".", "successes", "<", "worst", "[", "1", "]", ":", "worst", "=", "(", "source", ",", "rec", ".", "successes", ",", "rec", ".", "success_rate", "(", ")", ")", "avg_time", "=", "self", ".", "avg_time", "(", "source", ")", "if", "not", "fastest", "or", "(", "avg_time", "!=", "0", "and", "avg_time", "<", "fastest", "[", "1", "]", ")", ":", "fastest", "=", "(", "source", ",", "avg_time", ")", "if", "not", "slowest", "or", "(", "avg_time", "!=", "0", "and", "avg_time", ">", "slowest", "[", "1", "]", ")", ":", "slowest", "=", "(", "source", ",", "avg_time", ")", "found", "+=", "rec", ".", "successes", "notfound", "+=", "rec", ".", "fails", "total_time", "+=", "sum", "(", "rec", ".", "runtimes", ")", "return", "{", "'best'", ":", "best", ",", "'worst'", ":", "worst", ",", "'fastest'", ":", "fastest", ",", "'slowest'", ":", "slowest", ",", "'found'", ":", "found", ",", "'notfound'", ":", "notfound", ",", "'total_time'", ":", "total_time", "}" ]
Calculate the overall counts of best, worst, fastest, slowest, total found, total not found and total runtime. Results are returned in a dictionary with the above parameters as keys.
[ "Calculate", "the", "overall", "counts", "of", "best", "worst", "fastest", "slowest", "total", "found", "total", "not", "found", "and", "total", "runtime" ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/stats.py#L97-L130
ocaballeror/LyricFetch
lyricfetch/stats.py
Stats.print_stats
def print_stats(self): """ Print a series of relevant stats about a full execution. This function is meant to be called at the end of the program. """ stats = self.calculate() total_time = '%d:%02d:%02d' % (stats['total_time'] / 3600, (stats['total_time'] % 3600) / 60, (stats['total_time'] % 3600) % 60) output = """\ Total runtime: {total_time} Lyrics found: {found} Lyrics not found:{notfound} Most useful source:\ {best} ({best_count} lyrics found) ({best_rate:.2f}% success rate) Least useful source:\ {worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate) Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search) Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search) Average time per website: {avg_time:.2f}s xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxx PER WEBSITE STATS: xxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx """ output = output.format(total_time=total_time, found=stats['found'], notfound=stats['notfound'], best=stats['best'][0].capitalize(), best_count=stats['best'][1], best_rate=stats['best'][2], worst=stats['worst'][0].capitalize(), worst_count=stats['worst'][1], worst_rate=stats['worst'][2], fastest=stats['fastest'][0].capitalize(), fastest_time=stats['fastest'][1], slowest=stats['slowest'][0].capitalize(), slowest_time=stats['slowest'][1], avg_time=self.avg_time()) for source in sources: stat = str(self.source_stats[source.__name__]) output += f'\n{source.__name__.upper()}\n{stat}\n' print(output)
python
def print_stats(self): """ Print a series of relevant stats about a full execution. This function is meant to be called at the end of the program. """ stats = self.calculate() total_time = '%d:%02d:%02d' % (stats['total_time'] / 3600, (stats['total_time'] % 3600) / 60, (stats['total_time'] % 3600) % 60) output = """\ Total runtime: {total_time} Lyrics found: {found} Lyrics not found:{notfound} Most useful source:\ {best} ({best_count} lyrics found) ({best_rate:.2f}% success rate) Least useful source:\ {worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate) Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search) Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search) Average time per website: {avg_time:.2f}s xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxx PER WEBSITE STATS: xxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx """ output = output.format(total_time=total_time, found=stats['found'], notfound=stats['notfound'], best=stats['best'][0].capitalize(), best_count=stats['best'][1], best_rate=stats['best'][2], worst=stats['worst'][0].capitalize(), worst_count=stats['worst'][1], worst_rate=stats['worst'][2], fastest=stats['fastest'][0].capitalize(), fastest_time=stats['fastest'][1], slowest=stats['slowest'][0].capitalize(), slowest_time=stats['slowest'][1], avg_time=self.avg_time()) for source in sources: stat = str(self.source_stats[source.__name__]) output += f'\n{source.__name__.upper()}\n{stat}\n' print(output)
[ "def", "print_stats", "(", "self", ")", ":", "stats", "=", "self", ".", "calculate", "(", ")", "total_time", "=", "'%d:%02d:%02d'", "%", "(", "stats", "[", "'total_time'", "]", "/", "3600", ",", "(", "stats", "[", "'total_time'", "]", "/", "3600", ")", "/", "60", ",", "(", "stats", "[", "'total_time'", "]", "%", "3600", ")", "%", "60", ")", "output", "=", "\"\"\"\\\nTotal runtime: {total_time}\n Lyrics found: {found}\n Lyrics not found:{notfound}\n Most useful source:\\\n{best} ({best_count} lyrics found) ({best_rate:.2f}% success rate)\n Least useful source:\\\n{worst} ({worst_count} lyrics found) ({worst_rate:.2f}% success rate)\n Fastest website to scrape: {fastest} (Avg: {fastest_time:.2f}s per search)\n Slowest website to scrape: {slowest} (Avg: {slowest_time:.2f}s per search)\n Average time per website: {avg_time:.2f}s\n\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nxxx PER WEBSITE STATS: xxx\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\"\"\"", "output", "=", "output", ".", "format", "(", "total_time", "=", "total_time", ",", "found", "=", "stats", "[", "'found'", "]", ",", "notfound", "=", "stats", "[", "'notfound'", "]", ",", "best", "=", "stats", "[", "'best'", "]", "[", "0", "]", ".", "capitalize", "(", ")", ",", "best_count", "=", "stats", "[", "'best'", "]", "[", "1", "]", ",", "best_rate", "=", "stats", "[", "'best'", "]", "[", "2", "]", ",", "worst", "=", "stats", "[", "'worst'", "]", "[", "0", "]", ".", "capitalize", "(", ")", ",", "worst_count", "=", "stats", "[", "'worst'", "]", "[", "1", "]", ",", "worst_rate", "=", "stats", "[", "'worst'", "]", "[", "2", "]", ",", "fastest", "=", "stats", "[", "'fastest'", "]", "[", "0", "]", ".", "capitalize", "(", ")", ",", "fastest_time", "=", "stats", "[", "'fastest'", "]", "[", "1", "]", ",", "slowest", "=", "stats", "[", "'slowest'", "]", "[", "0", "]", ".", "capitalize", "(", ")", ",", "slowest_time", "=", "stats", "[", "'slowest'", "]", "[", "1", "]", ",", "avg_time", "=", "self", ".", "avg_time", "(", ")", ")", "for", "source", "in", "sources", ":", "stat", "=", "str", "(", "self", ".", "source_stats", "[", "source", ".", "__name__", "]", ")", "output", "+=", "f'\\n{source.__name__.upper()}\\n{stat}\\n'", "print", "(", "output", ")" ]
Print a series of relevant stats about a full execution. This function is meant to be called at the end of the program.
[ "Print", "a", "series", "of", "relevant", "stats", "about", "a", "full", "execution", ".", "This", "function", "is", "meant", "to", "be", "called", "at", "the", "end", "of", "the", "program", "." ]
train
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/stats.py#L132-L175
thiagopbueno/rddl2tf
rddl2tf/fluentscope.py
TensorFluentScope.broadcast
def broadcast(cls, s1: ParamsList, s2: ParamsList) -> BroadcastTuple: '''It broadcasts the smaller scope over the larger scope. It handles scope intersection as well as differences in scopes in order to output a resulting scope so that input scopes are contained within it (i.e., input scopes are subscopes of the output scope). Also, if necessary, it outputs permutations of the input scopes so that tensor broadcasting invariants are not violated. Note: For more information on broadcasting, please report to NumPy's official documentation available at the following URLs: 1. https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html 2. https://docs.scipy.org/doc/numpy/reference/generated/numpy.broadcast.html Args: s1: A fluent's scope. s2: A fluent's scope. Returns: A tuple with the output scope and permutations of the input scopes. ''' if len(s1) == 0: return s2, [], [] if len(s2) == 0: return s1, [], [] subscope = list(set(s1) & set(s2)) if len(subscope) == len(s1): subscope = s1 elif len(subscope) == len(s2): subscope = s2 perm1 = [] if s1[-len(subscope):] != subscope: i = 0 for var in s1: if var not in subscope: perm1.append(i) i += 1 else: j = subscope.index(var) perm1.append(len(s1) - len(subscope) + j) perm2 = [] if s2[-len(subscope):] != subscope: i = 0 for var in s2: if var not in subscope: perm2.append(i) i += 1 else: j = subscope.index(var) perm2.append(len(s2) - len(subscope) + j) scope = [] # type: ParamsList if len(s1) >= len(s2): if perm1 == []: scope = s1 else: for i in range(len(s1)): scope.append(s1[perm1.index(i)]) else: if perm2 == []: scope = s2 else: for i in range(len(s2)): scope.append(s2[perm2.index(i)]) return (scope, perm1, perm2)
python
def broadcast(cls, s1: ParamsList, s2: ParamsList) -> BroadcastTuple: '''It broadcasts the smaller scope over the larger scope. It handles scope intersection as well as differences in scopes in order to output a resulting scope so that input scopes are contained within it (i.e., input scopes are subscopes of the output scope). Also, if necessary, it outputs permutations of the input scopes so that tensor broadcasting invariants are not violated. Note: For more information on broadcasting, please report to NumPy's official documentation available at the following URLs: 1. https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html 2. https://docs.scipy.org/doc/numpy/reference/generated/numpy.broadcast.html Args: s1: A fluent's scope. s2: A fluent's scope. Returns: A tuple with the output scope and permutations of the input scopes. ''' if len(s1) == 0: return s2, [], [] if len(s2) == 0: return s1, [], [] subscope = list(set(s1) & set(s2)) if len(subscope) == len(s1): subscope = s1 elif len(subscope) == len(s2): subscope = s2 perm1 = [] if s1[-len(subscope):] != subscope: i = 0 for var in s1: if var not in subscope: perm1.append(i) i += 1 else: j = subscope.index(var) perm1.append(len(s1) - len(subscope) + j) perm2 = [] if s2[-len(subscope):] != subscope: i = 0 for var in s2: if var not in subscope: perm2.append(i) i += 1 else: j = subscope.index(var) perm2.append(len(s2) - len(subscope) + j) scope = [] # type: ParamsList if len(s1) >= len(s2): if perm1 == []: scope = s1 else: for i in range(len(s1)): scope.append(s1[perm1.index(i)]) else: if perm2 == []: scope = s2 else: for i in range(len(s2)): scope.append(s2[perm2.index(i)]) return (scope, perm1, perm2)
[ "def", "broadcast", "(", "cls", ",", "s1", ":", "ParamsList", ",", "s2", ":", "ParamsList", ")", "->", "BroadcastTuple", ":", "if", "len", "(", "s1", ")", "==", "0", ":", "return", "s2", ",", "[", "]", ",", "[", "]", "if", "len", "(", "s2", ")", "==", "0", ":", "return", "s1", ",", "[", "]", ",", "[", "]", "subscope", "=", "list", "(", "set", "(", "s1", ")", "&", "set", "(", "s2", ")", ")", "if", "len", "(", "subscope", ")", "==", "len", "(", "s1", ")", ":", "subscope", "=", "s1", "elif", "len", "(", "subscope", ")", "==", "len", "(", "s2", ")", ":", "subscope", "=", "s2", "perm1", "=", "[", "]", "if", "s1", "[", "-", "len", "(", "subscope", ")", ":", "]", "!=", "subscope", ":", "i", "=", "0", "for", "var", "in", "s1", ":", "if", "var", "not", "in", "subscope", ":", "perm1", ".", "append", "(", "i", ")", "i", "+=", "1", "else", ":", "j", "=", "subscope", ".", "index", "(", "var", ")", "perm1", ".", "append", "(", "len", "(", "s1", ")", "-", "len", "(", "subscope", ")", "+", "j", ")", "perm2", "=", "[", "]", "if", "s2", "[", "-", "len", "(", "subscope", ")", ":", "]", "!=", "subscope", ":", "i", "=", "0", "for", "var", "in", "s2", ":", "if", "var", "not", "in", "subscope", ":", "perm2", ".", "append", "(", "i", ")", "i", "+=", "1", "else", ":", "j", "=", "subscope", ".", "index", "(", "var", ")", "perm2", ".", "append", "(", "len", "(", "s2", ")", "-", "len", "(", "subscope", ")", "+", "j", ")", "scope", "=", "[", "]", "# type: ParamsList", "if", "len", "(", "s1", ")", ">=", "len", "(", "s2", ")", ":", "if", "perm1", "==", "[", "]", ":", "scope", "=", "s1", "else", ":", "for", "i", "in", "range", "(", "len", "(", "s1", ")", ")", ":", "scope", ".", "append", "(", "s1", "[", "perm1", ".", "index", "(", "i", ")", "]", ")", "else", ":", "if", "perm2", "==", "[", "]", ":", "scope", "=", "s2", "else", ":", "for", "i", "in", "range", "(", "len", "(", "s2", ")", ")", ":", "scope", ".", "append", "(", "s2", "[", "perm2", ".", "index", "(", "i", ")", "]", ")", "return", "(", "scope", ",", "perm1", ",", "perm2", ")" ]
It broadcasts the smaller scope over the larger scope. It handles scope intersection as well as differences in scopes in order to output a resulting scope so that input scopes are contained within it (i.e., input scopes are subscopes of the output scope). Also, if necessary, it outputs permutations of the input scopes so that tensor broadcasting invariants are not violated. Note: For more information on broadcasting, please refer to NumPy's official documentation available at the following URLs: 1. https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html 2. https://docs.scipy.org/doc/numpy/reference/generated/numpy.broadcast.html Args: s1: A fluent's scope. s2: A fluent's scope. Returns: A tuple with the output scope and permutations of the input scopes.
[ "It", "broadcasts", "the", "smaller", "scope", "over", "the", "larger", "scope", "." ]
train
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentscope.py#L68-L137
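A worked example of the scope broadcasting above; the import path is assumed from the record's URL (rddl2tf/fluentscope.py) and may differ across versions.

from rddl2tf.fluentscope import TensorFluentScope

# s2 already matches the tail of s1: no permutation is required.
assert TensorFluentScope.broadcast(['?x', '?y'], ['?y']) == (['?x', '?y'], [], [])

# Same variables in a different order: perm2 realigns s2 to s1's layout.
scope, perm1, perm2 = TensorFluentScope.broadcast(['?x', '?y'], ['?y', '?x'])
assert (scope, perm1, perm2) == (['?x', '?y'], [], [1, 0])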
inodb/sufam
sufam/__main__.py
get_baseparser_extended_df
def get_baseparser_extended_df(sample, bp_lines, ref, alt): """Turn baseParser results into a dataframe""" columns = "chrom\tpos\tref\tcov\tA\tC\tG\tT\t*\t-\t+".split() if bp_lines is None: return None # change baseparser output to get most common maf per indel bpdf = pd.DataFrame([[sample] + l.rstrip('\n').split("\t") for l in bp_lines if len(l) > 0], columns=["sample"] + columns, dtype=np.object) bpdf[bpdf == ""] = None # remove zero coverage rows bpdf = bpdf[bpdf["cov"].astype(int) > 0] if len(bpdf) == 0: return None if ref and alt: # add columns for validation allele bpdf = pd.concat([bpdf, pd.DataFrame({"val_ref": pd.Series(ref), "val_alt": pd.Series(alt)})], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_val_al, axis=1)], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_most_common_indel, axis=1)], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_most_common_al, axis=1)], axis=1) bpdf["most_common_count"] = bpdf.apply(lambda x: max([x.most_common_al_count, x.most_common_indel_count]), axis=1) bpdf["most_common_maf"] = bpdf.apply(lambda x: max([x.most_common_al_maf, x.most_common_indel_maf]), axis=1) return bpdf
python
def get_baseparser_extended_df(sample, bp_lines, ref, alt): """Turn baseParser results into a dataframe""" columns = "chrom\tpos\tref\tcov\tA\tC\tG\tT\t*\t-\t+".split() if bp_lines is None: return None # change baseparser output to get most common maf per indel bpdf = pd.DataFrame([[sample] + l.rstrip('\n').split("\t") for l in bp_lines if len(l) > 0], columns=["sample"] + columns, dtype=np.object) bpdf[bpdf == ""] = None # remove zero coverage rows bpdf = bpdf[bpdf["cov"].astype(int) > 0] if len(bpdf) == 0: return None if ref and alt: # add columns for validation allele bpdf = pd.concat([bpdf, pd.DataFrame({"val_ref": pd.Series(ref), "val_alt": pd.Series(alt)})], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_val_al, axis=1)], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_most_common_indel, axis=1)], axis=1) bpdf = pd.concat([bpdf, bpdf.apply(_most_common_al, axis=1)], axis=1) bpdf["most_common_count"] = bpdf.apply(lambda x: max([x.most_common_al_count, x.most_common_indel_count]), axis=1) bpdf["most_common_maf"] = bpdf.apply(lambda x: max([x.most_common_al_maf, x.most_common_indel_maf]), axis=1) return bpdf
[ "def", "get_baseparser_extended_df", "(", "sample", ",", "bp_lines", ",", "ref", ",", "alt", ")", ":", "columns", "=", "\"chrom\\tpos\\tref\\tcov\\tA\\tC\\tG\\tT\\t*\\t-\\t+\"", ".", "split", "(", ")", "if", "bp_lines", "is", "None", ":", "return", "None", "# change baseparser output to get most common maf per indel", "bpdf", "=", "pd", ".", "DataFrame", "(", "[", "[", "sample", "]", "+", "l", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "\"\\t\"", ")", "for", "l", "in", "bp_lines", "if", "len", "(", "l", ")", ">", "0", "]", ",", "columns", "=", "[", "\"sample\"", "]", "+", "columns", ",", "dtype", "=", "np", ".", "object", ")", "bpdf", "[", "bpdf", "==", "\"\"", "]", "=", "None", "# remove zero coverage rows", "bpdf", "=", "bpdf", "[", "bpdf", "[", "\"cov\"", "]", ".", "astype", "(", "int", ")", ">", "0", "]", "if", "len", "(", "bpdf", ")", "==", "0", ":", "return", "None", "if", "ref", "and", "alt", ":", "# add columns for validation allele", "bpdf", "=", "pd", ".", "concat", "(", "[", "bpdf", ",", "pd", ".", "DataFrame", "(", "{", "\"val_ref\"", ":", "pd", ".", "Series", "(", "ref", ")", ",", "\"val_alt\"", ":", "pd", ".", "Series", "(", "alt", ")", "}", ")", "]", ",", "axis", "=", "1", ")", "bpdf", "=", "pd", ".", "concat", "(", "[", "bpdf", ",", "bpdf", ".", "apply", "(", "_val_al", ",", "axis", "=", "1", ")", "]", ",", "axis", "=", "1", ")", "bpdf", "=", "pd", ".", "concat", "(", "[", "bpdf", ",", "bpdf", ".", "apply", "(", "_most_common_indel", ",", "axis", "=", "1", ")", "]", ",", "axis", "=", "1", ")", "bpdf", "=", "pd", ".", "concat", "(", "[", "bpdf", ",", "bpdf", ".", "apply", "(", "_most_common_al", ",", "axis", "=", "1", ")", "]", ",", "axis", "=", "1", ")", "bpdf", "[", "\"most_common_count\"", "]", "=", "bpdf", ".", "apply", "(", "lambda", "x", ":", "max", "(", "[", "x", ".", "most_common_al_count", ",", "x", ".", "most_common_indel_count", "]", ")", ",", "axis", "=", "1", ")", "bpdf", "[", "\"most_common_maf\"", "]", "=", "bpdf", ".", "apply", "(", "lambda", "x", ":", "max", "(", "[", "x", ".", "most_common_al_maf", ",", "x", ".", "most_common_indel_maf", "]", ")", ",", "axis", "=", "1", ")", "return", "bpdf" ]
Turn baseParser results into a dataframe
[ "Turn", "baseParser", "results", "into", "a", "dataframe" ]
train
https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/__main__.py#L92-L119
inodb/sufam
sufam/__main__.py
filter_out_mutations_in_normal
def filter_out_mutations_in_normal(tumordf, normaldf, most_common_maf_min=0.2, most_common_count_maf_threshold=20, most_common_count_min=1): """Remove mutations that are in normal""" df = tumordf.merge(normaldf, on=["chrom", "pos"], suffixes=("_T", "_N")) # filters common_al = (df.most_common_al_count_T == df.most_common_count_T) & (df.most_common_al_T == df.most_common_al_N) common_indel = (df.most_common_indel_count_T == df.most_common_count_T) & \ (df.most_common_indel_T == df.most_common_indel_N) normal_criteria = ((df.most_common_count_N >= most_common_count_maf_threshold) & (df.most_common_maf_N > most_common_maf_min)) | \ ((df.most_common_count_N < most_common_count_maf_threshold) & (df.most_common_count_N > most_common_count_min)) df = df[~(common_al | common_indel) & normal_criteria] # restore column names of tumor for c in df.columns: if c.endswith("_N"): del df[c] df.columns = [c[:-2] if c.endswith("_T") else c for c in df.columns] return df
python
def filter_out_mutations_in_normal(tumordf, normaldf, most_common_maf_min=0.2, most_common_count_maf_threshold=20, most_common_count_min=1): """Remove mutations that are in normal""" df = tumordf.merge(normaldf, on=["chrom", "pos"], suffixes=("_T", "_N")) # filters common_al = (df.most_common_al_count_T == df.most_common_count_T) & (df.most_common_al_T == df.most_common_al_N) common_indel = (df.most_common_indel_count_T == df.most_common_count_T) & \ (df.most_common_indel_T == df.most_common_indel_N) normal_criteria = ((df.most_common_count_N >= most_common_count_maf_threshold) & (df.most_common_maf_N > most_common_maf_min)) | \ ((df.most_common_count_N < most_common_count_maf_threshold) & (df.most_common_count_N > most_common_count_min)) df = df[~(common_al | common_indel) & normal_criteria] # restore column names of tumor for c in df.columns: if c.endswith("_N"): del df[c] df.columns = [c[:-2] if c.endswith("_T") else c for c in df.columns] return df
[ "def", "filter_out_mutations_in_normal", "(", "tumordf", ",", "normaldf", ",", "most_common_maf_min", "=", "0.2", ",", "most_common_count_maf_threshold", "=", "20", ",", "most_common_count_min", "=", "1", ")", ":", "df", "=", "tumordf", ".", "merge", "(", "normaldf", ",", "on", "=", "[", "\"chrom\"", ",", "\"pos\"", "]", ",", "suffixes", "=", "(", "\"_T\"", ",", "\"_N\"", ")", ")", "# filters", "common_al", "=", "(", "df", ".", "most_common_al_count_T", "==", "df", ".", "most_common_count_T", ")", "&", "(", "df", ".", "most_common_al_T", "==", "df", ".", "most_common_al_N", ")", "common_indel", "=", "(", "df", ".", "most_common_indel_count_T", "==", "df", ".", "most_common_count_T", ")", "&", "(", "df", ".", "most_common_indel_T", "==", "df", ".", "imost_common_indel_N", ")", "normal_criteria", "=", "(", "(", "df", ".", "most_common_count_N", ">=", "most_common_count_maf_threshold", ")", "&", "(", "df", ".", "most_common_maf_N", ">", "most_common_maf_min", ")", ")", "|", "(", "(", "df", ".", "most_common_count_N", "<", "most_common_count_maf_threshold", ")", "&", "(", "df", ".", "most_common_count_N", ">", "most_common_count_min", ")", ")", "df", "=", "df", "[", "~", "(", "common_al", "|", "common_indel", ")", "&", "normal_criteria", "]", "# restore column names of tumor", "for", "c", "in", "df", ".", "columns", ":", "if", "c", ".", "endswith", "(", "\"_N\"", ")", ":", "del", "df", "[", "c", "]", "df", ".", "columns", "=", "[", "c", "[", ":", "-", "2", "]", "if", "c", ".", "endswith", "(", "\"_T\"", ")", "else", "c", "for", "c", "in", "df", ".", "columns", "]", "return", "df" ]
Remove mutations that are in normal
[ "Remove", "mutations", "that", "are", "in", "normal" ]
train
https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/__main__.py#L122-L144
inodb/sufam
sufam/__main__.py
select_only_revertant_mutations
def select_only_revertant_mutations(bpdf, snv=None, ins=None, dlt=None): """ Selects only mutations that revert the given mutations in a single event. """ if sum([bool(snv), bool(ins), bool(dlt)]) != 1: raise(Exception("Should be either snv, ins or del")) if snv: if snv not in ["A", "C", "G", "T"]: raise(Exception("snv {} should be A, C, G or T".format(snv))) return bpdf[(bpdf.most_common_al == snv) & (bpdf.most_common_al_count == bpdf.most_common_count)] elif bool(ins): return \ bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) + len(ins)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) | ((bpdf.most_common_indel.apply(lambda x: (len(ins) - len(x)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))] elif bool(dlt): return \ bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) - len(dlt)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) | ((bpdf.most_common_indel.apply(lambda x: (-len(dlt) - len(x)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))] else: # should never happen raise(Exception("No mutation given?"))
python
def select_only_revertant_mutations(bpdf, snv=None, ins=None, dlt=None): """ Selects only mutations that revert the given mutations in a single event. """ if sum([bool(snv), bool(ins), bool(dlt)]) != 1: raise(Exception("Should be either snv, ins or del")) if snv: if snv not in ["A", "C", "G", "T"]: raise(Exception("snv {} should be A, C, G or T".format(snv))) return bpdf[(bpdf.most_common_al == snv) & (bpdf.most_common_al_count == bpdf.most_common_count)] elif bool(ins): return \ bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) + len(ins)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) | ((bpdf.most_common_indel.apply(lambda x: (len(ins) - len(x)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))] elif bool(dlt): return \ bpdf[((bpdf.most_common_indel.apply(lambda x: (len(x) - len(dlt)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "+") & (bpdf.most_common_count == bpdf.most_common_indel_count)) | ((bpdf.most_common_indel.apply(lambda x: (-len(dlt) - len(x)) % 3 if x else None) == 0) & (bpdf.most_common_indel_type == "-") & (bpdf.most_common_count == bpdf.most_common_indel_count))] else: # should never happen raise(Exception("No mutation given?"))
[ "def", "select_only_revertant_mutations", "(", "bpdf", ",", "snv", "=", "None", ",", "ins", "=", "None", ",", "dlt", "=", "None", ")", ":", "if", "sum", "(", "[", "bool", "(", "snv", ")", ",", "bool", "(", "ins", ")", ",", "bool", "(", "dlt", ")", "]", ")", "!=", "1", ":", "raise", "(", "Exception", "(", "\"Should be either snv, ins or del\"", ".", "format", "(", "snv", ")", ")", ")", "if", "snv", ":", "if", "snv", "not", "in", "[", "\"A\"", ",", "\"C\"", ",", "\"G\"", ",", "\"T\"", "]", ":", "raise", "(", "Exception", "(", "\"snv {} should be A, C, G or T\"", ".", "format", "(", "snv", ")", ")", ")", "return", "bpdf", "[", "(", "bpdf", ".", "most_common_al", "==", "snv", ")", "&", "(", "bpdf", ".", "most_common_al_count", "==", "bpdf", ".", "most_common_count", ")", "]", "elif", "bool", "(", "ins", ")", ":", "return", "bpdf", "[", "(", "(", "bpdf", ".", "most_common_indel", ".", "apply", "(", "lambda", "x", ":", "len", "(", "x", ")", "+", "len", "(", "ins", ")", "%", "3", "if", "x", "else", "None", ")", "==", "0", ")", "&", "(", "bpdf", ".", "most_common_indel_type", "==", "\"+\"", ")", "&", "(", "bpdf", ".", "most_common_count", "==", "bpdf", ".", "most_common_indel_count", ")", ")", "|", "(", "(", "bpdf", ".", "most_common_indel", ".", "apply", "(", "lambda", "x", ":", "len", "(", "ins", ")", "-", "len", "(", "x", ")", "%", "3", "if", "x", "else", "None", ")", "==", "0", ")", "&", "(", "bpdf", ".", "most_common_indel_type", "==", "\"-\"", ")", "&", "(", "bpdf", ".", "most_common_count", "==", "bpdf", ".", "most_common_indel_count", ")", ")", "]", "elif", "bool", "(", "dlt", ")", ":", "return", "bpdf", "[", "(", "(", "bpdf", ".", "most_common_indel", ".", "apply", "(", "lambda", "x", ":", "len", "(", "x", ")", "-", "len", "(", "dlt", ")", "%", "3", "if", "x", "else", "None", ")", "==", "0", ")", "&", "(", "bpdf", ".", "most_common_indel_type", "==", "\"+\"", ")", "&", "(", "bpdf", ".", "most_common_count", "==", "bpdf", ".", "most_common_indel_count", ")", ")", "|", "(", "(", "bpdf", ".", "most_common_indel", ".", "apply", "(", "lambda", "x", ":", "-", "len", "(", "dlt", ")", "-", "len", "(", "x", ")", "%", "3", "if", "x", "else", "None", ")", "==", "0", ")", "&", "(", "bpdf", ".", "most_common_indel_type", "==", "\"-\"", ")", "&", "(", "bpdf", ".", "most_common_count", "==", "bpdf", ".", "most_common_indel_count", ")", ")", "]", "else", ":", "# should never happen", "raise", "(", "Exception", "(", "\"No mutation given?\"", ")", ")" ]
Selects only mutations that revert the given mutations in a single event.
[ "Selects", "only", "mutations", "that", "revert", "the", "given", "mutations", "in", "a", "single", "event", "." ]
train
https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/__main__.py#L147-L172
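The parenthesization in the lambdas above matters because Python's % binds tighter than + and -: a frameshift is reverted only when the net length change is a multiple of three, which requires grouping the lengths before taking the modulus. A minimal illustration in plain Python:

ins_len, x_len = 4, 2                  # a 4-base insertion plus a 2-base insertion
assert ins_len + x_len % 3 == 6        # 4 + (2 % 3): not a frame test at all
assert (ins_len + x_len) % 3 == 0      # net +6 bases preserves the reading frame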
inodb/sufam
sufam/__main__.py
validate_mutations
def validate_mutations(vcffile, bams, reffa, chr_reffa, samples, output_format, outfile, mpileup_parameters=mpileup_parser.MPILEUP_DEFAULT_PARAMS): """Check if mutations in vcf are in bam""" output_header = "sample chrom pos ref cov A C G T * - + " \ "val_ref val_alt val_al_type val_al_count val_maf "\ "most_common_indel most_common_indel_count most_common_indel_maf most_common_indel_type most_common_al " \ "most_common_al_count most_common_al_maf most_common_count most_common_maf".split() # for backwards compatibility # if bam or samples is a string, convert to list instead if isinstance(samples, six.string_types): samples = [samples] if isinstance(bams, six.string_types): bams = [bams] if output_format == 'vcf': vcf_reader = vcf.Reader(open(vcffile)) vcf_reader.samples = samples vcf_reader.formats['GT'] = vcf.parser._Format(id='GT', num=1, type='String', desc="Genotype") vcf_reader.formats['AD'] = vcf.parser._Format(id='AD', num='R', type='Integer', desc="Allelic depth") vcf_reader.formats['DP'] = vcf.parser._Format(id='DP', num=1, type='Integer', desc="Depth") vcf_writer = vcf.Writer(outfile, vcf_reader) else: vcf_reader = open(vcffile) if output_format == "sufam": outfile.write("\t".join(output_header)) outfile.write("\n") for record in vcf_reader: if output_format != 'vcf': line = record if line.startswith("#CHROM"): header = line[1:].rstrip('\n').split("\t") # create spoof pyvcf record if vcf_reader is not used _Record = namedtuple('Record', header) if line.startswith("#"): continue if len(header) == 0: raise(Exception("No header found in vcf file #CHROM not found")) # zip all column values, except alt (needs to be list in pyvcf) record_args = dict(zip(header, line.rstrip('\n').split("\t"))) record_args['ALT'] = [record_args['ALT']] record = _Record(**record_args) # determine type of mutation record_type = "snv" if len(record.ALT) > 1: warnings.warn("Multiple ALT in one record is not implemented - using first") if len(record.REF) > len(record.ALT[0]): record_type = "deletion" elif len(record.ALT[0]) > len(record.REF): record_type = "insertion" # no coverage results no_cov = pd.Series({ "chrom": str(record.CHROM), "pos": str(record.POS), "ref": str(record.REF), "cov": 0, "A": 0, "C": 0, "G": 0, "T": 0, "val_ref": str(record.REF), "val_alt": str(record.ALT[0]), "val_al_type": record_type, "val_al_count": 0, "val_maf": 0}) # collect mpileup baseparser results per bam bps = [] for i, bam in enumerate(bams): sample = samples[i] no_cov['sample'] = sample bp_lines = mpileup_parser.run_and_parse(bam, str(record.CHROM), str(record.POS), str(record.POS), reffa, chr_reffa, mpileup_parameters) bpdf = get_baseparser_extended_df(sample, bp_lines, str(record.REF), str(record.ALT[0])) if bpdf is None: bp = no_cov else: bp = bpdf.ix[0, :] bps += [bp] # output call if output_format == "vcf": _write_bp_vcf(outfile, bps, vcf_writer, record) else: # only one bam file supported for outputs other than vcf _write_bp(outfile, bps[0], output_header, output_format)
python
def validate_mutations(vcffile, bams, reffa, chr_reffa, samples, output_format, outfile, mpileup_parameters=mpileup_parser.MPILEUP_DEFAULT_PARAMS): """Check if mutations in vcf are in bam""" output_header = "sample chrom pos ref cov A C G T * - + " \ "val_ref val_alt val_al_type val_al_count val_maf "\ "most_common_indel most_common_indel_count most_common_indel_maf most_common_indel_type most_common_al " \ "most_common_al_count most_common_al_maf most_common_count most_common_maf".split() # for backwards compatibility # if bam or samples is a string, convert to list instead if isinstance(samples, six.string_types): samples = [samples] if isinstance(bams, six.string_types): bams = [bams] if output_format == 'vcf': vcf_reader = vcf.Reader(open(vcffile)) vcf_reader.samples = samples vcf_reader.formats['GT'] = vcf.parser._Format(id='GT', num=1, type='String', desc="Genotype") vcf_reader.formats['AD'] = vcf.parser._Format(id='AD', num='R', type='Integer', desc="Allelic depth") vcf_reader.formats['DP'] = vcf.parser._Format(id='DP', num=1, type='Integer', desc="Depth") vcf_writer = vcf.Writer(outfile, vcf_reader) else: vcf_reader = open(vcffile) if output_format == "sufam": outfile.write("\t".join(output_header)) outfile.write("\n") for record in vcf_reader: if output_format != 'vcf': line = record if line.startswith("#CHROM"): header = line[1:].rstrip('\n').split("\t") # create spoof pyvcf record if vcf_reader is not used _Record = namedtuple('Record', header) if line.startswith("#"): continue if len(header) == 0: raise(Exception("No header found in vcf file #CHROM not found")) # zip all column values, except alt (needs to be list in pyvcf) record_args = dict(zip(header, line.rstrip('\n').split("\t"))) record_args['ALT'] = [record_args['ALT']] record = _Record(**record_args) # determine type of mutation record_type = "snv" if len(record.ALT) > 1: warnings.warn("Multiple ALT in one record is not implemented - using first") if len(record.REF) > len(record.ALT[0]): record_type = "deletion" elif len(record.ALT[0]) > len(record.REF): record_type = "insertion" # no coverage results no_cov = pd.Series({ "chrom": str(record.CHROM), "pos": str(record.POS), "ref": str(record.REF), "cov": 0, "A": 0, "C": 0, "G": 0, "T": 0, "val_ref": str(record.REF), "val_alt": str(record.ALT[0]), "val_al_type": record_type, "val_al_count": 0, "val_maf": 0}) # collect mpileup baseparser results per bam bps = [] for i, bam in enumerate(bams): sample = samples[i] no_cov['sample'] = sample bp_lines = mpileup_parser.run_and_parse(bam, str(record.CHROM), str(record.POS), str(record.POS), reffa, chr_reffa, mpileup_parameters) bpdf = get_baseparser_extended_df(sample, bp_lines, str(record.REF), str(record.ALT[0])) if bpdf is None: bp = no_cov else: bp = bpdf.ix[0, :] bps += [bp] # output call if output_format == "vcf": _write_bp_vcf(outfile, bps, vcf_writer, record) else: # only one bam file supported for outputs other than vcf _write_bp(outfile, bps[0], output_header, output_format)
[ "def", "validate_mutations", "(", "vcffile", ",", "bams", ",", "reffa", ",", "chr_reffa", ",", "samples", ",", "output_format", ",", "outfile", ",", "mpileup_parameters", "=", "mpileup_parser", ".", "MPILEUP_DEFAULT_PARAMS", ")", ":", "output_header", "=", "\"sample chrom pos ref cov A C G T * - + \"", "\"val_ref val_alt val_al_type val_al_count val_maf \"", "\"most_common_indel most_common_indel_count most_common_indel_maf most_common_indel_type most_common_al \"", "\"most_common_al_count most_common_al_maf most_common_count most_common_maf\"", ".", "split", "(", ")", "# for backwards compatibility", "# if bam or samples is a string, convert to list instead", "if", "isinstance", "(", "samples", ",", "six", ".", "string_types", ")", ":", "samples", "=", "[", "samples", "]", "if", "isinstance", "(", "bams", ",", "six", ".", "string_types", ")", ":", "bams", "=", "[", "bams", "]", "if", "output_format", "==", "'vcf'", ":", "vcf_reader", "=", "vcf", ".", "Reader", "(", "open", "(", "vcffile", ")", ")", "vcf_reader", ".", "samples", "=", "samples", "vcf_reader", ".", "formats", "[", "'GT'", "]", "=", "vcf", ".", "parser", ".", "_Format", "(", "id", "=", "'GT'", ",", "num", "=", "1", ",", "type", "=", "'String'", ",", "desc", "=", "\"Genotype\"", ")", "vcf_reader", ".", "formats", "[", "'AD'", "]", "=", "vcf", ".", "parser", ".", "_Format", "(", "id", "=", "'AD'", ",", "num", "=", "'R'", ",", "type", "=", "'Integer'", ",", "desc", "=", "\"Allelic depth\"", ")", "vcf_reader", ".", "formats", "[", "'DP'", "]", "=", "vcf", ".", "parser", ".", "_Format", "(", "id", "=", "'DP'", ",", "num", "=", "1", ",", "type", "=", "'Integer'", ",", "desc", "=", "\"Depth\"", ")", "vcf_writer", "=", "vcf", ".", "Writer", "(", "outfile", ",", "vcf_reader", ")", "else", ":", "vcf_reader", "=", "open", "(", "vcffile", ")", "if", "output_format", "==", "\"sufam\"", ":", "outfile", ".", "write", "(", "\"\\t\"", ".", "join", "(", "output_header", ")", ")", "outfile", ".", "write", "(", "\"\\n\"", ")", "for", "record", "in", "vcf_reader", ":", "if", "output_format", "!=", "'vcf'", ":", "line", "=", "record", "if", "line", ".", "startswith", "(", "\"#CHROM\"", ")", ":", "header", "=", "line", "[", "1", ":", "]", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "\"\\t\"", ")", "# create spoof pyvcf record if vcf_reader is not used", "_Record", "=", "namedtuple", "(", "'Record'", ",", "header", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "if", "len", "(", "header", ")", "==", "0", ":", "raise", "(", "Exception", "(", "\"No header found in vcf file #CHROM not found\"", ")", ")", "# zip all column values, except alt (needs to be list in pyvcf)", "record_args", "=", "dict", "(", "zip", "(", "header", ",", "line", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "\"\\t\"", ")", ")", ")", "record_args", "[", "'ALT'", "]", "=", "[", "record_args", "[", "'ALT'", "]", "]", "record", "=", "_Record", "(", "*", "*", "record_args", ")", "# determine type of mutation", "record_type", "=", "\"snv\"", "if", "len", "(", "record", ".", "ALT", ")", ">", "1", ":", "warnings", ".", "warn", "(", "\"Multiple ALT in one record is not implemented - using first\"", ")", "if", "len", "(", "record", ".", "REF", ")", ">", "len", "(", "record", ".", "ALT", "[", "0", "]", ")", ":", "record_type", "=", "\"deletion\"", "elif", "len", "(", "record", ".", "ALT", "[", "0", "]", ")", ">", "len", "(", "record", ".", "REF", ")", ":", "record_type", "=", "\"insertion\"", "# no coverage results", "no_cov", "=", "pd", ".", 
"Series", "(", "{", "\"chrom\"", ":", "str", "(", "record", ".", "CHROM", ")", ",", "\"pos\"", ":", "str", "(", "record", ".", "POS", ")", ",", "\"ref\"", ":", "str", "(", "record", ".", "REF", ")", ",", "\"cov\"", ":", "0", ",", "\"A\"", ":", "0", ",", "\"C\"", ":", "0", ",", "\"G\"", ":", "0", ",", "\"T\"", ":", "0", ",", "\"val_ref\"", ":", "str", "(", "record", ".", "REF", ")", ",", "\"val_alt\"", ":", "str", "(", "record", ".", "ALT", "[", "0", "]", ")", ",", "\"val_al_type\"", ":", "record_type", ",", "\"val_al_count\"", ":", "0", ",", "\"val_maf\"", ":", "0", "}", ")", "# collect mpileup baseparser results per bam", "bps", "=", "[", "]", "for", "i", ",", "bam", "in", "enumerate", "(", "bams", ")", ":", "sample", "=", "samples", "[", "i", "]", "no_cov", "[", "'sample'", "]", "=", "sample", "bp_lines", "=", "mpileup_parser", ".", "run_and_parse", "(", "bam", ",", "str", "(", "record", ".", "CHROM", ")", ",", "str", "(", "record", ".", "POS", ")", ",", "str", "(", "record", ".", "POS", ")", ",", "reffa", ",", "chr_reffa", ",", "mpileup_parameters", ")", "bpdf", "=", "get_baseparser_extended_df", "(", "sample", ",", "bp_lines", ",", "str", "(", "record", ".", "REF", ")", ",", "str", "(", "record", ".", "ALT", "[", "0", "]", ")", ")", "if", "bpdf", "is", "None", ":", "bp", "=", "no_cov", "else", ":", "bp", "=", "bpdf", ".", "ix", "[", "0", ",", ":", "]", "bps", "+=", "[", "bp", "]", "# output call", "if", "output_format", "==", "\"vcf\"", ":", "_write_bp_vcf", "(", "outfile", ",", "bps", ",", "vcf_writer", ",", "record", ")", "else", ":", "# only one bam file supported for outputs other than vcf", "_write_bp", "(", "outfile", ",", "bps", "[", "0", "]", ",", "output_header", ",", "output_format", ")" ]
Check if mutations in vcf are in bam
[ "Check", "if", "mutations", "in", "vcf", "are", "in", "bam" ]
train
https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/__main__.py#L208-L289
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster._prep_clients
def _prep_clients(self, clients): """Prep a client by tagging it with an id and wrapping methods. Methods are wrapped to catch ConnectionError so that we can remove it from the pool until the instance comes back up. :returns: patched clients """ for pool_id, client in enumerate(clients): # Tag it with an id we'll use to identify it in the pool if hasattr(client, "pool_id"): raise ValueError("%r is already part of a pool." % client) setattr(client, "pool_id", pool_id) # Wrap all public functions self._wrap_functions(client) return clients
python
def _prep_clients(self, clients): """Prep a client by tagging it with an id and wrapping methods. Methods are wrapped to catch ConnectionError so that we can remove it from the pool until the instance comes back up. :returns: patched clients """ for pool_id, client in enumerate(clients): # Tag it with an id we'll use to identify it in the pool if hasattr(client, "pool_id"): raise ValueError("%r is already part of a pool." % client) setattr(client, "pool_id", pool_id) # Wrap all public functions self._wrap_functions(client) return clients
[ "def", "_prep_clients", "(", "self", ",", "clients", ")", ":", "for", "pool_id", ",", "client", "in", "enumerate", "(", "clients", ")", ":", "# Tag it with an id we'll use to identify it in the pool", "if", "hasattr", "(", "client", ",", "\"pool_id\"", ")", ":", "raise", "ValueError", "(", "\"%r is already part of a pool.\"", ",", "client", ")", "setattr", "(", "client", ",", "\"pool_id\"", ",", "pool_id", ")", "# Wrap all public functions", "self", ".", "_wrap_functions", "(", "client", ")", "return", "clients" ]
Prep a client by tagging it with an id and wrapping methods. Methods are wrapped to catch ConnectionError so that we can remove it from the pool until the instance comes back up. :returns: patched clients
[ "Prep", "a", "client", "by", "tagging", "it", "with", "and", "id", "and", "wrapping", "methods", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L79-L94
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster._wrap_functions
def _wrap_functions(self, client): """Wrap public functions to catch ConnectionError. When an error happens, it puts the client in the penalty box so that it won't be retried again for a little while. """ def wrap(fn): def wrapper(*args, **kwargs): """Simple wrapper to catch dead clients.""" try: return fn(*args, **kwargs) except (ConnectionError, TimeoutError): # TO THE PENALTY BOX! self._penalize_client(client) raise return functools.update_wrapper(wrapper, fn) for name in dir(client): if name.startswith("_"): continue # Some things aren't wrapped if name in ("echo", "execute_command", "parse_response"): continue obj = getattr(client, name) if not callable(obj): continue log.debug("Wrapping %s", name) setattr(client, name, wrap(obj))
python
def _wrap_functions(self, client): """Wrap public functions to catch ConnectionError. When an error happens, it puts the client in the penalty box so that it won't be retried again for a little while. """ def wrap(fn): def wrapper(*args, **kwargs): """Simple wrapper to catch dead clients.""" try: return fn(*args, **kwargs) except (ConnectionError, TimeoutError): # TO THE PENALTY BOX! self._penalize_client(client) raise return functools.update_wrapper(wrapper, fn) for name in dir(client): if name.startswith("_"): continue # Some things aren't wrapped if name in ("echo", "execute_command", "parse_response"): continue obj = getattr(client, name) if not callable(obj): continue log.debug("Wrapping %s", name) setattr(client, name, wrap(obj))
[ "def", "_wrap_functions", "(", "self", ",", "client", ")", ":", "def", "wrap", "(", "fn", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Simple wrapper for to catch dead clients.\"\"\"", "try", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "ConnectionError", ",", "TimeoutError", ")", ":", "# TO THE PENALTY BOX!", "self", ".", "_penalize_client", "(", "client", ")", "raise", "return", "functools", ".", "update_wrapper", "(", "wrapper", ",", "fn", ")", "for", "name", "in", "dir", "(", "client", ")", ":", "if", "name", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "# Some things aren't wrapped", "if", "name", "in", "(", "\"echo\"", ",", "\"execute_command\"", ",", "\"parse_response\"", ")", ":", "continue", "obj", "=", "getattr", "(", "client", ",", "name", ")", "if", "not", "callable", "(", "obj", ")", ":", "continue", "log", ".", "debug", "(", "\"Wrapping %s\"", ",", "name", ")", "setattr", "(", "client", ",", "name", ",", "wrap", "(", "obj", ")", ")" ]
Wrap public functions to catch ConnectionError. When an error happens, it puts the client in the penalty box so that it won't be retried again for a little while.
[ "Wrap", "public", "functions", "to", "catch", "ConnectionError", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L96-L124
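The method-wrapping pattern above generalizes beyond redis clients; a minimal self-contained sketch (the FlakyClient class and wrap_public_methods helper are illustrative stand-ins, not part of the library):

import functools

class FlakyClient:
    def ping(self):
        """Return a canned response."""
        return "pong"

def wrap_public_methods(obj, on_error):
    """Rebind every public callable on obj to report connection errors."""
    for name in dir(obj):
        if name.startswith("_"):
            continue
        fn = getattr(obj, name)
        if not callable(fn):
            continue
        def make_wrapper(fn):
            def wrapper(*args, **kwargs):
                try:
                    return fn(*args, **kwargs)
                except ConnectionError:
                    on_error(obj)  # e.g. move the client to a penalty box
                    raise
            # Preserve the wrapped method's name and docstring.
            return functools.update_wrapper(wrapper, fn)
        setattr(obj, name, make_wrapper(fn))

client = FlakyClient()
wrap_public_methods(client, on_error=lambda c: None)
assert client.ping() == "pong"
assert client.ping.__doc__ == "Return a canned response."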
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster._prune_penalty_box
def _prune_penalty_box(self): """Restores clients that have reconnected. This function should be called first for every public method. """ added = False for client in self.penalty_box.get(): log.info("Client %r is back up.", client) self.active_clients.append(client) added = True if added: self._sort_clients()
python
def _prune_penalty_box(self): """Restores clients that have reconnected. This function should be called first for every public method. """ added = False for client in self.penalty_box.get(): log.info("Client %r is back up.", client) self.active_clients.append(client) added = True if added: self._sort_clients()
[ "def", "_prune_penalty_box", "(", "self", ")", ":", "added", "=", "False", "for", "client", "in", "self", ".", "penalty_box", ".", "get", "(", ")", ":", "log", ".", "info", "(", "\"Client %r is back up.\"", ",", "client", ")", "self", ".", "active_clients", ".", "append", "(", "client", ")", "added", "=", "True", "if", "added", ":", "self", ".", "_sort_clients", "(", ")" ]
Restores clients that have reconnected. This function should be called first for every public method.
[ "Restores", "clients", "that", "have", "reconnected", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L126-L137
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster.get_client
def get_client(self, shard_key): """Get the client for a given shard, based on what's available. If the proper client isn't available, the next available client is returned. If no clients are available, an exception is raised. """ self._prune_penalty_box() if len(self.active_clients) == 0: raise ClusterEmptyError("All clients are down.") # So that hashing is consistent when a node is down, check against # the initial client list. Only use the active client list when # the desired node is down. # N.B.: I know this is not technically "consistent hashing" as # academically defined. It's a hack so that keys which need to # go elsewhere do, while the rest stay on the same instance. if not isinstance(shard_key, bytes): shard_key = shard_key.encode("utf-8") hashed = mmh3.hash(shard_key) pos = hashed % len(self.initial_clients) if self.initial_clients[pos] in self.active_clients: return self.initial_clients[pos] else: pos = hashed % len(self.active_clients) return self.active_clients[pos]
python
def get_client(self, shard_key): """Get the client for a given shard, based on what's available. If the proper client isn't available, the next available client is returned. If no clients are available, an exception is raised. """ self._prune_penalty_box() if len(self.active_clients) == 0: raise ClusterEmptyError("All clients are down.") # So that hashing is consistent when a node is down, check against # the initial client list. Only use the active client list when # the desired node is down. # N.B.: I know this is not technically "consistent hashing" as # academically defined. It's a hack so that keys which need to # go elsewhere do, while the rest stay on the same instance. if not isinstance(shard_key, bytes): shard_key = shard_key.encode("utf-8") hashed = mmh3.hash(shard_key) pos = hashed % len(self.initial_clients) if self.initial_clients[pos] in self.active_clients: return self.initial_clients[pos] else: pos = hashed % len(self.active_clients) return self.active_clients[pos]
[ "def", "get_client", "(", "self", ",", "shard_key", ")", ":", "self", ".", "_prune_penalty_box", "(", ")", "if", "len", "(", "self", ".", "active_clients", ")", "==", "0", ":", "raise", "ClusterEmptyError", "(", "\"All clients are down.\"", ")", "# So that hashing is consistent when a node is down, check against", "# the initial client list. Only use the active client list when", "# the desired node is down.", "# N.B.: I know this is not technically \"consistent hashing\" as", "# academically defined. It's a hack so that keys which need to", "# go elsewhere do, while the rest stay on the same instance.", "if", "not", "isinstance", "(", "shard_key", ",", "bytes", ")", ":", "shard_key", "=", "shard_key", ".", "encode", "(", "\"utf-8\"", ")", "hashed", "=", "mmh3", ".", "hash", "(", "shard_key", ")", "pos", "=", "hashed", "%", "len", "(", "self", ".", "initial_clients", ")", "if", "self", ".", "initial_clients", "[", "pos", "]", "in", "self", ".", "active_clients", ":", "return", "self", ".", "initial_clients", "[", "pos", "]", "else", ":", "pos", "=", "hashed", "%", "len", "(", "self", ".", "active_clients", ")", "return", "self", ".", "active_clients", "[", "pos", "]" ]
Get the client for a given shard, based on what's available. If the proper client isn't available, the next available client is returned. If no clients are available, an exception is raised.
[ "Get", "the", "client", "for", "a", "given", "shard", "based", "on", "what", "s", "available", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L139-L164
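A minimal standalone sketch of the hashing fallback described in get_client above; the client names and lists are hypothetical stand-ins for redis clients, but the mmh3 call matches the one in the source:

import mmh3

def pick_client(shard_key, initial_clients, active_clients):
    # Hash against the full client list first so key placement stays
    # stable while every node is healthy.
    if not isinstance(shard_key, bytes):
        shard_key = shard_key.encode("utf-8")
    hashed = mmh3.hash(shard_key)
    preferred = initial_clients[hashed % len(initial_clients)]
    if preferred in active_clients:
        return preferred
    # Only keys whose preferred node is down get re-hashed onto the
    # remaining active nodes; everything else stays put.
    return active_clients[hashed % len(active_clients)]

clients = ["redis-0", "redis-1", "redis-2"]
print(pick_client("user:42", clients, clients))                  # preferred node
print(pick_client("user:42", clients, ["redis-0", "redis-2"]))   # may fall back if its node was removed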
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster._penalize_client
def _penalize_client(self, client): """Place client in the penalty box. :param client: Client object """ if client in self.active_clients: # hasn't been removed yet log.warning("%r marked down.", client) self.active_clients.remove(client) self.penalty_box.add(client) else: log.info("%r not in active client list.", client)
python
def _penalize_client(self, client): """Place client in the penalty box. :param client: Client object """ if client in self.active_clients: # hasn't been removed yet log.warning("%r marked down.", client) self.active_clients.remove(client) self.penalty_box.add(client) else: log.info("%r not in active client list.", client)
[ "def", "_penalize_client", "(", "self", ",", "client", ")", ":", "if", "client", "in", "self", ".", "active_clients", ":", "# hasn't been removed yet", "log", ".", "warning", "(", "\"%r marked down.\"", ",", "client", ")", "self", ".", "active_clients", ".", "remove", "(", "client", ")", "self", ".", "penalty_box", ".", "add", "(", "client", ")", "else", ":", "log", ".", "info", "(", "\"%r not in active client list.\"", ")" ]
Place client in the penalty box. :param client: Client object
[ "Place", "client", "in", "the", "penalty", "box", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L166-L176
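The penalty-box round trip formed by _penalize_client and _prune_penalty_box reduces to the runnable sketch below; ToyPenaltyBox is a hypothetical stand-in (the real box gates get() on a per-client retry deadline and reconnect check):

import logging

logging.basicConfig()
log = logging.getLogger("fluster-sketch")

class ToyPenaltyBox:
    def __init__(self):
        self._down = []
    def add(self, client):
        self._down.append(client)
    def get(self):
        # Yield clients ready to be retried; here, all of them at once.
        while self._down:
            yield self._down.pop()

active = ["redis-0", "redis-1"]
box = ToyPenaltyBox()
client = active[0]
if client in active:           # mirrors the guard in _penalize_client
    log.warning("%r marked down.", client)
    active.remove(client)
    box.add(client)
for restored in box.get():     # what _prune_penalty_box does on entry
    active.append(restored)
print(active)                  # ['redis-1', 'redis-0']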
Parsely/redis-fluster
fluster/cluster.py
FlusterCluster.zrevrange_with_int_score
def zrevrange_with_int_score(self, key, max_score, min_score): """Get the zrevrangebyscore across the cluster. Highest score for duplicate element is returned. A faster method should be written if scores are not needed. """ self._prune_penalty_box() if len(self.active_clients) == 0: raise ClusterEmptyError("All clients are down.") element__score = defaultdict(int) for client in self.active_clients: revrange = client.zrevrangebyscore( key, max_score, min_score, withscores=True, score_cast_func=int ) for element, count in revrange: element__score[element] = max(element__score[element], int(count)) return element__score
python
def zrevrange_with_int_score(self, key, max_score, min_score): """Get the zrevrangebyscore across the cluster. Highest score for duplicate element is returned. A faster method should be written if scores are not needed. """ self._prune_penalty_box() if len(self.active_clients) == 0: raise ClusterEmptyError("All clients are down.") element__score = defaultdict(int) for client in self.active_clients: revrange = client.zrevrangebyscore( key, max_score, min_score, withscores=True, score_cast_func=int ) for element, count in revrange: element__score[element] = max(element__score[element], int(count)) return element__score
[ "def", "zrevrange_with_int_score", "(", "self", ",", "key", ",", "max_score", ",", "min_score", ")", ":", "self", ".", "_prune_penalty_box", "(", ")", "if", "len", "(", "self", ".", "active_clients", ")", "==", "0", ":", "raise", "ClusterEmptyError", "(", "\"All clients are down.\"", ")", "element__score", "=", "defaultdict", "(", "int", ")", "for", "client", "in", "self", ".", "active_clients", ":", "revrange", "=", "client", ".", "zrevrangebyscore", "(", "key", ",", "max_score", ",", "min_score", ",", "withscores", "=", "True", ",", "score_cast_func", "=", "int", ")", "for", "element", ",", "count", "in", "revrange", ":", "element__score", "[", "element", "]", "=", "max", "(", "element__score", "[", "element", "]", ",", "int", "(", "count", ")", ")", "return", "element__score" ]
Get the zrevrangebyscore across the cluster. Highest score for duplicate element is returned. A faster method should be written if scores are not needed.
[ "Get", "the", "zrevrangebyscore", "across", "the", "cluster", ".", "Highest", "score", "for", "duplicate", "element", "is", "returned", ".", "A", "faster", "method", "should", "be", "written", "if", "scores", "are", "not", "needed", "." ]
train
https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/cluster.py#L178-L197
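The cross-shard merge above boils down to a max-per-element fold; here is a standalone sketch with hypothetical shard outputs in place of live zrevrangebyscore calls:

from collections import defaultdict

def merge_revranges(revranges):
    # One (element, score) list per shard, in the shape produced by
    # zrevrangebyscore(..., withscores=True, score_cast_func=int).
    element__score = defaultdict(int)
    for revrange in revranges:
        for element, count in revrange:
            element__score[element] = max(element__score[element], int(count))
    return element__score

shard_a = [(b"item:1", 3), (b"item:2", 1)]
shard_b = [(b"item:1", 5)]
print(dict(merge_revranges([shard_a, shard_b])))   # {b'item:1': 5, b'item:2': 1}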
alfredodeza/notario
notario/regex.py
chain
def chain(*regexes, **kwargs): """ A helper function to interact with the regular expression engine that compiles and applies partial matches to a string. It expects key value tuples as arguments (any number of them) where the first pair is the regex to compile and the latter is the message to display when the regular expression does not match. The engine constructs partial regular expressions from the input and applies them sequentially to find the exact point of failure and allowing the ability to return a meaningful message. Because adding negation statements like "does not..." can become repetitive, the function defaults to ``True`` to include the option to prepend the negative. For example, this is what would happen with a failing regex:: >>> rx = chain((r'^\d+', 'start with a digit')) >>> rx('foo') Traceback (most recent call last): ... AssertionError: does not start with a digit If there is no need for prepending the negation, the keyword argument will need to set it as ``False``:: >>> rx = chain((r'^\d+', 'it should start with a digit'), ... prepend_negation=False) >>> rx('foo') Traceback (most recent call last): ... AssertionError: it should start with a digit """ prepend_negation = kwargs.get('prepend_negation', True) return Linker(regexes, prepend_negation=prepend_negation)
python
def chain(*regexes, **kwargs): """ A helper function to interact with the regular expression engine that compiles and applies partial matches to a string. It expects key value tuples as arguments (any number of them) where the first pair is the regex to compile and the latter is the message to display when the regular expression does not match. The engine constructs partial regular expressions from the input and applies them sequentially to find the exact point of failure and allowing the ability to return a meaningful message. Because adding negation statements like "does not..." can become repetitive, the function defaults to ``True`` to include the option to prepend the negative. For example, this is what would happen with a failing regex:: >>> rx = chain((r'^\d+', 'start with a digit')) >>> rx('foo') Traceback (most recent call last): ... AssertionError: does not start with a digit If there is no need for prepending the negation, the keyword argument will need to set it as ``False``:: >>> rx = chain((r'^\d+', 'it should start with a digit'), ... prepend_negation=False) >>> rx('foo') Traceback (most recent call last): ... AssertionError: it should start with a digit """ prepend_negation = kwargs.get('prepend_negation', True) return Linker(regexes, prepend_negation=prepend_negation)
[ "def", "chain", "(", "*", "regexes", ",", "*", "*", "kwargs", ")", ":", "prepend_negation", "=", "kwargs", ".", "get", "(", "'prepend_negation'", ",", "True", ")", "return", "Linker", "(", "regexes", ",", "prepend_negation", "=", "prepend_negation", ")" ]
A helper function to interact with the regular expression engine that compiles and applies partial matches to a string. It expects key value tuples as arguments (any number of them) where the first pair is the regex to compile and the latter is the message to display when the regular expression does not match. The engine constructs partial regular expressions from the input and applies them sequentially to find the exact point of failure and allowing the ability to return a meaningful message. Because adding negation statements like "does not..." can become repetitive, the function defaults to ``True`` to include the option to prepend the negative. For example, this is what would happen with a failing regex:: >>> rx = chain((r'^\d+', 'start with a digit')) >>> rx('foo') Traceback (most recent call last): ... AssertionError: does not start with a digit If there is no need for prepending the negation, the keyword argument will need to set it as ``False``:: >>> rx = chain((r'^\d+', 'it should start with a digit'), ... prepend_negation=False) >>> rx('foo') Traceback (most recent call last): ... AssertionError: it should start with a digit
[ "A", "helper", "function", "to", "interact", "with", "the", "regular", "expression", "engine", "that", "compiles", "and", "applies", "partial", "matches", "to", "a", "string", "." ]
train
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/regex.py#L57-L94
solvebio/solvebio-dash-components
examples/s3_uploader.py
generate_s3_url
def generate_s3_url(files): """Takes files from React side, creates SolveBio Object containing signed S3 URL.""" if files: vault = g.client.Vault.get_personal_vault() files = json.loads(files) objects = [] for i in range(len(files)): obj = g.client.Object.create( vault_id=vault.id, object_type='file', filename=files[i].get('filename'), mimetype=files[i].get('mimetype'), size=files[i].get('size') ) objects.append({ 'id': obj.id, 'filename': obj.filename, 'upload_url': obj.upload_url }) return json.dumps(objects)
python
def generate_s3_url(files): """Takes files from React side, creates SolveBio Object containing signed S3 URL.""" if files: vault = g.client.Vault.get_personal_vault() files = json.loads(files) objects = [] for i in range(len(files)): obj = g.client.Object.create( vault_id=vault.id, object_type='file', filename=files[i].get('filename'), mimetype=files[i].get('mimetype'), size=files[i].get('size') ) objects.append({ 'id': obj.id, 'filename': obj.filename, 'upload_url': obj.upload_url }) return json.dumps(objects)
[ "def", "generate_s3_url", "(", "files", ")", ":", "if", "files", ":", "vault", "=", "g", ".", "client", ".", "Vault", ".", "get_personal_vault", "(", ")", "files", "=", "json", ".", "loads", "(", "files", ")", "objects", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "files", ")", ")", ":", "obj", "=", "g", ".", "client", ".", "Object", ".", "create", "(", "vault_id", "=", "vault", ".", "id", ",", "object_type", "=", "'file'", ",", "filename", "=", "files", "[", "i", "]", ".", "get", "(", "'filename'", ")", ",", "mimetype", "=", "files", "[", "i", "]", ".", "get", "(", "'mimetype'", ")", ",", "size", "=", "files", "[", "i", "]", ".", "get", "(", "'size'", ")", ")", "objects", ".", "append", "(", "{", "'id'", ":", "obj", ".", "id", ",", "'filename'", ":", "obj", ".", "filename", ",", "'upload_url'", ":", "obj", ".", "upload_url", "}", ")", "return", "json", ".", "dumps", "(", "objects", ")" ]
Takes files from React side, creates SolveBio Object containing signed S3 URL.
[ "Takes", "files", "from", "React", "side", "creates", "SolveBio", "Object", "containing", "signed", "S3", "URL", "." ]
train
https://github.com/solvebio/solvebio-dash-components/blob/07f786379f9bb1bb003cc5727baf85f5ce54ae23/examples/s3_uploader.py#L36-L56
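The JSON round trip performed by this callback looks roughly like the sketch below; fake_create is a hypothetical stand-in for g.client.Object.create, and the payload fields mirror what the React side sends:

import json

def fake_create(vault_id, object_type, filename, mimetype, size):
    # Stand-in: the real call returns an Object carrying a signed upload_url.
    return {"id": 1, "filename": filename,
            "upload_url": "https://example-bucket.s3.amazonaws.com/signed"}

incoming = json.dumps([{"filename": "a.csv", "mimetype": "text/csv", "size": 10}])
objects = []
for f in json.loads(incoming):
    obj = fake_create(vault_id=1, object_type="file",
                      filename=f.get("filename"),
                      mimetype=f.get("mimetype"),
                      size=f.get("size"))
    objects.append({"id": obj["id"], "filename": obj["filename"],
                    "upload_url": obj["upload_url"]})
print(json.dumps(objects))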
solvebio/solvebio-dash-components
examples/s3_uploader.py
handle_uploaded_files
def handle_uploaded_files(uploaded_files): """Handles downstream processes using metadata about the uploaded files from React side.""" if uploaded_files: uploaded_files = json.loads(uploaded_files)[0] _id = uploaded_files.get('id') # Strip extension from filename _filename = os.path.splitext(uploaded_files.get('filename'))[0] # Create a dataset dataset = g.client.Dataset.get_or_create_by_full_path('~/' + _filename) # Import the file into the dataset g.client.DatasetImport.create( dataset_id=dataset.id, object_id=_id ) # Wait until activity is completed dataset.activity(follow=True) SELECTED_COLS = ['col_a', 'col_b', 'col_c'] query = dataset.query(fields=SELECTED_COLS) return html.Div( dt.DataTable( id='data-table', rows=list(query), columns=SELECTED_COLS ) )
python
def handle_uploaded_files(uploaded_files): """Handles downstream processes using metadata about the uploaded files from React side.""" if uploaded_files: uploaded_files = json.loads(uploaded_files)[0] _id = uploaded_files.get('id') # Strip extension from filename _filename = os.path.splitext(uploaded_files.get('filename'))[0] # Create a dataset dataset = g.client.Dataset.get_or_create_by_full_path('~/' + _filename) # Import the file into the dataset g.client.DatasetImport.create( dataset_id=dataset.id, object_id=_id ) # Wait until activity is completed dataset.activity(follow=True) SELECTED_COLS = ['col_a', 'col_b', 'col_c'] query = dataset.query(fields=SELECTED_COLS) return html.Div( dt.DataTable( id='data-table', rows=list(query), columns=SELECTED_COLS ) )
[ "def", "handle_uploaded_files", "(", "uploaded_files", ")", ":", "if", "uploaded_files", ":", "uploaded_files", "=", "json", ".", "loads", "(", "uploaded_files", ")", "[", "0", "]", "_id", "=", "uploaded_files", ".", "get", "(", "'id'", ")", "# Strip extension from filename", "_filename", "=", "os", ".", "path", ".", "splitext", "(", "uploaded_files", ".", "get", "(", "'filename'", ")", ")", "[", "0", "]", "# Create a dataset", "dataset", "=", "g", ".", "client", ".", "Dataset", ".", "get_or_create_by_full_path", "(", "'~/'", "+", "_filename", ")", "# Import the file into the dataset", "g", ".", "client", ".", "DatasetImport", ".", "create", "(", "dataset_id", "=", "dataset", ".", "id", ",", "object_id", "=", "_id", ")", "# Wait until activity is completed", "dataset", ".", "activity", "(", "follow", "=", "True", ")", "SELECTED_COLS", "=", "[", "'col_a'", ",", "'col_b'", ",", "'col_c'", "]", "query", "=", "dataset", ".", "query", "(", "fields", "=", "SELECTED_COLS", ")", "return", "html", ".", "Div", "(", "dt", ".", "DataTable", "(", "id", "=", "'data-table'", ",", "rows", "=", "list", "(", "query", ")", ",", "columns", "=", "SELECTED_COLS", ")", ")" ]
Handles downstream processes using metadata about the uploaded files from React side.
[ "Handles", "downstream", "processes", "using", "metadata", "about", "the", "uploaded", "files", "from", "React", "side", "." ]
train
https://github.com/solvebio/solvebio-dash-components/blob/07f786379f9bb1bb003cc5727baf85f5ce54ae23/examples/s3_uploader.py#L62-L92
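One detail worth noting from the function above: os.path.splitext strips only the last extension, so compound extensions keep their first part:

import os

print(os.path.splitext("report.csv")[0])       # 'report'
print(os.path.splitext("archive.tar.gz")[0])   # 'archive.tar', not 'archive'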
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py
create_camera_panel
def create_camera_panel(document_controller, panel_id, properties): """Create a custom camera panel. The camera panel type is specified in the 'camera_panel_type' key in the properties dict. The camera panel type must match the 'camera_panel_type' of a camera panel factory in the Registry. The matching camera panel factory must return a ui_handler for the panel which is used to produce the UI. """ camera_panel_type = properties.get("camera_panel_type") for component in Registry.get_components_by_type("camera_panel"): if component.camera_panel_type == camera_panel_type: hardware_source_id = properties["hardware_source_id"] hardware_source = HardwareSource.HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id) camera_device = getattr(hardware_source, "camera", None) camera_settings = getattr(hardware_source, "camera_settings", None) ui_handler = component.get_ui_handler(api_broker=PlugInManager.APIBroker(), event_loop=document_controller.event_loop, hardware_source_id=hardware_source_id, camera_device=camera_device, camera_settings=camera_settings) panel = Panel.Panel(document_controller, panel_id, properties) panel.widget = Declarative.DeclarativeWidget(document_controller.ui, document_controller.event_loop, ui_handler) return panel return None
python
def create_camera_panel(document_controller, panel_id, properties): """Create a custom camera panel. The camera panel type is specified in the 'camera_panel_type' key in the properties dict. The camera panel type must match the 'camera_panel_type' of a camera panel factory in the Registry. The matching camera panel factory must return a ui_handler for the panel which is used to produce the UI. """ camera_panel_type = properties.get("camera_panel_type") for component in Registry.get_components_by_type("camera_panel"): if component.camera_panel_type == camera_panel_type: hardware_source_id = properties["hardware_source_id"] hardware_source = HardwareSource.HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id) camera_device = getattr(hardware_source, "camera", None) camera_settings = getattr(hardware_source, "camera_settings", None) ui_handler = component.get_ui_handler(api_broker=PlugInManager.APIBroker(), event_loop=document_controller.event_loop, hardware_source_id=hardware_source_id, camera_device=camera_device, camera_settings=camera_settings) panel = Panel.Panel(document_controller, panel_id, properties) panel.widget = Declarative.DeclarativeWidget(document_controller.ui, document_controller.event_loop, ui_handler) return panel return None
[ "def", "create_camera_panel", "(", "document_controller", ",", "panel_id", ",", "properties", ")", ":", "camera_panel_type", "=", "properties", ".", "get", "(", "\"camera_panel_type\"", ")", "for", "component", "in", "Registry", ".", "get_components_by_type", "(", "\"camera_panel\"", ")", ":", "if", "component", ".", "camera_panel_type", "==", "camera_panel_type", ":", "hardware_source_id", "=", "properties", "[", "\"hardware_source_id\"", "]", "hardware_source", "=", "HardwareSource", ".", "HardwareSourceManager", "(", ")", ".", "get_hardware_source_for_hardware_source_id", "(", "hardware_source_id", ")", "camera_device", "=", "getattr", "(", "hardware_source", ",", "\"camera\"", ",", "None", ")", "camera_settings", "=", "getattr", "(", "hardware_source", ",", "\"camera_settings\"", ",", "None", ")", "ui_handler", "=", "component", ".", "get_ui_handler", "(", "api_broker", "=", "PlugInManager", ".", "APIBroker", "(", ")", ",", "event_loop", "=", "document_controller", ".", "event_loop", ",", "hardware_source_id", "=", "hardware_source_id", ",", "camera_device", "=", "camera_device", ",", "camera_settings", "=", "camera_settings", ")", "panel", "=", "Panel", ".", "Panel", "(", "document_controller", ",", "panel_id", ",", "properties", ")", "panel", ".", "widget", "=", "Declarative", ".", "DeclarativeWidget", "(", "document_controller", ".", "ui", ",", "document_controller", ".", "event_loop", ",", "ui_handler", ")", "return", "panel", "return", "None" ]
Create a custom camera panel. The camera panel type is specified in the 'camera_panel_type' key in the properties dict. The camera panel type must match the 'camera_panel_type' of a camera panel factory in the Registry. The matching camera panel factory must return a ui_handler for the panel which is used to produce the UI.
[ "Create", "a", "custom", "camera", "panel", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py#L849-L869
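The factory lookup above follows a simple registry pattern; a standalone sketch with a hypothetical registry and factory class:

_registry = []

def get_components_by_type(component_type):
    # Stand-in for Registry.get_components_by_type.
    return [c for c in _registry if c.component_type == component_type]

class CameraPanelFactory:
    component_type = "camera_panel"
    camera_panel_type = "my_camera"

_registry.append(CameraPanelFactory())

requested = "my_camera"   # would come from properties["camera_panel_type"]
factory = next((c for c in get_components_by_type("camera_panel")
                if c.camera_panel_type == requested), None)
print(factory is not None)   # True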
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py
CameraControlStateController.initialize_state
def initialize_state(self): """ Call this to initialize the state of the UI after everything has been connected. """ if self.__hardware_source: self.__profile_changed_event_listener = self.__hardware_source.profile_changed_event.listen(self.__update_profile_index) self.__frame_parameters_changed_event_listener = self.__hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters) self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed) self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed) self.__log_messages_event_listener = self.__hardware_source.log_messages_event.listen(self.__log_messages) if self.on_display_name_changed: self.on_display_name_changed(self.display_name) if self.on_binning_values_changed: self.on_binning_values_changed(self.__hardware_source.binning_values) if self.on_monitor_button_state_changed: has_monitor = self.__hardware_source and self.__hardware_source.features.get("has_monitor", False) self.on_monitor_button_state_changed(has_monitor, has_monitor) self.__update_buttons() if self.on_profiles_changed: profile_items = self.__hardware_source.modes self.on_profiles_changed(profile_items) self.__update_profile_index(self.__hardware_source.selected_profile_index) if self.on_data_item_states_changed: self.on_data_item_states_changed(list())
python
def initialize_state(self): """ Call this to initialize the state of the UI after everything has been connected. """ if self.__hardware_source: self.__profile_changed_event_listener = self.__hardware_source.profile_changed_event.listen(self.__update_profile_index) self.__frame_parameters_changed_event_listener = self.__hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters) self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed) self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed) self.__log_messages_event_listener = self.__hardware_source.log_messages_event.listen(self.__log_messages) if self.on_display_name_changed: self.on_display_name_changed(self.display_name) if self.on_binning_values_changed: self.on_binning_values_changed(self.__hardware_source.binning_values) if self.on_monitor_button_state_changed: has_monitor = self.__hardware_source and self.__hardware_source.features.get("has_monitor", False) self.on_monitor_button_state_changed(has_monitor, has_monitor) self.__update_buttons() if self.on_profiles_changed: profile_items = self.__hardware_source.modes self.on_profiles_changed(profile_items) self.__update_profile_index(self.__hardware_source.selected_profile_index) if self.on_data_item_states_changed: self.on_data_item_states_changed(list())
[ "def", "initialize_state", "(", "self", ")", ":", "if", "self", ".", "__hardware_source", ":", "self", ".", "__profile_changed_event_listener", "=", "self", ".", "__hardware_source", ".", "profile_changed_event", ".", "listen", "(", "self", ".", "__update_profile_index", ")", "self", ".", "__frame_parameters_changed_event_listener", "=", "self", ".", "__hardware_source", ".", "frame_parameters_changed_event", ".", "listen", "(", "self", ".", "__update_frame_parameters", ")", "self", ".", "__data_item_states_changed_event_listener", "=", "self", ".", "__hardware_source", ".", "data_item_states_changed_event", ".", "listen", "(", "self", ".", "__data_item_states_changed", ")", "self", ".", "__acquisition_state_changed_event_listener", "=", "self", ".", "__hardware_source", ".", "acquisition_state_changed_event", ".", "listen", "(", "self", ".", "__acquisition_state_changed", ")", "self", ".", "__log_messages_event_listener", "=", "self", ".", "__hardware_source", ".", "log_messages_event", ".", "listen", "(", "self", ".", "__log_messages", ")", "if", "self", ".", "on_display_name_changed", ":", "self", ".", "on_display_name_changed", "(", "self", ".", "display_name", ")", "if", "self", ".", "on_binning_values_changed", ":", "self", ".", "on_binning_values_changed", "(", "self", ".", "__hardware_source", ".", "binning_values", ")", "if", "self", ".", "on_monitor_button_state_changed", ":", "has_monitor", "=", "self", ".", "__hardware_source", "and", "self", ".", "__hardware_source", ".", "features", ".", "get", "(", "\"has_monitor\"", ",", "False", ")", "self", ".", "on_monitor_button_state_changed", "(", "has_monitor", ",", "has_monitor", ")", "self", ".", "__update_buttons", "(", ")", "if", "self", ".", "on_profiles_changed", ":", "profile_items", "=", "self", ".", "__hardware_source", ".", "modes", "self", ".", "on_profiles_changed", "(", "profile_items", ")", "self", ".", "__update_profile_index", "(", "self", ".", "__hardware_source", ".", "selected_profile_index", ")", "if", "self", ".", "on_data_item_states_changed", ":", "self", ".", "on_data_item_states_changed", "(", "list", "(", ")", ")" ]
Call this to initialize the state of the UI after everything has been connected.
[ "Call", "this", "to", "initialize", "the", "state", "of", "the", "UI", "after", "everything", "has", "been", "connected", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py#L221-L242
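initialize_state leans on a "notify only if a handler is wired" convention; in miniature, with hypothetical names:

class Controller:
    def __init__(self):
        self.on_display_name_changed = None   # UI assigns this before init

    def initialize_state(self):
        if self.on_display_name_changed:      # skip if nothing is listening
            self.on_display_name_changed("Camera 1")

c = Controller()
c.on_display_name_changed = print   # UI wires its handler first
c.initialize_state()                # prints: Camera 1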
nion-software/nionswift-instrumentation-kit
nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py
CameraControlStateController.handle_play_pause_clicked
def handle_play_pause_clicked(self): """ Call this when the user clicks the play/pause button. """ if self.__hardware_source: if self.is_playing: self.__hardware_source.stop_playing() else: self.__hardware_source.start_playing()
python
def handle_play_pause_clicked(self): """ Call this when the user clicks the play/pause button. """ if self.__hardware_source: if self.is_playing: self.__hardware_source.stop_playing() else: self.__hardware_source.start_playing()
[ "def", "handle_play_pause_clicked", "(", "self", ")", ":", "if", "self", ".", "__hardware_source", ":", "if", "self", ".", "is_playing", ":", "self", ".", "__hardware_source", ".", "stop_playing", "(", ")", "else", ":", "self", ".", "__hardware_source", ".", "start_playing", "(", ")" ]
Call this when the user clicks the play/pause button.
[ "Call", "this", "when", "the", "user", "clicks", "the", "play", "/", "pause", "button", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/CameraControlPanel.py#L249-L255
jneight/django-earthdistance
django_earthdistance/models.py
LlToEarth.resolve_expression
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """Set up any data here; this method will be called before the final SQL is generated""" c = self.copy() c.is_summary = summarize c.for_save = for_save final_points = [] for i, p in enumerate(self.params): try: float(p) except: _, source, _, join_list, last = query.setup_joins( six.text_type(p).split('__'), query.model._meta, query.get_initial_alias())[:5] target, alias, _ = query.trim_joins(source, join_list, last) final_points.append("%s.%s" % (alias, target[0].get_attname_column()[1])) else: final_points.append(six.text_type(p)) c.params = final_points return c
python
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """Set up any data here; this method will be called before the final SQL is generated""" c = self.copy() c.is_summary = summarize c.for_save = for_save final_points = [] for i, p in enumerate(self.params): try: float(p) except: _, source, _, join_list, last = query.setup_joins( six.text_type(p).split('__'), query.model._meta, query.get_initial_alias())[:5] target, alias, _ = query.trim_joins(source, join_list, last) final_points.append("%s.%s" % (alias, target[0].get_attname_column()[1])) else: final_points.append(six.text_type(p)) c.params = final_points return c
[ "def", "resolve_expression", "(", "self", ",", "query", "=", "None", ",", "allow_joins", "=", "True", ",", "reuse", "=", "None", ",", "summarize", "=", "False", ",", "for_save", "=", "False", ")", ":", "c", "=", "self", ".", "copy", "(", ")", "c", ".", "is_summary", "=", "summarize", "c", ".", "for_save", "=", "for_save", "final_points", "=", "[", "]", "for", "i", ",", "p", "in", "enumerate", "(", "self", ".", "params", ")", ":", "try", ":", "float", "(", "p", ")", "except", ":", "_", ",", "source", ",", "_", ",", "join_list", ",", "last", "=", "query", ".", "setup_joins", "(", "six", ".", "text_type", "(", "p", ")", ".", "split", "(", "'__'", ")", ",", "query", ".", "model", ".", "_meta", ",", "query", ".", "get_initial_alias", "(", ")", ")", "[", ":", "5", "]", "target", ",", "alias", ",", "_", "=", "query", ".", "trim_joins", "(", "source", ",", "join_list", ",", "last", ")", "final_points", ".", "append", "(", "\"%s.%s\"", "%", "(", "alias", ",", "target", "[", "0", "]", ".", "get_attname_column", "(", ")", "[", "1", "]", ")", ")", "else", ":", "final_points", ".", "append", "(", "six", ".", "text_type", "(", "p", ")", ")", "c", ".", "params", "=", "final_points", "return", "c" ]
Set up any data here; this method will be called before the final SQL is generated
[ "Setup", "any", "data", "here", "this", "method", "will", "be", "called", "before", "final", "SQL", "is", "generated" ]
train
https://github.com/jneight/django-earthdistance/blob/d9e620778a8bb49ae8e73ea161fee3e832f1af77/django_earthdistance/models.py#L20-L37
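The try/except float(p) dispatch above separates numeric literals from ORM field paths; a standalone sketch with the join machinery stubbed out (the rendered column string here is fake, where the real code runs query.setup_joins()/trim_joins() to get "alias.column"):

def classify_params(params):
    final_points = []
    for p in params:
        try:
            float(p)
        except (TypeError, ValueError):
            # Not a number: treat it as a field path to be resolved.
            final_points.append('"table"."%s"' % p.replace("__", "_"))
        else:
            final_points.append(str(p))
    return final_points

print(classify_params(["40.7", "-73.9"]))            # literals pass through
print(classify_params(["venue__latitude", "0.0"]))   # field path gets resolved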
jneight/django-earthdistance
django_earthdistance/models.py
EarthDistance.resolve_expression
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """Prepare SQL from inner functions (ll_to_earth or any other)""" c = self.copy() c.is_summary = summarize c.for_save = for_save for pos, expression in enumerate(self.expressions): c.expressions[pos] = expression.resolve_expression(query, allow_joins, reuse, summarize) return c
python
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """Prepare SQL from inner functions (ll_to_earth or any other)""" c = self.copy() c.is_summary = summarize c.for_save = for_save for pos, expression in enumerate(self.expressions): c.expressions[pos] = expression.resolve_expression(query, allow_joins, reuse, summarize) return c
[ "def", "resolve_expression", "(", "self", ",", "query", "=", "None", ",", "allow_joins", "=", "True", ",", "reuse", "=", "None", ",", "summarize", "=", "False", ",", "for_save", "=", "False", ")", ":", "c", "=", "self", ".", "copy", "(", ")", "c", ".", "is_summary", "=", "summarize", "c", ".", "for_save", "=", "for_save", "for", "pos", ",", "expression", "in", "enumerate", "(", "self", ".", "expressions", ")", ":", "c", ".", "expressions", "[", "pos", "]", "=", "expression", ".", "resolve_expression", "(", "query", ",", "allow_joins", ",", "reuse", ",", "summarize", ")", "return", "c" ]
Prepare SQL from inner functions (ll_to_earth or any other)
[ "Prepare", "SQL", "from", "inner", "funcions", "(", "ll_to_earth", "or", "any", "other", ")" ]
train
https://github.com/jneight/django-earthdistance/blob/d9e620778a8bb49ae8e73ea161fee3e832f1af77/django_earthdistance/models.py#L57-L64
jneight/django-earthdistance
django_earthdistance/models.py
EarthDistanceQuerySet.in_distance
def in_distance(self, distance, fields, points, annotate='_ed_distance'): """Filter rows inside a circumference of radius `distance` :param distance: max distance to allow :param fields: `tuple` with the fields to filter (latitude, longitude) :param points: center of the circumference (latitude, longitude) :param annotate: name where the distance will be annotated """ clone = self._clone() return clone.annotate( **{annotate: EarthDistance([ LlToEarth(fields), LlToEarth(points)]) }).filter(**{'{0}__lte'.format(annotate): distance})
python
def in_distance(self, distance, fields, points, annotate='_ed_distance'): """Filter rows inside a circumference of radius `distance` :param distance: max distance to allow :param fields: `tuple` with the fields to filter (latitude, longitude) :param points: center of the circumference (latitude, longitude) :param annotate: name where the distance will be annotated """ clone = self._clone() return clone.annotate( **{annotate: EarthDistance([ LlToEarth(fields), LlToEarth(points)]) }).filter(**{'{0}__lte'.format(annotate): distance})
[ "def", "in_distance", "(", "self", ",", "distance", ",", "fields", ",", "points", ",", "annotate", "=", "'_ed_distance'", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "return", "clone", ".", "annotate", "(", "*", "*", "{", "annotate", ":", "EarthDistance", "(", "[", "LlToEarth", "(", "fields", ")", ",", "LlToEarth", "(", "points", ")", "]", ")", "}", ")", ".", "filter", "(", "*", "*", "{", "'{0}__lte'", ".", "format", "(", "annotate", ")", ":", "distance", "}", ")" ]
Filter rows inside a circumference of radius `distance` :param distance: max distance to allow :param fields: `tuple` with the fields to filter (latitude, longitude) :param points: center of the circumference (latitude, longitude) :param annotate: name where the distance will be annotated
[ "Filter", "rows", "inside", "a", "circunference", "of", "radius", "distance", "distance" ]
train
https://github.com/jneight/django-earthdistance/blob/d9e620778a8bb49ae8e73ea161fee3e832f1af77/django_earthdistance/models.py#L76-L89
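Hedged usage sketch for in_distance, shown as comments because it needs a configured Django project; it assumes a model whose manager is built from EarthDistanceQuerySet and which stores latitude/longitude columns (model and field names are hypothetical):

# nearby = (Venue.objects
#           .in_distance(5000,                          # radius in metres
#                        fields=['latitude', 'longitude'],
#                        points=[40.7128, -74.0060])    # circle centre
#           .order_by('_ed_distance'))                  # annotated distance
# for venue in nearby:
#     print(venue.name, venue._ed_distance)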
HolmesNL/confidence
confidence/models.py
Configuration.get
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True): """ Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved """ value = self._source steps_taken = [] try: # walk through the values dictionary for step in path.split(self._separator): steps_taken.append(step) value = value[step] if as_type: return as_type(value) elif isinstance(value, Mapping): # create an instance of our current type, copying 'configured' properties / policies namespace = type(self)(separator=self._separator, missing=self._missing) namespace._source = value # carry the root object from namespace to namespace, references are always resolved from root namespace._root = self._root return namespace elif resolve_references and isinstance(value, str): # only resolve references in str-type values (the only way they can be expressed) return self._resolve(value) else: return value except ConfiguredReferenceError: # also a KeyError, but this one should bubble to caller raise except KeyError as e: if default is not _NoDefault: return default else: missing_key = self._separator.join(steps_taken) raise NotConfiguredError('no configuration for key {}'.format(missing_key), key=missing_key) from e
python
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True): """ Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved """ value = self._source steps_taken = [] try: # walk through the values dictionary for step in path.split(self._separator): steps_taken.append(step) value = value[step] if as_type: return as_type(value) elif isinstance(value, Mapping): # create an instance of our current type, copying 'configured' properties / policies namespace = type(self)(separator=self._separator, missing=self._missing) namespace._source = value # carry the root object from namespace to namespace, references are always resolved from root namespace._root = self._root return namespace elif resolve_references and isinstance(value, str): # only resolve references in str-type values (the only way they can be expressed) return self._resolve(value) else: return value except ConfiguredReferenceError: # also a KeyError, but this one should bubble to caller raise except KeyError as e: if default is not _NoDefault: return default else: missing_key = self._separator.join(steps_taken) raise NotConfiguredError('no configuration for key {}'.format(missing_key), key=missing_key) from e
[ "def", "get", "(", "self", ",", "path", ",", "default", "=", "_NoDefault", ",", "as_type", "=", "None", ",", "resolve_references", "=", "True", ")", ":", "value", "=", "self", ".", "_source", "steps_taken", "=", "[", "]", "try", ":", "# walk through the values dictionary", "for", "step", "in", "path", ".", "split", "(", "self", ".", "_separator", ")", ":", "steps_taken", ".", "append", "(", "step", ")", "value", "=", "value", "[", "step", "]", "if", "as_type", ":", "return", "as_type", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "Mapping", ")", ":", "# create an instance of our current type, copying 'configured' properties / policies", "namespace", "=", "type", "(", "self", ")", "(", "separator", "=", "self", ".", "_separator", ",", "missing", "=", "self", ".", "_missing", ")", "namespace", ".", "_source", "=", "value", "# carry the root object from namespace to namespace, references are always resolved from root", "namespace", ".", "_root", "=", "self", ".", "_root", "return", "namespace", "elif", "resolve_references", "and", "isinstance", "(", "value", ",", "str", ")", ":", "# only resolve references in str-type values (the only way they can be expressed)", "return", "self", ".", "_resolve", "(", "value", ")", "else", ":", "return", "value", "except", "ConfiguredReferenceError", ":", "# also a KeyError, but this one should bubble to caller", "raise", "except", "KeyError", "as", "e", ":", "if", "default", "is", "not", "_NoDefault", ":", "return", "default", "else", ":", "missing_key", "=", "self", ".", "_separator", ".", "join", "(", "steps_taken", ")", "raise", "NotConfiguredError", "(", "'no configuration for key {}'", ".", "format", "(", "missing_key", ")", ",", "key", "=", "missing_key", ")", "from", "e" ]
Gets a value for the specified path. :param path: the configuration key to fetch a value for, steps separated by the separator supplied to the constructor (default ``.``) :param default: a value to return if no value is found for the supplied path (``None`` is allowed) :param as_type: an optional callable to apply to the value found for the supplied path (possibly raising exceptions of its own if the value can not be coerced to the expected type) :param resolve_references: whether to resolve references in values :return: the value associated with the supplied configuration key, if available, or a supplied default value if the key was not found :raises ConfigurationError: when no value was found for *path* and *default* was not provided or a reference could not be resolved
[ "Gets", "a", "value", "for", "the", "specified", "path", "." ]
train
https://github.com/HolmesNL/confidence/blob/e14d2d8769a01fa55676716f7a2f22714c2616d3/confidence/models.py#L113-L161
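The dotted-path walk in get() can be re-implemented standalone to show the lookup and default handling; this is an illustration of the mechanism, not the library's own code, and references/namespaces are omitted:

_MISSING = object()

def get_path(source, path, default=_MISSING, separator='.'):
    value = source
    steps_taken = []
    for step in path.split(separator):
        steps_taken.append(step)
        try:
            value = value[step]       # walk one level deeper
        except KeyError:
            if default is not _MISSING:
                return default
            raise KeyError('no configuration for key {}'.format(
                separator.join(steps_taken)))
    return value

cfg = {'database': {'host': 'localhost', 'port': 5432}}
print(get_path(cfg, 'database.host'))                 # localhost
print(get_path(cfg, 'database.user', default=None))   # None, no exception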
craigahobbs/chisel
src/chisel/action.py
action
def action(action_callback=None, **kwargs): """ Chisel action decorator """ if action_callback is None: return lambda fn: action(fn, **kwargs) else: return Action(action_callback, **kwargs).decorate_module(action_callback)
python
def action(action_callback=None, **kwargs): """ Chisel action decorator """ if action_callback is None: return lambda fn: action(fn, **kwargs) else: return Action(action_callback, **kwargs).decorate_module(action_callback)
[ "def", "action", "(", "action_callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "action_callback", "is", "None", ":", "return", "lambda", "fn", ":", "action", "(", "fn", ",", "*", "*", "kwargs", ")", "else", ":", "return", "Action", "(", "action_callback", ",", "*", "*", "kwargs", ")", ".", "decorate_module", "(", "action_callback", ")" ]
Chisel action decorator
[ "Chisel", "action", "decorator" ]
train
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/action.py#L15-L23
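The None-check above is the standard trick for a decorator usable both bare and with keyword arguments; a self-contained sketch in which a hypothetical attribute stands in for Action(...).decorate_module(...):

def action(fn=None, **kwargs):
    if fn is None:
        # Called as @action(...): return a decorator awaiting the function.
        return lambda f: action(f, **kwargs)
    fn._action_options = kwargs   # stand-in for wrapping in Action(...)
    return fn

@action
def ping(ctx, req):
    return {}

@action(name='my_ping')
def pong(ctx, req):
    return {}

print(ping._action_options, pong._action_options)   # {} {'name': 'my_ping'}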
nion-software/nionswift-instrumentation-kit
nion/instrumentation/scan_base.py
ScanHardwareSource.record_async
def record_async(self, callback_fn): """ Call this when the user clicks the record button. """ assert callable(callback_fn) def record_thread(): current_frame_time = self.get_current_frame_time() def handle_finished(xdatas): callback_fn(xdatas) self.start_recording(current_frame_time, finished_callback_fn=handle_finished) self.__thread = threading.Thread(target=record_thread) self.__thread.start()
python
def record_async(self, callback_fn): """ Call this when the user clicks the record button. """ assert callable(callback_fn) def record_thread(): current_frame_time = self.get_current_frame_time() def handle_finished(xdatas): callback_fn(xdatas) self.start_recording(current_frame_time, finished_callback_fn=handle_finished) self.__thread = threading.Thread(target=record_thread) self.__thread.start()
[ "def", "record_async", "(", "self", ",", "callback_fn", ")", ":", "assert", "callable", "(", "callback_fn", ")", "def", "record_thread", "(", ")", ":", "current_frame_time", "=", "self", ".", "get_current_frame_time", "(", ")", "def", "handle_finished", "(", "xdatas", ")", ":", "callback_fn", "(", "xdatas", ")", "self", ".", "start_recording", "(", "current_frame_time", ",", "finished_callback_fn", "=", "handle_finished", ")", "self", ".", "__thread", "=", "threading", ".", "Thread", "(", "target", "=", "record_thread", ")", "self", ".", "__thread", ".", "start", "(", ")" ]
Call this when the user clicks the record button.
[ "Call", "this", "when", "the", "user", "clicks", "the", "record", "button", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/scan_base.py#L801-L814
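The shape of record_async, fire a worker thread and hand results to the caller's callback, reduces to this runnable sketch; the acquisition itself is faked with a sleep:

import threading
import time

def record_async(callback_fn):
    assert callable(callback_fn)
    def record_thread():
        time.sleep(0.1)           # stands in for the actual recording
        callback_fn(["xdata"])    # deliver results on the worker thread
    thread = threading.Thread(target=record_thread)
    thread.start()
    return thread

t = record_async(print)
t.join()   # prints: ['xdata']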
nion-software/nionswift-instrumentation-kit
nion/instrumentation/scan_base.py
ScanHardwareSource.get_buffer_data
def get_buffer_data(self, start: int, count: int) -> typing.Optional[typing.List[typing.List[typing.Dict]]]: """Get recently acquired (buffered) data. The start parameter can be negative to index backwards from the end. If start refers to a buffer item that doesn't exist or if count requests too many buffer items given the start value, the returned list may have fewer elements than count. Returns None if buffering is not enabled. """ if hasattr(self.__device, "get_buffer_data"): buffer_data = self.__device.get_buffer_data(start, count) enabled_channel_states = list() for channel_index in range(self.channel_count): channel_state = self.get_channel_state(channel_index) if channel_state.enabled: enabled_channel_states.append(channel_state) scan_id = uuid.uuid4() for data_element_group in buffer_data: for channel_index, (data_element, channel_state) in enumerate(zip(data_element_group, enabled_channel_states)): channel_name = channel_state.name channel_id = channel_state.channel_id if self.subscan_enabled: channel_id += "_subscan" properties = data_element["properties"] update_autostem_properties(data_element, self.__stem_controller) update_calibration_metadata(data_element, None, data_element["data"].shape, scan_id, None, channel_name, channel_id, properties, None, 0) data_element["properties"]["channel_index"] = channel_index data_element["properties"]["hardware_source_name"] = self.display_name data_element["properties"]["hardware_source_id"] = self.hardware_source_id return buffer_data return None
python
def get_buffer_data(self, start: int, count: int) -> typing.Optional[typing.List[typing.List[typing.Dict]]]: """Get recently acquired (buffered) data. The start parameter can be negative to index backwards from the end. If start refers to a buffer item that doesn't exist or if count requests too many buffer items given the start value, the returned list may have fewer elements than count. Returns None if buffering is not enabled. """ if hasattr(self.__device, "get_buffer_data"): buffer_data = self.__device.get_buffer_data(start, count) enabled_channel_states = list() for channel_index in range(self.channel_count): channel_state = self.get_channel_state(channel_index) if channel_state.enabled: enabled_channel_states.append(channel_state) scan_id = uuid.uuid4() for data_element_group in buffer_data: for channel_index, (data_element, channel_state) in enumerate(zip(data_element_group, enabled_channel_states)): channel_name = channel_state.name channel_id = channel_state.channel_id if self.subscan_enabled: channel_id += "_subscan" properties = data_element["properties"] update_autostem_properties(data_element, self.__stem_controller) update_calibration_metadata(data_element, None, data_element["data"].shape, scan_id, None, channel_name, channel_id, properties, None, 0) data_element["properties"]["channel_index"] = channel_index data_element["properties"]["hardware_source_name"] = self.display_name data_element["properties"]["hardware_source_id"] = self.hardware_source_id return buffer_data return None
[ "def", "get_buffer_data", "(", "self", ",", "start", ":", "int", ",", "count", ":", "int", ")", "->", "typing", ".", "Optional", "[", "typing", ".", "List", "[", "typing", ".", "List", "[", "typing", ".", "Dict", "]", "]", "]", ":", "if", "hasattr", "(", "self", ".", "__device", ",", "\"get_buffer_data\"", ")", ":", "buffer_data", "=", "self", ".", "__device", ".", "get_buffer_data", "(", "start", ",", "count", ")", "enabled_channel_states", "=", "list", "(", ")", "for", "channel_index", "in", "range", "(", "self", ".", "channel_count", ")", ":", "channel_state", "=", "self", ".", "get_channel_state", "(", "channel_index", ")", "if", "channel_state", ".", "enabled", ":", "enabled_channel_states", ".", "append", "(", "channel_state", ")", "scan_id", "=", "uuid", ".", "uuid4", "(", ")", "for", "data_element_group", "in", "buffer_data", ":", "for", "channel_index", ",", "(", "data_element", ",", "channel_state", ")", "in", "enumerate", "(", "zip", "(", "data_element_group", ",", "enabled_channel_states", ")", ")", ":", "channel_name", "=", "channel_state", ".", "name", "channel_id", "=", "channel_state", ".", "channel_id", "if", "self", ".", "subscan_enabled", ":", "channel_id", "+=", "\"_subscan\"", "properties", "=", "data_element", "[", "\"properties\"", "]", "update_autostem_properties", "(", "data_element", ",", "self", ".", "__stem_controller", ")", "update_calibration_metadata", "(", "data_element", ",", "None", ",", "data_element", "[", "\"data\"", "]", ".", "shape", ",", "scan_id", ",", "None", ",", "channel_name", ",", "channel_id", ",", "properties", ",", "None", ",", "0", ")", "data_element", "[", "\"properties\"", "]", "[", "\"channel_index\"", "]", "=", "channel_index", "data_element", "[", "\"properties\"", "]", "[", "\"hardware_source_name\"", "]", "=", "self", ".", "display_name", "data_element", "[", "\"properties\"", "]", "[", "\"hardware_source_id\"", "]", "=", "self", ".", "hardware_source_id", "return", "buffer_data", "return", "None" ]
Get recently acquired (buffered) data. The start parameter can be negative to index backwards from the end. If start refers to a buffer item that doesn't exist or if count requests too many buffer items given the start value, the returned list may have fewer elements than count. Returns None if buffering is not enabled.
[ "Get", "recently", "acquired", "(", "buffered", ")", "data", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/scan_base.py#L910-L946
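The start/count semantics described in the docstring (negative start indexes back from the end, short reads are allowed) behave like plain slicing; a sketch, since the device-side implementation is not shown here:

def get_buffer_slice(buffer, start, count):
    if start < 0:
        start = max(len(buffer) + start, 0)   # index back from the end
    return buffer[start:start + count]        # may return fewer than count

frames = ["f0", "f1", "f2", "f3"]
print(get_buffer_slice(frames, -2, 2))   # ['f2', 'f3']
print(get_buffer_slice(frames, 3, 5))    # ['f3'] -- fewer than requested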
tipsi/tipsi_tools
tipsi_tools/tipsi_logging.py
get_plain_logname
def get_plain_logname(base_name, root_dir, enable_json): """ we nest all plain logs to prevent double log shipping """ if enable_json: nested_dir = os.path.join(root_dir, 'plain') if os.path.exists(root_dir) and not os.path.exists(nested_dir): os.mkdir(nested_dir) root_dir = nested_dir return os.path.join(root_dir, '{}.log'.format(base_name))
python
def get_plain_logname(base_name, root_dir, enable_json): """ we nest all plain logs to prevent double log shipping """ if enable_json: nested_dir = os.path.join(root_dir, 'plain') if os.path.exists(root_dir) and not os.path.exists(nested_dir): os.mkdir(nested_dir) root_dir = nested_dir return os.path.join(root_dir, '{}.log'.format(base_name))
[ "def", "get_plain_logname", "(", "base_name", ",", "root_dir", ",", "enable_json", ")", ":", "if", "enable_json", ":", "nested_dir", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'plain'", ")", "if", "os", ".", "path", ".", "exists", "(", "root_dir", ")", "and", "not", "os", ".", "path", ".", "exists", "(", "nested_dir", ")", ":", "os", ".", "mkdir", "(", "nested_dir", ")", "root_dir", "=", "nested_dir", "return", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'{}.log'", ".", "format", "(", "base_name", ")", ")" ]
we nest all plain logs to prevent double log shipping
[ "we", "nest", "all", "plain", "logs", "to", "prevent", "double", "log", "shipping" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/tipsi_logging.py#L50-L59
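With the function above in scope, the nesting behaviour looks like this (paths go through tempfile so the example has no lasting side effects):

import os
import tempfile

root = tempfile.mkdtemp()
print(get_plain_logname('worker', root, enable_json=False))
# <root>/worker.log
print(get_plain_logname('worker', root, enable_json=True))
# <root>/plain/worker.log, and <root>/plain now exists
print(os.path.isdir(os.path.join(root, 'plain')))   # True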
tipsi/tipsi_tools
tipsi_tools/tipsi_logging.py
setup_logger
def setup_logger( base_name, root_dir=None, enable_json=True, json_formatter='tipsi_tools.tipsi_logging.JSFormatter', loggers={}, ): """ json_formatter: 'fan.contrib.django.span_formatter.SpanFormatter' - add INSTALLATION_ID, SPAN and etc """ if not root_dir: root_dir = os.environ.get('LOG_DIR') assert root_dir, 'You should pass root_dir parameter or set env LOG_DIR' JSON_FORMATTER = { '()': json_formatter, 'env_vars': ['HOST_TYPE', 'TIPSI_CONFIG', 'TIPSI_BRANCH', 'CONTAINER_TYPE'], } default_loggers = { '': {'handlers': ['default'], 'level': 'DEBUG', 'propagate': True}, 'googleapiclient.discovery_cache': {'level': 'ERROR'}, 'boto3': {'level': 'INFO'}, 'botocore': {'level': 'INFO'}, 'kazoo': {'level': 'INFO'}, 'urllib3': {'level': 'INFO'}, } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'json': JSON_FORMATTER, 'standard': {'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'}, }, 'handlers': {'default': base_handler(get_plain_logname(base_name, root_dir, enable_json))}, 'loggers': {**default_loggers, **loggers}, } if enable_json: LOGGING['handlers']['json'] = base_handler( os.path.join(root_dir, '{}.json_log'.format(base_name)), formatter='json' ) LOGGING['loggers']['']['handlers'].append('json') logging.config.dictConfig(LOGGING)
python
def setup_logger( base_name, root_dir=None, enable_json=True, json_formatter='tipsi_tools.tipsi_logging.JSFormatter', loggers={}, ): """ json_formatter: 'fan.contrib.django.span_formatter.SpanFormatter' - add INSTALLATION_ID, SPAN and etc """ if not root_dir: root_dir = os.environ.get('LOG_DIR') assert root_dir, 'You should pass root_dir parameter or set env LOG_DIR' JSON_FORMATTER = { '()': json_formatter, 'env_vars': ['HOST_TYPE', 'TIPSI_CONFIG', 'TIPSI_BRANCH', 'CONTAINER_TYPE'], } default_loggers = { '': {'handlers': ['default'], 'level': 'DEBUG', 'propagate': True}, 'googleapiclient.discovery_cache': {'level': 'ERROR'}, 'boto3': {'level': 'INFO'}, 'botocore': {'level': 'INFO'}, 'kazoo': {'level': 'INFO'}, 'urllib3': {'level': 'INFO'}, } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'json': JSON_FORMATTER, 'standard': {'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'}, }, 'handlers': {'default': base_handler(get_plain_logname(base_name, root_dir, enable_json))}, 'loggers': {**default_loggers, **loggers}, } if enable_json: LOGGING['handlers']['json'] = base_handler( os.path.join(root_dir, '{}.json_log'.format(base_name)), formatter='json' ) LOGGING['loggers']['']['handlers'].append('json') logging.config.dictConfig(LOGGING)
[ "def", "setup_logger", "(", "base_name", ",", "root_dir", "=", "None", ",", "enable_json", "=", "True", ",", "json_formatter", "=", "'tipsi_tools.tipsi_logging.JSFormatter'", ",", "loggers", "=", "{", "}", ",", ")", ":", "if", "not", "root_dir", ":", "root_dir", "=", "os", ".", "environ", ".", "get", "(", "'LOG_DIR'", ")", "assert", "root_dir", ",", "'You should pass root_dir parameter or set env LOG_DIR'", "JSON_FORMATTER", "=", "{", "'()'", ":", "json_formatter", ",", "'env_vars'", ":", "[", "'HOST_TYPE'", ",", "'TIPSI_CONFIG'", ",", "'TIPSI_BRANCH'", ",", "'CONTAINER_TYPE'", "]", ",", "}", "default_loggers", "=", "{", "''", ":", "{", "'handlers'", ":", "[", "'default'", "]", ",", "'level'", ":", "'DEBUG'", ",", "'propagate'", ":", "True", "}", ",", "'googleapicliet.discovery_cache'", ":", "{", "'level'", ":", "'ERROR'", "}", ",", "'boto3'", ":", "{", "'level'", ":", "'INFO'", "}", ",", "'botocore'", ":", "{", "'level'", ":", "'INFO'", "}", ",", "'kazoo'", ":", "{", "'level'", ":", "'INFO'", "}", ",", "'urllib3'", ":", "{", "'level'", ":", "'INFO'", "}", ",", "}", "LOGGING", "=", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "False", ",", "'formatters'", ":", "{", "'json'", ":", "JSON_FORMATTER", ",", "'standard'", ":", "{", "'format'", ":", "'%(asctime)s [%(levelname)s] %(name)s: %(message)s'", "}", ",", "}", ",", "'handlers'", ":", "{", "'default'", ":", "base_handler", "(", "get_plain_logname", "(", "base_name", ",", "root_dir", ",", "enable_json", ")", ")", "}", ",", "'loggers'", ":", "{", "*", "*", "default_loggers", ",", "*", "*", "loggers", "}", ",", "}", "if", "enable_json", ":", "LOGGING", "[", "'handlers'", "]", "[", "'json'", "]", "=", "base_handler", "(", "os", ".", "path", ".", "join", "(", "root_dir", ",", "'{}.json_log'", ".", "format", "(", "base_name", ")", ")", ",", "formatter", "=", "'json'", ")", "LOGGING", "[", "'loggers'", "]", "[", "''", "]", "[", "'handlers'", "]", ".", "append", "(", "'json'", ")", "logging", ".", "config", ".", "dictConfig", "(", "LOGGING", ")" ]
json_formatter: 'fan.contrib.django.span_formatter.SpanFormatter' - add INSTALLATION_ID, SPAN and etc
[ "json_formatter", ":", "fan", ".", "contrib", ".", "django", ".", "span_formatter", ".", "SpanFormatter", "-", "add", "INSTALLATION_ID", "SPAN", "and", "etc" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/tipsi_logging.py#L62-L106
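Hedged usage sketch, shown as comments because dictConfig imports the configured JSON formatter class, so it only runs where tipsi_tools is installed:

# import logging
# setup_logger('worker', root_dir='/var/log/myapp')   # or set LOG_DIR instead
# logging.getLogger(__name__).info('hello')
# # -> plain output in /var/log/myapp/plain/worker.log,
# #    JSON output in /var/log/myapp/worker.json_log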
tipsi/tipsi_tools
tipsi_tools/unix.py
run
def run(command): ''' Run command in shell, accepts command construction from list Return (return_code, stdout, stderr) stdout and stderr - as list of strings ''' if isinstance(command, list): command = ' '.join(command) out = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return (out.returncode, _prepare(out.stdout), _prepare(out.stderr))
python
def run(command): ''' Run command in shell, accepts command construction from list Return (return_code, stdout, stderr) stdout and stderr - as list of strings ''' if isinstance(command, list): command = ' '.join(command) out = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return (out.returncode, _prepare(out.stdout), _prepare(out.stderr))
[ "def", "run", "(", "command", ")", ":", "if", "isinstance", "(", "command", ",", "list", ")", ":", "command", "=", "' '", ".", "join", "(", "command", ")", "out", "=", "subprocess", ".", "run", "(", "command", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "return", "(", "out", ".", "returncode", ",", "_prepare", "(", "out", ".", "stdout", ")", ",", "_prepare", "(", "out", ".", "stderr", ")", ")" ]
Run command in shell, accepts command construction from list Return (return_code, stdout, stderr) stdout and stderr - as list of strings
[ "Run", "command", "in", "shell", "accepts", "command", "construction", "from", "list", "Return", "(", "return_code", "stdout", "stderr", ")", "stdout", "and", "stderr", "-", "as", "list", "of", "strings" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L22-L31
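With run above in scope (and assuming _prepare, which is not shown, splits the captured bytes into a list of lines), a POSIX-shell example:

code, out, err = run(['echo', 'hello'])   # list form is joined with spaces
print(code, out, err)                     # 0 ['hello'] []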
tipsi/tipsi_tools
tipsi_tools/unix.py
succ
def succ(cmd, check_stderr=True, stdout=None, stderr=None): ''' Alias to run with check return code and stderr ''' code, out, err = run(cmd) # Because we're raising error, sometimes we want to process stdout/stderr after catching error # so we're copying these outputs if required if stdout is not None: stdout[:] = out if stderr is not None: stderr[:] = err if code != 0: for l in out: print(l) assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err) if check_stderr: assert err == [], 'Error: {} {}'.format(err, code) return code, out, err
python
def succ(cmd, check_stderr=True, stdout=None, stderr=None):
    '''
    Alias to run with check return code and stderr
    '''
    code, out, err = run(cmd)
    # Because we're raising error, sometimes we want to process stdout/stderr after catching error
    # so we're copying these outputs if required
    if stdout is not None:
        stdout[:] = out
    if stderr is not None:
        stderr[:] = err
    if code != 0:
        for l in out:
            print(l)
    assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err)
    if check_stderr:
        assert err == [], 'Error: {} {}'.format(err, code)
    return code, out, err
[ "def", "succ", "(", "cmd", ",", "check_stderr", "=", "True", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ")", ":", "code", ",", "out", ",", "err", "=", "run", "(", "cmd", ")", "# Because we're raising error, sometimes we want to process stdout/stderr after catching error", "# so we're copying these outputs if required", "if", "stdout", "is", "not", "None", ":", "stdout", "[", ":", "]", "=", "out", "if", "stderr", "is", "not", "None", ":", "stderr", "[", ":", "]", "=", "err", "if", "code", "!=", "0", ":", "for", "l", "in", "out", ":", "print", "(", "l", ")", "assert", "code", "==", "0", ",", "'Return: {} {}\\nStderr: {}'", ".", "format", "(", "code", ",", "cmd", ",", "err", ")", "if", "check_stderr", ":", "assert", "err", "==", "[", "]", ",", "'Error: {} {}'", ".", "format", "(", "err", ",", "code", ")", "return", "code", ",", "out", ",", "err" ]
Alias to run with check return code and stderr
[ "Alias", "to", "run", "with", "check", "return", "code", "and", "stderr" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L34-L53
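Because succ reports failure via assert, the optional stdout/stderr list arguments let a caller inspect output after catching the error. A sketch (import path assumed, command illustrative):

from tipsi_tools.unix import succ

captured_err = []
try:
    succ('ls /nonexistent', stderr=captured_err)
except AssertionError:
    print(captured_err)  # the list was filled before the assertion fired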
tipsi/tipsi_tools
tipsi_tools/unix.py
wait_socket
def wait_socket(host, port, timeout=120):
    '''
    Wait for socket opened on remote side. Return False after timeout
    '''
    return wait_result(lambda: check_socket(host, port), True, timeout)
python
def wait_socket(host, port, timeout=120):
    '''
    Wait for socket opened on remote side. Return False after timeout
    '''
    return wait_result(lambda: check_socket(host, port), True, timeout)
[ "def", "wait_socket", "(", "host", ",", "port", ",", "timeout", "=", "120", ")", ":", "return", "wait_result", "(", "lambda", ":", "check_socket", "(", "host", ",", "port", ")", ",", "True", ",", "timeout", ")" ]
Wait for socket opened on remote side. Return False after timeout
[ "Wait", "for", "socket", "opened", "on", "remote", "side", ".", "Return", "False", "after", "timeout" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L132-L136
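Typical gating use: block until a service port is reachable before proceeding. Host and port below are illustrative.

from tipsi_tools.unix import wait_socket

if not wait_socket('127.0.0.1', 5432, timeout=30):
    raise RuntimeError('socket did not open within 30 seconds')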
tipsi/tipsi_tools
tipsi_tools/unix.py
interpolate_sysenv
def interpolate_sysenv(line, defaults={}):
    '''
    Format line system environment variables + defaults
    '''
    map = ChainMap(os.environ, defaults)
    return line.format(**map)
python
def interpolate_sysenv(line, defaults={}):
    '''
    Format line system environment variables + defaults
    '''
    map = ChainMap(os.environ, defaults)
    return line.format(**map)
[ "def", "interpolate_sysenv", "(", "line", ",", "defaults", "=", "{", "}", ")", ":", "map", "=", "ChainMap", "(", "os", ".", "environ", ",", "defaults", ")", "return", "line", ".", "format", "(", "*", "*", "map", ")" ]
Format line system environment variables + defaults
[ "Format", "line", "system", "environment", "variables", "+", "defaults" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L143-L148
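Environment variables win over the defaults because os.environ comes first in the ChainMap (the mutable defaults={} default is safe here, since ChainMap only reads it). A sketch with made-up variable names:

from tipsi_tools.unix import interpolate_sysenv

DSN = interpolate_sysenv(
    'postgresql://{PG_USER}:{PG_PASS}@{PG_HOST}:{PG_PORT}/{PG_NAME}',
    {'PG_USER': 'postgres', 'PG_PASS': '', 'PG_HOST': 'localhost',
     'PG_PORT': 5432, 'PG_NAME': 'postgres'},
)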
tipsi/tipsi_tools
tipsi_tools/unix.py
source
def source(fname):
    '''
    Acts similar to bash 'source' or '.' commands.
    '''
    rex = re.compile('(?:export |declare -x )?(.*?)="(.*?)"')
    out = call_out('source {} && export'.format(fname))
    out = [x for x in out if 'export' in x or 'declare' in x]
    out = {k: v for k, v in [rex.match(x).groups() for x in out if rex.match(x)]}
    for k, v in out.items():
        os.environ[k] = v
python
def source(fname):
    '''
    Acts similar to bash 'source' or '.' commands.
    '''
    rex = re.compile('(?:export |declare -x )?(.*?)="(.*?)"')
    out = call_out('source {} && export'.format(fname))
    out = [x for x in out if 'export' in x or 'declare' in x]
    out = {k: v for k, v in [rex.match(x).groups() for x in out if rex.match(x)]}
    for k, v in out.items():
        os.environ[k] = v
[ "def", "source", "(", "fname", ")", ":", "rex", "=", "re", ".", "compile", "(", "'(?:export |declare -x )?(.*?)=\"(.*?)\"'", ")", "out", "=", "call_out", "(", "'source {} && export'", ".", "format", "(", "fname", ")", ")", "out", "=", "[", "x", "for", "x", "in", "out", "if", "'export'", "in", "x", "or", "'declare'", "in", "x", "]", "out", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "[", "rex", ".", "match", "(", "x", ")", ".", "groups", "(", ")", "for", "x", "in", "out", "if", "rex", ".", "match", "(", "x", ")", "]", "}", "for", "k", ",", "v", "in", "out", ".", "items", "(", ")", ":", "os", ".", "environ", "[", "k", "]", "=", "v" ]
Acts similar to bash 'source' or '.' commands.
[ "Acts", "similar", "to", "bash", "source", "or", ".", "commands", "." ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L151-L160
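A sketch of pulling an env file into the current process (the file name is illustrative); after the call, exported names are visible through os.environ:

import os

from tipsi_tools.unix import source

source('./production.env')          # runs `source ./production.env && export` in a shell
print(os.environ.get('SOME_VAR'))   # exported variables are now in this process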
tipsi/tipsi_tools
tipsi_tools/unix.py
cd
def cd(dir_name):
    """
    do something in other directory and return back after block ended
    """
    old_path = os.path.abspath('.')
    os.chdir(dir_name)
    try:
        yield
        os.chdir(old_path)
    except Exception:
        os.chdir(old_path)
        raise
python
def cd(dir_name):
    """
    do something in other directory and return back after block ended
    """
    old_path = os.path.abspath('.')
    os.chdir(dir_name)
    try:
        yield
        os.chdir(old_path)
    except Exception:
        os.chdir(old_path)
        raise
[ "def", "cd", "(", "dir_name", ")", ":", "old_path", "=", "os", ".", "path", ".", "abspath", "(", "'.'", ")", "os", ".", "chdir", "(", "dir_name", ")", "try", ":", "yield", "os", ".", "chdir", "(", "old_path", ")", "except", "Exception", ":", "os", ".", "chdir", "(", "old_path", ")", "raise" ]
do something in other directory and return back after block ended
[ "do", "something", "in", "other", "directory", "and", "return", "back", "after", "block", "ended" ]
train
https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/unix.py#L164-L175
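The yield-based body implies cd is wrapped with contextlib.contextmanager; the decorator sits outside this snippet, so that is an assumption. Usage:

from tipsi_tools.unix import cd

with cd('/tmp'):
    ...  # path-relative work happens in /tmp
# back in the original directory here, even if the block raised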
non-Jedi/gyr
gyr/resources.py
Resource._is_new
def _is_new(self, identifier):
    """Returns True if identifier hasn't been seen before."""
    if identifier in self.tracker:
        return False
    else:
        self.tracker.append(identifier)
        self.tracker.pop(0)
        return True
python
def _is_new(self, identifier):
    """Returns True if identifier hasn't been seen before."""
    if identifier in self.tracker:
        return False
    else:
        self.tracker.append(identifier)
        self.tracker.pop(0)
        return True
[ "def", "_is_new", "(", "self", ",", "identifier", ")", ":", "if", "identifier", "in", "self", ".", "tracker", ":", "return", "False", "else", ":", "self", ".", "tracker", ".", "append", "(", "identifier", ")", "self", ".", "tracker", ".", "pop", "(", "0", ")", "return", "True" ]
Returns True if identifier hasn't been seen before.
[ "Returns", "True", "if", "identifier", "hasn", "t", "been", "seen", "before", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/resources.py#L35-L42
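The append-then-pop(0) pair keeps the tracker at a fixed length, so old identifiers are eventually evicted and read as "new" again. A self-contained illustration of the same bounded-dedup idea (not gyr's actual class):

class Tracker:
    def __init__(self, size=2):
        self.tracker = [None] * size  # bounded list of recent identifiers

    def _is_new(self, identifier):
        if identifier in self.tracker:
            return False
        self.tracker.append(identifier)
        self.tracker.pop(0)  # evict the oldest entry to keep the length fixed
        return True

t = Tracker(size=2)
assert t._is_new('txn-1')
assert not t._is_new('txn-1')   # recent duplicate is rejected
assert t._is_new('txn-2')
assert t._is_new('txn-3')       # evicts 'txn-1'
assert t._is_new('txn-1')       # forgotten, so treated as new again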
non-Jedi/gyr
gyr/resources.py
Room.on_get
def on_get(self, request, response, room_alias=None):
    """Called when a GET request is sent to /rooms/{room_alias}"""
    response.body = "{}"
    if self.handler(room_alias):
        response.status = falcon.HTTP_200
        self.api.create_room(alias=room_alias)
    else:
        response.status = falcon.HTTP_404
python
def on_get(self, request, response, room_alias=None):
    """Called when a GET request is sent to /rooms/{room_alias}"""
    response.body = "{}"
    if self.handler(room_alias):
        response.status = falcon.HTTP_200
        self.api.create_room(alias=room_alias)
    else:
        response.status = falcon.HTTP_404
[ "def", "on_get", "(", "self", ",", "request", ",", "response", ",", "room_alias", "=", "None", ")", ":", "response", ".", "body", "=", "\"{}\"", "if", "self", ".", "handler", "(", "room_alias", ")", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_200", "self", ".", "api", ".", "create_room", "(", "alias", "=", "room_alias", ")", "else", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_404" ]
Called when a GET request is sent to /rooms/{room_alias}
[ "Called", "when", "a", "GET", "request", "is", "sent", "to", "/", "rooms", "/", "{", "room_alias", "}" ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/resources.py#L48-L55
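A hedged wiring sketch: falcon maps the URI template onto on_get's room_alias argument, and a truthy handler return makes the resource claim and create the alias. The Room(handler, api) constructor order and matrix_api object are assumptions; only the route shape follows from the docstring.

import falcon
from gyr.resources import Room

def room_handler(room_alias):
    # return True to have the appservice claim this alias
    return room_alias is not None and room_alias.startswith('#bridge_')

application = falcon.API()  # falcon.App() on falcon >= 3
application.add_route('/rooms/{room_alias}', Room(room_handler, matrix_api))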
non-Jedi/gyr
gyr/resources.py
Transaction.on_put
def on_put(self, request, response, txn_id=None):
    """Responds to PUT request containing events."""
    response.body = "{}"
    # Check whether repeat txn_id
    if not self._is_new(txn_id):
        response.status = falcon.HTTP_200
        return
    request.context["body"] = request.stream.read()
    try:
        events = json.loads(request.context["body"].decode("utf-8"))["events"]
    except(KeyError, ValueError, UnicodeDecodeError):
        response.status = falcon.HTTP_400
        response.body = "Malformed request body"
        return
    if self.handler(EventStream(events, self.Api)):
        response.status = falcon.HTTP_200
    else:
        response.status = falcon.HTTP_400
python
def on_put(self, request, response, txn_id=None):
    """Responds to PUT request containing events."""
    response.body = "{}"
    # Check whether repeat txn_id
    if not self._is_new(txn_id):
        response.status = falcon.HTTP_200
        return
    request.context["body"] = request.stream.read()
    try:
        events = json.loads(request.context["body"].decode("utf-8"))["events"]
    except(KeyError, ValueError, UnicodeDecodeError):
        response.status = falcon.HTTP_400
        response.body = "Malformed request body"
        return
    if self.handler(EventStream(events, self.Api)):
        response.status = falcon.HTTP_200
    else:
        response.status = falcon.HTTP_400
[ "def", "on_put", "(", "self", ",", "request", ",", "response", ",", "txn_id", "=", "None", ")", ":", "response", ".", "body", "=", "\"{}\"", "# Check whether repeat txn_id", "if", "not", "self", ".", "_is_new", "(", "txn_id", ")", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_200", "return", "request", ".", "context", "[", "\"body\"", "]", "=", "request", ".", "stream", ".", "read", "(", ")", "try", ":", "events", "=", "json", ".", "loads", "(", "request", ".", "context", "[", "\"body\"", "]", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "\"events\"", "]", "except", "(", "KeyError", ",", "ValueError", ",", "UnicodeDecodeError", ")", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_400", "response", ".", "body", "=", "\"Malformed request body\"", "return", "if", "self", ".", "handler", "(", "EventStream", "(", "events", ",", "self", ".", "Api", ")", ")", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_200", "else", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_400" ]
Responds to PUT request containing events.
[ "Responds", "to", "PUT", "request", "containing", "events", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/resources.py#L61-L81
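The handler only runs when the body parses as JSON with a top-level 'events' list; anything else yields 400. A minimal illustrative payload (event fields are examples, not a schema):

import json

payload = json.dumps({
    'events': [
        {'type': 'm.room.message',
         'room_id': '!room:example.org',
         'content': {'msgtype': 'm.text', 'body': 'hello'}},
    ]
})
# PUT /transactions/{txn_id} with this body; repeating the same txn_id
# short-circuits to 200 without re-running the handler (idempotency).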
non-Jedi/gyr
gyr/resources.py
User.on_get
def on_get(self, request, response, user_id=None):
    """Responds to GET request for users."""
    response.body = "{}"
    if self.handler(user_id):
        response.status = falcon.HTTP_200
        self.api.register(utils.mxid2localpart(user_id))
    else:
        response.status = falcon.HTTP_404
python
def on_get(self, request, response, user_id=None):
    """Responds to GET request for users."""
    response.body = "{}"
    if self.handler(user_id):
        response.status = falcon.HTTP_200
        self.api.register(utils.mxid2localpart(user_id))
    else:
        response.status = falcon.HTTP_404
[ "def", "on_get", "(", "self", ",", "request", ",", "response", ",", "user_id", "=", "None", ")", ":", "response", ".", "body", "=", "\"{}\"", "if", "self", ".", "handler", "(", "user_id", ")", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_200", "self", ".", "api", ".", "register", "(", "utils", ".", "mxid2localpart", "(", "user_id", ")", ")", "else", ":", "response", ".", "status", "=", "falcon", ".", "HTTP_404" ]
Responds to GET request for users.
[ "Responds", "to", "GET", "request", "for", "users", "." ]
train
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/resources.py#L87-L94
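utils.mxid2localpart is not shown in this record; by Matrix ID conventions it presumably maps '@alice:example.org' to 'alice'. An equivalent self-contained sketch:

def mxid2localpart(mxid):
    # '@alice:example.org' -> 'alice' (assumed behaviour of gyr's helper)
    return mxid.partition(':')[0].lstrip('@')

assert mxid2localpart('@alice:example.org') == 'alice'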
nion-software/nionswift-instrumentation-kit
nion/instrumentation/camera_base.py
CameraSettings.set_frame_parameters
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    """Set the frame parameters with the settings index and fire the frame parameters changed event.

    If the settings index matches the current settings index, call set current frame parameters.

    If the settings index matches the record settings index, call set record frame parameters.
    """
    self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
python
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    """Set the frame parameters with the settings index and fire the frame parameters changed event.

    If the settings index matches the current settings index, call set current frame parameters.

    If the settings index matches the record settings index, call set record frame parameters.
    """
    self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
[ "def", "set_frame_parameters", "(", "self", ",", "profile_index", ":", "int", ",", "frame_parameters", ")", "->", "None", ":", "self", ".", "frame_parameters_changed_event", ".", "fire", "(", "profile_index", ",", "frame_parameters", ")" ]
Set the frame parameters with the settings index and fire the frame parameters changed event.

If the settings index matches the current settings index, call set current frame parameters.

If the settings index matches the record settings index, call set record frame parameters.
[ "Set", "the", "frame", "parameters", "with", "the", "settings", "index", "and", "fire", "the", "frame", "parameters", "changed", "event", "." ]
train
https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nion/instrumentation/camera_base.py#L504-L511
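A hedged sketch of observing the fired event, assuming the nion Event convention of listen() returning a closable listener; here settings stands for a CameraSettings instance and frame_parameters for whatever parameters object the camera uses (both assumptions):

def on_changed(profile_index, frame_parameters):
    print('profile', profile_index, 'now uses', frame_parameters)

listener = settings.frame_parameters_changed_event.listen(on_changed)
settings.set_frame_parameters(0, frame_parameters)  # fires on_changed
listener.close()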