repository_name (string, length 7-55) | func_path_in_repository (string, length 4-223) | func_name (string, length 1-134) | whole_func_string (string, length 75-104k) | language (string, 1 class) | func_code_string (string, length 75-104k) | func_code_tokens (sequence, length 19-28.4k) | func_documentation_string (string, length 1-46.9k) | func_documentation_tokens (sequence, length 1-1.97k) | split_name (string, 1 class) | func_code_url (string, length 87-315) |
---|---|---|---|---|---|---|---|---|---|---|
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.min | def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
'''
return cls._binary_op(x, y, tf.minimum, tf.float32) | python | def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
'''
return cls._binary_op(x, y, tf.minimum, tf.float32) | [
"def",
"min",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_binary_op",
"(",
"x",
",",
"y",
",",
"tf",
".",
"minimum",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"minimum",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L468-L478 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.if_then_else | def if_then_else(cls,
condition: 'TensorFluent',
true_case: 'TensorFluent',
false_case: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape.
'''
true = TensorFluent.constant(True, tf.bool)
false = TensorFluent.constant(False, tf.bool)
ite = (condition == true) * true_case + (condition == false) * false_case
if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
ite = ite.cast(tf.bool)
return ite | python | def if_then_else(cls,
condition: 'TensorFluent',
true_case: 'TensorFluent',
false_case: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape.
'''
true = TensorFluent.constant(True, tf.bool)
false = TensorFluent.constant(False, tf.bool)
ite = (condition == true) * true_case + (condition == false) * false_case
if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
ite = ite.cast(tf.bool)
return ite | [
"def",
"if_then_else",
"(",
"cls",
",",
"condition",
":",
"'TensorFluent'",
",",
"true_case",
":",
"'TensorFluent'",
",",
"false_case",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"true",
"=",
"TensorFluent",
".",
"constant",
"(",
"True",
",",
"tf",
".",
"bool",
")",
"false",
"=",
"TensorFluent",
".",
"constant",
"(",
"False",
",",
"tf",
".",
"bool",
")",
"ite",
"=",
"(",
"condition",
"==",
"true",
")",
"*",
"true_case",
"+",
"(",
"condition",
"==",
"false",
")",
"*",
"false_case",
"if",
"true_case",
".",
"dtype",
"==",
"tf",
".",
"bool",
"and",
"false_case",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"ite",
"=",
"ite",
".",
"cast",
"(",
"tf",
".",
"bool",
")",
"return",
"ite"
] | Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"control",
"op",
"if",
"-",
"then",
"-",
"else",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L481-L503 |
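The `TensorFluent.if_then_else` row above implements the conditional by arithmetic on boolean masks rather than `tf.where`. A minimal sketch of the same idea on plain TensorFlow tensors (all values are made up for illustration, and TF 2.x eager mode is assumed):

```python
import tensorflow as tf

# Mask arithmetic version of if-then-else, mirroring the row above:
# the mask selects the true branch, (1 - mask) selects the false branch.
condition = tf.constant([True, False, True])
true_case = tf.constant([1.0, 2.0, 3.0])
false_case = tf.constant([10.0, 20.0, 30.0])

mask = tf.cast(condition, tf.float32)              # 1.0 where the condition holds
ite = mask * true_case + (1.0 - mask) * false_case

print(ite.numpy())                                 # [ 1. 20.  3.]
```

As in the final lines of the function above, a result built from two boolean cases would then be cast back to `tf.bool`.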
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._binary_op | def _binary_op(cls,
x: 'TensorFluent',
y: 'TensorFluent',
op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output.
'''
# scope
s1 = x.scope.as_list()
s2 = y.scope.as_list()
scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)
if x.batch and perm1 != []:
perm1 = [0] + [p+1 for p in perm1]
if y.batch and perm2 != []:
perm2 = [0] + [p+1 for p in perm2]
x = x.transpose(perm1)
y = y.transpose(perm2)
# shape
reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)
if reshape1 is not None:
x = x.reshape(reshape1)
if reshape2 is not None:
y = y.reshape(reshape2)
# dtype
x = x.cast(dtype)
y = y.cast(dtype)
# operation
t = op(x.tensor, y.tensor)
# batch
batch = x.batch or y.batch
return TensorFluent(t, scope, batch=batch) | python | def _binary_op(cls,
x: 'TensorFluent',
y: 'TensorFluent',
op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output.
'''
# scope
s1 = x.scope.as_list()
s2 = y.scope.as_list()
scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)
if x.batch and perm1 != []:
perm1 = [0] + [p+1 for p in perm1]
if y.batch and perm2 != []:
perm2 = [0] + [p+1 for p in perm2]
x = x.transpose(perm1)
y = y.transpose(perm2)
# shape
reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)
if reshape1 is not None:
x = x.reshape(reshape1)
if reshape2 is not None:
y = y.reshape(reshape2)
# dtype
x = x.cast(dtype)
y = y.cast(dtype)
# operation
t = op(x.tensor, y.tensor)
# batch
batch = x.batch or y.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_binary_op",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
",",
"tf",
".",
"Tensor",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"dtype",
":",
"tf",
".",
"DType",
")",
"->",
"'TensorFluent'",
":",
"# scope",
"s1",
"=",
"x",
".",
"scope",
".",
"as_list",
"(",
")",
"s2",
"=",
"y",
".",
"scope",
".",
"as_list",
"(",
")",
"scope",
",",
"perm1",
",",
"perm2",
"=",
"TensorFluentScope",
".",
"broadcast",
"(",
"s1",
",",
"s2",
")",
"if",
"x",
".",
"batch",
"and",
"perm1",
"!=",
"[",
"]",
":",
"perm1",
"=",
"[",
"0",
"]",
"+",
"[",
"p",
"+",
"1",
"for",
"p",
"in",
"perm1",
"]",
"if",
"y",
".",
"batch",
"and",
"perm2",
"!=",
"[",
"]",
":",
"perm2",
"=",
"[",
"0",
"]",
"+",
"[",
"p",
"+",
"1",
"for",
"p",
"in",
"perm2",
"]",
"x",
"=",
"x",
".",
"transpose",
"(",
"perm1",
")",
"y",
"=",
"y",
".",
"transpose",
"(",
"perm2",
")",
"# shape",
"reshape1",
",",
"reshape2",
"=",
"TensorFluentShape",
".",
"broadcast",
"(",
"x",
".",
"shape",
",",
"y",
".",
"shape",
")",
"if",
"reshape1",
"is",
"not",
"None",
":",
"x",
"=",
"x",
".",
"reshape",
"(",
"reshape1",
")",
"if",
"reshape2",
"is",
"not",
"None",
":",
"y",
"=",
"y",
".",
"reshape",
"(",
"reshape2",
")",
"# dtype",
"x",
"=",
"x",
".",
"cast",
"(",
"dtype",
")",
"y",
"=",
"y",
".",
"cast",
"(",
"dtype",
")",
"# operation",
"t",
"=",
"op",
"(",
"x",
".",
"tensor",
",",
"y",
".",
"tensor",
")",
"# batch",
"batch",
"=",
"x",
".",
"batch",
"or",
"y",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"binary",
"op",
"applied",
"to",
"fluents",
"x",
"and",
"y",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L506-L550 |
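`_binary_op` above first aligns the two operands' scopes (their named axes) by transposition, then broadcasts shapes and casts dtypes before calling the raw TensorFlow op. A toy sketch of the alignment step with plain tensors; the scope lists and the permutation here are hand-written stand-ins for what `TensorFluentScope.broadcast` computes:

```python
import tensorflow as tf

# Two operands whose axes are labelled by RDDL-style variables, in different orders.
x = tf.random.uniform([3, 4])   # scope ['?a', '?b']: 3 objects of ?a, 4 of ?b
y = tf.random.uniform([4, 3])   # scope ['?b', '?a']

# Bring y into x's axis order before the elementwise op (the transpose step above).
perm = [1, 0]                   # '?b','?a' -> '?a','?b'
y_aligned = tf.transpose(y, perm)

z = tf.minimum(x, y_aligned)    # both operands now share scope ['?a', '?b']
print(z.shape)                  # (3, 4)
```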
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._unary_op | def _unary_op(cls,
x: 'TensorFluent',
op: Callable[[tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
'''
x = x.cast(dtype)
t = op(x.tensor)
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(t, scope, batch=batch) | python | def _unary_op(cls,
x: 'TensorFluent',
op: Callable[[tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
'''
x = x.cast(dtype)
t = op(x.tensor)
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_unary_op",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"dtype",
":",
"tf",
".",
"DType",
")",
"->",
"'TensorFluent'",
":",
"x",
"=",
"x",
".",
"cast",
"(",
"dtype",
")",
"t",
"=",
"op",
"(",
"x",
".",
"tensor",
")",
"scope",
"=",
"x",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"x",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"unary",
"op",
"applied",
"to",
"fluent",
"x",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L553-L571 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._aggregation_op | def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
'''Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
'''
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch) | python | def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
'''Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
'''
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_aggregation_op",
"(",
"cls",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
",",
"Optional",
"[",
"Sequence",
"[",
"int",
"]",
"]",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"x",
":",
"'TensorFluent'",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"axis",
"=",
"cls",
".",
"_varslist2axis",
"(",
"x",
",",
"vars_list",
")",
"t",
"=",
"op",
"(",
"x",
".",
"tensor",
",",
"axis",
")",
"scope",
"=",
"[",
"]",
"for",
"var",
"in",
"x",
".",
"scope",
".",
"as_list",
"(",
")",
":",
"if",
"var",
"not",
"in",
"vars_list",
":",
"scope",
".",
"append",
"(",
"var",
")",
"batch",
"=",
"x",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"aggregation",
"op",
"applied",
"to",
"fluent",
"x",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L574-L598 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._varslist2axis | def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
'''Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis.
'''
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | python | def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
'''Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis.
'''
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | [
"def",
"_varslist2axis",
"(",
"cls",
",",
"fluent",
":",
"'TensorFluent'",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"int",
"]",
":",
"axis",
"=",
"[",
"]",
"for",
"var",
"in",
"vars_list",
":",
"if",
"var",
"in",
"fluent",
".",
"scope",
".",
"as_list",
"(",
")",
":",
"ax",
"=",
"fluent",
".",
"scope",
".",
"index",
"(",
"var",
")",
"if",
"fluent",
".",
"batch",
":",
"ax",
"+=",
"1",
"axis",
".",
"append",
"(",
"ax",
")",
"return",
"axis"
] | Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis. | [
"Maps",
"the",
"vars_list",
"into",
"a",
"list",
"of",
"axis",
"indices",
"corresponding",
"to",
"the",
"fluent",
"scope",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L601-L619 |
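`_varslist2axis` above turns variable names into tensor axis indices, shifting every index by one when the fluent carries a leading batch dimension. The same mapping as a standalone function over plain lists (the scope list and batch flag are stand-ins for the fluent's attributes):

```python
from typing import List

def varslist_to_axis(scope: List[str], vars_list: List[str], batch: bool = False) -> List[int]:
    """Map aggregated variable names to axis indices, skipping a leading batch axis."""
    axis = []
    for var in vars_list:
        if var in scope:
            ax = scope.index(var)
            if batch:
                ax += 1            # axis 0 is the batch dimension
            axis.append(ax)
    return axis

# Scope ['?x', '?y', '?z'] with a batch axis in front: '?y' -> 2, '?z' -> 3.
assert varslist_to_axis(['?x', '?y', '?z'], ['?y', '?z'], batch=True) == [2, 3]
```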
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.cast | def cast(self, dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
'''
if self.dtype == dtype:
return self
t = tf.cast(self.tensor, dtype)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def cast(self, dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
'''
if self.dtype == dtype:
return self
t = tf.cast(self.tensor, dtype)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"cast",
"(",
"self",
",",
"dtype",
":",
"tf",
".",
"DType",
")",
"->",
"'TensorFluent'",
":",
"if",
"self",
".",
"dtype",
"==",
"dtype",
":",
"return",
"self",
"t",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"tensor",
",",
"dtype",
")",
"scope",
"=",
"self",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"self",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"cast",
"operation",
"with",
"given",
"dtype",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L622-L636 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.reshape | def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
'''Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
'''
t = tf.reshape(self.tensor, shape)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
'''Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
'''
t = tf.reshape(self.tensor, shape)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"reshape",
"(",
"self",
",",
"shape",
":",
"tf",
".",
"TensorShape",
")",
"->",
"'TensorFluent'",
":",
"t",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"tensor",
",",
"shape",
")",
"scope",
"=",
"self",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"self",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"reshape",
"operation",
"with",
"given",
"shape",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L638-L650 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.transpose | def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':
'''Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
'''
if permutation == []:
return self
t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':
'''Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
'''
if permutation == []:
return self
t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"transpose",
"(",
"self",
",",
"permutation",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
"->",
"'TensorFluent'",
":",
"if",
"permutation",
"==",
"[",
"]",
":",
"return",
"self",
"t",
"=",
"tf",
".",
"transpose",
"(",
"self",
".",
"tensor",
",",
"permutation",
")",
"if",
"permutation",
"!=",
"[",
"]",
"else",
"self",
".",
"tensor",
"scope",
"=",
"self",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"self",
".",
"batch",
"return",
"TensorFluent",
"(",
"t",
",",
"scope",
",",
"batch",
"=",
"batch",
")"
] | Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"transpose",
"operation",
"with",
"given",
"permutation",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L652-L666 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.sum | def sum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_sum, operand, vars_list) | python | def sum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_sum, operand, vars_list) | [
"def",
"sum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
".",
"float32",
")",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_sum",
",",
"operand",
",",
"vars_list",
")"
] | Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"sum",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L668-L680 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.avg | def avg(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_mean, operand, vars_list) | python | def avg(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_mean, operand, vars_list) | [
"def",
"avg",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
".",
"float32",
")",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_mean",
",",
"operand",
",",
"vars_list",
")"
] | Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"avg",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L682-L694 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.prod | def prod(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list) | python | def prod(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list) | [
"def",
"prod",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
".",
"float32",
")",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_prod",
",",
"operand",
",",
"vars_list",
")"
] | Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"prod",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L696-L708 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.maximum | def maximum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
'''
return self._aggregation_op(tf.reduce_max, self, vars_list) | python | def maximum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
'''
return self._aggregation_op(tf.reduce_max, self, vars_list) | [
"def",
"maximum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_max",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"maximum",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L710-L719 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.minimum | def minimum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function.
'''
return self._aggregation_op(tf.reduce_min, self, vars_list) | python | def minimum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function.
'''
return self._aggregation_op(tf.reduce_min, self, vars_list) | [
"def",
"minimum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_min",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"minimum",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L721-L730 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.forall | def forall(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function.
'''
return self._aggregation_op(tf.reduce_all, self, vars_list) | python | def forall(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function.
'''
return self._aggregation_op(tf.reduce_all, self, vars_list) | [
"def",
"forall",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_all",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"forall",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L732-L741 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.exists | def exists(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
'''
return self._aggregation_op(tf.reduce_any, self, vars_list) | python | def exists(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
'''
return self._aggregation_op(tf.reduce_any, self, vars_list) | [
"def",
"exists",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_any",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"exists",
"aggregation",
"function",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L743-L752 |
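The aggregation rows above (`sum`, `avg`, `prod`, `maximum`, `minimum`, `forall`, `exists`) all reduce over the axes of the aggregated variables via `_aggregation_op`. A plain-TensorFlow sketch of what an `exists_{?y}` or `forall_{?y}` aggregation amounts to; the scope list and values are assumed for illustration:

```python
import tensorflow as tf

# A boolean fluent over scope ['?x', '?y']: 3 objects of ?x, 4 objects of ?y.
p = tf.constant([[False, True,  False, False],
                 [False, False, False, False],
                 [True,  True,  False, True]])

scope = ['?x', '?y']
axis = [scope.index('?y')]               # aggregate '?y' away

exists_y = tf.reduce_any(p, axis=axis)   # scope ['?x'] -> [True, False, True]
forall_y = tf.reduce_all(p, axis=axis)   # scope ['?x'] -> [False, False, False]
```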
ocaballeror/LyricFetch | lyricfetch/run.py | exclude_sources | def exclude_sources(exclude, section=False):
"""
Returns a narrower list of sources.
If the exclude parameter is a list, every one of its items will be removed
from the returned list.
If it's just a function (or a function's name) and 'section' is set to
False (default), a copy of the sources list without this element will be
returned.
If it's a function (or a function's name) but the section parameter is set
to True, the returned list will be a section of the sources list, including
everything between 'exclude' and the end of the list.
"""
newlist = sources.copy()
if not isinstance(exclude, list):
exclude = [exclude]
for source in exclude:
if not section:
newlist.remove(source)
else:
pos = newlist.index(source) + 1
if pos == len(sources):
return []
newlist = sources[pos:]
return newlist | python | def exclude_sources(exclude, section=False):
"""
Returns a narrower list of sources.
If the exclude parameter is a list, every one of its items will be removed
from the returned list.
If it's just a function (or a function's name) and 'section' is set to
False (default), a copy of the sources list without this element will be
returned.
If it's a function (or a function's name) but the section parameter is set
to True, the returned list will be a section of the sources list, including
everything between 'exclude' and the end of the list.
"""
newlist = sources.copy()
if not isinstance(exclude, list):
exclude = [exclude]
for source in exclude:
if not section:
newlist.remove(source)
else:
pos = newlist.index(source) + 1
if pos == len(sources):
return []
newlist = sources[pos:]
return newlist | [
"def",
"exclude_sources",
"(",
"exclude",
",",
"section",
"=",
"False",
")",
":",
"newlist",
"=",
"sources",
".",
"copy",
"(",
")",
"if",
"not",
"isinstance",
"(",
"exclude",
",",
"list",
")",
":",
"exclude",
"=",
"[",
"exclude",
"]",
"for",
"source",
"in",
"exclude",
":",
"if",
"not",
"section",
":",
"newlist",
".",
"remove",
"(",
"source",
")",
"else",
":",
"pos",
"=",
"newlist",
".",
"index",
"(",
"source",
")",
"+",
"1",
"if",
"pos",
"==",
"len",
"(",
"sources",
")",
":",
"return",
"[",
"]",
"newlist",
"=",
"sources",
"[",
"pos",
":",
"]",
"return",
"newlist"
] | Returns a narrower list of sources.
If the exclude parameter is a list, every one of its items will be removed
from the returned list.
If it's just a function (or a function's name) and 'section' is set to
False (default), a copy of the sources list without this element will be
returned.
If it's a function (or a function's name) but the section parameter is set
to True, the returned list will be a section of the sources list, including
everything between 'exclude' and the end of the list. | [
"Returns",
"a",
"narrower",
"list",
"of",
"sources",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L65-L90 |
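`exclude_sources` above behaves differently depending on `section`: without it, the listed sources are simply dropped; with `section=True`, only what comes after the given source is kept. A hedged usage sketch with made-up scraper stubs standing in for the module-level `sources` list:

```python
# Hypothetical stand-ins for the real scraper functions; not part of the dataset row.
def azlyrics(song): ...
def genius(song): ...
def metrolyrics(song): ...

sources = [azlyrics, genius, metrolyrics]

# Expected results, assuming the three-element list above:
#   exclude_sources(genius)                     -> [azlyrics, metrolyrics]
#   exclude_sources([azlyrics, genius])         -> [metrolyrics]
#   exclude_sources(genius, section=True)       -> [metrolyrics]  (everything after genius)
#   exclude_sources(metrolyrics, section=True)  -> []             (nothing after the last source)
```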
ocaballeror/LyricFetch | lyricfetch/run.py | get_lyrics | def get_lyrics(song, l_sources=None):
"""
Searches for lyrics of a single song and returns a Result object with the
various stats collected in the process.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used.
"""
if l_sources is None:
l_sources = sources
if song.lyrics and not CONFIG['overwrite']:
logger.debug('%s already has embedded lyrics', song)
return None
runtimes = {}
source = None
for l_source in l_sources:
start = time.time()
try:
lyrics = l_source(song)
except (HTTPError, HTTPException, URLError, ConnectionError):
lyrics = ''
runtimes[l_source] = time.time() - start
if lyrics != '':
source = l_source
break
if lyrics != '':
logger.info('++ %s: Found lyrics for %s\n', source.__name__, song)
song.lyrics = lyrics
else:
logger.info("Couldn't find lyrics for %s\n", song)
source = None
return Result(song, source, runtimes) | python | def get_lyrics(song, l_sources=None):
"""
Searches for lyrics of a single song and returns a Result object with the
various stats collected in the process.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used.
"""
if l_sources is None:
l_sources = sources
if song.lyrics and not CONFIG['overwrite']:
logger.debug('%s already has embedded lyrics', song)
return None
runtimes = {}
source = None
for l_source in l_sources:
start = time.time()
try:
lyrics = l_source(song)
except (HTTPError, HTTPException, URLError, ConnectionError):
lyrics = ''
runtimes[l_source] = time.time() - start
if lyrics != '':
source = l_source
break
if lyrics != '':
logger.info('++ %s: Found lyrics for %s\n', source.__name__, song)
song.lyrics = lyrics
else:
logger.info("Couldn't find lyrics for %s\n", song)
source = None
return Result(song, source, runtimes) | [
"def",
"get_lyrics",
"(",
"song",
",",
"l_sources",
"=",
"None",
")",
":",
"if",
"l_sources",
"is",
"None",
":",
"l_sources",
"=",
"sources",
"if",
"song",
".",
"lyrics",
"and",
"not",
"CONFIG",
"[",
"'overwrite'",
"]",
":",
"logger",
".",
"debug",
"(",
"'%s already has embedded lyrics'",
",",
"song",
")",
"return",
"None",
"runtimes",
"=",
"{",
"}",
"source",
"=",
"None",
"for",
"l_source",
"in",
"l_sources",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"lyrics",
"=",
"l_source",
"(",
"song",
")",
"except",
"(",
"HTTPError",
",",
"HTTPException",
",",
"URLError",
",",
"ConnectionError",
")",
":",
"lyrics",
"=",
"''",
"runtimes",
"[",
"l_source",
"]",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start",
"if",
"lyrics",
"!=",
"''",
":",
"source",
"=",
"l_source",
"break",
"if",
"lyrics",
"!=",
"''",
":",
"logger",
".",
"info",
"(",
"'++ %s: Found lyrics for %s\\n'",
",",
"source",
".",
"__name__",
",",
"song",
")",
"song",
".",
"lyrics",
"=",
"lyrics",
"else",
":",
"logger",
".",
"info",
"(",
"\"Couldn't find lyrics for %s\\n\"",
",",
"song",
")",
"source",
"=",
"None",
"return",
"Result",
"(",
"song",
",",
"source",
",",
"runtimes",
")"
] | Searches for lyrics of a single song and returns a Result object with the
various stats collected in the process.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used. | [
"Searches",
"for",
"lyrics",
"of",
"a",
"single",
"song",
"and",
"returns",
"a",
"Result",
"object",
"with",
"the",
"various",
"stats",
"collected",
"in",
"the",
"process",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L93-L129 |
ocaballeror/LyricFetch | lyricfetch/run.py | get_lyrics_threaded | def get_lyrics_threaded(song, l_sources=None):
"""
Launches a pool of threads to search for the lyrics of a single song.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used.
"""
if l_sources is None:
l_sources = sources
if song.lyrics and not CONFIG['overwrite']:
logger.debug('%s already has embedded lyrics', song)
return None
runtimes = {}
queue = Queue()
pool = [LyrThread(source, song, queue) for source in l_sources]
for thread in pool:
thread.start()
for _ in range(len(pool)):
result = queue.get()
runtimes[result['source']] = result['runtime']
if result['lyrics']:
break
if result['lyrics']:
song.lyrics = result['lyrics']
source = result['source']
else:
source = None
return Result(song, source, runtimes) | python | def get_lyrics_threaded(song, l_sources=None):
"""
Launches a pool of threads to search for the lyrics of a single song.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used.
"""
if l_sources is None:
l_sources = sources
if song.lyrics and not CONFIG['overwrite']:
logger.debug('%s already has embedded lyrics', song)
return None
runtimes = {}
queue = Queue()
pool = [LyrThread(source, song, queue) for source in l_sources]
for thread in pool:
thread.start()
for _ in range(len(pool)):
result = queue.get()
runtimes[result['source']] = result['runtime']
if result['lyrics']:
break
if result['lyrics']:
song.lyrics = result['lyrics']
source = result['source']
else:
source = None
return Result(song, source, runtimes) | [
"def",
"get_lyrics_threaded",
"(",
"song",
",",
"l_sources",
"=",
"None",
")",
":",
"if",
"l_sources",
"is",
"None",
":",
"l_sources",
"=",
"sources",
"if",
"song",
".",
"lyrics",
"and",
"not",
"CONFIG",
"[",
"'overwrite'",
"]",
":",
"logger",
".",
"debug",
"(",
"'%s already has embedded lyrics'",
",",
"song",
")",
"return",
"None",
"runtimes",
"=",
"{",
"}",
"queue",
"=",
"Queue",
"(",
")",
"pool",
"=",
"[",
"LyrThread",
"(",
"source",
",",
"song",
",",
"queue",
")",
"for",
"source",
"in",
"l_sources",
"]",
"for",
"thread",
"in",
"pool",
":",
"thread",
".",
"start",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"pool",
")",
")",
":",
"result",
"=",
"queue",
".",
"get",
"(",
")",
"runtimes",
"[",
"result",
"[",
"'source'",
"]",
"]",
"=",
"result",
"[",
"'runtime'",
"]",
"if",
"result",
"[",
"'lyrics'",
"]",
":",
"break",
"if",
"result",
"[",
"'lyrics'",
"]",
":",
"song",
".",
"lyrics",
"=",
"result",
"[",
"'lyrics'",
"]",
"source",
"=",
"result",
"[",
"'source'",
"]",
"else",
":",
"source",
"=",
"None",
"return",
"Result",
"(",
"song",
",",
"source",
",",
"runtimes",
")"
] | Launches a pool of threads to search for the lyrics of a single song.
The optional parameter 'sources' specifies an alternative list of sources.
If not present, the main list will be used. | [
"Launches",
"a",
"pool",
"of",
"threads",
"to",
"search",
"for",
"the",
"lyrics",
"of",
"a",
"single",
"song",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L132-L164 |
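`get_lyrics_threaded` above races one thread per source and keeps the first non-empty result pulled from a shared queue. A self-contained sketch of that pattern, with placeholder workers instead of the real scrapers:

```python
import time
import threading
from queue import Queue

def failing_source(song):
    return ''                            # simulates "lyrics not found"

def slow_source(song):
    time.sleep(0.2)
    return 'some lyrics for ' + song

def first_result(song, workers):
    queue = Queue()

    def run(worker):
        start = time.time()
        queue.put({'source': worker.__name__,
                   'lyrics': worker(song),
                   'runtime': time.time() - start})

    for worker in workers:
        threading.Thread(target=run, args=(worker,), daemon=True).start()

    for _ in range(len(workers)):        # stop at the first hit, like the row above
        result = queue.get()
        if result['lyrics']:
            return result
    return None

print(first_result('some song', [failing_source, slow_source]))
```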
ocaballeror/LyricFetch | lyricfetch/run.py | process_result | def process_result(result):
"""
Process a result object by:
1. Saving the lyrics to the corresponding file(if applicable).
2. Printing the lyrics or the corresponding error/success message.
3. Returning a boolean indicating if the lyrics were found or not.
"""
found = result.source is not None
if found:
if hasattr(result.song, 'filename'):
audiofile = eyed3.load(result.song.filename)
audiofile.tag.lyrics.set(result.song.lyrics)
audiofile.tag.save()
print(f'{id_source(result.source)} Lyrics added for {result.song}')
else:
print(f"""FROM {id_source(result.source, full=True)}
{result.song.lyrics}
-----------------------------------------------------------------------------\
""")
else:
print(f'Lyrics for {result.song} not found')
return found | python | def process_result(result):
"""
Process a result object by:
1. Saving the lyrics to the corresponding file(if applicable).
2. Printing the lyrics or the corresponding error/success message.
3. Returning a boolean indicating if the lyrics were found or not.
"""
found = result.source is not None
if found:
if hasattr(result.song, 'filename'):
audiofile = eyed3.load(result.song.filename)
audiofile.tag.lyrics.set(result.song.lyrics)
audiofile.tag.save()
print(f'{id_source(result.source)} Lyrics added for {result.song}')
else:
print(f"""FROM {id_source(result.source, full=True)}
{result.song.lyrics}
-----------------------------------------------------------------------------\
""")
else:
print(f'Lyrics for {result.song} not found')
return found | [
"def",
"process_result",
"(",
"result",
")",
":",
"found",
"=",
"result",
".",
"source",
"is",
"not",
"None",
"if",
"found",
":",
"if",
"hasattr",
"(",
"result",
".",
"song",
",",
"'filename'",
")",
":",
"audiofile",
"=",
"eyed3",
".",
"load",
"(",
"result",
".",
"song",
".",
"filename",
")",
"audiofile",
".",
"tag",
".",
"lyrics",
".",
"set",
"(",
"result",
".",
"song",
".",
"lyrics",
")",
"audiofile",
".",
"tag",
".",
"save",
"(",
")",
"print",
"(",
"f'{id_source(result.source)} Lyrics added for {result.song}'",
")",
"else",
":",
"print",
"(",
"f\"\"\"FROM {id_source(result.source, full=True)}\n\n{result.song.lyrics}\n-----------------------------------------------------------------------------\\\n\"\"\"",
")",
"else",
":",
"print",
"(",
"f'Lyrics for {result.song} not found'",
")",
"return",
"found"
] | Process a result object by:
1. Saving the lyrics to the corresponding file(if applicable).
2. Printing the lyrics or the corresponding error/success message.
3. Returning a boolean indicating if the lyrics were found or not. | [
"Process",
"a",
"result",
"object",
"by",
":",
"1",
".",
"Saving",
"the",
"lyrics",
"to",
"the",
"corresponding",
"file",
"(",
"if",
"applicable",
")",
".",
"2",
".",
"Printing",
"the",
"lyrics",
"or",
"the",
"corresponding",
"error",
"/",
"success",
"message",
".",
"3",
".",
"Returning",
"a",
"boolean",
"indicating",
"if",
"the",
"lyrics",
"were",
"found",
"or",
"not",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L167-L190 |
ocaballeror/LyricFetch | lyricfetch/run.py | run | def run(songs):
"""
Calls get_lyrics_threaded for a song or list of songs.
"""
if not hasattr(songs, '__iter__'):
result = get_lyrics_threaded(songs)
process_result(result)
else:
start = time.time()
stats = run_mp(songs)
end = time.time()
if CONFIG['print_stats']:
stats.print_stats()
total_time = end - start
total_time = '%d:%02d:%02d' % (total_time / 3600,
(total_time / 3600) / 60,
(total_time % 3600) % 60)
print(f'Total time: {total_time}') | python | def run(songs):
"""
Calls get_lyrics_threaded for a song or list of songs.
"""
if not hasattr(songs, '__iter__'):
result = get_lyrics_threaded(songs)
process_result(result)
else:
start = time.time()
stats = run_mp(songs)
end = time.time()
if CONFIG['print_stats']:
stats.print_stats()
total_time = end - start
total_time = '%d:%02d:%02d' % (total_time / 3600,
(total_time / 3600) / 60,
(total_time % 3600) % 60)
print(f'Total time: {total_time}') | [
"def",
"run",
"(",
"songs",
")",
":",
"if",
"not",
"hasattr",
"(",
"songs",
",",
"'__iter__'",
")",
":",
"result",
"=",
"get_lyrics_threaded",
"(",
"songs",
")",
"process_result",
"(",
"result",
")",
"else",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"stats",
"=",
"run_mp",
"(",
"songs",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"if",
"CONFIG",
"[",
"'print_stats'",
"]",
":",
"stats",
".",
"print_stats",
"(",
")",
"total_time",
"=",
"end",
"-",
"start",
"total_time",
"=",
"'%d:%02d:%02d'",
"%",
"(",
"total_time",
"/",
"3600",
",",
"(",
"total_time",
"/",
"3600",
")",
"/",
"60",
",",
"(",
"total_time",
"%",
"3600",
")",
"%",
"60",
")",
"print",
"(",
"f'Total time: {total_time}'",
")"
] | Calls get_lyrics_threaded for a song or list of songs. | [
"Calls",
"get_lyrics_threaded",
"for",
"a",
"song",
"or",
"list",
"of",
"songs",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L193-L210 |
ocaballeror/LyricFetch | lyricfetch/run.py | run_mp | def run_mp(songs):
"""
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
"""
stats = Stats()
if CONFIG['debug']:
good = open('found', 'w')
bad = open('notfound', 'w')
logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
chunksize = math.ceil(len(songs) / os.cpu_count())
try:
with Pool(CONFIG['jobcount']) as pool:
for result in pool.imap_unordered(get_lyrics, songs, chunksize):
if result is None:
continue
for source, runtime in result.runtimes.items():
stats.add_result(source, result.source == source, runtime)
found = process_result(result)
if CONFIG['debug']:
if found:
good.write(f'{id_source(source)}: {result.song}\n')
good.flush()
else:
bad.write(str(result.song) + '\n')
bad.flush()
finally:
if CONFIG['debug']:
good.close()
bad.close()
return stats | python | def run_mp(songs):
"""
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
"""
stats = Stats()
if CONFIG['debug']:
good = open('found', 'w')
bad = open('notfound', 'w')
logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
chunksize = math.ceil(len(songs) / os.cpu_count())
try:
with Pool(CONFIG['jobcount']) as pool:
for result in pool.imap_unordered(get_lyrics, songs, chunksize):
if result is None:
continue
for source, runtime in result.runtimes.items():
stats.add_result(source, result.source == source, runtime)
found = process_result(result)
if CONFIG['debug']:
if found:
good.write(f'{id_source(source)}: {result.song}\n')
good.flush()
else:
bad.write(str(result.song) + '\n')
bad.flush()
finally:
if CONFIG['debug']:
good.close()
bad.close()
return stats | [
"def",
"run_mp",
"(",
"songs",
")",
":",
"stats",
"=",
"Stats",
"(",
")",
"if",
"CONFIG",
"[",
"'debug'",
"]",
":",
"good",
"=",
"open",
"(",
"'found'",
",",
"'w'",
")",
"bad",
"=",
"open",
"(",
"'notfound'",
",",
"'w'",
")",
"logger",
".",
"debug",
"(",
"'Launching a pool of %d processes\\n'",
",",
"CONFIG",
"[",
"'jobcount'",
"]",
")",
"chunksize",
"=",
"math",
".",
"ceil",
"(",
"len",
"(",
"songs",
")",
"/",
"os",
".",
"cpu_count",
"(",
")",
")",
"try",
":",
"with",
"Pool",
"(",
"CONFIG",
"[",
"'jobcount'",
"]",
")",
"as",
"pool",
":",
"for",
"result",
"in",
"pool",
".",
"imap_unordered",
"(",
"get_lyrics",
",",
"songs",
",",
"chunksize",
")",
":",
"if",
"result",
"is",
"None",
":",
"continue",
"for",
"source",
",",
"runtime",
"in",
"result",
".",
"runtimes",
".",
"items",
"(",
")",
":",
"stats",
".",
"add_result",
"(",
"source",
",",
"result",
".",
"source",
"==",
"source",
",",
"runtime",
")",
"found",
"=",
"process_result",
"(",
"result",
")",
"if",
"CONFIG",
"[",
"'debug'",
"]",
":",
"if",
"found",
":",
"good",
".",
"write",
"(",
"f'{id_source(source)}: {result.song}\\n'",
")",
"good",
".",
"flush",
"(",
")",
"else",
":",
"bad",
".",
"write",
"(",
"str",
"(",
"result",
".",
"song",
")",
"+",
"'\\n'",
")",
"bad",
".",
"flush",
"(",
")",
"finally",
":",
"if",
"CONFIG",
"[",
"'debug'",
"]",
":",
"good",
".",
"close",
"(",
")",
"bad",
".",
"close",
"(",
")",
"return",
"stats"
] | Concurrently calls get_lyrics to fetch the lyrics of a large list of songs. | [
"Concurrently",
"calls",
"get_lyrics",
"to",
"fetch",
"the",
"lyrics",
"of",
"a",
"large",
"list",
"of",
"songs",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L213-L247 |
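`run_mp` above fans the songs out over a process pool with `imap_unordered`, so results come back in completion order rather than input order. A stripped-down sketch of the pattern (the work function is a placeholder for `get_lyrics`):

```python
import math
import os
from multiprocessing import Pool

def fetch(song):
    # Placeholder for get_lyrics(song).
    return song.upper()

if __name__ == '__main__':
    songs = ['song-%d' % i for i in range(10)]
    chunksize = math.ceil(len(songs) / os.cpu_count())

    with Pool(processes=4) as pool:
        for result in pool.imap_unordered(fetch, songs, chunksize):
            print(result)   # arrives as soon as a worker finishes, not in input order
```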
malramsay64/experi | src/experi/scheduler.py | parse_setup | def parse_setup(options: Union[List, str]) -> str:
"""Convert potentially a list of commands into a single string.
This creates a single string with newlines between each element of the list
so that they will all run after each other in a bash script.
"""
if isinstance(options, str):
return options
return "\n".join(options) | python | def parse_setup(options: Union[List, str]) -> str:
"""Convert potentially a list of commands into a single string.
This creates a single string with newlines between each element of the list
so that they will all run after each other in a bash script.
"""
if isinstance(options, str):
return options
return "\n".join(options) | [
"def",
"parse_setup",
"(",
"options",
":",
"Union",
"[",
"List",
",",
"str",
"]",
")",
"->",
"str",
":",
"if",
"isinstance",
"(",
"options",
",",
"str",
")",
":",
"return",
"options",
"return",
"\"\\n\"",
".",
"join",
"(",
"options",
")"
] | Convert potentially a list of commands into a single string.
This creates a single string with newlines between each element of the list
so that they will all run after each other in a bash script. | [
"Convert",
"potentially",
"a",
"list",
"of",
"commands",
"into",
"a",
"single",
"string",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L267-L276 |
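A small usage note for `parse_setup` above: a single string passes through unchanged, while a list of commands is joined with newlines so they run one after another in the generated script. The local copy of the function below is only there to make the example self-contained:

```python
from typing import List, Union

def parse_setup(options: Union[List, str]) -> str:
    if isinstance(options, str):
        return options
    return "\n".join(options)

assert parse_setup('module load python') == 'module load python'
assert parse_setup(['module load python', 'source venv/bin/activate']) == (
    'module load python\nsource venv/bin/activate'
)
```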
malramsay64/experi | src/experi/scheduler.py | create_scheduler_file | def create_scheduler_file(scheduler: str, job: Job) -> str:
"""Substitute values into a template scheduler file."""
logger.debug("Create Scheduler File Function")
if job.scheduler_options is None:
scheduler_options: Dict[str, Any] = {}
else:
scheduler_options = deepcopy(job.scheduler_options)
try:
setup_string = parse_setup(scheduler_options["setup"])
del scheduler_options["setup"]
except KeyError:
setup_string = ""
# Create header
header_string = create_header_string(scheduler, **scheduler_options)
header_string += get_array_string(scheduler, len(job))
if scheduler.upper() == "SLURM":
workdir = r"$SLURM_SUBMIT_DIR"
array_index = r"$SLURM_ARRAY_TASK_ID"
elif scheduler.upper() == "PBS":
workdir = r"$PBS_O_WORKDIR"
array_index = r"$PBS_ARRAY_INDEX"
return header_string + SCHEDULER_TEMPLATE.format(
workdir=workdir,
command_list=job.as_bash_array(),
setup=setup_string,
array_index=array_index,
) | python | def create_scheduler_file(scheduler: str, job: Job) -> str:
"""Substitute values into a template scheduler file."""
logger.debug("Create Scheduler File Function")
if job.scheduler_options is None:
scheduler_options: Dict[str, Any] = {}
else:
scheduler_options = deepcopy(job.scheduler_options)
try:
setup_string = parse_setup(scheduler_options["setup"])
del scheduler_options["setup"]
except KeyError:
setup_string = ""
# Create header
header_string = create_header_string(scheduler, **scheduler_options)
header_string += get_array_string(scheduler, len(job))
if scheduler.upper() == "SLURM":
workdir = r"$SLURM_SUBMIT_DIR"
array_index = r"$SLURM_ARRAY_TASK_ID"
elif scheduler.upper() == "PBS":
workdir = r"$PBS_O_WORKDIR"
array_index = r"$PBS_ARRAY_INDEX"
return header_string + SCHEDULER_TEMPLATE.format(
workdir=workdir,
command_list=job.as_bash_array(),
setup=setup_string,
array_index=array_index,
) | [
"def",
"create_scheduler_file",
"(",
"scheduler",
":",
"str",
",",
"job",
":",
"Job",
")",
"->",
"str",
":",
"logger",
".",
"debug",
"(",
"\"Create Scheduler File Function\"",
")",
"if",
"job",
".",
"scheduler_options",
"is",
"None",
":",
"scheduler_options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"else",
":",
"scheduler_options",
"=",
"deepcopy",
"(",
"job",
".",
"scheduler_options",
")",
"try",
":",
"setup_string",
"=",
"parse_setup",
"(",
"scheduler_options",
"[",
"\"setup\"",
"]",
")",
"del",
"scheduler_options",
"[",
"\"setup\"",
"]",
"except",
"KeyError",
":",
"setup_string",
"=",
"\"\"",
"# Create header",
"header_string",
"=",
"create_header_string",
"(",
"scheduler",
",",
"*",
"*",
"scheduler_options",
")",
"header_string",
"+=",
"get_array_string",
"(",
"scheduler",
",",
"len",
"(",
"job",
")",
")",
"if",
"scheduler",
".",
"upper",
"(",
")",
"==",
"\"SLURM\"",
":",
"workdir",
"=",
"r\"$SLURM_SUBMIT_DIR\"",
"array_index",
"=",
"r\"$SLURM_ARRAY_TASK_ID\"",
"elif",
"scheduler",
".",
"upper",
"(",
")",
"==",
"\"PBS\"",
":",
"workdir",
"=",
"r\"$PBS_O_WORKDIR\"",
"array_index",
"=",
"r\"$PBS_ARRAY_INDEX\"",
"return",
"header_string",
"+",
"SCHEDULER_TEMPLATE",
".",
"format",
"(",
"workdir",
"=",
"workdir",
",",
"command_list",
"=",
"job",
".",
"as_bash_array",
"(",
")",
",",
"setup",
"=",
"setup_string",
",",
"array_index",
"=",
"array_index",
",",
")"
] | Substitute values into a template scheduler file. | [
"Substitute",
"values",
"into",
"a",
"template",
"scheduler",
"file",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L304-L333 |
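`create_scheduler_file` above picks the working-directory and array-index variables based on the scheduler and substitutes them, together with the setup block and the command array, into a module-level `SCHEDULER_TEMPLATE` that is not shown in this row. The sketch below uses an invented, much smaller template purely to illustrate the substitution; it is not the real template text:

```python
# Invented mini-template; the real SCHEDULER_TEMPLATE is defined elsewhere in the module.
TEMPLATE = """cd {workdir}
{setup}
COMMANDS=(
{command_list}
)
eval "${{COMMANDS[{array_index}]}}"
"""

def render(scheduler: str, command_list: str, setup: str = '') -> str:
    if scheduler.upper() == 'SLURM':
        workdir, array_index = '$SLURM_SUBMIT_DIR', '$SLURM_ARRAY_TASK_ID'
    else:                                   # assume PBS otherwise
        workdir, array_index = '$PBS_O_WORKDIR', '$PBS_ARRAY_INDEX'
    return TEMPLATE.format(workdir=workdir, command_list=command_list,
                           setup=setup, array_index=array_index)

print(render('slurm', '"echo first"\n"echo second"', 'module load python'))
```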
nion-software/nionswift-instrumentation-kit | nionswift_plugin/nion_instrumentation_ui/VideoControlPanel.py | VideoSourceStateController.initialize_state | def initialize_state(self):
""" Call this to initialize the state of the UI after everything has been connected. """
if self.__hardware_source:
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
self.__update_buttons()
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) | python | def initialize_state(self):
""" Call this to initialize the state of the UI after everything has been connected. """
if self.__hardware_source:
self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
if self.on_display_name_changed:
self.on_display_name_changed(self.display_name)
self.__update_buttons()
if self.on_data_item_states_changed:
self.on_data_item_states_changed(list()) | [
"def",
"initialize_state",
"(",
"self",
")",
":",
"if",
"self",
".",
"__hardware_source",
":",
"self",
".",
"__data_item_states_changed_event_listener",
"=",
"self",
".",
"__hardware_source",
".",
"data_item_states_changed_event",
".",
"listen",
"(",
"self",
".",
"__data_item_states_changed",
")",
"self",
".",
"__acquisition_state_changed_event_listener",
"=",
"self",
".",
"__hardware_source",
".",
"acquisition_state_changed_event",
".",
"listen",
"(",
"self",
".",
"__acquisition_state_changed",
")",
"if",
"self",
".",
"on_display_name_changed",
":",
"self",
".",
"on_display_name_changed",
"(",
"self",
".",
"display_name",
")",
"self",
".",
"__update_buttons",
"(",
")",
"if",
"self",
".",
"on_data_item_states_changed",
":",
"self",
".",
"on_data_item_states_changed",
"(",
"list",
"(",
")",
")"
] | Call this to initialize the state of the UI after everything has been connected. | [
"Call",
"this",
"to",
"initialize",
"the",
"state",
"of",
"the",
"UI",
"after",
"everything",
"has",
"been",
"connected",
"."
] | train | https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/VideoControlPanel.py#L121-L130 |
nion-software/nionswift-instrumentation-kit | nionswift_plugin/nion_instrumentation_ui/VideoControlPanel.py | VideoSourceStateController.handle_play_clicked | def handle_play_clicked(self):
""" Call this when the user clicks the play/pause button. """
if self.__hardware_source:
if self.is_playing:
self.__hardware_source.stop_playing()
else:
self.__hardware_source.start_playing() | python | def handle_play_clicked(self):
""" Call this when the user clicks the play/pause button. """
if self.__hardware_source:
if self.is_playing:
self.__hardware_source.stop_playing()
else:
self.__hardware_source.start_playing() | [
"def",
"handle_play_clicked",
"(",
"self",
")",
":",
"if",
"self",
".",
"__hardware_source",
":",
"if",
"self",
".",
"is_playing",
":",
"self",
".",
"__hardware_source",
".",
"stop_playing",
"(",
")",
"else",
":",
"self",
".",
"__hardware_source",
".",
"start_playing",
"(",
")"
] | Call this when the user clicks the play/pause button. | [
"Call",
"this",
"when",
"the",
"user",
"clicks",
"the",
"play",
"/",
"pause",
"button",
"."
] | train | https://github.com/nion-software/nionswift-instrumentation-kit/blob/b20c4fff17e840e8cb3d544705faf5bd05f1cbf7/nionswift_plugin/nion_instrumentation_ui/VideoControlPanel.py#L132-L138 |
thiagopbueno/rddl2tf | rddl2tf/utils.py | range_type_to_dtype | def range_type_to_dtype(range_type: str) -> Optional[tf.DType]:
'''Maps RDDL range types to TensorFlow dtypes.'''
range2dtype = {
'real': tf.float32,
'int': tf.int32,
'bool': tf.bool
}
return range2dtype[range_type] | python | def range_type_to_dtype(range_type: str) -> Optional[tf.DType]:
'''Maps RDDL range types to TensorFlow dtypes.'''
range2dtype = {
'real': tf.float32,
'int': tf.int32,
'bool': tf.bool
}
return range2dtype[range_type] | [
"def",
"range_type_to_dtype",
"(",
"range_type",
":",
"str",
")",
"->",
"Optional",
"[",
"tf",
".",
"DType",
"]",
":",
"range2dtype",
"=",
"{",
"'real'",
":",
"tf",
".",
"float32",
",",
"'int'",
":",
"tf",
".",
"int32",
",",
"'bool'",
":",
"tf",
".",
"bool",
"}",
"return",
"range2dtype",
"[",
"range_type",
"]"
] | Maps RDDL range types to TensorFlow dtypes. | [
"Maps",
"RDDL",
"range",
"types",
"to",
"TensorFlow",
"dtypes",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/utils.py#L22-L29 |
thiagopbueno/rddl2tf | rddl2tf/utils.py | python_type_to_dtype | def python_type_to_dtype(python_type: type) -> Optional[tf.DType]:
'''Maps python types to TensorFlow dtypes.'''
dtype = None
if python_type == float:
dtype = tf.float32
elif python_type == int:
dtype = tf.int32
elif python_type == bool:
dtype = tf.bool
return dtype | python | def python_type_to_dtype(python_type: type) -> Optional[tf.DType]:
'''Maps python types to TensorFlow dtypes.'''
dtype = None
if python_type == float:
dtype = tf.float32
elif python_type == int:
dtype = tf.int32
elif python_type == bool:
dtype = tf.bool
return dtype | [
"def",
"python_type_to_dtype",
"(",
"python_type",
":",
"type",
")",
"->",
"Optional",
"[",
"tf",
".",
"DType",
"]",
":",
"dtype",
"=",
"None",
"if",
"python_type",
"==",
"float",
":",
"dtype",
"=",
"tf",
".",
"float32",
"elif",
"python_type",
"==",
"int",
":",
"dtype",
"=",
"tf",
".",
"int32",
"elif",
"python_type",
"==",
"bool",
":",
"dtype",
"=",
"tf",
".",
"bool",
"return",
"dtype"
] | Maps python types to TensorFlow dtypes. | [
"Maps",
"python",
"types",
"to",
"TensorFlow",
"dtypes",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/utils.py#L32-L41 |
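A minimal usage sketch for the two dtype helpers above (not part of the dataset record; it assumes `rddl2tf` is installed with TensorFlow 1.x and that both functions are importable from `rddl2tf.utils`, as the file path suggests):

```python
from rddl2tf.utils import range_type_to_dtype, python_type_to_dtype

# RDDL range types go through a fixed dict, so an unmapped name would raise KeyError.
print(range_type_to_dtype('real'))   # tf.float32
print(range_type_to_dtype('bool'))   # tf.bool

# Python types fall through to None when there is no mapping.
print(python_type_to_dtype(int))     # tf.int32
print(python_type_to_dtype(str))     # None
```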
RowleyGroup/pyqueue | pyqueue/systems/slurm.py | SlurmPrinter.get_dependency_type | def get_dependency_type(_type):
"""
Get the dependency type string for SlurmPrinter
:rtype: str
"""
if _type == DependencyTypes.AFTER:
return 'after'
elif _type == DependencyTypes.AFTER_ANY:
return 'afterany'
elif _type == DependencyTypes.AFTER_CORR:
return 'aftercorr'
elif _type == DependencyTypes.AFTER_NOT_OK:
return 'afternotok'
elif _type == DependencyTypes.AFTER_OK:
return 'afterok'
else:
return None | python | def get_dependency_type(_type):
"""
Get the dependency type string for SlurmPrinter
:rtype: str
"""
if _type == DependencyTypes.AFTER:
return 'after'
elif _type == DependencyTypes.AFTER_ANY:
return 'afterany'
elif _type == DependencyTypes.AFTER_CORR:
return 'aftercorr'
elif _type == DependencyTypes.AFTER_NOT_OK:
return 'afternotok'
elif _type == DependencyTypes.AFTER_OK:
return 'afterok'
else:
return None | [
"def",
"get_dependency_type",
"(",
"_type",
")",
":",
"if",
"_type",
"==",
"DependencyTypes",
".",
"AFTER",
":",
"return",
"'after'",
"elif",
"_type",
"==",
"DependencyTypes",
".",
"AFTER_ANY",
":",
"return",
"'afterany'",
"elif",
"_type",
"==",
"DependencyTypes",
".",
"AFTER_CORR",
":",
"return",
"'aftercorr'",
"elif",
"_type",
"==",
"DependencyTypes",
".",
"AFTER_NOT_OK",
":",
"return",
"'afternotok'",
"elif",
"_type",
"==",
"DependencyTypes",
".",
"AFTER_OK",
":",
"return",
"'afterok'",
"else",
":",
"return",
"None"
] | Get the dependency type string for SlurmPrinter
:rtype: str | [
"Get",
"the",
"dependency",
"type",
"string",
"for",
"SlurmPrinter"
] | train | https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L21-L38 |
RowleyGroup/pyqueue | pyqueue/systems/slurm.py | SlurmPrinter.get_header | def get_header():
"""
Makes the header section for the scripts
:rtype: str
"""
username, userid, uname = get_user_information()
header = '''\
# This Slurm batch script was generated
# By user: %s (%s)
# On host: %s
# At date: %s
# Using: Pyqueue v%s
''' % (username, userid, uname, datetime.now().strftime('%a. %B %w %X %Y'), __version__)
return header | python | def get_header():
"""
Makes the header section for the scripts
:rtype: str
"""
username, userid, uname = get_user_information()
header = '''\
# This Slurm batch script was generated
# By user: %s (%s)
# On host: %s
# At date: %s
# Using: Pyqueue v%s
''' % (username, userid, uname, datetime.now().strftime('%a. %B %w %X %Y'), __version__)
return header | [
"def",
"get_header",
"(",
")",
":",
"username",
",",
"userid",
",",
"uname",
"=",
"get_user_information",
"(",
")",
"header",
"=",
"'''\\\n# This Slurm batch script was generated\n# By user: %s (%s)\n# On host: %s\n# At date: %s\n# Using: Pyqueue v%s\n\n'''",
"%",
"(",
"username",
",",
"userid",
",",
"uname",
",",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%a. %B %w %X %Y'",
")",
",",
"__version__",
")",
"return",
"header"
] | Makes the header section for the scripts
:rtype: str | [
"Makes",
"the",
"header",
"section",
"for",
"the",
"scripts"
] | train | https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L41-L58 |
RowleyGroup/pyqueue | pyqueue/systems/slurm.py | SlurmPrinter.generate | def generate(self, job):
"""
Generates a job submission script from a job object
:param job: An instance of JobInterface
:type job: pyqueue.job.JobInterface
"""
options = job.get_options().copy()
job_name = options.pop('name', None)
job_account = options.pop('account', None)
job_walltime = options.pop('walltime', None)
job_mem_per_cpu = options.pop('mem_per_cpu', None)
job_memory = options.pop('memory', None)
job_working_directory = options.pop('working_directory', None)
job_error_path = options.pop('error_path', None)
job_output_path = options.pop('output_path', None)
job_dependency = options.pop('depending', None)
job_shell = options.pop('shell', '/bin/bash')
job_custom_options = options.pop('__custom__', [])
directives_lines = []
if job_name is not None:
directives_lines.append('--job-name=%s' % job_name)
if job_account is not None:
directives_lines.append('--account=%s' % job_account)
if job_working_directory is not None:
directives_lines.append('--workdir=%s' % job_working_directory)
if job_error_path is not None:
directives_lines.append('--error=%s' % job_error_path)
if job_output_path is not None:
directives_lines.append('--output=%s' % job_output_path)
if job_walltime is not None:
directives_lines.append('--time=%s' %
strfdelta(job_walltime, '%H:%M:%S'))
if job_mem_per_cpu is not None:
directives_lines.append('--mem-per-cpu=%d' % job_mem_per_cpu)
if job_memory is not None:
directives_lines.append('--mem=%d' % job_memory)
if job_dependency is not None:
master = job_dependency['job']
dependency_type = SlurmPrinter.get_dependency_type(
job_dependency['dependency_type']
)
job_id = master.get_id() if isinstance(master, JobInterface) else master
directives_lines.append(
'--dependency=%s:%s' %
(dependency_type, job_id)
)
for custom_option in job_custom_options:
directives_lines.append(custom_option)
directives = '\n'.join([
'#SBATCH %s' % directive for directive in directives_lines
])
commands = '\n'.join([
'\n'.join(command_container.get_commands()) for command_container in job.get_commands()
])
script = '#!%s\n' % job_shell
script += SlurmPrinter.get_header()
script += directives
script += '\n\n'
script += commands
return script | python | def generate(self, job):
"""
Generates a job submission script from a job object
:param job: An instance of JobInterface
:type job: pyqueue.job.JobInterface
"""
options = job.get_options().copy()
job_name = options.pop('name', None)
job_account = options.pop('account', None)
job_walltime = options.pop('walltime', None)
job_mem_per_cpu = options.pop('mem_per_cpu', None)
job_memory = options.pop('memory', None)
job_working_directory = options.pop('working_directory', None)
job_error_path = options.pop('error_path', None)
job_output_path = options.pop('output_path', None)
job_dependency = options.pop('depending', None)
job_shell = options.pop('shell', '/bin/bash')
job_custom_options = options.pop('__custom__', [])
directives_lines = []
if job_name is not None:
directives_lines.append('--job-name=%s' % job_name)
if job_account is not None:
directives_lines.append('--account=%s' % job_account)
if job_working_directory is not None:
directives_lines.append('--workdir=%s' % job_working_directory)
if job_error_path is not None:
directives_lines.append('--error=%s' % job_error_path)
if job_output_path is not None:
directives_lines.append('--output=%s' % job_output_path)
if job_walltime is not None:
directives_lines.append('--time=%s' %
strfdelta(job_walltime, '%H:%M:%S'))
if job_mem_per_cpu is not None:
directives_lines.append('--mem-per-cpu=%d' % job_mem_per_cpu)
if job_memory is not None:
directives_lines.append('--mem=%d' % job_memory)
if job_dependency is not None:
master = job_dependency['job']
dependency_type = SlurmPrinter.get_dependency_type(
job_dependency['dependency_type']
)
job_id = master.get_id() if isinstance(master, JobInterface) else master
directives_lines.append(
'--dependency=%s:%s' %
(dependency_type, job_id)
)
for custom_option in job_custom_options:
directives_lines.append(custom_option)
directives = '\n'.join([
'#SBATCH %s' % directive for directive in directives_lines
])
commands = '\n'.join([
'\n'.join(command_container.get_commands()) for command_container in job.get_commands()
])
script = '#!%s\n' % job_shell
script += SlurmPrinter.get_header()
script += directives
script += '\n\n'
script += commands
return script | [
"def",
"generate",
"(",
"self",
",",
"job",
")",
":",
"options",
"=",
"job",
".",
"get_options",
"(",
")",
".",
"copy",
"(",
")",
"job_name",
"=",
"options",
".",
"pop",
"(",
"'name'",
",",
"None",
")",
"job_account",
"=",
"options",
".",
"pop",
"(",
"'account'",
",",
"None",
")",
"job_walltime",
"=",
"options",
".",
"pop",
"(",
"'walltime'",
",",
"None",
")",
"job_mem_per_cpu",
"=",
"options",
".",
"pop",
"(",
"'mem_per_cpu'",
",",
"None",
")",
"job_memory",
"=",
"options",
".",
"pop",
"(",
"'memory'",
",",
"None",
")",
"job_working_directory",
"=",
"options",
".",
"pop",
"(",
"'working_directory'",
",",
"None",
")",
"job_error_path",
"=",
"options",
".",
"pop",
"(",
"'error_path'",
",",
"None",
")",
"job_output_path",
"=",
"options",
".",
"pop",
"(",
"'output_path'",
",",
"None",
")",
"job_dependency",
"=",
"options",
".",
"pop",
"(",
"'depending'",
",",
"None",
")",
"job_shell",
"=",
"options",
".",
"pop",
"(",
"'shell'",
",",
"'/bin/bash'",
")",
"job_custom_options",
"=",
"options",
".",
"pop",
"(",
"'__custom__'",
",",
"[",
"]",
")",
"directives_lines",
"=",
"[",
"]",
"if",
"job_name",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--job-name=%s'",
"%",
"job_name",
")",
"if",
"job_account",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--account=%s'",
"%",
"job_account",
")",
"if",
"job_working_directory",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--workdir=%s'",
"%",
"job_working_directory",
")",
"if",
"job_error_path",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--error=%s'",
"%",
"job_error_path",
")",
"if",
"job_output_path",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--output=%s'",
"%",
"job_output_path",
")",
"if",
"job_walltime",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--time=%s'",
"%",
"strfdelta",
"(",
"job_walltime",
",",
"'%H:%M:%S'",
")",
")",
"if",
"job_mem_per_cpu",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--mem-per-cpu=%d'",
"%",
"job_mem_per_cpu",
")",
"if",
"job_memory",
"is",
"not",
"None",
":",
"directives_lines",
".",
"append",
"(",
"'--mem=%d'",
"%",
"job_memory",
")",
"if",
"job_dependency",
"is",
"not",
"None",
":",
"master",
"=",
"job_dependency",
"[",
"'job'",
"]",
"dependency_type",
"=",
"SlurmPrinter",
".",
"get_dependency_type",
"(",
"job_dependency",
"[",
"'dependency_type'",
"]",
")",
"job_id",
"=",
"master",
".",
"get_id",
"(",
")",
"if",
"isinstance",
"(",
"master",
",",
"JobInterface",
")",
"else",
"master",
"directives_lines",
".",
"append",
"(",
"'--dependency=%s:%s'",
"%",
"(",
"dependency_type",
",",
"job_id",
")",
")",
"for",
"custom_option",
"in",
"job_custom_options",
":",
"directives_lines",
".",
"append",
"(",
"custom_option",
")",
"directives",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'#SBATCH %s'",
"%",
"directive",
"for",
"directive",
"in",
"directives_lines",
"]",
")",
"commands",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"'\\n'",
".",
"join",
"(",
"command_container",
".",
"get_commands",
"(",
")",
")",
"for",
"command_container",
"in",
"job",
".",
"get_commands",
"(",
")",
"]",
")",
"script",
"=",
"'#!%s\\n'",
"%",
"job_shell",
"script",
"+=",
"SlurmPrinter",
".",
"get_header",
"(",
")",
"script",
"+=",
"directives",
"script",
"+=",
"'\\n\\n'",
"script",
"+=",
"commands",
"return",
"script"
] | Generates a job submission script from a job object
:param job: An instance of JobInterface
:type job: pyqueue.job.JobInterface | [
"Generates",
"a",
"job",
"submission",
"script",
"from",
"a",
"job",
"object"
] | train | https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L60-L137 |
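A standalone sketch of the directive-assembly pattern that `generate()` follows, using a plain dict instead of a pyqueue job object; the option names and the walltime formatting below are illustrative, not the library's API:

```python
from datetime import timedelta

def build_sbatch_directives(options):
    """Mirror the option -> '#SBATCH ...' translation performed by generate()."""
    lines = []
    if options.get('name'):
        lines.append('--job-name=%s' % options['name'])
    if options.get('walltime'):
        total = int(options['walltime'].total_seconds())
        lines.append('--time=%02d:%02d:%02d' % (total // 3600, (total % 3600) // 60, total % 60))
    if options.get('memory'):
        lines.append('--mem=%d' % options['memory'])
    return '\n'.join('#SBATCH %s' % line for line in lines)

print(build_sbatch_directives({'name': 'demo', 'walltime': timedelta(hours=2), 'memory': 4096}))
# #SBATCH --job-name=demo
# #SBATCH --time=02:00:00
# #SBATCH --mem=4096
```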
RowleyGroup/pyqueue | pyqueue/systems/slurm.py | SlurmLocalSubmitter.submit | def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
from subprocess import Popen, PIPE
script = self._printer.generate(job)
process = Popen('sbatch', stdout=PIPE, stdin=PIPE, stderr=PIPE)
stdout, sterr = process.communicate(input=script)
process.stdin.close() | python | def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
from subprocess import Popen, PIPE
script = self._printer.generate(job)
process = Popen('sbatch', stdout=PIPE, stdin=PIPE, stderr=PIPE)
stdout, sterr = process.communicate(input=script)
process.stdin.close() | [
"def",
"submit",
"(",
"self",
",",
"job",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"script",
"=",
"self",
".",
"_printer",
".",
"generate",
"(",
"job",
")",
"process",
"=",
"Popen",
"(",
"'sbatch'",
",",
"stdout",
"=",
"PIPE",
",",
"stdin",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"stdout",
",",
"sterr",
"=",
"process",
".",
"communicate",
"(",
"input",
"=",
"script",
")",
"process",
".",
"stdin",
".",
"close",
"(",
")"
] | Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface | [
"Submits",
"a",
"given",
"job"
] | train | https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L149-L160 |
RowleyGroup/pyqueue | pyqueue/systems/slurm.py | SlurmRemoteSubmitter.submit | def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
script = self._printer.generate(job)
stdin, stdout, stderr = self._ssh.exec_command('sbatch')
stdin.write(script)
stdin.flush()
stdin.channel.shutdown_write()
return stdout.read() | python | def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
script = self._printer.generate(job)
stdin, stdout, stderr = self._ssh.exec_command('sbatch')
stdin.write(script)
stdin.flush()
stdin.channel.shutdown_write()
return stdout.read() | [
"def",
"submit",
"(",
"self",
",",
"job",
")",
":",
"script",
"=",
"self",
".",
"_printer",
".",
"generate",
"(",
"job",
")",
"stdin",
",",
"stdout",
",",
"stderr",
"=",
"self",
".",
"_ssh",
".",
"exec_command",
"(",
"'sbatch'",
")",
"stdin",
".",
"write",
"(",
"script",
")",
"stdin",
".",
"flush",
"(",
")",
"stdin",
".",
"channel",
".",
"shutdown_write",
"(",
")",
"return",
"stdout",
".",
"read",
"(",
")"
] | Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface | [
"Submits",
"a",
"given",
"job"
] | train | https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L172-L184 |
ocaballeror/LyricFetch | lyricfetch/song.py | get_info_mpris2 | def get_info_mpris2(name):
"""
Get the current playing song from an mpris2 compliant player.
"""
# qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\
# org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat
bus_name = 'org.mpris.MediaPlayer2.' + name
path = '/org/mpris/MediaPlayer2'
interface = 'org.mpris.MediaPlayer2.Player'
address = DBusAddress(path, bus_name=bus_name, interface=interface)
msg = Properties(address).get('Metadata')
connection = connect_and_authenticate()
response = connection.send_and_get_reply(msg)
metadata = dict(response[0][1])
keys = ['album', 'title', 'artist', 'albumartist']
info = {}
metadata = {k: v for k, v in metadata.items() if 'xesam:' in k}
for key, value in metadata.items():
name = key.split(':')[1].lower()
value = value[1]
if name not in keys or name in info:
continue
if isinstance(value, list):
value = value[0]
info[name] = value
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | python | def get_info_mpris2(name):
"""
Get the current playing song from an mpris2 compliant player.
"""
# qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\
# org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat
bus_name = 'org.mpris.MediaPlayer2.' + name
path = '/org/mpris/MediaPlayer2'
interface = 'org.mpris.MediaPlayer2.Player'
address = DBusAddress(path, bus_name=bus_name, interface=interface)
msg = Properties(address).get('Metadata')
connection = connect_and_authenticate()
response = connection.send_and_get_reply(msg)
metadata = dict(response[0][1])
keys = ['album', 'title', 'artist', 'albumartist']
info = {}
metadata = {k: v for k, v in metadata.items() if 'xesam:' in k}
for key, value in metadata.items():
name = key.split(':')[1].lower()
value = value[1]
if name not in keys or name in info:
continue
if isinstance(value, list):
value = value[0]
info[name] = value
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | [
"def",
"get_info_mpris2",
"(",
"name",
")",
":",
"# qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\\",
"# org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat",
"bus_name",
"=",
"'org.mpris.MediaPlayer2.'",
"+",
"name",
"path",
"=",
"'/org/mpris/MediaPlayer2'",
"interface",
"=",
"'org.mpris.MediaPlayer2.Player'",
"address",
"=",
"DBusAddress",
"(",
"path",
",",
"bus_name",
"=",
"bus_name",
",",
"interface",
"=",
"interface",
")",
"msg",
"=",
"Properties",
"(",
"address",
")",
".",
"get",
"(",
"'Metadata'",
")",
"connection",
"=",
"connect_and_authenticate",
"(",
")",
"response",
"=",
"connection",
".",
"send_and_get_reply",
"(",
"msg",
")",
"metadata",
"=",
"dict",
"(",
"response",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"keys",
"=",
"[",
"'album'",
",",
"'title'",
",",
"'artist'",
",",
"'albumartist'",
"]",
"info",
"=",
"{",
"}",
"metadata",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"metadata",
".",
"items",
"(",
")",
"if",
"'xesam:'",
"in",
"k",
"}",
"for",
"key",
",",
"value",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"name",
"=",
"key",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"value",
"=",
"value",
"[",
"1",
"]",
"if",
"name",
"not",
"in",
"keys",
"or",
"name",
"in",
"info",
":",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"value",
"=",
"value",
"[",
"0",
"]",
"info",
"[",
"name",
"]",
"=",
"value",
"if",
"'albumartist'",
"in",
"info",
":",
"info",
"[",
"'artist'",
"]",
"=",
"info",
"[",
"'albumartist'",
"]",
"del",
"info",
"[",
"'albumartist'",
"]",
"return",
"Song",
"(",
"*",
"*",
"info",
")"
] | Get the current playing song from an mpris2 compliant player. | [
"Get",
"the",
"current",
"playing",
"song",
"from",
"an",
"mpris2",
"compliant",
"player",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L147-L178 |
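A minimal call sketch for the record above (it assumes a running D-Bus session with an MPRIS2 player and that the function is importable from `lyricfetch.song`, per the file path; the player name is only an example):

```python
from lyricfetch.song import get_info_mpris2

song = get_info_mpris2('spotify')   # any org.mpris.MediaPlayer2.<name> suffix
print(song.artist, '-', song.title)
```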
ocaballeror/LyricFetch | lyricfetch/song.py | get_current_clementine | def get_current_clementine():
"""
Get the current song from clementine.
"""
# mpris_version 2
try:
return get_info_mpris2('clementine')
except DBusErrorResponse:
bus_name = 'org.mpris.clementine'
path = '/Player'
interface = 'org.freedesktop.MediaPlayer'
return dbus_get_metadata(path, bus_name, interface) | python | def get_current_clementine():
"""
Get the current song from clementine.
"""
# mpris_version 2
try:
return get_info_mpris2('clementine')
except DBusErrorResponse:
bus_name = 'org.mpris.clementine'
path = '/Player'
interface = 'org.freedesktop.MediaPlayer'
return dbus_get_metadata(path, bus_name, interface) | [
"def",
"get_current_clementine",
"(",
")",
":",
"# mpris_version 2",
"try",
":",
"return",
"get_info_mpris2",
"(",
"'clementine'",
")",
"except",
"DBusErrorResponse",
":",
"bus_name",
"=",
"'org.mpris.clementine'",
"path",
"=",
"'/Player'",
"interface",
"=",
"'org.freedesktop.MediaPlayer'",
"return",
"dbus_get_metadata",
"(",
"path",
",",
"bus_name",
",",
"interface",
")"
] | Get the current song from clementine. | [
"Get",
"the",
"current",
"song",
"from",
"clementine",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L198-L209 |
ocaballeror/LyricFetch | lyricfetch/song.py | get_current_cmus | def get_current_cmus():
"""
Get the current song from cmus.
"""
result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
info = {}
for line in result.stdout.decode().split('\n'):
line = line.split(' ')
if line[0] != 'tag':
continue
key = line[1]
if key in ['album', 'title', 'artist', 'albumartist'] and\
key not in info:
info[key] = ' '.join(line[2:])
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | python | def get_current_cmus():
"""
Get the current song from cmus.
"""
result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
info = {}
for line in result.stdout.decode().split('\n'):
line = line.split(' ')
if line[0] != 'tag':
continue
key = line[1]
if key in ['album', 'title', 'artist', 'albumartist'] and\
key not in info:
info[key] = ' '.join(line[2:])
if 'albumartist' in info:
info['artist'] = info['albumartist']
del info['albumartist']
return Song(**info) | [
"def",
"get_current_cmus",
"(",
")",
":",
"result",
"=",
"subprocess",
".",
"run",
"(",
"'cmus-remote -Q'",
".",
"split",
"(",
"' '",
")",
",",
"check",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"DEVNULL",
")",
"info",
"=",
"{",
"}",
"for",
"line",
"in",
"result",
".",
"stdout",
".",
"decode",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
":",
"line",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"if",
"line",
"[",
"0",
"]",
"!=",
"'tag'",
":",
"continue",
"key",
"=",
"line",
"[",
"1",
"]",
"if",
"key",
"in",
"[",
"'album'",
",",
"'title'",
",",
"'artist'",
",",
"'albumartist'",
"]",
"and",
"key",
"not",
"in",
"info",
":",
"info",
"[",
"key",
"]",
"=",
"' '",
".",
"join",
"(",
"line",
"[",
"2",
":",
"]",
")",
"if",
"'albumartist'",
"in",
"info",
":",
"info",
"[",
"'artist'",
"]",
"=",
"info",
"[",
"'albumartist'",
"]",
"del",
"info",
"[",
"'albumartist'",
"]",
"return",
"Song",
"(",
"*",
"*",
"info",
")"
] | Get the current song from cmus. | [
"Get",
"the",
"current",
"song",
"from",
"cmus",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L212-L232 |
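A standalone sketch of the `cmus-remote -Q` tag parsing performed above, fed with a canned status block instead of a subprocess call:

```python
sample = (
    "status playing\n"
    "tag artist Leprous\n"
    "tag album Malina\n"
    "tag title Bonneville\n"
    "tag albumartist Leprous"
)

info = {}
for line in sample.split('\n'):
    parts = line.split(' ')
    if parts[0] != 'tag':
        continue
    key = parts[1]
    if key in ('album', 'title', 'artist', 'albumartist') and key not in info:
        info[key] = ' '.join(parts[2:])
if 'albumartist' in info:
    info['artist'] = info.pop('albumartist')   # the album artist wins, as in the original

print(info)   # {'artist': 'Leprous', 'album': 'Malina', 'title': 'Bonneville'}
```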
ocaballeror/LyricFetch | lyricfetch/song.py | Song.from_filename | def from_filename(cls, filename):
"""
Class constructor using the path to the corresponding mp3 file. The
metadata will be read from this file to create the song object, so it
must at least contain valid ID3 tags for artist and title.
"""
if not filename:
logger.error('No filename specified')
return None
if not os.path.exists(filename):
logger.error("Err: File '%s' does not exist", filename)
return None
if os.path.isdir(filename):
logger.error("Err: File '%s' is a directory", filename)
return None
try:
audiofile = eyed3.load(filename)
except Exception as error:
print(type(error), error)
return None
# Sometimes eyed3 may return a null object and not raise any exceptions
if audiofile is None:
return None
tags = audiofile.tag
album = tags.album
title = tags.title
lyrics = ''.join([l.text for l in tags.lyrics])
artist = tags.album_artist
if not artist:
artist = tags.artist
song = cls(artist, title, album, lyrics)
song.filename = filename
return song | python | def from_filename(cls, filename):
"""
Class constructor using the path to the corresponding mp3 file. The
metadata will be read from this file to create the song object, so it
must at least contain valid ID3 tags for artist and title.
"""
if not filename:
logger.error('No filename specified')
return None
if not os.path.exists(filename):
logger.error("Err: File '%s' does not exist", filename)
return None
if os.path.isdir(filename):
logger.error("Err: File '%s' is a directory", filename)
return None
try:
audiofile = eyed3.load(filename)
except Exception as error:
print(type(error), error)
return None
# Sometimes eyed3 may return a null object and not raise any exceptions
if audiofile is None:
return None
tags = audiofile.tag
album = tags.album
title = tags.title
lyrics = ''.join([l.text for l in tags.lyrics])
artist = tags.album_artist
if not artist:
artist = tags.artist
song = cls(artist, title, album, lyrics)
song.filename = filename
return song | [
"def",
"from_filename",
"(",
"cls",
",",
"filename",
")",
":",
"if",
"not",
"filename",
":",
"logger",
".",
"error",
"(",
"'No filename specified'",
")",
"return",
"None",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"logger",
".",
"error",
"(",
"\"Err: File '%s' does not exist\"",
",",
"filename",
")",
"return",
"None",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
":",
"logger",
".",
"error",
"(",
"\"Err: File '%s' is a directory\"",
",",
"filename",
")",
"return",
"None",
"try",
":",
"audiofile",
"=",
"eyed3",
".",
"load",
"(",
"filename",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"type",
"(",
"error",
")",
",",
"error",
")",
"return",
"None",
"# Sometimes eyed3 may return a null object and not raise any exceptions",
"if",
"audiofile",
"is",
"None",
":",
"return",
"None",
"tags",
"=",
"audiofile",
".",
"tag",
"album",
"=",
"tags",
".",
"album",
"title",
"=",
"tags",
".",
"title",
"lyrics",
"=",
"''",
".",
"join",
"(",
"[",
"l",
".",
"text",
"for",
"l",
"in",
"tags",
".",
"lyrics",
"]",
")",
"artist",
"=",
"tags",
".",
"album_artist",
"if",
"not",
"artist",
":",
"artist",
"=",
"tags",
".",
"artist",
"song",
"=",
"cls",
"(",
"artist",
",",
"title",
",",
"album",
",",
"lyrics",
")",
"song",
".",
"filename",
"=",
"filename",
"return",
"song"
] | Class constructor using the path to the corresponding mp3 file. The
metadata will be read from this file to create the song object, so it
must at least contain valid ID3 tags for artist and title. | [
"Class",
"constructor",
"using",
"the",
"path",
"to",
"the",
"corresponding",
"mp3",
"file",
".",
"The",
"metadata",
"will",
"be",
"read",
"from",
"this",
"file",
"to",
"create",
"the",
"song",
"object",
"so",
"it",
"must",
"at",
"least",
"contain",
"valid",
"ID3",
"tags",
"for",
"artist",
"and",
"title",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L65-L103 |
ocaballeror/LyricFetch | lyricfetch/song.py | Song.from_string | def from_string(cls, name, separator='-', reverse=False):
"""
Class constructor using a string with the artist and title. This should
be used when parsing user input, since all the information must be
specified in a single string formatted as: '{artist} - {title}'.
"""
recv = [t.strip() for t in name.split(separator)]
if len(recv) < 2:
logger.error('Wrong format!')
return None
if reverse:
title = recv[0]
artist = ''.join(recv[1:])
else:
artist = recv[0]
title = ''.join(recv[1:])
if not artist or not title:
logger.error('Wrong format!')
return None
song = cls(artist, title)
return song | python | def from_string(cls, name, separator='-', reverse=False):
"""
Class constructor using a string with the artist and title. This should
be used when parsing user input, since all the information must be
specified in a single string formatted as: '{artist} - {title}'.
"""
recv = [t.strip() for t in name.split(separator)]
if len(recv) < 2:
logger.error('Wrong format!')
return None
if reverse:
title = recv[0]
artist = ''.join(recv[1:])
else:
artist = recv[0]
title = ''.join(recv[1:])
if not artist or not title:
logger.error('Wrong format!')
return None
song = cls(artist, title)
return song | [
"def",
"from_string",
"(",
"cls",
",",
"name",
",",
"separator",
"=",
"'-'",
",",
"reverse",
"=",
"False",
")",
":",
"recv",
"=",
"[",
"t",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"name",
".",
"split",
"(",
"separator",
")",
"]",
"if",
"len",
"(",
"recv",
")",
"<",
"2",
":",
"logger",
".",
"error",
"(",
"'Wrong format!'",
")",
"return",
"None",
"if",
"reverse",
":",
"title",
"=",
"recv",
"[",
"0",
"]",
"artist",
"=",
"''",
".",
"join",
"(",
"recv",
"[",
"1",
":",
"]",
")",
"else",
":",
"artist",
"=",
"recv",
"[",
"0",
"]",
"title",
"=",
"''",
".",
"join",
"(",
"recv",
"[",
"1",
":",
"]",
")",
"if",
"not",
"artist",
"or",
"not",
"title",
":",
"logger",
".",
"error",
"(",
"'Wrong format!'",
")",
"return",
"None",
"song",
"=",
"cls",
"(",
"artist",
",",
"title",
")",
"return",
"song"
] | Class constructor using a string with the artist and title. This should
be used when parsing user input, since all the information must be
specified in a single string formatted as: '{artist} - {title}'. | [
"Class",
"constructor",
"using",
"a",
"string",
"with",
"the",
"artist",
"and",
"title",
".",
"This",
"should",
"be",
"used",
"when",
"parsing",
"user",
"input",
"since",
"all",
"the",
"information",
"must",
"be",
"specified",
"in",
"a",
"single",
"string",
"formatted",
"as",
":",
"{",
"artist",
"}",
"-",
"{",
"title",
"}",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L106-L129 |
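A minimal usage sketch (it assumes the package is importable as `lyricfetch.song`, matching the file path above; the song names are only examples):

```python
from lyricfetch.song import Song

song = Song.from_string('Opeth - Ghost of Perdition')
print(song.artist, '/', song.title)   # Opeth / Ghost of Perdition

# Separator and field order are configurable:
song = Song.from_string('Ghost of Perdition - Opeth', reverse=True)
song = Song.from_string('Opeth: Ghost of Perdition', separator=':')
```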
ocaballeror/LyricFetch | lyricfetch/song.py | Song.fetch_album_name | def fetch_album_name(self):
"""
Get the name of the album from lastfm.
"""
response = get_lastfm('track.getInfo', artist=self.artist,
track=self.title)
if response:
try:
self.album = response['track']['album']['title']
logger.debug('Found album %s from lastfm', self.album)
except Exception:
logger.warning('Could not fetch album name for %s', self)
else:
logger.warning('Could not fetch album name for %s', self) | python | def fetch_album_name(self):
"""
Get the name of the album from lastfm.
"""
response = get_lastfm('track.getInfo', artist=self.artist,
track=self.title)
if response:
try:
self.album = response['track']['album']['title']
logger.debug('Found album %s from lastfm', self.album)
except Exception:
logger.warning('Could not fetch album name for %s', self)
else:
logger.warning('Could not fetch album name for %s', self) | [
"def",
"fetch_album_name",
"(",
"self",
")",
":",
"response",
"=",
"get_lastfm",
"(",
"'track.getInfo'",
",",
"artist",
"=",
"self",
".",
"artist",
",",
"track",
"=",
"self",
".",
"title",
")",
"if",
"response",
":",
"try",
":",
"self",
".",
"album",
"=",
"response",
"[",
"'track'",
"]",
"[",
"'album'",
"]",
"[",
"'title'",
"]",
"logger",
".",
"debug",
"(",
"'Found album %s from lastfm'",
",",
"self",
".",
"album",
")",
"except",
"Exception",
":",
"logger",
".",
"warning",
"(",
"'Could not fetch album name for %s'",
",",
"self",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'Could not fetch album name for %s'",
",",
"self",
")"
] | Get the name of the album from lastfm. | [
"Get",
"the",
"name",
"of",
"the",
"album",
"from",
"lastfm",
"."
] | train | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L131-L144 |
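A short usage sketch: `fetch_album_name()` fills in the album through the Last.fm `track.getInfo` call wrapped by `get_lastfm`, so it needs network access and whatever API configuration that helper expects (not shown in this record):

```python
from lyricfetch.song import Song

song = Song.from_string('Dream Theater - Octavarium')
song.fetch_album_name()                  # queries Last.fm for the album title
print(getattr(song, 'album', None))      # set if the lookup succeeded
```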
numan/py-analytics | analytics/backends/redis.py | Redis._get_closest_week | def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday) | python | def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday) | [
"def",
"_get_closest_week",
"(",
"self",
",",
"metric_date",
")",
":",
"#find the offset to the closest monday",
"days_after_monday",
"=",
"metric_date",
".",
"isoweekday",
"(",
")",
"-",
"1",
"return",
"metric_date",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"days_after_monday",
")"
] | Gets the closest monday to the date provided. | [
"Gets",
"the",
"closest",
"monday",
"to",
"the",
"date",
"provided",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L58-L65 |
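A standalone check of the Monday arithmetic above: `isoweekday()` is 1 for Monday, so subtracting `isoweekday() - 1` days always lands on the Monday of the same ISO week (the dates below are only illustrative):

```python
import datetime

def closest_week(metric_date):
    return metric_date - datetime.timedelta(days=metric_date.isoweekday() - 1)

print(closest_week(datetime.date(2013, 1, 10)))   # 2013-01-07 (a Thursday maps back to Monday)
print(closest_week(datetime.date(2013, 1, 7)))    # 2013-01-07 (a Monday maps to itself)
```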
numan/py-analytics | analytics/backends/redis.py | Redis._get_daily_date_range | def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates | python | def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates | [
"def",
"_get_daily_date_range",
"(",
"self",
",",
"metric_date",
",",
"delta",
")",
":",
"dates",
"=",
"[",
"metric_date",
"]",
"start_date",
"=",
"metric_date",
"end_date",
"=",
"metric_date",
"+",
"delta",
"while",
"start_date",
".",
"month",
"<",
"end_date",
".",
"month",
"or",
"start_date",
".",
"year",
"<",
"end_date",
".",
"year",
":",
"days_in_month",
"=",
"calendar",
".",
"monthrange",
"(",
"start_date",
".",
"year",
",",
"start_date",
".",
"month",
")",
"[",
"1",
"]",
"#shift along to the next month as one of the months we will have to see. We don't care that the exact date",
"#is the 1st in each subsequent date range as we only care about the year and the month",
"start_date",
"=",
"start_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"days_in_month",
"-",
"start_date",
".",
"day",
"+",
"1",
")",
"dates",
".",
"append",
"(",
"start_date",
")",
"return",
"dates"
] | Get the range of months that we need to use as keys to scan redis. | [
"Get",
"the",
"range",
"of",
"months",
"that",
"we",
"need",
"to",
"use",
"as",
"keys",
"to",
"scan",
"redis",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L97-L112 |
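A standalone sketch of the month-bucketing loop above: starting from a date, it appends one marker date per calendar month the range touches, and per the docstring those year/month markers become the redis keys to scan:

```python
import calendar
import datetime

def daily_key_months(start, delta):
    dates = [start]
    cursor, end = start, start + delta
    while cursor.month < end.month or cursor.year < end.year:
        days_in_month = calendar.monthrange(cursor.year, cursor.month)[1]
        # Jump to the 1st of the next month; only year and month matter for the keys.
        cursor = cursor + datetime.timedelta(days=days_in_month - cursor.day + 1)
        dates.append(cursor)
    return dates

print(daily_key_months(datetime.date(2013, 1, 20), datetime.timedelta(days=45)))
# [datetime.date(2013, 1, 20), datetime.date(2013, 2, 1), datetime.date(2013, 3, 1)]
```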
numan/py-analytics | analytics/backends/redis.py | Redis._get_weekly_date_range | def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates | python | def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates | [
"def",
"_get_weekly_date_range",
"(",
"self",
",",
"metric_date",
",",
"delta",
")",
":",
"dates",
"=",
"[",
"metric_date",
"]",
"end_date",
"=",
"metric_date",
"+",
"delta",
"#Figure out how many years our metric range spans",
"spanning_years",
"=",
"end_date",
".",
"year",
"-",
"metric_date",
".",
"year",
"for",
"i",
"in",
"range",
"(",
"spanning_years",
")",
":",
"#for the weekly keys, we only care about the year",
"dates",
".",
"append",
"(",
"datetime",
".",
"date",
"(",
"year",
"=",
"metric_date",
".",
"year",
"+",
"(",
"i",
"+",
"1",
")",
",",
"month",
"=",
"1",
",",
"day",
"=",
"1",
")",
")",
"return",
"dates"
] | Gets the range of years that we need to use as keys to get metrics from redis. | [
"Gets",
"the",
"range",
"of",
"years",
"that",
"we",
"need",
"to",
"use",
"as",
"keys",
"to",
"get",
"metrics",
"from",
"redis",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L114-L127 |
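The weekly counterpart only needs one marker per calendar year; a standalone sketch of the same logic:

```python
import datetime

def weekly_key_years(start, delta):
    dates = [start]
    end = start + delta
    for i in range(end.year - start.year):
        # One extra January 1st per additional year the range spans.
        dates.append(datetime.date(year=start.year + i + 1, month=1, day=1))
    return dates

print(weekly_key_years(datetime.date(2012, 12, 10), datetime.timedelta(weeks=8)))
# [datetime.date(2012, 12, 10), datetime.date(2013, 1, 1)]
```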
numan/py-analytics | analytics/backends/redis.py | Redis.clear_all | def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
            Very expensive and destructive operation. Use with caution
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key) | python | def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
            Very expensive and destructive operation. Use with caution
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key) | [
"def",
"clear_all",
"(",
"self",
")",
":",
"keys",
"=",
"self",
".",
"_analytics_backend",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"itertools",
".",
"chain",
"(",
"*",
"keys",
")",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"if",
"key",
".",
"startswith",
"(",
"self",
".",
"_prefix",
")",
":",
"conn",
".",
"delete",
"(",
"key",
")"
] | Deletes all ``sandsnake`` related data from redis.
.. warning::
            Very expensive and destructive operation. Use with caution | [
"Deletes",
"all",
"sandsnake",
"related",
"data",
"from",
"redis",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L151-L164 |
numan/py-analytics | analytics/backends/redis.py | Redis.track_count | def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt) | python | def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt) | [
"def",
"track_count",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"inc_amt",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_analytics_backend",
".",
"incr",
"(",
"self",
".",
"_prefix",
"+",
"\":\"",
"+",
"\"analy:%s:count:%s\"",
"%",
"(",
"unique_identifier",
",",
"metric",
")",
",",
"inc_amt",
")"
] | Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise | [
"Tracks",
"a",
"metric",
"just",
"by",
"count",
".",
"If",
"you",
"track",
"a",
"metric",
"this",
"way",
"you",
"won",
"t",
"be",
"able",
"to",
"query",
"the",
"metric",
"by",
"day",
"week",
"or",
"month",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L166-L176 |
numan/py-analytics | analytics/backends/redis.py | Redis.track_metric | def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results | python | def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results | [
"def",
"track_metric",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"date",
"=",
"None",
",",
"inc_amt",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"metric",
"=",
"[",
"metric",
"]",
"if",
"isinstance",
"(",
"metric",
",",
"basestring",
")",
"else",
"metric",
"unique_identifier",
"=",
"[",
"unique_identifier",
"]",
"if",
"not",
"isinstance",
"(",
"unique_identifier",
",",
"(",
"types",
".",
"ListType",
",",
"types",
".",
"TupleType",
",",
"types",
".",
"GeneratorType",
",",
")",
")",
"else",
"unique_identifier",
"results",
"=",
"[",
"]",
"if",
"date",
"is",
"None",
":",
"date",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"for",
"uid",
"in",
"unique_identifier",
":",
"hash_key_daily",
"=",
"self",
".",
"_get_daily_metric_key",
"(",
"uid",
",",
"date",
")",
"closest_monday",
"=",
"self",
".",
"_get_closest_week",
"(",
"date",
")",
"hash_key_weekly",
"=",
"self",
".",
"_get_weekly_metric_key",
"(",
"uid",
",",
"date",
")",
"for",
"single_metric",
"in",
"metric",
":",
"daily_metric_name",
"=",
"self",
".",
"_get_daily_metric_name",
"(",
"single_metric",
",",
"date",
")",
"weekly_metric_name",
"=",
"self",
".",
"_get_weekly_metric_name",
"(",
"single_metric",
",",
"closest_monday",
")",
"monthly_metric_name",
"=",
"self",
".",
"_get_monthly_metric_name",
"(",
"single_metric",
",",
"date",
")",
"results",
".",
"append",
"(",
"[",
"conn",
".",
"hincrby",
"(",
"hash_key_daily",
",",
"daily_metric_name",
",",
"inc_amt",
")",
",",
"conn",
".",
"hincrby",
"(",
"hash_key_weekly",
",",
"weekly_metric_name",
",",
"inc_amt",
")",
",",
"conn",
".",
"hincrby",
"(",
"hash_key_weekly",
",",
"monthly_metric_name",
",",
"inc_amt",
")",
",",
"conn",
".",
"incr",
"(",
"self",
".",
"_prefix",
"+",
"\":\"",
"+",
"\"analy:%s:count:%s\"",
"%",
"(",
"uid",
",",
"single_metric",
")",
",",
"inc_amt",
")",
"]",
")",
"return",
"results"
] | Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise | [
"Tracks",
"a",
"metric",
"for",
"a",
"specific",
"unique_identifier",
"for",
"a",
"certain",
"date",
".",
"The",
"redis",
"backend",
"supports",
"lists",
"for",
"both",
"unique_identifier",
"and",
"metric",
"allowing",
"for",
"tracking",
"of",
"multiple",
"metrics",
"for",
"multiple",
"unique_identifiers",
"efficiently",
".",
"Not",
"all",
"backends",
"may",
"support",
"this",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L178-L216 |
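A simplified, standalone illustration of the bucketing idea behind `track_metric()` using redis-py directly: daily counts go into one hash, weekly counts into another, plus a plain running counter, all via `HINCRBY`/`INCR`. The key and field names here are invented for the demo and are not the library's internal naming scheme, and it assumes a redis server on localhost:

```python
import datetime
import redis

r = redis.StrictRedis()
uid, metric = 'user:1234', 'comments'
today = datetime.date.today()
monday = today - datetime.timedelta(days=today.isoweekday() - 1)

# One hash per (user, month) for daily buckets, one per (user, year) for weekly buckets.
r.hincrby('demo:%s:%s' % (uid, today.strftime('%Y-%m')), 'day:%s:%s' % (metric, today.isoformat()), 1)
r.hincrby('demo:%s:%s' % (uid, today.strftime('%Y')), 'week:%s:%s' % (metric, monday.isoformat()), 1)
r.incr('demo:%s:count:%s' % (uid, metric), 1)
```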
numan/py-analytics | analytics/backends/redis.py | Redis.get_metric_by_day | def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
        starting from ``from_date``
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
        :param limit: The total number of days to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | python | def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
        starting from ``from_date``
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
        :param limit: The total number of days to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | [
"def",
"get_metric_by_day",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"30",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"kwargs",
".",
"get",
"(",
"\"connection\"",
",",
"None",
")",
"date_generator",
"=",
"(",
"from_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
")",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_daily_date_range",
"(",
"from_date",
",",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"limit",
")",
")",
"#generate a list of mondays in between the start date and the end date",
"series",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"date_generator",
",",
"limit",
")",
")",
"metric_keys",
"=",
"[",
"self",
".",
"_get_daily_metric_name",
"(",
"metric",
",",
"daily_date",
")",
"for",
"daily_date",
"in",
"series",
"]",
"metric_func",
"=",
"lambda",
"conn",
":",
"[",
"conn",
".",
"hmget",
"(",
"self",
".",
"_get_daily_metric_key",
"(",
"unique_identifier",
",",
"metric_key_date",
")",
",",
"metric_keys",
")",
"for",
"metric_key_date",
"in",
"metric_key_date_range",
"]",
"if",
"conn",
"is",
"not",
"None",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"else",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"series",
",",
"results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"series",
",",
"results",
")",
"return",
"series",
",",
"results"
] | Returns the ``metric`` for ``unique_identifier`` segmented by day
        starting from ``from_date``
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
        :param limit: The total number of days to retrieve starting from ``from_date`` | [
"Returns",
"the",
"metric",
"for",
"unique_identifier",
"segmented",
"by",
"day",
"starting",
"from",
"from_date"
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L218-L246 |
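A standalone sketch of how the date series above is built with a generator plus `itertools.islice`; the weekly variant in the next record does the same with `itertools.count(step=7)` so every entry stays on a Monday:

```python
import datetime
import itertools

from_date, limit = datetime.date(2013, 1, 30), 5
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
series = list(itertools.islice(date_generator, limit))
print(series)
# [datetime.date(2013, 1, 30), datetime.date(2013, 1, 31), datetime.date(2013, 2, 1),
#  datetime.date(2013, 2, 2), datetime.date(2013, 2, 3)]
```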
numan/py-analytics | analytics/backends/redis.py | Redis.get_metric_by_week | def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
        starting from ``from_date``
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
        :param limit: The total number of weeks to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | python | def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
        starting from ``from_date``
        :param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
        :param limit: The total number of weeks to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | [
"def",
"get_metric_by_week",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"kwargs",
".",
"get",
"(",
"\"connection\"",
",",
"None",
")",
"closest_monday_from_date",
"=",
"self",
".",
"_get_closest_week",
"(",
"from_date",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_weekly_date_range",
"(",
"closest_monday_from_date",
",",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"limit",
")",
")",
"date_generator",
"=",
"(",
"closest_monday_from_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
"step",
"=",
"7",
")",
")",
"#generate a list of mondays in between the start date and the end date",
"series",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"date_generator",
",",
"limit",
")",
")",
"metric_keys",
"=",
"[",
"self",
".",
"_get_weekly_metric_name",
"(",
"metric",
",",
"monday_date",
")",
"for",
"monday_date",
"in",
"series",
"]",
"metric_func",
"=",
"lambda",
"conn",
":",
"[",
"conn",
".",
"hmget",
"(",
"self",
".",
"_get_weekly_metric_key",
"(",
"unique_identifier",
",",
"metric_key_date",
")",
",",
"metric_keys",
")",
"for",
"metric_key_date",
"in",
"metric_key_date_range",
"]",
"if",
"conn",
"is",
"not",
"None",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"else",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"series",
",",
"results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"series",
",",
"results",
")",
"return",
"series",
",",
"results"
] | Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A Python date object
:param limit: The total number of weeks to retrieve starting from ``from_date`` | [
"Returns",
"the",
"metric",
"for",
"unique_identifier",
"segmented",
"by",
"week",
"starting",
"from",
"from_date"
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L248-L278 |
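A similar sketch for the weekly accessor, under the same assumption of a configured `backend` instance; the method snaps ``from_date`` to the closest Monday via ``_get_closest_week``, so any weekday can be passed.

# hypothetical usage, not taken from the repository
import datetime

series, results = backend.get_metric_by_week("user:1", "comments_posted",
                                              datetime.date(2013, 3, 14), limit=4)
# `series` holds the 4 Monday dates; `results` maps each week to its total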
numan/py-analytics | analytics/backends/redis.py | Redis.get_metric_by_month | def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | python | def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results | [
"def",
"get_metric_by_month",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"10",
",",
"*",
"*",
"kwargs",
")",
":",
"conn",
"=",
"kwargs",
".",
"get",
"(",
"\"connection\"",
",",
"None",
")",
"first_of_month",
"=",
"datetime",
".",
"date",
"(",
"year",
"=",
"from_date",
".",
"year",
",",
"month",
"=",
"from_date",
".",
"month",
",",
"day",
"=",
"1",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_weekly_date_range",
"(",
"first_of_month",
",",
"relativedelta",
"(",
"months",
"=",
"limit",
")",
")",
"date_generator",
"=",
"(",
"first_of_month",
"+",
"relativedelta",
"(",
"months",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
")",
")",
"#generate a list of first_of_month's in between the start date and the end date",
"series",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"date_generator",
",",
"limit",
")",
")",
"metric_keys",
"=",
"[",
"self",
".",
"_get_monthly_metric_name",
"(",
"metric",
",",
"month_date",
")",
"for",
"month_date",
"in",
"series",
"]",
"metric_func",
"=",
"lambda",
"conn",
":",
"[",
"conn",
".",
"hmget",
"(",
"self",
".",
"_get_weekly_metric_key",
"(",
"unique_identifier",
",",
"metric_key_date",
")",
",",
"metric_keys",
")",
"for",
"metric_key_date",
"in",
"metric_key_date_range",
"]",
"if",
"conn",
"is",
"not",
"None",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"else",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"results",
"=",
"metric_func",
"(",
"conn",
")",
"series",
",",
"results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"series",
",",
"results",
")",
"return",
"series",
",",
"results"
] | Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A Python date object
:param limit: The total number of months to retrieve starting from ``from_date`` | [
"Returns",
"the",
"metric",
"for",
"unique_identifier",
"segmented",
"by",
"month",
"starting",
"from",
"from_date",
".",
"It",
"will",
"retrieve",
"metrics",
"data",
"starting",
"from",
"the",
"1st",
"of",
"the",
"month",
"specified",
"in",
"from_date"
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L280-L313 |
numan/py-analytics | analytics/backends/redis.py | Redis.get_metrics | def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results] | python | def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results] | [
"def",
"get_metrics",
"(",
"self",
",",
"metric_identifiers",
",",
"from_date",
",",
"limit",
"=",
"10",
",",
"group_by",
"=",
"\"week\"",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"[",
"]",
"#validation of types:",
"allowed_types",
"=",
"{",
"\"day\"",
":",
"self",
".",
"get_metric_by_day",
",",
"\"week\"",
":",
"self",
".",
"get_metric_by_week",
",",
"\"month\"",
":",
"self",
".",
"get_metric_by_month",
",",
"}",
"if",
"group_by",
".",
"lower",
"(",
")",
"not",
"in",
"allowed_types",
":",
"raise",
"Exception",
"(",
"\"Allowed values for group_by are day, week or month.\"",
")",
"group_by_func",
"=",
"allowed_types",
"[",
"group_by",
".",
"lower",
"(",
")",
"]",
"#pass a connection object so we can pipeline as much as possible",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"for",
"unique_identifier",
",",
"metric",
"in",
"metric_identifiers",
":",
"results",
".",
"append",
"(",
"group_by_func",
"(",
"unique_identifier",
",",
"metric",
",",
"from_date",
",",
"limit",
"=",
"limit",
",",
"connection",
"=",
"conn",
")",
")",
"#we have to merge all the metric results afterwards because we are using a custom context processor",
"return",
"[",
"self",
".",
"_parse_and_process_metrics",
"(",
"series",
",",
"list_of_metrics",
")",
"for",
"series",
",",
"list_of_metrics",
"in",
"results",
"]"
] | Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name)` identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A Python date object
:param limit: The total number of periods (days, weeks or months, depending on ``group_by``) to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month`` | [
"Retrieves",
"a",
"multiple",
"metrics",
"as",
"efficiently",
"as",
"possible",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L315-L344 |
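A sketch of the batch accessor, again assuming a configured `backend` instance; every lookup shares one pipelined connection from ``self._analytics_backend.map()`` and the raw replies are parsed afterwards.

# hypothetical usage, not taken from the repository
import datetime

identifiers = [("user:1", "people_invited"), ("user:2", "people_invited")]
weekly = backend.get_metrics(identifiers, datetime.date(2013, 1, 1), limit=4, group_by="week")
# `weekly` is a list of (series, results) pairs, one per (identifier, metric) tuple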
numan/py-analytics | analytics/backends/redis.py | Redis.get_count | def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the sepcified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result | python | def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the sepcified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result | [
"def",
"get_count",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"None",
"if",
"start_date",
"and",
"end_date",
":",
"start_date",
",",
"end_date",
"=",
"(",
"start_date",
",",
"end_date",
",",
")",
"if",
"start_date",
"<",
"end_date",
"else",
"(",
"end_date",
",",
"start_date",
",",
")",
"start_date",
"=",
"start_date",
"if",
"hasattr",
"(",
"start_date",
",",
"'date'",
")",
"else",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"start_date",
",",
"datetime",
".",
"time",
"(",
")",
")",
"end_date",
"=",
"end_date",
"if",
"hasattr",
"(",
"end_date",
",",
"'date'",
")",
"else",
"datetime",
".",
"datetime",
".",
"combine",
"(",
"end_date",
",",
"datetime",
".",
"time",
"(",
")",
")",
"monthly_metrics_dates",
"=",
"list",
"(",
"rrule",
".",
"rrule",
"(",
"rrule",
".",
"MONTHLY",
",",
"dtstart",
"=",
"start_date",
",",
"bymonthday",
"=",
"1",
",",
"until",
"=",
"end_date",
")",
")",
"#We can sorta optimize this by getting most of the data by month",
"if",
"len",
"(",
"monthly_metrics_dates",
")",
">=",
"3",
":",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"monthly_metric_series",
",",
"monthly_metric_results",
",",
"starting_metric_series",
",",
"starting_metric_results",
",",
"ending_metric_series",
",",
"ending_metric_results",
"=",
"self",
".",
"_get_counts",
"(",
"conn",
",",
"metric",
",",
"unique_identifier",
",",
"monthly_metrics_dates",
",",
"start_date",
",",
"end_date",
")",
"monthly_metric_series",
",",
"monthly_metric_results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"monthly_metric_series",
",",
"monthly_metric_results",
")",
"starting_metric_series",
",",
"starting_metric_results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"starting_metric_series",
",",
"starting_metric_results",
")",
"ending_metric_series",
",",
"ending_metric_results",
"=",
"self",
".",
"_parse_and_process_metrics",
"(",
"ending_metric_series",
",",
"ending_metric_results",
")",
"result",
"=",
"sum",
"(",
"monthly_metric_results",
".",
"values",
"(",
")",
")",
"+",
"sum",
"(",
"starting_metric_results",
".",
"values",
"(",
")",
")",
"+",
"sum",
"(",
"ending_metric_results",
".",
"values",
"(",
")",
")",
"else",
":",
"diff",
"=",
"end_date",
"-",
"start_date",
"metric_results",
"=",
"self",
".",
"get_metric_by_day",
"(",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"limit",
"=",
"diff",
".",
"days",
"+",
"1",
")",
"result",
"=",
"sum",
"(",
"metric_results",
"[",
"1",
"]",
".",
"values",
"(",
")",
")",
"else",
":",
"try",
":",
"result",
"=",
"int",
"(",
"self",
".",
"_analytics_backend",
".",
"get",
"(",
"self",
".",
"_prefix",
"+",
"\":\"",
"+",
"\"analy:%s:count:%s\"",
"%",
"(",
"unique_identifier",
",",
"metric",
",",
")",
")",
")",
"except",
"TypeError",
":",
"result",
"=",
"0",
"return",
"result"
] | Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise | [
"Gets",
"the",
"count",
"for",
"the",
"metric",
"for",
"unique_identifier",
".",
"You",
"can",
"specify",
"a",
"start_date",
"and",
"an",
"end_date",
"to",
"only",
"get",
"metrics",
"within",
"that",
"time",
"range",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L346-L389 |
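A sketch of both counting modes, assuming a configured `backend` instance. Without a date range the prefixed ``analy:<uid>:count:<metric>`` key is read directly; with a range, monthly aggregates are used when at least three month starts fall inside it, otherwise the daily values are summed.

# hypothetical usage, not taken from the repository
import datetime

total = backend.get_count("user:1", "people_invited")
march = backend.get_count("user:1", "people_invited",
                          start_date=datetime.date(2013, 3, 1),
                          end_date=datetime.date(2013, 3, 31))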
numan/py-analytics | analytics/backends/redis.py | Redis.get_counts | def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results | python | def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results | [
"def",
"get_counts",
"(",
"self",
",",
"metric_identifiers",
",",
"*",
"*",
"kwargs",
")",
":",
"parsed_results",
"=",
"[",
"]",
"results",
"=",
"[",
"self",
".",
"get_count",
"(",
"unique_identifier",
",",
"metric",
",",
"*",
"*",
"kwargs",
")",
"for",
"unique_identifier",
",",
"metric",
"in",
"metric_identifiers",
"]",
"for",
"result",
"in",
"results",
":",
"try",
":",
"parsed_result",
"=",
"int",
"(",
"result",
")",
"except",
"TypeError",
":",
"parsed_result",
"=",
"0",
"parsed_results",
".",
"append",
"(",
"parsed_result",
")",
"return",
"parsed_results"
] | Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name)` identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)] | [
"Retrieves",
"a",
"multiple",
"metrics",
"as",
"efficiently",
"as",
"possible",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L391-L411 |
numan/py-analytics | analytics/backends/redis.py | Redis.set_metric_by_day | def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results | python | def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results | [
"def",
"set_metric_by_day",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"date",
",",
"count",
",",
"sync_agg",
"=",
"True",
",",
"update_counter",
"=",
"True",
")",
":",
"metric",
"=",
"[",
"metric",
"]",
"if",
"isinstance",
"(",
"metric",
",",
"basestring",
")",
"else",
"metric",
"unique_identifier",
"=",
"[",
"unique_identifier",
"]",
"if",
"not",
"isinstance",
"(",
"unique_identifier",
",",
"(",
"types",
".",
"ListType",
",",
"types",
".",
"TupleType",
",",
"types",
".",
"GeneratorType",
",",
")",
")",
"else",
"unique_identifier",
"results",
"=",
"[",
"]",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"for",
"uid",
"in",
"unique_identifier",
":",
"hash_key_daily",
"=",
"self",
".",
"_get_daily_metric_key",
"(",
"uid",
",",
"date",
")",
"for",
"single_metric",
"in",
"metric",
":",
"daily_metric_name",
"=",
"self",
".",
"_get_daily_metric_name",
"(",
"single_metric",
",",
"date",
")",
"if",
"update_counter",
":",
"# updates overall counter for metric",
"overall_count",
"=",
"self",
".",
"get_count",
"(",
"uid",
",",
"single_metric",
")",
"day",
",",
"daily_count",
"=",
"self",
".",
"get_metric_by_day",
"(",
"uid",
",",
"single_metric",
",",
"date",
",",
"1",
")",
"[",
"1",
"]",
".",
"popitem",
"(",
")",
"self",
".",
"_analytics_backend",
".",
"set",
"(",
"self",
".",
"_prefix",
"+",
"\":\"",
"+",
"\"analy:%s:count:%s\"",
"%",
"(",
"uid",
",",
"single_metric",
")",
",",
"overall_count",
"+",
"(",
"count",
"-",
"daily_count",
")",
")",
"results",
".",
"append",
"(",
"[",
"conn",
".",
"hset",
"(",
"hash_key_daily",
",",
"daily_metric_name",
",",
"count",
")",
"]",
")",
"if",
"sync_agg",
":",
"self",
".",
"sync_agg_metric",
"(",
"unique_identifier",
",",
"metric",
",",
"date",
",",
"date",
")",
"return",
"results"
] | Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to the value of ``count``
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated | [
"Sets",
"the",
"count",
"for",
"the",
"metric",
"for",
"unique_identifier",
".",
"You",
"must",
"specify",
"a",
"date",
"for",
"the",
"count",
"to",
"be",
"set",
"on",
".",
"Useful",
"for",
"resetting",
"a",
"metric",
"count",
"to",
"0",
"or",
"decrementing",
"a",
"metric",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L413-L448 |
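A sketch of the setter, assuming a configured `backend` instance; both ``unique_identifier`` and ``metric`` accept lists, and ``sync_agg``/``update_counter`` control whether the week/month aggregates and the overall counter are brought back in line.

# hypothetical usage, not taken from the repository
import datetime

# reset one day's count; the overall counter and aggregates are adjusted
backend.set_metric_by_day("user:1", "people_invited", datetime.date(2013, 3, 14), 0)

# set several metrics for several identifiers at once, deferring the re-sync
backend.set_metric_by_day(["user:1", "user:2"], ["people_invited", "comments_posted"],
                          datetime.date(2013, 3, 14), 5, sync_agg=False, update_counter=False)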
numan/py-analytics | analytics/backends/redis.py | Redis.sync_agg_metric | def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date) | python | def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date) | [
"def",
"sync_agg_metric",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"end_date",
")",
":",
"self",
".",
"sync_week_metric",
"(",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"end_date",
")",
"self",
".",
"sync_month_metric",
"(",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"end_date",
")"
] | Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends | [
"Uses",
"the",
"count",
"for",
"each",
"day",
"in",
"the",
"date",
"range",
"to",
"recalculate",
"the",
"counters",
"for",
"the",
"associated",
"weeks",
"and",
"months",
"for",
"the",
"metric",
"for",
"unique_identifier",
".",
"Useful",
"for",
"updating",
"the",
"counters",
"for",
"week",
"and",
"month",
"after",
"using",
"set_metric_by_day",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L450-L464 |
numan/py-analytics | analytics/backends/redis.py | Redis.sync_week_metric | def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter) | python | def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter) | [
"def",
"sync_week_metric",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"end_date",
")",
":",
"metric",
"=",
"[",
"metric",
"]",
"if",
"isinstance",
"(",
"metric",
",",
"basestring",
")",
"else",
"metric",
"unique_identifier",
"=",
"[",
"unique_identifier",
"]",
"if",
"not",
"isinstance",
"(",
"unique_identifier",
",",
"(",
"types",
".",
"ListType",
",",
"types",
".",
"TupleType",
",",
"types",
".",
"GeneratorType",
",",
")",
")",
"else",
"unique_identifier",
"closest_monday_from_date",
"=",
"self",
".",
"_get_closest_week",
"(",
"start_date",
")",
"num_weeks",
"=",
"self",
".",
"_num_weeks",
"(",
"start_date",
",",
"end_date",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_weekly_date_range",
"(",
"closest_monday_from_date",
",",
"datetime",
".",
"timedelta",
"(",
"weeks",
"=",
"num_weeks",
")",
")",
"week_date_generator",
"=",
"(",
"closest_monday_from_date",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
"step",
"=",
"7",
")",
")",
"#generate a list of mondays in between the start date and the end date",
"weeks_to_update",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"week_date_generator",
",",
"num_weeks",
")",
")",
"for",
"uid",
"in",
"unique_identifier",
":",
"for",
"single_metric",
"in",
"metric",
":",
"for",
"week",
"in",
"weeks_to_update",
":",
"_",
",",
"series_results",
"=",
"self",
".",
"get_metric_by_day",
"(",
"uid",
",",
"single_metric",
",",
"from_date",
"=",
"week",
",",
"limit",
"=",
"7",
")",
"week_counter",
"=",
"sum",
"(",
"[",
"value",
"for",
"key",
",",
"value",
"in",
"series_results",
".",
"items",
"(",
")",
"]",
")",
"hash_key_weekly",
"=",
"self",
".",
"_get_weekly_metric_key",
"(",
"uid",
",",
"week",
")",
"weekly_metric_name",
"=",
"self",
".",
"_get_weekly_metric_name",
"(",
"single_metric",
",",
"week",
")",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"conn",
".",
"hset",
"(",
"hash_key_weekly",
",",
"weekly_metric_name",
",",
"week_counter",
")"
] | Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends | [
"Uses",
"the",
"count",
"for",
"each",
"day",
"in",
"the",
"date",
"range",
"to",
"recalculate",
"the",
"counters",
"for",
"the",
"weeks",
"for",
"the",
"metric",
"for",
"unique_identifier",
".",
"Useful",
"for",
"updating",
"the",
"counters",
"for",
"week",
"and",
"month",
"after",
"using",
"set_metric_by_day",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L466-L498 |
numan/py-analytics | analytics/backends/redis.py | Redis.sync_month_metric | def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter) | python | def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter) | [
"def",
"sync_month_metric",
"(",
"self",
",",
"unique_identifier",
",",
"metric",
",",
"start_date",
",",
"end_date",
")",
":",
"metric",
"=",
"[",
"metric",
"]",
"if",
"isinstance",
"(",
"metric",
",",
"basestring",
")",
"else",
"metric",
"unique_identifier",
"=",
"[",
"unique_identifier",
"]",
"if",
"not",
"isinstance",
"(",
"unique_identifier",
",",
"(",
"types",
".",
"ListType",
",",
"types",
".",
"TupleType",
",",
"types",
".",
"GeneratorType",
",",
")",
")",
"else",
"unique_identifier",
"num_months",
"=",
"self",
".",
"_num_months",
"(",
"start_date",
",",
"end_date",
")",
"first_of_month",
"=",
"datetime",
".",
"date",
"(",
"year",
"=",
"start_date",
".",
"year",
",",
"month",
"=",
"start_date",
".",
"month",
",",
"day",
"=",
"1",
")",
"metric_key_date_range",
"=",
"self",
".",
"_get_weekly_date_range",
"(",
"first_of_month",
",",
"relativedelta",
"(",
"months",
"=",
"num_months",
")",
")",
"month_date_generator",
"=",
"(",
"first_of_month",
"+",
"relativedelta",
"(",
"months",
"=",
"i",
")",
"for",
"i",
"in",
"itertools",
".",
"count",
"(",
")",
")",
"#generate a list of first_of_month's in between the start date and the end date",
"months_to_update",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"month_date_generator",
",",
"num_months",
")",
")",
"for",
"uid",
"in",
"unique_identifier",
":",
"for",
"single_metric",
"in",
"metric",
":",
"for",
"month",
"in",
"months_to_update",
":",
"_",
",",
"series_results",
"=",
"self",
".",
"get_metric_by_day",
"(",
"uid",
",",
"single_metric",
",",
"from_date",
"=",
"month",
",",
"limit",
"=",
"monthrange",
"(",
"month",
".",
"year",
",",
"month",
".",
"month",
")",
"[",
"1",
"]",
")",
"month_counter",
"=",
"sum",
"(",
"[",
"value",
"for",
"key",
",",
"value",
"in",
"series_results",
".",
"items",
"(",
")",
"]",
")",
"hash_key_monthly",
"=",
"self",
".",
"_get_weekly_metric_key",
"(",
"uid",
",",
"month",
")",
"monthly_metric_name",
"=",
"self",
".",
"_get_monthly_metric_name",
"(",
"single_metric",
",",
"month",
")",
"with",
"self",
".",
"_analytics_backend",
".",
"map",
"(",
")",
"as",
"conn",
":",
"conn",
".",
"hset",
"(",
"hash_key_monthly",
",",
"monthly_metric_name",
",",
"month_counter",
")"
] | Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends | [
"Uses",
"the",
"count",
"for",
"each",
"day",
"in",
"the",
"date",
"range",
"to",
"recalculate",
"the",
"counters",
"for",
"the",
"months",
"for",
"the",
"metric",
"for",
"unique_identifier",
".",
"Useful",
"for",
"updating",
"the",
"counters",
"for",
"week",
"and",
"month",
"after",
"using",
"set_metric_by_day",
"."
] | train | https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L500-L532 |
non-Jedi/gyr | gyr/utils.py | is_full_mxid | def is_full_mxid(user_string):
"""Returns True if a string is a valid mxid."""
if not user_string[0] == "@":
return False
parts = user_string[1:].split(":")
localpart_chars = ascii_lowercase + digits + "._-="
if not (len(parts) == 2 and all([i in localpart_chars for i in parts[0]])):
return False
return True | python | def is_full_mxid(user_string):
"""Returns True if a string is a valid mxid."""
if not user_string[0] == "@":
return False
parts = user_string[1:].split(":")
localpart_chars = ascii_lowercase + digits + "._-="
if not (len(parts) == 2 and all([i in localpart_chars for i in parts[0]])):
return False
return True | [
"def",
"is_full_mxid",
"(",
"user_string",
")",
":",
"if",
"not",
"user_string",
"[",
"0",
"]",
"==",
"\"@\"",
":",
"return",
"False",
"parts",
"=",
"user_string",
"[",
"1",
":",
"]",
".",
"split",
"(",
"\":\"",
")",
"localpart_chars",
"=",
"ascii_lowercase",
"+",
"digits",
"+",
"\"._-=\"",
"if",
"not",
"(",
"len",
"(",
"parts",
")",
"==",
"2",
"and",
"all",
"(",
"[",
"i",
"in",
"localpart_chars",
"for",
"i",
"in",
"parts",
"[",
"0",
"]",
"]",
")",
")",
":",
"return",
"False",
"return",
"True"
] | Returns True if a string is a valid mxid. | [
"Returns",
"True",
"if",
"a",
"string",
"is",
"a",
"valid",
"mxid",
"."
] | train | https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/utils.py#L30-L38 |
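A few illustrative inputs for the validator above, assuming it is imported from ``gyr.utils``:

# hypothetical usage, not taken from the repository
from gyr.utils import is_full_mxid

is_full_mxid("@alice:example.org")   # True: leading "@", lowercase localpart, one ":"
is_full_mxid("alice:example.org")    # False: missing the leading "@"
is_full_mxid("@Alice:example.org")   # False: "A" is not an allowed localpart character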
non-Jedi/gyr | gyr/utils.py | intent | def intent(method):
"""Helps object methods handle MatrixRequestError.
Args:
method(function): Object method to be wrapped
Method's object must have _handle_request_exception method that deals with
specific status codes and errcodes.
"""
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except exceptions.MatrixError as e:
if isinstance(e.original_exception,
matrix_client.errors.MatrixRequestError):
self._handle_request_exception(e)
# May still throw exception for other reasons; not handled
return method(self, *args, **kwargs)
else:
raise e
return wrapper | python | def intent(method):
"""Helps object methods handle MatrixRequestError.
Args:
method(function): Object method to be wrapped
Method's object must have _handle_request_exception method that deals with
specific status codes and errcodes.
"""
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except exceptions.MatrixError as e:
if isinstance(e.original_exception,
matrix_client.errors.MatrixRequestError):
self._handle_request_exception(e)
# May still throw exception for other reasons; not handled
return method(self, *args, **kwargs)
else:
raise e
return wrapper | [
"def",
"intent",
"(",
"method",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
".",
"MatrixError",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
".",
"original_exception",
",",
"matrix_client",
".",
"errors",
".",
"MatrixRequestError",
")",
":",
"self",
".",
"_handle_request_exception",
"(",
"e",
")",
"# May still throw exception for other reasons; not handled",
"return",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"e",
"return",
"wrapper"
] | Helps object methods handle MatrixRequestError.
Args:
method(function): Object method to be wrapped
Method's object must have _handle_request_exception method that deals with
specific status codes and errcodes. | [
"Helps",
"object",
"methods",
"handle",
"MatrixRequestError",
"."
] | train | https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/utils.py#L46-L68 |
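A sketch of how the decorator might be applied; the class and method names here are hypothetical, and the only requirements taken from the code are that the object defines ``_handle_request_exception`` and that failures surface as ``exceptions.MatrixError`` wrapping a ``MatrixRequestError``.

# hypothetical usage, not taken from the repository
class ExampleApi:
    def _handle_request_exception(self, error):
        # inspect error.original_exception (status code / errcode) and recover
        # here; the wrapped call is then retried once by the decorator
        pass

    @intent
    def send_message(self, room_id, text):
        # a call that may raise exceptions.MatrixError on the first attempt
        ...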
malramsay64/experi | src/experi/commands.py | Command.get_variables | def get_variables(self) -> Set[str]:
"""Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
"""
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | python | def get_variables(self) -> Set[str]:
"""Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
"""
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | [
"def",
"get_variables",
"(",
"self",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"variables",
"=",
"set",
"(",
")",
"for",
"cmd",
"in",
"self",
".",
"_cmd",
":",
"for",
"var",
"in",
"self",
".",
"__formatter",
".",
"parse",
"(",
"cmd",
")",
":",
"logger",
".",
"debug",
"(",
"\"Checking variable: %s\"",
",",
"var",
")",
"# creates and requires are special class values",
"if",
"var",
"[",
"1",
"]",
"is",
"not",
"None",
"and",
"var",
"[",
"1",
"]",
"not",
"in",
"[",
"\"creates\"",
",",
"\"requires\"",
"]",
":",
"variables",
".",
"add",
"(",
"var",
"[",
"1",
"]",
")",
"return",
"variables"
] | Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces. | [
"Find",
"all",
"the",
"variables",
"specified",
"in",
"a",
"format",
"string",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L53-L67 |
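The parse step underneath ``get_variables`` can be reproduced directly with ``string.Formatter``; this standalone sketch avoids the ``Command`` constructor, which is not shown in this excerpt, and the command string is made up.

# hypothetical usage, not taken from the repository
from string import Formatter

cmd = "run_sim --temperature {temperature} --out {creates}"
fields = {var for _, var, _, _ in Formatter().parse(cmd)
          if var is not None and var not in ("creates", "requires")}
# fields == {"temperature"}; "creates" and "requires" are special class values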
malramsay64/experi | src/experi/commands.py | Job.as_bash_array | def as_bash_array(self) -> str:
"""Return a representation as a bash array.
This creates a string formatted as a bash array containing all the commands in the job.
"""
return_string = "( \\\n"
for command in self:
return_string += '"' + str(command) + '" \\\n'
return_string += ")"
return return_string | python | def as_bash_array(self) -> str:
"""Return a representation as a bash array.
This creates a string formatted as a bash array containing all the commands in the job.
"""
return_string = "( \\\n"
for command in self:
return_string += '"' + str(command) + '" \\\n'
return_string += ")"
return return_string | [
"def",
"as_bash_array",
"(",
"self",
")",
"->",
"str",
":",
"return_string",
"=",
"\"( \\\\\\n\"",
"for",
"command",
"in",
"self",
":",
"return_string",
"+=",
"'\"'",
"+",
"str",
"(",
"command",
")",
"+",
"'\" \\\\\\n'",
"return_string",
"+=",
"\")\"",
"return",
"return_string"
] | Return a representation as a bash array.
This creates a string formatted as a bash array containing all the commands in the job. | [
"Return",
"a",
"representation",
"as",
"a",
"bash",
"array",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L140-L150 |
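A sketch of the output shape, assuming ``job`` is a ``Job`` iterating over two commands whose ``str()`` is their command line (how a ``Job`` is built is not shown in this excerpt):

# hypothetical usage, not taken from the repository
print(job.as_bash_array())
# ( \
# "echo first" \
# "echo second" \
# )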
tipsi/tipsi_tools | tipsi_tools/doc_utils/tipsi_sphinx/dyn_serializer.py | parse_doc | def parse_doc(doc):
"""
Parse docstrings to dict, it should look like:
key: value
"""
if not doc:
return {}
out = {}
for s in doc.split('\n'):
s = s.strip().split(':', maxsplit=1)
if len(s) == 2:
out[s[0]] = s[1]
return out | python | def parse_doc(doc):
"""
Parse docstrings to dict, it should look like:
key: value
"""
if not doc:
return {}
out = {}
for s in doc.split('\n'):
s = s.strip().split(':', maxsplit=1)
if len(s) == 2:
out[s[0]] = s[1]
return out | [
"def",
"parse_doc",
"(",
"doc",
")",
":",
"if",
"not",
"doc",
":",
"return",
"{",
"}",
"out",
"=",
"{",
"}",
"for",
"s",
"in",
"doc",
".",
"split",
"(",
"'\\n'",
")",
":",
"s",
"=",
"s",
".",
"strip",
"(",
")",
".",
"split",
"(",
"':'",
",",
"maxsplit",
"=",
"1",
")",
"if",
"len",
"(",
"s",
")",
"==",
"2",
":",
"out",
"[",
"s",
"[",
"0",
"]",
"]",
"=",
"s",
"[",
"1",
"]",
"return",
"out"
] | Parse docstrings to dict, it should look like:
key: value | [
"Parse",
"docstrings",
"to",
"dict",
"it",
"should",
"look",
"like",
":",
"key",
":",
"value"
] | train | https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/doc_utils/tipsi_sphinx/dyn_serializer.py#L20-L32 |
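A quick usage sketch of the key/value parsing documented above; the docstring text is made up and the function body is restated inline so the snippet is self-contained.

# Illustrative only: turn "key: value" lines of a docstring into a dict.
def parse_doc(doc):
    out = {}
    for line in (doc or "").split("\n"):
        parts = line.strip().split(":", maxsplit=1)
        if len(parts) == 2:
            out[parts[0]] = parts[1]
    return out

print(parse_doc("serializer: UserSerializer\nmany: true"))
# {'serializer': ' UserSerializer', 'many': ' true'}  (note the values keep their leading space)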
malramsay64/experi | src/experi/run.py | combine_dictionaries | def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts)) | python | def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts)) | [
"def",
"combine_dictionaries",
"(",
"dicts",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"return",
"dict",
"(",
"ChainMap",
"(",
"*",
"dicts",
")",
")"
] | Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts. | [
"Merge",
"a",
"list",
"of",
"dictionaries",
"into",
"a",
"single",
"dictionary",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L39-L46 |
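A quick check (values invented) of the first-wins collision behaviour that the combine_dictionaries entry above attributes to ChainMap.

# Illustrative only: ChainMap keeps the value from the first dict for a colliding key.
from collections import ChainMap

dicts = [{"temperature": 300, "pressure": 1.0}, {"temperature": 400, "steps": 1000}]
print(dict(ChainMap(*dicts)))
# {'temperature': 300, 'steps': 1000, 'pressure': 1.0}  (key order may vary)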
malramsay64/experi | src/experi/run.py | iterator_zip | def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip")) | python | def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip")) | [
"def",
"iterator_zip",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
"=",
"None",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Yielding from zip iterator\"",
")",
"if",
"isinstance",
"(",
"variables",
",",
"list",
")",
":",
"for",
"item",
"in",
"variables",
":",
"yield",
"list",
"(",
"variable_matrix",
"(",
"item",
",",
"parent",
",",
"\"zip\"",
")",
")",
"else",
":",
"yield",
"list",
"(",
"variable_matrix",
"(",
"variables",
",",
"parent",
",",
"\"zip\"",
")",
")"
] | Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused | [
"Apply",
"the",
"zip",
"operator",
"to",
"a",
"set",
"of",
"variables",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L49-L66 |
malramsay64/experi | src/experi/run.py | iterator_product | def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product")) | python | def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product")) | [
"def",
"iterator_product",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
"=",
"None",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Yielding from product iterator\"",
")",
"if",
"isinstance",
"(",
"variables",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"f\"Product only takes mappings of values, got {variables} of type {type(variables)}\"",
")",
"yield",
"list",
"(",
"variable_matrix",
"(",
"variables",
",",
"parent",
",",
"\"product\"",
")",
")"
] | Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused | [
"Apply",
"the",
"product",
"operator",
"to",
"a",
"set",
"of",
"variables",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L69-L87 |
malramsay64/experi | src/experi/run.py | iterator_chain | def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | python | def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
) | [
"def",
"iterator_chain",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
"=",
"None",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Yielding from append iterator\"",
")",
"if",
"not",
"isinstance",
"(",
"variables",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"f\"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}\"",
")",
"# Create a single list containing all the values",
"yield",
"list",
"(",
"chain",
".",
"from_iterable",
"(",
"variable_matrix",
"(",
"item",
",",
"parent",
",",
"\"product\"",
")",
"for",
"item",
"in",
"variables",
")",
")"
] | This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused | [
"This",
"successively",
"appends",
"each",
"element",
"of",
"an",
"array",
"to",
"a",
"single",
"list",
"of",
"values",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L90-L114 |
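A sketch of the flattening step the chain/append entry above relies on; the per-item variable matrices are written out by hand here rather than generated by variable_matrix.

# Illustrative only: chain.from_iterable merges several variable matrices into one list.
from itertools import chain

matrices = [
    [{"simulation": "equilibrate", "steps": 1000}],
    [{"simulation": "production", "steps": 5000}, {"simulation": "production", "steps": 10000}],
]
print(list(chain.from_iterable(matrices)))
# [{'simulation': 'equilibrate', 'steps': 1000},
#  {'simulation': 'production', 'steps': 5000},
#  {'simulation': 'production', 'steps': 10000}]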
malramsay64/experi | src/experi/run.py | iterator_arange | def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
) | python | def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
) | [
"def",
"iterator_arange",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"assert",
"parent",
"is",
"not",
"None",
"if",
"isinstance",
"(",
"variables",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"yield",
"[",
"{",
"parent",
":",
"i",
"}",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"variables",
")",
"]",
"elif",
"isinstance",
"(",
"variables",
",",
"dict",
")",
":",
"if",
"variables",
".",
"get",
"(",
"\"stop\"",
")",
":",
"yield",
"[",
"{",
"parent",
":",
"i",
"}",
"for",
"i",
"in",
"arange",
"(",
"*",
"*",
"variables",
")",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"f\"Stop is a required keyword for the arange iterator.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f\"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}\"",
")"
] | Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value. | [
"Create",
"a",
"list",
"of",
"values",
"using",
"the",
":",
"func",
":",
"numpy",
".",
"arange",
"function",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L123-L146 |
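A sketch of the expansion the iterator_arange entry above describes, calling numpy.arange positionally for simplicity; the parent variable name is hypothetical.

# Illustrative only: expand a numeric range into one dict per value, keyed by the parent name.
import numpy as np

parent = "temperature"
values = [{parent: int(i)} for i in np.arange(300, 400, 50)]
print(values)  # [{'temperature': 300}, {'temperature': 350}]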
malramsay64/experi | src/experi/run.py | iterator_cycle | def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
) | python | def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
) | [
"def",
"iterator_cycle",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
")",
"->",
"Iterable",
"[",
"VarMatrix",
"]",
":",
"if",
"isinstance",
"(",
"variables",
",",
"dict",
")",
":",
"if",
"variables",
".",
"get",
"(",
"\"times\"",
")",
":",
"times",
"=",
"int",
"(",
"variables",
"[",
"\"times\"",
"]",
")",
"del",
"variables",
"[",
"\"times\"",
"]",
"yield",
"list",
"(",
"variable_matrix",
"(",
"variables",
",",
"parent",
",",
"\"product\"",
")",
")",
"*",
"times",
"else",
":",
"raise",
"ValueError",
"(",
"f\"times is a required keyword for the repeat iterator.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f\"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}\"",
")"
] | Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value. | [
"Cycle",
"through",
"a",
"list",
"of",
"values",
"a",
"specified",
"number",
"of",
"times"
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L149-L171 |
malramsay64/experi | src/experi/run.py | variable_matrix | def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables} | python | def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables} | [
"def",
"variable_matrix",
"(",
"variables",
":",
"VarType",
",",
"parent",
":",
"str",
"=",
"None",
",",
"iterator",
":",
"str",
"=",
"\"product\"",
")",
"->",
"Iterable",
"[",
"Dict",
"[",
"str",
",",
"YamlValue",
"]",
"]",
":",
"_iters",
":",
"Dict",
"[",
"str",
",",
"Callable",
"]",
"=",
"{",
"\"product\"",
":",
"product",
",",
"\"zip\"",
":",
"zip",
"}",
"_special_keys",
":",
"Dict",
"[",
"str",
",",
"Callable",
"[",
"[",
"VarType",
",",
"Any",
"]",
",",
"Iterable",
"[",
"VarMatrix",
"]",
"]",
"]",
"=",
"{",
"\"zip\"",
":",
"iterator_zip",
",",
"\"product\"",
":",
"iterator_product",
",",
"\"arange\"",
":",
"iterator_arange",
",",
"\"chain\"",
":",
"iterator_chain",
",",
"\"append\"",
":",
"iterator_chain",
",",
"\"cycle\"",
":",
"iterator_cycle",
",",
"\"repeat\"",
":",
"iterator_cycle",
",",
"}",
"if",
"isinstance",
"(",
"variables",
",",
"dict",
")",
":",
"key_vars",
":",
"List",
"[",
"List",
"[",
"Dict",
"[",
"str",
",",
"YamlValue",
"]",
"]",
"]",
"=",
"[",
"]",
"# Handling of specialised iterators",
"for",
"key",
",",
"function",
"in",
"_special_keys",
".",
"items",
"(",
")",
":",
"if",
"variables",
".",
"get",
"(",
"key",
")",
":",
"item",
"=",
"variables",
"[",
"key",
"]",
"assert",
"item",
"is",
"not",
"None",
"for",
"val",
"in",
"function",
"(",
"item",
",",
"parent",
")",
":",
"key_vars",
".",
"append",
"(",
"val",
")",
"del",
"variables",
"[",
"key",
"]",
"for",
"key",
",",
"value",
"in",
"variables",
".",
"items",
"(",
")",
":",
"key_vars",
".",
"append",
"(",
"list",
"(",
"variable_matrix",
"(",
"value",
",",
"key",
",",
"iterator",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"key vars: %s\"",
",",
"key_vars",
")",
"# Iterate through all possible products generating a dictionary",
"for",
"i",
"in",
"_iters",
"[",
"iterator",
"]",
"(",
"*",
"key_vars",
")",
":",
"logger",
".",
"debug",
"(",
"\"dicts: %s\"",
",",
"i",
")",
"yield",
"combine_dictionaries",
"(",
"i",
")",
"# Iterate through a list of values",
"elif",
"isinstance",
"(",
"variables",
",",
"list",
")",
":",
"for",
"item",
"in",
"variables",
":",
"yield",
"from",
"variable_matrix",
"(",
"item",
",",
"parent",
",",
"iterator",
")",
"# Stopping condition -> we have either a single value from a list",
"# or a value had only one item",
"else",
":",
"assert",
"parent",
"is",
"not",
"None",
"yield",
"{",
"parent",
":",
"variables",
"}"
] | Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input. | [
"Process",
"the",
"variables",
"into",
"a",
"list",
"of",
"the",
"appropriate",
"combinations",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L174-L226 |
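The variable_matrix entry above is easier to follow with a concrete expansion. This sketch reproduces only the plain Cartesian-product case with itertools and invented variable names; the special keywords (zip, arange, chain, cycle) are not handled here.

# Illustrative only: the product expansion of a simple variables mapping.
from itertools import product

variables = {"temperature": [300, 350], "pressure": [1.0, 2.0]}
matrix = [dict(zip(variables, combo)) for combo in product(*variables.values())]
print(matrix)
# [{'temperature': 300, 'pressure': 1.0}, {'temperature': 300, 'pressure': 2.0},
#  {'temperature': 350, 'pressure': 1.0}, {'temperature': 350, 'pressure': 2.0}]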
malramsay64/experi | src/experi/run.py | uniqueify | def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)] | python | def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)] | [
"def",
"uniqueify",
"(",
"my_list",
":",
"Any",
")",
"->",
"List",
"[",
"Any",
"]",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"6",
")",
":",
"# An implementation specific detail of py3.6 is the retention of order",
"# within a dictionary. In py3.7 this becomes the documented behaviour.",
"return",
"list",
"(",
"dict",
".",
"fromkeys",
"(",
"my_list",
")",
")",
"# Slower method of order preserving unique list in older python versions",
"seen",
"=",
"set",
"(",
")",
"return",
"[",
"x",
"for",
"x",
"in",
"my_list",
"if",
"x",
"not",
"in",
"seen",
"and",
"not",
"seen",
".",
"add",
"(",
"x",
")",
"]"
] | Remove duplicate entries in a list retaining order. | [
"Remove",
"duplicate",
"entries",
"in",
"a",
"list",
"retaining",
"order",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L229-L238 |
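A quick check of the order-preserving deduplication described above; the dict.fromkeys trick is the part worth seeing on concrete values (the command strings are invented).

# Illustrative only: dict.fromkeys drops duplicates while keeping first-seen order (py3.6+).
my_list = ["echo a", "echo b", "echo a", "echo c", "echo b"]
print(list(dict.fromkeys(my_list)))  # ['echo a', 'echo b', 'echo c']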
malramsay64/experi | src/experi/run.py | process_command | def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list) | python | def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list) | [
"def",
"process_command",
"(",
"command",
":",
"CommandInput",
",",
"matrix",
":",
"VarMatrix",
")",
"->",
"List",
"[",
"Command",
"]",
":",
"assert",
"command",
"is",
"not",
"None",
"if",
"isinstance",
"(",
"command",
",",
"str",
")",
":",
"command_list",
"=",
"[",
"Command",
"(",
"command",
",",
"variables",
"=",
"variables",
")",
"for",
"variables",
"in",
"matrix",
"]",
"elif",
"isinstance",
"(",
"command",
",",
"list",
")",
":",
"command_list",
"=",
"[",
"Command",
"(",
"command",
",",
"variables",
"=",
"variables",
")",
"for",
"variables",
"in",
"matrix",
"]",
"else",
":",
"if",
"command",
".",
"get",
"(",
"\"command\"",
")",
"is",
"not",
"None",
":",
"cmd",
"=",
"command",
".",
"get",
"(",
"\"command\"",
")",
"else",
":",
"cmd",
"=",
"command",
".",
"get",
"(",
"\"cmd\"",
")",
"creates",
"=",
"str",
"(",
"command",
".",
"get",
"(",
"\"creates\"",
",",
"\"\"",
")",
")",
"requires",
"=",
"str",
"(",
"command",
".",
"get",
"(",
"\"requires\"",
",",
"\"\"",
")",
")",
"assert",
"isinstance",
"(",
"cmd",
",",
"(",
"list",
",",
"str",
")",
")",
"command_list",
"=",
"[",
"Command",
"(",
"cmd",
",",
"variables",
",",
"creates",
",",
"requires",
")",
"for",
"variables",
"in",
"matrix",
"]",
"return",
"uniqueify",
"(",
"command_list",
")"
] | Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings. | [
"Generate",
"all",
"combinations",
"of",
"commands",
"given",
"a",
"variable",
"matrix",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L263-L286 |
malramsay64/experi | src/experi/run.py | read_file | def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure | python | def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure | [
"def",
"read_file",
"(",
"filename",
":",
"PathLike",
"=",
"\"experiment.yml\"",
")",
"->",
"Dict",
"[",
"str",
",",
"Any",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Input file: %s\"",
",",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"stream",
":",
"structure",
"=",
"yaml",
".",
"safe_load",
"(",
"stream",
")",
"return",
"structure"
] | Read and parse yaml file. | [
"Read",
"and",
"parse",
"yaml",
"file",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L289-L295 |
malramsay64/experi | src/experi/run.py | run_bash_jobs | def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return | python | def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return | [
"def",
"run_bash_jobs",
"(",
"jobs",
":",
"Iterator",
"[",
"Job",
"]",
",",
"directory",
":",
"PathLike",
"=",
"Path",
".",
"cwd",
"(",
")",
",",
"dry_run",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Running commands in bash shell\"",
")",
"# iterate through command groups",
"for",
"job",
"in",
"jobs",
":",
"# Check shell exists",
"if",
"shutil",
".",
"which",
"(",
"job",
".",
"shell",
")",
"is",
"None",
":",
"raise",
"ProcessLookupError",
"(",
"f\"The shell '{job.shell}' was not found.\"",
")",
"failed",
"=",
"False",
"for",
"command",
"in",
"job",
":",
"for",
"cmd",
"in",
"command",
":",
"logger",
".",
"info",
"(",
"cmd",
")",
"if",
"dry_run",
":",
"print",
"(",
"f\"{job.shell} -c '{cmd}'\"",
")",
"else",
":",
"result",
"=",
"subprocess",
".",
"run",
"(",
"[",
"job",
".",
"shell",
",",
"\"-c\"",
",",
"f\"{cmd}\"",
"]",
",",
"cwd",
"=",
"str",
"(",
"directory",
")",
")",
"if",
"result",
".",
"returncode",
"!=",
"0",
":",
"failed",
"=",
"True",
"logger",
".",
"error",
"(",
"\"Command failed: %s\"",
",",
"command",
")",
"break",
"if",
"failed",
":",
"logger",
".",
"error",
"(",
"\"A command failed, not continuing further.\"",
")",
"return"
] | Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run. | [
"Submit",
"commands",
"to",
"the",
"bash",
"shell",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L361-L395 |
malramsay64/experi | src/experi/run.py | run_scheduler_jobs | def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break | python | def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
    To ensure that commands run consecutively, the additional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break | [
"def",
"run_scheduler_jobs",
"(",
"scheduler",
":",
"str",
",",
"jobs",
":",
"Iterator",
"[",
"Job",
"]",
",",
"directory",
":",
"PathLike",
"=",
"Path",
".",
"cwd",
"(",
")",
",",
"basename",
":",
"str",
"=",
"\"experi\"",
",",
"dry_run",
":",
"bool",
"=",
"False",
",",
")",
"->",
"None",
":",
"submit_job",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"Creating commands in %s files.\"",
",",
"scheduler",
")",
"# Check scheduler submit command exists",
"if",
"scheduler",
"==",
"\"pbs\"",
":",
"submit_executable",
"=",
"\"qsub\"",
"elif",
"scheduler",
"==",
"\"slurm\"",
":",
"submit_executable",
"=",
"\"sbatch\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"scheduler can only take values ['pbs', 'slurm']\"",
")",
"if",
"shutil",
".",
"which",
"(",
"submit_executable",
")",
"is",
"None",
":",
"logger",
".",
"warning",
"(",
"\"The `%s` command is not found.\"",
"\"Skipping job submission and just generating files\"",
",",
"submit_executable",
",",
")",
"submit_job",
"=",
"False",
"# Ensure directory is a Path",
"directory",
"=",
"Path",
"(",
"directory",
")",
"# remove existing files",
"for",
"fname",
"in",
"directory",
".",
"glob",
"(",
"basename",
"+",
"f\"*.{scheduler}\"",
")",
":",
"print",
"(",
"\"Removing {}\"",
".",
"format",
"(",
"fname",
")",
")",
"os",
".",
"remove",
"(",
"str",
"(",
"fname",
")",
")",
"# Write new files and generate commands",
"prev_jobids",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"for",
"index",
",",
"job",
"in",
"enumerate",
"(",
"jobs",
")",
":",
"# Generate scheduler file",
"content",
"=",
"create_scheduler_file",
"(",
"scheduler",
",",
"job",
")",
"logger",
".",
"debug",
"(",
"\"File contents:\\n%s\"",
",",
"content",
")",
"# Write file to disk",
"fname",
"=",
"Path",
"(",
"directory",
"/",
"\"{}_{:02d}.{}\"",
".",
"format",
"(",
"basename",
",",
"index",
",",
"scheduler",
")",
")",
"with",
"fname",
".",
"open",
"(",
"\"w\"",
")",
"as",
"dst",
":",
"dst",
".",
"write",
"(",
"content",
")",
"if",
"submit_job",
"or",
"dry_run",
":",
"# Construct command",
"submit_cmd",
"=",
"[",
"submit_executable",
"]",
"if",
"prev_jobids",
":",
"# Continue to append all previous jobs to submit_cmd so subsequent jobs die along",
"# with the first.",
"afterok",
"=",
"f\"afterok:{':'.join(prev_jobids)}\"",
"if",
"scheduler",
"==",
"\"pbs\"",
":",
"submit_cmd",
"+=",
"[",
"\"-W\"",
",",
"f\"depend={afterok}\"",
"]",
"elif",
"scheduler",
"==",
"\"slurm\"",
":",
"submit_cmd",
"+=",
"[",
"\"--dependency\"",
",",
"afterok",
"]",
"# actually run the command",
"logger",
".",
"info",
"(",
"str",
"(",
"submit_cmd",
")",
")",
"try",
":",
"if",
"dry_run",
":",
"print",
"(",
"f\"{submit_cmd} {fname.name}\"",
")",
"prev_jobids",
".",
"append",
"(",
"\"dry_run\"",
")",
"else",
":",
"cmd_res",
"=",
"subprocess",
".",
"check_output",
"(",
"submit_cmd",
"+",
"[",
"fname",
".",
"name",
"]",
",",
"cwd",
"=",
"str",
"(",
"directory",
")",
")",
"prev_jobids",
".",
"append",
"(",
"cmd_res",
".",
"decode",
"(",
")",
".",
"strip",
"(",
")",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"logger",
".",
"error",
"(",
"\"Submitting job to the queue failed.\"",
")",
"break"
] | Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively, the additional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed. | [
"Submit",
"a",
"series",
"of",
"commands",
"to",
"a",
"batch",
"scheduler",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L398-L483 |
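A small sketch of the dependency handling in the entry above: only the submit command construction is shown, the job ids and file names are made up, and nothing is actually submitted.

# Illustrative only: building qsub/sbatch commands with an afterok dependency on earlier jobs.
prev_jobids = ["12345.pbsserver", "12346.pbsserver"]  # hypothetical ids returned by the scheduler
afterok = f"afterok:{':'.join(prev_jobids)}"

pbs_cmd = ["qsub", "-W", f"depend={afterok}", "experi_02.pbs"]
slurm_cmd = ["sbatch", "--dependency", afterok, "experi_02.slurm"]
print(pbs_cmd)   # ['qsub', '-W', 'depend=afterok:12345.pbsserver:12346.pbsserver', 'experi_02.pbs']
print(slurm_cmd) # ['sbatch', '--dependency', 'afterok:12345.pbsserver:12346.pbsserver', 'experi_02.slurm']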
malramsay64/experi | src/experi/run.py | determine_scheduler | def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell" | python | def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell" | [
"def",
"determine_scheduler",
"(",
"scheduler",
":",
"Optional",
"[",
"str",
"]",
",",
"experiment_definition",
":",
"Dict",
"[",
"str",
",",
"YamlValue",
"]",
")",
"->",
"str",
":",
"# Scheduler value from command line has first priority",
"if",
"scheduler",
"is",
"not",
"None",
":",
"if",
"scheduler",
"in",
"[",
"\"shell\"",
",",
"\"pbs\"",
",",
"\"slurm\"",
"]",
":",
"return",
"scheduler",
"raise",
"ValueError",
"(",
"\"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']\"",
")",
"# Next priority goes to the experiment.yml file",
"if",
"experiment_definition",
".",
"get",
"(",
"\"pbs\"",
")",
":",
"return",
"\"pbs\"",
"if",
"experiment_definition",
".",
"get",
"(",
"\"slurm\"",
")",
":",
"return",
"\"slurm\"",
"if",
"experiment_definition",
".",
"get",
"(",
"\"shell\"",
")",
":",
"return",
"\"shell\"",
"# Final priority goes to the auto-discovery",
"if",
"shutil",
".",
"which",
"(",
"\"pbs\"",
")",
"is",
"not",
"None",
":",
"return",
"\"pbs\"",
"if",
"shutil",
".",
"which",
"(",
"\"slurm\"",
")",
"is",
"not",
"None",
":",
"return",
"\"slurm\"",
"# Default if nothing else is found goes to shell",
"return",
"\"shell\""
] | Determine the scheduler to use to run the jobs. | [
"Determine",
"the",
"scheduler",
"to",
"use",
"to",
"run",
"the",
"jobs",
"."
] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L486-L514 |
alfredodeza/notario | notario/validators/iterables.py | BasicIterableValidator.safe_type | def safe_type(self, data, tree):
"""
Make sure that the incoming data complies with the class type we
are expecting it to be. In this case, classes that inherit from this
base class expect data to be of type ``list``.
"""
if not isinstance(data, list):
name = self.__class__.__name__
msg = "did not pass validation against callable: %s" % name
reason = 'expected a list but got %s' % safe_repr(data)
raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg) | python | def safe_type(self, data, tree):
"""
Make sure that the incoming data complies with the class type we
are expecting it to be. In this case, classes that inherit from this
base class expect data to be of type ``list``.
"""
if not isinstance(data, list):
name = self.__class__.__name__
msg = "did not pass validation against callable: %s" % name
reason = 'expected a list but got %s' % safe_repr(data)
raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg) | [
"def",
"safe_type",
"(",
"self",
",",
"data",
",",
"tree",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"msg",
"=",
"\"did not pass validation against callable: %s\"",
"%",
"name",
"reason",
"=",
"'expected a list but got %s'",
"%",
"safe_repr",
"(",
"data",
")",
"raise",
"Invalid",
"(",
"self",
".",
"schema",
",",
"tree",
",",
"reason",
"=",
"reason",
",",
"pair",
"=",
"'value'",
",",
"msg",
"=",
"msg",
")"
] | Make sure that the incoming data complies with the class type we
are expecting it to be. In this case, classes that inherit from this
base class expect data to be of type ``list``. | [
"Make",
"sure",
"that",
"the",
"incoming",
"data",
"complies",
"with",
"the",
"class",
"type",
"we",
"are",
"expecting",
"it",
"to",
"be",
".",
"In",
"this",
"case",
"classes",
"that",
"inherit",
"from",
"this",
"base",
"class",
"expect",
"data",
"to",
"be",
"of",
"type",
"list",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/iterables.py#L22-L32 |
aptivate/ckanext-datasetversions | ckanext/datasetversions/helpers.py | get_context | def get_context(context):
"""An internal context generator. Accepts a CKAN context.
CKAN's internals put various things into the context which
makes reusing it for multiple API calls inadvisable. This
function adds more fine grain control on the context from
our plugin logic side.
"""
new_context = {
'model': context['model'],
'session': context['session'],
'user': context.get('user'),
'ignore_auth': context.get('ignore_auth', False),
'use_cache': context.get('use_cache', False),
}
if 'validate' in context:
new_context['validate'] = context['validate']
return new_context | python | def get_context(context):
"""An internal context generator. Accepts a CKAN context.
CKAN's internals put various things into the context which
makes reusing it for multiple API calls inadvisable. This
function adds more fine grain control on the context from
our plugin logic side.
"""
new_context = {
'model': context['model'],
'session': context['session'],
'user': context.get('user'),
'ignore_auth': context.get('ignore_auth', False),
'use_cache': context.get('use_cache', False),
}
if 'validate' in context:
new_context['validate'] = context['validate']
return new_context | [
"def",
"get_context",
"(",
"context",
")",
":",
"new_context",
"=",
"{",
"'model'",
":",
"context",
"[",
"'model'",
"]",
",",
"'session'",
":",
"context",
"[",
"'session'",
"]",
",",
"'user'",
":",
"context",
".",
"get",
"(",
"'user'",
")",
",",
"'ignore_auth'",
":",
"context",
".",
"get",
"(",
"'ignore_auth'",
",",
"False",
")",
",",
"'use_cache'",
":",
"context",
".",
"get",
"(",
"'use_cache'",
",",
"False",
")",
",",
"}",
"if",
"'validate'",
"in",
"context",
":",
"new_context",
"[",
"'validate'",
"]",
"=",
"context",
"[",
"'validate'",
"]",
"return",
"new_context"
] | An internal context generator. Accepts a CKAN context.
CKAN's internals put various things into the context which
makes reusing it for multiple API calls inadvisable. This
function adds more fine grain control on the context from
our plugin logic side. | [
"An",
"internal",
"context",
"generator",
".",
"Accepts",
"a",
"CKAN",
"context",
"."
] | train | https://github.com/aptivate/ckanext-datasetversions/blob/6a82fa5b20e28c705a2c187f4835b31ae928d88a/ckanext/datasetversions/helpers.py#L16-L35 |
tipsi/tipsi_tools | tipsi_tools/django/__init__.py | request_uniq | def request_uniq(func):
"""
return unique dict for each uwsgi request.
note: won't work on non-uwsgi cases
"""
def _wrapped(*args, **kwargs):
data = _get_request_unique_cache()
return func(data, *args, **kwargs)
return _wrapped | python | def request_uniq(func):
"""
return unique dict for each uwsgi request.
note: won't work on non-uwsgi cases
"""
def _wrapped(*args, **kwargs):
data = _get_request_unique_cache()
return func(data, *args, **kwargs)
return _wrapped | [
"def",
"request_uniq",
"(",
"func",
")",
":",
"def",
"_wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"_get_request_unique_cache",
"(",
")",
"return",
"func",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_wrapped"
] | return unique dict for each uwsgi request.
note: won't work on non-uwsgi cases | [
"return",
"unique",
"dict",
"for",
"each",
"uwsgi",
"request",
".",
"note",
":",
"won",
"t",
"work",
"on",
"non",
"-",
"uwsgi",
"cases"
] | train | https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/django/__init__.py#L21-L31 |
alfredodeza/notario | notario/utils.py | safe_repr | def safe_repr(obj):
"""
Try to get ``__name__`` first, ``__class__.__name__`` second
and finally, if we can't get anything acceptable, fallback
    to use a ``repr()`` call.
"""
name = getattr(obj, '__name__', getattr(obj.__class__, '__name__'))
if name == 'ndict':
name = 'dict'
return name or repr(obj) | python | def safe_repr(obj):
"""
Try to get ``__name__`` first, ``__class__.__name__`` second
and finally, if we can't get anything acceptable, fallback
    to use a ``repr()`` call.
"""
name = getattr(obj, '__name__', getattr(obj.__class__, '__name__'))
if name == 'ndict':
name = 'dict'
return name or repr(obj) | [
"def",
"safe_repr",
"(",
"obj",
")",
":",
"name",
"=",
"getattr",
"(",
"obj",
",",
"'__name__'",
",",
"getattr",
"(",
"obj",
".",
"__class__",
",",
"'__name__'",
")",
")",
"if",
"name",
"==",
"'ndict'",
":",
"name",
"=",
"'dict'",
"return",
"name",
"or",
"repr",
"(",
"obj",
")"
] | Try to get ``__name__`` first, ``__class__.__name__`` second
and finally, if we can't get anything acceptable, fallback
to use a ``repr()`` call. | [
"Try",
"to",
"get",
"__name__",
"first",
"__class__",
".",
"__name__",
"second",
"and",
"finally",
"if",
"we",
"can",
"t",
"get",
"anything",
"acceptable",
"fallback",
"to",
"user",
"a",
"repr",
"()",
"call",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L10-L19 |
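A small check of the getattr fallback chain the safe_repr entry above documents; the validator class name is invented.

# Illustrative only: prefer __name__, fall back to the class name.
class StringValidator:
    pass

def label(obj):
    return getattr(obj, '__name__', getattr(obj.__class__, '__name__'))

print(label(len))                # prints: len (functions carry __name__)
print(label(StringValidator()))  # prints: StringValidator (instances fall back to their class name)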
alfredodeza/notario | notario/utils.py | re_sort | def re_sort(data):
"""
    A dict with keys that are not enumerated sequentially will be
    re-sorted and sequentially ordered.
For example::
>>> data = {16: ('1', 'b'), 3: ('1', 'a')}
>>> re_sort(data)
>>> {0: ('1', 'a'), 1: ('1', 'b')}
"""
keys = sorted(data.keys())
new_data = {}
for number, key in enumerate(keys):
new_data[number] = data[key]
return new_data | python | def re_sort(data):
"""
    A dict with keys that are not enumerated sequentially will be
    re-sorted and sequentially ordered.
For example::
>>> data = {16: ('1', 'b'), 3: ('1', 'a')}
>>> re_sort(data)
>>> {0: ('1', 'a'), 1: ('1', 'b')}
"""
keys = sorted(data.keys())
new_data = {}
for number, key in enumerate(keys):
new_data[number] = data[key]
return new_data | [
"def",
"re_sort",
"(",
"data",
")",
":",
"keys",
"=",
"sorted",
"(",
"data",
".",
"keys",
"(",
")",
")",
"new_data",
"=",
"{",
"}",
"for",
"number",
",",
"key",
"in",
"enumerate",
"(",
"keys",
")",
":",
"new_data",
"[",
"number",
"]",
"=",
"data",
"[",
"key",
"]",
"return",
"new_data"
] | A dict with keys that are not enumerated sequentially will be
re-sorted and sequentially ordered.
For example::
>>> data = {16: ('1', 'b'), 3: ('1', 'a')}
>>> re_sort(data)
>>> {0: ('1', 'a'), 1: ('1', 'b')} | [
"A",
"data",
"with",
"keys",
"that",
"are",
"not",
"enumerated",
"sequentially",
"will",
"be",
"re",
"sorted",
"and",
"sequentially",
"ordered",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L39-L54 |
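A self-contained run of the renumbering behaviour documented above, using the same values as the docstring example.

# Illustrative only: renumber a dict with gappy integer keys into 0..N-1 by sorted key order.
data = {16: ('1', 'b'), 3: ('1', 'a')}
new_data = {number: data[key] for number, key in enumerate(sorted(data))}
print(new_data)  # {0: ('1', 'a'), 1: ('1', 'b')}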
alfredodeza/notario | notario/utils.py | sift | def sift(data, required_items=None):
"""
Receive a ``data`` object that will be in the form
of a normalized structure (e.g. ``{0: {'a': 0}}``) and
filter out keys that match the ``required_items``.
"""
required_items = required_items or []
new_data = {}
for k, v in data.items():
if v[0] in required_items:
new_data[k] = v
continue
for required_item in required_items:
key = getattr(required_item, '_object', False)
if key:
if v[0] == key:
new_data[k] = v
return re_sort(new_data) | python | def sift(data, required_items=None):
"""
Receive a ``data`` object that will be in the form
of a normalized structure (e.g. ``{0: {'a': 0}}``) and
filter out keys that match the ``required_items``.
"""
required_items = required_items or []
new_data = {}
for k, v in data.items():
if v[0] in required_items:
new_data[k] = v
continue
for required_item in required_items:
key = getattr(required_item, '_object', False)
if key:
if v[0] == key:
new_data[k] = v
return re_sort(new_data) | [
"def",
"sift",
"(",
"data",
",",
"required_items",
"=",
"None",
")",
":",
"required_items",
"=",
"required_items",
"or",
"[",
"]",
"new_data",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"v",
"[",
"0",
"]",
"in",
"required_items",
":",
"new_data",
"[",
"k",
"]",
"=",
"v",
"continue",
"for",
"required_item",
"in",
"required_items",
":",
"key",
"=",
"getattr",
"(",
"required_item",
",",
"'_object'",
",",
"False",
")",
"if",
"key",
":",
"if",
"v",
"[",
"0",
"]",
"==",
"key",
":",
"new_data",
"[",
"k",
"]",
"=",
"v",
"return",
"re_sort",
"(",
"new_data",
")"
] | Receive a ``data`` object that will be in the form
of a normalized structure (e.g. ``{0: {'a': 0}}``) and
filter out keys that match the ``required_items``. | [
"Receive",
"a",
"data",
"object",
"that",
"will",
"be",
"in",
"the",
"form",
"of",
"a",
"normalized",
"structure",
"(",
"e",
".",
"g",
".",
"{",
"0",
":",
"{",
"a",
":",
"0",
"}}",
")",
"and",
"filter",
"out",
"keys",
"that",
"match",
"the",
"required_items",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L57-L75 |
alfredodeza/notario | notario/utils.py | data_item | def data_item(data):
"""
When trying to return a meaningful error about an unexpected data item
    we cannot just `repr(data)` as that could show a gigantic data structure.
This utility should try to get the key of the first item or the single item
in the data structure.
"""
if isinstance(data, ndict):
# OK, we have something that looks like {0: ('a', 'b')}
# or something that is a regular dictionary
# so try to return 'a' regardless of the length
for item in data:
return repr(data[item][0])
elif isinstance(data, dict):
for item in data:
return repr(data[item])
elif isinstance(data, list):
return repr(data[0])
return repr(data) | python | def data_item(data):
"""
When trying to return a meaningful error about an unexpected data item
    we cannot just `repr(data)` as that could show a gigantic data structure.
This utility should try to get the key of the first item or the single item
in the data structure.
"""
if isinstance(data, ndict):
# OK, we have something that looks like {0: ('a', 'b')}
# or something that is a regular dictionary
# so try to return 'a' regardless of the length
for item in data:
return repr(data[item][0])
elif isinstance(data, dict):
for item in data:
return repr(data[item])
elif isinstance(data, list):
return repr(data[0])
return repr(data) | [
"def",
"data_item",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"ndict",
")",
":",
"# OK, we have something that looks like {0: ('a', 'b')}",
"# or something that is a regular dictionary",
"# so try to return 'a' regardless of the length",
"for",
"item",
"in",
"data",
":",
"return",
"repr",
"(",
"data",
"[",
"item",
"]",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"item",
"in",
"data",
":",
"return",
"repr",
"(",
"data",
"[",
"item",
"]",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"repr",
"(",
"data",
"[",
"0",
"]",
")",
"return",
"repr",
"(",
"data",
")"
] | When trying to return a meaningful error about an unexpected data item
we cannot just `repr(data)` as that could show a gigantic data struture.
This utility should try to get the key of the first item or the single item
in the data structure. | [
"When",
"trying",
"to",
"return",
"a",
"meaningful",
"error",
"about",
"an",
"unexpected",
"data",
"item",
"we",
"cannot",
"just",
"repr",
"(",
"data",
")",
"as",
"that",
"could",
"show",
"a",
"gigantic",
"data",
"struture",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L95-L114 |
alfredodeza/notario | notario/utils.py | ensure | def ensure(assertion, message=None):
"""
Checks an assertion argument for truth-ness. Will return ``True`` or
explicitly raise ``AssertionError``. This is to deal with environments
using ``python -O`` or ``PYTHONOPTIMIZE=``.
:param assertion: some value to evaluate for truth-ness
:param message: optional message used for raising AssertionError
"""
message = message or assertion
if not assertion:
raise AssertionError(message)
return True | python | def ensure(assertion, message=None):
"""
Checks an assertion argument for truth-ness. Will return ``True`` or
explicitly raise ``AssertionError``. This is to deal with environments
using ``python -O`` or ``PYTHONOPTIMIZE=``.
:param assertion: some value to evaluate for truth-ness
:param message: optional message used for raising AssertionError
"""
message = message or assertion
if not assertion:
raise AssertionError(message)
return True | [
"def",
"ensure",
"(",
"assertion",
",",
"message",
"=",
"None",
")",
":",
"message",
"=",
"message",
"or",
"assertion",
"if",
"not",
"assertion",
":",
"raise",
"AssertionError",
"(",
"message",
")",
"return",
"True"
] | Checks an assertion argument for truth-ness. Will return ``True`` or
explicitly raise ``AssertionError``. This is to deal with environments
using ``python -O`` or ``PYTHONOPTIMIZE=``.
:param assertion: some value to evaluate for truth-ness
:param message: optional message used for raising AssertionError | [
"Checks",
"an",
"assertion",
"argument",
"for",
"truth",
"-",
"ness",
".",
"Will",
"return",
"True",
"or",
"explicitly",
"raise",
"AssertionError",
".",
"This",
"is",
"to",
"deal",
"with",
"environments",
"using",
"python",
"-",
"O",
"or",
"PYTHONOPTIMIZE",
"=",
"."
] | train | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L144-L158 |
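A short usage sketch for the ensure row above, again assuming notario is installed; it shows both the truthy path and the raising path.

    from notario.utils import ensure

    ensure(1 + 1 == 2)                        # returns True
    try:
        ensure(2 + 2 == 5, 'math is broken')
    except AssertionError as error:
        print(error)                          # math is broken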
thiagopbueno/rddl2tf | rddl2tf/fluentshape.py | TensorFluentShape.fluent_shape | def fluent_shape(self) -> Sequence[int]:
'''Returns a copy of the fluent shape, ignoring batch size if in batch mode.'''
return tuple(self._shape.as_list()[1:] if self._batch else self._shape.as_list()[:]) | python | def fluent_shape(self) -> Sequence[int]:
'''Returns a copy of the fluent shape, ignoring batch size if in batch mode.'''
return tuple(self._shape.as_list()[1:] if self._batch else self._shape.as_list()[:]) | [
"def",
"fluent_shape",
"(",
"self",
")",
"->",
"Sequence",
"[",
"int",
"]",
":",
"return",
"tuple",
"(",
"self",
".",
"_shape",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
"if",
"self",
".",
"_batch",
"else",
"self",
".",
"_shape",
".",
"as_list",
"(",
")",
"[",
":",
"]",
")"
] | Returns a copy of the fluent shape, ignoring batch size if in batch mode. | [
"Returns",
"a",
"copy",
"of",
"the",
"fluent",
"shape",
"ignoring",
"batch",
"size",
"if",
"in",
"batch",
"mode",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentshape.py#L80-L82 |
thiagopbueno/rddl2tf | rddl2tf/fluentshape.py | TensorFluentShape.broadcast | def broadcast(cls,
shape1: 'TensorFluentShape',
shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]:
'''It broadcasts the fluent shapes if any input is in batch mode.
It handles input shapes in different modes, expanding its
dimensions if necessary. It outputs a tuple with new shapes.
If no input shape is in batch mode, return (None, None).
If an input shape does not need to be changed, return None.
Args:
shape1: A fluent's shape.
shape2: A fluent's shape.
Returns:
A pair of new shapes.
'''
reshape_1, reshape_2 = None, None
if not (shape1._batch or shape2._batch):
return reshape_1, reshape_2
size_1, size_2 = shape1.fluent_size, shape2.fluent_size
size_diff = abs(size_1 - size_2)
if size_diff == 0:
return reshape_1, reshape_2
if size_2 > size_1 and not (size_1 == 0 and not shape1._batch):
reshape_1 = [1] * size_diff + list(shape1.fluent_shape)
if shape1._batch:
reshape_1 = [shape1.batch_size] + reshape_1
elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch):
reshape_2 = [1] * size_diff + list(shape2.fluent_shape)
if shape2._batch:
reshape_2 = [shape2.batch_size] + reshape_2
return reshape_1, reshape_2 | python | def broadcast(cls,
shape1: 'TensorFluentShape',
shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]:
'''It broadcasts the fluent shapes if any input is in batch mode.
It handles input shapes in different modes, expanding its
dimensions if necessary. It outputs a tuple with new shapes.
If no input shape is in batch mode, return (None, None).
If an input shape does not need to be changed, return None.
Args:
shape1: A fluent's shape.
shape2: A fluent's shape.
Returns:
A pair of new shapes.
'''
reshape_1, reshape_2 = None, None
if not (shape1._batch or shape2._batch):
return reshape_1, reshape_2
size_1, size_2 = shape1.fluent_size, shape2.fluent_size
size_diff = abs(size_1 - size_2)
if size_diff == 0:
return reshape_1, reshape_2
if size_2 > size_1 and not (size_1 == 0 and not shape1._batch):
reshape_1 = [1] * size_diff + list(shape1.fluent_shape)
if shape1._batch:
reshape_1 = [shape1.batch_size] + reshape_1
elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch):
reshape_2 = [1] * size_diff + list(shape2.fluent_shape)
if shape2._batch:
reshape_2 = [shape2.batch_size] + reshape_2
return reshape_1, reshape_2 | [
"def",
"broadcast",
"(",
"cls",
",",
"shape1",
":",
"'TensorFluentShape'",
",",
"shape2",
":",
"'TensorFluentShape'",
")",
"->",
"Tuple",
"[",
"Reshaping",
",",
"Reshaping",
"]",
":",
"reshape_1",
",",
"reshape_2",
"=",
"None",
",",
"None",
"if",
"not",
"(",
"shape1",
".",
"_batch",
"or",
"shape2",
".",
"_batch",
")",
":",
"return",
"reshape_1",
",",
"reshape_2",
"size_1",
",",
"size_2",
"=",
"shape1",
".",
"fluent_size",
",",
"shape2",
".",
"fluent_size",
"size_diff",
"=",
"abs",
"(",
"size_1",
"-",
"size_2",
")",
"if",
"size_diff",
"==",
"0",
":",
"return",
"reshape_1",
",",
"reshape_2",
"if",
"size_2",
">",
"size_1",
"and",
"not",
"(",
"size_1",
"==",
"0",
"and",
"not",
"shape1",
".",
"_batch",
")",
":",
"reshape_1",
"=",
"[",
"1",
"]",
"*",
"size_diff",
"+",
"list",
"(",
"shape1",
".",
"fluent_shape",
")",
"if",
"shape1",
".",
"_batch",
":",
"reshape_1",
"=",
"[",
"shape1",
".",
"batch_size",
"]",
"+",
"reshape_1",
"elif",
"size_1",
">",
"size_2",
"and",
"not",
"(",
"size_2",
"==",
"0",
"and",
"not",
"shape2",
".",
"_batch",
")",
":",
"reshape_2",
"=",
"[",
"1",
"]",
"*",
"size_diff",
"+",
"list",
"(",
"shape2",
".",
"fluent_shape",
")",
"if",
"shape2",
".",
"_batch",
":",
"reshape_2",
"=",
"[",
"shape2",
".",
"batch_size",
"]",
"+",
"reshape_2",
"return",
"reshape_1",
",",
"reshape_2"
] | It broadcasts the fluent shapes if any input is in batch mode.
It handles input shapes in different modes, expanding its
dimensions if necessary. It outputs a tuple with new shapes.
If no input shape is in batch mode, return (None, None).
If an input shape does not need to be changed, return None.
Args:
shape1: A fluent's shape.
shape2: A fluent's shape.
Returns:
A pair of new shapes. | [
"It",
"broadcasts",
"the",
"fluent",
"shapes",
"if",
"any",
"input",
"is",
"in",
"batch",
"mode",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentshape.py#L90-L125 |
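To make the reshaping rule in the broadcast row above concrete without constructing real TensorFlow shapes, the stand-in class below only exposes the attributes the method reads (_batch, batch_size, fluent_size, fluent_shape). The class and the example shapes are assumptions for illustration, not the real TensorFluentShape constructor, and running it requires rddl2tf and its TensorFlow dependency.

    from rddl2tf.fluentshape import TensorFluentShape

    class _Shape:
        # Duck-typed stand-in with just the attributes broadcast() reads.
        def __init__(self, fluent_shape, batch_size=None):
            self._batch = batch_size is not None
            self.batch_size = batch_size
            self.fluent_shape = tuple(fluent_shape)
            self.fluent_size = len(self.fluent_shape)

    batched_scalar = _Shape([], batch_size=32)   # tensor shape (32,)
    plain_matrix = _Shape([3, 4])                # tensor shape (3, 4)
    # Only the batched, lower-rank side is expanded: ([32, 1, 1], None)
    print(TensorFluentShape.broadcast(batched_scalar, plain_matrix))

    batched_vector = _Shape([8], batch_size=32)  # tensor shape (32, 8)
    plain_scalar = _Shape([])                    # tensor shape ()
    # A non-batched scalar is left to TensorFlow broadcasting: (None, None)
    print(TensorFluentShape.broadcast(batched_vector, plain_scalar))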
inodb/sufam | sufam/mpileup_parser.py | run | def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters):
"""Run mpileup on given chrom and pos"""
# check for chr ref
is_chr_query = chrom.startswith('chr')
if is_chr_query and chr_reffa is None:
chr_reffa = reffa
# check bam ref type
bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True)
is_chr_bam = bam_header.find('SN:chr') != -1
if is_chr_bam:
reffa = chr_reffa
if not is_chr_query and is_chr_bam:
chrom = 'chr' + chrom
if is_chr_query and not is_chr_bam:
chrom = re.sub(r'^chr', '', chrom)
posmin = min(pos1, pos2)
posmax = max(pos1, pos2)
cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \
"| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom,
pos1=posmin, pos2=posmax,
reffa=reffa, parameters=parameters)
if pos1 == pos2:
cmd += " | awk '$2 == {pos}'".format(pos=pos1)
else:
cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax)
sys.stderr.write("Running:\n{}\n".format(cmd))
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = child.communicate()
if child.returncode != 0:
if len(stdout) == 0 and stderr is None:
warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. "
"Possibly no coverage for sample.".format(cmd=cmd))
else:
raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. "
"Check command.".format(cmd=cmd)))
else:
return stdout | python | def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters):
"""Run mpileup on given chrom and pos"""
# check for chr ref
is_chr_query = chrom.startswith('chr')
if is_chr_query and chr_reffa is None:
chr_reffa = reffa
# check bam ref type
bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True)
is_chr_bam = bam_header.find('SN:chr') != -1
if is_chr_bam:
reffa = chr_reffa
if not is_chr_query and is_chr_bam:
chrom = 'chr' + chrom
if is_chr_query and not is_chr_bam:
chrom = re.sub(r'^chr', '', chrom)
posmin = min(pos1, pos2)
posmax = max(pos1, pos2)
cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \
"| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom,
pos1=posmin, pos2=posmax,
reffa=reffa, parameters=parameters)
if pos1 == pos2:
cmd += " | awk '$2 == {pos}'".format(pos=pos1)
else:
cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax)
sys.stderr.write("Running:\n{}\n".format(cmd))
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = child.communicate()
if child.returncode != 0:
if len(stdout) == 0 and stderr is None:
warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. "
"Possibly no coverage for sample.".format(cmd=cmd))
else:
raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. "
"Check command.".format(cmd=cmd)))
else:
return stdout | [
"def",
"run",
"(",
"bam",
",",
"chrom",
",",
"pos1",
",",
"pos2",
",",
"reffa",
",",
"chr_reffa",
",",
"parameters",
")",
":",
"# check for chr ref",
"is_chr_query",
"=",
"chrom",
".",
"startswith",
"(",
"'chr'",
")",
"if",
"is_chr_query",
"and",
"chr_reffa",
"is",
"None",
":",
"chr_reffa",
"=",
"reffa",
"# check bam ref type",
"bam_header",
"=",
"subprocess",
".",
"check_output",
"(",
"\"samtools view -H {}\"",
".",
"format",
"(",
"bam",
")",
",",
"shell",
"=",
"True",
")",
"is_chr_bam",
"=",
"bam_header",
".",
"find",
"(",
"'SN:chr'",
")",
"!=",
"-",
"1",
"if",
"is_chr_bam",
":",
"reffa",
"=",
"chr_reffa",
"if",
"not",
"is_chr_query",
"and",
"is_chr_bam",
":",
"chrom",
"=",
"'chr'",
"+",
"chrom",
"if",
"is_chr_query",
"and",
"not",
"is_chr_bam",
":",
"chrom",
"=",
"re",
".",
"sub",
"(",
"r'^chr'",
",",
"''",
",",
"chrom",
")",
"posmin",
"=",
"min",
"(",
"pos1",
",",
"pos2",
")",
"posmax",
"=",
"max",
"(",
"pos1",
",",
"pos2",
")",
"cmd",
"=",
"\"samtools view -bh {bam} {chrom}:{pos1}-{pos2} \"",
"\"| samtools mpileup {parameters} -f {reffa} -\"",
".",
"format",
"(",
"bam",
"=",
"bam",
",",
"chrom",
"=",
"chrom",
",",
"pos1",
"=",
"posmin",
",",
"pos2",
"=",
"posmax",
",",
"reffa",
"=",
"reffa",
",",
"parameters",
"=",
"parameters",
")",
"if",
"pos1",
"==",
"pos2",
":",
"cmd",
"+=",
"\" | awk '$2 == {pos}'\"",
".",
"format",
"(",
"pos",
"=",
"pos1",
")",
"else",
":",
"cmd",
"+=",
"\" | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'\"",
".",
"format",
"(",
"posmin",
"=",
"posmin",
",",
"posmax",
"=",
"posmax",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Running:\\n{}\\n\"",
".",
"format",
"(",
"cmd",
")",
")",
"child",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"child",
".",
"communicate",
"(",
")",
"if",
"child",
".",
"returncode",
"!=",
"0",
":",
"if",
"len",
"(",
"stdout",
")",
"==",
"0",
"and",
"stderr",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"Command:\\n{cmd}\\n did not exit with zero exit code. \"",
"\"Possibly no coverage for sample.\"",
".",
"format",
"(",
"cmd",
"=",
"cmd",
")",
")",
"else",
":",
"raise",
"(",
"Exception",
"(",
"\"Command:\\n{cmd}\\n did not exit with zero exit code. \"",
"\"Check command.\"",
".",
"format",
"(",
"cmd",
"=",
"cmd",
")",
")",
")",
"else",
":",
"return",
"stdout"
] | Run mpileup on given chrom and pos | [
"Run",
"mpileup",
"on",
"given",
"chrom",
"and",
"pos"
] | train | https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/mpileup_parser.py#L97-L136 |
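A hedged call sketch for the run row above. All file paths and the mpileup flags are placeholders, and the call assumes samtools is on the PATH together with an indexed BAM and the matching reference FASTA files.

    from sufam.mpileup_parser import run

    pileup_lines = run(
        bam='sample.bam',              # hypothetical indexed BAM
        chrom='17',
        pos1=7577120,
        pos2=7577121,
        reffa='GRCh37.fa',             # reference without 'chr' prefixes
        chr_reffa='GRCh37_chr.fa',     # reference with 'chr' prefixes, used for chr-style BAMs
        parameters='-B -d 10000',      # illustrative samtools mpileup flags
    )
    print(pileup_lines)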
tipsi/tipsi_tools | tipsi_tools/python.py | execfile | def execfile(fname, _globals, _locals):
"""
Usage: execfile('path/to/file.py', globals(), locals())
"""
if os.path.exists(fname):
with open(fname) as f:
code = compile(f.read(), os.path.basename(fname), 'exec')
exec(code, _globals, _locals)
return True
else:
return False | python | def execfile(fname, _globals, _locals):
"""
Usage: execfile('path/to/file.py', globals(), locals())
"""
if os.path.exists(fname):
with open(fname) as f:
code = compile(f.read(), os.path.basename(fname), 'exec')
exec(code, _globals, _locals)
return True
else:
return False | [
"def",
"execfile",
"(",
"fname",
",",
"_globals",
",",
"_locals",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fname",
")",
":",
"with",
"open",
"(",
"fname",
")",
"as",
"f",
":",
"code",
"=",
"compile",
"(",
"f",
".",
"read",
"(",
")",
",",
"os",
".",
"path",
".",
"basename",
"(",
"fname",
")",
",",
"'exec'",
")",
"exec",
"(",
"code",
",",
"_globals",
",",
"_locals",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Usage: execfile('path/to/file.py', globals(), locals()) | [
"Usage",
":",
"execfile",
"(",
"path",
"/",
"to",
"/",
"file",
".",
"py",
"globals",
"()",
"locals",
"()",
")"
] | train | https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/python.py#L6-L16 |
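A small usage sketch for the execfile row above, assuming tipsi_tools is installed; it writes a throwaway script and executes it into the caller's namespace, relying on the documented True/False return value.

    import tempfile

    from tipsi_tools.python import execfile

    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as handle:
        handle.write('ANSWER = 41 + 1\n')

    assert execfile(handle.name, globals(), locals()) is True
    print(ANSWER)  # 42
    assert execfile('does_not_exist.py', globals(), locals()) is False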
tipsi/tipsi_tools | tipsi_tools/python.py | auto_directory | def auto_directory(rel_name):
"""
if you're using py.path you may do that as:
py.path.local(full_path).ensure_dir()
"""
dir_name = rel_path(rel_name, check=False)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
return dir_name | python | def auto_directory(rel_name):
"""
if you're using py.path you may do that as:
py.path.local(full_path).ensure_dir()
"""
dir_name = rel_path(rel_name, check=False)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
return dir_name | [
"def",
"auto_directory",
"(",
"rel_name",
")",
":",
"dir_name",
"=",
"rel_path",
"(",
"rel_name",
",",
"check",
"=",
"False",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_name",
",",
"exist_ok",
"=",
"True",
")",
"return",
"dir_name"
] | if you're using py.path you may do that as:
py.path.local(full_path).ensure_dir() | [
"if",
"you",
"re",
"using",
"py",
".",
"path",
"you",
"make",
"do",
"that",
"as",
":",
"py",
".",
"path",
".",
"local",
"(",
"full_path",
")",
".",
"ensure_dir",
"()"
] | train | https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/python.py#L27-L35 |
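A brief, hedged usage sketch for the auto_directory row above; it assumes tipsi_tools is installed and that the rel_path helper it calls resolves the name relative to the layout the library expects, so the directory name here is purely illustrative.

    from tipsi_tools.python import auto_directory

    cache_dir = auto_directory('cache')    # created if missing
    print(cache_dir)                       # resolved path returned by rel_path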
craigahobbs/chisel | src/chisel/util.py | parse_iso8601_date | def parse_iso8601_date(string):
"""
Parse an ISO 8601 date string
"""
# Match ISO 8601?
match = _RE_ISO8601_DATE.search(string)
if not match:
raise ValueError('Expected ISO 8601 date')
# Extract ISO 8601 components
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
return date(year, month, day) | python | def parse_iso8601_date(string):
"""
Parse an ISO 8601 date string
"""
# Match ISO 8601?
match = _RE_ISO8601_DATE.search(string)
if not match:
raise ValueError('Expected ISO 8601 date')
# Extract ISO 8601 components
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
return date(year, month, day) | [
"def",
"parse_iso8601_date",
"(",
"string",
")",
":",
"# Match ISO 8601?",
"match",
"=",
"_RE_ISO8601_DATE",
".",
"search",
"(",
"string",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
"'Expected ISO 8601 date'",
")",
"# Extract ISO 8601 components",
"year",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'year'",
")",
")",
"month",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'month'",
")",
")",
"day",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'day'",
")",
")",
"return",
"date",
"(",
"year",
",",
"month",
",",
"day",
")"
] | Parse an ISO 8601 date string | [
"Parse",
"an",
"ISO",
"8601",
"date",
"string"
] | train | https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L106-L121 |
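A usage sketch for the parse_iso8601_date row above, assuming the chisel package is installed; the sample strings are illustrative.

    from chisel.util import parse_iso8601_date

    print(parse_iso8601_date('2019-07-08'))   # 2019-07-08 (a datetime.date)

    try:
        parse_iso8601_date('07/08/2019')
    except ValueError as exc:
        print(exc)                            # Expected ISO 8601 date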
craigahobbs/chisel | src/chisel/util.py | parse_iso8601_datetime | def parse_iso8601_datetime(string):
"""
Parse an ISO 8601 date/time string
"""
# Match ISO 8601?
match = _RE_ISO8601_DATETIME.search(string)
if not match:
raise ValueError('Expected ISO 8601 date/time')
# Extract ISO 8601 components
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
hour = int(match.group('hour')) if match.group('hour') else 0
minute = int(match.group('min')) if match.group('min') else 0
sec = int(match.group('sec')) if match.group('sec') else 0
microsec = int(float('.' + match.group('fracsec')) * 1000000) if match.group('fracsec') else 0
offhour = int(match.group('offsign') + match.group('offhour')) if match.group('offhour') else 0
offmin = int(match.group('offsign') + match.group('offmin')) if match.group('offmin') else 0
return (datetime(year, month, day, hour, minute, sec, microsec, TZUTC) -
timedelta(hours=offhour, minutes=offmin)) | python | def parse_iso8601_datetime(string):
"""
Parse an ISO 8601 date/time string
"""
# Match ISO 8601?
match = _RE_ISO8601_DATETIME.search(string)
if not match:
raise ValueError('Expected ISO 8601 date/time')
# Extract ISO 8601 components
year = int(match.group('year'))
month = int(match.group('month'))
day = int(match.group('day'))
hour = int(match.group('hour')) if match.group('hour') else 0
minute = int(match.group('min')) if match.group('min') else 0
sec = int(match.group('sec')) if match.group('sec') else 0
microsec = int(float('.' + match.group('fracsec')) * 1000000) if match.group('fracsec') else 0
offhour = int(match.group('offsign') + match.group('offhour')) if match.group('offhour') else 0
offmin = int(match.group('offsign') + match.group('offmin')) if match.group('offmin') else 0
return (datetime(year, month, day, hour, minute, sec, microsec, TZUTC) -
timedelta(hours=offhour, minutes=offmin)) | [
"def",
"parse_iso8601_datetime",
"(",
"string",
")",
":",
"# Match ISO 8601?",
"match",
"=",
"_RE_ISO8601_DATETIME",
".",
"search",
"(",
"string",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
"'Expected ISO 8601 date/time'",
")",
"# Extract ISO 8601 components",
"year",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'year'",
")",
")",
"month",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'month'",
")",
")",
"day",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'day'",
")",
")",
"hour",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'hour'",
")",
")",
"if",
"match",
".",
"group",
"(",
"'hour'",
")",
"else",
"0",
"minute",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'min'",
")",
")",
"if",
"match",
".",
"group",
"(",
"'min'",
")",
"else",
"0",
"sec",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'sec'",
")",
")",
"if",
"match",
".",
"group",
"(",
"'sec'",
")",
"else",
"0",
"microsec",
"=",
"int",
"(",
"float",
"(",
"'.'",
"+",
"match",
".",
"group",
"(",
"'fracsec'",
")",
")",
"*",
"1000000",
")",
"if",
"match",
".",
"group",
"(",
"'fracsec'",
")",
"else",
"0",
"offhour",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'offsign'",
")",
"+",
"match",
".",
"group",
"(",
"'offhour'",
")",
")",
"if",
"match",
".",
"group",
"(",
"'offhour'",
")",
"else",
"0",
"offmin",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"'offsign'",
")",
"+",
"match",
".",
"group",
"(",
"'offmin'",
")",
")",
"if",
"match",
".",
"group",
"(",
"'offmin'",
")",
"else",
"0",
"return",
"(",
"datetime",
"(",
"year",
",",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"sec",
",",
"microsec",
",",
"TZUTC",
")",
"-",
"timedelta",
"(",
"hours",
"=",
"offhour",
",",
"minutes",
"=",
"offmin",
")",
")"
] | Parse an ISO 8601 date/time string | [
"Parse",
"an",
"ISO",
"8601",
"date",
"/",
"time",
"string"
] | train | https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L124-L146 |
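A companion sketch for the parse_iso8601_datetime row above, also assuming chisel is installed. The exact set of accepted offset spellings depends on the module's regular expression, which is not reproduced in this listing, so explicit numeric offsets are used as the safe illustration.

    from chisel.util import parse_iso8601_datetime

    # The numeric offset is folded into the returned UTC datetime.
    print(parse_iso8601_datetime('2019-07-08T10:30:00-04:00'))
    # 2019-07-08 14:30:00+00:00

    print(parse_iso8601_datetime('2019-07-08T10:30:00.250+00:00'))
    # 2019-07-08 10:30:00.250000+00:00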
craigahobbs/chisel | src/chisel/util.py | import_submodules | def import_submodules(package, parent_package=None, exclude_submodules=None):
"""
Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type parent_package: str
:rtype: iterator of modules
"""
exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules
package = importlib.import_module(package, parent_package)
for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)):
continue
yield importlib.import_module(name) | python | def import_submodules(package, parent_package=None, exclude_submodules=None):
"""
Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type parent_package: str
:rtype: iterator of modules
"""
exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules
package = importlib.import_module(package, parent_package)
for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)):
continue
yield importlib.import_module(name) | [
"def",
"import_submodules",
"(",
"package",
",",
"parent_package",
"=",
"None",
",",
"exclude_submodules",
"=",
"None",
")",
":",
"exclude_submodules_dot",
"=",
"[",
"x",
"+",
"'.'",
"for",
"x",
"in",
"exclude_submodules",
"]",
"if",
"exclude_submodules",
"else",
"exclude_submodules",
"package",
"=",
"importlib",
".",
"import_module",
"(",
"package",
",",
"parent_package",
")",
"for",
"_",
",",
"name",
",",
"_",
"in",
"pkgutil",
".",
"walk_packages",
"(",
"package",
".",
"__path__",
",",
"package",
".",
"__name__",
"+",
"'.'",
")",
":",
"if",
"exclude_submodules",
"and",
"(",
"name",
"in",
"exclude_submodules",
"or",
"any",
"(",
"name",
".",
"startswith",
"(",
"x",
")",
"for",
"x",
"in",
"exclude_submodules_dot",
")",
")",
":",
"continue",
"yield",
"importlib",
".",
"import_module",
"(",
"name",
")"
] | Generator which imports all submodules of a module, recursively, including subpackages
:param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided
:type package: str
:param parent_package: parent package name (e.g 'chisel')
:type parent_package: str
:rtype: iterator of modules | [
"Generator",
"which",
"imports",
"all",
"submodules",
"of",
"a",
"module",
"recursively",
"including",
"subpackages"
] | train | https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L149-L165 |
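A usage sketch for the import_submodules row above, assuming chisel is installed; the standard-library email package is used as a convenient target, and the exclusion value is illustrative.

    from chisel.util import import_submodules

    modules = list(import_submodules('email', exclude_submodules=['email.mime']))
    print(sorted(module.__name__ for module in modules)[:3])
    # e.g. ['email._encoded_words', 'email._header_value_parser', 'email._parseaddr']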
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_initial_state | def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
'''Returns a tuple of tensors representing the initial state fluents.
Args:
batch_size (Optional[int]): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
'''
with self.graph.as_default():
with tf.name_scope('initial_state'):
self._initialize_initial_state_fluents()
if batch_size is None:
return self.initial_state_fluents
return self._compile_batch_fluents(self.initial_state_fluents, batch_size) | python | def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
'''Returns a tuple of tensors representing the initial state fluents.
Args:
batch_size (Optional[int]): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
'''
with self.graph.as_default():
with tf.name_scope('initial_state'):
self._initialize_initial_state_fluents()
if batch_size is None:
return self.initial_state_fluents
return self._compile_batch_fluents(self.initial_state_fluents, batch_size) | [
"def",
"compile_initial_state",
"(",
"self",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
":",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'initial_state'",
")",
":",
"self",
".",
"_initialize_initial_state_fluents",
"(",
")",
"if",
"batch_size",
"is",
"None",
":",
"return",
"self",
".",
"initial_state_fluents",
"return",
"self",
".",
"_compile_batch_fluents",
"(",
"self",
".",
"initial_state_fluents",
",",
"batch_size",
")"
] | Returns a tuple of tensors representing the initial state fluents.
Args:
batch_size (Optional[int]): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors. | [
"Returns",
"a",
"tuple",
"of",
"tensors",
"representing",
"the",
"initial",
"state",
"fluents",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L90-L104 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_default_action | def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
'''Returns a tuple of tensors representing the default action fluents.
Args:
batch_size (int): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
'''
with self.graph.as_default():
with tf.name_scope('default_action'):
self._initialize_default_action_fluents()
if batch_size is None:
return self.default_action_fluents
return self._compile_batch_fluents(self.default_action_fluents, batch_size) | python | def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
'''Returns a tuple of tensors representing the default action fluents.
Args:
batch_size (int): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors.
'''
with self.graph.as_default():
with tf.name_scope('default_action'):
self._initialize_default_action_fluents()
if batch_size is None:
return self.default_action_fluents
return self._compile_batch_fluents(self.default_action_fluents, batch_size) | [
"def",
"compile_default_action",
"(",
"self",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
":",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'default_action'",
")",
":",
"self",
".",
"_initialize_default_action_fluents",
"(",
")",
"if",
"batch_size",
"is",
"None",
":",
"return",
"self",
".",
"default_action_fluents",
"return",
"self",
".",
"_compile_batch_fluents",
"(",
"self",
".",
"default_action_fluents",
",",
"batch_size",
")"
] | Returns a tuple of tensors representing the default action fluents.
Args:
batch_size (int): The batch size.
Returns:
Sequence[tf.Tensor]: A tuple of tensors. | [
"Returns",
"a",
"tuple",
"of",
"tensors",
"representing",
"the",
"default",
"action",
"fluents",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L106-L120 |
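A hedged end-to-end sketch for the two compilation entry points above (compile_initial_state and compile_default_action). Only the methods themselves appear in this listing, so the RDDL parsing step via rddlgym and the Compiler(rddl, batch_mode=True) constructor arguments are assumptions.

    import rddlgym                      # assumed helper that returns a parsed RDDL AST
    from rddl2tf.compiler import Compiler

    rddl = rddlgym.make('Navigation-v2', mode=rddlgym.AST)   # assumed domain name and API
    compiler = Compiler(rddl, batch_mode=True)                # assumed constructor signature

    batch_size = 64
    initial_state = compiler.compile_initial_state(batch_size)    # tuple of [batch, ...] tensors
    default_action = compiler.compile_default_action(batch_size)  # tuple of [batch, ...] tensors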
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.cpfs | def cpfs(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]:
'''Compiles the intermediate and next state fluent CPFs given
the current `state` and `action`.
Args:
state (Sequence[tf.Tensor]): A tuple of state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
Returns:
Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
'''
scope = self.transition_scope(state, action)
batch_size = int(state[0].shape[0])
interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise)
interms = [fluent for _, fluent in interm_fluents]
next_state = [fluent for _, fluent in next_state_fluents]
return interms, next_state | python | def cpfs(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]:
'''Compiles the intermediate and next state fluent CPFs given
the current `state` and `action`.
Args:
state (Sequence[tf.Tensor]): A tuple of state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
Returns:
Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
'''
scope = self.transition_scope(state, action)
batch_size = int(state[0].shape[0])
interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise)
interms = [fluent for _, fluent in interm_fluents]
next_state = [fluent for _, fluent in next_state_fluents]
return interms, next_state | [
"def",
"cpfs",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"noise",
":",
"Optional",
"[",
"Noise",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"List",
"[",
"TensorFluent",
"]",
",",
"List",
"[",
"TensorFluent",
"]",
"]",
":",
"scope",
"=",
"self",
".",
"transition_scope",
"(",
"state",
",",
"action",
")",
"batch_size",
"=",
"int",
"(",
"state",
"[",
"0",
"]",
".",
"shape",
"[",
"0",
"]",
")",
"interm_fluents",
",",
"next_state_fluents",
"=",
"self",
".",
"compile_cpfs",
"(",
"scope",
",",
"batch_size",
",",
"noise",
")",
"interms",
"=",
"[",
"fluent",
"for",
"_",
",",
"fluent",
"in",
"interm_fluents",
"]",
"next_state",
"=",
"[",
"fluent",
"for",
"_",
",",
"fluent",
"in",
"next_state_fluents",
"]",
"return",
"interms",
",",
"next_state"
] | Compiles the intermediate and next state fluent CPFs given
the current `state` and `action`.
Args:
state (Sequence[tf.Tensor]): A tuple of state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
Returns:
Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs. | [
"Compiles",
"the",
"intermediate",
"and",
"next",
"state",
"fluent",
"CPFs",
"given",
"the",
"current",
"state",
"and",
"action",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L122-L142 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.reward | def reward(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> tf.Tensor:
'''Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
'''
scope = self.reward_scope(state, action, next_state)
r = self.compile_reward(scope).tensor
with self.graph.as_default():
with tf.name_scope('reward'):
return tf.expand_dims(r, -1) | python | def reward(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor],
next_state: Sequence[tf.Tensor]) -> tf.Tensor:
'''Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function.
'''
scope = self.reward_scope(state, action, next_state)
r = self.compile_reward(scope).tensor
with self.graph.as_default():
with tf.name_scope('reward'):
return tf.expand_dims(r, -1) | [
"def",
"reward",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"next_state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"tf",
".",
"Tensor",
":",
"scope",
"=",
"self",
".",
"reward_scope",
"(",
"state",
",",
"action",
",",
"next_state",
")",
"r",
"=",
"self",
".",
"compile_reward",
"(",
"scope",
")",
".",
"tensor",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'reward'",
")",
":",
"return",
"tf",
".",
"expand_dims",
"(",
"r",
",",
"-",
"1",
")"
] | Compiles the reward function given the current `state`, `action` and
`next_state`.
Args:
state (Sequence[tf.Tensor]): A tuple of current state tensors.
action (Sequence[tf.Tensor]): A tuple of action tensors.
next_state (Sequence[tf.Tensor]): A tuple of next state tensors.
Returns:
(:obj:`tf.Tensor`): A tensor representing the reward function. | [
"Compiles",
"the",
"reward",
"function",
"given",
"the",
"current",
"state",
"action",
"and",
"next_state",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L144-L163 |
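Continuing the sketch placed after the compile_default_action row (same assumed setup: rddl, compiler, initial_state, default_action), the lines below run one transition through cpfs and reward; the .tensor attribute used to unwrap each TensorFluent is the same one the reward method itself relies on.

    interms, next_state_fluents = compiler.cpfs(initial_state, default_action)
    next_state = tuple(fluent.tensor for fluent in next_state_fluents)
    reward = compiler.reward(initial_state, default_action, next_state)   # shape (batch_size, 1)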
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_cpfs | def compile_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]:
'''Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
'''
interm_fluents = self.compile_intermediate_cpfs(scope, batch_size, noise)
scope.update(dict(interm_fluents))
next_state_fluents = self.compile_state_cpfs(scope, batch_size, noise)
return interm_fluents, next_state_fluents | python | def compile_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]:
'''Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs.
'''
interm_fluents = self.compile_intermediate_cpfs(scope, batch_size, noise)
scope.update(dict(interm_fluents))
next_state_fluents = self.compile_state_cpfs(scope, batch_size, noise)
return interm_fluents, next_state_fluents | [
"def",
"compile_cpfs",
"(",
"self",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"noise",
":",
"Optional",
"[",
"Noise",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"List",
"[",
"CPFPair",
"]",
",",
"List",
"[",
"CPFPair",
"]",
"]",
":",
"interm_fluents",
"=",
"self",
".",
"compile_intermediate_cpfs",
"(",
"scope",
",",
"batch_size",
",",
"noise",
")",
"scope",
".",
"update",
"(",
"dict",
"(",
"interm_fluents",
")",
")",
"next_state_fluents",
"=",
"self",
".",
"compile_state_cpfs",
"(",
"scope",
",",
"batch_size",
",",
"noise",
")",
"return",
"interm_fluents",
",",
"next_state_fluents"
] | Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent
representing the intermediate and state CPFs. | [
"Compiles",
"the",
"intermediate",
"and",
"next",
"state",
"fluent",
"CPFs",
"given",
"the",
"current",
"state",
"and",
"action",
"scope",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L165-L182 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_intermediate_cpfs | def compile_intermediate_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
'''Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
'''
interm_fluents = []
with self.graph.as_default():
with tf.name_scope('intermediate_cpfs'):
for cpf in self.rddl.domain.intermediate_cpfs:
cpf_noise = noise.get(cpf.name, None) if noise is not None else None
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
interm_fluents.append((cpf.name, t))
scope[cpf.name] = t
return interm_fluents | python | def compile_intermediate_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
'''Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
'''
interm_fluents = []
with self.graph.as_default():
with tf.name_scope('intermediate_cpfs'):
for cpf in self.rddl.domain.intermediate_cpfs:
cpf_noise = noise.get(cpf.name, None) if noise is not None else None
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
interm_fluents.append((cpf.name, t))
scope[cpf.name] = t
return interm_fluents | [
"def",
"compile_intermediate_cpfs",
"(",
"self",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"noise",
":",
"Optional",
"[",
"Noise",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"CPFPair",
"]",
":",
"interm_fluents",
"=",
"[",
"]",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'intermediate_cpfs'",
")",
":",
"for",
"cpf",
"in",
"self",
".",
"rddl",
".",
"domain",
".",
"intermediate_cpfs",
":",
"cpf_noise",
"=",
"noise",
".",
"get",
"(",
"cpf",
".",
"name",
",",
"None",
")",
"if",
"noise",
"is",
"not",
"None",
"else",
"None",
"name_scope",
"=",
"utils",
".",
"identifier",
"(",
"cpf",
".",
"name",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name_scope",
")",
":",
"t",
"=",
"self",
".",
"_compile_expression",
"(",
"cpf",
".",
"expr",
",",
"scope",
",",
"batch_size",
",",
"cpf_noise",
")",
"interm_fluents",
".",
"append",
"(",
"(",
"cpf",
".",
"name",
",",
"t",
")",
")",
"scope",
"[",
"cpf",
".",
"name",
"]",
"=",
"t",
"return",
"interm_fluents"
] | Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. | [
"Compiles",
"the",
"intermediate",
"fluent",
"CPFs",
"given",
"the",
"current",
"state",
"and",
"action",
"scope",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L184-L212 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_state_cpfs | def compile_state_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
'''Compiles the next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
'''
next_state_fluents = []
with self.graph.as_default():
with tf.name_scope('state_cpfs'):
for cpf in self.rddl.domain.state_cpfs:
cpf_noise = noise.get(cpf.name, None) if noise is not None else None
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
next_state_fluents.append((cpf.name, t))
key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0])
next_state_fluents = sorted(next_state_fluents, key=key)
return next_state_fluents | python | def compile_state_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
'''Compiles the next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
'''
next_state_fluents = []
with self.graph.as_default():
with tf.name_scope('state_cpfs'):
for cpf in self.rddl.domain.state_cpfs:
cpf_noise = noise.get(cpf.name, None) if noise is not None else None
name_scope = utils.identifier(cpf.name)
with tf.name_scope(name_scope):
t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise)
next_state_fluents.append((cpf.name, t))
key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0])
next_state_fluents = sorted(next_state_fluents, key=key)
return next_state_fluents | [
"def",
"compile_state_cpfs",
"(",
"self",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"noise",
":",
"Optional",
"[",
"Noise",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"CPFPair",
"]",
":",
"next_state_fluents",
"=",
"[",
"]",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'state_cpfs'",
")",
":",
"for",
"cpf",
"in",
"self",
".",
"rddl",
".",
"domain",
".",
"state_cpfs",
":",
"cpf_noise",
"=",
"noise",
".",
"get",
"(",
"cpf",
".",
"name",
",",
"None",
")",
"if",
"noise",
"is",
"not",
"None",
"else",
"None",
"name_scope",
"=",
"utils",
".",
"identifier",
"(",
"cpf",
".",
"name",
")",
"with",
"tf",
".",
"name_scope",
"(",
"name_scope",
")",
":",
"t",
"=",
"self",
".",
"_compile_expression",
"(",
"cpf",
".",
"expr",
",",
"scope",
",",
"batch_size",
",",
"cpf_noise",
")",
"next_state_fluents",
".",
"append",
"(",
"(",
"cpf",
".",
"name",
",",
"t",
")",
")",
"key",
"=",
"lambda",
"f",
":",
"self",
".",
"rddl",
".",
"domain",
".",
"next_state_fluent_ordering",
".",
"index",
"(",
"f",
"[",
"0",
"]",
")",
"next_state_fluents",
"=",
"sorted",
"(",
"next_state_fluents",
",",
"key",
"=",
"key",
")",
"return",
"next_state_fluents"
] | Compiles the next state fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. | [
"Compiles",
"the",
"next",
"state",
"fluent",
"CPFs",
"given",
"the",
"current",
"state",
"and",
"action",
"scope",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L214-L244 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_reward | def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent:
'''Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
'''
reward_expr = self.rddl.domain.reward
with self.graph.as_default():
with tf.name_scope('reward'):
return self._compile_expression(reward_expr, scope) | python | def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent:
'''Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
'''
reward_expr = self.rddl.domain.reward
with self.graph.as_default():
with tf.name_scope('reward'):
return self._compile_expression(reward_expr, scope) | [
"def",
"compile_reward",
"(",
"self",
",",
"scope",
":",
"Dict",
"[",
"str",
",",
"TensorFluent",
"]",
")",
"->",
"TensorFluent",
":",
"reward_expr",
"=",
"self",
".",
"rddl",
".",
"domain",
".",
"reward",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'reward'",
")",
":",
"return",
"self",
".",
"_compile_expression",
"(",
"reward_expr",
",",
"scope",
")"
] | Compiles the reward function given the fluent `scope`.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation.
Returns:
A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function. | [
"Compiles",
"the",
"reward",
"function",
"given",
"the",
"fluent",
"scope",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L246-L258 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_state_action_constraints | def compile_state_action_constraints(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.transition_scope(state, action)
constraints = []
with self.graph.as_default():
with tf.name_scope('state_action_constraints'):
for p in self.rddl.domain.constraints:
fluent = self._compile_expression(p, scope)
constraints.append(fluent)
return constraints | python | def compile_state_action_constraints(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.transition_scope(state, action)
constraints = []
with self.graph.as_default():
with tf.name_scope('state_action_constraints'):
for p in self.rddl.domain.constraints:
fluent = self._compile_expression(p, scope)
constraints.append(fluent)
return constraints | [
"def",
"compile_state_action_constraints",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"List",
"[",
"TensorFluent",
"]",
":",
"scope",
"=",
"self",
".",
"transition_scope",
"(",
"state",
",",
"action",
")",
"constraints",
"=",
"[",
"]",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'state_action_constraints'",
")",
":",
"for",
"p",
"in",
"self",
".",
"rddl",
".",
"domain",
".",
"constraints",
":",
"fluent",
"=",
"self",
".",
"_compile_expression",
"(",
"p",
",",
"scope",
")",
"constraints",
".",
"append",
"(",
"fluent",
")",
"return",
"constraints"
] | Compiles the state-action constraints given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | [
"Compiles",
"the",
"state",
"-",
"action",
"constraints",
"given",
"current",
"state",
"and",
"action",
"fluents",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L260-L279 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_action_preconditions | def compile_action_preconditions(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.action_precondition_scope(state, action)
preconds = []
with self.graph.as_default():
with tf.name_scope('action_preconditions'):
for p in self.rddl.domain.preconds:
fluent = self._compile_expression(p, scope)
preconds.append(fluent)
return preconds | python | def compile_action_preconditions(self,
state: Sequence[tf.Tensor],
action: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.action_precondition_scope(state, action)
preconds = []
with self.graph.as_default():
with tf.name_scope('action_preconditions'):
for p in self.rddl.domain.preconds:
fluent = self._compile_expression(p, scope)
preconds.append(fluent)
return preconds | [
"def",
"compile_action_preconditions",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
",",
"action",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"List",
"[",
"TensorFluent",
"]",
":",
"scope",
"=",
"self",
".",
"action_precondition_scope",
"(",
"state",
",",
"action",
")",
"preconds",
"=",
"[",
"]",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'action_preconditions'",
")",
":",
"for",
"p",
"in",
"self",
".",
"rddl",
".",
"domain",
".",
"preconds",
":",
"fluent",
"=",
"self",
".",
"_compile_expression",
"(",
"p",
",",
"scope",
")",
"preconds",
".",
"append",
"(",
"fluent",
")",
"return",
"preconds"
] | Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | [
"Compiles",
"the",
"action",
"preconditions",
"given",
"current",
"state",
"and",
"action",
"fluents",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L281-L300 |
thiagopbueno/rddl2tf | rddl2tf/compiler.py | Compiler.compile_state_invariants | def compile_state_invariants(self,
state: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state invariants given current `state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.state_invariant_scope(state)
invariants = []
with self.graph.as_default():
with tf.name_scope('state_invariants'):
for p in self.rddl.domain.invariants:
fluent = self._compile_expression(p, scope)
invariants.append(fluent)
return invariants | python | def compile_state_invariants(self,
state: Sequence[tf.Tensor]) -> List[TensorFluent]:
'''Compiles the state invariants given current `state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`.
'''
scope = self.state_invariant_scope(state)
invariants = []
with self.graph.as_default():
with tf.name_scope('state_invariants'):
for p in self.rddl.domain.invariants:
fluent = self._compile_expression(p, scope)
invariants.append(fluent)
return invariants | [
"def",
"compile_state_invariants",
"(",
"self",
",",
"state",
":",
"Sequence",
"[",
"tf",
".",
"Tensor",
"]",
")",
"->",
"List",
"[",
"TensorFluent",
"]",
":",
"scope",
"=",
"self",
".",
"state_invariant_scope",
"(",
"state",
")",
"invariants",
"=",
"[",
"]",
"with",
"self",
".",
"graph",
".",
"as_default",
"(",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'state_invariants'",
")",
":",
"for",
"p",
"in",
"self",
".",
"rddl",
".",
"domain",
".",
"invariants",
":",
"fluent",
"=",
"self",
".",
"_compile_expression",
"(",
"p",
",",
"scope",
")",
"invariants",
".",
"append",
"(",
"fluent",
")",
"return",
"invariants"
] | Compiles the state invariants given current `state` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | [
"Compiles",
"the",
"state",
"invarints",
"given",
"current",
"state",
"fluents",
"."
] | train | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L302-L319 |
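Still under the same assumed setup as the two sketches above, the constraint-related entry points from the last few rows can be evaluated for the same batch; each call returns a list of TensorFluent objects wrapping boolean tensors that can be reduced (for example with tf.reduce_all) to check feasibility per batch row.

    preconditions = compiler.compile_action_preconditions(initial_state, default_action)
    invariants = compiler.compile_state_invariants(initial_state)
    constraints = compiler.compile_state_action_constraints(initial_state, default_action)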