hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
970k
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_non_ascii
int64 0
514k
| filtered:remove_delete_markers
int64 0
0
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4029817dc33967552ae1824d14039c95b823fc6b | 12,289 | py | Python | neural_guided_symbolic_regression/models/mcts.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | neural_guided_symbolic_regression/models/mcts.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 7 | 2021-11-10T19:44:38.000Z | 2022-02-10T06:48:39.000Z | neural_guided_symbolic_regression/models/mcts.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find expression by Monte Carlo Tree Search guided by neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from neural_guided_symbolic_regression.mcts import policies
from neural_guided_symbolic_regression.mcts import rewards
from neural_guided_symbolic_regression.mcts import states
from neural_guided_symbolic_regression.models import metrics
from neural_guided_symbolic_regression.models import partial_sequence_model_generator
class NeuralProductionRuleAppendPolicy(policies.PolicyBase):
  """Appends a valid production rule on existing list of production rules.

  The probabilities of the actions will be determined by the partial sequence
  model.
  """

  def __init__(self, sess, grammar, max_length, symbolic_properties_dict):
    """Initializer.

    Args:
      sess: tf.Session, the session contains the trained model to predict next
          production rule from input partial sequence. If None, each step will
          be selected randomly.
      grammar: arithmetic_grammar.Grammar object.
      max_length: Integer, the max length of production rule sequence.
      symbolic_properties_dict: Dict, the keys are the symbolic properties used
          as conditions. Values are the corresponding desired values of the
          symbolic properties.
    """
    self._sess = sess
    self._grammar = grammar
    self._max_length = max_length
    conditions = {}
    if symbolic_properties_dict is not None:
      # The model expects each condition value as a length-1 float32 array.
      # NOTE: dict.items() (not iteritems()) -- iteritems() was removed in
      # Python 3 and would raise AttributeError there.
      conditions.update({
          key: np.array([value], dtype=np.float32)
          for key, value in symbolic_properties_dict.items()
      })
    self._conditions = conditions

  def get_new_states_probs(self, state):
    """Gets new state from current state by appending a valid production rule.

    Args:
      state: A mcts.states.ProductionRulesState object. Contains a list of
          nltk.grammar.Production objects in attribute
          production_rules_sequence.

    Returns:
      new_states: A list of next states. Each state is a result from apply an
          action in the instance attribute actions to the input state. Invalid
          actions are represented as None.
      action_probs: A float numpy array with shape [num_actions,]. The
          probability of each action in the class attribute actions; np.nan
          for invalid actions.

    Raises:
      TypeError: If input state is not states.ProductionRulesState object.
    """
    if not isinstance(state, states.ProductionRulesState):
      raise TypeError('Input state should be an instance of '
                      'states.ProductionRulesState but got %s' % type(state))
    production_rules_sequence = state.production_rules_sequence
    if len(production_rules_sequence) > self._max_length:
      # Do not allow the length of production rules sequence exceed _max_length.
      # All nan probabilities will stop the rollout in MCTS.
      masked_probabilities = [np.nan] * self._grammar.num_production_rules
    else:
      masked_probabilities = (
          partial_sequence_model_generator.get_masked_probabilities_from_model(
              sess=self._sess,
              max_length=self._max_length,
              partial_sequence=[self._grammar.prod_rule_to_index[str(prod_rule)]
                                for prod_rule in production_rules_sequence],
              next_production_rule_mask=self._grammar.masks[
                  self._grammar.lhs_to_index[state.stack_peek()]],
              conditions=self._conditions))
    new_states = []
    action_probs = []
    for probability, production_rule in zip(
        masked_probabilities, self._grammar.prod_rules):
      if state.is_valid_to_append(production_rule):
        new_state = state.copy()
        new_state.append_production_rule(production_rule)
        new_states.append(new_state)
        action_probs.append(probability)
      else:
        new_states.append(None)
        action_probs.append(np.nan)
    action_probs = np.asarray(action_probs)
    # Renormalize over the valid (non-nan) actions so they sum to 1.
    action_probs /= np.nansum(action_probs)
    return new_states, action_probs
class LeadingPowers(rewards.RewardBase):
  """Reward for a univariate expression based only on its leading powers.

  Scores an expression by how well it matches the desired leading powers at
  0 and at infinity:

  reward = -abs(leading power difference at 0)
      - abs(leading power difference at infinity)
  """

  def __init__(
      self,
      leading_at_0,
      leading_at_inf,
      variable_symbol='x',
      post_transformer=None,
      allow_nonterminal=False,
      default_value=None):
    """Initializer.

    Args:
      leading_at_0: Float, desired leading power at 0.
      leading_at_inf: Float, desired leading power at inf.
      variable_symbol: String, the symbol of variable in function expression.
      post_transformer: Callable mapping one float to another, applied as a
          post-transformation to the reward evaluated on a state. Default
          None, no post-transformation will be applied.
      allow_nonterminal: Boolean, if False, ValueError will be raised when
          list of symbols to evaluate contains non-terminal symbol and
          default_value is None. Default False.
      default_value: Float, returned instead of raising ValueError when
          allow_nonterminal is False and a non-terminal symbol exists.
    """
    super(LeadingPowers, self).__init__(
        post_transformer=post_transformer,
        allow_nonterminal=allow_nonterminal,
        default_value=default_value)
    self._leading_at_0 = leading_at_0
    self._leading_at_inf = leading_at_inf
    self._variable_symbol = variable_symbol

  def get_leading_power_error(self, state):
    """Gets the leading power error.

    The leading power error is defined as
    abs(leading power difference at 0) + abs(leading power difference at inf).

    Args:
      state: mcts.states.StateBase object. Records all the information of
          expression.

    Returns:
      Float.
    """
    actual_at_0, actual_at_inf = metrics.evaluate_leading_powers_at_0_inf(
        expression_string=state.get_expression(),
        symbol=self._variable_symbol)
    error_at_0 = abs(actual_at_0 - self._leading_at_0)
    error_at_inf = abs(actual_at_inf - self._leading_at_inf)
    return error_at_0 + error_at_inf

  def _evaluate(self, state):
    """Evaluates the reward from input state.

    Args:
      state: mcts.states.StateBase object. Records all the information of
          expression.

    Returns:
      Float, the reward of the current state.
    """
    error = self.get_leading_power_error(state)
    # A non-finite error (nan/inf) means the leading powers could not be
    # evaluated; fall back to the configured default value.
    if not np.isfinite(error):
      return self._default_value
    return -float(error)
class NumericalPointsAndLeadingPowers(LeadingPowers):
  """Computes reward for univariate expression with leading powers and values.

  This reward measures an univariate expression in two aspects:
  1. The root mean square error between the expression evaluated on
     input_values and the desired output_values.
  2. Whether this expression satisfies the desired leading powers at 0 and
     infinity.

  hard_penalty_default_value decides whether to use soft or hard penalty when
  the expression does not match the desired leading powers.

  Soft penalty:
    reward = (
        -(root mean square error)
        - abs(leading power difference at 0)
        - abs(leading power difference at infinity))

  Hard penalty:
    If leading power at 0 and infinity are both correct
      reward = -(root mean square error)
    Otherwise reward = hard_penalty_default_value

  If include_leading_powers is False, the reward is just
  -(root mean square error).
  """

  def __init__(
      self,
      input_values,
      output_values,
      leading_at_0,
      leading_at_inf,
      hard_penalty_default_value=None,
      variable_symbol='x',
      include_leading_powers=True,
      post_transformer=None,
      allow_nonterminal=False,
      default_value=None):
    """Initializer.

    Args:
      input_values: Numpy array with shape [num_input_values]. List of input
          values to univariate function.
      output_values: Numpy array with shape [num_output_values]. List of
          output values from the univariate function.
      leading_at_0: Float, desired leading power at 0.
      leading_at_inf: Float, desired leading power at inf.
      hard_penalty_default_value: Float, the default value for hard penalty.
          Default None, the reward will be computed by soft penalty instead
          of hard penalty.
      variable_symbol: String, the symbol of variable in function expression.
      include_leading_powers: Boolean, whether to include leading powers in
          reward.
      post_transformer: Callable. This function takes one float number and
          output a float number as the transformed value of input. It is used
          to post-transformation the reward evaluated on a state. Default
          None, no post-transformation will be applied.
      allow_nonterminal: Boolean, if False, ValueError will be raised when
          list of symbols to evaluate contains non-terminal symbol and
          default_value is None. Default False.
      default_value: Float, if allow_nonterminal is False and non-terminal
          symbol exists, instead of raising a ValueError, return default_value
          as the reward value.
    """
    super(NumericalPointsAndLeadingPowers, self).__init__(
        leading_at_0=leading_at_0,
        leading_at_inf=leading_at_inf,
        variable_symbol=variable_symbol,
        post_transformer=post_transformer,
        allow_nonterminal=allow_nonterminal,
        default_value=default_value)
    self._input_values = input_values
    self._output_values = output_values
    self._include_leading_powers = include_leading_powers
    self._hard_penalty_default_value = hard_penalty_default_value

  def get_input_values_rmse(self, state):
    """Evaluates root mean square error on input_values.

    Args:
      state: mcts.states.StateBase object. Records all the information of
          expression.

    Returns:
      Float, the root mean square error between the expression evaluated on
      input_values and the desired output_values.
    """
    expression_output_values = metrics.evaluate_expression(
        expression_string=state.get_expression(),
        grids=self._input_values,
        symbol=self._variable_symbol)
    return np.sqrt(
        np.mean((expression_output_values - self._output_values) ** 2))

  def _evaluate(self, state):
    """Evaluates the reward from input state.

    Args:
      state: mcts.states.StateBase object. Records all the information of
          expression.

    Returns:
      Float, the reward of the current state.
    """
    input_values_rmse = self.get_input_values_rmse(state)
    if not self._include_leading_powers:
      # Leading powers are ignored; the reward is the negated RMSE alone
      # (or the default value when the RMSE itself is not finite).
      if np.isfinite(input_values_rmse):
        return -input_values_rmse
      else:
        return self._default_value
    # NOTE(leeley): If computing the leading power fails
    # (timeout or sympy ValueError) or functions in symbolic_properties return
    # nan (for example, 1 / (x - x)).
    leading_power_error = self.get_leading_power_error(state)
    if self._hard_penalty_default_value is None:
      # Soft penalty.
      if np.isfinite(leading_power_error):
        return -input_values_rmse - leading_power_error
      else:
        return self._default_value
    else:
      # Hard penalty: only a (finite) zero leading-power error earns the
      # RMSE-based reward; any mismatch gets the fixed penalty value.
      if (np.isfinite(leading_power_error)
          and np.isclose(leading_power_error, 0)):
        return -input_values_rmse
      else:
        return self._hard_penalty_default_value
| 37.58104 | 85 | 0.714948 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find expression by Monte Carlo Tree Search guided by neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from neural_guided_symbolic_regression.mcts import policies
from neural_guided_symbolic_regression.mcts import rewards
from neural_guided_symbolic_regression.mcts import states
from neural_guided_symbolic_regression.models import metrics
from neural_guided_symbolic_regression.models import partial_sequence_model_generator
class NeuralProductionRuleAppendPolicy(policies.PolicyBase):
"""Appends a valid production rule on existing list of production rules.
The probabilities of the actions will be determined by the partial sequence
model.
"""
def __init__(self, sess, grammar, max_length, symbolic_properties_dict):
"""Initializer.
Args:
sess: tf.Session, the session contains the trained model to predict next
production rule from input partial sequence. If None, each step will
be selected randomly.
grammar: arithmetic_grammar.Grammar object.
max_length: Integer, the max length of production rule sequence.
symbolic_properties_dict: Dict, the keys are the symbolic properties used
as conditions. Values are the corresponding desired values of the
symbolic properties.
"""
self._sess = sess
self._grammar = grammar
self._max_length = max_length
conditions = {}
if symbolic_properties_dict is not None:
conditions.update({
key: np.array([value], dtype=np.float32)
for key, value in symbolic_properties_dict.iteritems()
})
self._conditions = conditions
def get_new_states_probs(self, state):
"""Gets new state from current state by appending a valid production rule.
Args:
state: A mcts.states.ProductionRulesState object. Contains a list of
nltk.grammar.Production objects in attribute
production_rules_sequence.
Returns:
new_states: A list of next states. Each state is a result from apply an
action in the instance attribute actions to the input state.
action_probs: A float numpy array with shape [num_actions,]. The
probability of each action in the class attribute actions.
Raises:
TypeError: If input state is not states.ProductionRulesState object.
"""
if not isinstance(state, states.ProductionRulesState):
raise TypeError('Input state shoud be an instance of '
'states.ProductionRulesState but got %s' % type(state))
production_rules_sequence = state.production_rules_sequence
if len(production_rules_sequence) > self._max_length:
# Do not allow the length of production rules sequence exceed _max_length.
# All nan probabilities will stop the rollout in MCTS.
masked_probabilities = [np.nan] * self._grammar.num_production_rules
else:
masked_probabilities = (
partial_sequence_model_generator.get_masked_probabilities_from_model(
sess=self._sess,
max_length=self._max_length,
partial_sequence=[self._grammar.prod_rule_to_index[str(prod_rule)]
for prod_rule in production_rules_sequence],
next_production_rule_mask=self._grammar.masks[
self._grammar.lhs_to_index[state.stack_peek()]],
conditions=self._conditions))
new_states = []
action_probs = []
for probability, production_rule in zip(
masked_probabilities, self._grammar.prod_rules):
if state.is_valid_to_append(production_rule):
new_state = state.copy()
new_state.append_production_rule(production_rule)
new_states.append(new_state)
action_probs.append(probability)
else:
new_states.append(None)
action_probs.append(np.nan)
action_probs = np.asarray(action_probs)
action_probs /= np.nansum(action_probs)
return new_states, action_probs
class LeadingPowers(rewards.RewardBase):
"""Computes reward for univariate expression only on leading powers.
This reward measures a univariate expression by whether this expression
satisfies the desired leading powers at 0 and infinity.
reward = -abs(leading power difference at 0)
- abs(leading power difference at infinity))
"""
def __init__(
self,
leading_at_0,
leading_at_inf,
variable_symbol='x',
post_transformer=None,
allow_nonterminal=False,
default_value=None):
"""Initializer.
Args:
leading_at_0: Float, desired leading power at 0.
leading_at_inf: Float, desired leading power at inf.
variable_symbol: String, the symbol of variable in function expression.
post_transformer: Callable. This function takes one float number and
output a float number as the transformed value of input. It is used
to post-transformation the reward evaluated on a state. Default None,
no post-transformation will be applied.
allow_nonterminal: Boolean, if False, ValueError will be raised when
list of symbols to evaluate contains non-terminal symbol and
default_value is None. Default False.
default_value: Float, if allow_nonterminal is False and non-terminal
symbol exists, instead of raising a ValueError, return default_value
as the reward value.
"""
super(LeadingPowers, self).__init__(
post_transformer=post_transformer,
allow_nonterminal=allow_nonterminal,
default_value=default_value)
self._leading_at_0 = leading_at_0
self._leading_at_inf = leading_at_inf
self._variable_symbol = variable_symbol
def get_leading_power_error(self, state):
"""Gets the leading power error.
The leading power error is defined as
abs(leading power difference at 0) + abs(leading power difference at inf).
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float.
"""
true_leading_at_0, true_leading_at_inf = (
metrics.evaluate_leading_powers_at_0_inf(
expression_string=state.get_expression(),
symbol=self._variable_symbol))
return (abs(true_leading_at_0 - self._leading_at_0)
+ abs(true_leading_at_inf - self._leading_at_inf))
def _evaluate(self, state):
"""Evaluates the reward from input state.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float, the reward of the current state.
"""
leading_power_error = self.get_leading_power_error(state)
if np.isfinite(leading_power_error):
return -float(leading_power_error)
else:
return self._default_value
class NumericalPointsAndLeadingPowers(LeadingPowers):
"""Computes reward for univariate expression with leading powers and values.
This reward measures an univariate expression in two aspects:
1. The mean square error of numerical values defined by input_values and
output_values.
2. Whether this expression satisfies the desired leading powers at 0 and
infinity.
hard_penalty_default_value decides whether to use soft or hard penalty when
the expression does not match the desired leading powers.
Soft penalty
reward = (
-(root mean square error)
- abs(leading power difference at 0)
- abs(leading power difference at infinity))
Hard penalty
If leading power at 0 and infinity are both correct
reward = -(root mean square error)
Otherwise reward = hard_penalty_default_value
If include_leading_powers is False, the reward is just
-(root mean square error).
"""
def __init__(
self,
input_values,
output_values,
leading_at_0,
leading_at_inf,
hard_penalty_default_value=None,
variable_symbol='x',
include_leading_powers=True,
post_transformer=None,
allow_nonterminal=False,
default_value=None):
"""Initializer.
Args:
input_values: Numpy array with shape [num_input_values]. List of input
values to univariate function.
output_values: Numpy array with shape [num_output_values]. List of output
values from the univariate function.
leading_at_0: Float, desired leading power at 0.
leading_at_inf: Float, desired leading power at inf.
hard_penalty_default_value: Float, the default value for hard penalty.
Default None, the reward will be computed by soft penalty instead of
hard penalty.
variable_symbol: String, the symbol of variable in function expression.
include_leading_powers: Boolean, whether to include leading powers in
reward.
post_transformer: Callable. This function takes one float number and
output a float number as the transformed value of input. It is used
to post-transformation the reward evaluated on a state. Default None,
no post-transformation will be applied.
allow_nonterminal: Boolean, if False, ValueError will be raised when
list of symbols to evaluate contains non-terminal symbol and
default_value is None. Default False.
default_value: Float, if allow_nonterminal is False and non-terminal
symbol exists, instead of raising a ValueError, return default_value
as the reward value.
"""
super(NumericalPointsAndLeadingPowers, self).__init__(
leading_at_0=leading_at_0,
leading_at_inf=leading_at_inf,
variable_symbol=variable_symbol,
post_transformer=post_transformer,
allow_nonterminal=allow_nonterminal,
default_value=default_value)
self._input_values = input_values
self._output_values = output_values
self._include_leading_powers = include_leading_powers
self._hard_penalty_default_value = hard_penalty_default_value
def get_input_values_rmse(self, state):
"""Evaluates root mean square error on input_values.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float.
"""
expression_output_values = metrics.evaluate_expression(
expression_string=state.get_expression(),
grids=self._input_values,
symbol=self._variable_symbol)
return np.sqrt(
np.mean((expression_output_values - self._output_values) ** 2))
def _evaluate(self, state):
"""Evaluates the reward from input state.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float, the reward of the current state.
"""
input_values_rmse = self.get_input_values_rmse(state)
if not self._include_leading_powers:
if np.isfinite(input_values_rmse):
return -input_values_rmse
else:
return self._default_value
# NOTE(leeley): If computing the leading power fails
# (timeout or sympy ValueError) or functions in symbolic_properties return
# nan (for example, 1 / (x - x)).
leading_power_error = self.get_leading_power_error(state)
if self._hard_penalty_default_value is None:
# Soft penalty.
if np.isfinite(leading_power_error):
return -input_values_rmse - leading_power_error
else:
return self._default_value
else:
# Hard penalty.
if (np.isfinite(leading_power_error)
and np.isclose(leading_power_error, 0)):
return -input_values_rmse
else:
return self._hard_penalty_default_value
| 0 | 0 |
c84726272f5ccce872fa9f251d5064eaed566127 | 1,302 | py | Python | Graded/G3/slam/solution/__init__.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | Graded/G3/slam/solution/__init__.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | Graded/G3/slam/solution/__init__.py | chrstrom/TTK4250 | f453c3a59597d3fe6cff7d35b790689919798b94 | [
"Unlicense"
] | null | null | null | from .pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xed\x00\x00\x00\x00\x00\x00\x18\x3d\x71\xc5\x03\x9e\x68\x9a\xa0\x37\x72\x21\xef\xad\x8a\xf4\x10\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x8c\x82\x42\x16\x77\xe5\x90\x93\xcb\xad\x1f\x2f\x25\x62\x6c\xf5\x02\xd8\xd5\xa2\x5e\x70\x77\xac\xd7\x78\x2f\xbe\x60\x40\x8f\x2b\x57\x02\x4f\xa0\x4f\xb9\x5f\x3f\x67\x56\x7c\x8c\x15\x95\x26\xdf\xaf\x5d\x30\xf2\xbc\x4b\x06\x6d\x66\x77\x1d\xf1\xd6\x67\x18\x5f\xe5\x7f\x4a\x8d\x4e\x82\x97\x42\x19\xfa\xff\x42\xe3\x1b\xe7\xa1\x36\x46\x2b\x63\x0b\x2b\x4a\x53\x6e\x1b\x06\xf1\x8d\xc9\xf5\x16\x5c\xcd\xd0\xc8\xd3\xaf\x08\x86\x5e\x20\xc7\xad\x33\x4a\x8c\x06\x71\x4d\x9a\x1e\xbe\xa7\xe8\x08\x3f\xf1\x6b\x6e\x54\x4e\x6f\x4b\xe3\x3b\x98\x9a\x2a\x3a\x01\xfa\x52\xc3\xf6\x64\x3c\xeb\xa6\xbf\x4c\xb6\x5e\xf4\x59\x40\xd3\xb9\x02\x01\x63\x0f\xa8\x5a\x9f\x60\x26\xc4\xdc\xa6\xb6\xe6\xf8\xac\xea\xaa\x04\xa4\x23\x1a\x50\xb2\x67\x91\xf9\xee\xed\xbc\x35\x18\xff\x1f\x5a\xab\x0b\xbe\x95\xc6\x72\x12\x2d\x31\xf9\x4a\x52\x60\x1f\x42\x0f\x5d\xcc\xf1\x4c\xa0\xed\xc5\x2b\x49\x68\x71\xa4\x0f\x7b\x76\x16\x50\xe6\xdb\x83\xd7\x2f\xc4\x57\xc7\x12\x02\x30\xc8\xef\xe8\x38\xf6', 2) | 434 | 1,243 | 0.754992 | from .pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xed\x00\x00\x00\x00\x00\x00\x18\x3d\x71\xc5\x03\x9e\x68\x9a\xa0\x37\x72\x21\xef\xad\x8a\xf4\x10\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x8c\x82\x42\x16\x77\xe5\x90\x93\xcb\xad\x1f\x2f\x25\x62\x6c\xf5\x02\xd8\xd5\xa2\x5e\x70\x77\xac\xd7\x78\x2f\xbe\x60\x40\x8f\x2b\x57\x02\x4f\xa0\x4f\xb9\x5f\x3f\x67\x56\x7c\x8c\x15\x95\x26\xdf\xaf\x5d\x30\xf2\xbc\x4b\x06\x6d\x66\x77\x1d\xf1\xd6\x67\x18\x5f\xe5\x7f\x4a\x8d\x4e\x82\x97\x42\x19\xfa\xff\x42\xe3\x1b\xe7\xa1\x36\x46\x2b\x63\x0b\x2b\x4a\x53\x6e\x1b\x06\xf1\x8d\xc9\xf5\x16\x5c\xcd\xd0\xc8\xd3\xaf\x08\x86\x5e\x20\xc7\xad\x33\x4a\x8c\x06\x71\x4d\x9a\x1e\xbe\xa7\xe8\x08\x3f\xf1\x6b\x6e\x54\x4e\x6f\x4b\xe3\x3b\x98\x9a\x2a\x3a\x01\xfa\x52\xc3\xf6\x64\x3c\xeb\xa6\xbf\x4c\xb6\x5e\xf4\x59\x40\xd3\xb9\x02\x01\x63\x0f\xa8\x5a\x9f\x60\x26\xc4\xdc\xa6\xb6\xe6\xf8\xac\xea\xaa\x04\xa4\x23\x1a\x50\xb2\x67\x91\xf9\xee\xed\xbc\x35\x18\xff\x1f\x5a\xab\x0b\xbe\x95\xc6\x72\x12\x2d\x31\xf9\x4a\x52\x60\x1f\x42\x0f\x5d\xcc\xf1\x4c\xa0\xed\xc5\x2b\x49\x68\x71\xa4\x0f\x7b\x76\x16\x50\xe6\xdb\x83\xd7\x2f\xc4\x57\xc7\x12\x02\x30\xc8\xef\xe8\x38\xf6', 2) | 0 | 0 |
1ca24822ceacb59afa74f32fca7fe5d5d075a42c | 5,532 | py | Python | tests/unit/modules/test_mine.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | 1 | 2022-02-09T06:40:14.000Z | 2022-02-09T06:40:14.000Z | tests/unit/modules/test_mine.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_mine.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mine as mine
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MineTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.mine
    '''
    def setup_loader_modules(self):
        return {mine: {}}

    @staticmethod
    def _docker_ps_response():
        '''
        Return the canned mine 'docker.ps' data shared by the get_docker
        tests: one minion with two interfaces (docker0 and eth0) and a
        single container publishing port 80 on all interfaces (IP 0.0.0.0).
        '''
        return {
            'localhost': {
                'host': {
                    'interfaces': {
                        'docker0': {
                            'hwaddr': '88:99:00:00:99:99',
                            'inet': [{'address': '172.17.42.1',
                                      'broadcast': None,
                                      'label': 'docker0',
                                      'netmask': '255.255.0.0'}],
                            'inet6': [{'address': 'ffff::eeee:aaaa:bbbb:8888',
                                       'prefixlen': '64'}],
                            'up': True},
                        'eth0': {'hwaddr': '88:99:00:99:99:99',
                                 'inet': [{'address': '192.168.0.1',
                                           'broadcast': '192.168.0.255',
                                           'label': 'eth0',
                                           'netmask': '255.255.255.0'}],
                                 'inet6': [{'address':
                                            'ffff::aaaa:aaaa:bbbb:8888',
                                            'prefixlen': '64'}],
                                 'up': True},
                    }},
                'abcdefhjhi1234567899': {  # container Id
                    'Ports': [{'IP': '0.0.0.0',  # we bind on every interfaces
                               'PrivatePort': 80,
                               'PublicPort': 80,
                               'Type': 'tcp'}],
                    'Image': 'image:latest',
                    'Info': {'Id': 'abcdefhjhi1234567899'},
                },
            }}

    def test_get_docker(self):
        '''
        Test for Get all mine data for 'docker.ps' and run an
        aggregation.
        '''
        with patch.object(mine, 'get',
                          return_value=self._docker_ps_response()):
            ret = mine.get_docker()
            # Sort ifaces since that will change between py2 and py3
            ret['image:latest']['ipv4'][80] = sorted(
                ret['image:latest']['ipv4'][80])
            self.assertEqual(ret,
                             {'image:latest': {
                                 'ipv4': {80: sorted([
                                     '172.17.42.1:80',
                                     '192.168.0.1:80',
                                 ])}}})

    def test_get_docker_with_container_id(self):
        '''
        Test for Get all mine data for 'docker.ps' and run an
        aggregation, with the container id included in each endpoint.
        '''
        with patch.object(mine, 'get',
                          return_value=self._docker_ps_response()):
            ret = mine.get_docker(with_container_id=True)
            # Sort ifaces since that will change between py2 and py3
            ret['image:latest']['ipv4'][80] = sorted(
                ret['image:latest']['ipv4'][80])
            self.assertEqual(ret,
                             {'image:latest': {
                                 'ipv4': {80: sorted([
                                     ('172.17.42.1:80', 'abcdefhjhi1234567899'),
                                     ('192.168.0.1:80', 'abcdefhjhi1234567899'),
                                 ])}}})
| 44.256 | 99 | 0.357014 | # -*- coding: utf-8 -*-
'''
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mine as mine
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MineTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.mine
    '''
    def setup_loader_modules(self):
        return {mine: {}}

    @staticmethod
    def _docker_ps_response():
        '''
        Canned mine data for ``docker.ps``, shared by the get_docker
        tests: one minion with a docker0 bridge plus an eth0 NIC, and a
        single container publishing TCP port 80 on all interfaces.
        '''
        return {
            'localhost': {
                'host': {
                    'interfaces': {
                        'docker0': {
                            'hwaddr': '88:99:00:00:99:99',
                            'inet': [{'address': '172.17.42.1',
                                      'broadcast': None,
                                      'label': 'docker0',
                                      'netmask': '255.255.0.0'}],
                            'inet6': [{'address': 'ffff::eeee:aaaa:bbbb:8888',
                                       'prefixlen': '64'}],
                            'up': True},
                        'eth0': {'hwaddr': '88:99:00:99:99:99',
                                 'inet': [{'address': '192.168.0.1',
                                           'broadcast': '192.168.0.255',
                                           'label': 'eth0',
                                           'netmask': '255.255.255.0'}],
                                 'inet6': [{'address':
                                            'ffff::aaaa:aaaa:bbbb:8888',
                                            'prefixlen': '64'}],
                                 'up': True},
                    }},
                'abcdefhjhi1234567899': {  # container Id
                    'Ports': [{'IP': '0.0.0.0',  # we bind on every interfaces
                               'PrivatePort': 80,
                               'PublicPort': 80,
                               'Type': 'tcp'}],
                    'Image': 'image:latest',
                    'Info': {'Id': 'abcdefhjhi1234567899'},
                },
            }}

    def test_get_docker(self):
        '''
        Test for Get all mine data for 'docker.ps' and run an
        aggregation.
        '''
        with patch.object(mine, 'get',
                          return_value=self._docker_ps_response()):
            ret = mine.get_docker()
            # Sort ifaces since that will change between py2 and py3
            ret['image:latest']['ipv4'][80] = sorted(ret['image:latest']['ipv4'][80])
            self.assertEqual(ret,
                             {'image:latest': {
                                 'ipv4': {80: sorted([
                                     '172.17.42.1:80',
                                     '192.168.0.1:80',
                                 ])}}})

    def test_get_docker_with_container_id(self):
        '''
        Same aggregation as ``test_get_docker`` but with
        ``with_container_id=True`` so each address is paired with the
        container id.
        '''
        with patch.object(mine, 'get',
                          return_value=self._docker_ps_response()):
            ret = mine.get_docker(with_container_id=True)
            # Sort ifaces since that will change between py2 and py3
            ret['image:latest']['ipv4'][80] = sorted(ret['image:latest']['ipv4'][80])
            self.assertEqual(ret,
                             {'image:latest': {
                                 'ipv4': {80: sorted([
                                     ('172.17.42.1:80', 'abcdefhjhi1234567899'),
                                     ('192.168.0.1:80', 'abcdefhjhi1234567899'),
                                 ])}}})
| 0 | 0 |
261d9ad0af2c41868951b0b120d7fd4d4af8e62d | 13,215 | py | Python | metrics/custom_losses.py | tbuikr/fastMRI | 4395380bbcddefe0bcfea76a2790e0d978009dea | [
"MIT"
] | 2 | 2019-12-09T04:57:57.000Z | 2020-02-24T18:04:12.000Z | metrics/custom_losses.py | tbuikr/fastMRI | 4395380bbcddefe0bcfea76a2790e0d978009dea | [
"MIT"
] | null | null | null | metrics/custom_losses.py | tbuikr/fastMRI | 4395380bbcddefe0bcfea76a2790e0d978009dea | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
from metrics.ssim import ssim
from metrics.tv_loss import TVLoss
#import models.networks as networks
from metrics.my_ssim import ssim_loss
# class CSSIM(nn.Module): # Complementary SSIM
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# return 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
# sigma=self.sigma, reduction=self.reduction)
# class CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# input = input.unsqueeze(1)
# target = target.unsqueeze(1)
# ssim_value = ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
# return ssim_value #+ self.l1_weight * l1_loss
class CSSIM(nn.Module):  # Complementary SSIM
    """Complementary SSIM loss: ``1 - SSIM(input, target)``.

    Inputs arrive as 3D batches (N, H, W) and are given a singleton
    channel axis before being handed to ``ssim_loss``.
    """
    def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
        super().__init__()
        self.max_val = default_range  # dynamic range of the images
        self.filter_size = filter_size  # side length of the Gaussian window
        self.k1 = k1
        self.k2 = k2
        self.sigma = sigma  # std-dev of the Gaussian window
        self.reduction = reduction

    def forward(self, input, target, max_val=None):
        max_val = self.max_val if max_val is None else max_val
        # ssim_loss expects NCHW, so insert a channel axis.
        # (Fix: removed a leftover debug print of input.max() here.)
        input = input.unsqueeze(1)
        target = target.unsqueeze(1)
        # Higher SSIM means more similar, so minimise the complement.
        return 1 - ssim_loss(input, target, max_val=max_val, filter_size=self.filter_size, k1=self.k1, k2=self.k2,
                             sigma=self.sigma, reduction=self.reduction)
class L1CSSIM(nn.Module):  # Replace this with a system of summing losses in Model Trainer later on.
    """Weighted sum of complementary SSIM and L1: ``(1 - SSIM) + w * L1``."""

    def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
        super().__init__()
        self.l1_weight = l1_weight  # multiplier w applied to the L1 term
        self.max_val = default_range
        self.filter_size = filter_size
        self.k1 = k1  # stored but not forwarded to ssim() below
        self.k2 = k2
        self.sigma = sigma
        self.reduction = reduction

    def forward(self, input, target, max_val=None):
        if max_val is None:
            max_val = self.max_val
        similarity = ssim(input, target, max_val=max_val, filter_size=self.filter_size,
                          sigma=self.sigma, reduction=self.reduction)
        data_term = F.l1_loss(input, target, reduction=self.reduction)
        return (1 - similarity) + self.l1_weight * data_term
class L1CSSIMTV(nn.Module):  # Replace this with a system of summing losses in Model Trainer later on.
    """Complementary SSIM plus weighted L1 plus a total-variation penalty."""

    def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
        super().__init__()
        self.l1_weight = l1_weight
        self.max_val = default_range
        self.filter_size = filter_size
        self.k1 = k1
        self.k2 = k2
        self.sigma = sigma
        self.reduction = reduction
        self.tvloss_weight = tvloss_weight  # scale handed to TVLoss
        self.p = p  # norm order handed to TVLoss

    def forward(self, input, target, max_val=None):
        if max_val is None:
            max_val = self.max_val
        complement = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
                              sigma=self.sigma, reduction=self.reduction)
        data_term = F.l1_loss(input, target, reduction=self.reduction)
        smoothness = TVLoss(input, self.tvloss_weight, self.p)
        return complement + self.l1_weight * data_term + smoothness
class C1CSSIMTV(nn.Module):  # Replace this with a system of summing losses in Model Trainer later on.
    """Complementary SSIM plus weighted Charbonnier plus a total-variation penalty."""

    def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
        super().__init__()
        self.l1_weight = l1_weight
        self.max_val = default_range
        self.filter_size = filter_size
        self.k1 = k1
        self.k2 = k2
        self.sigma = sigma
        self.reduction = reduction
        self.tvloss_weight = tvloss_weight
        self.p = p
        self.cham = CharbonnierLoss()  # smooth L1-style data term

    def forward(self, input, target, max_val=None):
        if max_val is None:
            max_val = self.max_val
        complement = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
                              sigma=self.sigma, reduction=self.reduction)
        data_term = self.cham(input, target)
        smoothness = TVLoss(input, self.tvloss_weight, self.p)
        return complement + self.l1_weight * data_term + smoothness
class ECSSIMTV(nn.Module):  # Replace this with a system of summing losses in Model Trainer later on.
    """Complementary SSIM plus weighted elastic (L1+L2) term plus TV penalty.

    Returns a ``(total, cssim, tv)`` triple so callers can log the parts.
    """

    def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
        super().__init__()
        self.l1_weight = l1_weight
        self.max_val = default_range
        self.filter_size = filter_size
        self.k1 = k1
        self.k2 = k2
        self.sigma = sigma
        self.reduction = reduction
        self.tvloss_weight = tvloss_weight
        self.p = p
        self.ElasticLoss = ElasticLoss()

    def forward(self, input, target, max_val=None):
        if max_val is None:
            max_val = self.max_val
        complement = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
                              sigma=self.sigma, reduction=self.reduction)
        data_term = self.ElasticLoss(input, target)
        smoothness = TVLoss(input, self.tvloss_weight, self.p)
        total = complement + self.l1_weight * data_term + smoothness
        return total, complement, smoothness
## Combination loss for SRRaGAN
class SRRaGAN(nn.Module):
    # NOTE(review): this class cannot run as written -- see the notes
    # below.  It looks like an unfinished port of a combined SRRaGAN
    # loss; confirm intent before using.
    def __init__(self, elastic_weight = 1):
        super().__init__()
        # NOTE(review): ``self.device`` is never assigned anywhere, so
        # construction raises AttributeError.
        self.cri_pix = ElasticLoss().to(self.device) # Pixel Loss
        self.cri_fea = ElasticLoss().to(self.device) # Feature Loss
        # NOTE(review): ``networks`` is only available via the
        # commented-out import at the top of the file, and ``opt`` is
        # undefined in this scope.
        self.netF = networks.define_F(opt, use_bn=False).to(self.device)
    def forward(self, input, target, max_val=None):
        # NOTE(review): self.max_val / self.filter_size / self.sigma /
        # self.reduction are never set in __init__.
        max_val = self.max_val if max_val is None else max_val
        cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
        # NOTE(review): ``cssim`` is computed but discarded; forward
        # returns None.
        return
class CharbonnierLoss(nn.Module):
    """Charbonnier penalty, a smooth approximation of L1.

    Computes ``mean(sqrt((x - y)^2 + eps))`` over an NCHW batch; ``eps``
    keeps the gradient finite at zero residual.
    """

    def __init__(self, eps=1e-6):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps

    def forward(self, x, y):
        batch, chans, height, width = y.size()
        residual = x - y
        total = torch.sqrt(residual * residual + self.eps).sum()
        return total / (chans * batch * height * width)
def LoG(imgHF):
    """Convolve ``imgHF`` (NCHW) with a 5x5 Laplacian-of-Gaussian kernel.

    NOTE(review): ``np`` is used here but numpy is never imported in
    this file -- this function raises NameError as the file stands.
    """
    # Discrete 5x5 LoG stencil (centre -16, ring of positive weights).
    weight = [
        [0, 0, 1, 0, 0],
        [0, 1, 2, 1, 0],
        [1, 2, -16, 2, 1],
        [0, 1, 2, 1, 0],
        [0, 0, 1, 0, 0]
    ]
    weight = np.array(weight)
    weight_np = np.zeros((1, 1, 5, 5))
    weight_np[0, 0, :, :] = weight
    # NOTE(review): repeating over the channel AND batch axes makes the
    # weight shape (N*..., C, 5, 5) with no ``groups`` argument below;
    # this only looks correct for N = C = 1 -- confirm for larger inputs.
    weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)
    weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)
    # NOTE(review): device is hard-coded to cuda:0, so CPU (or other-GPU)
    # inputs fail here.
    weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to('cuda:0')
    # NOTE(review): padding=1 with a 5x5 kernel shrinks H and W by 2;
    # padding=2 may have been intended -- confirm before changing.
    return nn.functional.conv2d(imgHF, weight, padding=1)
class GaussianSmoothing(nn.Module):
    """Depthwise Gaussian blur for 1D/2D/3D inputs.

    Builds a (separable) Gaussian kernel once at construction time and
    applies it channel-by-channel via a grouped convolution with no
    padding, so the output shrinks by ``kernel_size - 1`` per axis.
    """

    def __init__(self, channels, kernel_size=15, sigma=3, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Scalars are broadcast to one value per spatial dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim

        # Product of one 1D Gaussian per spatial dimension, evaluated on
        # the full coordinate grid, then normalised to unit mass.
        grids = torch.meshgrid(
            [
                torch.arange(size, dtype=torch.float32)
                for size in kernel_size
            ]
        )
        kernel = 1
        for size, std, grid in zip(kernel_size, sigma, grids):
            centre = (size - 1) / 2
            kernel = kernel * torch.exp(-((grid - centre) / std) ** 2 / 2) \
                / (std * math.sqrt(2 * math.pi))
        kernel = kernel / torch.sum(kernel)

        # Shape to (channels, 1, *kernel_size): with groups=channels the
        # same filter is applied independently to every channel.
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels

        conv_by_dim = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}
        if dim not in conv_by_dim:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )
        self.conv = conv_by_dim[dim]

    def forward(self, input):
        return self.conv(input, weight=self.weight, groups=self.groups)
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
    """Adversarial criterion wrapping the vanilla / lsgan / wgan-gp variants.

    For the two supervised variants a constant target tensor is built on
    the fly; for wgan-gp the boolean "is real" flag is consumed directly
    by the critic objective.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        if self.gan_type == 'wgan-gp':
            # WGAN critic objective: maximise real scores, minimise fakes.
            def wgan_loss(input, target):
                # target is boolean
                return -1 * input.mean() if target else input.mean()
            self.loss = wgan_loss
        elif self.gan_type in ('vanilla', 'lsgan'):
            self.loss = nn.BCEWithLogitsLoss() if self.gan_type == 'vanilla' else nn.MSELoss()
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        if self.gan_type == 'wgan-gp':
            return target_is_real  # the wgan closure takes the boolean directly
        fill = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill)

    def forward(self, input, target_is_real):
        return self.loss(input, self.get_target_label(input, target_is_real))
class GradientPenaltyLoss(nn.Module):
    """WGAN-GP penalty: pushes the critic's per-sample gradient norm towards 1."""

    def __init__(self, device=torch.device('cpu')):
        super(GradientPenaltyLoss, self).__init__()
        self.register_buffer('grad_outputs', torch.Tensor())
        self.grad_outputs = self.grad_outputs.to(device)

    def get_grad_outputs(self, input):
        # Lazily (re)size the all-ones cotangent to match the critic output.
        if self.grad_outputs.size() != input.size():
            self.grad_outputs.resize_(input.size()).fill_(1.0)
        return self.grad_outputs

    def forward(self, interp, interp_crit):
        ones = self.get_grad_outputs(interp_crit)
        gradient = torch.autograd.grad(outputs=interp_crit, inputs=interp,
                                       grad_outputs=ones, create_graph=True,
                                       retain_graph=True, only_inputs=True)[0]
        per_sample_norm = gradient.view(gradient.size(0), -1).norm(2, dim=1)
        return ((per_sample_norm - 1) ** 2).mean()
class HFENL1Loss(nn.Module):
    """High-frequency error norm, L1 flavour: sum |LoG(G(x) - G(y))|.

    G is a 5-tap Gaussian blur (sigma=1); inputs are reflect-padded by 2
    beforehand so the blur preserves H and W.
    """

    def __init__(self):
        super(HFENL1Loss, self).__init__()

    def forward(self, input, target):
        channels = input.shape[1]
        # NOTE: the blur (and LoG's weights) live on cuda:0, so inputs
        # are expected there.
        blur = GaussianSmoothing(channels, 5, 1).to('cuda:0')
        blurred_in = blur(nn.functional.pad(input, (2, 2, 2, 2), mode='reflect'))
        blurred_tg = blur(nn.functional.pad(target, (2, 2, 2, 2), mode='reflect'))
        return torch.abs(LoG(blurred_in - blurred_tg)).sum()
class HFENL2Loss(nn.Module):
    """High-frequency error norm, L2 flavour: sum LoG(G(x) - G(y))^2.

    G is a 5-tap Gaussian blur (sigma=1); inputs are reflect-padded by 2
    beforehand so the blur preserves H and W.
    """

    def __init__(self):
        super(HFENL2Loss, self).__init__()

    def forward(self, input, target):
        channels = input.shape[1]
        # NOTE: the blur (and LoG's weights) live on cuda:0, so inputs
        # are expected there.
        blur = GaussianSmoothing(channels, 5, 1).to('cuda:0')
        blurred_in = blur(nn.functional.pad(input, (2, 2, 2, 2), mode='reflect'))
        blurred_tg = blur(nn.functional.pad(target, (2, 2, 2, 2), mode='reflect'))
        return torch.sum((LoG(blurred_in - blurred_tg)) ** 2)
class ElasticLoss(nn.Module):
    """Elastic loss: convex mix of MSE and L1, ``a*MSE + (1-a)*L1``.

    Fix: the mixing weights used to be moved to ``'cuda:0'`` at
    construction, which crashed on CPU-only hosts.  They are now kept on
    the CPU; the 0-dim scalar tensors below interoperate with inputs on
    any device, so CUDA callers see identical behaviour.
    """

    def __init__(self, a=0.2):  # a=0.5 default
        super(ElasticLoss, self).__init__()
        # alpha[0] scales the MSE term, alpha[1] the L1 term.
        self.alpha = torch.FloatTensor([a, 1 - a])

    def forward(self, input, target):
        if not isinstance(input, tuple):
            input = (input,)
        for i in range(len(input)):
            l2 = nn.functional.mse_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[0])
            l1 = nn.functional.l1_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[1])
            # NOTE(review): for tuple inputs only the LAST element's loss
            # survives this loop (each iteration overwrites ``loss``);
            # summing may have been intended -- confirm before changing.
            loss = l1 + l2
        return loss
from torch import nn
import torch.nn.functional as F
from metrics.ssim import ssim
from metrics.tv_loss import TVLoss
#import models.networks as networks
from metrics.my_ssim import ssim_loss
# class CSSIM(nn.Module): # Complementary SSIM
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# return 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
# sigma=self.sigma, reduction=self.reduction)
# class CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# input = input.unsqueeze(1)
# target = target.unsqueeze(1)
# ssim_value = ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
# return ssim_value #+ self.l1_weight * l1_loss
class CSSIM(nn.Module): # Complementary SSIM
def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
super().__init__()
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
input = input.unsqueeze(1)
print (input.max())
target = target.unsqueeze(1)
return 1- ssim_loss(input, target, max_val=max_val, filter_size=self.filter_size, k1=self.k1, k2=self.k2,
sigma=self.sigma, reduction=self.reduction)
class L1CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = F.l1_loss(input, target, reduction=self.reduction)
return cssim + self.l1_weight * l1_loss
class L1CSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = F.l1_loss(input, target, reduction=self.reduction)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss
class C1CSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
self.cham = CharbonnierLoss()
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = self.cham(input, target)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss
class ECSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
self.ElasticLoss = ElasticLoss()
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = self.ElasticLoss(input, target)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss, cssim, tv_loss
## Combination loss for SRRaGAN
class SRRaGAN(nn.Module):
def __init__(self, elastic_weight = 1):
super().__init__()
self.cri_pix = ElasticLoss().to(self.device) # Pixel Loss
self.cri_fea = ElasticLoss().to(self.device) # Feature Loss
self.netF = networks.define_F(opt, use_bn=False).to(self.device)
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
return
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-6):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
b, c, h, w = y.size()
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss/(c*b*h*w)
def LoG(imgHF):
weight = [
[0, 0, 1, 0, 0],
[0, 1, 2, 1, 0],
[1, 2, -16, 2, 1],
[0, 1, 2, 1, 0],
[0, 0, 1, 0, 0]
]
weight = np.array(weight)
weight_np = np.zeros((1, 1, 5, 5))
weight_np[0, 0, :, :] = weight
weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)
weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)
weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to('cuda:0')
return nn.functional.conv2d(imgHF, weight, padding=1)
class GaussianSmoothing(nn.Module):
def __init__(self, channels, kernel_size=15, sigma=3, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
kernel = kernel / torch.sum(kernel)
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input):
return self.conv(input, weight=self.weight, groups=self.groups)
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class HFENL1Loss(nn.Module):
def __init__(self):
super(HFENL1Loss, self).__init__()
def forward(self, input, target):
c = input.shape[1]
smoothing = GaussianSmoothing(c, 5, 1)
smoothing = smoothing.to('cuda:0')
input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')
target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')
input_smooth = smoothing(input_smooth)
target_smooth = smoothing(target_smooth)
return torch.abs(LoG(input_smooth-target_smooth)).sum()
class HFENL2Loss(nn.Module):
def __init__(self):
super(HFENL2Loss, self).__init__()
def forward(self, input, target):
c = input.shape[1]
smoothing = GaussianSmoothing(c, 5, 1)
smoothing = smoothing.to('cuda:0')
input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')
target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')
input_smooth = smoothing(input_smooth)
target_smooth = smoothing(target_smooth)
return torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))
class ElasticLoss(nn.Module):
def __init__(self, a=0.2): #a=0.5 default
super(ElasticLoss, self).__init__()
self.alpha = torch.FloatTensor([a, 1 - a]).to('cuda:0')
def forward(self, input, target):
if not isinstance(input, tuple):
input = (input,)
for i in range(len(input)):
l2 = nn.functional.mse_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[0])
l1 = nn.functional.l1_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[1])
loss = l1 + l2
return loss | 0 | 0 |
36e99d037172fee82cbe9e3275f4053561bba8c8 | 1,808 | py | Python | tests/integration_tests/data_steward/gcloud/gcs_client_test.py | dcarbone/curation | 68f9ba9466646d73509d424567b64566856fb8e8 | [
"MIT"
] | 1 | 2019-03-18T18:22:41.000Z | 2019-03-18T18:22:41.000Z | tests/integration_tests/data_steward/gcloud/gcs_client_test.py | nishanthpp93/curation | ac9f38b2f4580ae806121dd929293159132c7d2a | [
"MIT"
] | null | null | null | tests/integration_tests/data_steward/gcloud/gcs_client_test.py | nishanthpp93/curation | ac9f38b2f4580ae806121dd929293159132c7d2a | [
"MIT"
] | 1 | 2021-09-16T14:25:19.000Z | 2021-09-16T14:25:19.000Z | """
Test the Google Cloud Storage Client and associated helper functions
"""
# Python stl imports
import os
import unittest
# Project imports
from gcloud.gcs import StorageClient
# Third-party imports
class GcsClientTest(unittest.TestCase):
    """Integration tests for StorageClient against the fake GCS bucket
    named by the BUCKET_NAME_FAKE environment variable."""

    @classmethod
    def setUpClass(cls):
        banner = '**************************************************************'
        print(banner)
        print(cls.__name__)
        print(banner)

    def setUp(self):
        self.client = StorageClient()
        self.bucket_name: str = os.environ.get('BUCKET_NAME_FAKE')
        self.prefix: str = 'prefix'
        self.data: bytes = b'bytes'
        # Four object prefixes nested one level below self.prefix.
        self.sub_prefixes: tuple = tuple(f'{self.prefix}/{leaf}' for leaf in 'abcd')

    def test_empty_bucket(self):
        # Emptying a freshly staged bucket must leave no blobs behind.
        self.client.empty_bucket(self.bucket_name)
        self._stage_bucket()
        self.client.empty_bucket(self.bucket_name)
        remaining = self.client.list_blobs(self.bucket_name)
        self.assertCountEqual(remaining, [])

    def test_list_sub_prefixes(self):
        self.client.empty_bucket(self.bucket_name)
        self._stage_bucket()
        items = self.client.list_sub_prefixes(self.bucket_name, self.prefix)
        self.assertEqual(len(self.sub_prefixes), len(items))
        for item in items:
            # Drop the trailing character before comparing with the
            # staged prefixes (items come back with a delimiter suffix).
            self.assertIn(item[:-1], self.sub_prefixes)
        self.client.empty_bucket(self.bucket_name)

    def _stage_bucket(self):
        """Upload one small object under every sub-prefix."""
        bucket = self.client.bucket(self.bucket_name)
        for sub_prefix in self.sub_prefixes:
            bucket.blob(f'{sub_prefix}/obj.txt').upload_from_string(self.data)

    def tearDown(self):
        self.client.empty_bucket(self.bucket_name)
| 29.639344 | 79 | 0.606195 | """
Test the Google Cloud Storage Client and associated helper functions
"""
# Python stl imports
import os
import unittest
# Project imports
from gcloud.gcs import StorageClient
# Third-party imports
class GcsClientTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.client = StorageClient()
self.bucket_name: str = os.environ.get('BUCKET_NAME_FAKE')
self.prefix: str = 'prefix'
self.data: bytes = b'bytes'
self.sub_prefixes: tuple = (f'{self.prefix}/a', f'{self.prefix}/b',
f'{self.prefix}/c', f'{self.prefix}/d')
def test_empty_bucket(self):
self.client.empty_bucket(self.bucket_name)
self._stage_bucket()
self.client.empty_bucket(self.bucket_name)
actual = self.client.list_blobs(self.bucket_name)
expected: list = []
self.assertCountEqual(actual, expected)
def test_list_sub_prefixes(self):
self.client.empty_bucket(self.bucket_name)
self._stage_bucket()
items = self.client.list_sub_prefixes(self.bucket_name, self.prefix)
self.assertEqual(len(self.sub_prefixes), len(items))
for item in items:
self.assertIn(item[:-1], self.sub_prefixes)
self.client.empty_bucket(self.bucket_name)
def _stage_bucket(self):
bucket = self.client.bucket(self.bucket_name)
for sub_prefix in self.sub_prefixes:
bucket.blob(f'{sub_prefix}/obj.txt').upload_from_string(self.data)
def tearDown(self):
self.client.empty_bucket(self.bucket_name)
| 0 | 0 |
e52b2e1c1ea59fa64c3206672451d1ca75882b8f | 297 | py | Python | Contacts/urls.py | simonescob/Agendadj | badd90f3fce0950aa151840f7015c68632c7a203 | [
"MIT"
] | null | null | null | Contacts/urls.py | simonescob/Agendadj | badd90f3fce0950aa151840f7015c68632c7a203 | [
"MIT"
] | null | null | null | Contacts/urls.py | simonescob/Agendadj | badd90f3fce0950aa151840f7015c68632c7a203 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the Contacts app; each path maps to a view in views.py.
urlpatterns = [
    path('', views.index, name="home"),
    # NOTE(review): unnamed route serving the same view as '' above --
    # cannot be reversed by name; confirm whether it is still needed.
    path('all', views.index),
    path('create', views.create, name="create"),
    # Routes below capture the contact's integer primary key.
    path('delete/<int:contact_id>', views.delete, name="delete"),
    path('edit/<int:contact_id>', views.edit, name="edit"),
]
from . import views
urlpatterns = [
path('', views.index, name="home"),
path('all', views.index),
path('create', views.create, name="create"),
path('delete/<int:contact_id>', views.delete, name="delete"),
path('edit/<int:contact_id>', views.edit, name="edit"),
] | 0 | 0 |
92ad40269e21a10d36efc821460ad6ea637d560c | 4,569 | py | Python | glidepy.py | quantizimo/test | 3d830dac676877db0a3a63ee0c482bc1b6871d6a | [
"MIT"
] | null | null | null | glidepy.py | quantizimo/test | 3d830dac676877db0a3a63ee0c482bc1b6871d6a | [
"MIT"
] | null | null | null | glidepy.py | quantizimo/test | 3d830dac676877db0a3a63ee0c482bc1b6871d6a | [
"MIT"
] | null | null | null | import numpy as np
from scipy.optimize import fmin
import math
from scipy.optimize import minimize
class Glider:
def __init__(self, speeds, sink_rates, weight_ref, weight=None):
self._init_constants()
# self.wm = 0
self.altitude = 0
self.flight_state = "on_ground"
self.weight_ref = weight_ref
self.weight = weight
self.original_speeds = speeds
self.original_sink_rates = sink_rates
self._calc_scale_factor()
self._scale_speeds()
self._calc_polar_model()
self.calc_stf_model()
def _init_constants(self):
self.kmh_to_knots = 0.539957
self.ms_to_knots = 1.94384
self.knots_to_kmh = 1.852
self.nm_to_feet = 6076.12
self.nm_to_sm = 1.15078
def _calc_scale_factor(self):
if self.weight is None:
self._scale_factor = 1.0
else:
self._scale_factor = math.sqrt(self.weight / self.weight_ref)
def _scale_speeds(self):
self.speeds = self.kmh_to_knots * self._scale_factor * np.array(self.original_speeds)
self.sink_rates = self.ms_to_knots * self._scale_factor * np.array(self.original_sink_rates)
def _calc_polar_model(self):
self._polar_model = np.polyfit(self.speeds, self.sink_rates, 3)
def _glide_ratio2(self, speed):
return speed/self.sink_rate(speed)
def best_ld(self):
res = minimize(self._glide_ratio2, 0, method="SLSQP")
return -res.fun
def best_ld_speed(self):
res = minimize(self._glide_ratio2, 0, method="SLSQP")
return res.x[0]
def calc_stf_model(self):
distance = 1
lower_limit = 0.0001
climb_range = np.arange(lower_limit, 10.5, 1)
stf_values = [fmin(self.calc_total_time, 1, args=(x, distance), disp=False)[0] for x in climb_range]
self._stf_model = np.polyfit(climb_range, stf_values, 4)
def sink_rate(self, speed):
return self.polar(speed)
def polar(self, speed):
return np.polyval(self._polar_model, speed)
def glide_ratio(self, speed):
return -speed/self.sink_rate(speed)
def calc_avg_speed(self, speed, climb_rate, e=1):
total_time = self.calc_total_time(speed, climb_rate, e)
avg_speed = e / total_time
return avg_speed
def calc_total_time(self, speed, climb_rate, e=1):
ws = np.polyval(self._polar_model, speed)
total_time = e * (-(ws + 0) / (speed * climb_rate) + (1 / speed))
return total_time
def speed_to_fly(self, climb_rate, explicit=False):
if explicit:
return fmin(self.calc_total_time, 1, args=[climb_rate], disp=False)[0]
else:
return np.polyval(self._stf_model, climb_rate)
def altitude_lost(self, speed, distance):
sink_rate = -(self.polar(speed))
altitude_lost = sink_rate * distance/speed
return altitude_lost, altitude_lost * self.nm_to_feet
def get_range(self, altitude, speed):
sink_rate = self.polar(speed)
glide_time = altitude/self.nm_to_feet/(-sink_rate)
range1 = speed * glide_time
return range1
def set_altitude(self, altitude):
self.altitude = altitude
self.flight_state = 'flying'
def cruise(self, mc, distance):
speed = self.speed_to_fly(mc)
altitude_loss = self.altitude_lost(speed, distance)[1]
if altitude_loss > self.altitude:
self.altitude = 0
self.flight_state = "on_ground"
else:
self.altitude = self.altitude - altitude_loss
cruise_time = distance/speed
return cruise_time
def climb(self, climb_step_size, climb_rate):
    """Gain ``climb_step_size`` feet at ``climb_rate`` knots; return the time spent."""
    self.altitude += climb_step_size
    return climb_step_size / (climb_rate * self.nm_to_feet)
class Thermals:
    """Randomly placed thermals along a course.

    Inter-thermal spacings are drawn from an exponential distribution with
    mean ``dt`` and bumped up so every spacing is at least ``distance_step``.
    """

    def __init__(self, dt, distance_to_destination, distance_step):
        self.cum_dist = 0
        self.thermals = np.array([])
        while self.cum_dist <= distance_to_destination:
            spacing = np.random.exponential(dt, 1)[0]
            if spacing < distance_step:
                # Enforce a minimum spacing of one distance step.
                spacing += distance_step
            self.thermals = np.append(self.thermals, spacing)
            self.cum_dist += spacing
        # Absolute positions of the thermals along the course.
        self.cum_sum = np.cumsum(self.thermals)

    def is_thermal(self, dist, distance_step):
        """Return the position of a thermal strictly inside (dist, dist + distance_step), else 0."""
        for position in self.cum_sum:
            if (position > dist) & (position < (dist + distance_step)):
                return position
        return 0
| 31.729167 | 108 | 0.632524 | import numpy as np
from scipy.optimize import fmin
import math
from scipy.optimize import minimize
class Glider:
    """Glider performance model derived from a measured speed polar.

    Input speeds are km/h and sink rates m/s; both are converted to knots
    and scaled for wing loading via sqrt(weight / weight_ref).  A cubic
    polynomial is fitted to the polar and a quartic "speed to fly" model
    is precomputed from it.
    """

    def __init__(self, speeds, sink_rates, weight_ref, weight=None):
        """Build the polar and speed-to-fly models.

        Args:
            speeds: polar speeds in km/h.
            sink_rates: matching sink rates in m/s (negative = sinking).
            weight_ref: reference weight the polar was measured at.
            weight: actual flying weight; None means use the polar unscaled.
        """
        self._init_constants()
        # self.wm = 0
        self.altitude = 0
        self.flight_state = "on_ground"
        self.weight_ref = weight_ref
        self.weight = weight
        self.original_speeds = speeds
        self.original_sink_rates = sink_rates
        self._calc_scale_factor()
        self._scale_speeds()
        self._calc_polar_model()
        self.calc_stf_model()

    def _init_constants(self):
        # Unit conversion factors.
        self.kmh_to_knots = 0.539957
        self.ms_to_knots = 1.94384
        self.knots_to_kmh = 1.852
        self.nm_to_feet = 6076.12
        self.nm_to_sm = 1.15078

    def _calc_scale_factor(self):
        # Polar speeds scale with the square root of the weight ratio.
        if self.weight is None:
            self._scale_factor = 1.0
        else:
            self._scale_factor = math.sqrt(self.weight / self.weight_ref)

    def _scale_speeds(self):
        # Convert the raw polar to knots and apply the weight scaling.
        self.speeds = self.kmh_to_knots * self._scale_factor * np.array(self.original_speeds)
        self.sink_rates = self.ms_to_knots * self._scale_factor * np.array(self.original_sink_rates)

    def _calc_polar_model(self):
        # Cubic fit: sink rate (kt) as a function of speed (kt).
        self._polar_model = np.polyfit(self.speeds, self.sink_rates, 3)

    def _glide_ratio2(self, speed):
        # Minimization objective; the conventional glide ratio is its
        # negative because sink rates are negative.
        return speed/self.sink_rate(speed)

    def best_ld(self):
        """Best (maximum) glide ratio on the polar."""
        res = minimize(self._glide_ratio2, 0, method="SLSQP")
        return -res.fun

    def best_ld_speed(self):
        """Speed at which the best glide ratio occurs."""
        res = minimize(self._glide_ratio2, 0, method="SLSQP")
        return res.x[0]

    def calc_stf_model(self):
        """Fit a quartic polynomial mapping climb rate to speed to fly."""
        distance = 1
        lower_limit = 0.0001
        climb_range = np.arange(lower_limit, 10.5, 1)
        stf_values = [fmin(self.calc_total_time, 1, args=(x, distance), disp=False)[0] for x in climb_range]
        self._stf_model = np.polyfit(climb_range, stf_values, 4)

    def sink_rate(self, speed):
        """Sink rate at ``speed`` (negative while sinking); alias of polar()."""
        return self.polar(speed)

    def polar(self, speed):
        """Evaluate the fitted cubic polar at ``speed``."""
        return np.polyval(self._polar_model, speed)

    def glide_ratio(self, speed):
        """Positive glide ratio at ``speed``."""
        return -speed/self.sink_rate(speed)

    def calc_avg_speed(self, speed, climb_rate, e=1):
        """Average cross-country speed over distance ``e``."""
        total_time = self.calc_total_time(speed, climb_rate, e)
        avg_speed = e / total_time
        return avg_speed

    def calc_total_time(self, speed, climb_rate, e=1):
        """Climb-plus-cruise time over distance ``e`` (MacCready model)."""
        # ws is negative while sinking, so the leading minus makes the
        # climb term positive.
        ws = np.polyval(self._polar_model, speed)
        total_time = e * (-(ws + 0) / (speed * climb_rate) + (1 / speed))
        return total_time

    def speed_to_fly(self, climb_rate, explicit=False):
        """Speed to fly for ``climb_rate``; explicit=True recomputes numerically."""
        if explicit:
            return fmin(self.calc_total_time, 1, args=[climb_rate], disp=False)[0]
        else:
            return np.polyval(self._stf_model, climb_rate)

    def altitude_lost(self, speed, distance):
        """Altitude lost over ``distance`` nm at ``speed``; returns (nm, feet)."""
        sink_rate = -(self.polar(speed))
        altitude_lost = sink_rate * distance/speed
        return altitude_lost, altitude_lost * self.nm_to_feet

    def get_range(self, altitude, speed):
        """Still-air glide range (nm) from ``altitude`` feet at ``speed``."""
        sink_rate = self.polar(speed)
        glide_time = altitude/self.nm_to_feet/(-sink_rate)
        range1 = speed * glide_time
        return range1

    def set_altitude(self, altitude):
        """Place the glider airborne at ``altitude`` feet."""
        self.altitude = altitude
        self.flight_state = 'flying'

    def cruise(self, mc, distance):
        """Cruise ``distance`` nm at the MacCready speed; land if altitude runs out.

        Returns the cruise time for the full distance even when landing short.
        """
        speed = self.speed_to_fly(mc)
        altitude_loss = self.altitude_lost(speed, distance)[1]
        if altitude_loss > self.altitude:
            self.altitude = 0
            self.flight_state = "on_ground"
        else:
            self.altitude = self.altitude - altitude_loss
        cruise_time = distance/speed
        return cruise_time

    def climb(self, climb_step_size, climb_rate):
        """Gain ``climb_step_size`` feet at ``climb_rate`` knots; return the time."""
        self.altitude += climb_step_size
        climb_time = climb_step_size/(climb_rate * self.nm_to_feet)
        return climb_time
class Thermals:
    """Randomly placed thermals along a course.

    Spacings are drawn from an exponential distribution with mean ``dt``
    and bumped up so each spacing is at least ``distance_step``.
    """

    def __init__(self, dt, distance_to_destination, distance_step):
        self.cum_dist = 0
        self.thermals = np.array([])
        while self.cum_dist <= distance_to_destination:
            d = np.random.exponential(dt, 1)[0]
            if d < distance_step:
                d += distance_step  # make it at least a min distance of distant_step
            # print(d)
            self.thermals = np.append(self.thermals, d)
            self.cum_dist += d
        # Absolute positions of the thermals along the course.
        self.cum_sum = np.cumsum(self.thermals)

    def is_thermal(self, dist, distance_step):
        """Return the thermal position strictly inside (dist, dist + distance_step), else 0."""
        for d in self.cum_sum:
            if (d > dist) & (d < (dist + distance_step)):
                return d
        return 0
| 0 | 0 |
c30132e2c9779826c7032440043cdd50a86109e8 | 146 | py | Python | src/dummy.py | ashesh705/master-of-coin | 8ce253cd1c73005856c896a155ef25804d95d02f | [
"MIT"
] | null | null | null | src/dummy.py | ashesh705/master-of-coin | 8ce253cd1c73005856c896a155ef25804d95d02f | [
"MIT"
] | null | null | null | src/dummy.py | ashesh705/master-of-coin | 8ce253cd1c73005856c896a155ef25804d95d02f | [
"MIT"
] | null | null | null | """ Dummy source code to initialize repo"""
from typing import Literal
def dummy() -> Literal[True]:
    """Placeholder used to initialize the repository; always returns ``True``."""
    return True
| 14.6 | 43 | 0.657534 | """ Dummy source code to initialize repo"""
from typing import Literal
def dummy() -> Literal[True]:
    """Placeholder function; always returns ``True``."""
    return True
| 0 | 0 |
202dc2c2ce0978019f7627c8b0b1ddd47cb141d2 | 160 | py | Python | module.py | ShveczovaKS/8lab2k | 1c58ec07c8a7fa5ed9807a7751315131f2e361f0 | [
"MIT"
] | null | null | null | module.py | ShveczovaKS/8lab2k | 1c58ec07c8a7fa5ed9807a7751315131f2e361f0 | [
"MIT"
] | null | null | null | module.py | ShveczovaKS/8lab2k | 1c58ec07c8a7fa5ed9807a7751315131f2e361f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def get_func(tag):
    """Return a closure that pairs the captured ``tag`` with its argument."""
    def tagger(value):
        group = (tag, value)
        return group
    return tagger
| 12.307692 | 24 | 0.4875 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def get_func(tag):
    """Return a function that pairs the captured ``tag`` with its argument."""
    def func(s):
        # `tag` is closed over from the enclosing call.
        group = tag, s
        return group
    return func
| 0 | 0 |
df2da96dcd879b529489f9f2d167184ae6a3c44c | 436 | py | Python | ros2_and_turtlesim_deep_dive/launch/2_turtlesim_cmd_vel_publisher.launch.py | noshluk2/ROS2-Learners-Repository | 2e8982a4f659c99ab90175253c28f1c10a60e31b | [
"MIT"
] | 3 | 2021-11-27T18:18:12.000Z | 2022-01-15T19:32:19.000Z | ros2_and_turtlesim_deep_dive/launch/2_turtlesim_cmd_vel_publisher.launch.py | noshluk2/ROS2-Learners-Repository | 2e8982a4f659c99ab90175253c28f1c10a60e31b | [
"MIT"
] | null | null | null | ros2_and_turtlesim_deep_dive/launch/2_turtlesim_cmd_vel_publisher.launch.py | noshluk2/ROS2-Learners-Repository | 2e8982a4f659c99ab90175253c28f1c10a60e31b | [
"MIT"
] | 5 | 2021-09-02T10:49:46.000Z | 2022-02-22T12:47:17.000Z | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription([
Node(
package='turtlesim',
executable='turtlesim_node'
),
Node(
package='ros2_and_turtlesim_deep_dive',
executable='ts_velocity_publisher',
name='cmd_vel_publisher',
output='screen'
),
]) | 25.647059 | 51 | 0.605505 | from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
return LaunchDescription([
Node(
package='turtlesim',
executable='turtlesim_node'
),
Node(
package='ros2_and_turtlesim_deep_dive',
executable='ts_velocity_publisher',
name='cmd_vel_publisher',
output='screen'
),
]) | 0 | 0 |
0ba659f60c6cbb8e70fbe2ade949ed0726b3d12f | 680 | py | Python | crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 38,667 | 2015-01-01T00:15:34.000Z | 2022-03-31T22:57:03.000Z | crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 1,599 | 2016-09-27T09:07:36.000Z | 2022-03-31T23:04:51.000Z | crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 11,269 | 2015-01-01T08:41:17.000Z | 2022-03-31T16:12:52.000Z | """ Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py
This allows to optionally compile @cython decorated functions
(when cython is available at built time), or run the same code
as pure-python, without runtime dependency on cython module.
We only define the symbols that we use. E.g. see fontTools.cu2qu
"""
from types import SimpleNamespace
def _empty_decorator(x):
    """No-op decorator: return the decorated object unchanged."""
    return x


compiled = False  # pure-Python mode: nothing is cython-compiled

# Type markers are inert and decorator names are no-ops; assigning them
# directly at module level is equivalent to the globals()[...] loop form.
double = None
complex = None
int = None
cfunc = _empty_decorator
inline = _empty_decorator


def locals(**_):
    """Accept and ignore cython.locals(...) declarations."""
    return _empty_decorator


def returns(_):
    """Accept and ignore cython.returns(...) declarations."""
    return _empty_decorator
| 26.153846 | 64 | 0.739706 | """ Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py
This allows to optionally compile @cython decorated functions
(when cython is available at built time), or run the same code
as pure-python, without runtime dependency on cython module.
We only define the symbols that we use. E.g. see fontTools.cu2qu
"""
from types import SimpleNamespace
def _empty_decorator(x):
    """No-op decorator: return the decorated object unchanged."""
    return x


compiled = False  # pure-Python mode: nothing is cython-compiled

# Type markers become inert; decorator names become no-ops.
for name in ("double", "complex", "int"):
    globals()[name] = None
for name in ("cfunc", "inline"):
    globals()[name] = _empty_decorator

# cython.locals(...)/cython.returns(...) accept anything and do nothing.
locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
| 0 | 0 |
ab9929ed3ea92aae5e1281a1b168bffd87818815 | 2,172 | py | Python | appengine/swarming/server/acl.py | pombreda/swarming | c70f311f3db8f25752c793a0d7b36cf537d95580 | [
"Apache-2.0"
] | null | null | null | appengine/swarming/server/acl.py | pombreda/swarming | c70f311f3db8f25752c793a0d7b36cf537d95580 | [
"Apache-2.0"
] | null | null | null | appengine/swarming/server/acl.py | pombreda/swarming | c70f311f3db8f25752c793a0d7b36cf537d95580 | [
"Apache-2.0"
] | 1 | 2021-12-06T03:37:36.000Z | 2021-12-06T03:37:36.000Z | # Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Defines access groups."""
from components import auth
from components import utils
# Names of groups.
# See https://code.google.com/p/swarming/wiki/SwarmingAccessGroups for each
# level.
ADMINS_GROUP = 'swarming-admins'
BOTS_GROUP = 'swarming-bots'
PRIVILEGED_USERS_GROUP = 'swarming-privileged-users'
USERS_GROUP = 'swarming-users'
def is_admin():
  """True if the caller is in the Swarming admins group or is a GAE-level admin."""
  return auth.is_group_member(ADMINS_GROUP) or auth.is_admin()


def is_bot():
  """True if the caller is a Swarming bot; admins implicitly qualify."""
  return auth.is_group_member(BOTS_GROUP) or is_admin()


def is_privileged_user():
  """True for privileged users; admins implicitly qualify."""
  return auth.is_group_member(PRIVILEGED_USERS_GROUP) or is_admin()


def is_user():
  """True for regular users; privileged users (and thus admins) qualify."""
  return auth.is_group_member(USERS_GROUP) or is_privileged_user()


def is_bot_or_user():
  """True if the caller may make either bot-side or user-side calls."""
  return is_bot() or is_user()


def is_bot_or_privileged_user():
  """True if the caller is a bot or a privileged user."""
  return is_bot() or is_privileged_user()


def is_bot_or_admin():
  """Returns True if current user can execute user-side and bot-side calls."""
  return is_bot() or is_admin()
def get_user_type():
  """Returns a string describing the current access control for the user."""
  # Ordered from most to least privileged; the first matching role wins.
  checks = (
      (is_admin, 'admin'),
      (is_privileged_user, 'privileged user'),
      (is_user, 'user'),
      (is_bot, 'bot'),
  )
  for predicate, label in checks:
    if predicate():
      return label
  return 'unknown user'
def bootstrap_dev_server_acls():
  """Adds localhost to IP whitelist and Swarming groups."""
  # Dev-server only; must never run in production.
  assert utils.is_local_dev_server()
  if auth.is_replica():
    # Group definitions are owned by the primary; nothing to do on replicas.
    return
  bots = auth.bootstrap_loopback_ips()
  auth.bootstrap_group(BOTS_GROUP, bots, 'Swarming bots')
  auth.bootstrap_group(USERS_GROUP, bots, 'Swarming users')
  # Add a swarming admin. smoke-test@example.com is used in
  # server_smoke_test.py
  admin = auth.Identity(auth.IDENTITY_USER, 'smoke-test@example.com')
  auth.bootstrap_group(ADMINS_GROUP, [admin], 'Swarming administrators')
  # Add an instance admin (for easier manual testing when running dev server).
  auth.bootstrap_group(
      auth.ADMIN_GROUP,
      [auth.Identity(auth.IDENTITY_USER, 'test@example.com')],
      'Users that can manage groups')
| 26.487805 | 78 | 0.743094 | # Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Defines access groups."""
from components import auth
from components import utils
# Names of groups.
# See https://code.google.com/p/swarming/wiki/SwarmingAccessGroups for each
# level.
ADMINS_GROUP = 'swarming-admins'
BOTS_GROUP = 'swarming-bots'
PRIVILEGED_USERS_GROUP = 'swarming-privileged-users'
USERS_GROUP = 'swarming-users'
def is_admin():
  """True if the caller is in the Swarming admins group or is a GAE-level admin."""
  return auth.is_group_member(ADMINS_GROUP) or auth.is_admin()


def is_bot():
  """True if the caller is a Swarming bot; admins implicitly qualify."""
  return auth.is_group_member(BOTS_GROUP) or is_admin()


def is_privileged_user():
  """True for privileged users; admins implicitly qualify."""
  return auth.is_group_member(PRIVILEGED_USERS_GROUP) or is_admin()


def is_user():
  """True for regular users; privileged users (and thus admins) qualify."""
  return auth.is_group_member(USERS_GROUP) or is_privileged_user()


def is_bot_or_user():
  """True if the caller may make either bot-side or user-side calls."""
  return is_bot() or is_user()


def is_bot_or_privileged_user():
  """True if the caller is a bot or a privileged user."""
  return is_bot() or is_privileged_user()


def is_bot_or_admin():
  """Returns True if current user can execute user-side and bot-side calls."""
  return is_bot() or is_admin()
def get_user_type():
  """Returns a string describing the current access control for the user."""
  # Checked from most to least privileged; the first match wins.
  if is_admin():
    return 'admin'
  if is_privileged_user():
    return 'privileged user'
  if is_user():
    return 'user'
  if is_bot():
    return 'bot'
  return 'unknown user'
def bootstrap_dev_server_acls():
  """Adds localhost to IP whitelist and Swarming groups."""
  # Dev-server only; must never run in production.
  assert utils.is_local_dev_server()
  if auth.is_replica():
    # Group definitions are owned by the primary; nothing to do on replicas.
    return
  bots = auth.bootstrap_loopback_ips()
  auth.bootstrap_group(BOTS_GROUP, bots, 'Swarming bots')
  auth.bootstrap_group(USERS_GROUP, bots, 'Swarming users')
  # Add a swarming admin. smoke-test@example.com is used in
  # server_smoke_test.py
  admin = auth.Identity(auth.IDENTITY_USER, 'smoke-test@example.com')
  auth.bootstrap_group(ADMINS_GROUP, [admin], 'Swarming administrators')
  # Add an instance admin (for easier manual testing when running dev server).
  auth.bootstrap_group(
      auth.ADMIN_GROUP,
      [auth.Identity(auth.IDENTITY_USER, 'test@example.com')],
      'Users that can manage groups')
| 0 | 0 |
f75d5caf71f40f210458b85e2a678429b8d45bdb | 1,715 | py | Python | suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py | runtest007/dpdk_surcata_4.1.1 | 5abf91f483b418b5d9c2dd410b5c850d6ed95c5f | [
"MIT"
] | 77 | 2019-06-17T07:05:07.000Z | 2022-03-07T03:26:27.000Z | suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py | clockdad/DPDK_SURICATA-4_1_1 | 974cc9eb54b0b1ab90eff12a95617e3e293b77d3 | [
"MIT"
] | 22 | 2019-07-18T02:32:10.000Z | 2022-03-24T03:39:11.000Z | suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py | clockdad/DPDK_SURICATA-4_1_1 | 974cc9eb54b0b1ab90eff12a95617e3e293b77d3 | [
"MIT"
] | 49 | 2019-06-18T03:31:56.000Z | 2022-03-13T05:23:10.000Z | # Copyright (C) 2017 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import os
import logging
from suricata.update import config
from suricata.update import sources
logger = logging.getLogger()
def register(parser):
    """Attach this command's handler and its 'name' argument to *parser*."""
    parser.set_defaults(func=remove_source)
    parser.add_argument("name")
def remove_source():
    """Remove the named rule source, whether currently enabled or disabled.

    Checks the enabled location first, then the disabled one, deleting the
    first file found (same order and same lazy lookup as before; the two
    duplicated branches are folded into one loop).

    Returns:
        0 if a source file was found and removed, 1 otherwise.
    """
    name = config.args().name
    for get_filename, state in (
            (sources.get_enabled_source_filename, "enabled"),
            (sources.get_disabled_source_filename, "disabled")):
        filename = get_filename(name)
        if os.path.exists(filename):
            logger.debug("Deleting file %s.", filename)
            os.remove(filename)
            logger.info("Source %s removed, previously %s.", name, state)
            return 0
    logger.warning("Source %s does not exist.", name)
    return 1
| 34.3 | 73 | 0.749271 | # Copyright (C) 2017 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import os
import logging
from suricata.update import config
from suricata.update import sources
logger = logging.getLogger()
def register(parser):
    """Register the 'name' argument and bind this command's handler."""
    parser.add_argument("name")
    parser.set_defaults(func=remove_source)
def remove_source():
    """Remove the named rule source file (enabled or disabled).

    Returns:
        0 if a source file was found and removed, 1 otherwise.
    """
    name = config.args().name
    # Prefer the enabled location; only look at the disabled one if absent.
    enabled_source_filename = sources.get_enabled_source_filename(name)
    if os.path.exists(enabled_source_filename):
        logger.debug("Deleting file %s.", enabled_source_filename)
        os.remove(enabled_source_filename)
        logger.info("Source %s removed, previously enabled.", name)
        return 0
    disabled_source_filename = sources.get_disabled_source_filename(name)
    if os.path.exists(disabled_source_filename):
        logger.debug("Deleting file %s.", disabled_source_filename)
        os.remove(disabled_source_filename)
        logger.info("Source %s removed, previously disabled.", name)
        return 0
    logger.warning("Source %s does not exist.", name)
    return 1
| 0 | 0 |
3ca045d0b4c2187471f92b0e5fdbef4d90523a1c | 936 | py | Python | blogs/admin.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | blogs/admin.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | blogs/admin.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Blog, Category, Tag, Comment
# Register your models here.
@admin.register(Blog)
class AdminBlog(admin.ModelAdmin):
    # Change-list columns; both link to the edit page.
    list_display = ['title', 'publishing_date']
    list_display_links = ['title', 'publishing_date']
    list_filter = ['publishing_date', 'category', 'tag']
    search_fields = ['title', 'content']

    # NOTE(review): ModelAdmin does not read an inner Meta; the model comes
    # from @admin.register.  Presumably harmless leftover — confirm.
    class Meta:
        model = Blog


@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
    list_display = ['title']
    search_fields = ['title']

    class Meta:
        model = Category


@admin.register(Tag)
class AdminTag(admin.ModelAdmin):
    list_display = ['title']
    search_fields = ['title']

    class Meta:
        model = Tag


@admin.register(Comment)
class AdminComment(admin.ModelAdmin):
    # blog__title follows the FK to search by the related blog's title.
    search_fields = ['name', 'email', 'content', 'blog__title']
    list_filter = ['publishing_date']

    class Meta:
        model = Comment
| 22.829268 | 63 | 0.673077 | from django.contrib import admin
from .models import Blog, Category, Tag, Comment
# Register your models here.
@admin.register(Blog)
class AdminBlog(admin.ModelAdmin):
    # Change-list columns; both link to the edit page.
    list_display = ['title', 'publishing_date']
    list_display_links = ['title', 'publishing_date']
    list_filter = ['publishing_date', 'category', 'tag']
    search_fields = ['title', 'content']

    # NOTE(review): ModelAdmin does not read an inner Meta; the model comes
    # from @admin.register.  Presumably harmless leftover — confirm.
    class Meta:
        model = Blog


@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
    list_display = ['title']
    search_fields = ['title']

    class Meta:
        model = Category


@admin.register(Tag)
class AdminTag(admin.ModelAdmin):
    list_display = ['title']
    search_fields = ['title']

    class Meta:
        model = Tag


@admin.register(Comment)
class AdminComment(admin.ModelAdmin):
    # blog__title follows the FK to search by the related blog's title.
    search_fields = ['name', 'email', 'content', 'blog__title']
    list_filter = ['publishing_date']

    class Meta:
        model = Comment
| 0 | 0 |
7eab3c278fcfc42d13e8ea1b8a894c6d62712411 | 7,206 | py | Python | zcode/inout/tests/test_inout_core.py | lzkelley/zcode | 55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba | [
"MIT"
] | 1 | 2021-02-11T03:24:55.000Z | 2021-02-11T03:24:55.000Z | zcode/inout/tests/test_inout_core.py | lzkelley/zcode | 55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba | [
"MIT"
] | null | null | null | zcode/inout/tests/test_inout_core.py | lzkelley/zcode | 55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba | [
"MIT"
] | null | null | null | """Test methods for `inout_core.py`.
Can be run with:
$ nosetests zcode/inout/tests/test_inout_core.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import warnings
import shutil
from numpy.testing import run_module_suite
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal
class TestInoutCore(object):
    """Nose tests for zcode.inout.inout_core.

    Works in the current directory: creates scratch files/dirs with
    underscore-prefixed names and removes them in setup/teardown.
    """

    @classmethod
    def setup_class(cls):
        # Scratch paths used by every test; cleaned up before starting.
        cls.fname_npz = '_test_inout_core_testfile.npz'
        cls.fname_npz_subdir = os.path.join('./subdir', cls.fname_npz)
        cls.test_dir_0 = '_test_inout_core_dir'
        cls.test_file_0 = '_test_filename.txt'
        cls._kill_test_files()

    @classmethod
    def teardown_class(cls):
        cls._kill_test_files()

    @classmethod
    def _kill_test_files(cls):
        """Best-effort removal of every scratch file/directory this class creates."""
        # Remove created directories
        if os.path.exists(cls.test_dir_0):
            print("removing '{}'".format(cls.test_dir_0))
            shutil.rmtree(cls.test_dir_0)
        # Remove created files
        if os.path.exists(cls.fname_npz_subdir):
            print("removing '{}'".format(cls.fname_npz_subdir))
            os.remove(cls.fname_npz_subdir)
            # Also remove the now-empty containing subdirectory.
            tname = os.path.dirname(cls.fname_npz_subdir)
            print("Removing '{}'".format(tname))
            os.rmdir(tname)
        if os.path.exists(cls.fname_npz):
            print("removing '{}'".format(cls.fname_npz))
            os.remove(cls.fname_npz)
        if os.path.exists(cls.test_file_0):
            print("removing '{}'".format(cls.test_file_0))
            os.remove(cls.test_file_0)
        return

    def test_dictToNPZ_npzToDict(self):
        """Round-trip a nested dict through dictToNPZ/npzToDict."""
        fname = self.fname_npz
        fname_subdir = self.fname_npz_subdir
        from zcode.inout.inout_core import npzToDict, dictToNPZ
        # Create a test dictionary to save
        subdata = {'a': 'a', 'b': 'abc', 'c': np.arange(4)}
        data = {'one': np.array(1), 'two': np.array(2, dtype=np.uint64), 'three': subdata}
        # Try saving
        dictToNPZ(data, fname)
        assert_true(os.path.exists(fname))
        # Try Loading
        loaded = npzToDict(fname)
        for key, item in data.items():
            print("key = ", key)
            print("\t", type(loaded[key]), repr(loaded[key]))
            print("\t", type(item), repr(item))
            # Look at internal dictionary separately
            if type(item) is not dict and type(loaded[key]) is not dict:
                assert_true(np.array_equal(loaded[key], item))
                assert_equal(type(loaded[key]), type(item))
        # Check internal dictionary
        subloaded = loaded['three']
        print("Subloaded keys = ", subloaded.keys())
        for key, item in subdata.items():
            print("key = ", key)
            print("\t", subloaded[key])
            print("\t", item)
            assert_true(np.array_equal(subloaded[key], item))
            assert_equal(type(subloaded[key]), type(item))
        # Make sure subdirectories are created if needed
        dictToNPZ(data, fname_subdir)
        assert_true(os.path.exists(fname_subdir))

    def test_modify_exists_files(self):
        """modify_exists must append _00, _01, ... for existing *files*."""
        fdir = self.test_dir_0
        fname = self.test_file_0
        num_files = 4
        max_files = 20  # This must be between [11, 100]
        from zcode.inout.inout_core import modify_exists, modify_filename
        # Create test directory if needed, store boolean whether to later remove it.
        if not os.path.exists(fdir):
            os.makedirs(fdir)
        # Create test filename
        fname = os.path.join(fdir, fname)
        # Make sure it doesnt already exist
        if os.path.exists(fname):
            raise RuntimeError("Test filename '{}' already exists.".format(fname))
        # Create files that should *not* interfere with 'modify_exists'
        # 'modify_exists' should only look for 2-digit appended numbers
        fname_distract_1 = modify_filename(fname, append='_6')
        fname_distract_2 = modify_filename(fname, append='_123')
        print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
        for ff in [fname_distract_1, fname_distract_2]:
            open(ff, 'a')
        # Test that filenames are appropriately modified
        # ----------------------------------------------
        print("fname = '{}'".format(fname))
        for ii in range(num_files):
            new_name = modify_exists(fname, max=max_files)
            print(ii, "new_name = ", new_name)
            assert_false(os.path.exists(new_name))
            # Create file
            open(new_name, 'a')
            if ii == 0:
                intended_name = str(fname)
            else:
                intended_name = modify_filename(fname, append="_{:02d}".format(ii-1))
            print("\tshould be = ", intended_name)
            assert_true(os.path.exists(intended_name))
            if not os.path.exists(new_name):
                raise RuntimeError("New file should have been created '{}'.".format(new_name))
        # Make sure filenames dont exceed maximum, and raises warning
        with warnings.catch_warnings(record=True) as ww:
            assert_equal(modify_exists(fname, max=num_files-1), None)
        assert_true(len(ww) > 0)

    def test_modify_exists_dirs(self):
        """modify_exists must behave the same way for *directories*."""
        fdir = self.test_dir_0
        num_files = 4
        max_files = 20  # This must be between [11, 100]
        from zcode.inout.inout_core import modify_exists, modify_filename
        # Make sure directory doesn't initially exist
        if os.path.exists(fdir) and os.path.isdir(fdir):
            shutil.rmtree(fdir)
        '''
        # Create files that should *not* interfere with 'modify_exists'
        # 'modify_exists' should only look for 2-digit appended numbers
        fname_distract_1 = modify_filename(fname, append='_6')
        fname_distract_2 = modify_filename(fname, append='_123')
        print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
        for ff in [fname_distract_1, fname_distract_2]:
            open(ff, 'a')
        '''
        # Test that filenames are appropriately modified
        # ----------------------------------------------
        print("fname = '{}'".format(fdir))
        created = []
        for ii in range(num_files):
            new_name = modify_exists(fdir, max=max_files)
            print(ii, "new_name = ", new_name)
            assert_false(os.path.exists(new_name))
            # Create directory
            os.makedirs(new_name)
            created.append(new_name)
            if ii == 0:
                intended_name = str(fdir)
            else:
                intended_name = modify_filename(fdir, append="_{:02d}".format(ii-1))
            print("\tshould be = ", intended_name)
            assert_true(os.path.exists(intended_name))
            if not os.path.exists(new_name):
                raise RuntimeError("New file should have been created '{}'.".format(new_name))
        # Cleanup
        for fdir in created:
            shutil.rmtree(fdir)
        return
# Run all methods as if with `nosetests ...`
if __name__ == "__main__":
run_module_suite()
| 37.53125 | 95 | 0.600056 | """Test methods for `inout_core.py`.
Can be run with:
$ nosetests zcode/inout/tests/test_inout_core.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import warnings
import shutil
from numpy.testing import run_module_suite
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal
class TestInoutCore(object):
    """Nose tests for zcode.inout.inout_core.

    Works in the current directory: creates scratch files/dirs with
    underscore-prefixed names and removes them in setup/teardown.
    """

    @classmethod
    def setup_class(cls):
        # Scratch paths used by every test; cleaned up before starting.
        cls.fname_npz = '_test_inout_core_testfile.npz'
        cls.fname_npz_subdir = os.path.join('./subdir', cls.fname_npz)
        cls.test_dir_0 = '_test_inout_core_dir'
        cls.test_file_0 = '_test_filename.txt'
        cls._kill_test_files()

    @classmethod
    def teardown_class(cls):
        cls._kill_test_files()

    @classmethod
    def _kill_test_files(cls):
        """Best-effort removal of every scratch file/directory this class creates."""
        # Remove created directories
        if os.path.exists(cls.test_dir_0):
            print("removing '{}'".format(cls.test_dir_0))
            shutil.rmtree(cls.test_dir_0)
        # Remove created files
        if os.path.exists(cls.fname_npz_subdir):
            print("removing '{}'".format(cls.fname_npz_subdir))
            os.remove(cls.fname_npz_subdir)
            # Also remove the now-empty containing subdirectory.
            tname = os.path.dirname(cls.fname_npz_subdir)
            print("Removing '{}'".format(tname))
            os.rmdir(tname)
        if os.path.exists(cls.fname_npz):
            print("removing '{}'".format(cls.fname_npz))
            os.remove(cls.fname_npz)
        if os.path.exists(cls.test_file_0):
            print("removing '{}'".format(cls.test_file_0))
            os.remove(cls.test_file_0)
        return

    def test_dictToNPZ_npzToDict(self):
        """Round-trip a nested dict through dictToNPZ/npzToDict."""
        fname = self.fname_npz
        fname_subdir = self.fname_npz_subdir
        from zcode.inout.inout_core import npzToDict, dictToNPZ
        # Create a test dictionary to save
        subdata = {'a': 'a', 'b': 'abc', 'c': np.arange(4)}
        data = {'one': np.array(1), 'two': np.array(2, dtype=np.uint64), 'three': subdata}
        # Try saving
        dictToNPZ(data, fname)
        assert_true(os.path.exists(fname))
        # Try Loading
        loaded = npzToDict(fname)
        for key, item in data.items():
            print("key = ", key)
            print("\t", type(loaded[key]), repr(loaded[key]))
            print("\t", type(item), repr(item))
            # Look at internal dictionary separately
            if type(item) is not dict and type(loaded[key]) is not dict:
                assert_true(np.array_equal(loaded[key], item))
                assert_equal(type(loaded[key]), type(item))
        # Check internal dictionary
        subloaded = loaded['three']
        print("Subloaded keys = ", subloaded.keys())
        for key, item in subdata.items():
            print("key = ", key)
            print("\t", subloaded[key])
            print("\t", item)
            assert_true(np.array_equal(subloaded[key], item))
            assert_equal(type(subloaded[key]), type(item))
        # Make sure subdirectories are created if needed
        dictToNPZ(data, fname_subdir)
        assert_true(os.path.exists(fname_subdir))

    def test_modify_exists_files(self):
        """modify_exists must append _00, _01, ... for existing *files*."""
        fdir = self.test_dir_0
        fname = self.test_file_0
        num_files = 4
        max_files = 20  # This must be between [11, 100]
        from zcode.inout.inout_core import modify_exists, modify_filename
        # Create test directory if needed, store boolean whether to later remove it.
        if not os.path.exists(fdir):
            os.makedirs(fdir)
        # Create test filename
        fname = os.path.join(fdir, fname)
        # Make sure it doesnt already exist
        if os.path.exists(fname):
            raise RuntimeError("Test filename '{}' already exists.".format(fname))
        # Create files that should *not* interfere with 'modify_exists'
        # 'modify_exists' should only look for 2-digit appended numbers
        fname_distract_1 = modify_filename(fname, append='_6')
        fname_distract_2 = modify_filename(fname, append='_123')
        print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
        for ff in [fname_distract_1, fname_distract_2]:
            open(ff, 'a')
        # Test that filenames are appropriately modified
        # ----------------------------------------------
        print("fname = '{}'".format(fname))
        for ii in range(num_files):
            new_name = modify_exists(fname, max=max_files)
            print(ii, "new_name = ", new_name)
            assert_false(os.path.exists(new_name))
            # Create file
            open(new_name, 'a')
            if ii == 0:
                intended_name = str(fname)
            else:
                intended_name = modify_filename(fname, append="_{:02d}".format(ii-1))
            print("\tshould be = ", intended_name)
            assert_true(os.path.exists(intended_name))
            if not os.path.exists(new_name):
                raise RuntimeError("New file should have been created '{}'.".format(new_name))
        # Make sure filenames dont exceed maximum, and raises warning
        with warnings.catch_warnings(record=True) as ww:
            assert_equal(modify_exists(fname, max=num_files-1), None)
        assert_true(len(ww) > 0)

    def test_modify_exists_dirs(self):
        """modify_exists must behave the same way for *directories*."""
        fdir = self.test_dir_0
        num_files = 4
        max_files = 20  # This must be between [11, 100]
        from zcode.inout.inout_core import modify_exists, modify_filename
        # Make sure directory doesn't initially exist
        if os.path.exists(fdir) and os.path.isdir(fdir):
            shutil.rmtree(fdir)
        '''
        # Create files that should *not* interfere with 'modify_exists'
        # 'modify_exists' should only look for 2-digit appended numbers
        fname_distract_1 = modify_filename(fname, append='_6')
        fname_distract_2 = modify_filename(fname, append='_123')
        print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
        for ff in [fname_distract_1, fname_distract_2]:
            open(ff, 'a')
        '''
        # Test that filenames are appropriately modified
        # ----------------------------------------------
        print("fname = '{}'".format(fdir))
        created = []
        for ii in range(num_files):
            new_name = modify_exists(fdir, max=max_files)
            print(ii, "new_name = ", new_name)
            assert_false(os.path.exists(new_name))
            # Create directory
            os.makedirs(new_name)
            created.append(new_name)
            if ii == 0:
                intended_name = str(fdir)
            else:
                intended_name = modify_filename(fdir, append="_{:02d}".format(ii-1))
            print("\tshould be = ", intended_name)
            assert_true(os.path.exists(intended_name))
            if not os.path.exists(new_name):
                raise RuntimeError("New file should have been created '{}'.".format(new_name))
        # Cleanup
        for fdir in created:
            shutil.rmtree(fdir)
        return
# Run all methods as if with `nosetests ...`
if __name__ == "__main__":
run_module_suite()
| 0 | 0 |
fb8a1c89bff42274aadcbbd4e1333face551763d | 46 | py | Python | Lecture2/name.py | EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript | 80df8834c1db8cc28b72d5393ff9aa340c069b57 | [
"MIT"
] | null | null | null | Lecture2/name.py | EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript | 80df8834c1db8cc28b72d5393ff9aa340c069b57 | [
"MIT"
] | null | null | null | Lecture2/name.py | EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript | 80df8834c1db8cc28b72d5393ff9aa340c069b57 | [
"MIT"
] | null | null | null | name = input("Mame: ")
print(f"Hello, {name}") | 23 | 23 | 0.608696 | name = input("Mame: ")
print(f"Hello, {name}") | 0 | 0 |
f510ea13d343357dd5eae6dc2035a2c37e918c43 | 2,733 | py | Python | tasksapi/models/validators.py | mwiens91/saltant | 9e72175a896f5859ada304ad3ae4d84dfc3834db | [
"MIT"
] | 3 | 2018-12-08T01:18:29.000Z | 2018-12-14T23:18:42.000Z | tasksapi/models/validators.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 3 | 2019-05-23T07:43:13.000Z | 2021-06-10T20:46:53.000Z | tasksapi/models/validators.py | saltant-org/saltant | db498a1186fc74221f8214ad1819dd03bf4b08ac | [
"MIT"
] | 2 | 2019-03-13T22:31:09.000Z | 2019-05-03T00:18:30.000Z | """Contains validators for task models."""
def task_instance_args_are_valid(instance, fill_missing_args=False):
    """Determine whether a task instance's arguments are valid.

    The arguments are valid when the instance supplies every required
    argument of its task type, except those for which the task type
    declares a default value.

    Arg:
        instance: A task instance instance. (Yikes!)
        fill_missing_args: When True, write the task type's default
            values into the instance for any missing arguments.

    Returns:
        A (bool, str) tuple: the bool signals whether the arguments
        are valid, and the str explains why not (empty when valid).
    """
    required = instance.task_type.required_arguments
    defaults = instance.task_type.required_arguments_default_values
    provided = instance.arguments.keys()

    for arg_name in required:
        if arg_name in provided:
            continue
        # The argument is missing; that is only acceptable when the
        # task type declares a default value for it.
        if arg_name not in defaults:
            return (
                False,
                "required argument '%s' not provided!" % arg_name,
            )
        if fill_missing_args:
            # Materialize the default onto the instance.
            instance.arguments[arg_name] = defaults[arg_name]
    return (True, "")
def task_type_args_are_valid(instance):
    """Determine whether a task type's argument fields are valid.

    The fields are valid when every key of
    required_arguments_default_values is also listed in
    required_arguments (i.e. the defaults form a subset).

    Arg:
        instance: A task type instance.

    Returns:
        A (bool, str) tuple: the bool signals whether the fields are
        valid, and the str explains why not (empty when valid).
    """
    default_keys = set(instance.required_arguments_default_values.keys())
    required_keys = set(instance.required_arguments)
    if not default_keys.issubset(required_keys):
        return (False, "default arguments not a subset of required arguments")
    return (True, "")
| 36.932432 | 78 | 0.668496 | """Contains validators for task models."""
def task_instance_args_are_valid(instance, fill_missing_args=False):
"""Determines whether a task instance's arguments are valid.
The arguments are valid if the instance's argument includes all of
its task type's required arguments (but not necessarily the
arguments for which a default value exists).
Arg:
instance: A task instance instance. (Yikes!)
fill_missing_args: A boolean determining whether to fill in any
missing arguments in the instance with default values.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Validate an instance's args against its required args.
task_type_required_args = instance.task_type.required_arguments
task_type_default_vals = (
instance.task_type.required_arguments_default_values
)
instance_arg_keys = instance.arguments.keys()
for required_arg in task_type_required_args:
# Check if the required argument is provided
if required_arg not in instance_arg_keys:
# Required argument not provided. Check if default argument
# value exists.
if required_arg not in task_type_default_vals:
# No default exists
return (
False,
"required argument '%s' not provided!" % required_arg,
)
# Fill in the default value if we're told to
if fill_missing_args:
instance.arguments[required_arg] = task_type_default_vals[
required_arg
]
# Valid
return (True, "")
def task_type_args_are_valid(instance):
"""Determines whether a task type's argument fields are valid.
The argument fields are valid if the argument keys in the
required_arguments_default_values field are a subset of its required
arguments.
Arg:
instance: A task type instance.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Ensure that the default arguments form a subset of the required
# arguments
if not set(instance.required_arguments_default_values.keys()).issubset(
set(instance.required_arguments)
):
return (False, "default arguments not a subset of required arguments")
# Valid
return (True, "")
| 0 | 0 |
b18e463d5d6fb71fac9aa5f7d292312fb8ee31b9 | 4,954 | py | Python | semantic_segmentation/keras_metrics.py | Jason-Khan/ubdvss | 76cabfa642af1f659920de32827ea6c3fe008588 | [
"Apache-2.0"
] | 13 | 2020-01-20T13:22:47.000Z | 2021-11-12T07:35:36.000Z | semantic_segmentation/keras_metrics.py | Jason-Khan/ubdvss | 76cabfa642af1f659920de32827ea6c3fe008588 | [
"Apache-2.0"
] | 3 | 2020-09-09T13:19:11.000Z | 2020-11-15T10:52:23.000Z | semantic_segmentation/keras_metrics.py | Jason-Khan/ubdvss | 76cabfa642af1f659920de32827ea6c3fe008588 | [
"Apache-2.0"
] | 5 | 2020-06-01T16:26:07.000Z | 2022-03-08T02:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright () ABBYY (BIT Software), 1993 - 2019. All rights reserved.
"""
keras
"""
import functools
import keras.backend as K
import tensorflow as tf
from semantic_segmentation.losses import get_losses
def _squeeze_single_dims(*args):
    """Remove all size-1 dimensions from every tensor in *args*."""
    return [tf.squeeze(tensor) for tensor in args]


def _metric_wrapper(metric_fn):
    """Decorate a metric: squeeze inputs and default weights to ones."""
    @functools.wraps(metric_fn)
    def metric_fn_wrapped(true, pred, weights=None):
        if weights is None:
            # Unweighted call: every element counts equally.
            weights = tf.ones_like(true, tf.float32)
        squeezed = _squeeze_single_dims(true, pred, weights)
        return metric_fn(*squeezed)
    return metric_fn_wrapped


@_metric_wrapper
def _acc(true, pred, weights):
    """Weighted elementwise accuracy."""
    matches = K.cast(K.equal(true, pred), tf.float32)
    return K.sum(matches * weights) / K.maximum(1., K.sum(weights))


def confusion_matrix(true, pred, weights):
    """Weighted confusion matrix for binary labels.

    :param true: ground-truth labels (0/1)
    :param pred: predicted labels (0/1)
    :param weights: per-element weights
    :return: tp, tn, fp, fn - weighted confusion-matrix sums
    """
    matches = K.equal(true, pred)

    def weighted_sum(condition):
        return K.sum(K.cast(condition, tf.float32) * weights)

    mismatches = tf.logical_not(matches)
    tp = weighted_sum(tf.logical_and(matches, K.equal(true, 1)))
    tn = weighted_sum(tf.logical_and(matches, K.equal(true, 0)))
    fp = weighted_sum(tf.logical_and(mismatches, K.equal(pred, 1)))
    fn = weighted_sum(tf.logical_and(mismatches, K.equal(pred, 0)))
    return tp, tn, fp, fn


@_metric_wrapper
def precision(true, pred, weights):
    """Weighted precision: tp / (tp + fp)."""
    tp, _tn, fp, _fn = confusion_matrix(true, pred, weights)
    return tp / K.maximum(1., tp + fp)


@_metric_wrapper
def recall(true, pred, weights):
    """Weighted recall: tp / (tp + fn)."""
    tp, _tn, _fp, fn = confusion_matrix(true, pred, weights)
    return tp / K.maximum(1., tp + fn)


@_metric_wrapper
def f1(true, pred, weights):
    """Weighted F1 score (harmonic mean of precision and recall)."""
    tp, _tn, fp, fn = confusion_matrix(true, pred, weights)
    prec = tp / K.maximum(1., tp + fp)
    rec = tp / K.maximum(1., tp + fn)
    # Guard prec + rec == 0 to avoid a division by zero.
    return tf.cond(K.not_equal(prec + rec, 0.),
                   lambda: 2. * prec * rec / (prec + rec),
                   lambda: 0.)


def _get_detection_labels(y_true, y_pred):
    """Binarize targets and channel-0 scores for the detection metrics."""
    detection_true = K.cast(K.greater(y_true, 0), tf.int32)
    detection_pred = K.cast(K.greater(y_pred[..., 0], 0), tf.int32)
    return detection_true, detection_pred


def detection_pixel_acc(y_true, y_pred):
    """Per-pixel accuracy of the detection output.

    :param y_true:
    :param y_pred:
    :return:
    """
    true_labels, pred_labels = _get_detection_labels(y_true, y_pred)
    return _acc(true_labels, pred_labels)


def detection_pixel_precision(y_true, y_pred):
    """Per-pixel precision of the detection output.

    :param y_true:
    :param y_pred:
    :return:
    """
    true_labels, pred_labels = _get_detection_labels(y_true, y_pred)
    return precision(true_labels, pred_labels)


def detection_pixel_recall(y_true, y_pred):
    """Per-pixel recall of the detection output.

    :param y_true:
    :param y_pred:
    :return:
    """
    true_labels, pred_labels = _get_detection_labels(y_true, y_pred)
    return recall(true_labels, pred_labels)


def detection_pixel_f1(y_true, y_pred):
    """Per-pixel F1 of the detection output.

    :param y_true:
    :param y_pred:
    :return:
    """
    true_labels, pred_labels = _get_detection_labels(y_true, y_pred)
    return f1(true_labels, pred_labels)


def classification_pixel_acc(y_true, y_pred):
    """Per-pixel classification accuracy over object pixels only.

    Only positions with y_true > 0 (i.e. containing some object) are
    scored; class channels start at index 1 of y_pred.
    """
    object_mask = K.cast(y_true > 0, tf.float32)
    # Shift labels so class ids start at 0; background is masked out.
    labels = K.cast((y_true - 1) * object_mask, tf.int64)
    class_probs = tf.nn.softmax(y_pred[..., 1:], axis=-1)
    predictions = tf.argmax(class_probs, axis=-1)
    return _acc(labels, predictions, weights=object_mask)


def get_all_metrics(classification_mode=False):
    """Return every metric (and loss) tracked during training.

    :param classification_mode: include the classification metric
    :return: list of metric callables
    """
    all_metrics = [
        detection_pixel_acc,
        detection_pixel_precision,
        detection_pixel_recall,
        detection_pixel_f1,
    ]
    if classification_mode:
        all_metrics.append(classification_pixel_acc)
    all_metrics += get_losses(classification_mode)
    return all_metrics
| 25.802083 | 74 | 0.66411 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (С) ABBYY (BIT Software), 1993 - 2019. All rights reserved.
"""
Различные метрики для keras подсчитываемые при обучении
"""
import functools
import keras.backend as K
import tensorflow as tf
from semantic_segmentation.losses import get_losses
def _squeeze_single_dims(*args):
    """Squeeze all size-1 dimensions out of every tensor argument."""
    return [tf.squeeze(arg) for arg in args]


def _metric_wrapper(metric_fn):
    """Decorator: squeeze metric inputs and default weights to all-ones."""
    @functools.wraps(metric_fn)
    def metric_fn_wrapped(true, pred, weights=None):
        if weights is None:
            weights = tf.ones_like(true, tf.float32)
        args = _squeeze_single_dims(true, pred, weights)
        return metric_fn(*args)
    return metric_fn_wrapped


@_metric_wrapper
def _acc(true, pred, weights):
    """Weighted elementwise accuracy."""
    equal_mask = K.cast(K.equal(true, pred), tf.float32)
    return K.sum(equal_mask * weights) / K.maximum(1., K.sum(weights))


def confusion_matrix(true, pred, weights):
    """Confusion matrix for binary classification, with weights.

    :param true: ground-truth labels (0/1)
    :param pred: predicted labels (0/1)
    :param weights: per-element weights
    :return: tp, tn, fp, fn - confusion matrix
    """
    equal_mask = K.equal(true, pred)
    not_equal_mask = tf.logical_not(equal_mask)

    def calculate_sum(condition):
        return K.sum(K.cast(condition, tf.float32) * weights)

    tp = calculate_sum(tf.logical_and(equal_mask, K.equal(true, 1)))
    tn = calculate_sum(tf.logical_and(equal_mask, K.equal(true, 0)))
    fp = calculate_sum(tf.logical_and(not_equal_mask, K.equal(pred, 1)))
    fn = calculate_sum(tf.logical_and(not_equal_mask, K.equal(pred, 0)))
    return tp, tn, fp, fn


@_metric_wrapper
def precision(true, pred, weights):
    """Compute weighted precision: tp / (tp + fp)."""
    tp, _tn, fp, _fn = confusion_matrix(true, pred, weights)
    return tp / K.maximum(1., tp + fp)


@_metric_wrapper
def recall(true, pred, weights):
    """Compute weighted recall: tp / (tp + fn)."""
    tp, _tn, _fp, fn = confusion_matrix(true, pred, weights)
    return tp / K.maximum(1., tp + fn)


@_metric_wrapper
def f1(true, pred, weights):
    """Compute the weighted F1 score."""
    tp, _tn, fp, fn = confusion_matrix(true, pred, weights)
    prec = tp / K.maximum(1., tp + fp)
    rec = tp / K.maximum(1., tp + fn)
    # Avoid dividing by zero when both precision and recall vanish.
    return tf.cond(K.not_equal(prec + rec, 0.),
                   lambda: 2. * prec * rec / (prec + rec),
                   lambda: 0.)


def _get_detection_labels(y_true, y_pred):
    """Binarize ground truth and channel-0 predictions for detection."""
    detection_true = K.cast(K.greater(y_true, 0), tf.int32)
    detection_pred = K.cast(K.greater(y_pred[..., 0], 0), tf.int32)
    return detection_true, detection_pred


def detection_pixel_acc(y_true, y_pred):
    """Per-pixel detection accuracy.

    :param y_true:
    :param y_pred:
    :return:
    """
    detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
    return _acc(detection_true, detection_pred)


def detection_pixel_precision(y_true, y_pred):
    """Per-pixel detection precision.

    :param y_true:
    :param y_pred:
    :return:
    """
    detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
    return precision(detection_true, detection_pred)


def detection_pixel_recall(y_true, y_pred):
    """Per-pixel detection recall.

    :param y_true:
    :param y_pred:
    :return:
    """
    detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
    return recall(detection_true, detection_pred)


def detection_pixel_f1(y_true, y_pred):
    """Per-pixel detection F1 score.

    :param y_true:
    :param y_pred:
    :return:
    """
    detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
    return f1(detection_true, detection_pred)


def classification_pixel_acc(y_true, y_pred):
    """Per-pixel classification accuracy.

    Computed only where y_true > 0, i.e. where some object is present;
    class channels start at index 1 of y_pred.
    """
    mask = K.cast(y_true > 0, tf.float32)
    # Labels are shifted down by one so class ids start at zero.
    labels = K.cast((y_true - 1) * mask, tf.int64)
    class_p = tf.nn.softmax(y_pred[..., 1:], axis=-1)
    predictions = tf.argmax(class_p, axis=-1)
    return _acc(labels, predictions, weights=mask)


def get_all_metrics(classification_mode=False):
    """Return the list of all metrics (plus losses).

    :param classification_mode: include the classification metric
    :return: list of metric callables
    """
    all_metrics = [
        detection_pixel_acc,
        detection_pixel_precision,
        detection_pixel_recall,
        detection_pixel_f1,
    ]
    if classification_mode:
        all_metrics.append(classification_pixel_acc)
    all_metrics += get_losses(classification_mode)
    return all_metrics
| 738 | 0 |
a675dc6e47d5ff70decc3601d63c4681223ee3d8 | 2,722 | py | Python | datazie/model/LinearModel.py | amozie/amozie | fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef | [
"Apache-2.0"
] | null | null | null | datazie/model/LinearModel.py | amozie/amozie | fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef | [
"Apache-2.0"
] | null | null | null | datazie/model/LinearModel.py | amozie/amozie | fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
model_sm = 'sm'
class ModelException(Exception):
    """Exception raised for model configuration/usage errors."""

    def __init__(self, message):
        # Fix: the original called super().__init__(self), storing the
        # exception object itself as the Exception arg, which made
        # str(exc) recurse instead of printing the message.
        super().__init__(message)
        self.message = message
class LinearModel():
    """Thin OLS wrapper with prediction intervals and plotting.

    NOTE(review): only the statsmodels backend ('sm') is implemented.
    """

    def __init__(self, y, x=None, model=model_sm, add_constant=None):
        """Store the data; x defaults to 0..n-1 and is pre-treated."""
        self.model = model
        self.add_constant = add_constant
        self.__features = None
        if x is None:
            x = np.arange(y.size)
        self.x = self.__pretreat_params(x)
        self.y = y

    def __pretreat_params(self, x):
        """Coerce x to a 2-D ndarray and optionally prepend a constant."""
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if not self.__features:
            # Lock in the feature count from the first array seen.
            if x.ndim == 1:
                self.__features = 1
            elif x.ndim == 2:
                self.__features = x.shape[1]
            else:
                raise ModelException('dimension of x is error')
        if x.ndim != 2:
            x = x.reshape(-1, self.__features)
        should_add = self.add_constant
        if should_add is None:
            # Default behaviour: statsmodels needs an explicit intercept.
            should_add = (self.model == model_sm)
        if should_add:
            x = self.__add_constant(x)
        return x

    def __add_constant(self, x):
        # Work around sm.add_constant: with a single sample it returns
        # the array unchanged, so build the intercept column manually.
        if x.shape[0] == 1:
            return np.concatenate((np.ones((x.shape[0], 1)), x), axis=1)
        return sm.add_constant(x)

    def __fit_sm(self):
        """Fit ordinary least squares via statsmodels."""
        self.res = sm.OLS(self.y, self.x).fit()

    def fit(self):
        """Fit the model with the configured backend."""
        if self.model == model_sm:
            self.__fit_sm()

    def predict(self, x=None, alpha=0.05):
        """Predict with prediction-interval columns.

        :param x: new inputs; None predicts on the training data
        :param alpha: significance level of the prediction interval
        :returns: one row per sample: the fit prediction followed by
            the wls_prediction_std outputs, transposed to columns
        """
        if x is not None:
            x = self.__pretreat_params(x)
        columns = [self.res.predict(x)]
        columns.extend(wls_prediction_std(self.res, exog=x, alpha=alpha))
        return np.array(columns).T

    def summary(self):
        """Print the statsmodels fit summary."""
        print(self.res.summary())

    def plot(self):
        """Scatter the samples with the fit line and interval bands."""
        fig = plt.figure()
        fig.suptitle('LINEAR MODEL')
        ax = plt.subplot(111)
        bands = self.predict(alpha=0.1)
        ax.plot(self.x[:, 1], self.y, '*', label='sample')
        ax.plot(self.x[:, 1], bands[:, 0], label='predict')
        ax.plot(self.x[:, 1], bands[:, 2], 'r--', label='std')
        ax.plot(self.x[:, 1], bands[:, 3], 'r--', label='std')
        plt.show()


if __name__ == '__main__':
    # Demo: recover y = 3x + 2 from noisy samples.
    x = np.linspace(0, 10, 21)
    y = 3 * x + 2
    y += np.random.randn(x.size)
    lm = LinearModel(y, x)
    lm.fit()
    lm.summary()
    print(lm.predict())
    lm.plot()
| 26.950495 | 72 | 0.559882 | # -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
model_sm = 'sm'
class ModelException(Exception):
    """Exception raised for model configuration/usage errors."""

    def __init__(self, message):
        # Fix: the original called super().__init__(self), storing the
        # exception object itself as the Exception arg, which made
        # str(exc) recurse instead of printing the message.
        super().__init__(message)
        self.message = message
class LinearModel():
    """Small OLS helper: fit, predict with intervals, and plot.

    NOTE(review): only the statsmodels backend ('sm') is implemented.
    """

    def __init__(self, y, x=None, model=model_sm, add_constant=None):
        """Keep the data; x defaults to 0..n-1 and is normalized."""
        self.model = model
        self.add_constant = add_constant
        self.__features = None
        if x is None:
            x = np.arange(y.size)
        self.x = self.__pretreat_params(x)
        self.y = y

    def __pretreat_params(self, x):
        """Normalize x to a 2-D ndarray, adding a constant if needed."""
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if not self.__features:
            # Remember the feature count from the first array seen.
            if x.ndim == 1:
                self.__features = 1
            elif x.ndim == 2:
                self.__features = x.shape[1]
            else:
                raise ModelException('dimension of x is error')
        if x.ndim != 2:
            x = x.reshape(-1, self.__features)
        add = self.add_constant
        if add is None:
            # statsmodels expects an explicit intercept column.
            add = (self.model == model_sm)
        if add:
            x = self.__add_constant(x)
        return x

    def __add_constant(self, x):
        # When there is exactly one sample, sm.add_constant has a bug:
        # it returns the original array without adding the constant,
        # so prepend the ones column manually in that case.
        if x.shape[0] == 1:
            return np.concatenate((np.ones((x.shape[0], 1)), x), axis=1)
        return sm.add_constant(x)

    def __fit_sm(self):
        """Fit OLS through statsmodels."""
        self.res = sm.OLS(self.y, self.x).fit()

    def fit(self):
        """Fit with the configured backend."""
        if self.model == model_sm:
            self.__fit_sm()

    def predict(self, x=None, alpha=0.05):
        """Predict values plus prediction-interval columns.

        :param x: new inputs; None predicts on the training data
        :param alpha: significance level of the prediction interval
        :returns: one row per sample: prediction, then the
            wls_prediction_std outputs, transposed to columns
        """
        if x is not None:
            x = self.__pretreat_params(x)
        rows = [self.res.predict(x)]
        rows.extend(wls_prediction_std(self.res, exog=x, alpha=alpha))
        return np.array(rows).T

    def summary(self):
        """Print the statsmodels fit summary."""
        print(self.res.summary())

    def plot(self):
        """Plot samples, the fitted line and the interval bands."""
        fig = plt.figure()
        fig.suptitle('LINEAR MODEL')
        ax = plt.subplot(111)
        y_prd = self.predict(alpha=0.1)
        ax.plot(self.x[:, 1], self.y, '*', label='sample')
        ax.plot(self.x[:, 1], y_prd[:, 0], label='predict')
        ax.plot(self.x[:, 1], y_prd[:, 2], 'r--', label='std')
        ax.plot(self.x[:, 1], y_prd[:, 3], 'r--', label='std')
        plt.show()


if __name__ == '__main__':
    # Demo run: fit y = 3x + 2 with Gaussian noise.
    x = np.linspace(0, 10, 21)
    y = 3 * x + 2
    y += np.random.randn(x.size)
    lm = LinearModel(y, x)
    lm.fit()
    lm.summary()
    print(lm.predict())
    lm.plot()
| 60 | 0 |
b67a3f691f33184cbbf6898ebf7144756349476d | 4,888 | py | Python | utilities.py | rc1035/directed-probe-matching | c724096672e778202d9e8ed197cdf7395ea1d211 | [
"MIT"
] | 10 | 2017-08-16T12:16:52.000Z | 2022-02-26T05:09:39.000Z | utilities.py | d15c0/directed-probe-matching | c724096672e778202d9e8ed197cdf7395ea1d211 | [
"MIT"
] | 1 | 2019-07-10T12:00:00.000Z | 2019-07-10T12:00:00.000Z | utilities.py | d15c0/directed-probe-matching | c724096672e778202d9e8ed197cdf7395ea1d211 | [
"MIT"
] | 4 | 2017-11-30T11:01:06.000Z | 2019-11-03T23:39:40.000Z | #!/usr/bin/env python3.6
"""Refactored utility functions."""
__author__ = "Richard Cosgrove"
from collections import defaultdict
import gzip
from itertools import combinations
from datetime import datetime, timedelta
import json
import os
def export_compressed_json(dict_item, file_name):
    """Export gzip compressed JSON.

    (For Uni dataset compressed size is ~10% of uncompressed.)

    :param dict_item: JSON-serializable object to dump.
    :param file_name: Name of file to be written e.g. dict.json.gz
    """
    parent_dir = os.path.dirname(file_name)
    if parent_dir:
        # Fix: os.makedirs("") raises FileNotFoundError for bare file
        # names, so only create directories when a directory part exists.
        os.makedirs(parent_dir, exist_ok=True)
    # Use lowest level of compression for fast speed.
    with gzip.open(file_name, mode="wt", compresslevel=1) as f:
        json.dump(dict_item, f, separators=(',', ':'))
def import_compressed_json(file_name):
    """Import gzip compressed JSON.

    :param file_name: Name of file to be read e.g. dict.json.gz
    :returns: The decoded JSON value (a dictionary for our files).
    """
    with gzip.open(file_name, mode="rt") as handle:
        data = json.load(handle)
    return data
def match_tokens_with_same_ssid_set(token_to_probes):
    """Cluster tokens that probed for exactly the same set of SSIDs.

    :param token_to_probes: Dictionary with token keys and probe values
    :returns: (ssid_set_to_tokens, token_to_ssid_set) pair mapping each
        SSID frozenset to its tokens and each token to its SSID set
    """
    ssid_set_to_tokens = defaultdict(set)
    token_to_ssid_set = {}
    for token, probes in token_to_probes.items():
        # Collect the directed SSIDs for this token; broadcast probes
        # are encoded as ssid == 0 and are ignored.
        ssids = {probe["ssid"] for probe in probes if probe["ssid"] != 0}
        if len(ssids) < 2:
            # Small SSID sets cause a high rate of false positives.
            continue
        key = frozenset(ssids)
        ssid_set_to_tokens[key].add(token)
        token_to_ssid_set[token] = key
    # Sanity check: each token lands in exactly one cluster.
    all_tokens = [t for cluster in ssid_set_to_tokens.values() for t in cluster]
    assert len(all_tokens) == len(set(all_tokens))
    return (ssid_set_to_tokens, token_to_ssid_set)
def validate_clusters(clusters, token_to_probes):
    """Validate the correctness of a clustering.

    :param clusters: An iterable of clusters, where each cluster is a list of tokens.
    :returns: Dictionary of binary classifier results
    """
    token_to_mac = import_compressed_json("int/token_to_mac.json.gz")

    # Binary classification over every within-cluster token pair.
    true_positives, false_positives = 0, 0
    num_of_clusters = 0
    mac_to_timestamps = defaultdict(list)
    for cluster in clusters:
        num_of_clusters += 1
        for token_a, token_b in combinations(cluster, r=2):
            if token_to_mac[token_a] == token_to_mac[token_b]:
                true_positives += 1
                mac = token_to_mac[token_a]
                for token in (token_a, token_b):
                    mac_to_timestamps[mac].extend(
                        float(p["timestamp"]) for p in token_to_probes[token])
            else:
                false_positives += 1

    # Count MACs whose probes span more than 12 hours, and collect
    # every span for the median below.
    greater_than = 0
    lengths = []
    for timestamps in mac_to_timestamps.values():
        span = (timedelta(seconds=max(timestamps))
                - timedelta(seconds=min(timestamps)))
        if span > timedelta(hours=12):
            greater_than += 1
        lengths.append(span)
    import statistics
    mid = statistics.median(lengths)

    # Valid/invalid pair totals are pre-computed in randomiseTokens.py,
    # so the negatives follow by subtraction.
    actual_combos = import_compressed_json("int/valid_combinations.json.gz")
    true_negatives = actual_combos["invalid_pairs"] - false_positives
    false_negatives = actual_combos["valid_pairs"] - true_positives

    # Sanity checks
    assert (true_positives + false_positives + true_negatives
            + false_negatives == actual_combos["total_pairs"])
    assert true_positives + false_negatives == actual_combos["valid_pairs"]
    assert false_positives + true_negatives == actual_combos["invalid_pairs"]

    true_positive_rate = true_positives / float(true_positives + false_negatives)
    false_positive_rate = false_positives / float(false_positives + true_negatives)
    accuracy = (true_positives + true_negatives) / float(actual_combos["total_pairs"])
    return {
        "tp": true_positives,
        "fp": false_positives,
        "tn": true_negatives,
        "fn": false_negatives,
        "tpr": true_positive_rate,
        "fpr": false_positive_rate,
        "accuracy": accuracy,
        "clusters": num_of_clusters,
        "macs": greater_than,
        "median": mid,
    }
| 36.75188 | 112 | 0.672054 | #!/usr/bin/env python3.6
"""Refactored utility functions."""
__author__ = "Richard Cosgrove"
from collections import defaultdict
import gzip
from itertools import combinations
from datetime import datetime, timedelta
import json
import os
def export_compressed_json(dict_item, file_name):
    """Export gzip compressed JSON.

    (For Uni dataset compressed size is ~10% of uncompressed.)

    :param dict_item: JSON-serializable object to dump.
    :param file_name: Name of file to be written e.g. dict.json.gz
    """
    parent_dir = os.path.dirname(file_name)
    if parent_dir:
        # Fix: os.makedirs("") raises FileNotFoundError for bare file
        # names, so only create directories when a directory part exists.
        os.makedirs(parent_dir, exist_ok=True)
    # Use lowest level of compression for fast speed.
    with gzip.open(file_name, mode="wt", compresslevel=1) as f:
        json.dump(dict_item, f, separators=(',', ':'))


def import_compressed_json(file_name):
    """Import gzip compressed JSON.

    :param file_name: Name of file to be read e.g. dict.json.gz
    :returns: The decoded JSON value (a dictionary for our files).
    """
    with gzip.open(file_name, mode="rt") as f:
        return json.load(f)


def match_tokens_with_same_ssid_set(token_to_probes):
    """Split into clusters that share the SAME set of SSIDs probed for.

    :param token_to_probes: Dictionary with token keys and probe values
    :returns: (ssid_set_to_tokens, token_to_ssid_set) pair
    """
    ssid_set_to_tokens = defaultdict(set)
    token_to_ssid_set = {}
    for token, probes in token_to_probes.items():
        # Broadcast probes are encoded with ssid == 0 and are ignored.
        ssid_set = {probe["ssid"] for probe in probes if probe["ssid"] != 0}
        if len(ssid_set) < 2:
            # Sets with low cardinality yield too many false positives.
            continue
        key = frozenset(ssid_set)
        ssid_set_to_tokens[key].add(token)
        token_to_ssid_set[token] = key
    # Sanity check: no token is matched more than once.
    tokens = [t for tokens in ssid_set_to_tokens.values() for t in tokens]
    assert len(tokens) == len(set(tokens))
    return (ssid_set_to_tokens, token_to_ssid_set)


def validate_clusters(clusters, token_to_probes):
    """Validate the correctness of a clustering.

    :param clusters: An iterable of clusters, where each cluster is a list of tokens.
    :returns: Dictionary of binary classifier results
    """
    import statistics  # local: only needed for the median below

    token_to_mac = import_compressed_json("int/token_to_mac.json.gz")

    # Binary classification over every within-cluster token pair.
    true_positives, false_positives = 0, 0
    num_of_clusters = 0
    mac_to_timestamps = defaultdict(list)
    for cluster in clusters:
        num_of_clusters += 1
        for token_a, token_b in combinations(cluster, r=2):
            if token_to_mac[token_a] == token_to_mac[token_b]:
                true_positives += 1
                mac = token_to_mac[token_a]
                for token in (token_a, token_b):
                    mac_to_timestamps[mac].extend(
                        float(p["timestamp"]) for p in token_to_probes[token])
            else:
                false_positives += 1

    # Count MACs whose probes span more than 12 hours; keep spans for
    # the median.
    greater_than = 0
    lengths = []
    for timestamps in mac_to_timestamps.values():
        span = (timedelta(seconds=max(timestamps))
                - timedelta(seconds=min(timestamps)))
        if span > timedelta(hours=12):
            greater_than += 1
        lengths.append(span)
    mid = statistics.median(lengths)

    # Valid/invalid pair totals are pre-computed in randomiseTokens.py,
    # so the negatives follow by subtraction.
    actual_combos = import_compressed_json("int/valid_combinations.json.gz")
    true_negatives = actual_combos["invalid_pairs"] - false_positives
    false_negatives = actual_combos["valid_pairs"] - true_positives

    # Sanity checks
    assert (true_positives + false_positives + true_negatives
            + false_negatives == actual_combos["total_pairs"])
    assert true_positives + false_negatives == actual_combos["valid_pairs"]
    assert false_positives + true_negatives == actual_combos["invalid_pairs"]

    true_positive_rate = true_positives / float(true_positives + false_negatives)
    false_positive_rate = false_positives / float(false_positives + true_negatives)
    accuracy = (true_positives + true_negatives) / float(actual_combos["total_pairs"])
    return {
        "tp": true_positives,
        "fp": false_positives,
        "tn": true_negatives,
        "fn": false_negatives,
        "tpr": true_positive_rate,
        "fpr": false_positive_rate,
        "accuracy": accuracy,
        "clusters": num_of_clusters,
        "macs": greater_than,
        "median": mid,
    }
| 0 | 0 |
0e899a0c6dc84c26eed43123ac191ce6e094f3ae | 3,464 | py | Python | nautobot/users/tests/test_models.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 384 | 2021-02-24T01:40:40.000Z | 2022-03-30T10:30:59.000Z | nautobot/users/tests/test_models.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 1,067 | 2021-02-24T00:58:08.000Z | 2022-03-31T23:38:23.000Z | nautobot/users/tests/test_models.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 128 | 2021-02-24T02:45:16.000Z | 2022-03-20T18:48:36.000Z | from django.contrib.auth import get_user_model
from django.test import TestCase
# Use the proper swappable User model
User = get_user_model()
class UserConfigTest(TestCase):
    """Exercise the per-user config_data helpers (get/all/set/clear)."""

    def setUp(self):
        # A three-level config tree shared by every test below.
        config = {
            "a": True,
            "b": {
                "foo": 101,
                "bar": 102,
            },
            "c": {
                "foo": {"x": 201},
                "bar": {"y": 202},
                "baz": {"z": 203},
            },
        }
        user = User.objects.create_user(username="testuser")
        user.config_data = config
        user.save()
        self.user = user

    def test_get(self):
        user = self.user
        # Root and nested lookups resolve through dotted paths.
        self.assertEqual(user.get_config("a"), True)
        self.assertEqual(user.get_config("b.foo"), 101)
        self.assertEqual(user.get_config("c.baz.z"), 203)
        # Unknown paths fall back to None ...
        for missing in ("invalid", "a.invalid", "b.foo.invalid", "b.foo.x.invalid"):
            self.assertIsNone(user.get_config(missing))
        # ... or to an explicit default when one is given.
        for missing in ("invalid", "a.invalid", "b.foo.invalid", "b.foo.x.invalid"):
            self.assertEqual(user.get_config(missing, "DEFAULT"), "DEFAULT")

    def test_all(self):
        # all_config() flattens the tree into dotted keys.
        expected = {
            "a": True,
            "b.foo": 101,
            "b.bar": 102,
            "c.foo.x": 201,
            "c.bar.y": 202,
            "c.baz.z": 203,
        }
        self.assertEqual(self.user.all_config(), expected)

    def test_set(self):
        user = self.user
        # Overwrite existing leaf values.
        user.set_config("a", "abc")
        user.set_config("c.foo.x", "abc")
        self.assertEqual(user.config_data["a"], "abc")
        self.assertEqual(user.config_data["c"]["foo"]["x"], "abc")
        # Create new leaves, at the root and under a branch.
        user.set_config("d", "abc")
        user.set_config("b.baz", "abc")
        self.assertEqual(user.config_data["d"], "abc")
        self.assertEqual(user.config_data["b"]["baz"], "abc")
        # commit=True persists the change to the database.
        user.set_config("a", "def", commit=True)
        user.refresh_from_db()
        self.assertEqual(user.config_data["a"], "def")
        # Branch -> leaf and leaf -> branch conversions are rejected.
        with self.assertRaises(TypeError):
            user.set_config("b", 1)
        with self.assertRaises(TypeError):
            user.set_config("a.x", 1)

    def test_clear(self):
        user = self.user
        # Clearing removes existing keys without touching siblings.
        user.clear_config("a")
        user.clear_config("b.foo")
        self.assertTrue("a" not in user.config_data)
        self.assertTrue("foo" not in user.config_data["b"])
        self.assertEqual(user.config_data["b"]["bar"], 102)
        # Clearing a non-existing value should fail silently.
        user.clear_config("invalid")
| 33.631068 | 87 | 0.566397 | from django.contrib.auth import get_user_model
from django.test import TestCase
# Use the proper swappable User model
User = get_user_model()
class UserConfigTest(TestCase):
    """Tests for the user config_data accessors (get/all/set/clear)."""

    def setUp(self):
        # Shared three-level configuration tree used by all tests.
        initial = {
            "a": True,
            "b": {
                "foo": 101,
                "bar": 102,
            },
            "c": {
                "foo": {"x": 201},
                "bar": {"y": 202},
                "baz": {"z": 203},
            },
        }
        user = User.objects.create_user(username="testuser")
        user.config_data = initial
        user.save()
        self.user = user

    def test_get(self):
        # Retrieve root and nested values via dotted paths.
        self.assertEqual(self.user.get_config("a"), True)
        self.assertEqual(self.user.get_config("b.foo"), 101)
        self.assertEqual(self.user.get_config("c.baz.z"), 203)
        # Invalid paths return None ...
        for path in ("invalid", "a.invalid", "b.foo.invalid", "b.foo.x.invalid"):
            self.assertIsNone(self.user.get_config(path))
        # ... or the provided default.
        for path in ("invalid", "a.invalid", "b.foo.invalid", "b.foo.x.invalid"):
            self.assertEqual(self.user.get_config(path, "DEFAULT"), "DEFAULT")

    def test_all(self):
        # all_config() yields a flattened dotted-key dictionary.
        flattened_data = {
            "a": True,
            "b.foo": 101,
            "b.bar": 102,
            "c.foo.x": 201,
            "c.bar.y": 202,
            "c.baz.z": 203,
        }
        self.assertEqual(self.user.all_config(), flattened_data)

    def test_set(self):
        # Overwrite existing values.
        self.user.set_config("a", "abc")
        self.user.set_config("c.foo.x", "abc")
        self.assertEqual(self.user.config_data["a"], "abc")
        self.assertEqual(self.user.config_data["c"]["foo"]["x"], "abc")
        # Create new values.
        self.user.set_config("d", "abc")
        self.user.set_config("b.baz", "abc")
        self.assertEqual(self.user.config_data["d"], "abc")
        self.assertEqual(self.user.config_data["b"]["baz"], "abc")
        # Set a value and commit it to the database.
        self.user.set_config("a", "def", commit=True)
        self.user.refresh_from_db()
        self.assertEqual(self.user.config_data["a"], "def")
        # Branch/leaf conversions raise TypeError in both directions.
        with self.assertRaises(TypeError):
            self.user.set_config("b", 1)
        with self.assertRaises(TypeError):
            self.user.set_config("a.x", 1)

    def test_clear(self):
        # Clear existing values; siblings are untouched.
        self.user.clear_config("a")
        self.user.clear_config("b.foo")
        self.assertTrue("a" not in self.user.config_data)
        self.assertTrue("foo" not in self.user.config_data["b"])
        self.assertEqual(self.user.config_data["b"]["bar"], 102)
        # Clearing a non-existing value should fail silently.
        self.user.clear_config("invalid")
| 0 | 0 |
58ff00ab9dd53405c8606240357d386f8a7c8414 | 3,823 | py | Python | lib/googlecloudsdk/api_lib/sql/instances.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/sql/instances.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/sql/instances.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.sql import api_util
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
# Substring that identifies a Postgres database version string.
_POSTGRES_DATABASE_VERSION_PREFIX = 'POSTGRES'


class _BaseInstances(object):
  """Common utility functions for sql instances."""

  @staticmethod
  def GetDatabaseInstances(limit=None, batch_size=None):
    """Gets SQL instances in a given project.

    Modifies current state of an individual instance to 'STOPPED' if
    activationPolicy is 'NEVER'.

    Args:
      limit: int, The maximum number of records to yield. None if all
        available records should be yielded.
      batch_size: int, The number of items to retrieve per request.

    Returns:
      A generator of sql_messages.DatabaseInstance instances (the function
      yields lazily; it does not build a list).
    """
    client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages
    project_id = properties.VALUES.core.project.Get(required=True)

    # Only forward paging arguments the caller actually supplied.
    params = {}
    if limit is not None:
      params['limit'] = limit
    if batch_size is not None:
      params['batch_size'] = batch_size
    yielded = list_pager.YieldFromList(
        sql_client.instances,
        sql_messages.SqlInstancesListRequest(project=project_id), **params)

    def YieldInstancesWithAModifiedState():
      for result in yielded:
        # TODO(b/63139112): Investigate impact of instances without settings.
        if result.settings and result.settings.activationPolicy == 'NEVER':
          result.state = 'STOPPED'
        yield result

    return YieldInstancesWithAModifiedState()

  @staticmethod
  def PrintAndConfirmAuthorizedNetworksOverwrite():
    """Prompts for confirmation before overwriting authorized networks."""
    console_io.PromptContinue(
        message='When adding a new IP address to authorized networks, '
        'make sure to also include any IP addresses that have already been '
        'authorized. Otherwise, they will be overwritten and de-authorized.',
        default=True,
        cancel_on_no=True)

  @staticmethod
  def IsPostgresDatabaseVersion(database_version):
    """Returns a boolean indicating if the database version is Postgres."""
    return _POSTGRES_DATABASE_VERSION_PREFIX in database_version
class InstancesV1Beta3(_BaseInstances):
  """Common utility functions for sql instances V1Beta3."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    """Copies the project and instance ids from a resource reference."""
    instance_resource.project = instance_ref.project
    instance_resource.instance = instance_ref.instance

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    """Attaches a backup config; v1beta3 stores it as a one-element list."""
    settings.backupConfiguration = [backup_config]
class InstancesV1Beta4(_BaseInstances):
  """Common utility functions for sql instances V1Beta4."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    """Copies the project and instance ids from a resource reference.

    Unlike v1beta3, the v1beta4 resource calls the instance field 'name'.
    """
    instance_resource.project = instance_ref.project
    instance_resource.name = instance_ref.instance

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    """Attaches a backup config; v1beta4 stores it as a scalar, not a list."""
    settings.backupConfiguration = backup_config
| 34.754545 | 79 | 0.757782 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.sql import api_util
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
_POSTGRES_DATABASE_VERSION_PREFIX = 'POSTGRES'
class _BaseInstances(object):
"""Common utility functions for sql instances."""
@staticmethod
def GetDatabaseInstances(limit=None, batch_size=None):
"""Gets SQL instances in a given project.
Modifies current state of an individual instance to 'STOPPED' if
activationPolicy is 'NEVER'.
Args:
limit: int, The maximum number of records to yield. None if all available
records should be yielded.
batch_size: int, The number of items to retrieve per request.
Returns:
List of yielded sql_messages.DatabaseInstance instances.
"""
client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
sql_client = client.sql_client
sql_messages = client.sql_messages
project_id = properties.VALUES.core.project.Get(required=True)
params = {}
if limit is not None:
params['limit'] = limit
if batch_size is not None:
params['batch_size'] = batch_size
yielded = list_pager.YieldFromList(
sql_client.instances,
sql_messages.SqlInstancesListRequest(project=project_id), **params)
def YieldInstancesWithAModifiedState():
for result in yielded:
# TODO(b/63139112): Investigate impact of instances without settings.
if result.settings and result.settings.activationPolicy == 'NEVER':
result.state = 'STOPPED'
yield result
return YieldInstancesWithAModifiedState()
@staticmethod
def PrintAndConfirmAuthorizedNetworksOverwrite():
console_io.PromptContinue(
message='When adding a new IP address to authorized networks, '
'make sure to also include any IP addresses that have already been '
'authorized. Otherwise, they will be overwritten and de-authorized.',
default=True,
cancel_on_no=True)
@staticmethod
def IsPostgresDatabaseVersion(database_version):
"""Returns a boolean indicating if the database version is Postgres."""
return _POSTGRES_DATABASE_VERSION_PREFIX in database_version
class InstancesV1Beta3(_BaseInstances):
"""Common utility functions for sql instances V1Beta3."""
@staticmethod
def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
instance_resource.project = instance_ref.project
instance_resource.instance = instance_ref.instance
@staticmethod
def AddBackupConfigToSettings(settings, backup_config):
settings.backupConfiguration = [backup_config]
class InstancesV1Beta4(_BaseInstances):
"""Common utility functions for sql instances V1Beta4."""
@staticmethod
def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
instance_resource.project = instance_ref.project
instance_resource.name = instance_ref.instance
@staticmethod
def AddBackupConfigToSettings(settings, backup_config):
settings.backupConfiguration = backup_config
| 0 | 0 |
a6e3fb81075849bc5006590462c1692cddcd0b28 | 1,912 | py | Python | catalog/general/catalog_logger.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | catalog/general/catalog_logger.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | catalog/general/catalog_logger.py | eoss-cloud/madxxx_catalog_api | ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
""" EOSS catalog system
Custom logger
Default configuration file within this directory is used to control logging behaviour; can be overwritten with LOGGING_CONF which points to
local logging configuration
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import logging
from logging.config import fileConfig
import os
from utilities import read_OS_var
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
if read_OS_var('LOGGING_CONF', mandatory=False) == None:
path = os.path.dirname(__file__)
log_config_file = os.path.join(path, 'logging.ini')
else:
log_config_file = read_OS_var('LOGGING_CONF', mandatory=False)
fileConfig(log_config_file)
logger = logging.getLogger()
logger.addHandler(NullHandler())
logging.getLogger(__name__).addHandler(NullHandler())
# Configure default logger to do nothing
notificator = logging.getLogger('EOSS:notification')
heartbeat_log = logging.getLogger('EOSS:heartbeat')
tracer_log = logging.getLogger('EOSS:tracer')
CALL = 41
START = 42
BEATING = 43
STOP = 44
STROKE = 45
HEALTH = 46
logging.addLevelName(CALL, 'CALL')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(STROKE, 'STROKE')
logging.addLevelName(HEALTH, 'HEALTH')
logging.addLevelName(START, 'START BEAT')
logging.addLevelName(STOP, 'STOP BEAT')
# 3rd party logger configuration
logging.getLogger('boto3.resources.action').setLevel(logging.WARNING)
logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
| 26.191781 | 140 | 0.764644 | #-*- coding: utf-8 -*-
""" EOSS catalog system
Custom logger
Default configuration file within this directory is used to control logging behaviour; can be overwritten with LOGGING_CONF which points to
local logging configuration
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import logging
from logging.config import fileConfig
import os
from utilities import read_OS_var
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
if read_OS_var('LOGGING_CONF', mandatory=False) == None:
path = os.path.dirname(__file__)
log_config_file = os.path.join(path, 'logging.ini')
else:
log_config_file = read_OS_var('LOGGING_CONF', mandatory=False)
fileConfig(log_config_file)
logger = logging.getLogger()
logger.addHandler(NullHandler())
logging.getLogger(__name__).addHandler(NullHandler())
# Configure default logger to do nothing
notificator = logging.getLogger('EOSS:notification')
heartbeat_log = logging.getLogger('EOSS:heartbeat')
tracer_log = logging.getLogger('EOSS:tracer')
CALL = 41
START = 42
BEATING = 43
STOP = 44
STROKE = 45
HEALTH = 46
logging.addLevelName(CALL, 'CALL')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(STROKE, 'STROKE')
logging.addLevelName(HEALTH, 'HEALTH')
logging.addLevelName(START, 'START BEAT')
logging.addLevelName(STOP, 'STOP BEAT')
# 3rd party logger configuration
logging.getLogger('boto3.resources.action').setLevel(logging.WARNING)
logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
| 0 | 0 |
25781249bb36750915e0251ce1e74e198d0fa28a | 8,973 | py | Python | deeplearning/ml4pl/models/batch.py | Zacharias030/ProGraML | cd99d2c5362acd0b24ee224492bb3e8c4d4736fb | [
"Apache-2.0"
] | null | null | null | deeplearning/ml4pl/models/batch.py | Zacharias030/ProGraML | cd99d2c5362acd0b24ee224492bb3e8c4d4736fb | [
"Apache-2.0"
] | 2 | 2020-07-27T08:22:06.000Z | 2020-07-30T17:34:35.000Z | deeplearning/ml4pl/models/batch.py | Zacharias030/ProGraML | cd99d2c5362acd0b24ee224492bb3e8c4d4736fb | [
"Apache-2.0"
] | 1 | 2020-06-05T04:58:13.000Z | 2020-06-05T04:58:13.000Z | # Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
  """The model data for a batch."""

  # Identifiers of the graphs that make up this batch.
  graph_ids: List[int]
  # Model-specific payload for the batch.
  data: Any
  # A flag used to mark that this batch is the end of an iterable sequences of
  # batches.
  end_of_batches: bool = False

  @property
  def graph_count(self) -> int:
    """Returns the number of graphs in this batch."""
    return len(self.graph_ids)


def EmptyBatch() -> Data:
  """Construct an empty batch."""
  return Data(graph_ids=[], data=None)


def EndOfBatches() -> Data:
  """Construct a 'end of batches' marker."""
  return Data(graph_ids=[], data=None, end_of_batches=True)


class BatchIterator(NamedTuple):
  """A batch iterator"""

  batches: Iterable[Data]
  # The total number of graphs in all of the batches.
  graph_count: int
class Results(NamedTuple):
  """The results of running a batch through a model.

  Don't instantiate this tuple directly, use Results.Create().
  """

  # 1-hot target and prediction matrices, shape (y_count, y_dimensionality).
  targets: np.ndarray
  predictions: np.ndarray
  # The number of model iterations to compute the final results. This is used
  # by iterative models such as message passing networks.
  iteration_count: int
  # For iterative models, this indicates whether the state of the model at
  # iteration_count had converged on a solution.
  model_converged: bool
  # The learning rate and loss of models, if applicable.
  learning_rate: Optional[float]
  loss: Optional[float]
  # Batch-level average performance metrics.
  accuracy: float
  precision: float
  recall: float
  f1: float

  @property
  def has_learning_rate(self) -> bool:
    """Returns True if a learning rate was recorded."""
    return self.learning_rate is not None

  @property
  def has_loss(self) -> bool:
    """Returns True if a loss was recorded."""
    return self.loss is not None

  @property
  def target_count(self) -> int:
    """Get the number of targets in the batch.

    For graph-level classifiers, this will be equal to Data.graph_count, else
    it's equal to the batch node count.
    """
    # NOTE(review): this returns shape[1], which per Create() is the label
    # dimensionality, not the y_count in shape[0] — confirm this is intended,
    # since RollingResults uses it as the default averaging weight.
    return self.targets.shape[1]

  def __repr__(self) -> str:
    return (
      f"accuracy={self.accuracy:.2%}%, "
      f"precision={self.precision:.3f}, "
      f"recall={self.recall:.3f}, "
      f"f1={self.f1:.3f}"
    )

  def __eq__(self, rhs: "Results"):
    """Compare batch results by accuracy only.

    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable — confirm no caller puts Results in a set/dict key.
    """
    return self.accuracy == rhs.accuracy

  def __gt__(self, rhs: "Results"):
    """Compare batch results by accuracy only."""
    return self.accuracy > rhs.accuracy

  @classmethod
  def Create(
    cls,
    targets: np.ndarray,
    predictions: np.ndarray,
    iteration_count: int = 1,
    model_converged: bool = True,
    learning_rate: Optional[float] = None,
    loss: Optional[float] = None,
  ):
    """Construct a results instance from 1-hot targets and predictions.

    This is the preferred means of construct a Results instance, which takes
    care of evaluating all of the metrics for you. The behavior of metrics
    calculation is dependent on the --batch_scores_averaging_method flag.

    Args:
      targets: An array of 1-hot target vectors with
        shape (y_count, y_dimensionality), dtype int32.
      predictions: An array of 1-hot prediction vectors with
        shape (y_count, y_dimensionality), dtype int32.
      iteration_count: For iterative models, the number of model iterations to
        compute the final result.
      model_converged: For iterative models, whether model converged.
      learning_rate: The model learning rate, if applicable.
      loss: The model loss, if applicable.

    Returns:
      A Results instance.

    Raises:
      TypeError: If the shapes disagree or the label dimensionality is < 2.
    """
    if targets.shape != predictions.shape:
      raise TypeError(
        f"Expected model to produce targets with shape {targets.shape} but "
        f"instead received predictions with shape {predictions.shape}"
      )
    y_dimensionality = targets.shape[1]
    if y_dimensionality < 2:
      raise TypeError(
        f"Expected label dimensionality > 1, received {y_dimensionality}"
      )

    # Create dense arrays of shape (target_count).
    true_y = np.argmax(targets, axis=1)
    pred_y = np.argmax(predictions, axis=1)

    # NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
    # labels use the values [0,...n).
    labels = np.arange(y_dimensionality, dtype=np.int64)

    return cls(
      targets=targets,
      predictions=predictions,
      iteration_count=iteration_count,
      model_converged=model_converged,
      learning_rate=learning_rate,
      loss=loss,
      accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
      precision=sklearn.metrics.precision_score(
        true_y,
        pred_y,
        labels=labels,
        average=FLAGS.batch_scores_averaging_method,
      ),
      recall=sklearn.metrics.recall_score(
        true_y,
        pred_y,
        labels=labels,
        average=FLAGS.batch_scores_averaging_method,
      ),
      f1=sklearn.metrics.f1_score(
        true_y,
        pred_y,
        labels=labels,
        average=FLAGS.batch_scores_averaging_method,
      ),
    )
class RollingResults:
  """Maintain weighted rolling averages across batches."""

  def __init__(self):
    # Sum of all weights seen so far; the denominator for every average.
    self.weight_sum = 0
    self.batch_count = 0
    self.graph_count = 0
    self.target_count = 0
    self.weighted_iteration_count_sum = 0
    self.weighted_model_converged_sum = 0
    self.has_learning_rate = False
    self.weighted_learning_rate_sum = 0
    self.has_loss = False
    self.weighted_loss_sum = 0
    self.weighted_accuracy_sum = 0
    self.weighted_precision_sum = 0
    self.weighted_recall_sum = 0
    self.weighted_f1_sum = 0

  # Annotations are quoted as forward references so this class does not
  # depend on the definition order of Data / Results in the module.
  def Update(
    self, data: "Data", results: "Results", weight: Optional[float] = None
  ) -> None:
    """Update the rolling results with a new batch.

    Args:
      data: The batch data used to produce the results.
      results: The batch results to update the current state with.
      weight: A weight to assign to weighted sums. E.g. to weight results
        across all targets, use weight=results.target_count. To weight across
        graphs, use weight=batch.graph_count. By default, weight by target
        count.
    """
    if weight is None:
      weight = results.target_count

    self.weight_sum += weight
    self.batch_count += 1
    self.graph_count += data.graph_count
    self.target_count += results.target_count
    self.weighted_iteration_count_sum += results.iteration_count * weight
    self.weighted_model_converged_sum += (
      weight if results.model_converged else 0
    )
    if results.has_learning_rate:
      self.has_learning_rate = True
      self.weighted_learning_rate_sum += results.learning_rate * weight
    if results.has_loss:
      self.has_loss = True
      self.weighted_loss_sum += results.loss * weight
    self.weighted_accuracy_sum += results.accuracy * weight
    self.weighted_precision_sum += results.precision * weight
    self.weighted_recall_sum += results.recall * weight
    self.weighted_f1_sum += results.f1 * weight

  # All averages guard against division by zero with max(weight_sum, 1).

  @property
  def iteration_count(self) -> float:
    return self.weighted_iteration_count_sum / max(self.weight_sum, 1)

  @property
  def model_converged(self) -> float:
    return self.weighted_model_converged_sum / max(self.weight_sum, 1)

  @property
  def learning_rate(self) -> Optional[float]:
    # Implicitly returns None when no learning rate has been recorded.
    if self.has_learning_rate:
      return self.weighted_learning_rate_sum / max(self.weight_sum, 1)

  @property
  def loss(self) -> Optional[float]:
    # Implicitly returns None when no loss has been recorded.
    if self.has_loss:
      return self.weighted_loss_sum / max(self.weight_sum, 1)

  @property
  def accuracy(self) -> float:
    return self.weighted_accuracy_sum / max(self.weight_sum, 1)

  @property
  def precision(self) -> float:
    return self.weighted_precision_sum / max(self.weight_sum, 1)

  @property
  def recall(self) -> float:
    return self.weighted_recall_sum / max(self.weight_sum, 1)

  @property
  def f1(self) -> float:
    return self.weighted_f1_sum / max(self.weight_sum, 1)
| 30.110738 | 78 | 0.701995 | # Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
"""The model data for a batch."""
graph_ids: List[int]
data: Any
# A flag used to mark that this batch is the end of an iterable sequences of
# batches.
end_of_batches: bool = False
@property
def graph_count(self) -> int:
return len(self.graph_ids)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
class BatchIterator(NamedTuple):
"""A batch iterator"""
batches: Iterable[Data]
# The total number of graphs in all of the batches.
graph_count: int
class Results(NamedTuple):
"""The results of running a batch through a model.
Don't instantiate this tuple directly, use Results.Create().
"""
targets: np.array
predictions: np.array
# The number of model iterations to compute the final results. This is used
# by iterative models such as message passing networks.
iteration_count: int
# For iterative models, this indicates whether the state of the model at
# iteration_count had converged on a solution.
model_converged: bool
# The learning rate and loss of models, if applicable.
learning_rate: Optional[float]
loss: Optional[float]
# Batch-level average performance metrics.
accuracy: float
precision: float
recall: float
f1: float
@property
def has_learning_rate(self) -> bool:
return self.learning_rate is not None
@property
def has_loss(self) -> bool:
return self.loss is not None
@property
def target_count(self) -> int:
"""Get the number of targets in the batch.
For graph-level classifiers, this will be equal to Data.graph_count, else
it's equal to the batch node count.
"""
return self.targets.shape[1]
def __repr__(self) -> str:
return (
f"accuracy={self.accuracy:.2%}%, "
f"precision={self.precision:.3f}, "
f"recall={self.recall:.3f}, "
f"f1={self.f1:.3f}"
)
def __eq__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy == rhs.accuracy
def __gt__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy > rhs.accuracy
@classmethod
def Create(
cls,
targets: np.array,
predictions: np.array,
iteration_count: int = 1,
model_converged: bool = True,
learning_rate: Optional[float] = None,
loss: Optional[float] = None,
):
"""Construct a results instance from 1-hot targets and predictions.
This is the preferred means of construct a Results instance, which takes
care of evaluating all of the metrics for you. The behavior of metrics
calculation is dependent on the --batch_scores_averaging_method flag.
Args:
targets: An array of 1-hot target vectors with
shape (y_count, y_dimensionality), dtype int32.
predictions: An array of 1-hot prediction vectors with
shape (y_count, y_dimensionality), dtype int32.
iteration_count: For iterative models, the number of model iterations to
compute the final result.
model_converged: For iterative models, whether model converged.
learning_rate: The model learning rate, if applicable.
loss: The model loss, if applicable.
Returns:
A Results instance.
"""
if targets.shape != predictions.shape:
raise TypeError(
f"Expected model to produce targets with shape {targets.shape} but "
f"instead received predictions with shape {predictions.shape}"
)
y_dimensionality = targets.shape[1]
if y_dimensionality < 2:
raise TypeError(
f"Expected label dimensionality > 1, received {y_dimensionality}"
)
# Create dense arrays of shape (target_count).
true_y = np.argmax(targets, axis=1)
pred_y = np.argmax(predictions, axis=1)
# NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
# labels use the values [0,...n).
labels = np.arange(y_dimensionality, dtype=np.int64)
return cls(
targets=targets,
predictions=predictions,
iteration_count=iteration_count,
model_converged=model_converged,
learning_rate=learning_rate,
loss=loss,
accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
precision=sklearn.metrics.precision_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
recall=sklearn.metrics.recall_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
f1=sklearn.metrics.f1_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
)
class RollingResults:
"""Maintain weighted rolling averages across batches."""
def __init__(self):
self.weight_sum = 0
self.batch_count = 0
self.graph_count = 0
self.target_count = 0
self.weighted_iteration_count_sum = 0
self.weighted_model_converged_sum = 0
self.has_learning_rate = False
self.weighted_learning_rate_sum = 0
self.has_loss = False
self.weighted_loss_sum = 0
self.weighted_accuracy_sum = 0
self.weighted_precision_sum = 0
self.weighted_recall_sum = 0
self.weighted_f1_sum = 0
def Update(
self, data: Data, results: Results, weight: Optional[float] = None
) -> None:
"""Update the rolling results with a new batch.
Args:
data: The batch data used to produce the results.
results: The batch results to update the current state with.
weight: A weight to assign to weighted sums. E.g. to weight results
across all targets, use weight=results.target_count. To weight across
targets, use weight=batch.target_count. To weight across
graphs, use weight=batch.graph_count. By default, weight by target
count.
"""
if weight is None:
weight = results.target_count
self.weight_sum += weight
self.batch_count += 1
self.graph_count += data.graph_count
self.target_count += results.target_count
self.weighted_iteration_count_sum += results.iteration_count * weight
self.weighted_model_converged_sum += (
weight if results.model_converged else 0
)
if results.has_learning_rate:
self.has_learning_rate = True
self.weighted_learning_rate_sum += results.learning_rate * weight
if results.has_loss:
self.has_loss = True
self.weighted_loss_sum += results.loss * weight
self.weighted_accuracy_sum += results.accuracy * weight
self.weighted_precision_sum += results.precision * weight
self.weighted_recall_sum += results.recall * weight
self.weighted_f1_sum += results.f1 * weight
@property
def iteration_count(self) -> float:
return self.weighted_iteration_count_sum / max(self.weight_sum, 1)
@property
def model_converged(self) -> float:
return self.weighted_model_converged_sum / max(self.weight_sum, 1)
@property
def learning_rate(self) -> Optional[float]:
if self.has_learning_rate:
return self.weighted_learning_rate_sum / max(self.weight_sum, 1)
@property
def loss(self) -> Optional[float]:
if self.has_loss:
return self.weighted_loss_sum / max(self.weight_sum, 1)
@property
def accuracy(self) -> float:
return self.weighted_accuracy_sum / max(self.weight_sum, 1)
@property
def precision(self) -> float:
return self.weighted_precision_sum / max(self.weight_sum, 1)
@property
def recall(self) -> float:
return self.weighted_recall_sum / max(self.weight_sum, 1)
@property
def f1(self) -> float:
return self.weighted_f1_sum / max(self.weight_sum, 1)
| 0 | 0 |
bd43660e61d12126149a6be149f44586a149537b | 102 | py | Python | python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py | wesleyegberto/dojos-languages | 87170a722efac1247c713daa21cb3fcc39f5c5c1 | [
"MIT"
] | null | null | null | python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py | wesleyegberto/dojos-languages | 87170a722efac1247c713daa21cb3fcc39f5c5c1 | [
"MIT"
] | null | null | null | python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py | wesleyegberto/dojos-languages | 87170a722efac1247c713daa21cb3fcc39f5c5c1 | [
"MIT"
] | null | null | null | # A module inside the package
print("Module: ", __name__)
def do_stuff():
print("Doing stuff")
| 12.75 | 29 | 0.666667 | # A module inside the package
print("Module: ", __name__)
def do_stuff():
print("Doing stuff")
| 0 | 0 |
ea15f02ea347cde7a8bc22e6cd2d89594e3df3dd | 1,096 | py | Python | easy/1025-Divisor Game.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | 2 | 2020-05-08T02:17:17.000Z | 2020-05-17T04:55:56.000Z | easy/1025-Divisor Game.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | easy/1025-Divisor Game.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | """
https://leetcode.com/problems/divisor-game/
Alice and Bob take turns playing a game, with Alice starting first.
Initially, there is a number N on the chalkboard. On each player's turn, that player makes a move consisting of:
Choosing any x with 0 < x < N and N % x == 0.
Replacing the number N on the chalkboard with N - x.
Also, if a player cannot make a move, they lose the game.
Return True if and only if Alice wins the game, assuming both players play optimally.
Example 1:
Input: 2
Output: true
Explanation: Alice chooses 1, and Bob has no more moves.
Example 2:
Input: 3
Output: false
Explanation: Alice chooses 1, Bob chooses 1, and Alice has no more moves.
Note:
1 <= N <= 1000
"""
# time complexity: O(n*sqrt(n)), space complexity: O(n)
class Solution:
    def divisorGame(self, N: int) -> bool:
        """Return True iff Alice (who moves first) wins the divisor game.

        flags[i] is True when the player to move on number i can force a
        win: i.e. some proper divisor x of i leads to a losing position
        i - x for the opponent.
        """
        import math

        flags = [False] * (N + 1)
        for i in range(2, N + 1):
            # Enumerate ALL proper divisors of i by walking up to sqrt(i)
            # and adding both members of each divisor pair. The previous
            # version only tried divisors <= sqrt(i) + 1, silently skipping
            # large divisors; its answers were right only by parity luck.
            divisors = set()
            for j in range(1, int(math.sqrt(i)) + 1):
                if i % j == 0:
                    divisors.add(j)
                    if i // j != i:
                        divisors.add(i // j)
            flags[i] = any(not flags[i - x] for x in divisors)
        return flags[N]
https://leetcode.com/problems/divisor-game/
Alice and Bob take turns playing a game, with Alice starting first.
Initially, there is a number N on the chalkboard. On each player's turn, that player makes a move consisting of:
Choosing any x with 0 < x < N and N % x == 0.
Replacing the number N on the chalkboard with N - x.
Also, if a player cannot make a move, they lose the game.
Return True if and only if Alice wins the game, assuming both players play optimally.
Example 1:
Input: 2
Output: true
Explanation: Alice chooses 1, and Bob has no more moves.
Example 2:
Input: 3
Output: false
Explanation: Alice chooses 1, Bob chooses 1, and Alice has no more moves.
Note:
1 <= N <= 1000
"""
# time complexity: O(nlogn), space complexity: O(n)
class Solution:
    def divisorGame(self, N: int) -> bool:
        """Return True iff the first player (Alice) wins the divisor game for N.

        Bottom-up DP: flags[k] is True when the player to move at value k can
        win, i.e. some divisor j of k leads to a losing position k - j.
        """
        # flags[0] and flags[1] stay False: no legal move exists, the mover loses.
        flags = [False] * (N+1)
        import math
        for i in range(2, N+1):
            # Candidate divisors j are tried only up to int(sqrt(i)) + 1;
            # j == 1 always divides i, so every i has at least one candidate.
            for j in range(1, int(math.sqrt(i))+2):
                if i % j == 0 and flags[i-j] == False:
                    flags[i] = True
                    break
return flags[N] | 0 | 0 |
56a51529edb0ee5b8e263f380f1c7725ffa73944 | 4,540 | py | Python | evalml/automl/engine/sequential_engine.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | evalml/automl/engine/sequential_engine.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | evalml/automl/engine/sequential_engine.py | BlockchainClimateInstitute/price_microservice | 11d1cff8965fe1befc997e9da3dc09efceed4579 | [
"BSD-3-Clause"
] | null | null | null | import sys
import traceback
import numpy as np
from evalml.automl.engine import EngineBase
from evalml.exceptions import PipelineScoreError
from evalml.model_family import ModelFamily
from evalml.objectives.utils import get_objective
from evalml.utils import get_logger
logger = get_logger(__file__)
class SequentialEngine(EngineBase):
    """The default engine for the AutoML search. Trains and scores pipelines locally, one after another."""
    def evaluate_batch(self, pipelines):
        """Evaluate a batch of pipelines using the current dataset and AutoML state.
        Arguments:
            pipelines (list(PipelineBase)): A batch of pipelines to be fitted and evaluated.
        Returns:
            list (int): a list of the new pipeline IDs which were created by the AutoML search.
        """
        if self.X_train is None or self.y_train is None:
            raise ValueError("Dataset has not been loaded into the engine.")
        new_pipeline_ids = []
        index = 0
        # The continue-callback is re-checked before every pipeline, so an
        # interrupted search stops mid-batch instead of finishing it.
        while self._should_continue_callback() and index < len(pipelines):
            pipeline = pipelines[index]
            self._pre_evaluation_callback(pipeline)
            X, y = self.X_train, self.y_train
            if pipeline.model_family == ModelFamily.ENSEMBLE:
                # Ensemble pipelines train only on the held-out ensembling rows.
                X, y = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
            elif self.ensembling_indices is not None:
                # Other pipelines train on the complement of the ensembling rows.
                training_indices = [i for i in range(len(self.X_train)) if i not in self.ensembling_indices]
                X = self.X_train.iloc[training_indices]
                y = self.y_train.iloc[training_indices]
            evaluation_result = EngineBase.train_and_score_pipeline(pipeline, self.automl, X, y)
            new_pipeline_ids.append(self._post_evaluation_callback(pipeline, evaluation_result))
            index += 1
        return new_pipeline_ids
    def train_batch(self, pipelines):
        """Train a batch of pipelines using the current dataset.
        Arguments:
            pipelines (list(PipelineBase)): A batch of pipelines to fit.
        Returns:
            dict[str, PipelineBase]: Dict of fitted pipelines keyed by pipeline name.
        """
        super().train_batch(pipelines)
        fitted_pipelines = {}
        for pipeline in pipelines:
            try:
                fitted_pipeline = EngineBase.train_pipeline(
                    pipeline, self.X_train, self.y_train,
                    self.automl.optimize_thresholds,
                    self.automl.objective
                )
                fitted_pipelines[fitted_pipeline.name] = fitted_pipeline
            except Exception as e:
                # A failed pipeline is logged and simply omitted from the
                # result; the rest of the batch still trains.
                logger.error(f'Train error for {pipeline.name}: {str(e)}')
                tb = traceback.format_tb(sys.exc_info()[2])
                logger.error("Traceback:")
                logger.error("\n".join(tb))
        return fitted_pipelines
    def score_batch(self, pipelines, X, y, objectives):
        """Score a batch of pipelines.
        Arguments:
            pipelines (list(PipelineBase)): A batch of fitted pipelines to score.
            X (ww.DataTable, pd.DataFrame): Features to score on.
            y (ww.DataTable, pd.DataFrame): Data to score on.
            objectives (list(ObjectiveBase), list(str)): Objectives to score on.
        Returns:
            dict: Dict containing scores for all objectives for all pipelines. Keyed by pipeline name.
        """
        super().score_batch(pipelines, X, y, objectives)
        scores = {}
        objectives = [get_objective(o, return_instance=True) for o in objectives]
        for pipeline in pipelines:
            try:
                scores[pipeline.name] = pipeline.score(X, y, objectives)
            except Exception as e:
                logger.error(f"Score error for {pipeline.name}: {str(e)}")
                if isinstance(e, PipelineScoreError):
                    # Keep whichever objectives scored successfully; the ones
                    # that raised are reported as NaN.
                    nan_scores = {objective: np.nan for objective in e.exceptions}
                    scores[pipeline.name] = {**nan_scores, **e.scored_successfully}
                else:
                    # Traceback already included in the PipelineScoreError so we only
                    # need to include it for all other errors
                    tb = traceback.format_tb(sys.exc_info()[2])
                    logger.error("Traceback:")
                    logger.error("\n".join(tb))
                    scores[pipeline.name] = {objective.name: np.nan for objective in objectives}
        return scores
| 42.830189 | 109 | 0.620485 | import sys
import traceback
import numpy as np
from evalml.automl.engine import EngineBase
from evalml.exceptions import PipelineScoreError
from evalml.model_family import ModelFamily
from evalml.objectives.utils import get_objective
from evalml.utils import get_logger
logger = get_logger(__file__)
class SequentialEngine(EngineBase):
    """The default engine for the AutoML search. Trains and scores pipelines locally, one after another."""
    def evaluate_batch(self, pipelines):
        """Evaluate a batch of pipelines using the current dataset and AutoML state.
        Arguments:
            pipelines (list(PipelineBase)): A batch of pipelines to be fitted and evaluated.
        Returns:
            list (int): a list of the new pipeline IDs which were created by the AutoML search.
        """
        if self.X_train is None or self.y_train is None:
            raise ValueError("Dataset has not been loaded into the engine.")
        new_pipeline_ids = []
        index = 0
        # The continue-callback is re-checked before every pipeline, so an
        # interrupted search stops mid-batch instead of finishing it.
        while self._should_continue_callback() and index < len(pipelines):
            pipeline = pipelines[index]
            self._pre_evaluation_callback(pipeline)
            X, y = self.X_train, self.y_train
            if pipeline.model_family == ModelFamily.ENSEMBLE:
                # Ensemble pipelines train only on the held-out ensembling rows.
                X, y = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
            elif self.ensembling_indices is not None:
                # Other pipelines train on the complement of the ensembling rows.
                training_indices = [i for i in range(len(self.X_train)) if i not in self.ensembling_indices]
                X = self.X_train.iloc[training_indices]
                y = self.y_train.iloc[training_indices]
            evaluation_result = EngineBase.train_and_score_pipeline(pipeline, self.automl, X, y)
            new_pipeline_ids.append(self._post_evaluation_callback(pipeline, evaluation_result))
            index += 1
        return new_pipeline_ids
    def train_batch(self, pipelines):
        """Train a batch of pipelines using the current dataset.
        Arguments:
            pipelines (list(PipelineBase)): A batch of pipelines to fit.
        Returns:
            dict[str, PipelineBase]: Dict of fitted pipelines keyed by pipeline name.
        """
        super().train_batch(pipelines)
        fitted_pipelines = {}
        for pipeline in pipelines:
            try:
                fitted_pipeline = EngineBase.train_pipeline(
                    pipeline, self.X_train, self.y_train,
                    self.automl.optimize_thresholds,
                    self.automl.objective
                )
                fitted_pipelines[fitted_pipeline.name] = fitted_pipeline
            except Exception as e:
                # A failed pipeline is logged and simply omitted from the
                # result; the rest of the batch still trains.
                logger.error(f'Train error for {pipeline.name}: {str(e)}')
                tb = traceback.format_tb(sys.exc_info()[2])
                logger.error("Traceback:")
                logger.error("\n".join(tb))
        return fitted_pipelines
    def score_batch(self, pipelines, X, y, objectives):
        """Score a batch of pipelines.
        Arguments:
            pipelines (list(PipelineBase)): A batch of fitted pipelines to score.
            X (ww.DataTable, pd.DataFrame): Features to score on.
            y (ww.DataTable, pd.DataFrame): Data to score on.
            objectives (list(ObjectiveBase), list(str)): Objectives to score on.
        Returns:
            dict: Dict containing scores for all objectives for all pipelines. Keyed by pipeline name.
        """
        super().score_batch(pipelines, X, y, objectives)
        scores = {}
        objectives = [get_objective(o, return_instance=True) for o in objectives]
        for pipeline in pipelines:
            try:
                scores[pipeline.name] = pipeline.score(X, y, objectives)
            except Exception as e:
                logger.error(f"Score error for {pipeline.name}: {str(e)}")
                if isinstance(e, PipelineScoreError):
                    # Keep whichever objectives scored successfully; the ones
                    # that raised are reported as NaN.
                    nan_scores = {objective: np.nan for objective in e.exceptions}
                    scores[pipeline.name] = {**nan_scores, **e.scored_successfully}
                else:
                    # Traceback already included in the PipelineScoreError so we only
                    # need to include it for all other errors
                    tb = traceback.format_tb(sys.exc_info()[2])
                    logger.error("Traceback:")
                    logger.error("\n".join(tb))
                    scores[pipeline.name] = {objective.name: np.nan for objective in objectives}
        return scores
| 0 | 0 |
ee2569e70a693fb7569365e25bd376b146aaf167 | 877 | py | Python | source/read-file/app.py | aws-samples/aws-serverless-batch-architecture | 1672d7623c2a0b6141bf83d019efe3c6c70efd00 | [
"MIT-0"
] | 14 | 2021-11-12T02:02:46.000Z | 2022-03-01T23:28:48.000Z | source/read-file/app.py | aws-samples/aws-serverless-batch-architecture | 1672d7623c2a0b6141bf83d019efe3c6c70efd00 | [
"MIT-0"
] | 1 | 2021-11-01T02:56:34.000Z | 2022-01-17T00:19:53.000Z | source/read-file/app.py | aws-samples/aws-serverless-batch-architecture | 1672d7623c2a0b6141bf83d019efe3c6c70efd00 | [
"MIT-0"
] | 1 | 2022-03-24T13:00:45.000Z | 2022-03-24T13:00:45.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import csv
import s3fs
import os
s3 = s3fs.S3FileSystem(anon=False)
header = [
'uuid',
'country',
'itemType',
'salesChannel',
'orderPriority',
'orderDate',
'region',
'shipDate'
]
def lambda_handler(event, context):
    """Read the CSV named in event['input']['FilePath'] from S3 and return its
    data rows as dicts keyed by the module-level `header` columns.

    The file's own first row is skipped; each remaining row is mapped onto
    `header` positionally (a short row raises IndexError, as before).
    """
    input_file = event['input']['FilePath']
    output_data = []
    with s3.open(input_file, 'r', newline='', encoding='utf-8-sig') as in_file:
        for row_number, row in enumerate(csv.reader(in_file)):
            if row_number == 0:
                # Drop the header line shipped inside the file itself.
                continue
            output_data.append({column: row[i] for i, column in enumerate(header)})
    return output_data
| 21.390244 | 78 | 0.58951 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import csv
import s3fs
import os
s3 = s3fs.S3FileSystem(anon=False)
header = [
'uuid',
'country',
'itemType',
'salesChannel',
'orderPriority',
'orderDate',
'region',
'shipDate'
]
def lambda_handler(event, context):
    # Read the CSV object named in event['input']['FilePath'] from S3 and map
    # each data row onto the module-level `header` column names.
    input_file = event['input']['FilePath']
    output_data = []
    skip_first = 0
    with s3.open(input_file, 'r', newline='', encoding='utf-8-sig') as inFile:
        file_reader = csv.reader(inFile)
        for row in file_reader:
            if skip_first == 0:
                # Skip the header row of the file itself.
                skip_first = skip_first + 1
                continue
            new_object = {}
            # Positional mapping: a row shorter than `header` raises IndexError.
            for i in range(len(header)):
                new_object[header[i]] = row[i]
            output_data.append(new_object)
    return output_data
| 0 | 0 |
85f1aa282d6853f2f160254093d5add98f8f0f8b | 5,210 | py | Python | modules/differentialLine.py | inconvergent/differential-line-cuda | 07927dff7c3178821776fccd5ad0aa196a3bb858 | [
"MIT"
] | 21 | 2016-05-22T17:40:02.000Z | 2022-02-03T11:36:31.000Z | modules/differentialLine.py | inconvergent/differential-line-cuda | 07927dff7c3178821776fccd5ad0aa196a3bb858 | [
"MIT"
] | null | null | null | modules/differentialLine.py | inconvergent/differential-line-cuda | 07927dff7c3178821776fccd5ad0aa196a3bb858 | [
"MIT"
] | 2 | 2017-03-17T05:13:16.000Z | 2021-12-09T02:20:03.000Z | # -*- coding: utf-8 -*-
from numpy import pi
from numpy import zeros
from numpy import sin
from numpy import cos
from numpy import sqrt
from numpy.random import random
from numpy import float32 as npfloat
from numpy import int32 as npint
TWOPI = pi*2
PI = pi
class DifferentialLine(object):
  """A closed chain of linked nodes in the unit square whose per-step motion
  is computed by CUDA kernels (pycuda); kernel sources live in
  modules/cuda/*.cu."""
  def __init__(
      self,
      size,
      stp,
      spring_stp,
      reject_stp,
      near_rad,
      far_rad,
      threads = 256,
      nmax = 1000000
      ):
    # Step sizes (stp, spring_stp, reject_stp) and interaction radii
    # (near_rad, far_rad) are forwarded verbatim to the step kernel.
    self.itt = 0
    self.threads = threads
    self.nmax = nmax
    self.size = size
    self.one = 1.0/size
    self.stp = stp
    self.spring_stp = spring_stp
    self.reject_stp = reject_stp
    self.near_rad = near_rad
    self.far_rad = far_rad
    self.__init()
    self.__cuda_init()
  def __init(self):
    # Allocate fixed-capacity (nmax) host arrays and the uniform zone grid
    # used for spatial binning; each cell spans roughly 2*far_rad.
    self.num = 0
    nz = int(1.0/(2*self.far_rad))
    self.nz = nz
    self.nz2 = nz**2
    nmax = self.nmax
    self.xy = zeros((nmax, 2), npfloat)  # node positions
    self.dxy = zeros((nmax, 2), npfloat)  # displacements written by the kernel
    self.tmp = zeros((nmax, 1), npfloat)
    self.link_len = zeros((nmax, 2), npfloat)  # written back by the step kernel
    self.link_curv = zeros((nmax, 2), npfloat)  # written back by the step kernel
    self.links = zeros((nmax, 2), npint)  # ring topology: [prev, next] per node
    zone_map_size = self.nz2*64  # initial capacity: 64 node slots per zone
    self.zone_node = zeros(zone_map_size, npint)
    self.zone_num = zeros(self.nz2, npint)
  def __cuda_init(self):
    # Compile the three kernels, baking the block size into the source via
    # the _THREADS_ substitution.
    import pycuda.autoinit
    from .helpers import load_kernel
    self.cuda_agg_count = load_kernel(
        'modules/cuda/agg_count.cu',
        'agg_count',
        subs={'_THREADS_': self.threads}
        )
    self.cuda_agg = load_kernel(
        'modules/cuda/agg.cu',
        'agg',
        subs={'_THREADS_': self.threads}
        )
    self.cuda_step = load_kernel(
        'modules/cuda/step.cu',
        'step',
        subs={
          '_THREADS_': self.threads
          }
        )
  def init_circle(self, n, rad):
    """Seed the system: place n nodes at sorted random angles on a circle of
    radius rad centred at (0.5, 0.5) and link them into a closed ring."""
    from numpy import sort
    num = self.num
    links = self.links
    angles = random(n)*TWOPI
    angles = sort(angles)
    xx = 0.5 + cos(angles)*rad
    yy = 0.5 + sin(angles)*rad
    self.xy[num:num+n, 0] = xx
    self.xy[num:num+n, 1] = yy
    for i in range(num+1, num+n-1):
      links[i,0] = i-1
      links[i,1] = i+1
    # Close the ring: wire the first and last nodes to each other.
    links[num,1] = num+1
    links[num,0] = num+n-1
    links[(num+n-1),1] = num
    links[(num+n-1),0] = num+n-2
    self.num = num+n
  def spawn_normal(self, limit, prob=0.01, t=None):
    """Uniform growth: each node is selected with probability prob; when its
    forward link is longer than limit, a new node is inserted at the link's
    midpoint and spliced into the ring. The t argument is unused."""
    links = self.links
    link_len = self.link_len
    xy = self.xy
    num = self.num
    mask = (random(num)<prob).nonzero()[0]
    if len(mask)<1:
      return
    for i in mask:
      b = links[i,1]
      l = link_len[i,1]
      if l>limit:
        newxy = (xy[b,:]+xy[i,:])*0.5
        xy[num,:] = newxy
        links[i,1] = num
        links[num,0] = i
        links[num,1] = b
        links[b,0] = num
        num += 1
    self.num = num
  def spawn_curl(self, limit, prob=0.01, t=None):
    """Curvature-weighted growth: node i splits its forward link (midpoint
    insertion, as in spawn_normal) when a uniform sample exceeds
    sqrt(link_curv) and the link is longer than limit.
    NOTE(review): curve is sliced from index 1 but zipped against nodes from
    index 0, so thresholds are shifted by one node -- confirm intended.
    The prob and t arguments are unused here."""
    links = self.links
    link_len = self.link_len
    xy = self.xy
    num = self.num
    curve = sqrt(self.link_curv[1:num,0])
    for i, (r, t) in enumerate(zip(random(num), curve)):
      b = links[i,1]
      if r>t and link_len[i,1]>limit:
        newxy = (xy[b,:]+xy[i,:])*0.5
        xy[num,:] = newxy
        links[i,1] = num
        links[num,0] = i
        links[num,1] = b
        links[b,0] = num
        num += 1
    self.num = num
  def get_line(self):
    """Walk the ring starting from links[0,0] and return the visiting order
    as an int array; the walk appends the start node (and its neighbour)
    again once the loop closes."""
    from numpy import array
    links = self.links
    curr = links[0,0]
    first = curr
    order = [first]
    while True:
      a = links[curr,0]
      b = links[curr,1]
      if a != curr:
        curr = a
      else:
        curr = b
      order.append(curr)
      if curr == first:
        order.append(a)
        break
    return array(order, npint)
  def step(self, t=None):
    """Advance one iteration: count nodes per zone (agg_count), build the
    flat zone->node map (agg), compute displacements on the GPU (step),
    then apply them to the host positions. The t argument is unused."""
    import pycuda.driver as drv
    self.itt += 1
    num = self.num
    xy = self.xy
    dxy = self.dxy
    tmp = self.tmp
    link_len = self.link_len
    link_curv = self.link_curv
    blocks = num//self.threads + 1
    self.zone_num[:] = 0
    self.cuda_agg_count(
        npint(num),
        npint(self.nz),
        drv.In(xy[:num,:]),
        drv.InOut(self.zone_num),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    # zone_leap = occupancy of the most crowded zone; zone_node reserves that
    # many slots per zone and is grown (doubled) when capacity is exceeded.
    zone_leap = self.zone_num[:].max()
    zone_map_size = self.nz2*zone_leap
    if zone_map_size>len(self.zone_node):
      print('resize, new zone leap: ', zone_map_size*2./self.nz2)
      self.zone_node = zeros(zone_map_size*2, npint)
    self.zone_num[:] = 0
    self.cuda_agg(
        npint(num),
        npint(self.nz),
        npint(zone_leap),
        drv.In(xy[:num,:]),
        drv.InOut(self.zone_num),
        drv.InOut(self.zone_node),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    self.cuda_step(
        npint(num),
        npint(self.nz),
        npint(zone_leap),
        drv.In(xy[:num,:]),
        drv.Out(dxy[:num,:]),
        drv.Out(tmp[:num,:]),
        drv.Out(link_len[:num,:]),
        drv.Out(link_curv[:num,:]),
        drv.In(self.links[:num,:]),
        drv.In(self.zone_num),
        drv.In(self.zone_node),
        npfloat(self.stp),
        npfloat(self.reject_stp),
        npfloat(self.spring_stp),
        npfloat(self.near_rad),
        npfloat(self.far_rad),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    xy[:num,:] += dxy[:num,:]
| 18.876812 | 65 | 0.55739 | # -*- coding: utf-8 -*-
from numpy import pi
from numpy import zeros
from numpy import sin
from numpy import cos
from numpy import sqrt
from numpy.random import random
from numpy import float32 as npfloat
from numpy import int32 as npint
TWOPI = pi*2
PI = pi
class DifferentialLine(object):
  """A closed chain of linked nodes in the unit square whose per-step motion
  is computed by CUDA kernels (pycuda); kernel sources live in
  modules/cuda/*.cu."""
  def __init__(
      self,
      size,
      stp,
      spring_stp,
      reject_stp,
      near_rad,
      far_rad,
      threads = 256,
      nmax = 1000000
      ):
    # Step sizes (stp, spring_stp, reject_stp) and interaction radii
    # (near_rad, far_rad) are forwarded verbatim to the step kernel.
    self.itt = 0
    self.threads = threads
    self.nmax = nmax
    self.size = size
    self.one = 1.0/size
    self.stp = stp
    self.spring_stp = spring_stp
    self.reject_stp = reject_stp
    self.near_rad = near_rad
    self.far_rad = far_rad
    self.__init()
    self.__cuda_init()
  def __init(self):
    # Allocate fixed-capacity (nmax) host arrays and the uniform zone grid
    # used for spatial binning; each cell spans roughly 2*far_rad.
    self.num = 0
    nz = int(1.0/(2*self.far_rad))
    self.nz = nz
    self.nz2 = nz**2
    nmax = self.nmax
    self.xy = zeros((nmax, 2), npfloat)  # node positions
    self.dxy = zeros((nmax, 2), npfloat)  # displacements written by the kernel
    self.tmp = zeros((nmax, 1), npfloat)
    self.link_len = zeros((nmax, 2), npfloat)  # written back by the step kernel
    self.link_curv = zeros((nmax, 2), npfloat)  # written back by the step kernel
    self.links = zeros((nmax, 2), npint)  # ring topology: [prev, next] per node
    zone_map_size = self.nz2*64  # initial capacity: 64 node slots per zone
    self.zone_node = zeros(zone_map_size, npint)
    self.zone_num = zeros(self.nz2, npint)
  def __cuda_init(self):
    # Compile the three kernels, baking the block size into the source via
    # the _THREADS_ substitution.
    import pycuda.autoinit
    from .helpers import load_kernel
    self.cuda_agg_count = load_kernel(
        'modules/cuda/agg_count.cu',
        'agg_count',
        subs={'_THREADS_': self.threads}
        )
    self.cuda_agg = load_kernel(
        'modules/cuda/agg.cu',
        'agg',
        subs={'_THREADS_': self.threads}
        )
    self.cuda_step = load_kernel(
        'modules/cuda/step.cu',
        'step',
        subs={
          '_THREADS_': self.threads
          }
        )
  def init_circle(self, n, rad):
    """Seed the system: place n nodes at sorted random angles on a circle of
    radius rad centred at (0.5, 0.5) and link them into a closed ring."""
    from numpy import sort
    num = self.num
    links = self.links
    angles = random(n)*TWOPI
    angles = sort(angles)
    xx = 0.5 + cos(angles)*rad
    yy = 0.5 + sin(angles)*rad
    self.xy[num:num+n, 0] = xx
    self.xy[num:num+n, 1] = yy
    for i in range(num+1, num+n-1):
      links[i,0] = i-1
      links[i,1] = i+1
    # Close the ring: wire the first and last nodes to each other.
    links[num,1] = num+1
    links[num,0] = num+n-1
    links[(num+n-1),1] = num
    links[(num+n-1),0] = num+n-2
    self.num = num+n
  def spawn_normal(self, limit, prob=0.01, t=None):
    """Uniform growth: each node is selected with probability prob; when its
    forward link is longer than limit, a new node is inserted at the link's
    midpoint and spliced into the ring. The t argument is unused."""
    links = self.links
    link_len = self.link_len
    xy = self.xy
    num = self.num
    mask = (random(num)<prob).nonzero()[0]
    if len(mask)<1:
      return
    for i in mask:
      b = links[i,1]
      l = link_len[i,1]
      if l>limit:
        newxy = (xy[b,:]+xy[i,:])*0.5
        xy[num,:] = newxy
        links[i,1] = num
        links[num,0] = i
        links[num,1] = b
        links[b,0] = num
        num += 1
    self.num = num
  def spawn_curl(self, limit, prob=0.01, t=None):
    """Curvature-weighted growth: node i splits its forward link (midpoint
    insertion, as in spawn_normal) when a uniform sample exceeds
    sqrt(link_curv) and the link is longer than limit.
    NOTE(review): curve is sliced from index 1 but zipped against nodes from
    index 0, so thresholds are shifted by one node -- confirm intended.
    The prob and t arguments are unused here."""
    links = self.links
    link_len = self.link_len
    xy = self.xy
    num = self.num
    curve = sqrt(self.link_curv[1:num,0])
    for i, (r, t) in enumerate(zip(random(num), curve)):
      b = links[i,1]
      if r>t and link_len[i,1]>limit:
        newxy = (xy[b,:]+xy[i,:])*0.5
        xy[num,:] = newxy
        links[i,1] = num
        links[num,0] = i
        links[num,1] = b
        links[b,0] = num
        num += 1
    self.num = num
  def get_line(self):
    """Walk the ring starting from links[0,0] and return the visiting order
    as an int array; the walk appends the start node (and its neighbour)
    again once the loop closes."""
    from numpy import array
    links = self.links
    curr = links[0,0]
    first = curr
    order = [first]
    while True:
      a = links[curr,0]
      b = links[curr,1]
      if a != curr:
        curr = a
      else:
        curr = b
      order.append(curr)
      if curr == first:
        order.append(a)
        break
    return array(order, npint)
  def step(self, t=None):
    """Advance one iteration: count nodes per zone (agg_count), build the
    flat zone->node map (agg), compute displacements on the GPU (step),
    then apply them to the host positions. The t argument is unused."""
    import pycuda.driver as drv
    self.itt += 1
    num = self.num
    xy = self.xy
    dxy = self.dxy
    tmp = self.tmp
    link_len = self.link_len
    link_curv = self.link_curv
    blocks = num//self.threads + 1
    self.zone_num[:] = 0
    self.cuda_agg_count(
        npint(num),
        npint(self.nz),
        drv.In(xy[:num,:]),
        drv.InOut(self.zone_num),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    # zone_leap = occupancy of the most crowded zone; zone_node reserves that
    # many slots per zone and is grown (doubled) when capacity is exceeded.
    zone_leap = self.zone_num[:].max()
    zone_map_size = self.nz2*zone_leap
    if zone_map_size>len(self.zone_node):
      print('resize, new zone leap: ', zone_map_size*2./self.nz2)
      self.zone_node = zeros(zone_map_size*2, npint)
    self.zone_num[:] = 0
    self.cuda_agg(
        npint(num),
        npint(self.nz),
        npint(zone_leap),
        drv.In(xy[:num,:]),
        drv.InOut(self.zone_num),
        drv.InOut(self.zone_node),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    self.cuda_step(
        npint(num),
        npint(self.nz),
        npint(zone_leap),
        drv.In(xy[:num,:]),
        drv.Out(dxy[:num,:]),
        drv.Out(tmp[:num,:]),
        drv.Out(link_len[:num,:]),
        drv.Out(link_curv[:num,:]),
        drv.In(self.links[:num,:]),
        drv.In(self.zone_num),
        drv.In(self.zone_node),
        npfloat(self.stp),
        npfloat(self.reject_stp),
        npfloat(self.spring_stp),
        npfloat(self.near_rad),
        npfloat(self.far_rad),
        block=(self.threads,1,1),
        grid=(blocks,1)
        )
    xy[:num,:] += dxy[:num,:]
| 0 | 0 |
9c9a724dc974172fe6713d50c40457af8df6ee64 | 2,802 | py | Python | main.py | greenactionstudio/openleadr-python | 03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063 | [
"Apache-2.0"
] | null | null | null | main.py | greenactionstudio/openleadr-python | 03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063 | [
"Apache-2.0"
] | null | null | null | main.py | greenactionstudio/openleadr-python | 03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063 | [
"Apache-2.0"
] | 1 | 2021-11-03T02:36:32.000Z | 2021-11-03T02:36:32.000Z | from logging import debug, exception
from flask import Flask, request
import os
import asyncio
import threading
import ssl
import aiohttp
import nest_asyncio
import json
from openleadr.client import OpenADRClient
from openleadr.utils import report_callback
from openleadr.enums import MEASUREMENTS
nest_asyncio.apply()
client = OpenADRClient(ven_name='myven', vtn_url=os.environ.get('VTN_URL'))
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_STATUS')
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_USAGE', measurement= MEASUREMENTS.POWER_REAL)
app = Flask(__name__)
@app.route('/create_party_registration', methods=['POST', 'GET'])
async def create_party_registration():
    """Ask the VTN to (re-)register this VEN using the client's stored IDs."""
    ven_id = client.ven_id
    registration_id = client.registration_id
    await client.create_party_registration(ven_id=ven_id, registration_id=registration_id)
    return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/create_party_registration_while_registered', methods=['POST', 'GET'])
async def create_party_registration_while_registered():
    """Exercise re-registration while a registration already exists."""
    await client.create_party_registration_while_registered()
    response = {'status': 200, 'body': 'return from the create party registration'}
    return response
@app.route('/query_registration', methods=['POST'])
async def query_registration():
    """Forward a registration query to the OpenADR client."""
    await client.query_registration()
    response = {'status': 200, 'body': 'return from the query registration'}
    return response
@app.route('/cancel_party_registration', methods=['POST'])
async def cancel_party_registration():
    """Cancel the current party registration via the OpenADR client."""
    await client.cancel_party_registration()
    response = {'status': 200, 'body': 'return from the cancel registration'}
    return response
@app.route('/register_reports')
async def register_reports():
    """Push the client's configured report metadata to the VTN, if any exist."""
    reports = client.reports
    if reports:
        await client.register_reports(reports)
    return {'status': 200, 'body': 'The VEN has sent register report with metadata.'}
@app.route('/request_event', methods=['POST'])
async def request_event():
    """Poll the VTN for events and dispatch any distributed events locally."""
    response_type, response_payload = await client.request_event()
    # Dispatch only an oadrDistributeEvent carrying a non-empty event list;
    # the `and` chain short-circuits exactly like the original nested ifs.
    has_events = (
        response_type == 'oadrDistributeEvent'
        and 'events' in response_payload
        and len(response_payload['events']) > 0
    )
    if has_events:
        await client._on_event(response_payload)
    return {'status': 200, 'body': 'return from the request event'}
@app.route('/create_opt', methods =['POST'])
async def create_opt():
    """Relay the raw posted opt payload to the OpenADR client."""
    payload = request.data
    return await client.create_opt(payload)
@app.route('/cancel_opt', methods = ['POST'])
async def cancel_opt():
    """Relay the raw posted opt-cancellation payload to the OpenADR client."""
    payload = request.data
    return await client.cancel_opt(payload)
def client_run():
    """Run the OpenADR client's event loop in this (background) thread.

    Creates a dedicated event loop, installs it as the thread's current loop,
    schedules the client coroutine on it, and blocks forever servicing it.
    """
    loop = asyncio.new_event_loop()
    # Fix: new_event_loop() does not install the loop for this thread, so any
    # library code calling asyncio.get_event_loop() here would fail or get a
    # different loop. Register it explicitly before scheduling work.
    asyncio.set_event_loop(loop)
    loop.create_task(client.run())
    loop.run_forever()
if __name__ == "__main__":
    # Run the Flask HTTP API and the OpenADR client loop in parallel threads;
    # the main thread then joins the client thread below to stay alive.
    t1 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': os.environ.get('PORT') })
    t2 = threading.Thread(target=client_run)
    t1.start()
    t2.start()
t2.join() | 38.916667 | 120 | 0.745182 | from logging import debug, exception
from flask import Flask, request
import os
import asyncio
import threading
import ssl
import aiohttp
import nest_asyncio
import json
from openleadr.client import OpenADRClient
from openleadr.utils import report_callback
from openleadr.enums import MEASUREMENTS
nest_asyncio.apply()
client = OpenADRClient(ven_name='myven', vtn_url=os.environ.get('VTN_URL'))
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_STATUS')
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_USAGE', measurement= MEASUREMENTS.POWER_REAL)
app = Flask(__name__)
@app.route('/create_party_registration', methods=['POST', 'GET'])
async def create_party_registration():
    # Re-register with the VTN using the client's stored ven_id/registration_id.
    await client.create_party_registration(ven_id = client.ven_id, registration_id=client.registration_id)
    return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/create_party_registration_while_registered', methods=['POST', 'GET'])
async def create_party_registration_while_registered():
    # Exercise re-registration while a registration already exists.
    await client.create_party_registration_while_registered()
    return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/query_registration', methods=['POST'])
async def query_registration():
    # Forward a registration query to the OpenADR client.
    await client.query_registration()
    return {'status': 200, 'body': 'return from the query registration'}
@app.route('/cancel_party_registration', methods=['POST'])
async def cancel_party_registration():
    # Cancel the current party registration via the OpenADR client.
    await client.cancel_party_registration()
    return {'status': 200, 'body': 'return from the cancel registration'}
@app.route('/register_reports')
async def register_reports():
    # Default GET route: push the client's configured report metadata, if any.
    if client.reports:
        await client.register_reports(client.reports)
    return {'status': 200, 'body': 'The VEN has sent register report with metadata.'}
@app.route('/request_event', methods=['POST'])
async def request_event():
    # Poll the VTN; dispatch locally only when an oadrDistributeEvent with a
    # non-empty event list comes back.
    response_type, response_payload = await client.request_event()
    if response_type == 'oadrDistributeEvent':
        if 'events' in response_payload and len(response_payload['events']) > 0:
            await client._on_event(response_payload)
    return {'status': 200, 'body': 'return from the request event'}
@app.route('/create_opt', methods =['POST'])
async def create_opt():
    # Forward the raw request body to the OpenADR client.
    return await client.create_opt(request.data)
@app.route('/cancel_opt', methods = ['POST'])
async def cancel_opt():
    # Forward the raw request body to the OpenADR client.
    return await client.cancel_opt(request.data)
def client_run():
    # Run the OpenADR client on a dedicated event loop in this thread.
    # NOTE(review): the new loop is never installed via asyncio.set_event_loop,
    # so asyncio.get_event_loop() in this thread would not find it -- confirm
    # client.run() does not rely on the thread's current loop.
    loop = asyncio.new_event_loop()
    loop.create_task(client.run())
    loop.run_forever()
if __name__ == "__main__":
    # Run the Flask HTTP API and the OpenADR client loop in parallel threads;
    # the main thread then joins the client thread below to stay alive.
    t1 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': os.environ.get('PORT') })
    t2 = threading.Thread(target=client_run)
    t1.start()
    t2.start()
t2.join() | 0 | 0 |
0a7253e54a7ce0b7e58517111d748be3f97a40cb | 2,816 | py | Python | sound_lib/external/pybass_aac.py | ctoth/sound_lib | 0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6 | [
"MIT"
] | 1 | 2020-09-03T15:35:03.000Z | 2020-09-03T15:35:03.000Z | sound_lib/external/pybass_aac.py | ctoth/sound_lib | 0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6 | [
"MIT"
] | 2 | 2020-09-25T05:47:44.000Z | 2021-06-25T15:25:34.000Z | sound_lib/external/pybass_aac.py | ctoth/sound_lib | 0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6 | [
"MIT"
] | 2 | 2020-01-05T16:24:20.000Z | 2020-09-03T15:35:07.000Z | from __future__ import absolute_import
# Copyright(c) Max Kolosov 2009 maxkolosov@inbox.ru
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.1'
__versionTime__ = '2009-11-15'
__author__ = 'Max Kolosov <maxkolosov@inbox.ru>'
__doc__ = '''
pybass_aac.py - is ctypes python module for
BASS_AAC - extension to the BASS audio library that enables the playback
of Advanced Audio Coding and MPEG-4 streams (http://www.maresweb.de).
'''
import os, sys, ctypes
from . import pybass
from .paths import x86_path, x64_path
import libloader
# Load the native BASS_AAC shared library for the current architecture.
bass_aac_module = libloader.load_library('bass_aac', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
# Register the plugin with the Bass plugin system.
pybass.BASS_PluginLoad(libloader.find_library_path('bass_aac', x86_path=x86_path, x64_path=x64_path), 0)
# Re-export the ctypes aliases used by the prototypes below.
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
DOWNLOADPROC = pybass.DOWNLOADPROC
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# Additional BASS_SetConfig options
BASS_CONFIG_MP4_VIDEO = 0x10700 # play the audio from MP4 videos
# Additional tags available from BASS_StreamGetTags (for MP4 files)
BASS_TAG_MP4 = 7 # MP4/iTunes metadata
BASS_AAC_STEREO = 0x400000 # downmatrix to stereo
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_AAC = 0x10b00 # AAC
BASS_CTYPE_STREAM_MP4 = 0x10b01 # MP4
# Each binding below keeps the original C prototype as a comment.
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_AAC_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_AAC_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateURL)(const char *url, DWORD offset, DWORD flags, DOWNLOADPROC *proc, void *user);
BASS_AAC_StreamCreateURL = func_type(HSTREAM, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, DOWNLOADPROC, ctypes.c_void_p)(('BASS_AAC_StreamCreateURL', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_AAC_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_AAC_StreamCreateFileUser', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_MP4_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_MP4_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_MP4_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_MP4_StreamCreateFileUser', bass_aac_module))
| 52.148148 | 184 | 0.79652 | from __future__ import absolute_import
# Copyright(c) Max Kolosov 2009 maxkolosov@inbox.ru
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.1'
__versionTime__ = '2009-11-15'
__author__ = 'Max Kolosov <maxkolosov@inbox.ru>'
__doc__ = '''
pybass_aac.py - is ctypes python module for
BASS_AAC - extension to the BASS audio library that enables the playback
of Advanced Audio Coding and MPEG-4 streams (http://www.maresweb.de).
'''
import os, sys, ctypes
from . import pybass
from .paths import x86_path, x64_path
import libloader
# Load the native BASS_AAC shared library for the current architecture.
bass_aac_module = libloader.load_library('bass_aac', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
# Register the plugin with the Bass plugin system.
pybass.BASS_PluginLoad(libloader.find_library_path('bass_aac', x86_path=x86_path, x64_path=x64_path), 0)
# Re-export the ctypes aliases used by the prototypes below.
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
DOWNLOADPROC = pybass.DOWNLOADPROC
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# Additional BASS_SetConfig options
BASS_CONFIG_MP4_VIDEO = 0x10700 # play the audio from MP4 videos
# Additional tags available from BASS_StreamGetTags (for MP4 files)
BASS_TAG_MP4 = 7 # MP4/iTunes metadata
BASS_AAC_STEREO = 0x400000 # downmatrix to stereo
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_AAC = 0x10b00 # AAC
BASS_CTYPE_STREAM_MP4 = 0x10b01 # MP4
# Each binding below keeps the original C prototype as a comment.
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_AAC_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_AAC_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateURL)(const char *url, DWORD offset, DWORD flags, DOWNLOADPROC *proc, void *user);
BASS_AAC_StreamCreateURL = func_type(HSTREAM, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, DOWNLOADPROC, ctypes.c_void_p)(('BASS_AAC_StreamCreateURL', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_AAC_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_AAC_StreamCreateFileUser', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_MP4_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_MP4_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_MP4_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_MP4_StreamCreateFileUser', bass_aac_module))
| 0 | 0 |
0a79e9ed00dd7cff3b0787278aa3e51a4698409f | 31,754 | py | Python | src/olympia/addons/views.py | tapaswenipathak/addons-server | b7085559a754248a8baade399d5a27f2c3e3ca7e | [
"BSD-3-Clause"
] | 1 | 2019-08-17T21:17:50.000Z | 2019-08-17T21:17:50.000Z | src/olympia/addons/views.py | tapaswenipathak/addons-server | b7085559a754248a8baade399d5a27f2c3e3ca7e | [
"BSD-3-Clause"
] | null | null | null | src/olympia/addons/views.py | tapaswenipathak/addons-server | b7085559a754248a8baade399d5a27f2c3e3ca7e | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
from django import http
from django.db.models import Prefetch
from django.db.transaction import non_atomic_requests
from django.shortcuts import redirect
from django.utils.cache import patch_cache_control
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from elasticsearch_dsl import Q, query, Search
from rest_framework import exceptions, serializers
from rest_framework.decorators import action
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import get_outgoing_url
from olympia.api.pagination import ESPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowReadOnlyIfPublic, AllowRelatedObjectPermissions,
AllowReviewer, AllowReviewerUnlisted, AnyOf, GroupPermission)
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.search.filters import (
AddonAppQueryParam, AddonAppVersionQueryParam, AddonAuthorQueryParam,
AddonCategoryQueryParam, AddonGuidQueryParam, AddonTypeQueryParam,
AutoCompleteSortFilter,
ReviewedContentFilter, SearchParameterFilter, SearchQueryFilter,
SortingFilter)
from olympia.translations.query import order_by_translation
from olympia.versions.models import Version
from .decorators import addon_view_factory
from .indexers import AddonIndexer
from .models import Addon, CompatOverride, ReplacementAddon
from .serializers import (
AddonEulaPolicySerializer,
AddonSerializer, AddonSerializerWithUnlistedData, CompatOverrideSerializer,
ESAddonAutoCompleteSerializer, ESAddonSerializer, LanguageToolsSerializer,
ReplacementAddonSerializer, StaticCategorySerializer, VersionSerializer)
from .utils import (
get_addon_recommendations, get_addon_recommendations_invalid,
get_creatured_ids, get_featured_ids, is_outcome_recommended)
# Module-level logger for the addons views.
log = olympia.core.logger.getLogger('z.addons')
# View decorators that resolve an add-on from URL kwargs, differing only in
# the base queryset they look the add-on up in.
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
    qs=Addon.objects.valid_and_disabled_and_pending)
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.

    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view. A subclass may also define an ``extras`` sequence of additional
    (key, title) pairs that are accepted from the request but not included in
    the mapping returned by ``all()``.

    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """
    def __init__(self, request, base, key, default, model=Addon):
        self.opts_dict = dict(self.opts)
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        # options() must run before filter(): self.field selects which
        # filter_* method is dispatched to, and the filter_* methods rely on
        # self.request/self.base_queryset being set.
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)

    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            title = self.extras_dict[opt]
        return opt, title

    def all(self):
        """Get a full mapping of {option: queryset}."""
        # Dict comprehension instead of dict(generator); note only ``opts``
        # (not ``extras``) are expanded here.
        return {field: self.filter(field) for field in dict(self.opts)}

    def filter(self, field):
        """Get the queryset for the given field."""
        return getattr(self, 'filter_{0}'.format(field))()

    def filter_featured(self):
        """Featured add-ons in random order for the current app/language."""
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.base_queryset, ids, 'addons.id')

    def filter_free(self):
        """Top free add-ons; Addon's manager needs the current app."""
        if self.model == Addon:
            return self.base_queryset.top_free(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_free(listed=False)

    def filter_paid(self):
        """Top paid add-ons; Addon's manager needs the current app."""
        if self.model == Addon:
            return self.base_queryset.top_paid(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_paid(listed=False)

    def filter_popular(self):
        """Sort by weekly downloads, descending."""
        return self.base_queryset.order_by('-weekly_downloads')

    def filter_downloads(self):
        """Alias for filter_popular()."""
        return self.filter_popular()

    def filter_users(self):
        """Sort by average daily users, descending."""
        return self.base_queryset.order_by('-average_daily_users')

    def filter_created(self):
        """Sort by creation date, newest first."""
        return self.base_queryset.order_by('-created')

    def filter_updated(self):
        """Sort by last update, newest first."""
        return self.base_queryset.order_by('-last_updated')

    def filter_rating(self):
        """Sort by bayesian rating, highest first."""
        return self.base_queryset.order_by('-bayesian_rating')

    def filter_hotness(self):
        """Sort by hotness score, descending."""
        return self.base_queryset.order_by('-hotness')

    def filter_name(self):
        """Sort alphabetically by (translated) name."""
        return order_by_translation(self.base_queryset.all(), 'name')
# Fallback path users are redirected to when no replacement is registered for
# the guid they asked about.
DEFAULT_FIND_REPLACEMENT_PATH = '/collections/mozilla/featured-add-ons/'
# Value of the ?src= query parameter appended to the redirect URL.
FIND_REPLACEMENT_SRC = 'find-replacement'
def find_replacement_addon(request):
    """Redirect a legacy add-on ``guid`` to a replacement suggestion.

    Requires a ``guid`` GET parameter (404 without one). A registered
    replacement with an external URL goes through the outgoing-link
    redirector; otherwise the user is sent to the replacement's local path,
    falling back to DEFAULT_FIND_REPLACEMENT_PATH for unknown guids.
    """
    guid = request.GET.get('guid')
    if not guid:
        raise http.Http404
    try:
        replacement = ReplacementAddon.objects.get(guid=guid)
    except ReplacementAddon.DoesNotExist:
        # Unknown guid: fall back to the generic featured collection.
        path = DEFAULT_FIND_REPLACEMENT_PATH
    else:
        if replacement.has_external_url():
            # It's an external URL: send it through the bounce service.
            return redirect(get_outgoing_url(replacement.path))
        path = replacement.path
    # Make sure the path is absolute and tag the redirect with its source.
    prefix = '' if path.startswith('/') else '/'
    replace_url = '{0}{1}?src={2}'.format(prefix, path, FIND_REPLACEMENT_SRC)
    return redirect(replace_url, permanent=False)
class AddonViewSet(RetrieveModelMixin, GenericViewSet):
    """Detail endpoint for add-ons, looked up by pk, slug or guid."""
    permission_classes = [
        AnyOf(AllowReadOnlyIfPublic, AllowAddonAuthor,
              AllowReviewer, AllowReviewerUnlisted),
    ]
    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData
    lookup_value_regex = '[^/]+'  # Allow '.' for email-like guids.

    def get_queryset(self):
        """Return queryset to be used for the view."""
        # Special case: admins - and only admins - can see deleted add-ons.
        # This is handled outside a permission class because that condition
        # would pollute all other classes otherwise.
        if (self.request.user.is_authenticated and
                acl.action_allowed(self.request,
                                   amo.permissions.ADDONS_VIEW_DELETED)):
            return Addon.unfiltered.all()
        # Permission classes disallow access to non-public/unlisted add-ons
        # unless logged in as a reviewer/addon owner/admin, so we don't have to
        # filter the base queryset here.
        return Addon.objects.all()

    def get_serializer_class(self):
        """Return the serializer, exposing unlisted data only when allowed."""
        # Override serializer to use serializer_class_with_unlisted_data if
        # we are allowed to access unlisted data.
        # Bugfix: `instance` is only set once get_object() has run; default to
        # None so this method is safe to call before that (it previously
        # raised AttributeError via getattr() without a default).
        obj = getattr(self, 'instance', None)
        request = self.request
        if (acl.check_unlisted_addons_reviewer(request) or
                (obj and request.user.is_authenticated and
                 obj.authors.filter(pk=request.user.pk).exists())):
            return self.serializer_class_with_unlisted_data
        return self.serializer_class

    def get_lookup_field(self, identifier):
        """Return the model field name `identifier` should be matched on."""
        return Addon.get_lookup_field(identifier)

    def get_object(self):
        """Fetch the add-on, resolving the lookup field dynamically."""
        identifier = self.kwargs.get('pk')
        self.lookup_field = self.get_lookup_field(identifier)
        self.kwargs[self.lookup_field] = identifier
        # Memoized so get_serializer_class() can inspect the loaded object.
        self.instance = super(AddonViewSet, self).get_object()
        return self.instance

    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.

        Calls DRF implementation, but adds `is_disabled_by_developer` to the
        exception being thrown so that clients can tell the difference between
        a 401/403 returned because an add-on has been disabled by their
        developer or something else.
        """
        try:
            super(AddonViewSet, self).check_object_permissions(request, obj)
        except exceptions.APIException as exc:
            exc.detail = {
                'detail': exc.detail,
                'is_disabled_by_developer': obj.disabled_by_user,
                'is_disabled_by_mozilla': obj.status == amo.STATUS_DISABLED,
            }
            raise exc

    @action(detail=True)
    def eula_policy(self, request, pk=None):
        """Return the add-on's EULA and privacy policy."""
        obj = self.get_object()
        serializer = AddonEulaPolicySerializer(
            obj, context=self.get_serializer_context())
        return Response(serializer.data)
class AddonChildMixin(object):
    """Mixin containing method to retrieve the parent add-on object."""

    def get_addon_object(self, permission_classes=None, lookup='addon_pk'):
        """Return the parent Addon object using the URL parameter passed
        to the view.

        `permission_classes` can be use passed to change which permission
        classes the parent viewset will be used when loading the Addon object,
        otherwise AddonViewSet.permission_classes will be used."""
        # Memoized on the instance so repeated calls don't redo permission
        # checks and queries.
        if not hasattr(self, 'addon_object'):
            if permission_classes is None:
                permission_classes = AddonViewSet.permission_classes
            parent_viewset = AddonViewSet(
                request=self.request,
                permission_classes=permission_classes,
                kwargs={'pk': self.kwargs[lookup]})
            self.addon_object = parent_viewset.get_object()
        return self.addon_object
class AddonVersionViewSet(AddonChildMixin, RetrieveModelMixin,
                          ListModelMixin, GenericViewSet):
    """List/retrieve versions belonging to a given add-on.

    Which deleted/unlisted/non-public versions are visible depends on the
    optional ``filter`` GET parameter and on the caller's permissions.
    """
    # Permissions are always checked against the parent add-on in
    # get_addon_object() using AddonViewSet.permission_classes so we don't need
    # to set any here. Some extra permission classes are added dynamically
    # below in check_permissions() and check_object_permissions() depending on
    # what the client is requesting to see.
    permission_classes = []
    serializer_class = VersionSerializer

    def check_permissions(self, request):
        """Apply list-level permissions driven by the ``filter`` parameter."""
        requested = self.request.GET.get('filter')
        if self.action == 'list':
            if requested == 'all_with_deleted':
                # To see deleted versions, you need Addons:ViewDeleted.
                self.permission_classes = [
                    GroupPermission(amo.permissions.ADDONS_VIEW_DELETED)]
            elif requested == 'all_with_unlisted':
                # To see unlisted versions, you need to be add-on author or
                # unlisted reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewerUnlisted, AllowAddonAuthor)]
            elif requested == 'all_without_unlisted':
                # To see all listed versions (not just public ones) you need to
                # be add-on author or reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewer, AllowReviewerUnlisted, AllowAddonAuthor)]
            # When listing, we can't use AllowRelatedObjectPermissions() with
            # check_permissions(), because AllowAddonAuthor needs an author to
            # do the actual permission check. To work around that, we call
            # super + check_object_permission() ourselves, passing down the
            # addon object directly.
            return super(AddonVersionViewSet, self).check_object_permissions(
                request, self.get_addon_object())
        super(AddonVersionViewSet, self).check_permissions(request)

    def check_object_permissions(self, request, obj):
        """Apply per-version permissions (deleted/unlisted/disabled cases)."""
        # If the instance is marked as deleted and the client is not allowed to
        # see deleted instances, we want to return a 404, behaving as if it
        # does not exist.
        if (obj.deleted and
                not GroupPermission(amo.permissions.ADDONS_VIEW_DELETED).
                has_object_permission(request, self, obj)):
            raise http.Http404
        if obj.channel == amo.RELEASE_CHANNEL_UNLISTED:
            # If the instance is unlisted, only allow unlisted reviewers and
            # authors..
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewerUnlisted, AllowAddonAuthor)])
            ]
        elif not obj.is_public():
            # If the instance is disabled, only allow reviewers and authors.
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewer, AllowAddonAuthor)])
            ]
        super(AddonVersionViewSet, self).check_object_permissions(request, obj)

    def get_queryset(self):
        """Return the right base queryset depending on the situation."""
        requested = self.request.GET.get('filter')
        # 'filter' is only meaningful when listing; validate it first.
        valid_filters = (
            'all_with_deleted',
            'all_with_unlisted',
            'all_without_unlisted',
        )
        if requested is not None:
            if self.action != 'list':
                raise serializers.ValidationError(
                    'The "filter" parameter is not valid in this context.')
            elif requested not in valid_filters:
                raise serializers.ValidationError(
                    'Invalid "filter" parameter specified.')
        # By default we restrict to valid, listed versions. Some filtering
        # options are available when listing, and in addition, when returning
        # a single instance, we don't filter at all.
        if requested == 'all_with_deleted' or self.action != 'list':
            queryset = Version.unfiltered.all()
        elif requested == 'all_with_unlisted':
            queryset = Version.objects.all()
        elif requested == 'all_without_unlisted':
            queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED)
        else:
            # By default, we rely on queryset filtering to hide
            # non-public/unlisted versions. get_queryset() might override this
            # if we are asked to see non-valid, deleted and/or unlisted
            # versions explicitly.
            queryset = Version.objects.filter(
                files__status=amo.STATUS_APPROVED,
                channel=amo.RELEASE_CHANNEL_LISTED).distinct()
        # Filter with the add-on.
        return queryset.filter(addon=self.get_addon_object())
class AddonSearchView(ListAPIView):
    """Public add-on search endpoint backed by Elasticsearch."""
    authentication_classes = []
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        SortingFilter,
    ]
    pagination_class = ESPageNumberPagination
    permission_classes = []
    serializer_class = ESAddonSerializer

    def get_queryset(self):
        """Build the base ES Search object, hiding internal-only fields."""
        search = Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name())
        search = search.extra(
            _source={'excludes': AddonIndexer.hidden_fields})
        return search.params(search_type='dfs_query_then_fetch')

    @classmethod
    def as_view(cls, **kwargs):
        """The API is read-only so we can turn off atomic requests."""
        view = super(AddonSearchView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
class AddonAutoCompleteSearchView(AddonSearchView):
    """Lightweight variant of the search endpoint powering autocomplete."""
    pagination_class = None
    serializer_class = ESAddonAutoCompleteSerializer
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        AutoCompleteSortFilter,
    ]

    def get_queryset(self):
        """Build an ES query fetching only the fields autocomplete needs."""
        # Minimal set of fields from ES that we need to build our results.
        # It's the opposite tactic used by the regular search endpoint, which
        # excludes a specific set of fields - because we know that autocomplete
        # only needs to return very few things.
        included_fields = (
            'icon_type',  # Needed for icon_url.
            'id',  # Needed for... id
            'is_recommended',
            'modified',  # Needed for icon_url.
            'name_translations',  # Needed for... name.
            'default_locale',  # Needed for translations to work.
            'slug',  # Needed for url.
            'type',  # Needed to attach the Persona for icon_url (sadly).
        )
        search = Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name())
        return search.extra(_source={'includes': included_fields})

    def list(self, request, *args, **kwargs):
        """Return at most 10 suggestions, unpaginated."""
        # Ignore pagination (slice directly) but do wrap the data in a
        # 'results' property to mimic what the search API does.
        suggestions = self.filter_queryset(self.get_queryset())[:10]
        serializer = self.get_serializer(suggestions, many=True)
        return Response({'results': serializer.data})
class AddonFeaturedView(GenericAPIView):
    """Return a random selection of featured add-ons, optionally narrowed by
    app, type and category parameters."""
    authentication_classes = []
    permission_classes = []
    serializer_class = AddonSerializer
    # We accept the 'page_size' parameter but we do not allow pagination for
    # this endpoint since the order is random.
    pagination_class = None

    def get(self, request, *args, **kwargs):
        """Return the featured add-ons wrapped in a 'results' key."""
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        # Simulate pagination-like results, without actual pagination.
        return Response({'results': serializer.data})

    @classmethod
    def as_view(cls, **kwargs):
        """The API is read-only so we can turn off atomic requests."""
        view = super(AddonFeaturedView, cls).as_view(**kwargs)
        return non_atomic_requests(view)

    def get_queryset(self):
        """Base queryset: all valid add-ons."""
        return Addon.objects.valid()

    def filter_queryset(self, queryset):
        """Pick featured/creatured ids from the request parameters, slice to
        page_size, and order the queryset accordingly."""
        # We can pass the optional lang parameter to either get_creatured_ids()
        # or get_featured_ids() below to get locale-specific results in
        # addition to the generic ones.
        lang = self.request.GET.get('lang')
        if 'category' in self.request.GET:
            # If a category is passed then the app and type parameters are
            # mandatory because we need to find a category in the constants to
            # pass to get_creatured_ids(), and category slugs are not unique.
            # AddonCategoryQueryParam parses the request parameters for us to
            # determine the category.
            try:
                categories = AddonCategoryQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = []
            for category in categories:
                ids.extend(get_creatured_ids(category, lang))
        else:
            # If no category is passed, only the app parameter is mandatory,
            # because get_featured_ids() needs it to find the right collection
            # to pick addons from. It can optionally filter by type, so we
            # parse request for that as well.
            try:
                app = AddonAppQueryParam(
                    self.request).get_object_from_reverse_dict()
                types = None
                if 'type' in self.request.GET:
                    types = AddonTypeQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_featured_ids(app, lang=lang, types=types)
        # ids is going to be a random list of ids, we just slice it to get
        # the number of add-ons that was requested. We do it before calling
        # manual_order(), since it'll use the ids as part of a id__in filter.
        try:
            page_size = int(
                self.request.GET.get('page_size', api_settings.PAGE_SIZE))
        except ValueError:
            raise exceptions.ParseError('Invalid page_size parameter')
        ids = ids[:page_size]
        return manual_order(queryset, ids, 'addons.id')
class StaticCategoryView(ListAPIView):
    """Expose the hard-coded category constants as a read-only listing."""
    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = StaticCategorySerializer

    def get_queryset(self):
        """Return every static category, ordered by id."""
        categories = list(CATEGORIES_BY_ID.values())
        categories.sort(key=lambda category: category.id)
        return categories

    @classmethod
    def as_view(cls, **kwargs):
        """The API is read-only so we can turn off atomic requests."""
        view = super(StaticCategoryView, cls).as_view(**kwargs)
        return non_atomic_requests(view)

    def finalize_response(self, request, response, *args, **kwargs):
        """Let clients cache responses for 6 hours."""
        response = super(StaticCategoryView, self).finalize_response(
            request, response, *args, **kwargs)
        # max-age is expressed in seconds: 6 hours.
        patch_cache_control(response, max_age=60 * 60 * 6)
        return response
class LanguageToolsView(ListAPIView):
    """List language packs and dictionaries, optionally filtered by app,
    type, app version and author."""
    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = LanguageToolsSerializer

    @classmethod
    def as_view(cls, **initkwargs):
        """The API is read-only so we can turn off atomic requests."""
        return non_atomic_requests(
            super(LanguageToolsView, cls).as_view(**initkwargs))

    def get_query_params(self):
        """
        Parse query parameters that this API supports:
        - app (mandatory)
        - type (optional)
        - appversion (optional, makes type mandatory)
        - author (optional)

        Can raise ParseError() in case a mandatory parameter is missing or a
        parameter is invalid.

        Returns a dict containing application (int), types (tuple or None),
        appversions (dict or None) and author (string or None).
        """
        # app parameter is mandatory when calling this API.
        try:
            application = AddonAppQueryParam(self.request).get_value()
        except ValueError:
            raise exceptions.ParseError('Invalid or missing app parameter.')
        # appversion parameter is optional.
        if AddonAppVersionQueryParam.query_param in self.request.GET:
            try:
                value = AddonAppVersionQueryParam(self.request).get_values()
                # get_values() returns (app, min, max); we only need min/max.
                appversions = {
                    'min': value[1],
                    'max': value[2]
                }
            except ValueError:
                raise exceptions.ParseError('Invalid appversion parameter.')
        else:
            appversions = None
        # type is optional, unless appversion is set. That's because the way
        # dicts and language packs have their compatibility info set in the
        # database differs, so to make things simpler for us we force clients
        # to filter by type if they want appversion filtering.
        if AddonTypeQueryParam.query_param in self.request.GET or appversions:
            try:
                addon_types = tuple(
                    AddonTypeQueryParam(self.request).get_value())
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid or missing type parameter while appversion '
                    'parameter is set.')
        else:
            addon_types = (amo.ADDON_LPAPP, amo.ADDON_DICT)
        # author is optional. It's a string representing the username(s) we're
        # filtering on.
        if AddonAuthorQueryParam.query_param in self.request.GET:
            author = AddonAuthorQueryParam(self.request).get_value()
        else:
            author = None
        return {
            'application': application,
            'types': addon_types,
            'appversions': appversions,
            'author': author,
        }

    def get_queryset(self):
        """
        Return queryset to use for this view, depending on query parameters.
        """
        # application, addon_types, appversions
        params = self.get_query_params()
        if params['types'] == (amo.ADDON_LPAPP,) and params['appversions']:
            qs = self.get_language_packs_queryset_with_appversions(
                params['application'], params['appversions'])
        else:
            # appversions filtering only makes sense for language packs only,
            # so it's ignored here.
            qs = self.get_queryset_base(params['application'], params['types'])
        if params['author']:
            qs = qs.filter(
                addonuser__user__username__in=params['author'],
                addonuser__listed=True).distinct()
        return qs

    def get_queryset_base(self, application, addon_types):
        """
        Return base queryset to be used as the starting point in both
        get_queryset() and get_language_packs_queryset_with_appversions().
        """
        return (
            Addon.objects.public()
            .filter(appsupport__app=application, type__in=addon_types,
                    target_locale__isnull=False)
            .exclude(target_locale='')
            # Deactivate default transforms which fetch a ton of stuff we
            # don't need here like authors, previews or current version.
            # It would be nice to avoid translations entirely, because the
            # translations transformer is going to fetch a lot of translations
            # we don't need, but some language packs or dictionaries have
            # custom names, so we can't use a generic one for them...
            .only_translations()
            # Since we're fetching everything with no pagination, might as well
            # not order it.
            .order_by()
        )

    def get_language_packs_queryset_with_appversions(
            self, application, appversions):
        """
        Return queryset to use specifically when requesting language packs
        compatible with a given app + versions.

        application is an application id, and appversions is a dict with min
        and max keys pointing to application versions expressed as ints.
        """
        # Base queryset.
        qs = self.get_queryset_base(application, (amo.ADDON_LPAPP,))
        # Version queryset we'll prefetch once for all results. We need to
        # find the ones compatible with the app+appversion requested, and we
        # can avoid loading translations by removing transforms and then
        # re-applying the default one that takes care of the files and compat
        # info.
        versions_qs = (
            Version.objects
            .latest_public_compatible_with(application, appversions)
            .no_transforms().transform(Version.transformer))
        return (
            qs.prefetch_related(Prefetch('versions',
                                         to_attr='compatible_versions',
                                         queryset=versions_qs))
            .filter(versions__apps__application=application,
                    versions__apps__min__version_int__lte=appversions['min'],
                    versions__apps__max__version_int__gte=appversions['max'],
                    versions__channel=amo.RELEASE_CHANNEL_LISTED,
                    versions__files__status=amo.STATUS_APPROVED)
            .distinct()
        )

    @method_decorator(cache_page(60 * 60 * 24))
    def dispatch(self, *args, **kwargs):
        # Cache the whole response server-side for 24 hours.
        return super(LanguageToolsView, self).dispatch(*args, **kwargs)

    def list(self, request, *args, **kwargs):
        """Return everything at once, wrapped in a 'results' key."""
        # Ignore pagination (return everything) but do wrap the data in a
        # 'results' property to mimic what the default implementation of list()
        # does in DRF.
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        return Response({'results': serializer.data})
class ReplacementAddonView(ListAPIView):
    """Plain read-only listing of all registered legacy-add-on replacements."""
    authentication_classes = []
    queryset = ReplacementAddon.objects.all()
    serializer_class = ReplacementAddonSerializer
class CompatOverrideView(ListAPIView):
    """This view is used by Firefox so it's performance-critical.

    Every firefox client requests the list of overrides approx. once per day.
    Firefox requests the overrides via a list of GUIDs which makes caching
    hard because the variation of possible GUID combinations prevent us to
    simply add some dumb-caching and requires us to resolve cache-misses.
    """
    queryset = CompatOverride.objects.all()
    serializer_class = CompatOverrideSerializer

    @classmethod
    def as_view(cls, **initkwargs):
        """The API is read-only so we can turn off atomic requests."""
        return non_atomic_requests(
            super(CompatOverrideView, cls).as_view(**initkwargs))

    def get_guids(self):
        """Return the list of guids parsed from the request."""
        # Use the same Filter we use for AddonSearchView for consistency.
        guid_filter = AddonGuidQueryParam(self.request)
        return guid_filter.get_value()

    def filter_queryset(self, queryset):
        """Restrict to the requested guids; 400 when none were provided."""
        guids = self.get_guids()
        if not guids:
            raise exceptions.ParseError(
                'Empty, or no, guid parameter provided.')
        # Evaluate the queryset and cast it into a list.
        # This will force Django to simply use len(queryset) instead of
        # calling .count() on it and avoids an additional COUNT query.
        # The amount of GUIDs we should get in real-life won't be paginated
        # most of the time so it's safe to simply evaluate the query.
        # The advantage here is that we are saving ourselves a `COUNT` query
        # and these are expensive.
        return list(queryset.filter(guid__in=guids).transform(
            CompatOverride.transformer).order_by('-pk'))
class AddonRecommendationView(AddonSearchView):
    """Return up to 4 recommended add-ons for a given guid, falling back to
    a default recommendation set when the personalized lookup comes up
    short."""
    filter_backends = [ReviewedContentFilter]
    # Set per-request in filter_queryset(); surfaced in the response payload.
    ab_outcome = None
    fallback_reason = None
    pagination_class = None

    def get_paginated_response(self, data):
        """Fake a single-page paginated payload with outcome metadata."""
        data = data[:4]  # taar is only supposed to return 4 anyway.
        return Response(OrderedDict([
            ('outcome', self.ab_outcome),
            ('fallback_reason', self.fallback_reason),
            ('page_size', 1),
            ('page_count', 1),
            ('count', len(data)),
            ('next', None),
            ('previous', None),
            ('results', data),
        ]))

    def filter_queryset(self, qs):
        """Fetch recommendations for the requested guid; retry with the
        invalid-fallback set if a recommended outcome yields != 4 results."""
        qs = super(AddonRecommendationView, self).filter_queryset(qs)
        guid_param = self.request.GET.get('guid')
        taar_enable = self.request.GET.get('recommended', '').lower() == 'true'
        guids, self.ab_outcome, self.fallback_reason = (
            get_addon_recommendations(guid_param, taar_enable))
        results_qs = qs.query(query.Bool(must=[Q('terms', guid=guids)]))
        results_qs.execute()  # To cache the results.
        if results_qs.count() != 4 and is_outcome_recommended(self.ab_outcome):
            guids, self.ab_outcome, self.fallback_reason = (
                get_addon_recommendations_invalid())
            return qs.query(query.Bool(must=[Q('terms', guid=guids)]))
        return results_qs

    def paginate_queryset(self, queryset):
        # We don't need pagination for the fixed number of results.
        return queryset
| 41.891821 | 79 | 0.653776 | from collections import OrderedDict
from django import http
from django.db.models import Prefetch
from django.db.transaction import non_atomic_requests
from django.shortcuts import redirect
from django.utils.cache import patch_cache_control
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from elasticsearch_dsl import Q, query, Search
from rest_framework import exceptions, serializers
from rest_framework.decorators import action
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import get_outgoing_url
from olympia.api.pagination import ESPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowReadOnlyIfPublic, AllowRelatedObjectPermissions,
AllowReviewer, AllowReviewerUnlisted, AnyOf, GroupPermission)
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.search.filters import (
AddonAppQueryParam, AddonAppVersionQueryParam, AddonAuthorQueryParam,
AddonCategoryQueryParam, AddonGuidQueryParam, AddonTypeQueryParam,
AutoCompleteSortFilter,
ReviewedContentFilter, SearchParameterFilter, SearchQueryFilter,
SortingFilter)
from olympia.translations.query import order_by_translation
from olympia.versions.models import Version
from .decorators import addon_view_factory
from .indexers import AddonIndexer
from .models import Addon, CompatOverride, ReplacementAddon
from .serializers import (
AddonEulaPolicySerializer,
AddonSerializer, AddonSerializerWithUnlistedData, CompatOverrideSerializer,
ESAddonAutoCompleteSerializer, ESAddonSerializer, LanguageToolsSerializer,
ReplacementAddonSerializer, StaticCategorySerializer, VersionSerializer)
from .utils import (
get_addon_recommendations, get_addon_recommendations_invalid,
get_creatured_ids, get_featured_ids, is_outcome_recommended)
log = olympia.core.logger.getLogger('z.addons')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
qs=Addon.objects.valid_and_disabled_and_pending)
# NOTE(review): this is a byte-for-byte duplicate of the BaseFilter class
# defined earlier in this file; at import time this later definition shadows
# the earlier one. Confirm whether the duplication is intentional.
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.
    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view.
    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """
    def __init__(self, request, base, key, default, model=Addon):
        self.opts_dict = dict(self.opts)
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)
    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            title = self.extras_dict[opt]
        return opt, title
    def all(self):
        """Get a full mapping of {option: queryset}."""
        return dict((field, self.filter(field)) for field in dict(self.opts))
    def filter(self, field):
        """Get the queryset for the given field."""
        return getattr(self, 'filter_{0}'.format(field))()
    def filter_featured(self):
        # Featured add-ons in random order for the current app/language.
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.base_queryset, ids, 'addons.id')
    def filter_free(self):
        if self.model == Addon:
            return self.base_queryset.top_free(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_free(listed=False)
    def filter_paid(self):
        if self.model == Addon:
            return self.base_queryset.top_paid(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_paid(listed=False)
    def filter_popular(self):
        return self.base_queryset.order_by('-weekly_downloads')
    def filter_downloads(self):
        # Alias for filter_popular().
        return self.filter_popular()
    def filter_users(self):
        return self.base_queryset.order_by('-average_daily_users')
    def filter_created(self):
        return self.base_queryset.order_by('-created')
    def filter_updated(self):
        return self.base_queryset.order_by('-last_updated')
    def filter_rating(self):
        return self.base_queryset.order_by('-bayesian_rating')
    def filter_hotness(self):
        return self.base_queryset.order_by('-hotness')
    def filter_name(self):
        return order_by_translation(self.base_queryset.all(), 'name')
# NOTE(review): duplicates of the constants defined earlier in this file.
# Fallback redirect path / ?src= tag for find_replacement_addon() below.
DEFAULT_FIND_REPLACEMENT_PATH = '/collections/mozilla/featured-add-ons/'
FIND_REPLACEMENT_SRC = 'find-replacement'
def find_replacement_addon(request):
    """Redirect a legacy add-on guid to its replacement.

    Looks the ``guid`` query parameter up in ReplacementAddon. Unknown guids
    go to DEFAULT_FIND_REPLACEMENT_PATH. External replacement URLs are
    bounced through the outgoing-link redirector; internal paths get a
    ``?src=`` attribution parameter. Raises Http404 when no guid is given.
    """
    guid = request.GET.get('guid')
    if not guid:
        raise http.Http404
    try:
        replacement = ReplacementAddon.objects.get(guid=guid)
    except ReplacementAddon.DoesNotExist:
        path = DEFAULT_FIND_REPLACEMENT_PATH
    else:
        path = replacement.path
        if replacement.has_external_url():
            # It's an external URL:
            return redirect(get_outgoing_url(path))
    leading_slash = '' if path.startswith('/') else '/'
    replace_url = '%s%s?src=%s' % (leading_slash, path, FIND_REPLACEMENT_SRC)
    return redirect(replace_url, permanent=False)
class AddonViewSet(RetrieveModelMixin, GenericViewSet):
    """Read-only API endpoint for a single add-on.

    The lookup value may be a pk, slug or guid; get_lookup_field() decides
    which model field to match against.
    """

    # Any one of these grants access: public add-ons are world-readable,
    # otherwise the requester must be an author or a (listed/unlisted)
    # reviewer.
    permission_classes = [
        AnyOf(AllowReadOnlyIfPublic, AllowAddonAuthor,
              AllowReviewer, AllowReviewerUnlisted),
    ]
    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData
    lookup_value_regex = '[^/]+'  # Allow '.' for email-like guids.

    def get_queryset(self):
        """Return queryset to be used for the view."""
        # Special case: admins - and only admins - can see deleted add-ons.
        # This is handled outside a permission class because that condition
        # would pollute all other classes otherwise.
        if (self.request.user.is_authenticated and
                acl.action_allowed(self.request,
                                   amo.permissions.ADDONS_VIEW_DELETED)):
            return Addon.unfiltered.all()
        # Permission classes disallow access to non-public/unlisted add-ons
        # unless logged in as a reviewer/addon owner/admin, so we don't have to
        # filter the base queryset here.
        return Addon.objects.all()

    def get_serializer_class(self):
        # Override serializer to use serializer_class_with_unlisted_data if
        # we are allowed to access unlisted data.
        # Robustness fix: default to None so this does not raise
        # AttributeError when called before get_object() has set
        # self.instance (e.g. during schema/introspection).
        obj = getattr(self, 'instance', None)
        request = self.request
        if (acl.check_unlisted_addons_reviewer(request) or
                (obj and request.user.is_authenticated and
                 obj.authors.filter(pk=request.user.pk).exists())):
            return self.serializer_class_with_unlisted_data
        return self.serializer_class

    def get_lookup_field(self, identifier):
        """Return the model field name ('pk', 'slug' or 'guid') matching
        the shape of the given identifier."""
        return Addon.get_lookup_field(identifier)

    def get_object(self):
        """Resolve the object from the URL identifier, caching it on
        self.instance for get_serializer_class()."""
        identifier = self.kwargs.get('pk')
        self.lookup_field = self.get_lookup_field(identifier)
        self.kwargs[self.lookup_field] = identifier
        self.instance = super(AddonViewSet, self).get_object()
        return self.instance

    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        Calls DRF implementation, but adds `is_disabled_by_developer` to the
        exception being thrown so that clients can tell the difference between
        a 401/403 returned because an add-on has been disabled by their
        developer or something else.
        """
        try:
            super(AddonViewSet, self).check_object_permissions(request, obj)
        except exceptions.APIException as exc:
            exc.detail = {
                'detail': exc.detail,
                'is_disabled_by_developer': obj.disabled_by_user,
                'is_disabled_by_mozilla': obj.status == amo.STATUS_DISABLED,
            }
            raise exc

    @action(detail=True)
    def eula_policy(self, request, pk=None):
        """Extra detail route exposing the add-on's EULA/privacy policy."""
        obj = self.get_object()
        serializer = AddonEulaPolicySerializer(
            obj, context=self.get_serializer_context())
        return Response(serializer.data)
class AddonChildMixin(object):
    """Mixin containing method to retrieve the parent add-on object."""

    def get_addon_object(self, permission_classes=None, lookup='addon_pk'):
        """Return the parent Addon object using the URL parameter passed
        to the view.

        `permission_classes` can be use passed to change which permission
        classes the parent viewset will be used when loading the Addon object,
        otherwise AddonViewSet.permission_classes will be used.

        The result is cached on self.addon_object so repeated calls only
        hit the database once.
        """
        if not hasattr(self, 'addon_object'):
            if permission_classes is None:
                permission_classes = AddonViewSet.permission_classes
            parent_viewset = AddonViewSet(
                request=self.request,
                permission_classes=permission_classes,
                kwargs={'pk': self.kwargs[lookup]})
            self.addon_object = parent_viewset.get_object()
        return self.addon_object
class AddonVersionViewSet(AddonChildMixin, RetrieveModelMixin,
                          ListModelMixin, GenericViewSet):
    """List/retrieve versions belonging to a single add-on.

    The URL is nested under an add-on; the parent object is loaded via
    AddonChildMixin.get_addon_object() and permissions are checked against
    it. The optional ?filter= query parameter widens what versions are
    visible (deleted / unlisted / non-public), with stricter permissions.
    """

    # Permissions are always checked against the parent add-on in
    # get_addon_object() using AddonViewSet.permission_classes so we don't need
    # to set any here. Some extra permission classes are added dynamically
    # below in check_permissions() and check_object_permissions() depending on
    # what the client is requesting to see.
    permission_classes = []
    serializer_class = VersionSerializer

    def check_permissions(self, request):
        """Tighten self.permission_classes for the list action depending on
        the requested ?filter= value, then check against the parent add-on."""
        requested = self.request.GET.get('filter')
        if self.action == 'list':
            if requested == 'all_with_deleted':
                # To see deleted versions, you need Addons:ViewDeleted.
                self.permission_classes = [
                    GroupPermission(amo.permissions.ADDONS_VIEW_DELETED)]
            elif requested == 'all_with_unlisted':
                # To see unlisted versions, you need to be add-on author or
                # unlisted reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewerUnlisted, AllowAddonAuthor)]
            elif requested == 'all_without_unlisted':
                # To see all listed versions (not just public ones) you need to
                # be add-on author or reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewer, AllowReviewerUnlisted, AllowAddonAuthor)]
            # When listing, we can't use AllowRelatedObjectPermissions() with
            # check_permissions(), because AllowAddonAuthor needs an author to
            # do the actual permission check. To work around that, we call
            # super + check_object_permission() ourselves, passing down the
            # addon object directly.
            return super(AddonVersionViewSet, self).check_object_permissions(
                request, self.get_addon_object())
        super(AddonVersionViewSet, self).check_permissions(request)

    def check_object_permissions(self, request, obj):
        """Per-version permission check for the retrieve action.

        Deleted versions 404 for clients without Addons:ViewDeleted;
        unlisted or non-public versions require reviewer/author rights on
        the parent add-on.
        """
        # If the instance is marked as deleted and the client is not allowed to
        # see deleted instances, we want to return a 404, behaving as if it
        # does not exist.
        if (obj.deleted and
                not GroupPermission(amo.permissions.ADDONS_VIEW_DELETED).
                has_object_permission(request, self, obj)):
            raise http.Http404

        if obj.channel == amo.RELEASE_CHANNEL_UNLISTED:
            # If the instance is unlisted, only allow unlisted reviewers and
            # authors..
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewerUnlisted, AllowAddonAuthor)])
            ]
        elif not obj.is_public():
            # If the instance is disabled, only allow reviewers and authors.
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewer, AllowAddonAuthor)])
            ]
        super(AddonVersionViewSet, self).check_object_permissions(request, obj)

    def get_queryset(self):
        """Return the right base queryset depending on the situation."""
        requested = self.request.GET.get('filter')
        valid_filters = (
            'all_with_deleted',
            'all_with_unlisted',
            'all_without_unlisted',
        )
        if requested is not None:
            # ?filter= is only meaningful when listing versions.
            if self.action != 'list':
                raise serializers.ValidationError(
                    'The "filter" parameter is not valid in this context.')
            elif requested not in valid_filters:
                raise serializers.ValidationError(
                    'Invalid "filter" parameter specified.')
        # By default we restrict to valid, listed versions. Some filtering
        # options are available when listing, and in addition, when returning
        # a single instance, we don't filter at all.
        if requested == 'all_with_deleted' or self.action != 'list':
            queryset = Version.unfiltered.all()
        elif requested == 'all_with_unlisted':
            queryset = Version.objects.all()
        elif requested == 'all_without_unlisted':
            queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED)
        else:
            # By default, we rely on queryset filtering to hide
            # non-public/unlisted versions. get_queryset() might override this
            # if we are asked to see non-valid, deleted and/or unlisted
            # versions explicitly.
            queryset = Version.objects.filter(
                files__status=amo.STATUS_APPROVED,
                channel=amo.RELEASE_CHANNEL_LISTED).distinct()

        # Filter with the add-on.
        return queryset.filter(addon=self.get_addon_object())
class AddonSearchView(ListAPIView):
    """Elasticsearch-backed add-on search endpoint (anonymous, read-only)."""

    authentication_classes = []
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        SortingFilter,
    ]
    pagination_class = ESPageNumberPagination
    permission_classes = []
    serializer_class = ESAddonSerializer

    def get_queryset(self):
        """Build the base ES Search object: hidden fields excluded, and
        dfs_query_then_fetch for more consistent scoring across shards."""
        search = Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name())
        search = search.extra(
            _source={'excludes': AddonIndexer.hidden_fields})
        return search.params(search_type='dfs_query_then_fetch')

    @classmethod
    def as_view(cls, **kwargs):
        # Read-only endpoint: no need for atomic requests.
        view = super(AddonSearchView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
class AddonAutoCompleteSearchView(AddonSearchView):
    """Search endpoint tailored for autocomplete: tiny payload, at most 10
    results, no pagination."""

    pagination_class = None
    serializer_class = ESAddonAutoCompleteSerializer
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        AutoCompleteSortFilter,
    ]

    def get_queryset(self):
        # Minimal set of fields from ES that we need to build our results.
        # It's the opposite tactic used by the regular search endpoint, which
        # excludes a specific set of fields - because we know that autocomplete
        # only needs to return very few things.
        included_fields = (
            'icon_type',  # Needed for icon_url.
            'id',  # Needed for... id
            'is_recommended',
            'modified',  # Needed for icon_url.
            'name_translations',  # Needed for... name.
            'default_locale',  # Needed for translations to work.
            'slug',  # Needed for url.
            'type',  # Needed to attach the Persona for icon_url (sadly).
        )

        search = Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name())
        return search.extra(_source={'includes': included_fields})

    def list(self, request, *args, **kwargs):
        # Ignore pagination (slice directly) but do wrap the data in a
        # 'results' property to mimic what the search API does.
        top_results = self.filter_queryset(self.get_queryset())[:10]
        serializer = self.get_serializer(top_results, many=True)
        return Response({'results': serializer.data})
class AddonFeaturedView(GenericAPIView):
    """Return a random selection of featured add-ons.

    Results come from featured collections - per-category via
    get_creatured_ids() when ?category= is passed, otherwise per-app via
    get_featured_ids(). Responses are unpaginated since the order is random.
    """

    authentication_classes = []
    permission_classes = []
    serializer_class = AddonSerializer

    # We accept the 'page_size' parameter but we do not allow pagination for
    # this endpoint since the order is random.
    pagination_class = None

    def get(self, request, *args, **kwargs):
        """Serialize the filtered queryset wrapped in a pagination-like
        {'results': [...]} envelope."""
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        # Simulate pagination-like results, without actual pagination.
        return Response({'results': serializer.data})

    @classmethod
    def as_view(cls, **kwargs):
        # Read-only endpoint: no need for atomic requests.
        view = super(AddonFeaturedView, cls).as_view(**kwargs)
        return non_atomic_requests(view)

    def get_queryset(self):
        # Only valid add-ons are eligible to be featured.
        return Addon.objects.valid()

    def filter_queryset(self, queryset):
        """Pick featured ids from app/category/type/lang query parameters.

        Raises rest_framework ParseError (HTTP 400) on invalid parameters.
        """
        # We can pass the optional lang parameter to either get_creatured_ids()
        # or get_featured_ids() below to get locale-specific results in
        # addition to the generic ones.
        lang = self.request.GET.get('lang')
        if 'category' in self.request.GET:
            # If a category is passed then the app and type parameters are
            # mandatory because we need to find a category in the constants to
            # pass to get_creatured_ids(), and category slugs are not unique.
            # AddonCategoryQueryParam parses the request parameters for us to
            # determine the category.
            try:
                categories = AddonCategoryQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = []
            for category in categories:
                ids.extend(get_creatured_ids(category, lang))
        else:
            # If no category is passed, only the app parameter is mandatory,
            # because get_featured_ids() needs it to find the right collection
            # to pick addons from. It can optionally filter by type, so we
            # parse request for that as well.
            try:
                app = AddonAppQueryParam(
                    self.request).get_object_from_reverse_dict()
                types = None
                if 'type' in self.request.GET:
                    types = AddonTypeQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_featured_ids(app, lang=lang, types=types)
        # ids is going to be a random list of ids, we just slice it to get
        # the number of add-ons that was requested. We do it before calling
        # manual_order(), since it'll use the ids as part of a id__in filter.
        try:
            page_size = int(
                self.request.GET.get('page_size', api_settings.PAGE_SIZE))
        except ValueError:
            raise exceptions.ParseError('Invalid page_size parameter')
        ids = ids[:page_size]
        return manual_order(queryset, ids, 'addons.id')
class StaticCategoryView(ListAPIView):
    """List every category definition.

    Categories come from in-code constants (CATEGORIES_BY_ID), not the
    database, so responses are aggressively client-cached.
    """

    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = StaticCategorySerializer

    def get_queryset(self):
        """Return all known categories, ordered by id."""
        categories = list(CATEGORIES_BY_ID.values())
        categories.sort(key=lambda category: category.id)
        return categories

    @classmethod
    def as_view(cls, **kwargs):
        # Read-only endpoint: no need for atomic requests.
        view = super(StaticCategoryView, cls).as_view(**kwargs)
        return non_atomic_requests(view)

    def finalize_response(self, request, response, *args, **kwargs):
        """Add a 6-hour Cache-Control max-age to every response."""
        response = super(StaticCategoryView, self).finalize_response(
            request, response, *args, **kwargs)
        six_hours = 60 * 60 * 6
        patch_cache_control(response, max_age=six_hours)
        return response
class LanguageToolsView(ListAPIView):
    """List language tools (language packs and dictionaries) for an app.

    Responses are unpaginated and cached for 24 hours. When both type and
    appversion parameters are given, language packs are filtered by
    compatibility with the requested application versions.
    """

    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = LanguageToolsSerializer

    @classmethod
    def as_view(cls, **initkwargs):
        """The API is read-only so we can turn off atomic requests."""
        return non_atomic_requests(
            super(LanguageToolsView, cls).as_view(**initkwargs))

    def get_query_params(self):
        """
        Parse query parameters that this API supports:
        - app (mandatory)
        - type (optional)
        - appversion (optional, makes type mandatory)
        - author (optional)

        Can raise ParseError() in case a mandatory parameter is missing or a
        parameter is invalid.

        Returns a dict containing application (int), types (tuple or None),
        appversions (dict or None) and author (string or None).
        """
        # app parameter is mandatory when calling this API.
        try:
            application = AddonAppQueryParam(self.request).get_value()
        except ValueError:
            raise exceptions.ParseError('Invalid or missing app parameter.')

        # appversion parameter is optional.
        if AddonAppVersionQueryParam.query_param in self.request.GET:
            try:
                value = AddonAppVersionQueryParam(self.request).get_values()
                # value is (app_id, min_version_int, max_version_int).
                appversions = {
                    'min': value[1],
                    'max': value[2]
                }
            except ValueError:
                raise exceptions.ParseError('Invalid appversion parameter.')
        else:
            appversions = None

        # type is optional, unless appversion is set. That's because the way
        # dicts and language packs have their compatibility info set in the
        # database differs, so to make things simpler for us we force clients
        # to filter by type if they want appversion filtering.
        if AddonTypeQueryParam.query_param in self.request.GET or appversions:
            try:
                addon_types = tuple(
                    AddonTypeQueryParam(self.request).get_value())
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid or missing type parameter while appversion '
                    'parameter is set.')
        else:
            # Default: both language packs and dictionaries.
            addon_types = (amo.ADDON_LPAPP, amo.ADDON_DICT)

        # author is optional. It's a string representing the username(s) we're
        # filtering on.
        if AddonAuthorQueryParam.query_param in self.request.GET:
            author = AddonAuthorQueryParam(self.request).get_value()
        else:
            author = None

        return {
            'application': application,
            'types': addon_types,
            'appversions': appversions,
            'author': author,
        }

    def get_queryset(self):
        """
        Return queryset to use for this view, depending on query parameters.
        """
        # application, addon_types, appversions
        params = self.get_query_params()
        if params['types'] == (amo.ADDON_LPAPP,) and params['appversions']:
            qs = self.get_language_packs_queryset_with_appversions(
                params['application'], params['appversions'])
        else:
            # appversions filtering only makes sense for language packs only,
            # so it's ignored here.
            qs = self.get_queryset_base(params['application'], params['types'])

        if params['author']:
            qs = qs.filter(
                addonuser__user__username__in=params['author'],
                addonuser__listed=True).distinct()
        return qs

    def get_queryset_base(self, application, addon_types):
        """
        Return base queryset to be used as the starting point in both
        get_queryset() and get_language_packs_queryset_with_appversions().
        """
        return (
            Addon.objects.public()
            .filter(appsupport__app=application, type__in=addon_types,
                    target_locale__isnull=False)
            .exclude(target_locale='')
            # Deactivate default transforms which fetch a ton of stuff we
            # don't need here like authors, previews or current version.
            # It would be nice to avoid translations entirely, because the
            # translations transformer is going to fetch a lot of translations
            # we don't need, but some language packs or dictionaries have
            # custom names, so we can't use a generic one for them...
            .only_translations()
            # Since we're fetching everything with no pagination, might as well
            # not order it.
            .order_by()
        )

    def get_language_packs_queryset_with_appversions(
            self, application, appversions):
        """
        Return queryset to use specifically when requesting language packs
        compatible with a given app + versions.

        application is an application id, and appversions is a dict with min
        and max keys pointing to application versions expressed as ints.
        """
        # Base queryset.
        qs = self.get_queryset_base(application, (amo.ADDON_LPAPP,))
        # Version queryset we'll prefetch once for all results. We need to
        # find the ones compatible with the app+appversion requested, and we
        # can avoid loading translations by removing transforms and then
        # re-applying the default one that takes care of the files and compat
        # info.
        versions_qs = (
            Version.objects
            .latest_public_compatible_with(application, appversions)
            .no_transforms().transform(Version.transformer))
        return (
            qs.prefetch_related(Prefetch('versions',
                                         to_attr='compatible_versions',
                                         queryset=versions_qs))
            .filter(versions__apps__application=application,
                    versions__apps__min__version_int__lte=appversions['min'],
                    versions__apps__max__version_int__gte=appversions['max'],
                    versions__channel=amo.RELEASE_CHANNEL_LISTED,
                    versions__files__status=amo.STATUS_APPROVED)
            .distinct()
        )

    @method_decorator(cache_page(60 * 60 * 24))
    def dispatch(self, *args, **kwargs):
        # Whole responses are cached for 24 hours.
        return super(LanguageToolsView, self).dispatch(*args, **kwargs)

    def list(self, request, *args, **kwargs):
        # Ignore pagination (return everything) but do wrap the data in a
        # 'results' property to mimic what the default implementation of list()
        # does in DRF.
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        return Response({'results': serializer.data})
class ReplacementAddonView(ListAPIView):
    """Plain list of all legacy-guid -> replacement mappings."""
    authentication_classes = []
    queryset = ReplacementAddon.objects.all()
    serializer_class = ReplacementAddonSerializer
class CompatOverrideView(ListAPIView):
    """This view is used by Firefox so it's performance-critical.

    Every firefox client requests the list of overrides approx. once per day.
    Firefox requests the overrides via a list of GUIDs which makes caching
    hard because the variation of possible GUID combinations prevent us to
    simply add some dumb-caching and requires us to resolve cache-misses.
    """
    queryset = CompatOverride.objects.all()
    serializer_class = CompatOverrideSerializer

    @classmethod
    def as_view(cls, **initkwargs):
        """The API is read-only so we can turn off atomic requests."""
        return non_atomic_requests(
            super(CompatOverrideView, cls).as_view(**initkwargs))

    def get_guids(self):
        """Return the list of guids requested via the guid query param."""
        # Use the same Filter we use for AddonSearchView for consistency.
        guid_filter = AddonGuidQueryParam(self.request)
        return guid_filter.get_value()

    def filter_queryset(self, queryset):
        """Return overrides matching the requested guids, newest first.

        Raises ParseError (HTTP 400) if no guid parameter was provided.
        """
        guids = self.get_guids()
        if not guids:
            raise exceptions.ParseError(
                'Empty, or no, guid parameter provided.')
        # Evaluate the queryset and cast it into a list.
        # This will force Django to simply use len(queryset) instead of
        # calling .count() on it and avoids an additional COUNT query.
        # The amount of GUIDs we should get in real-life won't be paginated
        # most of the time so it's safe to simply evaluate the query.
        # The advantage here is that we are saving ourselves a `COUNT` query
        # and these are expensive.
        return list(queryset.filter(guid__in=guids).transform(
            CompatOverride.transformer).order_by('-pk'))
class AddonRecommendationView(AddonSearchView):
    """Recommendations endpoint backed by TAAR.

    Returns up to 4 add-ons recommended for the guid passed as a query
    parameter, falling back to curated recommendations when the recommended
    guids can't all be found in the search index.
    """
    filter_backends = [ReviewedContentFilter]
    ab_outcome = None  # Set by filter_queryset().
    fallback_reason = None  # Set by filter_queryset().
    pagination_class = None

    def get_paginated_response(self, data):
        """Wrap results in a pagination-like envelope with TAAR metadata."""
        data = data[:4]  # taar is only supposed to return 4 anyway.
        # NOTE(review): page_size is reported as 1 even though up to 4
        # results are returned - looks inconsistent; confirm this is
        # intended by API consumers.
        return Response(OrderedDict([
            ('outcome', self.ab_outcome),
            ('fallback_reason', self.fallback_reason),
            ('page_size', 1),
            ('page_count', 1),
            ('count', len(data)),
            ('next', None),
            ('previous', None),
            ('results', data),
        ]))

    def filter_queryset(self, qs):
        """Query ES for the guids TAAR recommends for the request's guid."""
        qs = super(AddonRecommendationView, self).filter_queryset(qs)
        guid_param = self.request.GET.get('guid')
        taar_enable = self.request.GET.get('recommended', '').lower() == 'true'
        guids, self.ab_outcome, self.fallback_reason = (
            get_addon_recommendations(guid_param, taar_enable))
        results_qs = qs.query(query.Bool(must=[Q('terms', guid=guids)]))
        results_qs.execute()  # To cache the results.
        # If TAAR's recommendations didn't all resolve in ES, fall back to
        # the curated default list instead.
        if results_qs.count() != 4 and is_outcome_recommended(self.ab_outcome):
            guids, self.ab_outcome, self.fallback_reason = (
                get_addon_recommendations_invalid())
            return qs.query(query.Bool(must=[Q('terms', guid=guids)]))
        return results_qs

    def paginate_queryset(self, queryset):
        # We don't need pagination for the fixed number of results.
        return queryset
| 0 | 0 |
3840052143a4c80cb731fce500d7e4cb9f141b98 | 11,603 | py | Python | scheduling/methods/k_means_NN_naive.py | CORE-Robotics-Lab/Personalized_Neural_Trees | 3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e | [
"MIT"
] | 3 | 2021-05-22T19:25:01.000Z | 2021-12-01T07:59:56.000Z | scheduling/methods/k_means_NN_naive.py | CORE-Robotics-Lab/Personalized_Neural_Trees | 3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e | [
"MIT"
] | null | null | null | scheduling/methods/k_means_NN_naive.py | CORE-Robotics-Lab/Personalized_Neural_Trees | 3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e | [
"MIT"
] | null | null | null | """
Created by Rohan Paleja on September 23, 2019
Nikolaidis et. al. benchmark
"""
import torch
import torch.nn.functional as F
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
import pickle
from torch.autograd import Variable
from utils.naive_utils import load_in_naive_data, find_which_schedule_this_belongs_to
from utils.hri_utils import save_performance_results
from sklearn.cluster import KMeans
from scheduling.methods.train_autoencoder import Autoencoder, AutoEncoderTrain
# sys.path.insert(0, '../')
import itertools
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
from scheduling.methods.NN_naive import NNSmall
# noinspection PyTypeChecker,PyArgumentList
class NNTrain:
    """
    class structure to train the NN for a certain amount of schedules.
    This class handles training the NN, evaluating the NN, and saving the results

    Implements the Nikolaidis et al. benchmark: schedules are clustered with
    k-means into 3 groups, and one small network per cluster is trained.
    """

    def __init__(self):
        # Number of training schedules; also selects which pre-computed
        # matrix pickle file is loaded below.
        self.num_schedules = 150
        self.num_test_schedules = 100
        self.total_loss_array = []
        # Load the full naive dataset (250 train / 250 test schedules);
        # contiguous subsamples are drawn further down.
        self.X_train_naive, self.Y_train_naive, self.schedule_array_train_naive, self.X_test_naive, self.Y_test_naive, self.schedule_array_test_naive = load_in_naive_data(
            250,250)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # One small classifier per k-means cluster (3 clusters).
        model1 = NNSmall().to(device)
        model2 = NNSmall().to(device)
        model3 = NNSmall().to(device)
        self.models = [model1, model2, model3]
        # One Adam optimizer per cluster model.
        opt1 = torch.optim.Adam(self.models[0].parameters(), lr=.001)
        opt2 = torch.optim.Adam(self.models[1].parameters(), lr=.001)
        opt3 = torch.optim.Adam(self.models[2].parameters(), lr=.001)
        self.optimizers = [opt1, opt2, opt3]
        self.when_to_save = 1000
        # NOTE(review): hard-coded absolute path - breaks on any other
        # machine; consider making it configurable.
        schedule_matrix_load_directory = '/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/results/'+str(self.num_schedules) + 'matrixes.pkl'
        self.matrices = pickle.load(open(schedule_matrix_load_directory, "rb"))
        # Cluster the schedule matrices; self.label maps schedule index ->
        # cluster id in {0, 1, 2}.
        self.kmeans_model, self.label = self.cluster_matrices(self.matrices, self.num_schedules)
        # Draw a contiguous 150-schedule training sample and a 100-schedule
        # test sample from the loaded data.
        self.X_train_naive, \
        self.Y_train_naive, \
        self.schedule_array_train_naive, = self.sample_data(150)
        self.X_test_naive, \
        self.Y_test_naive, \
        self.schedule_array_test_naive, = self.sample_test_data(100)
        # NOTE(review): duplicate assignment - num_test_schedules was
        # already set to 100 above.
        self.num_test_schedules = 100

    def sample_data(self, size):
        """Draw a contiguous sample of `size` training schedules.

        Side effect: sets self.sample_min, the timestep offset of the sample
        within the full training arrays (each schedule spans 20 timesteps).
        """
        # return self.X_train_naive[0:size * 20 * 20], \
        #        self.Y_train_naive[0:size * 20 * 20], \
        #        self.schedule_array_train_naive[0:size], \
        #        self.start_of_each_set_twenty_train[0:size * 20]
        if size == 250:
            set_of_twenty = 0
        else:
            # Random starting schedule so the window fits inside 250.
            set_of_twenty = np.random.randint(250-size)
        self.sample_min = set_of_twenty * 20
        return self.X_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
               self.Y_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
               self.schedule_array_train_naive[set_of_twenty:set_of_twenty+size]

    def sample_test_data(self, size):
        """Draw a contiguous sample of `size` test schedules.

        Side effect: sets self.sample_test_min, the timestep offset of the
        sample within the full test arrays.
        """
        # return self.X_train_naive[0:size * 20 * 20], \
        #        self.Y_train_naive[0:size * 20 * 20], \
        #        self.schedule_array_train_naive[0:size], \
        #        self.start_of_each_set_twenty_train[0:size * 20]
        if size == 250:
            set_of_twenty = 0
        else:
            set_of_twenty = np.random.randint(250-size)
        self.sample_test_min = set_of_twenty * 20
        return self.X_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
               self.Y_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
               self.schedule_array_test_naive[set_of_twenty:set_of_twenty+size]

    @staticmethod
    def cluster_matrices(matrices, num_schedules):
        """
        clusters the matrix schedules
        :param matrices: list of per-schedule 2048x20 count matrices
        :param num_schedules: number of schedules (rows of the stacked data)
        :return: (fitted KMeans model, per-schedule cluster labels)
        """
        # vectorize each matrix
        vectorized_set = []
        for i in matrices:
            vectorized = i.reshape(20 * 2048, 1)
            vectorized_set.append(vectorized)
        kmeans = KMeans(n_clusters=3, random_state=0)  # random state makes it deterministic
        # Fitting the input data
        new_set = np.hstack(tuple(vectorized_set)).reshape(num_schedules, 20 * 2048)
        kmeans_model = kmeans.fit(np.asarray(new_set))
        labels = kmeans_model.predict(np.asarray(new_set))
        return kmeans_model, labels

    def train(self):
        """
        Trains NN.
        Randomly samples a schedule and timestep within that schedule, and passes in the corresponding data in an attempt to classify which task was scheduled
        :return:
        """
        epochs = 200000 * 3

        for epoch in range(epochs):
            # sample a timestep before the cutoff for cross_validation
            rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))
            input_nn = self.X_train_naive[rand_timestep_within_sched]
            truth_nn = self.Y_train_naive[rand_timestep_within_sched]
            # Map the sampled timestep back to its schedule (offset by
            # sample_min since the arrays are a contiguous subsample), then
            # to its k-means cluster to pick which network to update.
            which_schedule = find_which_schedule_this_belongs_to(self.schedule_array_train_naive, rand_timestep_within_sched+self.sample_min)
            cluster_num = self.label[which_schedule]
            # iterate over pairwise comparisons
            if torch.cuda.is_available():
                input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())  # change to 5 to increase batch size
                truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).cuda().long())
            else:
                input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))
                truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).long())

            # Standard single-sample classification step on the cluster's net.
            self.optimizers[cluster_num].zero_grad()
            output = self.models[cluster_num].forward(input_nn)
            loss = F.cross_entropy(output, truth)

            loss.backward()
            # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
            self.optimizers[cluster_num].step()

            self.total_loss_array.append(loss.item())

            if epoch % 500 == 499:
                print('loss at', epoch, ', total loss (average for each 100, averaged)', np.mean(self.total_loss_array[-100:]))
                # self.save_trained_nets(str(epoch))

    @staticmethod
    def create_iterables():
        """
        adds all possible state combinations

        Enumerates every 11-bit binary embedding as a tuple, in
        itertools.product order.
        :return: list of 2**11 tuples
        """
        iterables = [[0, 1], [0, 1], [0, 1],
                     [0, 1], [0, 1], [0, 1],
                     [0, 1], [0, 1], [0, 1],
                     [0, 1], [0, 1]]
        states = []
        for t in itertools.product(*iterables):
            states.append(t)
        return states

    def pass_in_embedding_out_state_ID(self, states, binary):
        """
        pass in a binary embedding, and itll return the state id
        :param states: list of all binary state tuples (from create_iterables)
        :param binary: binary embedding as a sequence of 0/1
        :return: integer index of the embedding within states
        """
        binary_as_tuple = tuple(binary)
        index = states.index(binary_as_tuple)
        return index

    def evaluate_on_test_data(self):
        """
        Evaluate performance of a trained network.
        This is tested on 20% of the data and will be stored in a text file.

        For each test schedule, an online state-action count matrix is built
        from autoencoder embeddings and used to assign the schedule to a
        k-means cluster at every timestep; the matching cluster network then
        predicts the scheduled task.
        :return: mean top-1 accuracy over test schedules
        """
        # confusion_matrix = np.zeros((20,20))
        autoencoder_class = AutoEncoderTrain(self.num_schedules)
        checkpoint = torch.load('/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/Autoencoder150.tar')
        autoencoder_class.model.load_state_dict(checkpoint['nn_state_dict'])
        states = self.create_iterables()

        prediction_accuracy = [0, 0]
        percentage_accuracy_top1 = []
        percentage_accuracy_top3 = []
        # Per-dimension means of the autoencoder embedding, used to binarize
        # embeddings below. NOTE(review): hard-coded - presumably computed
        # offline on the training set; confirm.
        mean_input = [1.3277743, 0.32837677, 1.4974482, -1.3519306, -0.64621973, 0.10534518, -2.338118, -2.7345326, 1.7558736, -3.0746384, -3.485554]

        for i, schedule in enumerate(self.schedule_array_test_naive):
            current_schedule_matrix = np.zeros((2048, 20))

            for count in range(schedule[0]-self.sample_test_min, schedule[1]-self.sample_test_min + 1):
                # Pick the cluster for the schedule-so-far (normalized counts
                # once any observation exists).
                if current_schedule_matrix.sum() == 0:
                    cluster_num = self.kmeans_model.predict(current_schedule_matrix.reshape(1, -1))
                else:
                    matrix = np.divide(current_schedule_matrix, current_schedule_matrix.sum())
                    cluster_num = self.kmeans_model.predict(matrix.reshape(1, -1))

                net_input = self.X_test_naive[count]
                truth = self.Y_test_naive[count]

                if torch.cuda.is_available():
                    input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)).cuda())
                    truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)).cuda().long())
                else:
                    input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)))
                    truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)))

                # forward
                output = self.models[int(cluster_num)].forward(input_nn)

                index = torch.argmax(output).item()

                # confusion_matrix[truth][index] += 1

                # top 3
                _, top_three = torch.topk(output, 3)

                if index == truth.item():
                    prediction_accuracy[0] += 1

                if truth.item() in top_three.detach().cpu().tolist()[0]:
                    prediction_accuracy[1] += 1

                # update matrix: binarize the autoencoder embedding against
                # mean_input, map it to a state id, and count the action.
                embedding_copy = np.zeros((1, 11))
                input_element = autoencoder_class.model.forward_only_encoding(input_nn)
                for z, each_element in enumerate(mean_input):
                    if each_element > input_element[0][z].item():
                        embedding_copy[0][z] = 0
                    else:
                        embedding_copy[0][z] = 1
                index = self.pass_in_embedding_out_state_ID(states, embedding_copy[0])
                action = truth.item()
                current_schedule_matrix[index][int(action)] += 1

            # Each schedule has 20 timesteps, hence the division by 20.
            print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)

            print('schedule num:', i)

            percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
            percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
            prediction_accuracy = [0, 0]
        print(np.mean(percentage_accuracy_top1))
        # save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'kmeans_to_NN_naive')
        return np.mean(percentage_accuracy_top1)

    def save_trained_nets(self, name):
        """
        saves the model
        :param name: suffix appended to the checkpoint filename
        :return:
        """
        torch.save({'nn1_state_dict': self.models[0].state_dict(),
                    'nn2_state_dict': self.models[1].state_dict(),
                    'nn3_state_dict': self.models[2].state_dict()},
                   '/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/k_means_NN_' + name + '.tar')
def main():
    """
    entry point for file

    Runs three independent train/evaluate cycles and prints the mean and
    standard deviation of the resulting top-1 accuracies.
    :return:
    """
    accuracies = []
    for _ in range(3):
        trainer = NNTrain()
        trainer.train()
        accuracies.append(trainer.evaluate_on_test_data())
    print(np.mean(accuracies))
    print(np.std(accuracies))
# Script entry point.
if __name__ == '__main__':
    main()
| 40.010345 | 171 | 0.620271 | """
Created by Rohan Paleja on September 23, 2019
Nikolaidis et. al. benchmark
"""
import torch
import torch.nn.functional as F
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
import pickle
from torch.autograd import Variable
from utils.naive_utils import load_in_naive_data, find_which_schedule_this_belongs_to
from utils.hri_utils import save_performance_results
from sklearn.cluster import KMeans
from scheduling.methods.train_autoencoder import Autoencoder, AutoEncoderTrain
# sys.path.insert(0, '../')
import itertools
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
from scheduling.methods.NN_naive import NNSmall
# noinspection PyTypeChecker,PyArgumentList
class NNTrain:
"""
class structure to train the NN for a certain amount of schedules.
This class handles training the NN, evaluating the NN, and saving the results
"""
def __init__(self):
    """Load data, build one small NN per k-means cluster, and subsample splits.

    Loads 250 train / 250 test schedules, clusters precomputed schedule
    matrices into 3 groups, then keeps a random contiguous window of 150
    training and 100 test schedules.
    """
    self.num_schedules = 150
    self.num_test_schedules = 100
    self.total_loss_array = []
    # Full naive-featurization data: 250 schedules for each split.
    self.X_train_naive, self.Y_train_naive, self.schedule_array_train_naive, self.X_test_naive, self.Y_test_naive, self.schedule_array_test_naive = load_in_naive_data(
        250,250)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # One classifier per k-means cluster (3 clusters, see cluster_matrices).
    model1 = NNSmall().to(device)
    model2 = NNSmall().to(device)
    model3 = NNSmall().to(device)
    self.models = [model1, model2, model3]
    opt1 = torch.optim.Adam(self.models[0].parameters(), lr=.001)
    opt2 = torch.optim.Adam(self.models[1].parameters(), lr=.001)
    opt3 = torch.optim.Adam(self.models[2].parameters(), lr=.001)
    self.optimizers = [opt1, opt2, opt3]
    self.when_to_save = 1000
    # Precomputed schedule matrices (pickled), one per training schedule.
    schedule_matrix_load_directory = '/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/results/'+str(self.num_schedules) + 'matrixes.pkl'
    self.matrices = pickle.load(open(schedule_matrix_load_directory, "rb"))
    self.kmeans_model, self.label = self.cluster_matrices(self.matrices, self.num_schedules)
    # Downsample to a random contiguous window; sample_data / sample_test_data
    # also record the row offsets (sample_min / sample_test_min) used later
    # to translate window-relative indices back to absolute schedule rows.
    self.X_train_naive, \
        self.Y_train_naive, \
        self.schedule_array_train_naive, = self.sample_data(150)
    self.X_test_naive, \
        self.Y_test_naive, \
        self.schedule_array_test_naive, = self.sample_test_data(100)
    self.num_test_schedules = 100
def sample_data(self, size):
# return self.X_train_naive[0:size * 20 * 20], \
# self.Y_train_naive[0:size * 20 * 20], \
# self.schedule_array_train_naive[0:size], \
# self.start_of_each_set_twenty_train[0:size * 20]
if size == 250:
set_of_twenty = 0
else:
set_of_twenty = np.random.randint(250-size)
self.sample_min = set_of_twenty * 20
return self.X_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.Y_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.schedule_array_train_naive[set_of_twenty:set_of_twenty+size]
def sample_test_data(self, size):
# return self.X_train_naive[0:size * 20 * 20], \
# self.Y_train_naive[0:size * 20 * 20], \
# self.schedule_array_train_naive[0:size], \
# self.start_of_each_set_twenty_train[0:size * 20]
if size == 250:
set_of_twenty = 0
else:
set_of_twenty = np.random.randint(250-size)
self.sample_test_min = set_of_twenty * 20
return self.X_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.Y_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.schedule_array_test_naive[set_of_twenty:set_of_twenty+size]
@staticmethod
def cluster_matrices(matrices, num_schedules):
    """Cluster the schedule count matrices into 3 groups with k-means.

    :param matrices: iterable of (20 x 2048) matrices, one per schedule
    :param num_schedules: number of schedules (rows of the design matrix)
    :return: (fitted KMeans model, cluster label per schedule)
    """
    # Flatten every matrix to a column vector, stack the columns side by
    # side, and reshape into one row per schedule.
    columns = [m.reshape(20 * 2048, 1) for m in matrices]
    design = np.hstack(tuple(columns)).reshape(num_schedules, 20 * 2048)
    # NOTE(review): hstack of column vectors followed by this reshape
    # interleaves elements rather than recovering each flattened matrix as a
    # row; evaluate_on_test_data predicts on a plain row-flattened matrix,
    # so the two layouts may disagree -- verify the intended layout.
    # Fixed random_state keeps the clustering deterministic across runs.
    estimator = KMeans(n_clusters=3, random_state=0)
    fitted = estimator.fit(np.asarray(design))
    labels = fitted.predict(np.asarray(design))
    return fitted, labels
def train(self):
    """
    Trains the three cluster-specific NNs.
    Randomly samples a timestep, routes it to the network of its schedule's
    k-means cluster, and takes one cross-entropy Adam step on it.
    :return:
    """
    epochs = 200000 * 3
    for epoch in range(epochs):
        # sample a timestep before the cutoff for cross_validation
        rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))
        input_nn = self.X_train_naive[rand_timestep_within_sched]
        truth_nn = self.Y_train_naive[rand_timestep_within_sched]
        # schedule_array holds absolute row ranges, so re-add the window
        # offset recorded by sample_data before looking up the schedule.
        which_schedule = find_which_schedule_this_belongs_to(self.schedule_array_train_naive, rand_timestep_within_sched+self.sample_min)
        cluster_num = self.label[which_schedule]
        if torch.cuda.is_available():
            input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())  # change to 5 to increase batch size
            truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).cuda().long())
        else:
            input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))
            truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).long())
        # One optimizer step on the network belonging to this cluster only.
        self.optimizers[cluster_num].zero_grad()
        output = self.models[cluster_num].forward(input_nn)
        loss = F.cross_entropy(output, truth)
        loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
        self.optimizers[cluster_num].step()
        self.total_loss_array.append(loss.item())
        if epoch % 500 == 499:
            # Running average over the most recent 100 losses.
            print('loss at', epoch, ', total loss (average for each 100, averaged)', np.mean(self.total_loss_array[-100:]))
            # self.save_trained_nets(str(epoch))
@staticmethod
def create_iterables():
"""
adds all possible state combinations
:return:
"""
iterables = [[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1]]
states = []
for t in itertools.product(*iterables):
states.append(t)
return states
def pass_in_embedding_out_state_ID(self, states, binary):
"""
pass in a binary embedding, and itll return the state id
:param states
:param binary:
:return:
"""
binary_as_tuple = tuple(binary)
index = states.index(binary_as_tuple)
return index
def evaluate_on_test_data(self):
    """
    Evaluate the trained cluster networks on the test schedules.

    For each test schedule a (2048 x 20) state/action count matrix is built
    online; at every timestep it is (normalized and) fed to the k-means
    model to pick which network predicts the next task.  Prints per-schedule
    top-1/top-3 accuracy and returns the mean top-1 accuracy.
    :return: mean top-1 prediction accuracy over test schedules
    """
    # confusion_matrix = np.zeros((20,20))
    # Autoencoder embeds each 242-dim input into 11 dims for state counting.
    autoencoder_class = AutoEncoderTrain(self.num_schedules)
    checkpoint = torch.load('/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/Autoencoder150.tar')
    autoencoder_class.model.load_state_dict(checkpoint['nn_state_dict'])
    states = self.create_iterables()
    prediction_accuracy = [0, 0]  # [top-1 hits, top-3 hits] within a schedule
    percentage_accuracy_top1 = []
    percentage_accuracy_top3 = []
    # Per-dimension thresholds used to binarize the 11-dim embedding;
    # presumably the mean embedding over training data -- TODO confirm.
    mean_input = [1.3277743, 0.32837677, 1.4974482, -1.3519306, -0.64621973, 0.10534518, -2.338118, -2.7345326, 1.7558736, -3.0746384, -3.485554]
    for i, schedule in enumerate(self.schedule_array_test_naive):
        current_schedule_matrix = np.zeros((2048, 20))
        # schedule holds absolute row bounds; shift by the test-window offset.
        for count in range(schedule[0]-self.sample_test_min, schedule[1]-self.sample_test_min + 1):
            if current_schedule_matrix.sum() == 0:
                # No observations yet: cluster on the all-zero matrix.
                cluster_num = self.kmeans_model.predict(current_schedule_matrix.reshape(1, -1))
            else:
                matrix = np.divide(current_schedule_matrix, current_schedule_matrix.sum())
                cluster_num = self.kmeans_model.predict(matrix.reshape(1, -1))
            net_input = self.X_test_naive[count]
            truth = self.Y_test_naive[count]
            if torch.cuda.is_available():
                input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)).cuda())
                truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)).cuda().long())
            else:
                input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)))
                truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)))
            # forward
            output = self.models[int(cluster_num)].forward(input_nn)
            index = torch.argmax(output).item()
            # confusion_matrix[truth][index] += 1
            # top 3
            _, top_three = torch.topk(output, 3)
            if index == truth.item():
                prediction_accuracy[0] += 1
            if truth.item() in top_three.detach().cpu().tolist()[0]:
                prediction_accuracy[1] += 1
            # Update the count matrix: binarize the autoencoder embedding
            # against mean_input, map it to a state id, and count the action.
            embedding_copy = np.zeros((1, 11))
            input_element = autoencoder_class.model.forward_only_encoding(input_nn)
            for z, each_element in enumerate(mean_input):
                if each_element > input_element[0][z].item():
                    embedding_copy[0][z] = 0
                else:
                    embedding_copy[0][z] = 1
            index = self.pass_in_embedding_out_state_ID(states, embedding_copy[0])
            action = truth.item()
            current_schedule_matrix[index][int(action)] += 1
        # Each schedule contributes exactly 20 timesteps, hence the /20.
        print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
        print('schedule num:', i)
        percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
        percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
        prediction_accuracy = [0, 0]
    print(np.mean(percentage_accuracy_top1))
    # save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'kmeans_to_NN_naive')
    return np.mean(percentage_accuracy_top1)
def save_trained_nets(self, name):
    """
    Serialize the state dicts of all three cluster networks to one checkpoint.

    :param name: suffix appended to the checkpoint file name
    :return:
    """
    torch.save({'nn1_state_dict': self.models[0].state_dict(),
                'nn2_state_dict': self.models[1].state_dict(),
                'nn3_state_dict': self.models[2].state_dict()},
               '/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/k_means_NN_' + name + '.tar')
def main():
    """
    Entry point: run three independent train/evaluate trials and print the
    mean and standard deviation of the top-1 test accuracy.
    :return:
    """
    res = []
    for i in range(3):
        trainer = NNTrain()
        trainer.train()
        out = trainer.evaluate_on_test_data()
        res.append(out)
    print(np.mean(res))
    print(np.std(res))


if __name__ == '__main__':
    main()
| 0 | 0 |
b388f68a9de50b2d68147365e456767b8e775cd2 | 1,167 | py | Python | candy_collect.py | itiB/poke-scripts | e2da1356ee8000c7345682d4c07709481f2044f8 | [
"MIT"
] | 2 | 2020-02-08T13:55:46.000Z | 2020-07-21T13:17:26.000Z | candy_collect.py | itiB/poke-scripts | e2da1356ee8000c7345682d4c07709481f2044f8 | [
"MIT"
] | null | null | null | candy_collect.py | itiB/poke-scripts | e2da1356ee8000c7345682d4c07709481f2044f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import serial
import time
from time import sleep
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('port')
args = parser.parse_args()
sleep_time = 50
def send(msg, duration=0):
    """Write one controller command to the serial port, hold it, then release.

    :param msg: command string understood by the microcontroller
    :param duration: seconds to hold the input before sending RELEASE
    """
    print(msg)
    ser.write(f'{msg}\r\n'.encode('utf-8'))
    sleep(duration)
    ser.write(b'RELEASE\r\n')
ser = serial.Serial(args.port, 9600)
def candyCorrect():
    """Run one scripted pass of the candy-collection button sequence.

    Sends a fixed series of stick/button inputs with hand-tuned delays,
    idles for `sleep_time` seconds (printing a countdown every 10s), then
    clicks the left stick before the next pass.
    """
    send('LY MAX', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(1)
    send('Button B', 0.1)
    sleep(0.8)
    send('LY MIN', 0.1)
    sleep(0.5)
    send('Button A', 0.1)
    sleep(0.3)
    send('LY MAX', 0.1)
    sleep(0.2)
    send('Button A', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(2.5)
    send('Button A', 0.1)
    sleep(1.0)
    send('Button A', 0.1)
    # Idle period so the in-game action can complete before restarting.
    for i in range(0, sleep_time):
        sleep(1)
        if i % 10 == 0:
            print(f' {sleep_time - i} ')
    sleep(3)
    send('Button LCLICK', 0.1)
# Run the macro forever; on Ctrl-C release all held inputs and close the
# serial port so the controller is left in a clean state.
try:
    while 1:
        candyCorrect()
except KeyboardInterrupt:
    send('RELEASE')
    ser.close()
| 19.131148 | 51 | 0.552699 | #!/usr/bin/env python3
import argparse
import serial
import time
from time import sleep
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('port')
args = parser.parse_args()
sleep_time = 50
def send(msg, duration=0):
    """Write one controller command to the serial port, hold it, then release.

    :param msg: command string understood by the microcontroller
    :param duration: seconds to hold the input before sending RELEASE
    """
    print(msg)
    ser.write(f'{msg}\r\n'.encode('utf-8'))
    sleep(duration)
    ser.write(b'RELEASE\r\n')
ser = serial.Serial(args.port, 9600)
def candyCorrect():
    """Run one scripted pass of the candy-collection button sequence.

    Sends a fixed series of stick/button inputs with hand-tuned delays,
    idles for `sleep_time` seconds (printing a countdown every 10s), then
    clicks the left stick before the next pass.
    """
    send('LY MAX', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(1)
    send('Button B', 0.1)
    sleep(0.8)
    send('LY MIN', 0.1)
    sleep(0.5)
    send('Button A', 0.1)
    sleep(0.3)
    send('LY MAX', 0.1)
    sleep(0.2)
    send('Button A', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(0.3)
    send('Button A', 0.1)
    sleep(2.5)
    send('Button A', 0.1)
    sleep(1.0)
    send('Button A', 0.1)
    # Idle period so the in-game action can complete before restarting.
    # (User-facing countdown message is Japanese: "sleeping N more seconds".)
    for i in range(0, sleep_time):
        sleep(1)
        if i % 10 == 0:
            print(f'あと {sleep_time - i}秒 スリープします')
    sleep(3)
    send('Button LCLICK', 0.1)
# Run the macro forever; on Ctrl-C release all held inputs and close the
# serial port so the controller is left in a clean state.
try:
    while 1:
        candyCorrect()
except KeyboardInterrupt:
    send('RELEASE')
    ser.close()
| 30 | 0 |
a4ef9bd877c250ea0e460a024503d8e819218c76 | 3,683 | py | Python | utils_ic.py | kkkgabriel/50039Homework3 | 69d8f36f60868cac64bb1d1682eba34a548f3565 | [
"MIT"
] | null | null | null | utils_ic.py | kkkgabriel/50039Homework3 | 69d8f36f60868cac64bb1d1682eba34a548f3565 | [
"MIT"
] | null | null | null | utils_ic.py | kkkgabriel/50039Homework3 | 69d8f36f60868cac64bb1d1682eba34a548f3565 | [
"MIT"
] | null | null | null | import json
import torch
from torchvision import datasets, transforms
from PIL import Image
# Define function to read cat names
def read_jason(filename):
    """Load a JSON mapping (e.g. category index -> flower name) from disk.

    :param filename: path to the JSON file
    :return: parsed object (typically a dict)
    """
    with open(filename, 'r') as handle:
        return json.load(handle)
# Define function to read data
def load_data(data_dir):
    """Build train/valid/test DataLoaders from an ImageFolder directory tree.

    Expects `data_dir` to contain train/, valid/ and test/ subdirectories
    laid out for torchvision ImageFolder.  Training images are augmented
    (random rotation and horizontal flip); all splits are resized,
    center-cropped to 224x224 and normalized with ImageNet mean/std.

    :param data_dir: root directory of the dataset
    :return: (trainloader, testloader, validloader, train_data)
    """
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # Define your transforms for the training, validation, and testing sets
    train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                           transforms.Resize(255),
                                           transforms.CenterCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                [0.229, 0.224, 0.225])])
    test_valid_transforms = transforms.Compose([transforms.Resize(255),
                                                transforms.CenterCrop(224),
                                                transforms.ToTensor(),
                                                transforms.Normalize([0.485, 0.456, 0.406],
                                                                     [0.229, 0.224, 0.225])])
    # Load the datasets with ImageFolder
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_valid_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=test_valid_transforms)
    # Using the image datasets and the trainforms, define the dataloaders
    # (only the training loader shuffles).
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)
    return trainloader, testloader, validloader, train_data
# Define processing testing image function
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model.

    :param image: path to an image file readable by PIL
    :return: (1, 3, 224, 224) normalized float tensor (a torch tensor, not
        a numpy array as the original docstring suggested)
    '''
    # Resize and crop image
    im = Image.open(image)
    preprocess = transforms.Compose([transforms.Resize(255),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406],
                                                          [0.229, 0.224, 0.225])])
    im_tensor = preprocess(im)
    # Add a leading batch dimension in place.
    im_tensor.unsqueeze_(0)
    return im_tensor
# Define prediction function
def predict(image_path, model, topk, device, cat_to_name):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Prints the top-k probabilities and class names.

    :param image_path: path to the input image
    :param model: trained network; its classifier must expose class_to_idx
    :param topk: number of top classes to report
    :param device: torch device to run inference on
    :param cat_to_name: optional mapping from category number to display name
    '''
    model.to(device)
    model.eval()
    # TODO: Implement the code to predict the class from an image file
    img = process_image(image_path)
    img = img.to(device)
    output = model.forward(img)
    # Model outputs log-probabilities; exponentiate to recover probabilities.
    ps = torch.exp(output)
    probs, idxs = ps.topk(topk)
    idx_to_class = dict((v,k) for k, v in model.classifier.class_to_idx.items())
    # NOTE(review): dict iteration order is not ranked by probability, so
    # `classes` may not align index-for-index with `probs` -- verify.
    classes = [v for k, v in idx_to_class.items() if k in idxs.to('cpu').numpy()]
    if cat_to_name:
        # NOTE(review): assumes category keys are 1-based string indices
        # ('1', '2', ...) -- confirm against the cat_to_name JSON.
        classes = [cat_to_name[str(i + 1)] for c, i in \
                   model.classifier.class_to_idx.items() if c in classes]
    print('Probabilities:', probs.data.cpu().numpy()[0].tolist())
print('Classes:', classes) | 41.852273 | 88 | 0.581048 | import json
import torch
from torchvision import datasets, transforms
from PIL import Image
# Define function to read cat names
def read_jason(filename):
    """Load a JSON mapping (e.g. category index -> flower name) from disk.

    :param filename: path to the JSON file
    :return: parsed object (typically a dict)
    """
    with open(filename, 'r') as handle:
        return json.load(handle)
# Define function to read data
def load_data(data_dir):
    """Build train/valid/test DataLoaders from an ImageFolder directory tree.

    Expects `data_dir` to contain train/, valid/ and test/ subdirectories
    laid out for torchvision ImageFolder.  Training images are augmented
    (random rotation and horizontal flip); all splits are resized,
    center-cropped to 224x224 and normalized with ImageNet mean/std.

    :param data_dir: root directory of the dataset
    :return: (trainloader, testloader, validloader, train_data)
    """
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    # Define your transforms for the training, validation, and testing sets
    train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                           transforms.Resize(255),
                                           transforms.CenterCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize([0.485, 0.456, 0.406],
                                                                [0.229, 0.224, 0.225])])
    test_valid_transforms = transforms.Compose([transforms.Resize(255),
                                                transforms.CenterCrop(224),
                                                transforms.ToTensor(),
                                                transforms.Normalize([0.485, 0.456, 0.406],
                                                                     [0.229, 0.224, 0.225])])
    # Load the datasets with ImageFolder
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_valid_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=test_valid_transforms)
    # Using the image datasets and the trainforms, define the dataloaders
    # (only the training loader shuffles).
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)
    return trainloader, testloader, validloader, train_data
# Define processing testing image function
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model.

    :param image: path to an image file readable by PIL
    :return: (1, 3, 224, 224) normalized float tensor (a torch tensor, not
        a numpy array as the original docstring suggested)
    '''
    # Resize and crop image
    im = Image.open(image)
    preprocess = transforms.Compose([transforms.Resize(255),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406],
                                                          [0.229, 0.224, 0.225])])
    im_tensor = preprocess(im)
    # Add a leading batch dimension in place.
    im_tensor.unsqueeze_(0)
    return im_tensor
# Define prediction function
def predict(image_path, model, topk, device, cat_to_name):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Prints the top-k probabilities and class names.

    :param image_path: path to the input image
    :param model: trained network; its classifier must expose class_to_idx
    :param topk: number of top classes to report
    :param device: torch device to run inference on
    :param cat_to_name: optional mapping from category number to display name
    '''
    model.to(device)
    model.eval()
    # TODO: Implement the code to predict the class from an image file
    img = process_image(image_path)
    img = img.to(device)
    output = model.forward(img)
    # Model outputs log-probabilities; exponentiate to recover probabilities.
    ps = torch.exp(output)
    probs, idxs = ps.topk(topk)
    idx_to_class = dict((v,k) for k, v in model.classifier.class_to_idx.items())
    # NOTE(review): dict iteration order is not ranked by probability, so
    # `classes` may not align index-for-index with `probs` -- verify.
    classes = [v for k, v in idx_to_class.items() if k in idxs.to('cpu').numpy()]
    if cat_to_name:
        # NOTE(review): assumes category keys are 1-based string indices
        # ('1', '2', ...) -- confirm against the cat_to_name JSON.
        classes = [cat_to_name[str(i + 1)] for c, i in \
                   model.classifier.class_to_idx.items() if c in classes]
    print('Probabilities:', probs.data.cpu().numpy()[0].tolist())
print('Classes:', classes) | 0 | 0 |
7c9ecc3b2e26c62f94ef6497972cb0e25e5a58f9 | 13,851 | py | Python | tensorflow/python/tools/freeze_graph_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 78 | 2020-08-04T12:36:25.000Z | 2022-03-25T04:23:40.000Z | tensorflow/python/tools/freeze_graph_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/tools/freeze_graph_test.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 28 | 2020-02-10T07:03:06.000Z | 2022-01-12T11:19:20.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def _testFreezeGraph(self, saver_write_version):
    """Build a tiny graph (one variable * 2), checkpoint it with the given
    saver format, freeze it, and verify the frozen graph still computes 2.0
    with every variable converted to a constant."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
        variable_node = variables.VariableV1(1.0, name="variable_node")
        output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
        sess = session.Session()
        init = variables.global_variables_initializer()
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
        saver = saver_lib.Saver(write_version=saver_write_version)
        checkpoint_path = saver.save(
            sess,
            checkpoint_prefix,
            global_step=0,
            latest_filename=checkpoint_state_name)
        graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(
        input_graph_path,
        input_saver_def_path,
        input_binary,
        checkpoint_path,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph_path,
        clear_devices,
        "",
        "",
        "",
        checkpoint_version=saver_write_version)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
        output_graph_def = graph_pb2.GraphDef()
        with open(output_graph_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
        self.assertEqual(4, len(output_graph_def.node))
        for node in output_graph_def.node:
            self.assertNotEqual("VariableV2", node.op)
            self.assertNotEqual("Variable", node.op)
        with session.Session() as sess:
            output_node = sess.graph.get_tensor_by_name("output_node:0")
            output = sess.run(output_node)
            self.assertNear(2.0, output, 0.00001)
def _createTFExampleString(self, feature_name, feature_value):
    """Create a serialized tensorflow Example holding one float feature."""
    example = example_pb2.Example()
    feature = example.features.feature[feature_name]
    feature.float_list.value.append(feature_value)
    return example.SerializeToString()
def _writeDummySavedModel(self, path, feature_name, tags):
    """Writes a classifier with a single float input feature to `path`.

    The SavedModel parses serialized tf.Examples, multiplies the named
    feature by a variable initialized to 1.0, and exposes a classification
    signature under the provided tags.
    """
    with ops.Graph().as_default():
        examples = array_ops.placeholder(dtypes.string, name="input_node")
        feature_configs = {
            feature_name: parsing_ops.FixedLenFeature(shape=[],
                                                      dtype=dtypes.float32),
        }
        features = parsing_ops.parse_example(examples, feature_configs)
        feature = features[feature_name]
        variable_node = variables.VariableV1(1.0, name="variable_node")
        scores = math_ops.multiply(variable_node, feature, name="output_node")
        # Class label is a constant string derived from the feature name.
        class_feature = array_ops.fill(array_ops.shape(feature),
                                       "class_%s" % feature_name)
        classes = array_ops.transpose(class_feature)
        with session.Session() as sess:
            sess.run(variables.global_variables_initializer())
            signature = (
                signature_def_utils.classification_signature_def(
                    examples=examples,
                    classes=classes,
                    scores=scores,))
            builder = saved_model_builder.SavedModelBuilder(path)
            builder.add_meta_graph_and_variables(
                sess,
                tags,
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        signature,
                },
            )
            builder.save(as_text=True)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV1(self):
    """Freeze/restore round-trip using the V1 checkpoint format."""
    self._testFreezeGraph(saver_pb2.SaverDef.V1)

@test_util.run_v1_only("b/120545219")
def testFreezeGraphV2(self):
    """Freeze/restore round-trip using the V2 checkpoint format."""
    self._testFreezeGraph(saver_pb2.SaverDef.V2)
def testFreezeMetaGraph(self):
    """Freeze from a MetaGraph (.meta) instead of a GraphDef and verify the
    frozen graph is variable-free and still computes 2.0."""
    tmp_dir = self.get_temp_dir()
    checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
    with ops.Graph().as_default():
        variable_node = variables.VariableV1(1.0, name="variable_node")
        output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
        sess = session.Session()
        init = variables.global_variables_initializer()
        sess.run(init)
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)
        saver = saver_lib.Saver()
        checkpoint_path = saver.save(
            sess,
            checkpoint_prefix,
            global_step=0,
            latest_filename=checkpoint_state_name)
    input_saver_def_path = ""
    input_binary = True
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False
    # Freeze directly from the .meta file written next to the checkpoint.
    input_meta_graph = checkpoint_path + ".meta"
    freeze_graph.freeze_graph(
        "", input_saver_def_path, input_binary, checkpoint_path,
        output_node_names, restore_op_name, filename_tensor_name,
        output_graph_filename, clear_devices, "", "", "", input_meta_graph)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
        output_graph_def = graph_pb2.GraphDef()
        with open(output_graph_filename, "rb") as f:
            output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
        self.assertEqual(4, len(output_graph_def.node))
        for node in output_graph_def.node:
            self.assertNotEqual("VariableV2", node.op)
            self.assertNotEqual("Variable", node.op)
        with session.Session() as sess:
            output_node = sess.graph.get_tensor_by_name("output_node:0")
            output = sess.run(output_node)
            self.assertNear(2.0, output, 0.00001)
@parameterized.named_parameters(
    ("empty_tags_set", "", []),
    ("default_tags_set", tag_constants.SERVING, [tag_constants.SERVING]))
def testFreezeSavedModel(self, tags_string, tags_list):
    """Freeze from a SavedModel directory (with empty and serving tag sets)
    and verify the frozen classifier still scores a serialized Example."""
    tmp_dir = self.get_temp_dir()
    saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
    feature_name = "feature"
    self._writeDummySavedModel(saved_model_dir, feature_name, tags_list)
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
    input_saved_model_dir = saved_model_dir
    output_node_names = "output_node"
    # All GraphDef/MetaGraph inputs disabled: freeze from SavedModel only.
    input_binary = False
    input_saver_def_path = False
    restore_op_name = None
    filename_tensor_name = None
    clear_devices = False
    input_meta_graph = False
    checkpoint_path = None
    input_graph_filename = None
    saved_model_tags = tags_string
    freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_filename, clear_devices, "", "", "",
                              input_meta_graph, input_saved_model_dir,
                              saved_model_tags)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
        output_graph_def = graph_pb2.GraphDef()
        with open(output_graph_filename, "rb") as f:
            output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
        # Newer TF versions emit ParseExampleV2, which adds two extra nodes.
        if any(u"ParseExampleV2" in node.name for node in output_graph_def.node):
            expected_node_count = 10
        else:
            expected_node_count = 8
        self.assertEqual(expected_node_count, len(output_graph_def.node))
        for node in output_graph_def.node:
            self.assertNotEqual("VariableV2", node.op)
            self.assertNotEqual("Variable", node.op)
        feature_value = 2.0
        example = self._createTFExampleString(feature_name, feature_value)
        with session.Session() as sess:
            input_node = sess.graph.get_tensor_by_name("input_node:0")
            output_node = sess.graph.get_tensor_by_name("output_node:0")
            output = sess.run(output_node, feed_dict={input_node: [example]})
            self.assertNear(feature_value, output, 0.00001)
def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    # Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by a identity ->
    # identity (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
        with variable_scope.variable_scope("part", partitioner=partitioner):
            batch_size, height, width, depth = 5, 128, 128, 3
            input1 = array_ops.zeros(
                (batch_size, height, width, depth), name="input1")
            input2 = array_ops.zeros(
                (batch_size, height, width, depth), name="input2")
            num_nodes = depth
            filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
            filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
            conv = nn.conv2d(
                input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
            node = math_ops.add(conv, input2, name="test/add")
            node = nn.relu6(node, name="test/relu6")
        # Save graph and checkpoints.
        sess = session.Session()
        sess.run(variables.global_variables_initializer())
        saver = saver_lib.Saver()
        checkpoint_path = saver.save(
            sess,
            checkpoint_prefix,
            global_step=0,
            latest_filename=checkpoint_state_name)
        graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
        # Ensure this graph has partition variables.
        self.assertTrue([
            tensor.name.split(":")[0]
            for op in sess.graph.get_operations()
            for tensor in op.values()
            if re.search(r"/part_\d+/", tensor.name)
        ])
    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    # Freezing a graph with partitioned variables is unsupported and must
    # raise rather than silently emit a broken graph.
    with self.assertRaises(ValueError):
        freeze_graph.freeze_graph_with_def_protos(
            input_graph_def=sess.graph_def,
            input_saver_def=None,
            input_checkpoint=checkpoint_path,
            output_node_names=output_node_names,
            restore_op_name="save/restore_all",  # default value
            filename_tensor_name="save/Const:0",  # default value
            output_graph=output_graph_path,
            clear_devices=False,
            initializer_nodes="")
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
    test.main()
| 40.264535 | 80 | 0.695401 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """End-to-end tests for the freeze_graph tool.

  Each test builds a small graph with at least one variable, saves a
  checkpoint (or SavedModel/MetaGraph), runs the freezing routine, and then
  verifies that the frozen GraphDef contains no Variable ops and still
  produces the original numerical result.
  """

  def _testFreezeGraph(self, saver_write_version):
    """Freezes a one-variable graph using the given saver version.

    Args:
      saver_write_version: saver_pb2.SaverDef.V1 or .V2, the checkpoint
        format to exercise.
    """
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    with ops.Graph().as_default():
      variable_node = variables.VariableV1(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver(write_version=saver_write_version)
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(
        input_graph_path,
        input_saver_def_path,
        input_binary,
        checkpoint_path,
        output_node_names,
        restore_op_name,
        filename_tensor_name,
        output_graph_path,
        clear_devices,
        "",
        "",
        "",
        checkpoint_version=saver_write_version)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_path, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
      # Const + Mul const + Mul + (one more for the frozen variable's identity)
      # -- the exact count is pinned so structural regressions are caught.
      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)
      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)

  def _createTFExampleString(self, feature_name, feature_value):
    """Create a serialized tensorflow example."""
    example = example_pb2.Example()
    example.features.feature[feature_name].float_list.value.extend([
        feature_value])
    return example.SerializeToString()

  def _writeDummySavedModel(self, path, feature_name, tags):
    """Writes a classifier with two input features to the given path."""
    with ops.Graph().as_default():
      examples = array_ops.placeholder(dtypes.string, name="input_node")
      feature_configs = {
          feature_name: parsing_ops.FixedLenFeature(shape=[],
                                                    dtype=dtypes.float32),
      }
      features = parsing_ops.parse_example(examples, feature_configs)
      feature = features[feature_name]
      variable_node = variables.VariableV1(1.0, name="variable_node")
      scores = math_ops.multiply(variable_node, feature, name="output_node")
      class_feature = array_ops.fill(array_ops.shape(feature),
                                     "class_%s" % feature_name)
      classes = array_ops.transpose(class_feature)
      with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        signature = (
            signature_def_utils.classification_signature_def(
                examples=examples,
                classes=classes,
                scores=scores,))
        builder = saved_model_builder.SavedModelBuilder(path)
        builder.add_meta_graph_and_variables(
            sess,
            tags,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    signature,
            },
        )
        builder.save(as_text=True)

  @test_util.run_v1_only("b/120545219")
  def testFreezeGraphV1(self):
    # Exercise the legacy V1 checkpoint format.
    self._testFreezeGraph(saver_pb2.SaverDef.V1)

  @test_util.run_v1_only("b/120545219")
  def testFreezeGraphV2(self):
    # Exercise the current V2 checkpoint format.
    self._testFreezeGraph(saver_pb2.SaverDef.V2)

  def testFreezeMetaGraph(self):
    """Freezes from a MetaGraph (.meta) file instead of a GraphDef."""
    tmp_dir = self.get_temp_dir()
    checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
    with ops.Graph().as_default():
      variable_node = variables.VariableV1(1.0, name="variable_node")
      output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
      sess = session.Session()
      init = variables.global_variables_initializer()
      sess.run(init)
      output = sess.run(output_node)
      self.assertNear(2.0, output, 0.00001)
      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
    input_saver_def_path = ""
    input_binary = True
    output_node_names = "output_node"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    clear_devices = False
    # An empty input graph path plus input_meta_graph selects the MetaGraph
    # code path inside freeze_graph.
    input_meta_graph = checkpoint_path + ".meta"
    freeze_graph.freeze_graph(
        "", input_saver_def_path, input_binary, checkpoint_path,
        output_node_names, restore_op_name, filename_tensor_name,
        output_graph_filename, clear_devices, "", "", "", input_meta_graph)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_filename, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
      self.assertEqual(4, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)
      with session.Session() as sess:
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node)
        self.assertNear(2.0, output, 0.00001)

  @parameterized.named_parameters(
      ("empty_tags_set", "", []),
      ("default_tags_set", tag_constants.SERVING, [tag_constants.SERVING]))
  def testFreezeSavedModel(self, tags_string, tags_list):
    """Freezes a SavedModel and re-runs the frozen classifier graph."""
    tmp_dir = self.get_temp_dir()
    saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
    feature_name = "feature"
    self._writeDummySavedModel(saved_model_dir, feature_name, tags_list)
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
    input_saved_model_dir = saved_model_dir
    output_node_names = "output_node"
    input_binary = False
    input_saver_def_path = False
    restore_op_name = None
    filename_tensor_name = None
    clear_devices = False
    input_meta_graph = False
    checkpoint_path = None
    input_graph_filename = None
    saved_model_tags = tags_string
    freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_filename, clear_devices, "", "", "",
                              input_meta_graph, input_saved_model_dir,
                              saved_model_tags)
    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_filename, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")
      # Newer runtimes emit ParseExampleV2, which changes the node count.
      if any(u"ParseExampleV2" in node.name for node in output_graph_def.node):
        expected_node_count = 10
      else:
        expected_node_count = 8
      self.assertEqual(expected_node_count, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)
      feature_value = 2.0
      example = self._createTFExampleString(feature_name, feature_value)
      with session.Session() as sess:
        input_node = sess.graph.get_tensor_by_name("input_node:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node, feed_dict={input_node: [example]})
        self.assertNear(feature_value, output, 0.00001)

  def testSinglePartitionedVariable(self):
    """Ensures partitioned variables fail cleanly with freeze graph."""
    checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    # Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by a identity ->
    # identity (an additional identity node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    with ops.Graph().as_default():
      with variable_scope.variable_scope("part", partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros(
            (batch_size, height, width, depth), name="input1")
        input2 = array_ops.zeros(
            (batch_size, height, width, depth), name="input2")
        num_nodes = depth
        filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
        filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
        conv = nn.conv2d(
            input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
        node = math_ops.add(conv, input2, name="test/add")
        node = nn.relu6(node, name="test/relu6")
      # Save graph and checkpoints.
      sess = session.Session()
      sess.run(variables.global_variables_initializer())
      saver = saver_lib.Saver()
      checkpoint_path = saver.save(
          sess,
          checkpoint_prefix,
          global_step=0,
          latest_filename=checkpoint_state_name)
      graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
      # Ensure this graph has partition variables.
      self.assertTrue([
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
          if re.search(r"/part_\d+/", tensor.name)
      ])
    # Test freezing graph doesn't make it crash.
    output_node_names = "save/restore_all"
    output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
    # Partitioned variables are unsupported; freezing must raise ValueError
    # rather than silently producing a broken graph.
    with self.assertRaises(ValueError):
      freeze_graph.freeze_graph_with_def_protos(
          input_graph_def=sess.graph_def,
          input_saver_def=None,
          input_checkpoint=checkpoint_path,
          output_node_names=output_node_names,
          restore_op_name="save/restore_all",  # default value
          filename_tensor_name="save/Const:0",  # default value
          output_graph=output_graph_path,
          clear_devices=False,
          initializer_nodes="")
# Run all test cases when this module is executed directly.
if __name__ == "__main__":
  test.main()
| 0 | 0 |
5d3f7ba06d8f30ec1f43524b350975670db0b280 | 1,597 | py | Python | tests/gmprocess/waveform_processing/adjust_highpass_ridder_test.py | usgs/groundmotion-processing- | ed188e2bb1dcd9b17433ef4677874eac654fdd16 | [
"Unlicense"
] | null | null | null | tests/gmprocess/waveform_processing/adjust_highpass_ridder_test.py | usgs/groundmotion-processing- | ed188e2bb1dcd9b17433ef4677874eac654fdd16 | [
"Unlicense"
] | null | null | null | tests/gmprocess/waveform_processing/adjust_highpass_ridder_test.py | usgs/groundmotion-processing- | ed188e2bb1dcd9b17433ef4677874eac654fdd16 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from gmprocess.core.streamcollection import StreamCollection
from gmprocess.io.read import read_data
from gmprocess.utils.test_utils import read_data_dir
from gmprocess.waveform_processing.adjust_highpass_ridder import ridder_fchp
from gmprocess.utils.config import get_config
def test_auto_fchp():
    """Regression test for the Ridder highpass-corner search.

    Loads the GeoNet us1000778i records, runs ``ridder_fchp`` on every
    stream, and checks the resulting highpass corner frequencies against
    frozen reference values.
    """
    data_files, _origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for data_file in data_files:
        streams.extend(read_data(data_file))
    collection = StreamCollection(streams)
    config = get_config()
    config["integration"]["frequency"] = True
    # Every trace starts from the same constant corner frequencies; the
    # algorithm under test then adjusts the highpass corner.
    seed_corners = {"type": "constant", "highpass": 0.001, "lowpass": 20}
    highpass_values = []
    for stream in collection:
        for trace in stream:
            trace.setParameter("corner_frequencies", dict(seed_corners))
        adjusted = ridder_fchp(stream, config=config)
        for trace in adjusted:
            corners = trace.getParameter("corner_frequencies")
            highpass_values.append(corners["highpass"])
    expected = np.array(
        [
            0.021345158261480087,
            0.022839239726168643,
            0.02482398434993213,
            0.01399481102242619,
            0.026850167635921275,
            0.004817661513765862,
            0.008204101694236587,
            0.006429246474225982,
            0.004237087327289796,
        ]
    )
    np.testing.assert_allclose(highpass_values, expected, atol=1e-7)
# Allow running this test module as a plain script outside of pytest.
if __name__ == "__main__":
    os.environ["CALLED_FROM_PYTEST"] = "True"
    test_auto_fchp()
| 27.067797 | 76 | 0.634314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from gmprocess.core.streamcollection import StreamCollection
from gmprocess.io.read import read_data
from gmprocess.utils.test_utils import read_data_dir
from gmprocess.waveform_processing.adjust_highpass_ridder import ridder_fchp
from gmprocess.utils.config import get_config
def test_auto_fchp():
    """Check ridder_fchp against frozen highpass corners for us1000778i."""
    data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
    data_files.sort()
    streams = []
    for f in data_files:
        streams += read_data(f)
    sc = StreamCollection(streams)
    output_fchp = []
    config = get_config()
    config["integration"]["frequency"] = True
    for st in sc:
        # Seed every trace with the same constant corner frequencies so the
        # adjusted highpass value depends only on the data.
        for tr in st:
            tr.setParameter(
                "corner_frequencies",
                {"type": "constant", "highpass": 0.001, "lowpass": 20},
            )
        tmp_st = ridder_fchp(st, config=config)
        for tr in tmp_st:
            initial_corners = tr.getParameter("corner_frequencies")
            output_fchp.append(initial_corners["highpass"])
    # Frozen reference values (one per trace, in stream order).
    target_fchp = np.array(
        [
            0.021345158261480087,
            0.022839239726168643,
            0.02482398434993213,
            0.01399481102242619,
            0.026850167635921275,
            0.004817661513765862,
            0.008204101694236587,
            0.006429246474225982,
            0.004237087327289796,
        ]
    )
    np.testing.assert_allclose(output_fchp, target_fchp, atol=1e-7)
# Allow running this test module as a plain script outside of pytest.
if __name__ == "__main__":
    os.environ["CALLED_FROM_PYTEST"] = "True"
    test_auto_fchp()
| 0 | 0 |
8314dc9023ae51350cf14c4fc6f29ae2621bb8d7 | 952 | py | Python | supperfeed/importer.py | corydodt/SupperFeed | 2980e3f6d287f56c6eade06cfe57870d9796c5ea | [
"MIT"
] | 2 | 2015-10-28T23:53:29.000Z | 2018-02-27T12:39:54.000Z | supperfeed/importer.py | corydodt/SupperFeed | 2980e3f6d287f56c6eade06cfe57870d9796c5ea | [
"MIT"
] | null | null | null | supperfeed/importer.py | corydodt/SupperFeed | 2980e3f6d287f56c6eade06cfe57870d9796c5ea | [
"MIT"
] | null | null | null | """
Import recipes from URLs to our database
"""
import re
import json
from txpx import background, EchoProcess
from txpx.process import LineGlueProtocol
from supperfeed.build import Recipe
# Recipes are streamed as JSON lines; raise the glue protocol's per-line cap
# so long lines are not rejected (presumably the txpx default is smaller --
# confirm against the txpx source).
LineGlueProtocol.MAX_LENGTH=10000
class ImportProcess(EchoProcess):
    """Buffer JSON lines from the downloader process and build a Recipe.

    Output lines are accumulated until a sentinel line of the form ``/*...*/``
    (a slash, one or more asterisks, a slash) arrives, at which point the
    buffered text is parsed as JSON and persisted as a Recipe.
    """
    def __init__(self, *a, **kw):
        EchoProcess.__init__(self, *a, **kw)
        self.linebuf = []
    def outLineReceived(self, line):
        # The sentinel marks the end of one JSON document.
        if re.match(r'^/\*+/$', line) is not None:
            return self.finished()
        self.linebuf.append(line)
    def finished(self):
        payload = '\n'.join(self.linebuf)
        recipe = Recipe.fromLoadedData(json.loads(payload))
        recipe.save()
        # Reset the buffer in place for any subsequent document.
        del self.linebuf[:]
def importRecipe(url):
    """Spawn the 'recipeschema' downloader for *url*.

    Returns a Deferred that fires with the first Recipe whose
    ``importedFrom`` matches *url* once the subprocess has finished.
    """
    def _lookupRecipe(_result):
        return Recipe.objects(importedFrom=url).first()
    deferred = background(['recipeschema', url], proto=ImportProcess)
    deferred.addCallback(_lookupRecipe)
    return deferred
| 24.410256 | 77 | 0.658613 | """
Import recipes from URLs to our database
"""
import re
import json
from txpx import background, EchoProcess
from txpx.process import LineGlueProtocol
from supperfeed.build import Recipe
# Recipes are streamed as JSON lines; raise the glue protocol's per-line cap
# so long lines are not rejected (presumably the txpx default is smaller --
# confirm against the txpx source).
LineGlueProtocol.MAX_LENGTH=10000
class ImportProcess(EchoProcess):
    """
    Import a recipe by loading the json data dumped by the downloader process
    """
    def __init__(self, *a, **kw):
        EchoProcess.__init__(self, *a, **kw)
        # Accumulates output lines until the end-of-document sentinel arrives.
        self.linebuf = []
    def outLineReceived(self, line):
        # A line of the form '/***/' (slash, asterisks, slash) ends the document.
        if re.match(r'^/\*+/$', line):
            return self.finished()
        self.linebuf.append(line)
    def finished(self):
        # Parse the buffered lines as one JSON document and persist the recipe.
        data = json.loads('\n'.join(self.linebuf))
        recipe = Recipe.fromLoadedData(data)
        recipe.save()
        # Clear the buffer in place, ready for any subsequent document.
        self.linebuf[:] = []
def importRecipe(url):
    """Run the 'recipeschema' downloader for *url* and return a Deferred
    firing with the freshly imported Recipe (looked up by importedFrom)."""
    d = background(['recipeschema', url], proto=ImportProcess)
    d.addCallback(lambda ok: Recipe.objects(importedFrom=url).first())
    return d
| 0 | 0 |
30f5d5f62a940e9c9c56d93b2735b45ae0a23f7e | 759 | py | Python | src/my_project/medium_problems/from1to50/group_people_give_group_size.py | ivan1016017/LeetCodeAlgorithmProblems | f617f30201fb1cd53e32de35084fdeb88ef36023 | [
"MIT"
] | null | null | null | src/my_project/medium_problems/from1to50/group_people_give_group_size.py | ivan1016017/LeetCodeAlgorithmProblems | f617f30201fb1cd53e32de35084fdeb88ef36023 | [
"MIT"
] | 1 | 2021-09-22T12:26:14.000Z | 2021-09-22T12:26:14.000Z | src/my_project/medium_problems/from1to50/group_people_give_group_size.py | ivan1016017/LeetCodeAlgorithmProblems | 454284b76634cc34ed41f7fa30d857403cedf1bf | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Partition people 0..n-1 into groups of their requested sizes.

        ``groupSizes[i]`` is the size of the group person ``i`` must be in;
        a valid input always admits such a partition. Any valid grouping is
        acceptable. This implementation flushes a bucket the moment it
        reaches its target size (the original kept full buckets open until
        the next member of the same size arrived and swept up leftovers
        after the loop, which obscured the invariant).

        Args:
            groupSizes: requested group size per person index.

        Returns:
            A list of groups, each a list of person indices of the
            requested size.
        """
        groups: List[List[int]] = []
        # Maps a group size to the bucket currently being filled for it.
        pending: dict = {}
        for person, size in enumerate(groupSizes):
            bucket = pending.setdefault(size, [])
            bucket.append(person)
            if len(bucket) == size:
                # Bucket is complete: emit it and start a fresh one.
                groups.append(bucket)
                pending[size] = []
        # Because each size's total count is divisible by the size, every
        # bucket left in `pending` is empty here.
        return groups
# Ad-hoc smoke check: run the example from the problem statement.
solution = Solution()
print(solution.groupThePeople(groupSizes = [3,3,3,3,3,1,3]))
| 29.192308 | 71 | 0.546772 | from typing import List
class Solution:
    def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
        """Partition people 0..n-1 into groups of their requested sizes.

        ``groupSizes[i]`` is the size of the group person ``i`` belongs to.
        A bucket for a size is kept open until the next person of the same
        size finds it already full, at which point it is emitted and a new
        bucket is started; leftover buckets are appended after the loop.
        """
        completed: List[List[int]] = []
        open_groups: dict = {}  # group size -> indices collected so far
        for person, size in enumerate(groupSizes):
            if size not in open_groups:
                open_groups[size] = [person]
            elif len(open_groups[size]) < size:
                open_groups[size].append(person)
            else:
                # Bucket was already full: emit it, start a fresh one.
                completed.append(open_groups[size])
                open_groups[size] = [person]
        # Remaining buckets are exactly full (counts divide evenly).
        completed.extend(open_groups.values())
        return completed
# Ad-hoc smoke check: run the example from the problem statement.
solution = Solution()
print(solution.groupThePeople(groupSizes = [3,3,3,3,3,1,3]))
| 0 | 0 |
dbd8f6bc8f256424cc38bdbd062b914922bea024 | 1,011 | py | Python | tests/bidirectional_lut_test.py | pnarvor/nephelae_simulation | 7b3f3a2c2aaa49324f8b09a6ab62819c280efa4c | [
"BSD-3-Clause"
] | null | null | null | tests/bidirectional_lut_test.py | pnarvor/nephelae_simulation | 7b3f3a2c2aaa49324f8b09a6ab62819c280efa4c | [
"BSD-3-Clause"
] | null | null | null | tests/bidirectional_lut_test.py | pnarvor/nephelae_simulation | 7b3f3a2c2aaa49324f8b09a6ab62819c280efa4c | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/python3
import sys
import os
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import imageio
import matplotlib.cm as cm
import time
from netCDF4 import MFDataset
import mesonh_probe as cdf
"""
test file for periodiccontainer and netcdfinterface types
- arguments : mesonh (netcdf) files to open
"""
mesonhfiles = sys.argv[slice(1,len(sys.argv))]
atm = MFDataset(mesonhfiles)
lut = cdf.BiDirectionalLUT(atm.variables['VLEV'][:,0,0])
lin = cdf.BiDirectionalLinear(atm.variables['S_N_direction'][:])
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lut.to_output_space(np.linspace(0,160,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lut.to_input_space(np.linspace(0.005,3.95,1000)))
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lin.to_output_space(np.linspace(0,700,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lin.to_input_space(np.linspace(-1,5,1000)))
plt.show(block=False)
| 25.923077 | 66 | 0.732938 | #! /usr/bin/python3
import sys
import os
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import imageio
import matplotlib.cm as cm
import time
from netCDF4 import MFDataset
import mesonh_probe as cdf
"""
test file for periodiccontainer and netcdfinterface types
- arguments : mesonh (netcdf) files to open
"""
mesonhfiles = sys.argv[slice(1,len(sys.argv))]
atm = MFDataset(mesonhfiles)
lut = cdf.BiDirectionalLUT(atm.variables['VLEV'][:,0,0])
lin = cdf.BiDirectionalLinear(atm.variables['S_N_direction'][:])
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lut.to_output_space(np.linspace(0,160,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lut.to_input_space(np.linspace(0.005,3.95,1000)))
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lin.to_output_space(np.linspace(0,700,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lin.to_input_space(np.linspace(-1,5,1000)))
plt.show(block=False)
| 0 | 0 |
754066c0276ad5f42837eccb52a06e7a374ef810 | 366 | py | Python | tests/test_get_credits.py | freshsecurity/zerobounce-python-api-v2 | 7bd5c838db24cde6c5cbca5ca1590395e707b7cd | [
"MIT"
] | 3 | 2018-11-01T08:37:43.000Z | 2019-10-23T13:58:27.000Z | tests/test_get_credits.py | freshsecurity/zerobounce-python-api-v2 | 7bd5c838db24cde6c5cbca5ca1590395e707b7cd | [
"MIT"
] | 2 | 2019-10-09T22:30:52.000Z | 2022-01-14T15:13:32.000Z | tests/test_get_credits.py | freshsecurity/zerobounce-python-api-v2 | 7bd5c838db24cde6c5cbca5ca1590395e707b7cd | [
"MIT"
] | 5 | 2018-07-18T10:55:07.000Z | 2019-11-21T14:53:11.000Z | import responses
@responses.activate
def test_should_get_2375323_of_credit(zerobounce, zerobounce_response_getcredits):
    """get_credits() returns the balance reported by the /getcredits endpoint.

    HTTP is stubbed with `responses`; the client and canned payload come from
    pytest fixtures (presumably the payload carries 2375323 credits -- see
    conftest).
    """
    credits_url = 'https://api.zerobounce.net/v2/getcredits?api_key=123456'
    responses.add(
        responses.GET,
        credits_url,
        json=zerobounce_response_getcredits,
        status=200,
    )
    expected_credits = 2375323
    assert zerobounce.get_credits() == expected_credits
| 28.153846 | 82 | 0.696721 | import responses
@responses.activate
def test_should_get_2375323_of_credit(zerobounce, zerobounce_response_getcredits):
    """get_credits() should return the balance served by /getcredits.

    HTTP is stubbed with `responses`; `zerobounce` and the canned payload are
    pytest fixtures (presumably the payload reports 2375323 credits -- see
    conftest).
    """
    responses.add(responses.GET,
                  'https://api.zerobounce.net/v2/getcredits?api_key=123456',
                  json=zerobounce_response_getcredits,
                  status=200)
    assert zerobounce.get_credits() == 2375323
| 0 | 0 |
5c0bef4089d7fb266a9c29954e06a567e8ce9a6d | 800 | py | Python | constant/constant.py | ZxbMsl160918/covid19-vaccin | 7ebf3fa1de45fdaec8108e79ff6e090400cde9eb | [
"Apache-2.0"
] | null | null | null | constant/constant.py | ZxbMsl160918/covid19-vaccin | 7ebf3fa1de45fdaec8108e79ff6e090400cde9eb | [
"Apache-2.0"
] | null | null | null | constant/constant.py | ZxbMsl160918/covid19-vaccin | 7ebf3fa1de45fdaec8108e79ff6e090400cde9eb | [
"Apache-2.0"
] | null | null | null | #
# HTTP status code of a successful request.
RESPONSE_OK = 200
# Endpoints used by the vaccination-booking client.
URLS = {
    # API host.
    "hostUrl": "https://m.r.umiaohealth.com/",
    # List vaccination institutions matching a keyword; POST.
    "vaccinationAddress": "/InstitutionMedicineStock/GetBykeyword_InstitutionMedicineStock",
    # Bookable time slots of a given hospital on a given day.
    "hospitalTimeRange": "/Reservation/GetByWorkDate_Rsv_TimeRange",
    # Create the vaccination reservation; GET.
    "secVaccination": "/Reservation/Reservation_Create",
    # Page from which the childId is obtained.
    "childId": "/Adult/Index",
    # Current user's profile information.
    "userMsg": "/Home/My"
}
# Guangzhou district names (restored -- they had been stripped to empty strings).
AREAS = [
    "天河区",
    "白云区",
    "黄埔区",
    "荔湾区",
    "越秀区",
    "海珠区",
    "番禺区",
    "花都区",
    "南沙区",
    "增城区",
    "从化区"
]
# All known vaccine type codes.
VACCINE_TYPES = {
    "veroCell": 5601,  # COVID-19 vaccine (Vero cell, inactivated)
    "adenovirusVector": 5602  # COVID-19 vaccine (adenovirus vector)
    # etc...
}
# Vaccine type to book.
SEC_TYPE = VACCINE_TYPES["veroCell"]
| 18.181818 | 92 | 0.6075 | # 请求成功
RESPONSE_OK = 200
# Endpoints required by the vaccination-booking client.
URLS = {
    # API host.
    "hostUrl": "https://m.r.umiaohealth.com/",
    # List vaccination institutions matching a keyword; POST.
    "vaccinationAddress": "/InstitutionMedicineStock/GetBykeyword_InstitutionMedicineStock",
    # Bookable time slots of a given community hospital on a given day.
    "hospitalTimeRange": "/Reservation/GetByWorkDate_Rsv_TimeRange",
    # Create the vaccination reservation; GET.
    "secVaccination": "/Reservation/Reservation_Create",
    # Page from which the childId is obtained.
    "childId": "/Adult/Index",
    # Current user's profile information.
    "userMsg": "/Home/My"
}
# District names (Guangzhou).
AREAS = [
    "天河区",
    "白云区",
    "黄埔区",
    "荔湾区",
    "越秀区",
    "海珠区",
    "番禺区",
    "花都区",
    "南沙区",
    "增城区",
    "从化区"
]
# All known vaccine type codes.
VACCINE_TYPES = {
    "veroCell": 5601,  # COVID-19 vaccine (Vero cell, inactivated)
    "adenovirusVector": 5602  # COVID-19 vaccine (adenovirus vector)
    # etc...
}
# Vaccine type to book.
SEC_TYPE = VACCINE_TYPES["veroCell"]
5ebd8f1c512b1380b449a67ec585f3905c6bceac | 9,220 | py | Python | personal_context_builder/gensim_hdp.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | [
"Apache-2.0"
] | null | null | null | personal_context_builder/gensim_hdp.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | [
"Apache-2.0"
] | null | null | null | personal_context_builder/gensim_hdp.py | InternetOfUs/personal-context-builder | 89e7388d622bc0efbf708542566fdcdca667a4e5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
>>> # Lets extract the distribution of each document in topics
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
import numpy as np
from gensim import matutils # type: ignore
from gensim import models # type: ignore
from scipy import sparse # type: ignore
from sklearn.base import BaseEstimator, TransformerMixin # type: ignore
from sklearn.exceptions import NotFittedError # type: ignore
class HdpTransformer(TransformerMixin, BaseEstimator):
    """Base HDP module, wraps :class:`~gensim.models.hdpmodel.HdpModel`.

    The inner workings of this class heavily depends on `Wang, Paisley, Blei: "Online Variational
    Inference for the Hierarchical Dirichlet Process, JMLR (2011)"
    <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
    """

    def __init__(
        self,
        id2word,
        max_chunks=None,
        max_time=None,
        chunksize=256,
        kappa=1.0,
        tau=64.0,
        K=15,
        T=150,
        alpha=1,
        gamma=1,
        eta=0.01,
        scale=1.0,
        var_converge=0.0001,
        outputdir=None,
        random_state=None,
    ):
        """
        Parameters
        ----------
        id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Mapping between a words ID and the word itself in the vocabulary.
        max_chunks : int, optional
            Upper bound on how many chunks to process.It wraps around corpus beginning in another corpus pass,
            if there are not enough chunks in the corpus.
        max_time : int, optional
            Upper bound on time in seconds for which model will be trained.
        chunksize : int, optional
            Number of documents to be processed by the model in each mini-batch.
        kappa : float, optional
            Learning rate, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet
            Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        tau : float, optional
            Slow down parameter, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        K : int, optional
            Second level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        T : int, optional
            Top level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        alpha : int, optional
            Second level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        gamma : int, optional
            First level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        eta : float, optional
            The topic Dirichlet, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        scale : float, optional
            Weights information from the mini-chunk of corpus to calculate rhot.
        var_converge : float, optional
            Lower bound on the right side of convergence. Used when updating variational parameters
            for a single document.
        outputdir : str, optional
            Path to a directory where topic and options information will be stored.
        random_state : int, optional
            Seed used to create a :class:`~np.random.RandomState`. Useful for obtaining reproducible results.
        """
        self.gensim_model = None
        self.id2word = id2word
        self.max_chunks = max_chunks
        self.max_time = max_time
        self.chunksize = chunksize
        self.kappa = kappa
        self.tau = tau
        self.K = K
        self.T = T
        self.alpha = alpha
        self.gamma = gamma
        self.eta = eta
        self.scale = scale
        self.var_converge = var_converge
        self.outputdir = outputdir
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {iterable of list of (int, number), scipy.sparse matrix}
            A collection of documents in BOW format used for training the model.

        Returns
        -------
        :class:`~gensim.sklearn_api.hdp.HdpTransformer`
            The trained model.
        """
        if sparse.issparse(X):
            # Gensim expects streamed BOW documents, not a sparse matrix.
            corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        else:
            corpus = X
        self.gensim_model = models.HdpModel(
            corpus=corpus,
            id2word=self.id2word,
            max_chunks=self.max_chunks,
            max_time=self.max_time,
            chunksize=self.chunksize,
            kappa=self.kappa,
            tau=self.tau,
            K=self.K,
            T=self.T,
            alpha=self.alpha,
            gamma=self.gamma,
            eta=self.eta,
            scale=self.scale,
            var_converge=self.var_converge,
            outputdir=self.outputdir,
            random_state=self.random_state,
        )
        return self

    def transform(self, docs):
        """Infer a matrix of topic distribution for the given document bow, where a_ij
        indicates (topic_i, topic_probability_j).

        Parameters
        ----------
        docs : {iterable of list of (int, number), list of (int, number)}
            Document or sequence of documents in BOW format.

        Returns
        -------
        numpy.ndarray of shape [`len(docs), num_topics`]
            Topic distribution for `docs`.

        Raises
        ------
        sklearn.exceptions.NotFittedError
            If the model has not been fitted yet.
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # A single document arrives as a list of (id, count) tuples; promote it
        # to a one-element list of documents. The `docs` guard avoids an
        # IndexError on an empty input (which previously crashed here).
        if docs and isinstance(docs[0], tuple):
            docs = [docs]
        distribution, max_num_topics = [], 0
        for doc in docs:
            topicd = self.gensim_model[doc]
            distribution.append(topicd)
            # `default=-1` keeps this well-defined for a document with an
            # empty topic distribution (max() of an empty sequence raises).
            max_num_topics = max(
                max_num_topics,
                max((topic[0] for topic in topicd), default=-1) + 1,
            )
        # returning dense representation for compatibility with sklearn
        # but we should go back to sparse representation in the future
        distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
        return np.reshape(np.array(distribution), (len(docs), max_num_topics))

    def partial_fit(self, X):
        """Train model over a potentially incomplete set of documents.

        Uses the parameters set in the constructor.
        This method can be used in two ways:
        * On an unfitted model in which case the model is initialized and trained on `X`.
        * On an already fitted model in which case the model is **updated** by `X`.

        Parameters
        ----------
        X : {iterable of list of (int, number), scipy.sparse matrix}
            A collection of documents in BOW format used for training the model.

        Returns
        -------
        :class:`~gensim.sklearn_api.hdp.HdpTransformer`
            The trained model.
        """
        if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        if self.gensim_model is None:
            # First call: create the model; later calls only update it.
            self.gensim_model = models.HdpModel(
                id2word=self.id2word,
                max_chunks=self.max_chunks,
                max_time=self.max_time,
                chunksize=self.chunksize,
                kappa=self.kappa,
                tau=self.tau,
                K=self.K,
                T=self.T,
                alpha=self.alpha,
                gamma=self.gamma,
                eta=self.eta,
                scale=self.scale,
                var_converge=self.var_converge,
                outputdir=self.outputdir,
                random_state=self.random_state,
            )
        self.gensim_model.update(corpus=X)
        return self
| 42.100457 | 119 | 0.61974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
>>> # Lets extract the distribution of each document in topics
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
import numpy as np
from gensim import matutils # type: ignore
from gensim import models # type: ignore
from scipy import sparse # type: ignore
from sklearn.base import BaseEstimator, TransformerMixin # type: ignore
from sklearn.exceptions import NotFittedError # type: ignore
class HdpTransformer(TransformerMixin, BaseEstimator):
    """Base HDP module, wraps :class:`~gensim.models.hdpmodel.HdpModel`.
    The inner workings of this class heavily depends on `Wang, Paisley, Blei: "Online Variational
    Inference for the Hierarchical Dirichlet Process, JMLR (2011)"
    <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
    """
    def __init__(
        self,
        id2word,
        max_chunks=None,
        max_time=None,
        chunksize=256,
        kappa=1.0,
        tau=64.0,
        K=15,
        T=150,
        alpha=1,
        gamma=1,
        eta=0.01,
        scale=1.0,
        var_converge=0.0001,
        outputdir=None,
        random_state=None,
    ):
        """
        Parameters
        ----------
        id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Mapping between a words ID and the word itself in the vocabulary.
        max_chunks : int, optional
            Upper bound on how many chunks to process. It wraps around corpus beginning in another corpus pass,
            if there are not enough chunks in the corpus.
        max_time : int, optional
            Upper bound on time in seconds for which model will be trained.
        chunksize : int, optional
            Number of documents to be processed by the model in each mini-batch.
        kappa : float, optional
            Learning rate, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet
            Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        tau : float, optional
            Slow down parameter, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        K : int, optional
            Second level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        T : int, optional
            Top level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        alpha : int, optional
            Second level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        gamma : int, optional
            First level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        eta : float, optional
            The topic Dirichlet, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
            Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
        scale : float, optional
            Weights information from the mini-chunk of corpus to calculate rhot.
        var_converge : float, optional
            Lower bound on the right side of convergence. Used when updating variational parameters
            for a single document.
        outputdir : str, optional
            Path to a directory where topic and options information will be stored.
        random_state : int, optional
            Seed used to create a :class:`~np.random.RandomState`. Useful for obtaining reproducible results.
        """
        self.gensim_model = None
        self.id2word = id2word
        self.max_chunks = max_chunks
        self.max_time = max_time
        self.chunksize = chunksize
        self.kappa = kappa
        self.tau = tau
        self.K = K
        self.T = T
        self.alpha = alpha
        self.gamma = gamma
        self.eta = eta
        self.scale = scale
        self.var_converge = var_converge
        self.outputdir = outputdir
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {iterable of list of (int, number), scipy.sparse matrix}
            A collection of documents in BOW format used for training the model.
        Returns
        -------
        :class:`~gensim.sklearn_api.hdp.HdpTransformer`
            The trained model.
        """
        if sparse.issparse(X):
            corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        else:
            corpus = X
        self.gensim_model = models.HdpModel(
            corpus=corpus,
            id2word=self.id2word,
            max_chunks=self.max_chunks,
            max_time=self.max_time,
            chunksize=self.chunksize,
            kappa=self.kappa,
            tau=self.tau,
            K=self.K,
            T=self.T,
            alpha=self.alpha,
            gamma=self.gamma,
            eta=self.eta,
            scale=self.scale,
            var_converge=self.var_converge,
            outputdir=self.outputdir,
            random_state=self.random_state,
        )
        return self
    def transform(self, docs):
        """Infer a matrix of topic distribution for the given document bow, where a_ij
        indicates (topic_i, topic_probability_j).
        Parameters
        ----------
        docs : {iterable of list of (int, number), list of (int, number)}
            Document or sequence of documents in BOW format.
        Returns
        -------
        numpy.ndarray of shape [`len(docs), num_topics`]
            Topic distribution for `docs`.
        """
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )
        # The input as array of array; guard the [0] probe so an empty
        # sequence of documents doesn't raise IndexError.
        if docs and isinstance(docs[0], tuple):
            docs = [docs]
        distribution, max_num_topics = [], 0
        for doc in docs:
            topicd = self.gensim_model[doc]
            distribution.append(topicd)
            # A document can legitimately come back with no topics assigned;
            # max() over an empty generator would raise ValueError, so only
            # widen the bound when at least one (topic, prob) pair exists.
            if topicd:
                max_num_topics = max(max_num_topics, max(topic[0] for topic in topicd) + 1)
        # returning dense representation for compatibility with sklearn
        # but we should go back to sparse representation in the future
        distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
        return np.reshape(np.array(distribution), (len(docs), max_num_topics))
    def partial_fit(self, X):
        """Train model over a potentially incomplete set of documents.
        Uses the parameters set in the constructor.
        This method can be used in two ways:
        * On an unfitted model in which case the model is initialized and trained on `X`.
        * On an already fitted model in which case the model is **updated** by `X`.
        Parameters
        ----------
        X : {iterable of list of (int, number), scipy.sparse matrix}
            A collection of documents in BOW format used for training the model.
        Returns
        -------
        :class:`~gensim.sklearn_api.hdp.HdpTransformer`
            The trained model.
        """
        if sparse.issparse(X):
            X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
        if self.gensim_model is None:
            # Lazily build the model on first call; subsequent calls only update.
            self.gensim_model = models.HdpModel(
                id2word=self.id2word,
                max_chunks=self.max_chunks,
                max_time=self.max_time,
                chunksize=self.chunksize,
                kappa=self.kappa,
                tau=self.tau,
                K=self.K,
                T=self.T,
                alpha=self.alpha,
                gamma=self.gamma,
                eta=self.eta,
                scale=self.scale,
                var_converge=self.var_converge,
                outputdir=self.outputdir,
                random_state=self.random_state,
            )
        self.gensim_model.update(corpus=X)
        return self
b582f1040cc1af3be284ad67fe9c371e838dde5d | 2,060 | py | Python | zaqar-8.0.0/zaqar/common/policies/claims.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 97 | 2015-01-02T09:35:23.000Z | 2022-03-25T00:38:45.000Z | zaqar-8.0.0/zaqar/common/policies/claims.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | zaqar-8.0.0/zaqar/common/policies/claims.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 44 | 2015-01-28T03:01:28.000Z | 2021-05-13T18:55:19.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from zaqar.common.policies import base
# Template for fully-qualified policy targets, e.g. ``claims:create``.
CLAIMS = 'claims:%s'
# Policy rules for the claims API; every rule is unprotected (no RBAC check)
# and documents the HTTP operation it governs.
rules = [
    policy.DocumentedRuleDefault(
        name=CLAIMS % 'create',
        check_str=base.UNPROTECTED,
        description='Claims a set of messages from the specified queue.',
        operations=[
            {
                'path': '/v2/queues/{queue_name}/claims',
                'method': 'POST'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=CLAIMS % 'get',
        check_str=base.UNPROTECTED,
        description='Queries the specified claim for the specified queue.',
        operations=[
            {
                'path': '/v2/queues/{queue_name}/claims/{claim_id}',
                'method': 'GET'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=CLAIMS % 'delete',
        check_str=base.UNPROTECTED,
        description='Releases the specified claim for the specified queue.',
        operations=[
            {
                'path': '/v2/queues/{queue_name}/claims/{claim_id}',
                'method': 'DELETE'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=CLAIMS % 'update',
        check_str=base.UNPROTECTED,
        description='Updates the specified claim for the specified queue.',
        operations=[
            {
                'path': '/v2/queues/{queue_name}/claims/{claim_id}',
                'method': 'PATCH'
            }
        ]
    )
]
def list_rules():
    """Return the claim policy rules for registration with oslo.policy."""
    return rules
| 29.428571 | 76 | 0.591748 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from zaqar.common.policies import base
CLAIMS = 'claims:%s'
rules = [
policy.DocumentedRuleDefault(
name=CLAIMS % 'create',
check_str=base.UNPROTECTED,
description='Claims a set of messages from the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'get',
check_str=base.UNPROTECTED,
description='Queries the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'delete',
check_str=base.UNPROTECTED,
description='Releases the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'DELETE'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'update',
check_str=base.UNPROTECTED,
description='Updates the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'PATCH'
}
]
)
]
def list_rules():
return rules
| 0 | 0 |
48f845ebbaca4afd27efda9fdf590cf268429691 | 2,139 | py | Python | bot.py | boyuan12/MLH-Helper | efa98907b89bae2ef1f1c8b551075356b61ad741 | [
"MIT"
] | null | null | null | bot.py | boyuan12/MLH-Helper | efa98907b89bae2ef1f1c8b551075356b61ad741 | [
"MIT"
] | null | null | null | bot.py | boyuan12/MLH-Helper | efa98907b89bae2ef1f1c8b551075356b61ad741 | [
"MIT"
] | null | null | null | import discord
import discord.utils
from discord.ext import commands
import requests
import random
client = discord.Client()
# NOTE(review): the API key and backend endpoint are hard-coded; presumably
# these should come from environment variables or a config file — TODO confirm.
SECRET_KEY="secretkey"
BASE_URL="http://0.0.0.0:1234"
@client.event
async def on_ready():
    """Announce on stdout that the bot has connected to Discord."""
    ready_message = 'We have logged in as {0.user}'.format(client)
    print(ready_message)
@client.event
async def on_message(message):
    """Dispatch bot commands issued as mentions: checkin, attendees, finished."""
    # NOTE(review): ``id`` shadows the builtin; it holds the bot's own user id.
    id = client.user.id
    if str(id) in message.content:
        # get the question
        # NOTE(review): split()[1] raises IndexError when the mention has no
        # trailing text or uses the <@id> (non-nick) mention form — TODO confirm.
        resp = str(message.content).split(f"<@!{str(id)}> ")[1]
        if resp == "checkin":
            # Point the user at the per-user check-in page on the backend.
            await message.channel.send(f"Welcome! Please go ahead and go to {BASE_URL}/{message.author.id}. When you finished, please tag me and say finished, and I will send you more information!")
        elif resp == "attendees" and "mlh" in [y.name.lower() for y in message.author.roles]:
            # Organizer-only: fetch the current hackathon name, then a CSV export
            # link from the backend, and DM it to the requester.
            curr = requests.get("https://mlh-events.now.sh/na-2020").json()[0]["name"]
            csv_file = requests.get(f"{BASE_URL}/api/generate/{curr}/{SECRET_KEY}").json()["url"]
            channel = await message.author.create_dm()
            await channel.send(f"Here's the file link to download: {csv_file}")
        elif resp == "attendees" and "mlh" not in [y.name.lower() for y in message.author.roles]:
            await message.channel.send(f"Oops, looks like you don't have permission to use this command!")
        elif resp == "finished":
            # Backend reports which hackathon this user just checked into.
            resp = requests.get(f"{BASE_URL}/api/current_hack/{message.author.id}").json()
            # NOTE(review): the existence check compares exact role names while
            # the membership check lower-cases them — mixed-case role names may
            # be re-assigned repeatedly. TODO confirm intended semantics.
            if resp["hack"] in [hack.name for hack in message.guild.roles] and resp["hack"] not in [y.name.lower() for y in message.author.roles]:
                role = discord.utils.get(message.guild.roles, name=resp["hack"])
                user = message.author
                await user.add_roles(role)
            else:
                # Role doesn't exist yet: create it (green), then assign it.
                guild = message.guild
                await guild.create_role(name=resp["hack"], colour=discord.Colour(0x00FF00))
                role = discord.utils.get(message.guild.roles, name=resp["hack"])
                user = message.author
                await user.add_roles(role)
            await message.channel.send(resp["resp"])
client.run("") | 40.358491 | 198 | 0.625993 | import discord
import discord.utils
from discord.ext import commands
import requests
import random
client = discord.Client()
SECRET_KEY="secretkey"
BASE_URL="http://0.0.0.0:1234"
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
id = client.user.id
if str(id) in message.content:
# get the question
resp = str(message.content).split(f"<@!{str(id)}> ")[1]
if resp == "checkin":
await message.channel.send(f"Welcome! Please go ahead and go to {BASE_URL}/{message.author.id}. When you finished, please tag me and say finished, and I will send you more information!")
elif resp == "attendees" and "mlh" in [y.name.lower() for y in message.author.roles]:
curr = requests.get("https://mlh-events.now.sh/na-2020").json()[0]["name"]
csv_file = requests.get(f"{BASE_URL}/api/generate/{curr}/{SECRET_KEY}").json()["url"]
channel = await message.author.create_dm()
await channel.send(f"Here's the file link to download: {csv_file}")
elif resp == "attendees" and "mlh" not in [y.name.lower() for y in message.author.roles]:
await message.channel.send(f"Oops, looks like you don't have permission to use this command!")
elif resp == "finished":
resp = requests.get(f"{BASE_URL}/api/current_hack/{message.author.id}").json()
if resp["hack"] in [hack.name for hack in message.guild.roles] and resp["hack"] not in [y.name.lower() for y in message.author.roles]:
role = discord.utils.get(message.guild.roles, name=resp["hack"])
user = message.author
await user.add_roles(role)
else:
guild = message.guild
await guild.create_role(name=resp["hack"], colour=discord.Colour(0x00FF00))
role = discord.utils.get(message.guild.roles, name=resp["hack"])
user = message.author
await user.add_roles(role)
await message.channel.send(resp["resp"])
client.run("") | 0 | 0 |
f968e78a0a396c803ccb0a25d591d668dacf68bf | 553 | py | Python | django_pathfinder_statcrunch/urls.py | porowns/django-pathfinder-statcrunch | 4a31dd014b6e1c27b3e70ae88ca5762841ce72db | [
"MIT"
] | null | null | null | django_pathfinder_statcrunch/urls.py | porowns/django-pathfinder-statcrunch | 4a31dd014b6e1c27b3e70ae88ca5762841ce72db | [
"MIT"
] | null | null | null | django_pathfinder_statcrunch/urls.py | porowns/django-pathfinder-statcrunch | 4a31dd014b6e1c27b3e70ae88ca5762841ce72db | [
"MIT"
] | null | null | null | from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib import admin
from django.urls import path, re_path
from . import views
# SSO
urlpatterns = [
path('reports/', views.list_reports,
name="django-pathfinder-statcrunch-list-reports"),
path('reports/<int:pk>/', views.view_report,
name="django-pathfinder-statcrunch-view-report"),
path('reports/<int:pk>/refresh/', views.refresh_report,
name="django-pathfinder-statcrunch-view-report-refresh"),
]
| 34.5625 | 67 | 0.735986 | from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib import admin
from django.urls import path, re_path
from . import views
# SSO
urlpatterns = [
path('reports/', views.list_reports,
name="django-pathfinder-statcrunch-list-reports"),
path('reports/<int:pk>/', views.view_report,
name="django-pathfinder-statcrunch-view-report"),
path('reports/<int:pk>/refresh/', views.refresh_report,
name="django-pathfinder-statcrunch-view-report-refresh"),
]
| 0 | 0 |
0ed78fe0d10f673e40978db5dbae120f7d215015 | 222 | py | Python | adventofcode/2021/1/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | 2 | 2018-01-18T11:01:36.000Z | 2021-12-20T18:14:48.000Z | adventofcode/2021/1/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | adventofcode/2021/1/2.py | jan25/code_sorted | f405fd0898f72eb3d5428f9e10aefb4a009d5089 | [
"Unlicense"
] | null | null | null | import fileinput
nums = list(map(int, fileinput.input()))
def gen():
for i in range(1, len(nums) - 2):
if sum(nums[i:i + 3]) > sum(nums[i - 1: i + 2]):
yield 1
print(sum(inc for inc in gen()))
| 17.076923 | 56 | 0.540541 | import fileinput
nums = list(map(int, fileinput.input()))
def gen():
for i in range(1, len(nums) - 2):
if sum(nums[i:i + 3]) > sum(nums[i - 1: i + 2]):
yield 1
print(sum(inc for inc in gen()))
| 0 | 0 |
2f907545d59c9d2bffc0a13955bd9ba29dc05554 | 477 | py | Python | openapi-server/python-flask/openapi_server/controllers/health_check_service_controller.py | michilu/proto-api | aca02aaa11064e87462ab34674c0c4974cf70372 | [
"Apache-2.0"
] | null | null | null | openapi-server/python-flask/openapi_server/controllers/health_check_service_controller.py | michilu/proto-api | aca02aaa11064e87462ab34674c0c4974cf70372 | [
"Apache-2.0"
] | 1 | 2020-07-15T09:50:06.000Z | 2020-07-15T09:50:06.000Z | openapi-server/python-flask/openapi_server/controllers/health_check_service_controller.py | michilu/proto-openapi | aca02aaa11064e87462ab34674c0c4974cf70372 | [
"Apache-2.0"
] | null | null | null | import connexion
import six
from openapi_server.models.runtime_error import RuntimeError # noqa: E501
from openapi_server.models.v1_health_check_service_health_check_response import V1HealthCheckServiceHealthCheckResponse # noqa: E501
from openapi_server import util
def health_check_service_health_check():  # noqa: E501
    """Stub handler for the health-check operation (generated scaffold).
    # noqa: E501
    :rtype: V1HealthCheckServiceHealthCheckResponse
    """
    placeholder_response = 'do some magic!'
    return placeholder_response
| 26.5 | 133 | 0.802935 | import connexion
import six
from openapi_server.models.runtime_error import RuntimeError # noqa: E501
from openapi_server.models.v1_health_check_service_health_check_response import V1HealthCheckServiceHealthCheckResponse # noqa: E501
from openapi_server import util
def health_check_service_health_check(): # noqa: E501
"""health_check_service_health_check
# noqa: E501
:rtype: V1HealthCheckServiceHealthCheckResponse
"""
return 'do some magic!'
| 0 | 0 |
4cb74abbf6f8f44cb294e4d005f81aad23521f19 | 148 | py | Python | gfat/models/one_eight_two/model.py | paurosello/gfat | 23bcb7bf328be8bd0c1aa99ff869d8e539563eba | [
"MIT"
] | null | null | null | gfat/models/one_eight_two/model.py | paurosello/gfat | 23bcb7bf328be8bd0c1aa99ff869d8e539563eba | [
"MIT"
] | 2 | 2021-03-25T21:40:00.000Z | 2021-11-15T17:46:49.000Z | gfat/models/one_eight_two/model.py | paurosello/gfat | 23bcb7bf328be8bd0c1aa99ff869d8e539563eba | [
"MIT"
] | null | null | null | class Model182:
"""Class to create Model 182 files"""
declarant = None
declared_registers = {}
def __init__(self):
return
| 16.444444 | 41 | 0.614865 | class Model182:
"""Class to create Model 182 files"""
declarant = None
declared_registers = {}
def __init__(self):
return
| 0 | 0 |
887fea9a7f42da7c5675403aa05cab40094c6fb6 | 1,067 | py | Python | src/model/synapses/tensor_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | [
"MIT"
] | null | null | null | src/model/synapses/tensor_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | [
"MIT"
] | null | null | null | src/model/synapses/tensor_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | [
"MIT"
] | null | null | null | """
Created on 12:39, June. 4th, 2021
Author: fassial
Filename: VoltageJump.py
"""
import brainpy as bp
__all__ = [
"VoltageJump",
]
class VoltageJump(bp.TwoEndConn):
    """Instantaneous voltage-jump synapse: each presynaptic spike adds a
    weighted jump to the membrane potential of connected postsynaptic neurons.
    """
    target_backend = "general"
    def __init__(self, pre, post, conn,
                 weight = 1., delay = 0., **kwargs
    ):
        # init params
        self.weight = weight
        self.delay = delay
        # init connections: materialize the dense connectivity matrix
        # (pre.size x post.size) from the connector.
        self.conn = conn(pre.size, post.size)
        self.conn_mat = self.conn.requires("conn_mat")
        self.size = bp.ops.shape(self.conn_mat)
        # init vars: one weight per (pre, post) pair, all equal to ``weight``.
        self.w = bp.ops.ones(self.size) * self.weight
        # NOTE(review): this delay buffer is registered but never read in
        # update(), so transmission is effectively instantaneous — confirm
        # whether the delay is meant to apply.
        self.Isyn = self.register_constant_delay("Isyn",
            size = self.size,
            delay_time = self.delay
        )
        # init super
        super(VoltageJump, self).__init__(pre = pre, post = post, **kwargs)
    def update(self, _t):
        # set Isyn & post.V: broadcast presynaptic spikes across the
        # connectivity matrix, then sum the per-synapse jumps into each
        # postsynaptic neuron, masking out neurons in their refractory period.
        Isyn = self.w * bp.ops.unsqueeze(self.pre.spike, 1) * self.conn_mat
        self.post.V += bp.ops.sum(Isyn * (1. - self.post.refractory), axis = 0)
| 25.404762 | 79 | 0.582943 | """
Created on 12:39, June. 4th, 2021
Author: fassial
Filename: VoltageJump.py
"""
import brainpy as bp
__all__ = [
"VoltageJump",
]
class VoltageJump(bp.TwoEndConn):
target_backend = "general"
def __init__(self, pre, post, conn,
weight = 1., delay = 0., **kwargs
):
# init params
self.weight = weight
self.delay = delay
# init connections
self.conn = conn(pre.size, post.size)
self.conn_mat = self.conn.requires("conn_mat")
self.size = bp.ops.shape(self.conn_mat)
# init vars
self.w = bp.ops.ones(self.size) * self.weight
self.Isyn = self.register_constant_delay("Isyn",
size = self.size,
delay_time = self.delay
)
# init super
super(VoltageJump, self).__init__(pre = pre, post = post, **kwargs)
def update(self, _t):
# set Isyn & post.V
Isyn = self.w * bp.ops.unsqueeze(self.pre.spike, 1) * self.conn_mat
self.post.V += bp.ops.sum(Isyn * (1. - self.post.refractory), axis = 0)
| 0 | 0 |
2356a07736148deea7e9a4b60909d1ff37a28c82 | 845 | py | Python | clahe_and_augmentation/parseConfig.py | RandomeName745/DD2424---Project-Covid-19 | 2e1e647e841eeb00760daecb58effeba3ca237c4 | [
"MIT"
] | null | null | null | clahe_and_augmentation/parseConfig.py | RandomeName745/DD2424---Project-Covid-19 | 2e1e647e841eeb00760daecb58effeba3ca237c4 | [
"MIT"
] | null | null | null | clahe_and_augmentation/parseConfig.py | RandomeName745/DD2424---Project-Covid-19 | 2e1e647e841eeb00760daecb58effeba3ca237c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 10:31:44 2020
@author: alex
"""
import argparse
from clodsa.utils.conf import Conf
def parseConfig(configfile):
    """Load a CloDSA configuration file and return it as a plain dict.

    The argparse round-trip mirrors the original CloDSA CLI behaviour:
    the path is validated as if it had been passed via ``-c/--conf``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--conf", required=True, help="path to configuration file")
    parsed = vars(parser.parse_args(args=["-c", configfile]))
    conf = Conf(parsed["conf"])
    # Map output keys (camelCase) to the snake_case keys stored in the file.
    key_map = {
        "problem": "problem",
        "annotationMode": "annotation_mode",
        "outputMode": "output_mode",
        "generationMode": "generation_mode",
        "inputPath": "input_path",
        # "parameters": "parameters",  # kept disabled, as in the original
        "outputPath": "output_path",
        "augmentationTechniques": "augmentation_techniques",
    }
    return {dest: conf[src] for dest, src in key_map.items()}
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 10:31:44 2020
@author: alex
"""
import argparse
from clodsa.utils.conf import Conf
def parseConfig(configfile):
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to configuration file")
args = vars(ap.parse_args(args=["-c",configfile]))
config = {}
conf = Conf(args["conf"])
config["problem"] = conf["problem"]
config["annotationMode"] = conf["annotation_mode"]
config["outputMode"] = conf["output_mode"]
config["generationMode"] = conf["generation_mode"]
config["inputPath"] = conf["input_path"]
# parameters = conf["parameters"]
config["outputPath"] = conf["output_path"]
config["augmentationTechniques"] = conf["augmentation_techniques"]
return config | 0 | 0 |
d6844ad5af8ac8dd3731b4be397175e5d6c05d9f | 3,563 | py | Python | clientui/vstrm_server.py | cbk914/BlackMamba | 826d5e2994368006cad09acaaa7c6bfa047891b5 | [
"MIT"
] | 902 | 2021-02-09T09:42:57.000Z | 2022-03-26T09:28:03.000Z | clientui/vstrm_server.py | cbk914/BlackMamba | 826d5e2994368006cad09acaaa7c6bfa047891b5 | [
"MIT"
] | 11 | 2021-02-12T16:46:51.000Z | 2021-12-20T21:12:14.000Z | clientui/vstrm_server.py | cbk914/BlackMamba | 826d5e2994368006cad09acaaa7c6bfa047891b5 | [
"MIT"
] | 126 | 2021-02-09T12:16:50.000Z | 2022-02-25T04:19:18.000Z | ####################################################################################
# BLACKMAMBA BY: LOSEYS (https://github.com/loseys)
#
# QT GUI INTERFACE BY: WANDERSON M.PIMENTA (https://github.com/Wanderson-Magalhaes)
# ORIGINAL QT GUI: https://github.com/Wanderson-Magalhaes/Simple_PySide_Base
####################################################################################
"""
Video streaming server.
"""
import sys
import socket
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
from zlib import decompress
from cryptography.fernet import Fernet
# Pull host/port for the stream from the command line (passed by the GUI).
try:
    SERVER_IP = sys.argv[1]
    PORT_VIDEO = sys.argv[2]
except:
    # NOTE(review): bare except masks missing-argument errors; the 0 defaults
    # only make start_stream fail later at bind()/int() — TODO confirm intent.
    SERVER_IP = 0
    PORT_VIDEO = 0
def crypt(msg, key):
    """Encrypt ``msg`` (stringified, UTF-8 encoded) with the Fernet ``key``.

    Returns the Fernet token as bytes.
    """
    payload = bytes(str(msg), encoding='utf8')
    return Fernet(key).encrypt(payload)
def decrypt(msg, key):
    """Decrypt a Fernet token ``msg`` with ``key``; returns plaintext bytes."""
    return Fernet(key).decrypt(msg)
def recvall(conn, length):
    """Receive exactly ``length`` bytes from ``conn``.

    Returns the accumulated bytes; if the peer closes early the falsy read
    is returned as-is (empty bytes), and if the socket raises while reading,
    None is returned — the same best-effort contract start_stream relies on.
    The original bare ``except`` also swallowed KeyboardInterrupt/SystemExit;
    narrowing to OSError keeps those working while still absorbing socket errors.
    """
    try:
        buf = b''
        while len(buf) < length:
            chunk = conn.recv(length - len(buf))
            if not chunk:
                return chunk
            buf += chunk
        return buf
    except OSError:
        return None
def start_stream(host=str(SERVER_IP), port=int(PORT_VIDEO)):
    """Accept one client connection and display its screen stream in a pygame window.

    Protocol per frame: 1 byte = length of the size field, then the size field
    (big-endian int), then that many bytes of zlib-compressed raw RGB pixels.
    Blocks until the window is closed or the connection drops.
    """
    sock = socket.socket()
    sock.bind((host, port))
    print("Listening ....")
    # Give the client 15s to connect before giving up.
    sock.settimeout(15.0)
    sock.listen(5)
    try:
        conn, addr = sock.accept()
    except:
        print('socket.timeout: timed out')
        return
    print("Accepted ....", addr)
    # First message from the client: its capture resolution as "width,height".
    client_resolution = (conn.recv(50).decode())
    client_resolution = str(client_resolution).split(',')
    CLIENT_WIDTH = int(client_resolution[0])
    CLIENT_HEIGHT = int(client_resolution[1])
    # Local display size is read from a profile file in "width:height" form.
    with open('bin/profile/vstream_size.txt', 'r') as f:
        scsize = f.read()
    # Redundant: the with-block above already closed the file.
    f.close()
    try:
        scsize = scsize.split(':')
        SERVER_WIDTH = int(scsize[0])
        SERVER_HEIGHT = int(
            scsize[1])
    except:
        # Fall back to a sane default window if the profile file is malformed.
        SERVER_WIDTH = 1000
        SERVER_HEIGHT = 600
    pygame.init()
    pygame.display.set_caption('BlackMamba')
    programIcon = pygame.image.load('icons/others/icon_3.png')
    pygame.display.set_icon(programIcon)
    screen = pygame.display.set_mode((SERVER_WIDTH, SERVER_HEIGHT))
    clock = pygame.time.Clock()
    watching = True
    try:
        while watching:
            # Closing the window ends the viewing loop.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    watching = False
                    break
            # Retreive the size of the pixels length, the pixels length and pixels
            try:
                size_len = int.from_bytes(conn.recv(1), byteorder='big')
                size = int.from_bytes(conn.recv(size_len), byteorder='big')
                pixels = decompress(recvall(conn, size))
                # Create the Surface from raw pixels
                img = pygame.image.fromstring(pixels, (CLIENT_WIDTH, CLIENT_HEIGHT), 'RGB')
                # resize the client image to match the server's screen dimensions
                scaled_img = pygame.transform.scale(img, (SERVER_WIDTH, SERVER_HEIGHT))
                # Display the picture
                screen.blit(scaled_img, (0, 0))
                pygame.display.flip()
                #clock.tick(60)
                clock.tick(120)
            except:
                # Any protocol/decompression/socket failure ends the stream.
                break
    finally:
        pygame.quit()
        sock.close()
if __name__ == "__main__":
    # Script entry point: run the blocking video-stream server loop.
    start_stream()
| 27.407692 | 91 | 0.572271 | ####################################################################################
# BLACKMAMBA BY: LOSEYS (https://github.com/loseys)
#
# QT GUI INTERFACE BY: WANDERSON M.PIMENTA (https://github.com/Wanderson-Magalhaes)
# ORIGINAL QT GUI: https://github.com/Wanderson-Magalhaes/Simple_PySide_Base
####################################################################################
"""
Video streaming server.
"""
import sys
import socket
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
from zlib import decompress
from cryptography.fernet import Fernet
try:
SERVER_IP = sys.argv[1]
PORT_VIDEO = sys.argv[2]
except:
SERVER_IP = 0
PORT_VIDEO = 0
def crypt(msg, key):
command = str(msg)
command = bytes(command, encoding='utf8')
cipher_suite = Fernet(key)
encoded_text = cipher_suite.encrypt(command)
return encoded_text
def decrypt(msg, key):
cipher_suite = Fernet(key)
decoded_text_f = cipher_suite.decrypt(msg)
return decoded_text_f
def recvall(conn, length):
try:
buf = b''
while len(buf) < length:
data = conn.recv(length - len(buf))
if not data:
return data
buf += data
return buf
except:
pass
def start_stream(host=str(SERVER_IP), port=int(PORT_VIDEO)):
sock = socket.socket()
sock.bind((host, port))
print("Listening ....")
sock.settimeout(15.0)
sock.listen(5)
try:
conn, addr = sock.accept()
except:
print('socket.timeout: timed out')
return
print("Accepted ....", addr)
client_resolution = (conn.recv(50).decode())
client_resolution = str(client_resolution).split(',')
CLIENT_WIDTH = int(client_resolution[0])
CLIENT_HEIGHT = int(client_resolution[1])
with open('bin/profile/vstream_size.txt', 'r') as f:
scsize = f.read()
f.close()
try:
scsize = scsize.split(':')
SERVER_WIDTH = int(scsize[0])
SERVER_HEIGHT = int(
scsize[1])
except:
SERVER_WIDTH = 1000
SERVER_HEIGHT = 600
pygame.init()
pygame.display.set_caption('BlackMamba')
programIcon = pygame.image.load('icons/others/icon_3.png')
pygame.display.set_icon(programIcon)
screen = pygame.display.set_mode((SERVER_WIDTH, SERVER_HEIGHT))
clock = pygame.time.Clock()
watching = True
try:
while watching:
for event in pygame.event.get():
if event.type == pygame.QUIT:
watching = False
break
# Retreive the size of the pixels length, the pixels length and pixels
try:
size_len = int.from_bytes(conn.recv(1), byteorder='big')
size = int.from_bytes(conn.recv(size_len), byteorder='big')
pixels = decompress(recvall(conn, size))
# Create the Surface from raw pixels
img = pygame.image.fromstring(pixels, (CLIENT_WIDTH, CLIENT_HEIGHT), 'RGB')
# resize the client image to match the server's screen dimensions
scaled_img = pygame.transform.scale(img, (SERVER_WIDTH, SERVER_HEIGHT))
# Display the picture
screen.blit(scaled_img, (0, 0))
pygame.display.flip()
#clock.tick(60)
clock.tick(120)
except:
break
finally:
pygame.quit()
sock.close()
if __name__ == "__main__":
start_stream()
| 0 | 0 |
9b17df01413810c9aafe1a9d4b06d58e9e2eff88 | 257 | py | Python | src/api/domain/schedule/ScheduleCronJob/ScheduleCronJobRequest.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 14 | 2020-12-19T15:06:13.000Z | 2022-01-12T19:52:17.000Z | src/api/domain/schedule/ScheduleCronJob/ScheduleCronJobRequest.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 43 | 2021-01-06T22:05:22.000Z | 2022-03-10T10:30:30.000Z | src/api/domain/schedule/ScheduleCronJob/ScheduleCronJobRequest.py | PythonDataIntegrator/pythondataintegrator | 6167778c36c2295e36199ac0d4d256a4a0c28d7a | [
"MIT"
] | 4 | 2020-12-18T23:10:09.000Z | 2021-04-02T13:03:12.000Z | from datetime import datetime
from infrastructure.cqrs.decorators.requestclass import requestclass
@requestclass
class ScheduleCronJobRequest:
OperationName: str = None
Cron: str = None
StartDate: datetime = None
EndDate: datetime = None
| 21.416667 | 68 | 0.770428 | from datetime import datetime
from infrastructure.cqrs.decorators.requestclass import requestclass
@requestclass
class ScheduleCronJobRequest:
OperationName: str = None
Cron: str = None
StartDate: datetime = None
EndDate: datetime = None
| 0 | 0 |
58f8d59cc2c4eb8e7df9a61a133e9fc628033ad5 | 4,775 | py | Python | Unit3SC_0205/northwind.py | TemsyChen/DS-Unit-3-Sprint-2-SQL-and-Databases | cba1e0f5476f5e7a13e10ad450474a565d302b33 | [
"MIT"
] | null | null | null | Unit3SC_0205/northwind.py | TemsyChen/DS-Unit-3-Sprint-2-SQL-and-Databases | cba1e0f5476f5e7a13e10ad450474a565d302b33 | [
"MIT"
] | null | null | null | Unit3SC_0205/northwind.py | TemsyChen/DS-Unit-3-Sprint-2-SQL-and-Databases | cba1e0f5476f5e7a13e10ad450474a565d302b33 | [
"MIT"
] | null | null | null | import sqlite3
# Connect to the sqlite3 file
connection = sqlite3.connect("northwind_small.sqlite3")
cursor = connection.cursor()
# Queries
# `expensive_items`: What are the ten most expensive items (per unit price) in the database?
price_query = f""" SELECT UnitPrice, ProductName
FROM product
ORDER BY UnitPrice DESC
LIMIT 10;"""
expensive_items = cursor.execute(price_query).fetchall()
print("Expensive items:", expensive_items)
# Expensive items: [(263.5, 'Cte de Blaye'), (123.79, 'Thringer Rostbratwurst'),
# (97, 'Mishi Kobe Niku'), (81, "Sir Rodney's Marmalade"), (62.5, 'Carnarvon Tigers'),
# (55, 'Raclette Courdavault'), (53, 'Manjimup Dried Apples'), (49.3, 'Tarte au sucre'),
# (46, 'Ipoh Coffee'), (45.6, 'Rssle Sauerkraut')]
# `avg_hire_age`: What is the average age of an employee at the time of their hiring?
# ONLY RAN THIS THE FIRST TIME, then commented it out
# add_age_column = f"""
# ALTER TABLE Employee
# ADD age INT AS (hiredate - birthdate)
# """
# cursor.execute(add_age_column)
avghire_query = f"""SELECT AVG(age) from employee"""
avg_hire_age = cursor.execute(avghire_query).fetchone()[0]
print("Average hire age:", avg_hire_age)
# Average hire age: 37.22222222222222
# (*Stretch*) `avg_age_by_city`: How does the average age of employee at hire vary by city?
avg_by_city_query = f"""SELECT AVG(age), city FROM employee
GROUP BY city
"""
avg_age_by_city = cursor.execute(avg_by_city_query).fetchall()
print("Average age by city:", avg_age_by_city)
# Average age by city: [(29.0, 'Kirkland'), (32.5, 'London'),
# (56.0, 'Redmond'), (40.0, 'Seattle'), (40.0, 'Tacoma')]
# - `ten_most_expensive`: What are the ten most expensive items (per unit price) in the database
# *and* their suppliers?
# COMMENTING OUT AFTER RUNNING ONCE
# suppliers_prices_table = f"""CREATE TABLE suppliers_prices AS
# SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id
# """
# cursor.execute(suppliers_prices_table)
# insertion_query = f"""SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id"""
# cursor.execute(insertion_query)
price_supplier_query = f"""SELECT unitprice, companyname
FROM suppliers_prices
ORDER BY unitprice DESC
LIMIT 10;
"""
price_supplier_topten = cursor.execute(price_supplier_query).fetchall()
print("Top most expensive items and their suppliers:", price_supplier_topten)
# Top most expensive items and their suppliers: [(263.5, 'Aux
# joyeux ecclsiastiques'), (123.79, 'Plutzer Lebensmittelgromrkte AG'),
# (97, 'Tokyo Traders'), (81, 'Specialty Biscuits, Ltd.'),
# (62.5, 'Pavlova, Ltd.'), (55, 'Gai pturage'), (53, "G'day, Mate"),
# (49.3, "Forts d'rables"), (46, 'Leka Trading'), (45.6, 'Plutzer Lebensmittelgromrkte AG')]
# - `largest_category`: What is the largest category (by number of unique products in it)?
largest_category_query = f"""SELECT CategoryId, COUNT(DISTINCT ProductName) FROM Product
GROUP BY CategoryId
ORDER BY COUNT(DISTINCT ProductName) DESC"""
largest_category = cursor.execute(largest_category_query).fetchone()[0]
print("Largest category:", largest_category)
# Largest category: 3
# - (*Stretch*) `most_territories`: Who's the employee with the most territories?
# Use `TerritoryId` (not name, region, or other fields) as the unique
# identifier for territories.
# COMMENT OUT AFTER RUNNING ONCE
# employee_territory_table = f"""CREATE TABLE employee_territory AS
# SELECT Employee.FirstName, Employee.LastName,
# EmployeeTerritory.EmployeeId, EmployeeTerritory.TerritoryId
# FROM Employee
# JOIN EmployeeTerritory ON Employee.Id = EmployeeTerritory.EmployeeId;"""
# cursor.execute(employee_territory_table)
territory_query = f"""SELECT COUNT(DISTINCT TerritoryId), FirstName, LastName, EmployeeId from employee_territory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC"""
employee_territory = cursor.execute(territory_query).fetchone()
print("Which employee has the most territory?", employee_territory)
# Which employee has the most territory? (10, 'Robert', 'King', 7)
connection.commit()
connection.close()
| 45.47619 | 113 | 0.660314 | import sqlite3
# Connect to the sqlite3 file
connection = sqlite3.connect("northwind_small.sqlite3")
cursor = connection.cursor()
# Queries
# `expensive_items`: What are the ten most expensive items (per unit price) in the database?
price_query = f""" SELECT UnitPrice, ProductName
FROM product
ORDER BY UnitPrice DESC
LIMIT 10;"""
expensive_items = cursor.execute(price_query).fetchall()
print("Expensive items:", expensive_items)
# Expensive items: [(263.5, 'Côte de Blaye'), (123.79, 'Thüringer Rostbratwurst'),
# (97, 'Mishi Kobe Niku'), (81, "Sir Rodney's Marmalade"), (62.5, 'Carnarvon Tigers'),
# (55, 'Raclette Courdavault'), (53, 'Manjimup Dried Apples'), (49.3, 'Tarte au sucre'),
# (46, 'Ipoh Coffee'), (45.6, 'Rössle Sauerkraut')]
# `avg_hire_age`: What is the average age of an employee at the time of their hiring?
# ONLY RAN THIS THE FIRST TIME, then commented it out
# add_age_column = f"""
# ALTER TABLE Employee
# ADD age INT AS (hiredate - birthdate)
# """
# cursor.execute(add_age_column)
avghire_query = f"""SELECT AVG(age) from employee"""
avg_hire_age = cursor.execute(avghire_query).fetchone()[0]
print("Average hire age:", avg_hire_age)
# Average hire age: 37.22222222222222
# (*Stretch*) `avg_age_by_city`: How does the average age of employee at hire vary by city?
avg_by_city_query = f"""SELECT AVG(age), city FROM employee
GROUP BY city
"""
avg_age_by_city = cursor.execute(avg_by_city_query).fetchall()
print("Average age by city:", avg_age_by_city)
# Average age by city: [(29.0, 'Kirkland'), (32.5, 'London'),
# (56.0, 'Redmond'), (40.0, 'Seattle'), (40.0, 'Tacoma')]
# - `ten_most_expensive`: What are the ten most expensive items (per unit price) in the database
# *and* their suppliers?
# COMMENTING OUT AFTER RUNNING ONCE
# suppliers_prices_table = f"""CREATE TABLE suppliers_prices AS
# SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id
# """
# cursor.execute(suppliers_prices_table)
# insertion_query = f"""SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id"""
# cursor.execute(insertion_query)
price_supplier_query = f"""SELECT unitprice, companyname
FROM suppliers_prices
ORDER BY unitprice DESC
LIMIT 10;
"""
price_supplier_topten = cursor.execute(price_supplier_query).fetchall()
print("Top most expensive items and their suppliers:", price_supplier_topten)
# Top most expensive items and their suppliers: [(263.5, 'Aux
# joyeux ecclésiastiques'), (123.79, 'Plutzer Lebensmittelgroßmärkte AG'),
# (97, 'Tokyo Traders'), (81, 'Specialty Biscuits, Ltd.'),
# (62.5, 'Pavlova, Ltd.'), (55, 'Gai pâturage'), (53, "G'day, Mate"),
# (49.3, "Forêts d'érables"), (46, 'Leka Trading'), (45.6, 'Plutzer Lebensmittelgroßmärkte AG')]
# - `largest_category`: What is the largest category (by number of unique products in it)?
largest_category_query = f"""SELECT CategoryId, COUNT(DISTINCT ProductName) FROM Product
GROUP BY CategoryId
ORDER BY COUNT(DISTINCT ProductName) DESC"""
largest_category = cursor.execute(largest_category_query).fetchone()[0]
print("Largest category:", largest_category)
# Largest category: 3
# - (*Stretch*) `most_territories`: Who's the employee with the most territories?
# Use `TerritoryId` (not name, region, or other fields) as the unique
# identifier for territories.
# COMMENT OUT AFTER RUNNING ONCE
# employee_territory_table = f"""CREATE TABLE employee_territory AS
# SELECT Employee.FirstName, Employee.LastName,
# EmployeeTerritory.EmployeeId, EmployeeTerritory.TerritoryId
# FROM Employee
# JOIN EmployeeTerritory ON Employee.Id = EmployeeTerritory.EmployeeId;"""
# cursor.execute(employee_territory_table)
territory_query = f"""SELECT COUNT(DISTINCT TerritoryId), FirstName, LastName, EmployeeId from employee_territory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC"""
employee_territory = cursor.execute(territory_query).fetchone()
print("Which employee has the most territory?", employee_territory)
# Which employee has the most territory? (10, 'Robert', 'King', 7)
connection.commit()
connection.close()
| 22 | 0 |
1c787236266f19826db1b1ea01fce5c806ce4267 | 8,541 | py | Python | lib/model/complete_net.py | chensjtu/poxture | f6abea1216c987f0e4c628b250054d764eaecf2e | [
"Apache-2.0"
] | null | null | null | lib/model/complete_net.py | chensjtu/poxture | f6abea1216c987f0e4c628b250054d764eaecf2e | [
"Apache-2.0"
] | null | null | null | lib/model/complete_net.py | chensjtu/poxture | f6abea1216c987f0e4c628b250054d764eaecf2e | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from .SurfaceClassifier import conv1_1, period_loss
# from .DepthNormalizer import DepthNormalizer
from ..net_util import *
# from iPERCore.models.networks.criterions import VGGLoss
from lib.model.Models import NestedUNet
import numpy as np
class Pint_Model(nn.Module):
def __init__(self, opt):
super(Pint_Model, self).__init__()
self.period_loss = period_loss()
self.feat_uv_error = nn.SmoothL1Loss() # A feature with B uvmap
self.opt = opt
self.NUnet = NestedUNet(in_ch=3, out_ch=3)
norm_type = get_norm_layer(norm_type=opt.norm_color)
self.image_filter = ResnetFilter(opt, norm_layer=norm_type)
# self.conv = conv1_1(input_layers=256, output_layers=16)
init_net(self)
def filter(self, images):
'''
Filter the input images
store all intermediate features.
:param images: [B, C, H, W] input images
'''
self.im_feat = self.image_filter(images)
def forward(self, uv_A, uv_B, part_uv_B, index):
'''
this function is made for pint total train.
'''
complete_feat = self.NUnet(uv_A)
complete_feat_B = self.NUnet(uv_B)
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B)
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# part_uv_B.requires_grad=True # to make uvb as one leaf
# A_feat = complete_feat[:,0:3,:,:]
# part_uv_B = F.interpolate(part_uv_B, scale_factor=0.25, mode='bilinear', align_corners=True)
A_vis_feat = complete_feat[index==1]
B_vis_uv = part_uv_B[index==1]
loss1 = self.feat_uv_error(A_vis_feat, B_vis_uv.detach())
# loss2 = self.vgg_loss(complete_feat[:,:3], complete_feat_B[:,:3].detach())
# loss2 = self.period_loss(complete_feat, complete_feat_B.detach())
loss2=0
return complete_feat, complete_feat_B, loss1, loss2
# def pint_forward(self, uv_A, uv_B):
# '''
# this function is made for pint total train.
# '''
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# self.complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B.squeeze(1))
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(self.complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# uv_B_feat = uv_B.squeeze(1).expand_as(A_feat)
# uv_B_feat.requires_grad=True # to make uvb as one leaf
# A_vis_feat = A_feat[uv_B_feat != 0.0]
# B_vis_uv = uv_B_feat[uv_B_feat != 0.0]
# loss_content = self.feat_uv_error(A_vis_feat, B_vis_uv) * 100
# loss_content1 = self.feat_uv_error(A_feat, uv_A)*100
# # loss_feat = self.error_term(self.complete_feat, complete_feat_B)
# return A_feat, A_vis_feat, B_vis_uv, self.complete_feat, complete_feat_B, loss_content+loss_content1
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if last:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class ResnetFilter(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,
n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert (n_blocks >= 0)
super(ResnetFilter, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
if i == n_blocks - 1:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias, last=True)]
else:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
if opt.use_tanh:
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
| 44.717277 | 164 | 0.619482 | import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from .SurfaceClassifier import conv1_1, period_loss
# from .DepthNormalizer import DepthNormalizer
from ..net_util import *
# from iPERCore.models.networks.criterions import VGGLoss
from lib.model.Models import NestedUNet
import numpy as np
class Pint_Model(nn.Module):
def __init__(self, opt):
super(Pint_Model, self).__init__()
self.period_loss = period_loss()
self.feat_uv_error = nn.SmoothL1Loss() # A feature with B uvmap
self.opt = opt
self.NUnet = NestedUNet(in_ch=3, out_ch=3)
norm_type = get_norm_layer(norm_type=opt.norm_color)
self.image_filter = ResnetFilter(opt, norm_layer=norm_type)
# self.conv = conv1_1(input_layers=256, output_layers=16)
init_net(self)
def filter(self, images):
'''
Filter the input images
store all intermediate features.
:param images: [B, C, H, W] input images
'''
self.im_feat = self.image_filter(images)
def forward(self, uv_A, uv_B, part_uv_B, index):
'''
this function is made for pint total train.
'''
complete_feat = self.NUnet(uv_A)
complete_feat_B = self.NUnet(uv_B)
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B)
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# part_uv_B.requires_grad=True # to make uvb as one leaf
# A_feat = complete_feat[:,0:3,:,:]
# part_uv_B = F.interpolate(part_uv_B, scale_factor=0.25, mode='bilinear', align_corners=True)
A_vis_feat = complete_feat[index==1]
B_vis_uv = part_uv_B[index==1]
loss1 = self.feat_uv_error(A_vis_feat, B_vis_uv.detach())
# loss2 = self.vgg_loss(complete_feat[:,:3], complete_feat_B[:,:3].detach())
# loss2 = self.period_loss(complete_feat, complete_feat_B.detach())
loss2=0
return complete_feat, complete_feat_B, loss1, loss2
# def pint_forward(self, uv_A, uv_B):
# '''
# this function is made for pint total train.
# '''
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# self.complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B.squeeze(1))
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(self.complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# uv_B_feat = uv_B.squeeze(1).expand_as(A_feat)
# uv_B_feat.requires_grad=True # to make uvb as one leaf
# A_vis_feat = A_feat[uv_B_feat != 0.0]
# B_vis_uv = uv_B_feat[uv_B_feat != 0.0]
# loss_content = self.feat_uv_error(A_vis_feat, B_vis_uv) * 100
# loss_content1 = self.feat_uv_error(A_feat, uv_A)*100
# # loss_feat = self.error_term(self.complete_feat, complete_feat_B)
# return A_feat, A_vis_feat, B_vis_uv, self.complete_feat, complete_feat_B, loss_content+loss_content1
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if last:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class ResnetFilter(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,
n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert (n_blocks >= 0)
super(ResnetFilter, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
if i == n_blocks - 1:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias, last=True)]
else:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
if opt.use_tanh:
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
| 0 | 0 |
91b66cd5922aac62b6769b7d844bbbf6732242a0 | 13,831 | py | Python | ActiveCell_RealMorphology_Burst_Inh_cluster.py | EilamLeleo/burst | 538dbf6845f4de5519c0392368d611d1e54608e1 | [
"MIT"
] | null | null | null | ActiveCell_RealMorphology_Burst_Inh_cluster.py | EilamLeleo/burst | 538dbf6845f4de5519c0392368d611d1e54608e1 | [
"MIT"
] | null | null | null | ActiveCell_RealMorphology_Burst_Inh_cluster.py | EilamLeleo/burst | 538dbf6845f4de5519c0392368d611d1e54608e1 | [
"MIT"
] | null | null | null | #!/usr/lib/python-exec/python2.7/python
import os
import sys
os.chdir('C:/Users/Leleo/Documents/Active Cell Real Morphology/')
from neuron import h
from neuron import gui
#%%
import numpy as np
import time
import math
import cPickle as pickle
#%%
sk = False
if sk==True:
from sklearn import decomposition
from sklearn import cluster
from sklearn import linear_model
from sklearn import ensemble
from sklearn import cross_validation
#%%
h.load_file('nrngui.hoc')
h.load_file("import3d.hoc")
cvode = h.CVode()
cvode.active(0)
morphologyFilename = "morphologies/cell1.asc"
#morphologyFilename = "morphologies/cell2.asc"
#morphologyFilename = "morphologies/cell3.asc"
#biophysicalModelFilename = "L5PCbiophys1.hoc"
#biophysicalModelFilename = "L5PCbiophys2.hoc"
#biophysicalModelFilename = "L5PCbiophys3.hoc"
#biophysicalModelFilename = "L5PCbiophys4.hoc"
#biophysicalModelFilename = "L5PCbiophys5.hoc"
biophysicalModelFilename = "L5PCbiophys5b.hoc"
#biophysicalModelTemplateFilename = "L5PCtemplate.hoc"
biophysicalModelTemplateFilename = "L5PCtemplate_2.hoc"
#%%
h.load_file(biophysicalModelFilename)
h.load_file(biophysicalModelTemplateFilename)
L5PC = h.L5PCtemplate(morphologyFilename)
h.celsius = 34
#%% set dendritic VDCC g=0
#secs = h.allsec
VDCC_g = 1
if VDCC_g==0:
for sec in h.allsec():
if hasattr(sec, 'gCa_HVAbar_Ca_HVA'):
sec.gCa_HVAbar_Ca_HVA = 0
#%% helper functions
def Add_NMDA_SingleSynapticEventToSegment(segment, activationTime, synapseWeight, exc_inh):
# synapse = h.ProbAMPANMDA2(segment)
# synapse = h.ProbAMPANMDA_EMS(segLoc,sec=section)
if exc_inh==0: # inhibitory
synapse = h.ProbGABAAB_EMS(segment) #GABAA/B
synapse.tau_r_GABAA = 0.2
synapse.tau_d_GABAA = 8
synapse.tau_r_GABAB = 3.5
synapse.tau_d_GABAB = 260.9
# synapse.gmax = .001
synapse.e_GABAA = -80
synapse.e_GABAB = -97
synapse.GABAB_ratio = 0.0
# synapse.Use = 1
# synapse.u0 = 0
# synapse.Dep = 0
# synapse.Fac = 0
else: # excitatory
synapse = h.ProbAMPANMDA2(segment)
synapse.gmax = .0004
# synapse = h.ProbAMPANMDA_EMS(segLoc,sec=section)
synapse.Use = 1.0
synapse.Dep = 0
synapse.Fac = 0
netStimulation = h.NetStim()
netStimulation.number = 1
netStimulation.start = activationTime
netConnection = h.NetCon(netStimulation,synapse)
netConnection.delay = 0
netConnection.weight[0] = synapseWeight
return netStimulation,netConnection,synapse
#%% create length-weighted random section list
def randSecWeight(obj,medSeg,part,num):
allLen = []
for i in range(len(obj)):
allLen.append(obj[i].L)
randSecList = [0 for i in range(num)]
h.distance(sec=obj[medSeg]) # define distance measure from medSeg
# draw from cumulative length a seg for syn
x = np.sum(allLen[:medSeg])+(np.random.rand(num)-0.5)*np.sum(allLen)/part
j=0
farbug=0
while j<num:
# redraw boundary crossers
if x[j]<0 or x[j]>np.sum(allLen):
x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
continue
# find sec
for i in range(len(obj)):
if x[j]<np.sum(allLen[:i+1]):
randSecList[j]=i
break
# check that sec is sufficiently close to medseg
if h.distance(obj[randSecList[j]](1))>sum(allLen)/part and farbug<5:#obj[medSeg].L+obj[randSecList[j]].L:#
x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
farbug+=1
continue
j+=1
farbug=0
return randSecList
#%% add some random NMDA synapses and plot a somatic trace just to see all things are alive and kicking
def runSim(cell,ApiBasInt,treeT,numBas,numApi,partApi,medSeg,inSec,numExp):
simulationTime = 400
silentTimeAtStart = 100
delayTime = 200
silentTimeAtEnd = 100
origNumSamplesPerMS = 40 #20 # was 20!!!
totalSimDuration = simulationTime + silentTimeAtStart + silentTimeAtEnd
listOfSomaTraces = []
spikes = []
numSpikes = 0
numSpikesPerExp = [0]*numExp
freq = [0]*numExp
for experiment in range(numExp):
startTime = time.time()
listOfRandBasalSectionInds = randSecWeight(cell.dend,44,1,int(numBas))#np.random.randint(0,len(cell.dend),int(numBas))
listOfRandApicalSectionInds = randSecWeight(cell.apic,62,20,20)#int(numApi)) #medSeg + np.random.randint(-distance,distance,int(numApi))
if partApi>15:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
else:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
# listOfRandApicalSectionInds = randSecWeight(cell.apic,np.random.randint(37,78),partApi,10)#int(numApi))
# listOfRandObliqueSectionInds = np.random.randint(0,len(cell.apic)/partApi,0)#int(40-numApi)) #obliques
listOfBasalSections = [cell.dend[x] for x in listOfRandBasalSectionInds]
listOfApicalSections = [cell.apic[x] for x in listOfRandApicalSectionInds]
listOfInhSections = [cell.apic[x] for x in listOfRandInhSectionInds]
# listOfObliqueSections = [cell.apic[x] for x in listOfRandObliqueSectionInds]
# listOfSections = listOfApicalSections + listOfBasalSections
listOfRandBasalLocationsInSection = np.random.rand(len(listOfRandBasalSectionInds))
listOfRandApicalLocationsInSection = np.random.rand(len(listOfRandApicalSectionInds))
# listOfRandInhLocationsInSection = float(inSec)/4 + 0.25*np.random.rand(len(listOfRandInhSectionInds))
if partApi>30:
listOfRandInhLocationsInSection = [1]*numApi #min(1,float(7440)/partApi/cell.apic[medSeg].L)*np.random.rand(len(listOfRandInhSectionInds))
else:
listOfRandInhLocationsInSection = np.random.rand(len(listOfRandInhSectionInds))
# listOfRandObliqueLocationsInSection = np.random.rand(len(listOfRandObliqueSectionInds))
# listOfSegLocs = list(listOfRandApicalLocationsInSection) + list(listOfRandBasalLocationsInSection)
listOfEvents = []
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #gauss(0.5,0.2)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + delayTime + ApiBasInt + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
#add obliques
# for k, section in enumerate(listOfObliqueSections):
# eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(1,0.2) #simulationTime/2*np.random.rand(1)[0]
# listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandObliqueLocationsInSection[k]), eventTime, 2))
##%% run the simulation
h.dt = 0.025
recTime = h.Vector()
recTime.record(h._ref_t)
recVoltage = h.Vector()
recVoltage.record(cell.soma[0](0.5)._ref_v)
cvode.cache_efficient(1)
h.finitialize(-76)
stopTime = totalSimDuration
neuron.run(stopTime)
# plot the trace
origRecordingTime = np.array(recTime.to_python()) # ugly workaround to recTime.as_numpy()
origSomaVoltage = np.array(recVoltage.to_python()) # ugly workaround to recVoltage.as_numpy()
recordingTime = np.arange(0,totalSimDuration,1.0/origNumSamplesPerMS)
somaVoltage = np.interp(recordingTime, origRecordingTime, origSomaVoltage)
listOfSomaTraces.append(somaVoltage)
origSpikes = []
tempSpikes = 0
k = (silentTimeAtStart+delayTime-50)*origNumSamplesPerMS #int(np.min([0,ApiBasInt]))
while k < (totalSimDuration-silentTimeAtEnd)*origNumSamplesPerMS:
if somaVoltage[k]>-10:
tempTime = float(k)/origNumSamplesPerMS
if tempSpikes>0 and tempTime-origSpikes[-1]>20:
break
origSpikes.append(tempTime)
# numSpikesPerExp[experiment] = tempSpikes + 1
numSpikes = numSpikes + 1
tempSpikes += 1 # numSpikesPerExp[experiment]
k = k+origNumSamplesPerMS*3
else:
k = k+5 # was 1 before
# spikes = []
spikes.append(origSpikes)
if tempSpikes>1:
freq[experiment] = tempSpikes/(origSpikes[-1]-origSpikes[-tempSpikes])
# plt.figure()
# plt.plot(recordingTime, somaVoltage)
# plt.xlabel('Time [ms]'); plt.ylabel('Voltage [mV]')
# plt.axis(xmin=0, xmax=stopTime, ymin=min(somaVoltage)-5, ymax=max(somaVoltage)+5)
#listOfEvents = []
if (experiment+1)%10==0 or (time.time()-startTime)/60>5 or numExp<5:
print "Dt %s treeTime %s exp. # %s took %.3f minutes" % (ApiBasInt,treeT,experiment+1, (time.time()-startTime)/60)
print "Mean no. of spikes: %s" % (float(numSpikes)/numExp)
return float(numSpikes)/numExp,np.mean(freq)#, listOfSomaTraces, recordingTime
#%% run simulation on some parameter pair, plot the space
# Top-level driver: rebuild the cell, sweep (interval x inhibition-centre
# section) with runSim, and pickle the spike-count / frequency grids per seed.
L5PC = h.L5PCtemplate(morphologyFilename)
name = 'inh_secdt_meds62_exc60dt0sd0num15'
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/coincidence/wgh1/'+name+'/'
saveDir = 'C:/Users/Leleo/Documents/coincidence/wgh1/'+name+'/'
if not os.path.exists(saveDir):
    os.makedirs(saveDir)
# Seed from argv[1] when supplied so runs are reproducible; otherwise draw one.
try:
    randomSeed = int(sys.argv[1])
    print 'random seed selected by user - %d' %(randomSeed)
except:
    randomSeed = np.random.randint(100000)
    print 'randomly chose seed - %d' %(randomSeed)
np.random.seed(randomSeed)
#ind = 1
#a = np.linspace(-50,-25,num=6),np.linspace(-20,20,num=21),np.linspace(25,100,num=16)
# Sweep parameters (commented alternatives are previous experiment grids).
ApicalBasalInterval = [0]#np.linspace(-10,10,num=11) #[x for xs in a for x in xs]
numBasal = 50 #35 #np.linspace(0,200,num=81)
numApical = 30 #np.linspace(0,20,num=11)#50,num=21)#
numInh = 20 #0
#numOblique = 40-numApical
#totalSyn = [20,50,100,200,400,600,800]#[80,120,150,180]#np.linspace(0,200,num=5)#41)
partApical = 2 #[5,10,20,50,100,200,500]#[i for i in np.linspace(10,100,num=10)]+[200,300,400,500]#np.logspace(0,7,num=29,base=2)
medSegment = [0,36,60,63]#[36]+[i for i in np.linspace(60,65,num=6)]#37,44,num=8)] ##40#60 #
#secInh = [60[0.5],60[1],61[0],62[0],63[0],64[0],67[0]] #optimal planned inh at prox junc
#secInh = [60[1],61[0],63[1]] #encapsulating inh for partApi=20
#random.choice(secInh)
treeTime = 0 #0.1*np.logspace(3,10,num=22,base=2)
numExperiments = 20
# Result grids indexed [medSegment][interval].
spks = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
frqs = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
#trc = [[[] for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
i = 0
j = 0
start = time.time()
for ApiBasInd in ApicalBasalInterval:#treeT in treeTime:#
    print "Running for interval: %s [ms]" % (int(ApiBasInd))#treeTime: %.2f [ms]" % (treeT)#
    #for numB in numBasal:#totalS in totalSyn:#
    #    print "Running for %s basal synapses" % (int(numB))
    #    for partApi in partApical:
    for medS in medSegment:
#        for numA in numApical:#np.linspace(0,totalS,num=41):#
        print "Running for inhibition in sec: %s" % (int(medS)) #partApi=%s" % (float(partApi)) #
#        numA = int(totalS*0.4)
        # NOTE(review): positionally, numInh lands on runSim's numApi slot,
        # numApical on partApi, and partApical on the unused inSec parameter.
        # This matches runSim's internals but the naming is misleading - confirm.
        spks[j][i],frqs[j][i] = runSim(L5PC,ApiBasInd,treeTime,numBasal,numInh,numApical,medS,partApical,numExperiments)
        j = j+1
    j = 0
    i = i+1
# Persist results keyed by seed; protocol 2 keeps Python 2 readability.
pickle.dump(spks,open(saveDir+name+'_spks'+str(randomSeed)+".npy","wb"),protocol=2)
pickle.dump(frqs,open(saveDir+name+'_frqs'+str(randomSeed)+".npy","wb"),protocol=2)
print "Saved as ", saveDir+name+'_spks'+str(randomSeed)+".npy"
print "Total running time was: ", (time.time()-start)/3600, "hours"
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/concidence/'
#pickle.dump(spks1,open(saveDir+'dt_treet_30tot_hires_spks',"wb"),protocol=2)
#pickle.dump(frqs1,open(saveDir+'dt_treet_30tot_hires_frqs',"wb"),protocol=2)
| 41.041543 | 150 | 0.670161 | #!/usr/lib/python-exec/python2.7/python
import os
import sys
# All morphology/biophysics file names below are resolved relative to this dir.
os.chdir('C:/Users/Leleo/Documents/Active Cell Real Morphology/')
from neuron import h
from neuron import gui
#%%
import numpy as np
import time
import math
import cPickle as pickle
#%%
# Optional scikit-learn machinery; disabled by default (flip sk to True to use).
sk = False
if sk==True:
    from sklearn import decomposition
    from sklearn import cluster
    from sklearn import linear_model
    from sklearn import ensemble
    from sklearn import cross_validation
#%%
# Load NEURON's GUI/import helpers and disable variable-step integration.
h.load_file('nrngui.hoc')
h.load_file("import3d.hoc")
cvode = h.CVode()
cvode.active(0)
# Candidate morphology / biophysics / template files; the uncommented line wins.
morphologyFilename = "morphologies/cell1.asc"
#morphologyFilename = "morphologies/cell2.asc"
#morphologyFilename = "morphologies/cell3.asc"
#biophysicalModelFilename = "L5PCbiophys1.hoc"
#biophysicalModelFilename = "L5PCbiophys2.hoc"
#biophysicalModelFilename = "L5PCbiophys3.hoc"
#biophysicalModelFilename = "L5PCbiophys4.hoc"
#biophysicalModelFilename = "L5PCbiophys5.hoc"
biophysicalModelFilename = "L5PCbiophys5b.hoc"
#biophysicalModelTemplateFilename = "L5PCtemplate.hoc"
biophysicalModelTemplateFilename = "L5PCtemplate_2.hoc"
#%%
# Instantiate the layer-5 pyramidal cell template at 34 degrees C.
h.load_file(biophysicalModelFilename)
h.load_file(biophysicalModelTemplateFilename)
L5PC = h.L5PCtemplate(morphologyFilename)
h.celsius = 34
#%% set dendritic VDCC g=0
#secs = h.allsec
# With VDCC_g=1 this block is skipped; set VDCC_g=0 to zero the dendritic
# high-voltage-activated Ca conductance on every section that has it.
VDCC_g = 1
if VDCC_g==0:
    for sec in h.allsec():
        if hasattr(sec, 'gCa_HVAbar_Ca_HVA'):
            sec.gCa_HVAbar_Ca_HVA = 0
#%% helper functions
def Add_NMDA_SingleSynapticEventToSegment(segment, activationTime, synapseWeight, exc_inh):
    """Attach one stochastic synapse to *segment* and schedule a single
    activation at *activationTime* (ms).

    exc_inh selects the synapse type: 0 builds a mixed GABAA/GABAB
    inhibitory point process (GABAB component zeroed), any other value
    builds an AMPA/NMDA excitatory point process.  Returns the
    (NetStim, NetCon, synapse) triple; the caller must keep references
    to all three alive for the event to fire during the run.
    """
    if exc_inh != 0:
        # Excitatory AMPA/NMDA synapse, deterministic release (Dep=Fac=0).
        syn = h.ProbAMPANMDA2(segment)
        syn.gmax = .0004
        syn.Use = 1.0
        syn.Dep = 0
        syn.Fac = 0
    else:
        # Inhibitory GABAA/B synapse; GABAB_ratio=0 leaves only the GABAA part.
        syn = h.ProbGABAAB_EMS(segment)
        syn.tau_r_GABAA = 0.2
        syn.tau_d_GABAA = 8
        syn.tau_r_GABAB = 3.5
        syn.tau_d_GABAB = 260.9
        syn.e_GABAA = -80
        syn.e_GABAB = -97
        syn.GABAB_ratio = 0.0
    # One presynaptic spike generator delivering exactly one event.
    stim = h.NetStim()
    stim.number = 1
    stim.start = activationTime
    conn = h.NetCon(stim, syn)
    conn.delay = 0
    conn.weight[0] = synapseWeight
    return stim, conn, syn
#%% create length-weighted random section list
def randSecWeight(obj,medSeg,part,num):
    """Draw `num` section indices from the section list `obj`, weighted by
    section length and clustered around section `medSeg`.

    A target arc position is drawn uniformly from a window of total width
    sum(L)/part centred on the cumulative-length position of `medSeg`; the
    section whose cumulative-length interval contains the draw is selected.
    Draws outside the tree are redrawn; draws whose path distance from
    `medSeg` (via h.distance) exceeds sum(L)/part are also redrawn, but that
    rejection is abandoned after 5 consecutive failures for a slot (farbug).

    NOTE: the redraw loop consumes np.random in a data-dependent order, so
    results depend on the global NumPy RNG state.
    """
    # Per-section lengths (NEURON section attribute L).
    allLen = []
    for i in range(len(obj)):
        allLen.append(obj[i].L)
    randSecList = [0 for i in range(num)]
    h.distance(sec=obj[medSeg]) # define distance measure from medSeg
    # draw from cumulative length a seg for syn
    x = np.sum(allLen[:medSeg])+(np.random.rand(num)-0.5)*np.sum(allLen)/part
    j=0
    farbug=0
    while j<num:
        # redraw boundary crossers
        if x[j]<0 or x[j]>np.sum(allLen):
            x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
            continue
        # find sec
        for i in range(len(obj)):
            if x[j]<np.sum(allLen[:i+1]):
                randSecList[j]=i
                break
        # check that sec is sufficiently close to medseg
        if h.distance(obj[randSecList[j]](1))>sum(allLen)/part and farbug<5:#obj[medSeg].L+obj[randSecList[j]].L:#
            x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
            farbug+=1
            continue
        j+=1
        farbug=0
    return randSecList
#%% add some random NMDA synapses and plot a somatic trace just to see all things are alive and kicking
def runSim(cell,ApiBasInt,treeT,numBas,numApi,partApi,medSeg,inSec,numExp):
simulationTime = 400
silentTimeAtStart = 100
delayTime = 200
silentTimeAtEnd = 100
origNumSamplesPerMS = 40 #20 # was 20!!!
totalSimDuration = simulationTime + silentTimeAtStart + silentTimeAtEnd
listOfSomaTraces = []
spikes = []
numSpikes = 0
numSpikesPerExp = [0]*numExp
freq = [0]*numExp
for experiment in range(numExp):
startTime = time.time()
listOfRandBasalSectionInds = randSecWeight(cell.dend,44,1,int(numBas))#np.random.randint(0,len(cell.dend),int(numBas))
listOfRandApicalSectionInds = randSecWeight(cell.apic,62,20,20)#int(numApi)) #medSeg + np.random.randint(-distance,distance,int(numApi))
if partApi>15:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
else:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
# listOfRandApicalSectionInds = randSecWeight(cell.apic,np.random.randint(37,78),partApi,10)#int(numApi))
# listOfRandObliqueSectionInds = np.random.randint(0,len(cell.apic)/partApi,0)#int(40-numApi)) #obliques
listOfBasalSections = [cell.dend[x] for x in listOfRandBasalSectionInds]
listOfApicalSections = [cell.apic[x] for x in listOfRandApicalSectionInds]
listOfInhSections = [cell.apic[x] for x in listOfRandInhSectionInds]
# listOfObliqueSections = [cell.apic[x] for x in listOfRandObliqueSectionInds]
# listOfSections = listOfApicalSections + listOfBasalSections
listOfRandBasalLocationsInSection = np.random.rand(len(listOfRandBasalSectionInds))
listOfRandApicalLocationsInSection = np.random.rand(len(listOfRandApicalSectionInds))
# listOfRandInhLocationsInSection = float(inSec)/4 + 0.25*np.random.rand(len(listOfRandInhSectionInds))
if partApi>30:
listOfRandInhLocationsInSection = [1]*numApi #min(1,float(7440)/partApi/cell.apic[medSeg].L)*np.random.rand(len(listOfRandInhSectionInds))
else:
listOfRandInhLocationsInSection = np.random.rand(len(listOfRandInhSectionInds))
# listOfRandObliqueLocationsInSection = np.random.rand(len(listOfRandObliqueSectionInds))
# listOfSegLocs = list(listOfRandApicalLocationsInSection) + list(listOfRandBasalLocationsInSection)
listOfEvents = []
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #gauss(0.5,0.2)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + delayTime + ApiBasInt + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
#add obliques
# for k, section in enumerate(listOfObliqueSections):
# eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(1,0.2) #simulationTime/2*np.random.rand(1)[0]
# listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandObliqueLocationsInSection[k]), eventTime, 2))
##%% run the simulation
h.dt = 0.025
recTime = h.Vector()
recTime.record(h._ref_t)
recVoltage = h.Vector()
recVoltage.record(cell.soma[0](0.5)._ref_v)
cvode.cache_efficient(1)
h.finitialize(-76)
stopTime = totalSimDuration
neuron.run(stopTime)
# plot the trace
origRecordingTime = np.array(recTime.to_python()) # ugly workaround to recTime.as_numpy()
origSomaVoltage = np.array(recVoltage.to_python()) # ugly workaround to recVoltage.as_numpy()
recordingTime = np.arange(0,totalSimDuration,1.0/origNumSamplesPerMS)
somaVoltage = np.interp(recordingTime, origRecordingTime, origSomaVoltage)
listOfSomaTraces.append(somaVoltage)
origSpikes = []
tempSpikes = 0
k = (silentTimeAtStart+delayTime-50)*origNumSamplesPerMS #int(np.min([0,ApiBasInt]))
while k < (totalSimDuration-silentTimeAtEnd)*origNumSamplesPerMS:
if somaVoltage[k]>-10:
tempTime = float(k)/origNumSamplesPerMS
if tempSpikes>0 and tempTime-origSpikes[-1]>20:
break
origSpikes.append(tempTime)
# numSpikesPerExp[experiment] = tempSpikes + 1
numSpikes = numSpikes + 1
tempSpikes += 1 # numSpikesPerExp[experiment]
k = k+origNumSamplesPerMS*3
else:
k = k+5 # was 1 before
# spikes = []
spikes.append(origSpikes)
if tempSpikes>1:
freq[experiment] = tempSpikes/(origSpikes[-1]-origSpikes[-tempSpikes])
# plt.figure()
# plt.plot(recordingTime, somaVoltage)
# plt.xlabel('Time [ms]'); plt.ylabel('Voltage [mV]')
# plt.axis(xmin=0, xmax=stopTime, ymin=min(somaVoltage)-5, ymax=max(somaVoltage)+5)
#listOfEvents = []
if (experiment+1)%10==0 or (time.time()-startTime)/60>5 or numExp<5:
print "Dt %s treeTime %s exp. # %s took %.3f minutes" % (ApiBasInt,treeT,experiment+1, (time.time()-startTime)/60)
print "Mean no. of spikes: %s" % (float(numSpikes)/numExp)
return float(numSpikes)/numExp,np.mean(freq)#, listOfSomaTraces, recordingTime
#%% run simulation on some parameter pair, plot the space
# Top-level driver: rebuild the cell, sweep (interval x inhibition-centre
# section) with runSim, and pickle the spike-count / frequency grids per seed.
L5PC = h.L5PCtemplate(morphologyFilename)
name = 'inh_secdt_meds62_exc60dt0sd0num15'
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/coincidence/wgh1/'+name+'/'
saveDir = 'C:/Users/Leleo/Documents/coincidence/wgh1/'+name+'/'
if not os.path.exists(saveDir):
    os.makedirs(saveDir)
# Seed from argv[1] when supplied so runs are reproducible; otherwise draw one.
try:
    randomSeed = int(sys.argv[1])
    print 'random seed selected by user - %d' %(randomSeed)
except:
    randomSeed = np.random.randint(100000)
    print 'randomly chose seed - %d' %(randomSeed)
np.random.seed(randomSeed)
#ind = 1
#a = np.linspace(-50,-25,num=6),np.linspace(-20,20,num=21),np.linspace(25,100,num=16)
# Sweep parameters (commented alternatives are previous experiment grids).
ApicalBasalInterval = [0]#np.linspace(-10,10,num=11) #[x for xs in a for x in xs]
numBasal = 50 #35 #np.linspace(0,200,num=81)
numApical = 30 #np.linspace(0,20,num=11)#50,num=21)#
numInh = 20 #0
#numOblique = 40-numApical
#totalSyn = [20,50,100,200,400,600,800]#[80,120,150,180]#np.linspace(0,200,num=5)#41)
partApical = 2 #[5,10,20,50,100,200,500]#[i for i in np.linspace(10,100,num=10)]+[200,300,400,500]#np.logspace(0,7,num=29,base=2)
medSegment = [0,36,60,63]#[36]+[i for i in np.linspace(60,65,num=6)]#37,44,num=8)] ##40#60 #
#secInh = [60[0.5],60[1],61[0],62[0],63[0],64[0],67[0]] #optimal planned inh at prox junc
#secInh = [60[1],61[0],63[1]] #encapsulating inh for partApi=20
#random.choice(secInh)
treeTime = 0 #0.1*np.logspace(3,10,num=22,base=2)
numExperiments = 20
# Result grids indexed [medSegment][interval].
spks = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
frqs = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
#trc = [[[] for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
i = 0
j = 0
start = time.time()
for ApiBasInd in ApicalBasalInterval:#treeT in treeTime:#
    print "Running for interval: %s [ms]" % (int(ApiBasInd))#treeTime: %.2f [ms]" % (treeT)#
    #for numB in numBasal:#totalS in totalSyn:#
    #    print "Running for %s basal synapses" % (int(numB))
    #    for partApi in partApical:
    for medS in medSegment:
#        for numA in numApical:#np.linspace(0,totalS,num=41):#
        print "Running for inhibition in sec: %s" % (int(medS)) #partApi=%s" % (float(partApi)) #
#        numA = int(totalS*0.4)
        # NOTE(review): positionally, numInh lands on runSim's numApi slot,
        # numApical on partApi, and partApical on the unused inSec parameter.
        # This matches runSim's internals but the naming is misleading - confirm.
        spks[j][i],frqs[j][i] = runSim(L5PC,ApiBasInd,treeTime,numBasal,numInh,numApical,medS,partApical,numExperiments)
        j = j+1
    j = 0
    i = i+1
# Persist results keyed by seed; protocol 2 keeps Python 2 readability.
pickle.dump(spks,open(saveDir+name+'_spks'+str(randomSeed)+".npy","wb"),protocol=2)
pickle.dump(frqs,open(saveDir+name+'_frqs'+str(randomSeed)+".npy","wb"),protocol=2)
print "Saved as ", saveDir+name+'_spks'+str(randomSeed)+".npy"
print "Total running time was: ", (time.time()-start)/3600, "hours"
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/concidence/'
#pickle.dump(spks1,open(saveDir+'dt_treet_30tot_hires_spks',"wb"),protocol=2)
#pickle.dump(frqs1,open(saveDir+'dt_treet_30tot_hires_frqs',"wb"),protocol=2)
| 0 | 0 |
ec42f79f6e9bebd0d6cc7d934779f60d52434f4f | 1,263 | py | Python | sharesansar/driver.py | prajwal-stha/web-scrapers | bfcc5a065e859c69f1a9a2065c9c857b22af42c0 | [
"MIT"
] | null | null | null | sharesansar/driver.py | prajwal-stha/web-scrapers | bfcc5a065e859c69f1a9a2065c9c857b22af42c0 | [
"MIT"
] | null | null | null | sharesansar/driver.py | prajwal-stha/web-scrapers | bfcc5a065e859c69f1a9a2065c9c857b22af42c0 | [
"MIT"
] | null | null | null | from selenium import webdriver
# Scrape the daily Commercial Bank share-price table from sharesansar.com,
# one Selenium page interaction per date in the configured range.
# NOTE(review): find_element_by_id/find_elements_by_id are the pre-Selenium-4
# API, and the fixed sleeps assume the page responds within 3/8 seconds.
import time
from datetime import date
from selenium.webdriver.common.keys import Keys
from scrape_table_all import scrape_table
from return_dates import return_dates
# Open the link
PATH = "/Users/prajwalshrestha/Desktop/PythonApp/thesis/web-scrapers/sharesansar/chromedriver"
browser = webdriver.Chrome(PATH)
browser.maximize_window()
browser.get("https://www.sharesansar.com/today-share-price")
# Select the type of data to scrape
searchBar = browser.find_element_by_id('sector')
browser.implicitly_wait(20)
# Select Commercial Bank
searchBar.send_keys('Commercial Bank')
# Date range to scrape; return_dates presumably expands it to one entry per
# day - verify against its implementation.
sdate = date(2020, 3, 23)
edate = date(2021, 5, 13)
dates = return_dates(sdate, edate)
for day in dates:
    # Enter the date
    date_box = browser.find_elements_by_id('fromdate')
    date_box[0].clear()
    date_box[0].send_keys(day)
    # Click Search
    searchBar = browser.find_element_by_id('btn_todayshareprice_submit')
    searchBar.click()
    time.sleep(3)
    # Needed for this sites
    searchBar.send_keys(Keys.ENTER)
    # Wait for data to show up longer wait time ensures data has loaded before scraping begins
    time.sleep(8)
    # Scrape the table
    html = browser.page_source
    scrape_table(data=html, date=day)
browser.close()
| 30.071429 | 94 | 0.760095 | from selenium import webdriver
# Scrape the daily Commercial Bank share-price table from sharesansar.com,
# one Selenium page interaction per date in the configured range.
# NOTE(review): find_element_by_id/find_elements_by_id are the pre-Selenium-4
# API, and the fixed sleeps assume the page responds within 3/8 seconds.
import time
from datetime import date
from selenium.webdriver.common.keys import Keys
from scrape_table_all import scrape_table
from return_dates import return_dates
# Open the link
PATH = "/Users/prajwalshrestha/Desktop/PythonApp/thesis/web-scrapers/sharesansar/chromedriver"
browser = webdriver.Chrome(PATH)
browser.maximize_window()
browser.get("https://www.sharesansar.com/today-share-price")
# Select the type of data to scrape
searchBar = browser.find_element_by_id('sector')
browser.implicitly_wait(20)
# Select Commercial Bank
searchBar.send_keys('Commercial Bank')
# Date range to scrape; return_dates presumably expands it to one entry per
# day - verify against its implementation.
sdate = date(2020, 3, 23)
edate = date(2021, 5, 13)
dates = return_dates(sdate, edate)
for day in dates:
    # Enter the date
    date_box = browser.find_elements_by_id('fromdate')
    date_box[0].clear()
    date_box[0].send_keys(day)
    # Click Search
    searchBar = browser.find_element_by_id('btn_todayshareprice_submit')
    searchBar.click()
    time.sleep(3)
    # Needed for this sites
    searchBar.send_keys(Keys.ENTER)
    # Wait for data to show up longer wait time ensures data has loaded before scraping begins
    time.sleep(8)
    # Scrape the table
    html = browser.page_source
    scrape_table(data=html, date=day)
browser.close()
| 0 | 0 |
d08252eba99ccd39e5902637266d30dc619c8ac2 | 4,502 | py | Python | tests/view_tests/test_group_routes.py | Kalafut-organization/elephant_vending_machine_backend | f8b94adda2b2e9c824a3983749061a549ad1d0ec | [
"MIT"
] | null | null | null | tests/view_tests/test_group_routes.py | Kalafut-organization/elephant_vending_machine_backend | f8b94adda2b2e9c824a3983749061a549ad1d0ec | [
"MIT"
] | 30 | 2020-03-23T21:36:07.000Z | 2022-02-16T17:09:18.000Z | tests/view_tests/test_group_routes.py | Kalafut-organization/elephants_cse5911 | 3d2c72173070666e8b7503af6efe891e6507936d | [
"MIT"
] | 6 | 2020-04-14T00:48:58.000Z | 2022-02-23T18:39:44.000Z | import pytest
"""Integration tests for the /groups endpoints.

Remote-host side effects (ssh/scp via ``subprocess.run``) are stubbed with
``monkeypatch``; group directories are created/removed directly under
``elephant_vending_machine/static/img``.
"""
import subprocess
from io import BytesIO
import json
from werkzeug.wrappers import Response
from elephant_vending_machine import elephant_vending_machine
from subprocess import CompletedProcess, CalledProcessError

def raise_(ex):
    """Raise *ex*; lets a monkeypatched lambda raise an exception."""
    raise ex

@pytest.fixture
def client():
    """Yield a Flask test client; remove test group directories afterwards."""
    elephant_vending_machine.APP.config['TESTING'] = True
    with elephant_vending_machine.APP.test_client() as client:
        yield client
    # Best-effort cleanup of directories the tests may have created.
    subprocess.call(["rmdir","elephant_vending_machine/static/img/test"])
    subprocess.call(["rmdir","elephant_vending_machine/static/img/test2"])

def test_post_group_route_no_name(client):
    """POST /groups without a name field returns 400 with an explanation."""
    response = client.post('/groups')
    assert response.status_code == 400
    assert b'Error with request: No name field in body of request.' in response.data

def test_post_group_route_empty_name(client):
    """POST /groups with an empty name returns 400."""
    data = {'name': ''}
    response = client.post('/groups', data=data)
    assert response.status_code == 400
    assert json.loads(response.data)['message'] == 'Error with request: Group name must not be empty.'

def test_post_group_route_duplicate(client):
    """POST /groups with an already-existing group name returns 400."""
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 400
    assert json.loads(response.data)['message'] == 'Error with request: Group already exists.'

def test_post_group_route_copying_exception(monkeypatch, client):
    """A failing remote copy (ssh error) surfaces as a 500."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: raise_(CalledProcessError(1, ['ssh'])))
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 500
    assert json.loads(response.data)['message'] == 'Error: Failed to create group on hosts.'

def test_post_group_route_happy_path(monkeypatch, client):
    """POST /groups with a fresh name and working hosts returns 201."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 201
    # Bug fix: this assertion was a bare bytes literal (always truthy) and
    # never actually inspected the response body.
    assert b'Success: Group created.' in response.data

def test_delete_group_route_not_exist(client):
    """DELETE of a nonexistent group returns 400."""
    response = client.delete('/groups/test')
    assert response.status_code == 400
    assert b"Group test does not exist and so couldn't be deleted." in response.data

def test_delete_group_route_os_error(monkeypatch, client):
    """An OSError while removing the local directory returns 400."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    monkeypatch.setattr('shutil.rmtree', lambda path: raise_(OSError))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 400
    assert b'An error has occurred and the group could not be deleted' in response.data

def test_delete_group_happy_path(monkeypatch, client):
    """DELETE of an existing group with working hosts returns 200."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 200
    assert b'Group test was successfully deleted.' in response.data

def test_delete_fixations_group(monkeypatch, client):
    """The reserved Fixations group may not be deleted."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    response = client.delete('/groups/Fixations')
    assert response.status_code == 400
    assert b'The fixations group cannot be deleted' in response.data

def test_delete_group_no_connection(monkeypatch, client):
    """A failing remote delete surfaces as a 500 and reports no deletion."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: raise_(CalledProcessError(1, ['ssh'])))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 500
    assert json.loads(response.data)['message'] == ['Error: Failed to delete file from hosts. ', \
        'Group not deleted, please try again']

def test_get_group_route(client):
    """GET /groups lists at least the directories created by this test."""
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test2"])
    response = client.get('/groups')
    response_json_files = json.loads(response.data)['names']
    min_elements_expected = ["test","test2"]
    assert all(elem in response_json_files for elem in min_elements_expected)
    assert response.status_code == 200
| 45.02 | 121 | 0.740116 | import pytest
"""Integration tests for the /groups endpoints.

Remote-host side effects (ssh/scp via ``subprocess.run``) are stubbed with
``monkeypatch``; group directories are created/removed directly under
``elephant_vending_machine/static/img``.
"""
import subprocess
from io import BytesIO
import json
from werkzeug.wrappers import Response
from elephant_vending_machine import elephant_vending_machine
from subprocess import CompletedProcess, CalledProcessError

def raise_(ex):
    """Raise *ex*; lets a monkeypatched lambda raise an exception."""
    raise ex

@pytest.fixture
def client():
    """Yield a Flask test client; remove test group directories afterwards."""
    elephant_vending_machine.APP.config['TESTING'] = True
    with elephant_vending_machine.APP.test_client() as client:
        yield client
    # Best-effort cleanup of directories the tests may have created.
    subprocess.call(["rmdir","elephant_vending_machine/static/img/test"])
    subprocess.call(["rmdir","elephant_vending_machine/static/img/test2"])

def test_post_group_route_no_name(client):
    """POST /groups without a name field returns 400 with an explanation."""
    response = client.post('/groups')
    assert response.status_code == 400
    assert b'Error with request: No name field in body of request.' in response.data

def test_post_group_route_empty_name(client):
    """POST /groups with an empty name returns 400."""
    data = {'name': ''}
    response = client.post('/groups', data=data)
    assert response.status_code == 400
    assert json.loads(response.data)['message'] == 'Error with request: Group name must not be empty.'

def test_post_group_route_duplicate(client):
    """POST /groups with an already-existing group name returns 400."""
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 400
    assert json.loads(response.data)['message'] == 'Error with request: Group already exists.'

def test_post_group_route_copying_exception(monkeypatch, client):
    """A failing remote copy (ssh error) surfaces as a 500."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: raise_(CalledProcessError(1, ['ssh'])))
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 500
    assert json.loads(response.data)['message'] == 'Error: Failed to create group on hosts.'

def test_post_group_route_happy_path(monkeypatch, client):
    """POST /groups with a fresh name and working hosts returns 201."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    data = {'name': 'test'}
    response = client.post('/groups', data=data)
    assert response.status_code == 201
    # Bug fix: this assertion was a bare bytes literal (always truthy) and
    # never actually inspected the response body.
    assert b'Success: Group created.' in response.data

def test_delete_group_route_not_exist(client):
    """DELETE of a nonexistent group returns 400."""
    response = client.delete('/groups/test')
    assert response.status_code == 400
    assert b"Group test does not exist and so couldn't be deleted." in response.data

def test_delete_group_route_os_error(monkeypatch, client):
    """An OSError while removing the local directory returns 400."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    monkeypatch.setattr('shutil.rmtree', lambda path: raise_(OSError))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 400
    assert b'An error has occurred and the group could not be deleted' in response.data

def test_delete_group_happy_path(monkeypatch, client):
    """DELETE of an existing group with working hosts returns 200."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 200
    assert b'Group test was successfully deleted.' in response.data

def test_delete_fixations_group(monkeypatch, client):
    """The reserved Fixations group may not be deleted."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: CompletedProcess(['some_command'], returncode=0))
    response = client.delete('/groups/Fixations')
    assert response.status_code == 400
    assert b'The fixations group cannot be deleted' in response.data

def test_delete_group_no_connection(monkeypatch, client):
    """A failing remote delete surfaces as a 500 and reports no deletion."""
    monkeypatch.setattr('subprocess.run', lambda command, check, shell: raise_(CalledProcessError(1, ['ssh'])))
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    response = client.delete('/groups/test')
    assert response.status_code == 500
    assert json.loads(response.data)['message'] == ['Error: Failed to delete file from hosts. ', \
        'Group not deleted, please try again']

def test_get_group_route(client):
    """GET /groups lists at least the directories created by this test."""
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test"])
    subprocess.call(["mkdir", "elephant_vending_machine/static/img/test2"])
    response = client.get('/groups')
    response_json_files = json.loads(response.data)['names']
    min_elements_expected = ["test","test2"]
    assert all(elem in response_json_files for elem in min_elements_expected)
    assert response.status_code == 200
| 0 | 0 |
b8886f82232200a3b8d715152e899cad7d68af4a | 2,038 | py | Python | ots/main/urls.py | rashikbuksh/Optimal-Transportation-System | 18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882 | [
"MIT"
] | 3 | 2021-12-01T15:56:42.000Z | 2021-12-23T15:49:48.000Z | ots/main/urls.py | rashikbuksh/Optimal-Transportation-System | 18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882 | [
"MIT"
] | null | null | null | ots/main/urls.py | rashikbuksh/Optimal-Transportation-System | 18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882 | [
"MIT"
] | 2 | 2021-09-24T19:49:28.000Z | 2021-12-22T10:25:38.000Z | from django.conf.urls import url
from django.urls import path
from . import views
# URL configuration for the main app; route names reverse under the
# 'articles' namespace.  Mixes the legacy url() regex helper with path().
app_name = 'articles'
urlpatterns = [
    url(r'^$', views.homepage, name="list"),
    url(r'^about/$', views.about, name="about"),
    url(r'^contact/$', views.contact, name="contact"),
    url(r'^bolaka/$', views.bolaka, name="bolaka"),
    url(r'^offers_page/$', views.offers, name="offers_page"),
    url(r'^bolakareview/$', views.bolakareview, name="bolakareview"),
    url(r'^ticket/$', views.ticket, name="ticket"),
    # NOTE(review): path() matches literally -- the trailing '$' here is part
    # of the URL, not a regex anchor.  Likely unintended; confirm before fixing.
    path('deletebalaka/<str:pk>/$', views.deletebalaka, name="deletebalaka"),
    url(r'^ticket_page/$', views.ticket_page, name="ticket_page"),
    # Air
    url(r'^Air_Biman_Bangladesh/$', views.Air_Biman_Bangladesh, name="Air_Biman_Bangladesh"),
    url(r'^Air_Novoair/$', views.Air_Novoair, name="Air_Novoair"),
    url(r'^Air_US_Bangla/$', views.Air_US_Bangla, name="Air_US_Bangla"),
    # Bus
    url(r'^Bus_Akash/$', views.Bus_Akash, name="Bus_Akash"),
    url(r'^Bus_Alif/$', views.Bus_Alif, name="Bus_Alif"),
    url(r'^Bus_Anabil/$', views.Bus_Anabil, name="Bus_Anabil"),
    url(r'^Bus_BRTC/$', views.Bus_BRTC, name="Bus_BRTC"),
    url(r'^Bus_Green_Dhaka/$', views.Bus_Green_Dhaka, name="Bus_Green_Dhaka"),
    url(r'^Bus_Raida/$', views.Bus_Raida, name="Bus_Raida"),
    url(r'^Bus_Skyline/$', views.Bus_Skyline, name="Bus_Skyline"),
    url(r'^Bus_Supravat/$', views.Bus_Supravat, name="Bus_Supravat"),
    url(r'^Bus_VIP/$', views.Bus_VIP, name="Bus_VIP"),
    # Train
    url(r'^Train_Chitra_Express/$', views.Train_Chitra_Express, name="Train_Chitra_Express"),
    url(r'^Train_Ekota_Express/$', views.Train_Ekota_Express, name="Train_Ekota_Express"),
    url(r'^Train_Mahanagar_Godhuli/$', views.Train_Mahanagar_Godhuli, name="Train_Mahanagar_Godhuli"),
    url(r'^Train_Suborno_Express/$', views.Train_Suborno_Express, name="Train_Suborno_Express"),
    url(r'^Train_Tista_Express/$', views.Train_Tista_Express, name="Train_Tista_Express"),
    # Catch-all slug route.  NOTE(review): reuses name "list" (also used by the
    # '^$' route above), so reverse('articles:list') is ambiguous -- confirm.
    url(r'^(?P<slug>[\w-]+)/$', views.homepage, name="list"),
]
| 44.304348 | 102 | 0.686948 | from django.conf.urls import url
from django.urls import path
from . import views
# URL configuration for the main app; route names reverse under the
# 'articles' namespace.  Mixes the legacy url() regex helper with path().
app_name = 'articles'
urlpatterns = [
    url(r'^$', views.homepage, name="list"),
    url(r'^about/$', views.about, name="about"),
    url(r'^contact/$', views.contact, name="contact"),
    url(r'^bolaka/$', views.bolaka, name="bolaka"),
    url(r'^offers_page/$', views.offers, name="offers_page"),
    url(r'^bolakareview/$', views.bolakareview, name="bolakareview"),
    url(r'^ticket/$', views.ticket, name="ticket"),
    # NOTE(review): path() matches literally -- the trailing '$' here is part
    # of the URL, not a regex anchor.  Likely unintended; confirm before fixing.
    path('deletebalaka/<str:pk>/$', views.deletebalaka, name="deletebalaka"),
    url(r'^ticket_page/$', views.ticket_page, name="ticket_page"),
    # Air
    url(r'^Air_Biman_Bangladesh/$', views.Air_Biman_Bangladesh, name="Air_Biman_Bangladesh"),
    url(r'^Air_Novoair/$', views.Air_Novoair, name="Air_Novoair"),
    url(r'^Air_US_Bangla/$', views.Air_US_Bangla, name="Air_US_Bangla"),
    # Bus
    url(r'^Bus_Akash/$', views.Bus_Akash, name="Bus_Akash"),
    url(r'^Bus_Alif/$', views.Bus_Alif, name="Bus_Alif"),
    url(r'^Bus_Anabil/$', views.Bus_Anabil, name="Bus_Anabil"),
    url(r'^Bus_BRTC/$', views.Bus_BRTC, name="Bus_BRTC"),
    url(r'^Bus_Green_Dhaka/$', views.Bus_Green_Dhaka, name="Bus_Green_Dhaka"),
    url(r'^Bus_Raida/$', views.Bus_Raida, name="Bus_Raida"),
    url(r'^Bus_Skyline/$', views.Bus_Skyline, name="Bus_Skyline"),
    url(r'^Bus_Supravat/$', views.Bus_Supravat, name="Bus_Supravat"),
    url(r'^Bus_VIP/$', views.Bus_VIP, name="Bus_VIP"),
    # Train
    url(r'^Train_Chitra_Express/$', views.Train_Chitra_Express, name="Train_Chitra_Express"),
    url(r'^Train_Ekota_Express/$', views.Train_Ekota_Express, name="Train_Ekota_Express"),
    url(r'^Train_Mahanagar_Godhuli/$', views.Train_Mahanagar_Godhuli, name="Train_Mahanagar_Godhuli"),
    url(r'^Train_Suborno_Express/$', views.Train_Suborno_Express, name="Train_Suborno_Express"),
    url(r'^Train_Tista_Express/$', views.Train_Tista_Express, name="Train_Tista_Express"),
    # Catch-all slug route.  NOTE(review): reuses name "list" (also used by the
    # '^$' route above), so reverse('articles:list') is ambiguous -- confirm.
    url(r'^(?P<slug>[\w-]+)/$', views.homepage, name="list"),
]
| 0 | 0 |
b81f72c1a629494935d113e4876d1627760656f2 | 53 | py | Python | bana/OpenMayaFX/__init__.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | 24 | 2017-01-11T15:57:46.000Z | 2020-09-23T06:18:30.000Z | bana/OpenMayaFX/__init__.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | null | null | null | bana/OpenMayaFX/__init__.py | christophercrouzet/bana | 8087df05ba9844b4d78d3c4699948ca61cf7621d | [
"MIT"
] | 2 | 2017-03-06T23:52:08.000Z | 2020-09-23T06:19:03.000Z | """Extensions for the ``maya.OpenMayaFX`` module."""
| 26.5 | 52 | 0.679245 | """Extensions for the ``maya.OpenMayaFX`` module."""
| 0 | 0 |
8b0fe30871875ffa461d0b5d638a14149f7fd68f | 378 | py | Python | apps/metadata/users/models.py | DiegoCorrea/ouvidoMusical | e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5 | [
"MIT"
] | 1 | 2021-10-06T19:35:48.000Z | 2021-10-06T19:35:48.000Z | apps/metadata/users/models.py | DiegoCorrea/ouvido_musical-Back | e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5 | [
"MIT"
] | null | null | null | apps/metadata/users/models.py | DiegoCorrea/ouvido_musical-Back | e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models


def _generate_user_id():
    """Return a fresh hex UUID (uuid1) for use as a primary key.

    Must be a named module-level function (not a lambda) so Django
    migrations can serialize the default.
    """
    return uuid.uuid1().hex


class User(models.Model):
    # Bug fix: ``default=uuid.uuid1().hex`` was evaluated once at import
    # time, so every row created without an explicit id received the SAME
    # default value -- and since this is a unique primary key, the second
    # such insert would fail.  Passing the callable makes Django generate
    # a new id for each object.
    id = models.CharField(
        max_length=255,
        unique=True,
        db_index=True,
        primary_key=True,
        default=_generate_user_id
    )

    def as_json(self):
        """Return a JSON-serializable dict representation of this user."""
        return dict(
            user_id=self.id
        )
| 17.181818 | 39 | 0.589947 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models
class User(models.Model):
id = models.CharField(
max_length=255,
unique=True,
db_index=True,
primary_key=True,
default=uuid.uuid1().hex
)
def as_json(self):
return dict(
user_id=self.id
)
| 0 | 0 |
13ba18ae2cb6655e5d831659bbe1a04a83ddcd4a | 2,312 | py | Python | running_modes/constructors/running_mode.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 183 | 2020-04-04T02:01:15.000Z | 2022-03-30T21:56:56.000Z | running_modes/constructors/running_mode.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 39 | 2020-04-05T15:19:56.000Z | 2022-03-09T12:58:21.000Z | running_modes/constructors/running_mode.py | lilleswing/Reinvent-1 | ac4e3e6fa6379c6f4af883478dfd1b3407933ada | [
"Apache-2.0"
] | 70 | 2020-04-05T19:25:43.000Z | 2022-02-22T12:04:39.000Z | from running_modes.configurations import GeneralConfigurationEnvelope
from running_modes.constructors.base_running_mode import BaseRunningMode
from running_modes.constructors.create_model_mode_constructor import CreateModelModeConstructor
from running_modes.constructors.curriculum_learning_mode_constructor import CurriculumLearningModeConstructor
from running_modes.constructors.reinforcement_learning_mode_constructor import ReinforcementLearningModeConstructor
from running_modes.constructors.sampling_mode_constructor import SamplingModeConstructor
from running_modes.constructors.scoring_mode_constructor import ScoringModeConstructor
from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor
from running_modes.constructors.validation_mode_constructor import ValidationModeConstructor
from running_modes.enums.running_mode_enum import RunningModeEnum
class RunningMode:
    """Factory that maps a configuration's run type to its constructor.

    Note: ``__new__`` deliberately returns an instance of the selected
    constructor class (a ``BaseRunningMode``), not of ``RunningMode``.
    """

    def __new__(cls, configuration: GeneralConfigurationEnvelope) -> BaseRunningMode:
        running_mode_enum = RunningModeEnum()
        # Dispatch table: run type string -> constructor class.
        constructors = {
            running_mode_enum.REINFORCEMENT_LEARNING: ReinforcementLearningModeConstructor,
            running_mode_enum.CURRICULUM_LEARNING: CurriculumLearningModeConstructor,
            running_mode_enum.TRANSFER_LEARNING: TransferLearningModeConstructor,
            running_mode_enum.SCORING: ScoringModeConstructor,
            running_mode_enum.SAMPLING: SamplingModeConstructor,
            running_mode_enum.CREATE_MODEL: CreateModelModeConstructor,
            running_mode_enum.VALIDATION: ValidationModeConstructor,
            # running_mode_enum.AUTOMATED_CURRICULUM_LEARNING:
            #     AutomatedCurriculumLearningModeConstructor,
        }
        try:
            constructor = constructors[configuration.run_type]
        except KeyError:
            # Same exception type and message as before, so callers that
            # catch TypeError keep working.
            raise TypeError(f"Requested run type: '{configuration.run_type}' is not implemented.") from None
        return constructor(configuration)
from running_modes.constructors.base_running_mode import BaseRunningMode
from running_modes.constructors.create_model_mode_constructor import CreateModelModeConstructor
from running_modes.constructors.curriculum_learning_mode_constructor import CurriculumLearningModeConstructor
from running_modes.constructors.reinforcement_learning_mode_constructor import ReinforcementLearningModeConstructor
from running_modes.constructors.sampling_mode_constructor import SamplingModeConstructor
from running_modes.constructors.scoring_mode_constructor import ScoringModeConstructor
from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor
from running_modes.constructors.validation_mode_constructor import ValidationModeConstructor
from running_modes.enums.running_mode_enum import RunningModeEnum
class RunningMode:
def __new__(cls, configuration: GeneralConfigurationEnvelope) -> BaseRunningMode:
running_mode_enum = RunningModeEnum()
_configuration = configuration
if configuration.run_type == running_mode_enum.REINFORCEMENT_LEARNING:
return ReinforcementLearningModeConstructor(configuration)
if configuration.run_type == running_mode_enum.CURRICULUM_LEARNING:
return CurriculumLearningModeConstructor(configuration)
if configuration.run_type == running_mode_enum.TRANSFER_LEARNING:
return TransferLearningModeConstructor(configuration)
if configuration.run_type == running_mode_enum.SCORING:
return ScoringModeConstructor(configuration)
if configuration.run_type == running_mode_enum.SAMPLING:
return SamplingModeConstructor(configuration)
if configuration.run_type == running_mode_enum.CREATE_MODEL:
return CreateModelModeConstructor(configuration)
if configuration.run_type == running_mode_enum.VALIDATION:
return ValidationModeConstructor(configuration)
# if configuration.run_type == running_mode_enum.AUTOMATED_CURRICULUM_LEARNING:
# return AutomatedCurriculumLearningModeConstructor(configuration)
else:
raise TypeError(f"Requested run type: '{configuration.run_type}' is not implemented.") | 0 | 0 |
1477ad88726678c8460cc5fe89ba40da27efa1cb | 626 | py | Python | ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py | uk-gov-mirror/nhsdigital.api-management-utils | 4ee5489f7ce7595c371e2f4e83fc0c753308905a | [
"MIT"
] | null | null | null | ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py | uk-gov-mirror/nhsdigital.api-management-utils | 4ee5489f7ce7595c371e2f4e83fc0c753308905a | [
"MIT"
] | 20 | 2020-05-27T15:00:31.000Z | 2021-09-13T11:38:58.000Z | ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py | uk-gov-mirror/nhsdigital.api-management-utils | 4ee5489f7ce7595c371e2f4e83fc0c753308905a | [
"MIT"
] | 3 | 2021-04-11T07:31:36.000Z | 2022-01-24T11:23:18.000Z | from ansible_collections.nhsd.apigee.plugins.module_utils import constants
def org_from_env(environment) -> str:
    """Get nhsd apigee organization name from environment name.

    Raises:
        ValueError: if *environment* is not listed under any organization;
            the message enumerates all valid environment names.
    """
    for org, envs in constants.APIGEE_ORG_TO_ENV.items():
        if environment in envs:
            return org
    # Flatten all known environments for the error message in one pass
    # (the previous ``valid_envs = valid_envs + v`` loop was quadratic).
    valid_envs = [env for envs in constants.APIGEE_ORG_TO_ENV.values() for env in envs]
    raise ValueError(f"Unknown environment {environment}, valid environments are {valid_envs}")
class FilterModule:
    """Expose this collection's Jinja2 filters to Ansible."""

    @staticmethod
    def filters():
        # Map filter names (as used in playbooks) to their callables.
        return {'org_from_env': org_from_env}
| 28.454545 | 95 | 0.683706 | from ansible_collections.nhsd.apigee.plugins.module_utils import constants
def org_from_env(environment) -> str:
"""Get nhsd apigee organization name from environment name."""
for org, envs in constants.APIGEE_ORG_TO_ENV.items():
if environment in envs:
return org
valid_envs = []
for v in constants.APIGEE_ORG_TO_ENV.values():
valid_envs = valid_envs + v
raise ValueError(f"Unknown environment {environment}, valid environments are {valid_envs}")
class FilterModule:
@staticmethod
def filters():
return {
'org_from_env': org_from_env
}
| 0 | 0 |
9821d58fd217247d13910323dd73ffca785b11ca | 11,055 | py | Python | IPython/deathrow/astyle.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | IPython/deathrow/astyle.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-04-01T13:14:57.000Z | 2015-05-26T16:01:37.000Z | IPython/deathrow/astyle.py | dchichkov/ipython | 8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4 | [
"BSD-3-Clause-Clear"
] | 5 | 2019-06-29T03:13:02.000Z | 2020-04-23T04:47:11.000Z | """
``astyle`` provides classes for adding style (foreground and background color;
bold; blink; etc.) to terminal and curses output.
"""
import sys, os
try:
import curses
except ImportError:
curses = None
COLOR_BLACK = 0
COLOR_RED = 1
COLOR_GREEN = 2
COLOR_YELLOW = 3
COLOR_BLUE = 4
COLOR_MAGENTA = 5
COLOR_CYAN = 6
COLOR_WHITE = 7
# Attribute bit flags; combine with ``|`` (e.g. ``A_BOLD|A_UNDERLINE``).
A_BLINK = 1<<0 # Blinking text
A_BOLD = 1<<1 # Extra bright or bold text
A_DIM = 1<<2 # Half bright text
A_REVERSE = 1<<3 # Reverse-video text
A_STANDOUT = 1<<4 # The best highlighting mode available
A_UNDERLINE = 1<<5 # Underlined text
class Style(object):
    """
    Store foreground color, background color and attribute (bold, underlined
    etc.).
    """
    __slots__ = ("fg", "bg", "attrs")
    # Color name -> color index, as accepted by ``fromstr()``.
    COLORNAMES = {
        "black": COLOR_BLACK,
        "red": COLOR_RED,
        "green": COLOR_GREEN,
        "yellow": COLOR_YELLOW,
        "blue": COLOR_BLUE,
        "magenta": COLOR_MAGENTA,
        "cyan": COLOR_CYAN,
        "white": COLOR_WHITE,
    }
    # Attribute name -> attribute bit flag, as accepted by ``fromstr()``.
    ATTRNAMES = {
        "blink": A_BLINK,
        "bold": A_BOLD,
        "dim": A_DIM,
        "reverse": A_REVERSE,
        "standout": A_STANDOUT,
        "underline": A_UNDERLINE,
    }
    def __init__(self, fg, bg, attrs=0):
        """
        Create a ``Style`` object with ``fg`` as the foreground color,
        ``bg`` as the background color and ``attrs`` as the attributes.

        Examples:
        >>> Style(COLOR_RED, COLOR_BLACK)
        <Style fg=red bg=black attrs=0>
        >>> Style(COLOR_YELLOW, COLOR_BLUE, A_BOLD|A_UNDERLINE)
        <Style fg=yellow bg=blue attrs=bold|underline>
        """
        self.fg = fg
        self.bg = bg
        self.attrs = attrs
    def __call__(self, *args):
        # Apply this style: plain strings become (self, string) entries;
        # existing Text objects are merged with their styles unchanged.
        text = Text()
        for arg in args:
            if isinstance(arg, Text):
                text.extend(arg)
            else:
                text.append((self, arg))
        return text
    def __eq__(self, other):
        return self.fg == other.fg and self.bg == other.bg and self.attrs == other.attrs
    def __ne__(self, other):
        # Bug fix: this method was misspelled ``__neq__``, which is not a
        # special method name, so ``!=`` never invoked it.
        return self.fg != other.fg or self.bg != other.bg or self.attrs != other.attrs
    def __repr__(self):
        color2name = ("black", "red", "green", "yellow", "blue", "magenta", "cyan", "white")
        attrs2name = ("blink", "bold", "dim", "reverse", "standout", "underline")
        # ``range`` instead of Python-2-only ``xrange`` (works on both).
        return "<%s fg=%s bg=%s attrs=%s>" % (
            self.__class__.__name__, color2name[self.fg], color2name[self.bg],
            "|".join([attrs2name[b] for b in range(6) if self.attrs&(1<<b)]) or 0)
    def fromstr(cls, value):
        """
        Create a ``Style`` object from a string. The format looks like this:
        ``"red:black:bold|blink"``.  Missing parts default to a plain
        white-on-black style with no attributes.
        """
        # defaults
        fg = COLOR_WHITE
        bg = COLOR_BLACK
        attrs = 0
        parts = value.split(":")
        if len(parts) > 0:
            fg = cls.COLORNAMES[parts[0].lower()]
        if len(parts) > 1:
            bg = cls.COLORNAMES[parts[1].lower()]
        if len(parts) > 2:
            for strattr in parts[2].split("|"):
                attrs |= cls.ATTRNAMES[strattr.lower()]
        return cls(fg, bg, attrs)
    fromstr = classmethod(fromstr)
    def fromenv(cls, name, default):
        """
        Create a ``Style`` from an environment variable named ``name``
        (using ``default`` if the environment variable doesn't exist).
        """
        return cls.fromstr(os.environ.get(name, default))
    fromenv = classmethod(fromenv)
def switchstyle(s1, s2):
    """
    Return the ANSI escape sequence needed to switch from style ``s1`` to
    style ``s2`` (the empty string when no switch is required).

    Only the BLINK/BOLD/UNDERLINE/REVERSE attributes take part in the
    comparison; DIM and STANDOUT have no escape code here.
    """
    attrmask = (A_BLINK|A_BOLD|A_UNDERLINE|A_REVERSE)
    a1 = s1.attrs & attrmask
    a2 = s2.attrs & attrmask
    args = []
    if s1 != s2:
        # do we have to get rid of the bold/underline/blink bit?
        # (can only be done by a reset)
        # use reset when our target color is the default color
        # (this is shorter than 37;40)
        if (a1 & ~a2 or s2==style_default):
            args.append("0")
            # After a reset the terminal is in the default style, so the
            # rest of the comparison continues from style_default.
            s1 = style_default
            a1 = 0
        # now we know that old and new color have the same boldness,
        # or the new color is bold and the old isn't,
        # i.e. we only might have to switch bold on, not off
        if not (a1 & A_BOLD) and (a2 & A_BOLD):
            args.append("1")
        # Fix underline
        if not (a1 & A_UNDERLINE) and (a2 & A_UNDERLINE):
            args.append("4")
        # Fix blink
        if not (a1 & A_BLINK) and (a2 & A_BLINK):
            args.append("5")
        # Fix reverse
        if not (a1 & A_REVERSE) and (a2 & A_REVERSE):
            args.append("7")
        # Fix foreground color
        if s1.fg != s2.fg:
            args.append("3%d" % s2.fg)
        # Finally fix the background color
        if s1.bg != s2.bg:
            args.append("4%d" % s2.bg)
        if args:
            return "\033[%sm" % ";".join(args)
    return ""
class Text(list):
    """
    A colored string. A ``Text`` object is a sequence, the sequence
    items will be ``(style, string)`` tuples.

    NOTE(review): ``append()`` and ``format()`` reference the Python 2
    builtins ``unicode`` and ``long``; as written this class is
    Python-2-only.
    """
    def __init__(self, *args):
        list.__init__(self)
        self.append(*args)
    def __repr__(self):
        # Render as "module.Text(item, ...)" by reusing list's repr
        # without its surrounding brackets.
        return "%s.%s(%s)" % (
            self.__class__.__module__, self.__class__.__name__,
            list.__repr__(self)[1:-1])
    def append(self, *args):
        # Normalize every argument to (style, string) tuples: Text
        # instances are flattened in, tuples are trusted as-is, plain
        # strings get style_default, anything else is str()-converted.
        for arg in args:
            if isinstance(arg, Text):
                self.extend(arg)
            elif isinstance(arg, tuple): # must be (style, string)
                list.append(self, arg)
            elif isinstance(arg, unicode):
                list.append(self, (style_default, arg))
            else:
                list.append(self, (style_default, str(arg)))
    def insert(self, index, *args):
        self[index:index] = Text(*args)
    def __add__(self, other):
        new = Text()
        new.append(self)
        new.append(other)
        return new
    def __iadd__(self, other):
        self.append(other)
        return self
    def format(self, styled=True):
        """
        This generator yields the strings that will make up the final
        colorized string.

        With ``styled`` true, escape sequences are emitted between
        segments whenever the style changes and the terminal is reset to
        ``style_default`` at the end; otherwise only the raw strings are
        yielded.
        """
        if styled:
            oldstyle = style_default
            for (style, string) in self:
                # Entries whose style slot is an int are skipped; the
                # ipipe hook yields such marker tuples -- TODO confirm
                # that is the only source.
                if not isinstance(style, (int, long)):
                    switch = switchstyle(oldstyle, style)
                    if switch:
                        yield switch
                    if string:
                        yield string
                    oldstyle = style
            # Leave the terminal in the default style.
            switch = switchstyle(oldstyle, style_default)
            if switch:
                yield switch
        else:
            for (style, string) in self:
                if not isinstance(style, (int, long)):
                    yield string
    def string(self, styled=True):
        """
        Return the resulting string (with escape sequences, if ``styled``
        is true).
        """
        return "".join(self.format(styled))
    def __str__(self):
        """
        Return ``self`` as a string (without ANSI escape sequences).
        """
        return self.string(False)
    def write(self, stream, styled=True):
        """
        Write ``self`` to the output stream ``stream`` (with escape sequences,
        if ``styled`` is true).
        """
        for part in self.format(styled):
            stream.write(part)
try:
    import ipipe
except ImportError:
    # ipipe is optional; without it we simply skip the renderer hook.
    pass
else:
    def xrepr_astyle_text(self, mode="default"):
        # ipipe xrepr hook for Text objects: a leading marker tuple,
        # then the (style, string) pairs of the Text unchanged.
        yield (-1, True)
        for info in self:
            yield info
    # Register the hook so ipipe uses it whenever it renders a Text.
    ipipe.xrepr.when_type(Text)(xrepr_astyle_text)
def streamstyle(stream, styled=None):
    """
    Decide whether output to ``stream`` should be styled.

    If ``styled`` is ``None``, return whether ``stream`` refers to a
    terminal; ``False`` when that cannot be determined (no real OS file
    behind the stream, or on Windows).  Otherwise ``styled`` is returned
    unchanged.
    """
    if styled is not None:
        return styled
    try:
        return os.isatty(stream.fileno())
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        # No usable file descriptor -- play it safe, emit no escapes.
        return False
def write(stream, styled, *texts):
    """
    Render ``texts`` as a single ``Text`` object and write it to
    ``stream``.  ``styled`` follows the ``streamstyle()`` convention
    (``None`` means auto-detect whether ``stream`` is a terminal).
    """
    Text(*texts).write(stream, streamstyle(stream, styled))
def writeln(stream, styled, *texts):
    """
    Write ``texts`` to ``stream`` and finish with a line feed.

    The newline is written raw, after any closing escape sequence that
    ``write()`` emitted.
    """
    write(stream, styled, *texts)
    stream.write("\n")
class Stream(object):
    """
    Stream wrapper that adds color output.

    Any attribute not defined here (``flush``, ``close``, ...) is
    delegated to the wrapped stream, so a ``Stream`` can be used where
    the underlying stream is expected.
    """
    def __init__(self, stream, styled=None):
        self.stream = stream
        # Decide once, at construction time, whether escape sequences
        # should be emitted for this stream.
        self.styled = streamstyle(stream, styled)
    def write(self, *texts):
        write(self.stream, self.styled, *texts)
    def writeln(self, *texts):
        writeln(self.stream, self.styled, *texts)
    def __getattr__(self, name):
        # Fall through to the wrapped stream for everything else.
        return getattr(self.stream, name)
class stdout(object):
    """
    Stream wrapper for ``sys.stdout`` that adds color output.

    ``sys.stdout`` is looked up on every call, so the wrapper keeps
    working if ``sys.stdout`` is replaced later.
    """
    def write(self, *texts):
        write(sys.stdout, None, *texts)
    def writeln(self, *texts):
        writeln(sys.stdout, None, *texts)
    def __getattr__(self, name):
        # Delegate unknown attributes to the live ``sys.stdout``.
        return getattr(sys.stdout, name)
# Replace the class with its singleton instance.
stdout = stdout()
class stderr(object):
    """
    Stream wrapper for ``sys.stderr`` that adds color output.

    ``sys.stderr`` is looked up on every call, so the wrapper keeps
    working if ``sys.stderr`` is replaced later.
    """
    def write(self, *texts):
        write(sys.stderr, None, *texts)
    def writeln(self, *texts):
        writeln(sys.stderr, None, *texts)
    def __getattr__(self, name):
        # Bug fix: attribute access previously delegated to ``sys.stdout``
        # (copy/paste error); it must delegate to ``sys.stderr``.
        return getattr(sys.stderr, name)
# Replace the class with its singleton instance.
stderr = stderr()
if curses is not None:
    # Translate our color indices (0-7) to the curses COLOR_* constants.
    # Both palettes use the same ordering, so this is effectively the
    # identity mapping; kept explicit for clarity.
    COLOR2CURSES = [
        COLOR_BLACK,
        COLOR_RED,
        COLOR_GREEN,
        COLOR_YELLOW,
        COLOR_BLUE,
        COLOR_MAGENTA,
        COLOR_CYAN,
        COLOR_WHITE,
    ]
    # Translate our attribute bit flags to the corresponding curses
    # attribute flags.
    A2CURSES = {
        A_BLINK: curses.A_BLINK,
        A_BOLD: curses.A_BOLD,
        A_DIM: curses.A_DIM,
        A_REVERSE: curses.A_REVERSE,
        A_STANDOUT: curses.A_STANDOUT,
        A_UNDERLINE: curses.A_UNDERLINE,
    }
# default style
style_default = Style.fromstr("white:black")
# Styles for datatypes
style_type_none = Style.fromstr("magenta:black")
style_type_bool = Style.fromstr("magenta:black")
style_type_number = Style.fromstr("yellow:black")
style_type_datetime = Style.fromstr("magenta:black")
style_type_type = Style.fromstr("cyan:black")
# Styles for URLs and file/directory names
style_url = Style.fromstr("green:black")
style_dir = Style.fromstr("cyan:black")
style_file = Style.fromstr("green:black")
# Style for ellipsis (when an output has been shortened)
# NOTE(review): "style_ellisis" is a typo for "ellipsis", kept because
# external code may reference the name.
style_ellisis = Style.fromstr("red:black")
# Style for displaying exceptions
style_error = Style.fromstr("red:black")
# Style for displaying non-existing attributes
style_nodata = Style.fromstr("red:black")
| 27.568579 | 92 | 0.569064 | """
``astyle`` provides classes for adding style (foreground and background color;
bold; blink; etc.) to terminal and curses output.
"""
import sys, os
try:
import curses
except ImportError:
curses = None
COLOR_BLACK = 0
COLOR_RED = 1
COLOR_GREEN = 2
COLOR_YELLOW = 3
COLOR_BLUE = 4
COLOR_MAGENTA = 5
COLOR_CYAN = 6
COLOR_WHITE = 7
A_BLINK = 1<<0 # Blinking text
A_BOLD = 1<<1 # Extra bright or bold text
A_DIM = 1<<2 # Half bright text
A_REVERSE = 1<<3 # Reverse-video text
A_STANDOUT = 1<<4 # The best highlighting mode available
A_UNDERLINE = 1<<5 # Underlined text
class Style(object):
"""
Store foreground color, background color and attribute (bold, underlined
etc.).
"""
__slots__ = ("fg", "bg", "attrs")
COLORNAMES = {
"black": COLOR_BLACK,
"red": COLOR_RED,
"green": COLOR_GREEN,
"yellow": COLOR_YELLOW,
"blue": COLOR_BLUE,
"magenta": COLOR_MAGENTA,
"cyan": COLOR_CYAN,
"white": COLOR_WHITE,
}
ATTRNAMES = {
"blink": A_BLINK,
"bold": A_BOLD,
"dim": A_DIM,
"reverse": A_REVERSE,
"standout": A_STANDOUT,
"underline": A_UNDERLINE,
}
def __init__(self, fg, bg, attrs=0):
"""
Create a ``Style`` object with ``fg`` as the foreground color,
``bg`` as the background color and ``attrs`` as the attributes.
Examples:
>>> Style(COLOR_RED, COLOR_BLACK)
<Style fg=red bg=black attrs=0>
>>> Style(COLOR_YELLOW, COLOR_BLUE, A_BOLD|A_UNDERLINE)
<Style fg=yellow bg=blue attrs=bold|underline>
"""
self.fg = fg
self.bg = bg
self.attrs = attrs
def __call__(self, *args):
text = Text()
for arg in args:
if isinstance(arg, Text):
text.extend(arg)
else:
text.append((self, arg))
return text
def __eq__(self, other):
return self.fg == other.fg and self.bg == other.bg and self.attrs == other.attrs
def __neq__(self, other):
return self.fg != other.fg or self.bg != other.bg or self.attrs != other.attrs
def __repr__(self):
color2name = ("black", "red", "green", "yellow", "blue", "magenta", "cyan", "white")
attrs2name = ("blink", "bold", "dim", "reverse", "standout", "underline")
return "<%s fg=%s bg=%s attrs=%s>" % (
self.__class__.__name__, color2name[self.fg], color2name[self.bg],
"|".join([attrs2name[b] for b in xrange(6) if self.attrs&(1<<b)]) or 0)
def fromstr(cls, value):
"""
Create a ``Style`` object from a string. The format looks like this:
``"red:black:bold|blink"``.
"""
# defaults
fg = COLOR_WHITE
bg = COLOR_BLACK
attrs = 0
parts = value.split(":")
if len(parts) > 0:
fg = cls.COLORNAMES[parts[0].lower()]
if len(parts) > 1:
bg = cls.COLORNAMES[parts[1].lower()]
if len(parts) > 2:
for strattr in parts[2].split("|"):
attrs |= cls.ATTRNAMES[strattr.lower()]
return cls(fg, bg, attrs)
fromstr = classmethod(fromstr)
def fromenv(cls, name, default):
"""
Create a ``Style`` from an environment variable named ``name``
(using ``default`` if the environment variable doesn't exist).
"""
return cls.fromstr(os.environ.get(name, default))
fromenv = classmethod(fromenv)
def switchstyle(s1, s2):
"""
Return the ANSI escape sequence needed to switch from style ``s1`` to
style ``s2``.
"""
attrmask = (A_BLINK|A_BOLD|A_UNDERLINE|A_REVERSE)
a1 = s1.attrs & attrmask
a2 = s2.attrs & attrmask
args = []
if s1 != s2:
# do we have to get rid of the bold/underline/blink bit?
# (can only be done by a reset)
# use reset when our target color is the default color
# (this is shorter than 37;40)
if (a1 & ~a2 or s2==style_default):
args.append("0")
s1 = style_default
a1 = 0
# now we know that old and new color have the same boldness,
# or the new color is bold and the old isn't,
# i.e. we only might have to switch bold on, not off
if not (a1 & A_BOLD) and (a2 & A_BOLD):
args.append("1")
# Fix underline
if not (a1 & A_UNDERLINE) and (a2 & A_UNDERLINE):
args.append("4")
# Fix blink
if not (a1 & A_BLINK) and (a2 & A_BLINK):
args.append("5")
# Fix reverse
if not (a1 & A_REVERSE) and (a2 & A_REVERSE):
args.append("7")
# Fix foreground color
if s1.fg != s2.fg:
args.append("3%d" % s2.fg)
# Finally fix the background color
if s1.bg != s2.bg:
args.append("4%d" % s2.bg)
if args:
return "\033[%sm" % ";".join(args)
return ""
class Text(list):
"""
A colored string. A ``Text`` object is a sequence, the sequence
items will be ``(style, string)`` tuples.
"""
def __init__(self, *args):
list.__init__(self)
self.append(*args)
def __repr__(self):
return "%s.%s(%s)" % (
self.__class__.__module__, self.__class__.__name__,
list.__repr__(self)[1:-1])
def append(self, *args):
for arg in args:
if isinstance(arg, Text):
self.extend(arg)
elif isinstance(arg, tuple): # must be (style, string)
list.append(self, arg)
elif isinstance(arg, unicode):
list.append(self, (style_default, arg))
else:
list.append(self, (style_default, str(arg)))
def insert(self, index, *args):
self[index:index] = Text(*args)
def __add__(self, other):
new = Text()
new.append(self)
new.append(other)
return new
def __iadd__(self, other):
self.append(other)
return self
def format(self, styled=True):
"""
This generator yields the strings that will make up the final
colorized string.
"""
if styled:
oldstyle = style_default
for (style, string) in self:
if not isinstance(style, (int, long)):
switch = switchstyle(oldstyle, style)
if switch:
yield switch
if string:
yield string
oldstyle = style
switch = switchstyle(oldstyle, style_default)
if switch:
yield switch
else:
for (style, string) in self:
if not isinstance(style, (int, long)):
yield string
def string(self, styled=True):
"""
Return the resulting string (with escape sequences, if ``styled``
is true).
"""
return "".join(self.format(styled))
def __str__(self):
"""
Return ``self`` as a string (without ANSI escape sequences).
"""
return self.string(False)
def write(self, stream, styled=True):
"""
Write ``self`` to the output stream ``stream`` (with escape sequences,
if ``styled`` is true).
"""
for part in self.format(styled):
stream.write(part)
try:
import ipipe
except ImportError:
pass
else:
def xrepr_astyle_text(self, mode="default"):
yield (-1, True)
for info in self:
yield info
ipipe.xrepr.when_type(Text)(xrepr_astyle_text)
def streamstyle(stream, styled=None):
"""
If ``styled`` is ``None``, return whether ``stream`` refers to a terminal.
If this can't be determined (either because ``stream`` doesn't refer to a
real OS file, or because you're on Windows) return ``False``. If ``styled``
is not ``None`` ``styled`` will be returned unchanged.
"""
if styled is None:
try:
styled = os.isatty(stream.fileno())
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
styled = False
return styled
def write(stream, styled, *texts):
"""
Write ``texts`` to ``stream``.
"""
text = Text(*texts)
text.write(stream, streamstyle(stream, styled))
def writeln(stream, styled, *texts):
"""
Write ``texts`` to ``stream`` and finish with a line feed.
"""
write(stream, styled, *texts)
stream.write("\n")
class Stream(object):
"""
Stream wrapper that adds color output.
"""
def __init__(self, stream, styled=None):
self.stream = stream
self.styled = streamstyle(stream, styled)
def write(self, *texts):
write(self.stream, self.styled, *texts)
def writeln(self, *texts):
writeln(self.stream, self.styled, *texts)
def __getattr__(self, name):
return getattr(self.stream, name)
class stdout(object):
"""
Stream wrapper for ``sys.stdout`` that adds color output.
"""
def write(self, *texts):
write(sys.stdout, None, *texts)
def writeln(self, *texts):
writeln(sys.stdout, None, *texts)
def __getattr__(self, name):
return getattr(sys.stdout, name)
stdout = stdout()
class stderr(object):
"""
Stream wrapper for ``sys.stderr`` that adds color output.
"""
def write(self, *texts):
write(sys.stderr, None, *texts)
def writeln(self, *texts):
writeln(sys.stderr, None, *texts)
def __getattr__(self, name):
return getattr(sys.stdout, name)
stderr = stderr()
if curses is not None:
# This is probably just range(8)
COLOR2CURSES = [
COLOR_BLACK,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE,
]
A2CURSES = {
A_BLINK: curses.A_BLINK,
A_BOLD: curses.A_BOLD,
A_DIM: curses.A_DIM,
A_REVERSE: curses.A_REVERSE,
A_STANDOUT: curses.A_STANDOUT,
A_UNDERLINE: curses.A_UNDERLINE,
}
# default style
style_default = Style.fromstr("white:black")
# Styles for datatypes
style_type_none = Style.fromstr("magenta:black")
style_type_bool = Style.fromstr("magenta:black")
style_type_number = Style.fromstr("yellow:black")
style_type_datetime = Style.fromstr("magenta:black")
style_type_type = Style.fromstr("cyan:black")
# Style for URLs and file/directory names
style_url = Style.fromstr("green:black")
style_dir = Style.fromstr("cyan:black")
style_file = Style.fromstr("green:black")
# Style for ellipsis (when an output has been shortened
style_ellisis = Style.fromstr("red:black")
# Style for displaying exceptions
style_error = Style.fromstr("red:black")
# Style for displaying non-existing attributes
style_nodata = Style.fromstr("red:black")
| 0 | 0 |
0ba81da5ce04ec487462a1396a9e594fbd6ac8ba | 853 | py | Python | 0000_examples/gelsight/tst.py | Photon26/wrs-main_0614 | c0d0e38deac9785e9c382305f65f3ac5f221787d | [
"MIT"
] | null | null | null | 0000_examples/gelsight/tst.py | Photon26/wrs-main_0614 | c0d0e38deac9785e9c382305f65f3ac5f221787d | [
"MIT"
] | null | null | null | 0000_examples/gelsight/tst.py | Photon26/wrs-main_0614 | c0d0e38deac9785e9c382305f65f3ac5f221787d | [
"MIT"
] | null | null | null | import pickle
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import rbt_con.force_control as ur3dx
# import robot_con.ur.ur3_dual_x as ur3dx
import visualization.panda.world as wd
import modeling.geometric_model as gm
import motion.optimization_based.incremental_nik as inik
import numpy as np
import modeling.collision_model as cm
import cv2
import img_to_depth as itd
import time
import motion.probabilistic.rrt_connect as rrtc
# Connect to the two physical UR3 arm controllers and the auxiliary PC.
# NOTE(review): requires live hardware reachable at these LAN addresses.
ur_dual_x = ur3dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.100')
# Visualization world; draw the base coordinate frame at the origin.
base = wd.World(cam_pos=[2,1,3], lookat_pos=[0,0,1.1])
gm.gen_frame().attach_to(base)
# Mirror the real left arm's current joint angles in the simulated robot.
robot_s = ur3d.UR3Dual()
jnt = ur_dual_x.get_jnt_values("lft_arm")
robot_s.fk(component_name="lft_arm",jnt_values= np.array(jnt))
# Render the simulated robot mesh (with TCP frames shown) into the scene.
robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
robot_meshmodel.attach_to(base)
base.run() | 32.807692 | 98 | 0.805393 | import pickle
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import rbt_con.force_control as ur3dx
# import robot_con.ur.ur3_dual_x as ur3dx
import visualization.panda.world as wd
import modeling.geometric_model as gm
import motion.optimization_based.incremental_nik as inik
import numpy as np
import modeling.collision_model as cm
import cv2
import img_to_depth as itd
import time
import motion.probabilistic.rrt_connect as rrtc
ur_dual_x = ur3dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.100')
base = wd.World(cam_pos=[2,1,3], lookat_pos=[0,0,1.1])
gm.gen_frame().attach_to(base)
robot_s = ur3d.UR3Dual()
jnt = ur_dual_x.get_jnt_values("lft_arm")
robot_s.fk(component_name="lft_arm",jnt_values= np.array(jnt))
robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
robot_meshmodel.attach_to(base)
base.run() | 0 | 0 |
3fd17c06364dbbcfbf61a1c852f8a863c6286b83 | 4,656 | py | Python | galaxy_model/spiral_arms/three_kpc.py | K-Monty/galaxy-model | 9515d7c87c31e68338466d2044d8e9c679bf8648 | [
"MIT"
] | null | null | null | galaxy_model/spiral_arms/three_kpc.py | K-Monty/galaxy-model | 9515d7c87c31e68338466d2044d8e9c679bf8648 | [
"MIT"
] | null | null | null | galaxy_model/spiral_arms/three_kpc.py | K-Monty/galaxy-model | 9515d7c87c31e68338466d2044d8e9c679bf8648 | [
"MIT"
] | null | null | null | """
This module uses the SpiralArm superclass, with some modifications,
to create the 3-kpc arm.
"""
from shapely.geometry.polygon import Polygon
from descartes import PolygonPatch
from .spiral_parameters import Three_Kpc
from . import spiral_property as spiral_eq
from .spiral_arm_superclass import SpiralArm
class ThreeKpcArm(SpiralArm):
    def __init__(self):
        """Set up the 3-kpc arm geometry and its plotting patches."""
        # Parameter table for this arm (kink longitude/radius, psi, ...).
        self.params = Three_Kpc
        self._color = 'yellow'
        self.tuning_window = 3
        # Spine samples: radii, cartesian coords, longitudes and widths,
        # each as a [near, far] pair of lists.
        self._spine_r_kpc, self.x_spine, self.y_spine, self._B_spine, \
            self._width_kpc = \
            self.spine_radii_coords_b_range_and_width_with_smoothing()
        # NOTE(review): the "inner"/"outer" coordinate sets feed the
        # "near"/"far" polygons respectively -- confirm naming intent.
        self._poly_coords_inner, self._poly_coords_outer = self._poly_coords()
        self._polygon_near = Polygon(self._poly_coords_inner)
        self._polygon_far = Polygon(self._poly_coords_outer)
        # Translucent patches used when drawing the arm outline.
        self.polypatch_near = PolygonPatch(self._polygon_near,
                                           color=self._color,
                                           alpha=0.2)
        self.polypatch_far = PolygonPatch(self._polygon_far,
                                          color=self._color,
                                          alpha=0.2)
def __repr__(self):
return "ThreeKpc"
def _radii_factory(self, B):
return self._spine_radius_at_B_and_psi(
B, self.params['B-kink'],
self.params['psi'],
self.params['R-kink'])
def _width_factory(self, B, r):
return spiral_eq.CylinderSize.width_kpc(
self.params['w-kink'], r, self.params['R-kink']) + 0.1
def spine_radii_coords_b_range_and_width_with_smoothing(self):
r_spine_near = []
x_spine_near = []
y_spine_near = []
width_kpc_near = []
B_list_near = [x for x in range(self.params['B-begin-near'],
self.params['B-end-near'])]
r_spine_far = []
x_spine_far = []
y_spine_far = []
width_kpc_far = []
B_list_far = [x for x in range(self.params['B-begin-far'],
self.params['B-end-far'])]
for B_near in B_list_near:
r_near = self._radii_factory(B_near)
w_near = self._width_factory(B_near, r_near)
r_spine_near.append(r_near)
width_kpc_near.append(w_near)
for B_far in B_list_far:
r_far = self._radii_factory(B_far)
w_far = self._width_factory(B_far, r_far)
r_spine_far.append(r_far)
width_kpc_far.append(w_far)
for B_near, r_near in zip(B_list_near, r_spine_near):
cartesian_coords = spiral_eq.polar_to_cartesian(r_near, B_near)
x_spine_near.append(cartesian_coords[0])
y_spine_near.append(cartesian_coords[1])
for B_far, r_far in zip(B_list_far, r_spine_far):
cartesian_coords = spiral_eq.polar_to_cartesian(r_far, B_far)
x_spine_far.append(cartesian_coords[0])
y_spine_far.append(cartesian_coords[1])
r_spine = [r_spine_near, r_spine_far]
x_spine = [x_spine_near, x_spine_far]
y_spine = [y_spine_near, y_spine_far]
B_list = [B_list_near, B_list_far]
width_kpc = [width_kpc_near, width_kpc_far]
return (r_spine, x_spine, y_spine, B_list, width_kpc)
def _poly_coords(self):
x_border_inner_near, y_border_inner_near, x_border_outer_near, \
y_border_outer_near = \
self._border_coords(self.x_spine[0],
self.y_spine[0],
self._B_spine[0],
self._width_kpc[0])
x_border_inner_far, y_border_inner_far, x_border_outer_far, \
y_border_outer_far = \
self._border_coords(self.x_spine[1],
self.y_spine[1],
self._B_spine[1],
self._width_kpc[1])
x_poly_edge_coords_near = x_border_inner_near \
+ x_border_outer_near[::-1]
y_poly_edge_coords_near = y_border_inner_near \
+ y_border_outer_near[::-1]
poly_edge_coords_near = [xy for xy in zip(x_poly_edge_coords_near,
y_poly_edge_coords_near)]
x_poly_edge_coords_far = x_border_inner_far + x_border_outer_far[::-1]
y_poly_edge_coords_far = y_border_inner_far + y_border_outer_far[::-1]
poly_edge_coords_far = [xy for xy in zip(x_poly_edge_coords_far,
y_poly_edge_coords_far)]
return poly_edge_coords_near, poly_edge_coords_far
| 40.486957 | 78 | 0.597938 | """
This module use SpiralArm superclass, with some modifications,
to create 3-kpc arm.
"""
from shapely.geometry.polygon import Polygon
from descartes import PolygonPatch
from .spiral_parameters import Three_Kpc
from . import spiral_property as spiral_eq
from .spiral_arm_superclass import SpiralArm
class ThreeKpcArm(SpiralArm):
    """Model of the 3-kpc arm, assembled from a near and a far segment.

    Every spine-related attribute stores a two-element list holding the
    near-segment values followed by the far-segment values, and one
    shapely polygon (plus matplotlib patch) is produced per segment.
    """

    def __init__(self):
        self.params = Three_Kpc
        self._color = 'yellow'
        self.tuning_window = 3
        (self._spine_r_kpc, self.x_spine, self.y_spine, self._B_spine,
         self._width_kpc) = \
            self.spine_radii_coords_b_range_and_width_with_smoothing()
        self._poly_coords_inner, self._poly_coords_outer = self._poly_coords()
        self._polygon_near = Polygon(self._poly_coords_inner)
        self._polygon_far = Polygon(self._poly_coords_outer)
        self.polypatch_near = PolygonPatch(
            self._polygon_near, color=self._color, alpha=0.2)
        self.polypatch_far = PolygonPatch(
            self._polygon_far, color=self._color, alpha=0.2)

    def __repr__(self):
        return "ThreeKpc"

    def _radii_factory(self, B):
        """Spine radius at bearing B for this arm's kink parameters."""
        arm = self.params
        return self._spine_radius_at_B_and_psi(
            B, arm['B-kink'], arm['psi'], arm['R-kink'])

    def _width_factory(self, B, r):
        """Arm width in kpc at radius r (plus this arm's 0.1 kpc widening)."""
        arm = self.params
        return spiral_eq.CylinderSize.width_kpc(
            arm['w-kink'], r, arm['R-kink']) + 0.1

    def _segment_spine_data(self, begin_key, end_key):
        """Radii, cartesian coords, bearings and widths for one segment."""
        bearings = list(range(self.params[begin_key], self.params[end_key]))
        radii, xs, ys, widths = [], [], [], []
        for bearing in bearings:
            radius = self._radii_factory(bearing)
            radii.append(radius)
            widths.append(self._width_factory(bearing, radius))
            coords = spiral_eq.polar_to_cartesian(radius, bearing)
            xs.append(coords[0])
            ys.append(coords[1])
        return radii, xs, ys, bearings, widths

    def spine_radii_coords_b_range_and_width_with_smoothing(self):
        """Return five [near, far] pairs: radii, x, y, bearings, widths."""
        near = self._segment_spine_data('B-begin-near', 'B-end-near')
        far = self._segment_spine_data('B-begin-far', 'B-end-far')
        return tuple([n, f] for n, f in zip(near, far))

    def _poly_coords(self):
        """Closed outline coordinates for the near and far segments."""
        outlines = []
        for segment in (0, 1):
            x_in, y_in, x_out, y_out = self._border_coords(
                self.x_spine[segment], self.y_spine[segment],
                self._B_spine[segment], self._width_kpc[segment])
            # Walk down one border and back up the other to close the ring.
            outlines.append(list(zip(x_in + x_out[::-1],
                                     y_in + y_out[::-1])))
        return outlines[0], outlines[1]
| 0 | 0 |
dbeff05f1c61b09708b8a95665c71d6764ac49fd | 2,045 | py | Python | rbi/rbi_budgets_scraper.py | cbgaindia/scrappers | f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9 | [
"MIT"
] | null | null | null | rbi/rbi_budgets_scraper.py | cbgaindia/scrappers | f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9 | [
"MIT"
] | null | null | null | rbi/rbi_budgets_scraper.py | cbgaindia/scrappers | f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9 | [
"MIT"
] | null | null | null | 'Code for scrapping RBI Data'
from datetime import date
from lxml import etree
import logging
from logging.config import fileConfig
from scrappers.scrapping_utils import ScrappingUtils
fileConfig('scrappers/logging_config.ini')
logger = logging.getLogger()
OUT_FOLDER = "rbi"
class RBIBudgetScraper(ScrappingUtils):
    # Scrapes the document table on an RBI "Annual Publications" page and
    # saves every linked XLS/PDF under OUT_FOLDER/<year>/<page title>/...
    def fetch_docs_for_year(self, url, year=None):
        '''Fetches all documents for a budget year.

        Args:
            url: RBI publication page to scrape.
            year: Label used for the output directory; defaults to the
                current calendar year.
        '''
        if not year:
            current_year = date.today().year
            year = "%s" % (current_year)
        page_dom = self.get_page_dom(url)
        title = self.get_text_from_element(page_dom, xpath="//h2[@class='page_title']/text()")
        download_dir = "%s/%s/%s" % (OUT_FOLDER, year, title)
        file_dir = download_dir
        for node in page_dom.xpath("//table[@class='tablebg']/tr"):
            # Rows with a 'tableheader' cell start a new section; all
            # following documents are saved into that sub-directory.
            node_title = self.get_text_from_element(node, xpath="./td[@class='tableheader']//text()")
            if node_title:
                file_dir = "%s/%s" % (download_dir, node_title)
                continue
            node_title = self.get_text_from_element(node, xpath="./td[@style]//text()")
            file_path = "%s/%s" % (file_dir, node_title)
            # Column 2 holds the XLS link, column 3 the PDF link; links are
            # rewritten to https before download.
            file_link = node.xpath("./td[2]/a[@target]/@href")
            if file_link:
                self.fetch_and_save_file(file_link[0].replace('http://', 'https://'), file_path + ".xls")
            file_link = node.xpath("./td[3]/a[@target]/@href")
            if file_link:
                self.fetch_and_save_file(file_link[0].replace('http://', 'https://'), file_path + ".pdf")
if __name__ == '__main__':
    # Download both RBI publications for each year 2002..2014.
    # NOTE(review): the two URLs do not vary with `year`, so every
    # iteration re-fetches the same pages into per-year folders — confirm
    # this is intended.
    obj = RBIBudgetScraper()
    for year in range(2002,2015):
        year = str(year)
        url1 = "https://www.rbi.org.in/scripts/AnnualPublications.aspx?head=Handbook%20of%20Statistics%20on%20Indian%20Economy"
        url2 = "https://rbi.org.in/Scripts/AnnualPublications.aspx?head=State+Finances+%3a+A+Study+of+Budgets"
        obj.fetch_docs_for_year(url1, year)
        obj.fetch_docs_for_year(url2, year)
| 44.456522 | 127 | 0.630318 | 'Code for scrapping RBI Data'
from datetime import date
from lxml import etree
import logging
from logging.config import fileConfig
from scrappers.scrapping_utils import ScrappingUtils
fileConfig('scrappers/logging_config.ini')
logger = logging.getLogger()
OUT_FOLDER = "rbi"
class RBIBudgetScraper(ScrappingUtils):
    """Scraper for the document tables on RBI annual-publication pages."""

    def fetch_docs_for_year(self, url, year=None):
        """Download every XLS/PDF linked from an RBI publication page.

        Files are saved under OUT_FOLDER/<year>/<page title>/<section>/,
        where a new section starts at each 'tableheader' row.

        Args:
            url: Publication page to scrape.
            year: Directory label; defaults to the current calendar year.
        """
        if not year:
            year = "%s" % (date.today().year)
        page_dom = self.get_page_dom(url)
        title = self.get_text_from_element(
            page_dom, xpath="//h2[@class='page_title']/text()")
        download_dir = "%s/%s/%s" % (OUT_FOLDER, year, title)
        file_dir = download_dir
        for row in page_dom.xpath("//table[@class='tablebg']/tr"):
            section = self.get_text_from_element(
                row, xpath="./td[@class='tableheader']//text()")
            if section:
                # Header rows open a new sub-directory for what follows.
                file_dir = "%s/%s" % (download_dir, section)
                continue
            doc_name = self.get_text_from_element(
                row, xpath="./td[@style]//text()")
            file_path = "%s/%s" % (file_dir, doc_name)
            # Column 2 carries the XLS link, column 3 the PDF link.
            for link_xpath, extension in (
                    ("./td[2]/a[@target]/@href", ".xls"),
                    ("./td[3]/a[@target]/@href", ".pdf")):
                links = row.xpath(link_xpath)
                if links:
                    self.fetch_and_save_file(
                        links[0].replace('http://', 'https://'),
                        file_path + extension)
if __name__ == '__main__':
    # Fetch both RBI publications for every year from 2002 through 2014.
    scraper = RBIBudgetScraper()
    handbook_url = ("https://www.rbi.org.in/scripts/AnnualPublications.aspx"
                    "?head=Handbook%20of%20Statistics%20on%20Indian%20Economy")
    state_finances_url = ("https://rbi.org.in/Scripts/AnnualPublications.aspx"
                          "?head=State+Finances+%3a+A+Study+of+Budgets")
    for year_number in range(2002, 2015):
        for source_url in (handbook_url, state_finances_url):
            scraper.fetch_docs_for_year(source_url, str(year_number))
| 0 | 0 |
30b7600879ed7470a3a4e7671d503041ddaee708 | 404 | py | Python | 0x11-python-network_1/2-post_email.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | 0x11-python-network_1/2-post_email.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | 0x11-python-network_1/2-post_email.py | Nahi-Terefe/alx-higher_level_programming | c67a78a6f79e853918963971f8352979e7691541 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
""" post email """
import urllib.request
import urllib.parse
import sys
if __name__ == "__main__":
value = {'email': sys.argv[2]}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(sys.argv[1], data)
with urllib.request.urlopen(req) as response:
res = response.read().decode(encoding='UTF-8')
print(res)
| 25.25 | 54 | 0.65099 | #!/usr/bin/python3
""" post email """
import urllib.request
import urllib.parse
import sys
if __name__ == "__main__":
value = {'email': sys.argv[2]}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(sys.argv[1], data)
with urllib.request.urlopen(req) as response:
res = response.read().decode(encoding='UTF-8')
print(res)
| 0 | 0 |
98dd68e4820e061630336d4d9dc6896eb5df6fb1 | 2,534 | py | Python | modules/selection.py | psp-codes/reduced-decoy-ensemble | 096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f | [
"MIT"
] | 2 | 2020-04-22T04:16:03.000Z | 2020-08-19T13:50:20.000Z | modules/selection.py | psp-codes/reduced-decoy-ensemble | 096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f | [
"MIT"
] | null | null | null | modules/selection.py | psp-codes/reduced-decoy-ensemble | 096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f | [
"MIT"
] | 1 | 2020-08-19T13:50:26.000Z | 2020-08-19T13:50:26.000Z | # selection.py
# since: 10/2018
# Developed by: Shehu Lab
"""Module for selecting next generation from current generation.
This module provides methods to select next generation from
current generation.
Available Functions:
- truncation: Selects next generation via elitism truncation selection.
"""
def truncation(parent_population, child_population, parents_scores,
               children_scores, elitism_rate):
    """Select the next generation by truncation selection with elitism.

    The best ``elitism_rate`` fraction of the parents (lower score =
    better: members are ranked by ascending score) is pooled with the
    whole offspring population, and the top ``len(parent_population)``
    members of that pool become the next generation.

    Args:
        parent_population: Members of the parent population.
        child_population: Members of the offspring population.
        parents_scores: Scores aligned with ``parent_population``.
        children_scores: Scores aligned with ``child_population``.
        elitism_rate: Fraction (0.0-1.0) of parents kept as elites.

    Returns:
        A list of members for the next generation, ordered best-first.
    """
    population_size = len(parent_population)
    elite_count = int(population_size * elitism_rate)
    # Rank parents best-first.  Python's stable sort breaks score ties by
    # original position, matching the (score, index) pair sort it replaces.
    ranked_parents = sorted(range(population_size),
                            key=lambda i: parents_scores[i])
    elite_members = [parent_population[i]
                     for i in ranked_parents[:elite_count]]
    elite_scores = sorted(parents_scores)[:elite_count]
    # Elite parents compete with the entire offspring population.
    pool = elite_members + child_population
    pool_scores = elite_scores + children_scores
    ranked_pool = sorted(range(len(pool)), key=lambda i: pool_scores[i])
    return [pool[i] for i in ranked_pool[:population_size]]
| 37.820896 | 79 | 0.723757 | # selection.py
# since: 10/2018
# Developed by: Shehu Lab
"""Module for selecting next generation from current generation.
This module provides methods to select next generation from
current generation.
Available Functions:
- truncation: Selects next generation via elitism truncation selection.
"""
def truncation(parent_population, child_population, parents_scores,
               children_scores, elitism_rate):
    """Selects next generation using elitism truncation selection.
    This function implements truncation selection while ensuring elitism
    to select a specific number of members for the next generation.
    Lower scores are treated as better: members are ranked by ascending
    score and the prefix of the ranking is kept.
    Args:
        parent_population: A list containing members of parent
            population.
        child_population: A list containing members of offspring
            population.
        parents_scores: A list containing scores of each member of the
            parent population. The format is:
                [member 1 score, member 2 score, ....]
            The order of members has to be consistent with
            parent_population argument.
        children_scores: A list containing scores of each member of the
            offspring population. The format is:
                [member 1 score, member 2 score, ....]
            The order of members has to be consistent with
            child_population argument.
        elitism_rate: A float indicating the elitism percentage.
    Returns:
        A list of members for the next generation of population.
    """
    population_size = len(parent_population)
    population_indices = list(range(population_size))
    # Rank parent indices by ascending score; ties fall back to the
    # original index via tuple comparison.
    sorted_parents_indices = [x for _, x in sorted(zip(
        parents_scores, population_indices
    ))]
    sorted_parents_scores = sorted(parents_scores)
    # Slice parent population using elitism rate
    slice_index = int(population_size * elitism_rate)
    selected_parents_indices = sorted_parents_indices[:slice_index]
    selected_parents = [parent_population[i] for i in selected_parents_indices]
    # Elite parents compete with the entire offspring population.
    combined_population = selected_parents + child_population
    combined_scores = sorted_parents_scores[:slice_index] + children_scores
    combined_population_indices = list(range(len(combined_population)))
    sorted_population_indices = [x for _, x in sorted(zip(
        combined_scores, combined_population_indices
    ))]
    selected_population_indices = sorted_population_indices[:population_size]
    # Truncate and return
    return [combined_population[i] for i in selected_population_indices]
| 0 | 0 |
3f46e12fb5d861255b9732050d691bdff492592b | 129 | py | Python | Desafios/desafio030.py | VanessaCML/python | 56133b9000ba89154f37038e11a3c2d1aa6d1094 | [
"MIT"
] | null | null | null | Desafios/desafio030.py | VanessaCML/python | 56133b9000ba89154f37038e11a3c2d1aa6d1094 | [
"MIT"
] | null | null | null | Desafios/desafio030.py | VanessaCML/python | 56133b9000ba89154f37038e11a3c2d1aa6d1094 | [
"MIT"
] | null | null | null | n = int(input('Digite um nmero: '))
# Report whether the integer n read above is even ("par") or odd ("ímpar").
# FIX: restore the accented characters ("número", "é", "ímpar") that were
# stripped from these user-facing messages (the duplicate copy of this file
# below preserves the original text).
if n % 2 == 0:
    print(f'O número {n} é par.')
else:
    print(f'O número {n} é ímpar.')
| 18.428571 | 36 | 0.542636 | n = int(input('Digite um número: '))
# Report whether the integer n read above is even ("par") or odd ("ímpar").
if n % 2 == 0:
    print(f'O número {n} é par.')
else:
    print(f'O número {n} é ímpar.')
| 12 | 0 |
be760d7ba6e881b8660d24ef1857485b859c9f5b | 1,487 | py | Python | sorted_insert_position.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | sorted_insert_position.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | sorted_insert_position.py | UPstartDeveloper/Problem_Solving_Practice | bd61333b3b056e82a94297e02bc05a17552e3496 | [
"MIT"
] | null | null | null | import math
def find_index(sorted_list, target):
    """Return the index where ``target`` is (or would be) in a sorted list.

    If ``target`` is present, the index of its first occurrence is
    returned; otherwise the index where it would be inserted to keep the
    list sorted (0 for "before everything", ``len(sorted_list)`` for
    "after everything").  Same contract as ``bisect.bisect_left``.

    BUGFIX: the previous recursive implementation recursed forever for
    targets falling strictly between two adjacent elements (e.g.
    ``find_index([1, 3, 5, 6], 4)``) because the "search the lower half"
    step could be re-entered with an unchanged (low > high) window.  This
    iterative bisection terminates for every input, including ``[]``.
    """
    low, high = 0, len(sorted_list)
    while low < high:
        mid = (low + high) // 2
        if sorted_list[mid] < target:
            # target lies strictly to the right of mid
            low = mid + 1
        else:
            # target is at mid or somewhere to its left
            high = mid
    return low
if __name__ == "__main__":
print(find_index([1, 3, 5, 6], 5))
| 34.581395 | 78 | 0.597848 | import math
def find_index(sorted_list, target):
    """Return the index where ``target`` is (or would be) in a sorted list.

    If ``target`` is present, the index of its first occurrence is
    returned; otherwise the index where it would be inserted to keep the
    list sorted (0 for "before everything", ``len(sorted_list)`` for
    "after everything").  Same contract as ``bisect.bisect_left``.

    BUGFIX: the previous recursive implementation recursed forever for
    targets falling strictly between two adjacent elements (e.g.
    ``find_index([1, 3, 5, 6], 4)``) because the "search the lower half"
    step could be re-entered with an unchanged (low > high) window.  This
    iterative bisection terminates for every input, including ``[]``.
    """
    low, high = 0, len(sorted_list)
    while low < high:
        mid = (low + high) // 2
        if sorted_list[mid] < target:
            # target lies strictly to the right of mid
            low = mid + 1
        else:
            # target is at mid or somewhere to its left
            high = mid
    return low
if __name__ == "__main__":
print(find_index([1, 3, 5, 6], 5))
| 0 | 0 |
b4bd721ab4429c8eb5ef6b2554f6ed76ab1c49a3 | 404 | py | Python | 2021/Day_8/AoCSignals.py | ArrowThunder/AoC | f4649115fc83b989745c83251a85710e76eb0368 | [
"MIT"
] | 1 | 2021-12-04T12:44:51.000Z | 2021-12-04T12:44:51.000Z | 2021/Day_8/AoCSignals.py | ArrowThunder/AoC2021 | f4649115fc83b989745c83251a85710e76eb0368 | [
"MIT"
] | null | null | null | 2021/Day_8/AoCSignals.py | ArrowThunder/AoC2021 | f4649115fc83b989745c83251a85710e76eb0368 | [
"MIT"
] | null | null | null | def parse_line(line):
line = line.split('|')
inputs = line[0].split()
outputs = line[1].split()
return inputs, outputs
# Count output codes with a "unique" segment count.  In seven-segment
# displays, lengths 2, 3, 4 and 7 identify exactly one digit each
# (1, 7, 4 and 8 respectively).
with open('input.txt') as file:
    total = 0
    for line in file:
        inputs, outputs = parse_line(line)
        for code in outputs:
            if len(code) == 2 or len(code) == 3 or len(code) == 4 or len(code) == 7:
                total += 1
print(total) | 28.857143 | 84 | 0.549505 | def parse_line(line):
line = line.split('|')
inputs = line[0].split()
outputs = line[1].split()
return inputs, outputs
# Advent of Code 2021 day 8 part 1: count output codes whose segment count
# is "unique" — lengths 2, 3, 4 and 7 identify exactly one seven-segment
# digit each (1, 7, 4 and 8).
with open('input.txt') as file:
    total = 0
    for line in file:
        inputs, outputs = parse_line(line)
        for code in outputs:
            # Idiom: one membership test instead of a chain of == checks.
            if len(code) in (2, 3, 4, 7):
                total += 1
    print(total)
13d4af942e0928ac2e926dad1c9830ea003345d6 | 1,885 | py | Python | 4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py | YaoZhou89/TGG | b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd | [
"MIT"
] | 7 | 2022-01-19T14:17:23.000Z | 2022-02-08T12:17:39.000Z | 4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py | YaoZhou89/TGG | b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd | [
"MIT"
] | null | null | null | 4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py | YaoZhou89/TGG | b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd | [
"MIT"
] | null | null | null | import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
# Command-line interface: input VCF (-v), reference FASTA (-r),
# output FASTA (-s) and flank size in bases (-f).
parser = argparse.ArgumentParser(description='Make fasta for each variant to align/augment.')
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-r', help='the reference FASTA file.', required=True)
parser.add_argument('-s', help='the output FASTA file with SV sequence to align/augment', required=True)
parser.add_argument('-f', default=50000, type=int,
                    help='the flank size. Default 50000.')
args = parser.parse_args()
# get chromosome length
ref = Fasta(args.r)
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
# Both handles opened here are closed at the bottom of the script.
fa_outf = open(args.s, 'w')
tail_buff = 1000 # tail buffer: no sequence extracted from a buffer at the chunk tails to ensure they stay untouched
# For each VCF record, write "left flank + ALT allele + right flank" as one
# FASTA entry named <chrom>_<flank start>_<flank end>_<variant id>.
for record in vcf_reader:
    chr_len = len(ref[record.CHROM])
    # retrieve alt allele with flanks
    # left flank sequence: up to args.f bases ending just before the variant
    fl1_e = record.POS - 1
    if fl1_e < tail_buff:
        # Variant too close to the chromosome start for a full flank:
        # start inside the tail buffer instead.
        # BUGFIX: this was `l1_s = tail_buff / 2`, a typo that left fl1_s
        # undefined (NameError on the first such record) or silently reused
        # the previous record's value afterwards.  Integer division keeps
        # the coordinate an int, as required for the slice below.
        fl1_s = tail_buff // 2
    else:
        fl1_s = fl1_e - args.f
    fl1_s = max(0, fl1_s) + tail_buff
    fl1_seq = ref[record.CHROM][fl1_s:fl1_e]
    fl1_seq = fl1_seq.seq
    # Get flank 2 sequence (starts just after the REF allele)
    fl2_s = record.POS + len(record.REF) - 1
    if fl2_s > chr_len - tail_buff:
        fl2_e = (chr_len + fl2_s)/2
    else:
        fl2_e = fl2_s + args.f
    fl2_e = min(fl2_e, len(ref[record.CHROM])) - tail_buff
    fl2_seq = ref[record.CHROM][int(fl2_s):int(fl2_e)]
    fl2_seq = fl2_seq.seq
    # Fasta record: left flank + ALT allele + right flank
    oseq = fl1_seq + str(record.ALT[0]) + fl2_seq
    svid = '{}_{}_{}_{}'.format(record.CHROM, int(fl1_s), int(fl2_e), record.ID)
    orec = SeqRecord(MutableSeq(oseq.upper()), id=svid,
                     description='')
    SeqIO.write(orec, fa_outf, "fasta")
fa_outf.close()
vcfi.close()
| 35.566038 | 116 | 0.669496 | import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
# Command-line interface: input VCF (-v), reference FASTA (-r),
# output FASTA (-s) and flank size in bases (-f).
parser = argparse.ArgumentParser(description='Make fasta for each variant to align/augment.')
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-r', help='the reference FASTA file.', required=True)
parser.add_argument('-s', help='the output FASTA file with SV sequence to align/augment', required=True)
parser.add_argument('-f', default=50000, type=int,
                    help='the flank size. Default 50000.')
args = parser.parse_args()
# get chromosome length
ref = Fasta(args.r)
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
# Both handles opened here are closed at the bottom of the script.
fa_outf = open(args.s, 'w')
tail_buff = 1000 # tail buffer: no sequence extracted from a buffer at the chunk tails to ensure they stay untouched
# For each VCF record, write "left flank + ALT allele + right flank" as one
# FASTA entry named <chrom>_<flank start>_<flank end>_<variant id>.
for record in vcf_reader:
    chr_len = len(ref[record.CHROM])
    # retrieve alt allele with flanks
    # left flank sequence
    fl1_e = record.POS - 1
    if fl1_e < tail_buff:
        # NOTE(review): `l1_s` looks like a typo for `fl1_s` — when this
        # branch runs, fl1_s is left undefined (NameError on the first such
        # record) or silently reuses the previous record's value.  Confirm
        # and rename to `fl1_s`.
        l1_s = tail_buff / 2
    else:
        fl1_s = fl1_e - args.f
    fl1_s = max(0, fl1_s) + tail_buff
    fl1_seq = ref[record.CHROM][fl1_s:fl1_e]
    fl1_seq = fl1_seq.seq
    # Get flank 2 sequence
    fl2_s = record.POS + len(record.REF) - 1
    if fl2_s > chr_len - tail_buff:
        fl2_e = (chr_len + fl2_s)/2
    else:
        fl2_e = fl2_s + args.f
    fl2_e = min(fl2_e, len(ref[record.CHROM])) - tail_buff
    fl2_seq = ref[record.CHROM][int(fl2_s):int(fl2_e)]
    fl2_seq = fl2_seq.seq
    # Fasta record
    oseq = fl1_seq + str(record.ALT[0]) + fl2_seq
    svid = '{}_{}_{}_{}'.format(record.CHROM, int(fl1_s), int(fl2_e), record.ID)
    orec = SeqRecord(MutableSeq(oseq.upper()), id=svid,
                     description='')
    SeqIO.write(orec, fa_outf, "fasta")
fa_outf.close()
vcfi.close()
| 0 | 0 |
878b92fc40120b934c575d753a800a9538d9a14d | 22,851 | py | Python | gateware/daqnet/ethernet/rmii.py | dvdfreitag/daqnet | 6a84185d2cf35d99dd620d1e09b4df7fb0630784 | [
"MIT"
] | null | null | null | gateware/daqnet/ethernet/rmii.py | dvdfreitag/daqnet | 6a84185d2cf35d99dd620d1e09b4df7fb0630784 | [
"MIT"
] | null | null | null | gateware/daqnet/ethernet/rmii.py | dvdfreitag/daqnet | 6a84185d2cf35d99dd620d1e09b4df7fb0630784 | [
"MIT"
] | null | null | null | """
Ethernet RMII Interface
Copyright 2018-2019 Adam Greig
Released under the MIT license; see LICENSE for details.
"""
from nmigen import Elaboratable, Module, Signal, Cat
from .crc import CRC32
from .mac_address_match import MACAddressMatch
class RMIIRx(Elaboratable):
    """
    RMII receive module
    Receives incoming packets and saves them to a memory. Validates incoming
    frame check sequence and only asserts `rx_valid` when an entire valid
    packet has been saved to the port.
    This module must be run in the RMII ref_clk domain, and the memory port
    and inputs and outputs must also be in that clock domain.
    Parameters:
        * `mac_addr`: 6-byte MAC address (list of ints)
    Ports:
        * `write_port`: a write-capable memory port, 8 bits wide by 2048,
          running in the RMII ref_clk domain
    Pins:
        * `crs_dv`: RMII carrier sense/data valid
        * `rxd0`: RMII receive data 0
        * `rxd1`: RMII receive data 1
    Outputs:
        * `rx_valid`: pulsed when a valid packet is in memory
        * `rx_offset`: n-bit start address of received packet
        * `rx_len`: 11-bit length of received packet
    """
    def __init__(self, mac_addr, write_port, crs_dv, rxd0, rxd1):
        # Outputs
        self.rx_valid = Signal()
        self.rx_offset = Signal(write_port.addr.width)
        self.rx_len = Signal(11)
        # Store arguments
        self.mac_addr = mac_addr
        self.write_port = write_port
        self.crs_dv = crs_dv
        self.rxd0 = rxd0
        self.rxd1 = rxd1
    def elaborate(self, platform):
        m = Module()
        # Submodules: frame-check-sequence checker, destination-MAC filter,
        # and the dibit-to-byte de-muxer driven from the raw RMII pins.
        m.submodules.crc = crc = CRC32()
        m.submodules.mac_match = mac_match = MACAddressMatch(self.mac_addr)
        m.submodules.rxbyte = rxbyte = RMIIRxByte(
            self.crs_dv, self.rxd0, self.rxd1)
        # Write pointer into the packet memory; it keeps advancing across
        # packets, and rx_offset latches each packet's start address.
        adr = Signal(self.write_port.addr.width)
        with m.FSM() as fsm:
            # Every received byte streams simultaneously into the packet
            # memory, the CRC checker and the MAC matcher; CRC and MAC
            # state are held in reset while the FSM is idle.
            m.d.comb += [
                self.write_port.addr.eq(adr),
                self.write_port.data.eq(rxbyte.data),
                self.write_port.en.eq(rxbyte.data_valid),
                crc.data.eq(rxbyte.data),
                crc.data_valid.eq(rxbyte.data_valid),
                crc.reset.eq(fsm.ongoing("IDLE")),
                mac_match.data.eq(rxbyte.data),
                mac_match.data_valid.eq(rxbyte.data_valid),
                mac_match.reset.eq(fsm.ongoing("IDLE")),
            ]
            # Idle until we see data valid
            with m.State("IDLE"):
                m.d.sync += self.rx_len.eq(0)
                m.d.sync += self.rx_valid.eq(0)
                with m.If(rxbyte.dv):
                    # Remember where this packet starts in memory.
                    m.d.sync += self.rx_offset.eq(adr)
                    m.next = "DATA"
            # Save incoming data to memory
            with m.State("DATA"):
                with m.If(rxbyte.data_valid):
                    m.d.sync += adr.eq(adr + 1)
                    m.d.sync += self.rx_len.eq(self.rx_len + 1)
                with m.Elif(~rxbyte.dv):
                    # Carrier dropped with no pending byte: end of frame.
                    m.next = "EOF"
            with m.State("EOF"):
                # Announce the packet only if both the FCS check and the
                # destination-MAC filter passed; rx_valid is cleared again
                # in IDLE, so it pulses for one cycle.
                with m.If(crc.crc_match & mac_match.mac_match):
                    m.d.sync += self.rx_valid.eq(1)
                m.next = "IDLE"
        return m
class RMIIRxByte(Elaboratable):
    """
    RMII Receive Byte De-muxer
    Handles receiving a byte dibit-by-dibit.
    This submodule must be in the RMII ref_clk clock domain,
    and its outputs are likewise in that domain.
    Pins:
        * `crs_dv`: Data valid, input
        * `rxd0`: RX data 0, input
        * `rxd1`: RX data 1, input
    Outputs:
        * `data`: 8-bit wide output data
        * `data_valid`: Asserted for one cycle when `data` is valid
        * `dv`: RMII Data valid recovered signal
        * `crs`: RMII Carrier sense recovered signal
    """
    def __init__(self, crs_dv, rxd0, rxd1):
        # Outputs
        self.data = Signal(8)
        self.data_valid = Signal()
        self.dv = Signal()
        self.crs = Signal()
        self.crs_dv = crs_dv
        self.rxd0 = rxd0
        self.rxd1 = rxd1
    def elaborate(self, platform):
        m = Module()
        # Sample RMII signals on rising edge of ref_clk
        # (adds one cycle of latency; rxd0 is the LSB of the dibit).
        crs_dv_reg = Signal()
        rxd_reg = Signal(2)
        m.d.sync += [
            crs_dv_reg.eq(self.crs_dv),
            rxd_reg.eq(Cat(self.rxd0, self.rxd1)),
        ]
        with m.FSM():
            # Wait for carrier together with a 0b01 dibit, i.e. the start
            # of the 0101... preamble pattern.
            with m.State("IDLE"):
                m.d.sync += [
                    self.crs.eq(0),
                    self.dv.eq(0),
                    self.data_valid.eq(0),
                ]
                with m.If(crs_dv_reg & (rxd_reg == 0b01)):
                    m.next = "PREAMBLE_SFD"
            # Consume preamble dibits (0b01) until the 0b11 dibit that ends
            # the start-of-frame delimiter; anything else aborts to IDLE.
            with m.State("PREAMBLE_SFD"):
                m.d.sync += [
                    self.crs.eq(1),
                    self.dv.eq(1),
                    self.data_valid.eq(0),
                ]
                with m.If(rxd_reg == 0b11):
                    m.next = "NIBBLE1"
                with m.Elif(rxd_reg != 0b01):
                    m.next = "IDLE"
            # NIBBLE1..NIBBLE4 each capture one dibit (despite the name),
            # filling the output byte LSB-first.  crs_dv is sampled on
            # alternating dibits to recover CRS (odd dibits) and DV (even
            # dibits) separately, per the RMII convention of multiplexing
            # both onto one pin.
            with m.State("NIBBLE1"):
                m.d.sync += [
                    self.data[0:2].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.crs.eq(crs_dv_reg)
                    m.next = "NIBBLE2"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE2"):
                m.d.sync += [
                    self.data[2:4].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.dv.eq(crs_dv_reg)
                    m.next = "NIBBLE3"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE3"):
                m.d.sync += [
                    self.data[4:6].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.crs.eq(crs_dv_reg)
                    m.next = "NIBBLE4"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE4"):
                m.d.sync += [
                    self.data[6:8].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += [
                        self.dv.eq(crs_dv_reg),
                        self.data_valid.eq(1),
                    ]
                    m.next = "NIBBLE1"
                with m.Else():
                    # Even if DV drops on the last dibit, the assembled byte
                    # is still flagged valid once.  (The trailing comma makes
                    # this a 1-tuple, which `+=` accepts.)
                    m.d.sync += self.data_valid.eq(1),
                    m.next = "IDLE"
        return m
class RMIITx(Elaboratable):
    """
    RMII transmit module
    Transmits outgoing packets from a memory. Adds preamble, start of frame
    delimiter, and frame check sequence (CRC32) automatically.
    This module must be run in the RMII ref_clk domain, and the memory port
    and inputs and outputs must also be in that clock domain.
    Ports:
        * `read_port`: a read memory port, 8 bits wide by 2048,
          running in the RMII ref_clk domain
    Pins:
        * `txen`: RMII transmit enable
        * `txd0`: RMII transmit data 0
        * `txd1`: RMII transmit data 1
    Inputs:
        * `tx_start`: Pulse high to begin transmission of a packet
        * `tx_offset`: n-bit address offset of packet to transmit
        * `tx_len`: 11-bit length of packet to transmit
    Outputs:
        * `tx_ready`: Asserted while ready to transmit a new packet
    """
    def __init__(self, read_port, txen, txd0, txd1):
        # Inputs
        self.tx_start = Signal()
        self.tx_offset = Signal(read_port.addr.width)
        self.tx_len = Signal(11)
        # Outputs
        self.tx_ready = Signal()
        self.read_port = read_port
        self.txen = txen
        self.txd0 = txd0
        self.txd1 = txd1
    def elaborate(self, platform):
        m = Module()
        # Transmit byte counter (index into the current packet / field)
        tx_idx = Signal(self.read_port.addr.width)
        # Transmit length latch (captured from tx_len on IDLE)
        tx_len = Signal(11)
        # Transmit offset latch (captured from tx_offset on IDLE)
        tx_offset = Signal(self.read_port.addr.width)
        m.submodules.crc = crc = CRC32()
        m.submodules.txbyte = txbyte = RMIITxByte(
            self.txen, self.txd0, self.txd1)
        with m.FSM() as fsm:
            # The CRC sees exactly the bytes handed to the byte muxer, is
            # cleared while idle, and only accumulates over the payload
            # (DATA) and padding (PAD) — not preamble/SFD.
            m.d.comb += [
                self.read_port.addr.eq(tx_idx + tx_offset),
                crc.data.eq(txbyte.data),
                crc.reset.eq(fsm.ongoing("IDLE")),
                crc.data_valid.eq(
                    (fsm.ongoing("DATA") | fsm.ongoing("PAD"))
                    & txbyte.ready),
                self.tx_ready.eq(fsm.ongoing("IDLE")),
                txbyte.data_valid.eq(
                    ~(fsm.ongoing("IDLE") | fsm.ongoing("IPG"))),
            ]
            with m.State("IDLE"):
                m.d.comb += txbyte.data.eq(0)
                # Latch the request parameters so the caller may change
                # tx_offset/tx_len immediately after pulsing tx_start.
                m.d.sync += [
                    tx_idx.eq(0),
                    tx_offset.eq(self.tx_offset),
                    tx_len.eq(self.tx_len),
                ]
                with m.If(self.tx_start):
                    m.next = "PREAMBLE"
            with m.State("PREAMBLE"):
                # Seven bytes of 0x55 (tx_idx counts 0..6) before the SFD.
                m.d.comb += txbyte.data.eq(0x55)
                with m.If(txbyte.ready):
                    with m.If(tx_idx == 6):
                        m.d.sync += tx_idx.eq(0)
                        m.next = "SFD"
                    with m.Else():
                        m.d.sync += tx_idx.eq(tx_idx + 1)
            with m.State("SFD"):
                # Start-of-frame delimiter.
                m.d.comb += txbyte.data.eq(0xD5)
                with m.If(txbyte.ready):
                    m.next = "DATA"
            with m.State("DATA"):
                # Stream payload bytes straight from the memory read port.
                m.d.comb += txbyte.data.eq(self.read_port.data)
                with m.If(txbyte.ready):
                    m.d.sync += tx_idx.eq(tx_idx + 1)
                    with m.If(tx_idx == tx_len - 1):
                        # Short frames are zero-padded up to the 60-byte
                        # minimum (before the 4-byte FCS).
                        with m.If(tx_len < 60):
                            m.next = "PAD"
                        with m.Else():
                            m.next = "FCS1"
            with m.State("PAD"):
                m.d.comb += txbyte.data.eq(0x00)
                with m.If(txbyte.ready):
                    m.d.sync += tx_idx.eq(tx_idx + 1)
                    with m.If(tx_idx == 59):
                        m.next = "FCS1"
            # FCS1..FCS4: emit the CRC32 least-significant byte first.
            with m.State("FCS1"):
                m.d.comb += txbyte.data.eq(crc.crc_out[0:8])
                with m.If(txbyte.ready):
                    m.next = "FCS2"
            with m.State("FCS2"):
                m.d.comb += txbyte.data.eq(crc.crc_out[8:16])
                with m.If(txbyte.ready):
                    m.next = "FCS3"
            with m.State("FCS3"):
                m.d.comb += txbyte.data.eq(crc.crc_out[16:24])
                with m.If(txbyte.ready):
                    m.next = "FCS4"
            with m.State("FCS4"):
                m.d.comb += txbyte.data.eq(crc.crc_out[24:32])
                with m.If(txbyte.ready):
                    m.d.sync += tx_idx.eq(0)
                    m.next = "IPG"
            with m.State("IPG"):
                # Inter-packet gap: 48 ref_clk cycles at 2 bits/cycle,
                # i.e. 96 bit times.
                m.d.sync += tx_idx.eq(tx_idx + 1)
                with m.If(tx_idx == 48):
                    m.next = "IDLE"
        return m
class RMIITxByte(Elaboratable):
    """
    RMII Transmit Byte Muxer
    Handles transmitting a byte dibit-by-dibit.
    This submodule must be in the RMII ref_clk clock domain,
    and its inputs and outputs are likewise in that domain.
    Pins:
        * `txen`: RMII Transmit enable
        * `txd0`: TMII Transmit data 0
        * `txd1`: TMII Transmit data 1
    Inputs:
        * `data`: 8-bit wide data to transmit. Latched internally so you may
          update it to the next word after asserting `data_valid`.
        * `data_valid`: Assert while valid data is present at `data`.
    Outputs:
        * `ready`: Asserted when ready to receive new data. This is asserted
          while the final dibit is being transmitted so that new data
          can be produced on the next clock cycle.
    """
    def __init__(self, txen, txd0, txd1):
        # Inputs
        self.data = Signal(8)
        self.data_valid = Signal()
        # Outputs
        self.ready = Signal()
        self.txen = txen
        self.txd0 = txd0
        self.txd1 = txd1
    def elaborate(self, platform):
        m = Module()
        # Register input data on the data_valid signal
        data_reg = Signal(8)
        with m.FSM() as fsm:
            # `ready` is raised one state early (NIBBLE4) so the producer
            # can present the next byte with no dead cycle between bytes.
            m.d.comb += [
                self.ready.eq(fsm.ongoing("IDLE") | fsm.ongoing("NIBBLE4")),
                self.txen.eq(~fsm.ongoing("IDLE")),
            ]
            with m.State("IDLE"):
                m.d.comb += [
                    self.txd0.eq(0),
                    self.txd1.eq(0),
                ]
                m.d.sync += data_reg.eq(self.data)
                with m.If(self.data_valid):
                    m.next = "NIBBLE1"
            # NIBBLE1..NIBBLE4 shift the latched byte out two bits per
            # clock, least-significant dibit first.
            with m.State("NIBBLE1"):
                m.d.comb += [
                    self.txd0.eq(data_reg[0]),
                    self.txd1.eq(data_reg[1]),
                ]
                m.next = "NIBBLE2"
            with m.State("NIBBLE2"):
                m.d.comb += [
                    self.txd0.eq(data_reg[2]),
                    self.txd1.eq(data_reg[3]),
                ]
                m.next = "NIBBLE3"
            with m.State("NIBBLE3"):
                m.d.comb += [
                    self.txd0.eq(data_reg[4]),
                    self.txd1.eq(data_reg[5]),
                ]
                m.next = "NIBBLE4"
            with m.State("NIBBLE4"):
                m.d.comb += [
                    self.txd0.eq(data_reg[6]),
                    self.txd1.eq(data_reg[7]),
                ]
                # Latch the next byte here so back-to-back bytes stream
                # without gaps while data_valid stays high.
                m.d.sync += data_reg.eq(self.data)
                with m.If(self.data_valid):
                    m.next = "NIBBLE1"
                with m.Else():
                    m.next = "IDLE"
        return m
def test_rmii_rx():
    """Simulate RMIIRx receiving two back-to-back packets.

    Drives a canned ICMP frame (with valid FCS) onto the RMII pins twice
    and checks rx_valid/rx_len/rx_offset plus the memory contents,
    including circular wrap of the 128-byte buffer on the second packet.
    """
    import random
    from nmigen.back import pysim
    from nmigen import Memory
    crs_dv = Signal()
    rxd0 = Signal()
    rxd1 = Signal()
    mem = Memory(width=8, depth=128)
    mem_port = mem.write_port()
    mac_addr = [random.randint(0, 255) for _ in range(6)]
    rmii_rx = RMIIRx(mac_addr, mem_port, crs_dv, rxd0, rxd1)
    def testbench():
        def tx_packet():
            yield (crs_dv.eq(1))
            # Preamble (variable length is tolerated by the receiver)
            for _ in range(random.randint(10, 40)):
                yield (rxd0.eq(1))
                yield (rxd1.eq(0))
                yield
            # SFD
            yield (rxd0.eq(1))
            yield (rxd1.eq(1))
            yield
            # Data, two bits per clock, LSB-first
            for txbyte in txbytes:
                for dibit in range(0, 8, 2):
                    yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
                    yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
                    yield
            yield (crs_dv.eq(0))
            # Finish clocking
            for _ in range(6):
                yield
        for _ in range(10):
            yield
        txbytes = [
            0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xDE, 0xF1, 0x38, 0x89,
            0x40, 0x08, 0x00, 0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
            0x40, 0x01, 0xB6, 0xD0, 0xC0, 0xA8, 0x01, 0x88, 0xC0, 0xA8, 0x01,
            0x00, 0x08, 0x00, 0x0D, 0xD9, 0x12, 0x1E, 0x00, 0x07, 0x3B, 0x3E,
            0x0C, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x13, 0x03, 0x0F, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
            0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
            0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
            0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x52,
            0x32, 0x1F, 0x9E
        ]
        # Transmit first packet
        yield from tx_packet()
        # Check packet was received
        assert (yield rmii_rx.rx_valid)
        assert (yield rmii_rx.rx_len) == 102
        assert (yield rmii_rx.rx_offset) == 0
        mem_contents = []
        for idx in range(102):
            mem_contents.append((yield mem[idx]))
        assert mem_contents == txbytes
        # Pause (inter-frame gap); rx_valid is only a pulse
        for _ in range(20):
            yield
        assert (yield rmii_rx.rx_valid) == 0
        # Transmit a second packet
        yield from tx_packet()
        # Check packet was received; write address wraps modulo depth
        assert (yield rmii_rx.rx_valid)
        assert (yield rmii_rx.rx_len) == 102
        assert (yield rmii_rx.rx_offset) == 102
        mem_contents = []
        for idx in range(102):
            mem_contents.append((yield mem[(102+idx) % 128]))
        assert mem_contents == txbytes
        yield
    mod = Module()
    mod.submodules += rmii_rx, mem_port
    # Use a context manager so the VCD file is flushed and closed even if
    # an assertion fails (the original leaked the handle).
    with open("rmii_rx.vcd", "w") as vcdf:
        with pysim.Simulator(mod, vcd_file=vcdf) as sim:
            sim.add_clock(1/50e6)
            sim.add_sync_process(testbench())
            sim.run()
def test_rmii_rx_byte():
    """Simulate RMIIRxByte decoding random bytes from the RMII dibit stream.

    Exercises the CRS/DV recovery: the final two bytes are sent with
    CRS_DV toggling (carrier lost but data still valid).
    """
    import random
    from nmigen.back import pysim
    crs_dv = Signal()
    rxd0 = Signal()
    rxd1 = Signal()
    rmii_rx_byte = RMIIRxByte(crs_dv, rxd0, rxd1)
    def testbench():
        for _ in range(10):
            yield
        txbytes = [random.randint(0, 255) for _ in range(8)]
        rxbytes = []
        yield (crs_dv.eq(1))
        # Preamble
        for _ in range(random.randint(10, 40)):
            yield (rxd0.eq(1))
            yield (rxd1.eq(0))
            yield
        # SFD
        yield (rxd0.eq(1))
        yield (rxd1.eq(1))
        yield
        # Data (except last two bytes), with CRS=1 DV=1
        for txbyte in txbytes[:-2]:
            for dibit in range(0, 8, 2):
                yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
                yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
                yield
                if (yield rmii_rx_byte.data_valid):
                    rxbytes.append((yield rmii_rx_byte.data))
        # Data (last two bytes), with CRS=0 DV=1
        for txbyte in txbytes[-2:]:
            for dibit in range(0, 8, 2):
                yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
                yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
                if dibit in (0, 4):
                    # CRS=0
                    yield (crs_dv.eq(0))
                else:
                    # DV=1
                    yield (crs_dv.eq(1))
                yield
                if (yield rmii_rx_byte.data_valid):
                    rxbytes.append((yield rmii_rx_byte.data))
        yield (crs_dv.eq(0))
        # Drain: the last byte becomes valid a few cycles after DV drops
        for _ in range(10):
            yield
            if (yield rmii_rx_byte.data_valid):
                rxbytes.append((yield rmii_rx_byte.data))
        assert rxbytes == txbytes
    # Use a context manager so the VCD file is flushed and closed even if
    # an assertion fails (the original leaked the handle).
    with open("rmii_rx_byte.vcd", "w") as vcdf:
        with pysim.Simulator(rmii_rx_byte, vcd_file=vcdf) as sim:
            sim.add_clock(1/50e6)
            sim.add_sync_process(testbench())
            sim.run()
def test_rmii_tx():
    """Simulate RMIITx transmitting an ARP frame from memory.

    Checks the dibit stream equals preamble + SFD + payload + zero padding
    (to the 60-byte minimum) + precomputed CRC32, with the packet stored at
    a non-zero, wrapping offset in the 128-byte buffer.
    """
    from nmigen.back import pysim
    from nmigen import Memory
    txen = Signal()
    txd0 = Signal()
    txd1 = Signal()
    txbytes = [
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x44, 0x4e, 0x30, 0x76,
        0x9e, 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
        0x02, 0x44, 0x4e, 0x30, 0x76, 0x9e, 0xc0, 0xa8, 0x02, 0xc8, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x02, 0xc8
    ]
    preamblebytes = [0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xD5]
    padbytes = [0x00] * (60 - len(txbytes))
    crcbytes = [0x44, 0x5E, 0xB4, 0xD2]
    txnibbles = []
    rxnibbles = []
    for txbyte in preamblebytes + txbytes + padbytes + crcbytes:
        txnibbles += [
            (txbyte & 0b11),
            ((txbyte >> 2) & 0b11),
            ((txbyte >> 4) & 0b11),
            ((txbyte >> 6) & 0b11),
        ]
    # Put the transmit bytes into memory at some offset, and fill the rest of
    # memory with all-1s (to ensure we're not relying on memory being zeroed).
    txbytes_zp = txbytes + [0xFF]*(128 - len(txbytes))
    txoffset = 120
    txbytes_mem = txbytes_zp[-txoffset:] + txbytes_zp[:-txoffset]
    mem = Memory(width=8, depth=128, init=txbytes_mem)
    mem_port = mem.read_port()
    rmii_tx = RMIITx(mem_port, txen, txd0, txd1)
    def testbench():
        for _ in range(10):
            yield
        yield (rmii_tx.tx_start.eq(1))
        yield (rmii_tx.tx_offset.eq(txoffset))
        yield (rmii_tx.tx_len.eq(len(txbytes)))
        yield
        yield (rmii_tx.tx_start.eq(0))
        yield (rmii_tx.tx_offset.eq(0))
        yield (rmii_tx.tx_len.eq(0))
        # Capture every dibit while txen is asserted
        for _ in range((len(txbytes) + 12) * 4 + 120):
            if (yield txen):
                rxnibbles.append((yield txd0) | ((yield txd1) << 1))
            yield
        print(len(txnibbles), len(rxnibbles))
        print(txnibbles)
        print(rxnibbles)
        assert txnibbles == rxnibbles
    mod = Module()
    mod.submodules += rmii_tx, mem_port
    # Use a context manager so the VCD file is flushed and closed even if
    # an assertion fails (the original leaked the handle).
    with open("rmii_tx.vcd", "w") as vcdf:
        with pysim.Simulator(mod, vcd_file=vcdf) as sim:
            sim.add_clock(1/50e6)
            sim.add_sync_process(testbench())
            sim.run()
def test_rmii_tx_byte():
    """Simulate RMIITxByte serialising random bytes to dibits.

    Accounts for the one-cycle output latency by dropping the first
    captured nibble and capturing one extra after data_valid falls.
    """
    import random
    from nmigen.back import pysim
    txen = Signal()
    txd0 = Signal()
    txd1 = Signal()
    rmii_tx_byte = RMIITxByte(txen, txd0, txd1)
    data = rmii_tx_byte.data
    data_valid = rmii_tx_byte.data_valid
    def testbench():
        for _ in range(10):
            yield
        txbytes = [random.randint(0, 255) for _ in range(8)]
        txnibbles = []
        rxnibbles = []
        yield (data_valid.eq(1))
        for txbyte in txbytes:
            txnibbles += [
                (txbyte & 0b11),
                ((txbyte >> 2) & 0b11),
                ((txbyte >> 4) & 0b11),
                ((txbyte >> 6) & 0b11),
            ]
            yield (data.eq(txbyte))
            yield
            rxnibbles.append((yield txd0) | ((yield txd1) << 1))
            yield
            rxnibbles.append((yield txd0) | ((yield txd1) << 1))
            yield
            rxnibbles.append((yield txd0) | ((yield txd1) << 1))
            yield
            rxnibbles.append((yield txd0) | ((yield txd1) << 1))
        yield (data_valid.eq(0))
        yield
        rxnibbles.append((yield txd0) | ((yield txd1) << 1))
        rxnibbles = rxnibbles[1:]
        assert txnibbles == rxnibbles
        for _ in range(10):
            yield
    # Use a context manager so the VCD file is flushed and closed even if
    # an assertion fails (the original leaked the handle).
    with open("rmii_tx_byte.vcd", "w") as vcdf:
        with pysim.Simulator(rmii_tx_byte, vcd_file=vcdf) as sim:
            sim.add_clock(1/50e6)
            sim.add_sync_process(testbench())
            sim.run()
| 30.921516 | 78 | 0.495427 | """
Ethernet RMII Interface
Copyright 2018-2019 Adam Greig
Released under the MIT license; see LICENSE for details.
"""
from nmigen import Elaboratable, Module, Signal, Cat
from .crc import CRC32
from .mac_address_match import MACAddressMatch
class RMIIRx(Elaboratable):
    """
    RMII receive module
    Receives incoming packets and saves them to a memory. Validates incoming
    frame check sequence and only asserts `rx_valid` when an entire valid
    packet has been saved to the port.
    This module must be run in the RMII ref_clk domain, and the memory port
    and inputs and outputs must also be in that clock domain.
    Parameters:
        * `mac_addr`: 6-byte MAC address (list of ints)
    Ports:
        * `write_port`: a write-capable memory port, 8 bits wide by 2048,
          running in the RMII ref_clk domain
    Pins:
        * `crs_dv`: RMII carrier sense/data valid
        * `rxd0`: RMII receive data 0
        * `rxd1`: RMII receive data 1
    Outputs:
        * `rx_valid`: pulsed when a valid packet is in memory
        * `rx_offset`: n-bit start address of received packet
        * `rx_len`: 11-bit length of received packet
    """
    def __init__(self, mac_addr, write_port, crs_dv, rxd0, rxd1):
        # Outputs
        self.rx_valid = Signal()
        self.rx_offset = Signal(write_port.addr.width)
        self.rx_len = Signal(11)
        # Store arguments
        self.mac_addr = mac_addr
        self.write_port = write_port
        self.crs_dv = crs_dv
        self.rxd0 = rxd0
        self.rxd1 = rxd1
    def elaborate(self, platform):
        m = Module()
        m.submodules.crc = crc = CRC32()
        m.submodules.mac_match = mac_match = MACAddressMatch(self.mac_addr)
        m.submodules.rxbyte = rxbyte = RMIIRxByte(
            self.crs_dv, self.rxd0, self.rxd1)
        # Write address; never reset, so the memory acts as a circular
        # buffer across packets (rx_offset records where each one starts).
        adr = Signal(self.write_port.addr.width)
        with m.FSM() as fsm:
            # CRC and MAC matcher are fed every received byte and held in
            # reset while idle.
            m.d.comb += [
                self.write_port.addr.eq(adr),
                self.write_port.data.eq(rxbyte.data),
                self.write_port.en.eq(rxbyte.data_valid),
                crc.data.eq(rxbyte.data),
                crc.data_valid.eq(rxbyte.data_valid),
                crc.reset.eq(fsm.ongoing("IDLE")),
                mac_match.data.eq(rxbyte.data),
                mac_match.data_valid.eq(rxbyte.data_valid),
                mac_match.reset.eq(fsm.ongoing("IDLE")),
            ]
            # Idle until we see data valid
            with m.State("IDLE"):
                m.d.sync += self.rx_len.eq(0)
                m.d.sync += self.rx_valid.eq(0)
                with m.If(rxbyte.dv):
                    m.d.sync += self.rx_offset.eq(adr)
                    m.next = "DATA"
            # Save incoming data to memory
            with m.State("DATA"):
                with m.If(rxbyte.data_valid):
                    m.d.sync += adr.eq(adr + 1)
                    m.d.sync += self.rx_len.eq(self.rx_len + 1)
                with m.Elif(~rxbyte.dv):
                    m.next = "EOF"
            with m.State("EOF"):
                # Only announce the packet if both the FCS checks out and
                # the destination MAC matched; otherwise it is silently
                # dropped (rx_valid stays low, buffer space is reused).
                with m.If(crc.crc_match & mac_match.mac_match):
                    m.d.sync += self.rx_valid.eq(1)
                m.next = "IDLE"
        return m
class RMIIRxByte(Elaboratable):
    """
    RMII Receive Byte De-muxer
    Handles receiving a byte dibit-by-dibit.
    This submodule must be in the RMII ref_clk clock domain,
    and its outputs are likewise in that domain.
    Pins:
        * `crs_dv`: Data valid, input
        * `rxd0`: RX data 0, input
        * `rxd1`: RX data 1, input
    Outputs:
        * `data`: 8-bit wide output data
        * `data_valid`: Asserted for one cycle when `data` is valid
        * `dv`: RMII Data valid recovered signal
        * `crs`: RMII Carrier sense recovered signal
    """
    def __init__(self, crs_dv, rxd0, rxd1):
        # Outputs
        self.data = Signal(8)
        self.data_valid = Signal()
        self.dv = Signal()
        self.crs = Signal()
        self.crs_dv = crs_dv
        self.rxd0 = rxd0
        self.rxd1 = rxd1
    def elaborate(self, platform):
        m = Module()
        # Sample RMII signals on rising edge of ref_clk
        crs_dv_reg = Signal()
        rxd_reg = Signal(2)
        m.d.sync += [
            crs_dv_reg.eq(self.crs_dv),
            rxd_reg.eq(Cat(self.rxd0, self.rxd1)),
        ]
        with m.FSM():
            with m.State("IDLE"):
                m.d.sync += [
                    self.crs.eq(0),
                    self.dv.eq(0),
                    self.data_valid.eq(0),
                ]
                # 0b01 dibits are the preamble pattern (LSB-first 0x55).
                with m.If(crs_dv_reg & (rxd_reg == 0b01)):
                    m.next = "PREAMBLE_SFD"
            with m.State("PREAMBLE_SFD"):
                m.d.sync += [
                    self.crs.eq(1),
                    self.dv.eq(1),
                    self.data_valid.eq(0),
                ]
                # 0b11 is the final dibit of the SFD (0xD5); anything other
                # than more preamble aborts back to IDLE.
                with m.If(rxd_reg == 0b11):
                    m.next = "NIBBLE1"
                with m.Elif(rxd_reg != 0b01):
                    m.next = "IDLE"
            # NIBBLE1..4 assemble the byte two bits at a time, LSB-first.
            # Per RMII, CRS_DV alternates at dibit rate during a frame:
            # CRS is recovered on odd dibits (NIBBLE1/3), DV on even
            # dibits (NIBBLE2/4).
            with m.State("NIBBLE1"):
                m.d.sync += [
                    self.data[0:2].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.crs.eq(crs_dv_reg)
                    m.next = "NIBBLE2"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE2"):
                m.d.sync += [
                    self.data[2:4].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.dv.eq(crs_dv_reg)
                    m.next = "NIBBLE3"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE3"):
                m.d.sync += [
                    self.data[4:6].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    m.d.sync += self.crs.eq(crs_dv_reg)
                    m.next = "NIBBLE4"
                with m.Else():
                    m.next = "IDLE"
            with m.State("NIBBLE4"):
                m.d.sync += [
                    self.data[6:8].eq(rxd_reg),
                    self.data_valid.eq(0),
                ]
                with m.If(self.dv):
                    # Byte complete: pulse data_valid and continue with the
                    # next byte while DV holds.
                    m.d.sync += [
                        self.dv.eq(crs_dv_reg),
                        self.data_valid.eq(1),
                    ]
                    m.next = "NIBBLE1"
                with m.Else():
                    # DV dropped mid-frame: emit the final byte, then idle.
                    m.d.sync += self.data_valid.eq(1),
                    m.next = "IDLE"
        return m
class RMIITx(Elaboratable):
"""
RMII transmit module
Transmits outgoing packets from a memory. Adds preamble, start of frame
delimiter, and frame check sequence (CRC32) automatically.
This module must be run in the RMII ref_clk domain, and the memory port
and inputs and outputs must also be in that clock domain.
Ports:
* `read_port`: a read memory port, 8 bits wide by 2048,
running in the RMII ref_clk domain
Pins:
* `txen`: RMII transmit enable
* `txd0`: RMII transmit data 0
* `txd1`: RMII transmit data 1
Inputs:
* `tx_start`: Pulse high to begin transmission of a packet
* `tx_offset`: n-bit address offset of packet to transmit
* `tx_len`: 11-bit length of packet to transmit
Outputs:
* `tx_ready`: Asserted while ready to transmit a new packet
"""
def __init__(self, read_port, txen, txd0, txd1):
# Inputs
self.tx_start = Signal()
self.tx_offset = Signal(read_port.addr.width)
self.tx_len = Signal(11)
# Outputs
self.tx_ready = Signal()
self.read_port = read_port
self.txen = txen
self.txd0 = txd0
self.txd1 = txd1
def elaborate(self, platform):
m = Module()
# Transmit byte counter
tx_idx = Signal(self.read_port.addr.width)
# Transmit length latch
tx_len = Signal(11)
# Transmit offset latch
tx_offset = Signal(self.read_port.addr.width)
m.submodules.crc = crc = CRC32()
m.submodules.txbyte = txbyte = RMIITxByte(
self.txen, self.txd0, self.txd1)
with m.FSM() as fsm:
m.d.comb += [
self.read_port.addr.eq(tx_idx + tx_offset),
crc.data.eq(txbyte.data),
crc.reset.eq(fsm.ongoing("IDLE")),
crc.data_valid.eq(
(fsm.ongoing("DATA") | fsm.ongoing("PAD"))
& txbyte.ready),
self.tx_ready.eq(fsm.ongoing("IDLE")),
txbyte.data_valid.eq(
~(fsm.ongoing("IDLE") | fsm.ongoing("IPG"))),
]
with m.State("IDLE"):
m.d.comb += txbyte.data.eq(0)
m.d.sync += [
tx_idx.eq(0),
tx_offset.eq(self.tx_offset),
tx_len.eq(self.tx_len),
]
with m.If(self.tx_start):
m.next = "PREAMBLE"
with m.State("PREAMBLE"):
m.d.comb += txbyte.data.eq(0x55)
with m.If(txbyte.ready):
with m.If(tx_idx == 6):
m.d.sync += tx_idx.eq(0)
m.next = "SFD"
with m.Else():
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.State("SFD"):
m.d.comb += txbyte.data.eq(0xD5)
with m.If(txbyte.ready):
m.next = "DATA"
with m.State("DATA"):
m.d.comb += txbyte.data.eq(self.read_port.data)
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == tx_len - 1):
with m.If(tx_len < 60):
m.next = "PAD"
with m.Else():
m.next = "FCS1"
with m.State("PAD"):
m.d.comb += txbyte.data.eq(0x00)
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == 59):
m.next = "FCS1"
with m.State("FCS1"):
m.d.comb += txbyte.data.eq(crc.crc_out[0:8])
with m.If(txbyte.ready):
m.next = "FCS2"
with m.State("FCS2"):
m.d.comb += txbyte.data.eq(crc.crc_out[8:16])
with m.If(txbyte.ready):
m.next = "FCS3"
with m.State("FCS3"):
m.d.comb += txbyte.data.eq(crc.crc_out[16:24])
with m.If(txbyte.ready):
m.next = "FCS4"
with m.State("FCS4"):
m.d.comb += txbyte.data.eq(crc.crc_out[24:32])
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(0)
m.next = "IPG"
with m.State("IPG"):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == 48):
m.next = "IDLE"
return m
class RMIITxByte(Elaboratable):
"""
RMII Transmit Byte Muxer
Handles transmitting a byte dibit-by-dibit.
This submodule must be in the RMII ref_clk clock domain,
and its inputs and outputs are likewise in that domain.
Pins:
* `txen`: RMII Transmit enable
* `txd0`: TMII Transmit data 0
* `txd1`: TMII Transmit data 1
Inputs:
* `data`: 8-bit wide data to transmit. Latched internally so you may
update it to the next word after asserting `data_valid`.
* `data_valid`: Assert while valid data is present at `data`.
Outputs:
* `ready`: Asserted when ready to receive new data. This is asserted
while the final dibit is being transmitted so that new data
can be produced on the next clock cycle.
"""
def __init__(self, txen, txd0, txd1):
# Inputs
self.data = Signal(8)
self.data_valid = Signal()
# Outputs
self.ready = Signal()
self.txen = txen
self.txd0 = txd0
self.txd1 = txd1
def elaborate(self, platform):
m = Module()
# Register input data on the data_valid signal
data_reg = Signal(8)
with m.FSM() as fsm:
m.d.comb += [
self.ready.eq(fsm.ongoing("IDLE") | fsm.ongoing("NIBBLE4")),
self.txen.eq(~fsm.ongoing("IDLE")),
]
with m.State("IDLE"):
m.d.comb += [
self.txd0.eq(0),
self.txd1.eq(0),
]
m.d.sync += data_reg.eq(self.data)
with m.If(self.data_valid):
m.next = "NIBBLE1"
with m.State("NIBBLE1"):
m.d.comb += [
self.txd0.eq(data_reg[0]),
self.txd1.eq(data_reg[1]),
]
m.next = "NIBBLE2"
with m.State("NIBBLE2"):
m.d.comb += [
self.txd0.eq(data_reg[2]),
self.txd1.eq(data_reg[3]),
]
m.next = "NIBBLE3"
with m.State("NIBBLE3"):
m.d.comb += [
self.txd0.eq(data_reg[4]),
self.txd1.eq(data_reg[5]),
]
m.next = "NIBBLE4"
with m.State("NIBBLE4"):
m.d.comb += [
self.txd0.eq(data_reg[6]),
self.txd1.eq(data_reg[7]),
]
m.d.sync += data_reg.eq(self.data)
with m.If(self.data_valid):
m.next = "NIBBLE1"
with m.Else():
m.next = "IDLE"
return m
def test_rmii_rx():
import random
from nmigen.back import pysim
from nmigen import Memory
crs_dv = Signal()
rxd0 = Signal()
rxd1 = Signal()
mem = Memory(width=8, depth=128)
mem_port = mem.write_port()
mac_addr = [random.randint(0, 255) for _ in range(6)]
rmii_rx = RMIIRx(mac_addr, mem_port, crs_dv, rxd0, rxd1)
def testbench():
def tx_packet():
yield (crs_dv.eq(1))
# Preamble
for _ in range(random.randint(10, 40)):
yield (rxd0.eq(1))
yield (rxd1.eq(0))
yield
# SFD
yield (rxd0.eq(1))
yield (rxd1.eq(1))
yield
# Data
for txbyte in txbytes:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
yield
yield (crs_dv.eq(0))
# Finish clocking
for _ in range(6):
yield
for _ in range(10):
yield
txbytes = [
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xDE, 0xF1, 0x38, 0x89,
0x40, 0x08, 0x00, 0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
0x40, 0x01, 0xB6, 0xD0, 0xC0, 0xA8, 0x01, 0x88, 0xC0, 0xA8, 0x01,
0x00, 0x08, 0x00, 0x0D, 0xD9, 0x12, 0x1E, 0x00, 0x07, 0x3B, 0x3E,
0x0C, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x13, 0x03, 0x0F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x52,
0x32, 0x1F, 0x9E
]
# Transmit first packet
yield from tx_packet()
# Check packet was received
assert (yield rmii_rx.rx_valid)
assert (yield rmii_rx.rx_len) == 102
assert (yield rmii_rx.rx_offset) == 0
mem_contents = []
for idx in range(102):
mem_contents.append((yield mem[idx]))
assert mem_contents == txbytes
# Pause (inter-frame gap)
for _ in range(20):
yield
assert (yield rmii_rx.rx_valid) == 0
# Transmit a second packet
yield from tx_packet()
# Check packet was received
assert (yield rmii_rx.rx_valid)
assert (yield rmii_rx.rx_len) == 102
assert (yield rmii_rx.rx_offset) == 102
mem_contents = []
for idx in range(102):
mem_contents.append((yield mem[(102+idx) % 128]))
assert mem_contents == txbytes
yield
mod = Module()
mod.submodules += rmii_rx, mem_port
vcdf = open("rmii_rx.vcd", "w")
with pysim.Simulator(mod, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_rx_byte():
import random
from nmigen.back import pysim
crs_dv = Signal()
rxd0 = Signal()
rxd1 = Signal()
rmii_rx_byte = RMIIRxByte(crs_dv, rxd0, rxd1)
def testbench():
for _ in range(10):
yield
txbytes = [random.randint(0, 255) for _ in range(8)]
rxbytes = []
yield (crs_dv.eq(1))
# Preamble
for _ in range(random.randint(10, 40)):
yield (rxd0.eq(1))
yield (rxd1.eq(0))
yield
# SFD
yield (rxd0.eq(1))
yield (rxd1.eq(1))
yield
# Data (except last two bytes), with CRS=1 DV=1
for txbyte in txbytes[:-2]:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
# Data (last two bytes), with CRS=0 DV=1
for txbyte in txbytes[-2:]:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
if dibit in (0, 4):
# CRS=0
yield (crs_dv.eq(0))
else:
# DV=1
yield (crs_dv.eq(1))
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
yield (crs_dv.eq(0))
for _ in range(10):
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
assert rxbytes == txbytes
vcdf = open("rmii_rx_byte.vcd", "w")
with pysim.Simulator(rmii_rx_byte, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_tx():
from nmigen.back import pysim
from nmigen import Memory
txen = Signal()
txd0 = Signal()
txd1 = Signal()
txbytes = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x44, 0x4e, 0x30, 0x76,
0x9e, 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
0x02, 0x44, 0x4e, 0x30, 0x76, 0x9e, 0xc0, 0xa8, 0x02, 0xc8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x02, 0xc8
]
preamblebytes = [0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xD5]
padbytes = [0x00] * (60 - len(txbytes))
crcbytes = [0x44, 0x5E, 0xB4, 0xD2]
txnibbles = []
rxnibbles = []
for txbyte in preamblebytes + txbytes + padbytes + crcbytes:
txnibbles += [
(txbyte & 0b11),
((txbyte >> 2) & 0b11),
((txbyte >> 4) & 0b11),
((txbyte >> 6) & 0b11),
]
# Put the transmit bytes into memory at some offset, and fill the rest of
# memory with all-1s (to ensure we're not relying on memory being zeroed).
txbytes_zp = txbytes + [0xFF]*(128 - len(txbytes))
txoffset = 120
txbytes_mem = txbytes_zp[-txoffset:] + txbytes_zp[:-txoffset]
mem = Memory(width=8, depth=128, init=txbytes_mem)
mem_port = mem.read_port()
rmii_tx = RMIITx(mem_port, txen, txd0, txd1)
def testbench():
for _ in range(10):
yield
yield (rmii_tx.tx_start.eq(1))
yield (rmii_tx.tx_offset.eq(txoffset))
yield (rmii_tx.tx_len.eq(len(txbytes)))
yield
yield (rmii_tx.tx_start.eq(0))
yield (rmii_tx.tx_offset.eq(0))
yield (rmii_tx.tx_len.eq(0))
for _ in range((len(txbytes) + 12) * 4 + 120):
if (yield txen):
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
print(len(txnibbles), len(rxnibbles))
print(txnibbles)
print(rxnibbles)
assert txnibbles == rxnibbles
mod = Module()
mod.submodules += rmii_tx, mem_port
vcdf = open("rmii_tx.vcd", "w")
with pysim.Simulator(mod, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_tx_byte():
import random
from nmigen.back import pysim
txen = Signal()
txd0 = Signal()
txd1 = Signal()
rmii_tx_byte = RMIITxByte(txen, txd0, txd1)
data = rmii_tx_byte.data
data_valid = rmii_tx_byte.data_valid
def testbench():
for _ in range(10):
yield
txbytes = [random.randint(0, 255) for _ in range(8)]
txnibbles = []
rxnibbles = []
yield (data_valid.eq(1))
for txbyte in txbytes:
txnibbles += [
(txbyte & 0b11),
((txbyte >> 2) & 0b11),
((txbyte >> 4) & 0b11),
((txbyte >> 6) & 0b11),
]
yield (data.eq(txbyte))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield (data_valid.eq(0))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
rxnibbles = rxnibbles[1:]
assert txnibbles == rxnibbles
for _ in range(10):
yield
vcdf = open("rmii_tx_byte.vcd", "w")
with pysim.Simulator(rmii_tx_byte, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
| 0 | 0 |
d60d3beec1a8bf5f5b1156875db9e3b65d35b8d6 | 536 | py | Python | cogs/roll.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | cogs/roll.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | cogs/roll.py | morozoffnor/govnoed_grisha_rewritten | 6a34336cede03a081954479f998d5a8162e1a31d | [
"Apache-2.0"
] | null | null | null | import discord
from discord.ext import commands
import random
import sys
sys.path.insert(1, '../functions')
from functions.cmd_print import cmd_print
class Roll(commands.Cog):
    """Cog providing a simple `!roll` dice command."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def roll(self, ctx, *, number: int = 100):
        # The `int` annotation is required: without it discord.py passes the
        # raw message remainder as a str, and random.randrange(1, "50", 1)
        # raises TypeError for every user-supplied argument.
        if number < 2:
            # randrange(1, n, 1) raises ValueError for an empty range.
            await ctx.send('Number must be at least 2')
            return
        generatedNumber = random.randrange(1, number, 1)
        await ctx.send(generatedNumber)
        await cmd_print('debug', f'Generated number - {generatedNumber}')
await cmd_print('debug', f'Generated number - {generatedNumber}')
def setup(client):
client.add_cog(Roll(client))
| 23.304348 | 73 | 0.697761 | import discord
from discord.ext import commands
import random
import sys
sys.path.insert(1, '../functions')
from functions.cmd_print import cmd_print
class Roll(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def roll(self, ctx, *, number=100):
generatedNumber = random.randrange(1, number, 1)
await ctx.send(generatedNumber)
await cmd_print('debug', f'Generated number - {generatedNumber}')
def setup(client):
client.add_cog(Roll(client))
| 0 | 0 |
e3ae88a557bc39da15fb4fb98b0be693a0a7911c | 377 | py | Python | scripts/aggregation_test_script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | scripts/aggregation_test_script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | 3 | 2021-12-10T01:22:05.000Z | 2021-12-14T21:33:16.000Z | scripts/aggregation_test_script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | sentences = [['a', 'b', 'c'], ['a', 'd','e']]
def doThis(sentence):
    """Fold the items of *sentence* together with `+`.

    Returns None for an empty input; otherwise the running `+` of all
    items (string concatenation, numeric sum, list extension, ...).
    """
    acc = None
    for item in sentence:
        acc = item if acc is None else acc + item
    return acc
default_val = ''
entity_embeddings_dict = {}
# NOTE: the `.get()` below reads the dict object bound BEFORE the
# comprehension runs (the empty one above), not the dict being built.
# With duplicate keys ('a' appears twice in `sentences`) the later entry
# simply overwrites the earlier one — values are NOT accumulated.
entity_embeddings_dict = {sentence[0]: doThis(sentence) + entity_embeddings_dict.get(sentence[0], default_val) \
			for sentence in sentences }
print(entity_embeddings_dict)
def doThis(sentence):
ret = None
for x in sentence:
if ret is None:
ret = x
else:
ret += x
return ret
default_val = ''
entity_embeddings_dict = {}
entity_embeddings_dict = {sentence[0]: doThis(sentence) + entity_embeddings_dict.get(sentence[0], default_val) \
for sentence in sentences }
print(entity_embeddings_dict) | 0 | 0 |
bd662dc3d2fdbefbc6e614c5d255badccde8b474 | 5,481 | py | Python | scripts/mod_grav/plot_limits.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | null | null | null | scripts/mod_grav/plot_limits.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | null | null | null | scripts/mod_grav/plot_limits.py | charlesblakemore/opt_lev_analysis | 704f174e9860907de349688ed82b5812bbb07c2d | [
"MIT"
] | 1 | 2019-11-27T19:10:25.000Z | 2019-11-27T19:10:25.000Z | import dill as pickle
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import configuration as config
import warnings
warnings.filterwarnings("ignore")
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
data_dirs = [#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \
#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz_elec-term', \
#\
#'/data/20180704/bead1/grav_data/shield', \
#'/data/20180704/bead1/grav_data/shield_1s_1h', \
#'/data/20180704/bead1/grav_data/shield2', \
#'/data/20180704/bead1/grav_data/shield3', \
#'/data/20180704/bead1/grav_data/shield4', \
'/data/20180704/no_bead/grav_data/shield', \
#\
#'/data/20180808/bead4/grav_data/shield1'
]
fit_type = 'Gaussian'
#fit_type = 'Planar'
p0_bead_dict = {'20180625': [19.0,40.0,20.0], \
'20180704': [18.7,40.0,20.0], \
'20180808': [18,40.0,23.0] \
}
load_agg = True
harms = [1,2,3,4,5,6]
#opt_ext = 'TEST'
opt_ext = '_6harm-full'
if fit_type == 'Gaussian':
data_ind = 2
err_ind = 4
if fit_type == 'Planar':
data_ind = 0
err_ind = 1
for ddir in data_dirs:
print()
parts = ddir.split('/')
date = parts[2]
p0_bead = p0_bead_dict[date]
nobead = ('no_bead' in parts) or ('nobead' in parts) or ('no-bead' in parts)
if nobead:
opt_ext += '_NO-BEAD'
agg_path = '/processed_data/aggdat/' + date + '_' + parts[-1] + opt_ext + '.agg'
alpha_arr_path = '/processed_data/alpha_arrs/' + date + '_' + parts[-1] + opt_ext + '.arr'
lambda_path = alpha_arr_path[:-4] + '_lambdas.arr'
if load_agg:
print(agg_path)
agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms)
agg_dat.load(agg_path)
agg_dat.reload_grav_funcs()
#agg_dat.fit_alpha_xyz_vs_alldim(weight_planar=False, plot=False, plot_hists=True)
alpha_arr = agg_dat.alpha_xyz_best_fit
lambdas = agg_dat.lambdas
np.save(open(alpha_arr_path, 'wb'), alpha_arr)
np.save(open(lambda_path, 'wb'), agg_dat.lambdas)
else:
alpha_arr = np.load(open(alpha_arr_path, 'rb'))
lambdas = np.load(open(lambda_path, 'rb'))
Ncomp = alpha_arr.shape[-2]
comp_colors = bu.get_color_map(Ncomp, cmap='viridis')
alpha_w = np.sum(alpha_arr[:,0:2,:,data_ind]*alpha_arr[:,0:2,:,err_ind]**(-2), axis=1) / \
np.sum(alpha_arr[:,0:2,:,err_ind]**(-2), axis=1)
#alpha_w = np.sum(alpha_arr[:,0:2,:,2], axis=1) * 0.5
errs_x = np.zeros_like(alpha_arr[:,0,0,0])
N = 0
for ind in range(Ncomp - 1):
errs_x += alpha_w[:,ind+1]**2
N += 1
errs_x = np.sqrt(errs_x / N)
sigma_alpha_w = 1.0 / np.sqrt( np.sum(alpha_arr[:,:2,:,3]**(-2), axis=1) )
N_w = np.sum(alpha_arr[:,:2,:,7], axis=1)
plt.figure(1)
if nobead:
plt.title(date + '_' + 'no-bead' + ': Result of %s Fitting' % fit_type, fontsize=16)
else:
plt.title(date + '_' + parts[-1] + ': Result of %s Fitting' % fit_type, fontsize=16)
plt.loglog(lambdas, np.abs(alpha_w[:,0]), lw=4, \
label='Template basis vector')
plt.loglog(lambdas, errs_x, '--', lw=2, \
label='Quadrature sum of other vectors')
plt.loglog(gu.limitdata[:,0], gu.limitdata[:,1], '--', label=gu.limitlab, \
linewidth=3, color='r')
plt.loglog(gu.limitdata2[:,0], gu.limitdata2[:,1], '--', label=gu.limitlab2, \
linewidth=3, color='k')
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-7, 1e-3)
plt.ylim(1e4, 1e14)
plt.legend()
plt.grid()
plt.show()
for ind in range(Ncomp):
fig2 = plt.figure(2)
plt.title("%s fit for Basis Vector: %i" % (fit_type, ind))
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,data_ind]), \
color=comp_colors[ind], ls='--', label='$\\alpha_x$')
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,err_ind]), \
color=comp_colors[ind], ls='--', label='$\sigma_{\\alpha_x}$', \
alpha=0.5)
plt.loglog(lambdas, np.abs(alpha_w[:,ind]), \
color=comp_colors[ind], ls='-', lw=3, label='Weighted mean')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,data_ind]), \
color=comp_colors[ind], ls='-.', label='$\\alpha_y$')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,err_ind]), \
color=comp_colors[ind], ls='-.', label='$\sigma_{\\alpha_y}$', \
alpha=0.5)
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-6, 1e-3)
plt.ylim(1e6, 1e15)
plt.legend()
plt.grid()
fig_title = '/home/charles/plots/' + date + '/' + parts[-1] + '/' \
+ date + '_' + parts[-1] + '_%s-fit_comp%i.png' % (fit_type, ind)
fig2.savefig(fig_title)
plt.close(fig2)
#plt.show()
#for fig_num in [1,2,3]:
# plt.figure(fig_num)
# plt.xlabel('Length Scale: $\lambda$ [m]')
# plt.ylabel('Strength: |$\\alpha$| [arb]')
# plt.legend()
# plt.grid()
#plt.show()
| 31.142045 | 94 | 0.560482 | import dill as pickle
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import configuration as config
import warnings
warnings.filterwarnings("ignore")
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
data_dirs = [#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \
#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz_elec-term', \
#\
#'/data/20180704/bead1/grav_data/shield', \
#'/data/20180704/bead1/grav_data/shield_1s_1h', \
#'/data/20180704/bead1/grav_data/shield2', \
#'/data/20180704/bead1/grav_data/shield3', \
#'/data/20180704/bead1/grav_data/shield4', \
'/data/20180704/no_bead/grav_data/shield', \
#\
#'/data/20180808/bead4/grav_data/shield1'
]
fit_type = 'Gaussian'
#fit_type = 'Planar'
p0_bead_dict = {'20180625': [19.0,40.0,20.0], \
'20180704': [18.7,40.0,20.0], \
'20180808': [18,40.0,23.0] \
}
load_agg = True
harms = [1,2,3,4,5,6]
#opt_ext = 'TEST'
opt_ext = '_6harm-full'
if fit_type == 'Gaussian':
data_ind = 2
err_ind = 4
if fit_type == 'Planar':
data_ind = 0
err_ind = 1
for ddir in data_dirs:
print()
parts = ddir.split('/')
date = parts[2]
p0_bead = p0_bead_dict[date]
nobead = ('no_bead' in parts) or ('nobead' in parts) or ('no-bead' in parts)
if nobead:
opt_ext += '_NO-BEAD'
agg_path = '/processed_data/aggdat/' + date + '_' + parts[-1] + opt_ext + '.agg'
alpha_arr_path = '/processed_data/alpha_arrs/' + date + '_' + parts[-1] + opt_ext + '.arr'
lambda_path = alpha_arr_path[:-4] + '_lambdas.arr'
if load_agg:
print(agg_path)
agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms)
agg_dat.load(agg_path)
agg_dat.reload_grav_funcs()
#agg_dat.fit_alpha_xyz_vs_alldim(weight_planar=False, plot=False, plot_hists=True)
alpha_arr = agg_dat.alpha_xyz_best_fit
lambdas = agg_dat.lambdas
np.save(open(alpha_arr_path, 'wb'), alpha_arr)
np.save(open(lambda_path, 'wb'), agg_dat.lambdas)
else:
alpha_arr = np.load(open(alpha_arr_path, 'rb'))
lambdas = np.load(open(lambda_path, 'rb'))
Ncomp = alpha_arr.shape[-2]
comp_colors = bu.get_color_map(Ncomp, cmap='viridis')
alpha_w = np.sum(alpha_arr[:,0:2,:,data_ind]*alpha_arr[:,0:2,:,err_ind]**(-2), axis=1) / \
np.sum(alpha_arr[:,0:2,:,err_ind]**(-2), axis=1)
#alpha_w = np.sum(alpha_arr[:,0:2,:,2], axis=1) * 0.5
errs_x = np.zeros_like(alpha_arr[:,0,0,0])
N = 0
for ind in range(Ncomp - 1):
errs_x += alpha_w[:,ind+1]**2
N += 1
errs_x = np.sqrt(errs_x / N)
sigma_alpha_w = 1.0 / np.sqrt( np.sum(alpha_arr[:,:2,:,3]**(-2), axis=1) )
N_w = np.sum(alpha_arr[:,:2,:,7], axis=1)
plt.figure(1)
if nobead:
plt.title(date + '_' + 'no-bead' + ': Result of %s Fitting' % fit_type, fontsize=16)
else:
plt.title(date + '_' + parts[-1] + ': Result of %s Fitting' % fit_type, fontsize=16)
plt.loglog(lambdas, np.abs(alpha_w[:,0]), lw=4, \
label='Template basis vector')
plt.loglog(lambdas, errs_x, '--', lw=2, \
label='Quadrature sum of other vectors')
plt.loglog(gu.limitdata[:,0], gu.limitdata[:,1], '--', label=gu.limitlab, \
linewidth=3, color='r')
plt.loglog(gu.limitdata2[:,0], gu.limitdata2[:,1], '--', label=gu.limitlab2, \
linewidth=3, color='k')
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-7, 1e-3)
plt.ylim(1e4, 1e14)
plt.legend()
plt.grid()
plt.show()
for ind in range(Ncomp):
fig2 = plt.figure(2)
plt.title("%s fit for Basis Vector: %i" % (fit_type, ind))
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,data_ind]), \
color=comp_colors[ind], ls='--', label='$\\alpha_x$')
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,err_ind]), \
color=comp_colors[ind], ls='--', label='$\sigma_{\\alpha_x}$', \
alpha=0.5)
plt.loglog(lambdas, np.abs(alpha_w[:,ind]), \
color=comp_colors[ind], ls='-', lw=3, label='Weighted mean')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,data_ind]), \
color=comp_colors[ind], ls='-.', label='$\\alpha_y$')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,err_ind]), \
color=comp_colors[ind], ls='-.', label='$\sigma_{\\alpha_y}$', \
alpha=0.5)
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-6, 1e-3)
plt.ylim(1e6, 1e15)
plt.legend()
plt.grid()
fig_title = '/home/charles/plots/' + date + '/' + parts[-1] + '/' \
+ date + '_' + parts[-1] + '_%s-fit_comp%i.png' % (fit_type, ind)
fig2.savefig(fig_title)
plt.close(fig2)
#plt.show()
#for fig_num in [1,2,3]:
# plt.figure(fig_num)
# plt.xlabel('Length Scale: $\lambda$ [m]')
# plt.ylabel('Strength: |$\\alpha$| [arb]')
# plt.legend()
# plt.grid()
#plt.show()
| 0 | 0 |
6a7f23c80626351657198207fd0b08ed99b96da9 | 1,095 | py | Python | Intro to us.py | DarshPro/Minecraft-in-python | 3a52a60e9a36107252aafc971b3a32fc84b135df | [
"MIT"
] | 1 | 2021-03-04T15:42:36.000Z | 2021-03-04T15:42:36.000Z | Intro to us.py | DarshPro/Minecraft-in-python | 3a52a60e9a36107252aafc971b3a32fc84b135df | [
"MIT"
] | null | null | null | Intro to us.py | DarshPro/Minecraft-in-python | 3a52a60e9a36107252aafc971b3a32fc84b135df | [
"MIT"
] | null | null | null | from ursina import *
# Test Cube
class Test_cube(Entity):
    """A reference cube entity: white-cube texture, rotated 45 degrees on all axes."""
    def __init__(self):
        super().__init__(
            parent = scene,
            model = 'cube',
            texture = 'white_cube',
            rotation = Vec3(45,45,45))
# Test button
class Test_button(Button):
    """A clickable brick-textured cube button that plays the punch sound on click.

    NOTE(review): the `scale` parameter is accepted but never used -- the button
    keeps the Button default scale. Pass it to super().__init__ if 0.1 was intended.
    """
    def __init__(self,scale = 0.1):
        super().__init__(
            parent = scene,
            model = 'cube',
            texture = 'brick',
            color = color.white,
            highlight_color = color.red,
            pressed_color = color.lime)
    def input(self,key):
        # Only react while the mouse cursor is over this button.
        if self.hovered:
            if key == 'left mouse down':
                punch_sound.play()  # module-level Audio defined near the bottom of the script
# update is run every frame
def update():
    """Ursina per-frame callback: while 'a' is held, move `cube` left at 1 unit/sec."""
    #print('test')
    if held_keys['a']:
        cube.x -= 1 * time.dt
# basic window
app = Ursina()
# basic cube
cube = Entity(model='quad', color=color.orange, scale = (2,5), position = (5,1))
# quad with texture
#sans_image = load_texture('Sans.png')
#sans = Entity(model = 'quad', texture = sans_image)
#sans = Entity(model = 'quad', texture = 'Sans.png')
# creating a block properly
test = Test_cube()
# creating a button
btn = Test_button()
# click sound for Test_button.input; loaded once, only played on click
punch_sound = Audio('assets/punch', loop=False, autoplay=False)
# start the game loop (blocks until the window is closed)
app.run()
| 20.277778 | 80 | 0.660274 | from ursina import *
# Test Cube
class Test_cube(Entity):
def __init__(self):
super().__init__(
parent = scene,
model = 'cube',
texture = 'white_cube',
rotation = Vec3(45,45,45))
# Test button
class Test_button(Button):
def __init__(self,scale = 0.1):
super().__init__(
parent = scene,
model = 'cube',
texture = 'brick',
color = color.white,
highlight_color = color.red,
pressed_color = color.lime)
def input(self,key):
if self.hovered:
if key == 'left mouse down':
punch_sound.play()
# update is run every frame
def update():
#print('test')
if held_keys['a']:
cube.x -= 1 * time.dt
# basic window
app = Ursina()
# basic cube
cube = Entity(model='quad', color=color.orange, scale = (2,5), position = (5,1))
# quad with texture
#sans_image = load_texture('Sans.png')
#sans = Entity(model = 'quad', texture = sans_image)
#sans = Entity(model = 'quad', texture = 'Sans.png')
# creating a block properly
test = Test_cube()
# creating a button
btn = Test_button()
punch_sound = Audio('assets/punch', loop=False, autoplay=False)
app.run()
| 0 | 0 |
dc32ad09fb9c4eae33279bcdb6ba48542e65e16b | 20,087 | py | Python | Python/utils/rvs/expressions.py | sgiguere/Fairness-Gaurantees-under-Demographic-Shift | d081307d34cde75ca74e07ddbe059e8273095aee | [
"MIT"
] | 1 | 2022-03-22T20:13:02.000Z | 2022-03-22T20:13:02.000Z | Python/utils/rvs/expressions.py | sgiguere/Fairness-Gaurantees-under-Demographic-Shift | d081307d34cde75ca74e07ddbe059e8273095aee | [
"MIT"
] | null | null | null | Python/utils/rvs/expressions.py | sgiguere/Fairness-Gaurantees-under-Demographic-Shift | d081307d34cde75ca74e07ddbe059e8273095aee | [
"MIT"
] | null | null | null | import numpy as np
from copy import copy, deepcopy
from utils.rvs.utils import COMPARATOR_NEGATIONS
def get_constant_name(counter={'c':0}):
	"""Return the next auto-generated constant name: 'c0', 'c1', ...

	The mutable default argument is deliberate: it acts as a module-wide
	counter that persists across calls.
	"""
	index = counter['c']
	counter['c'] = index + 1
	return f'c{index}'
def get_variable_name(counter={'v':0}):
	"""Return the next auto-generated variable name: 'v0', 'v1', ...

	The mutable default argument is deliberate: it acts as a module-wide
	counter that persists across calls.
	"""
	index = counter['v']
	counter['v'] = index + 1
	return f'v{index}'
def get_expression_name(counter={'e':0}):
	"""Return the next auto-generated expression name: 'e0', 'e1', ...

	The mutable default argument is deliberate: it acts as a module-wide
	counter, mirroring get_constant_name / get_variable_name.

	Bug fix: the name was previously built from counter['c'], which does not
	exist in the default counter ({'e': 0}) and raised KeyError on every call.
	"""
	name = 'e%d' % counter['e']
	counter['e'] += 1
	return name
class Expression():
	"""Base class for all expression-tree nodes.

	Subclasses keep their child expressions in self._terms; trivial_bounds
	is a cache slot filled in externally (initialized to None here).
	"""
	def __init__(self):
		self.trivial_bounds = None
		self._terms = []
	def __eq__(self, E):
		# Equal iff E is an instance of this node's class and the ordered
		# term lists match. Bug fix: zip() truncates to the shorter list,
		# so term lists of different lengths with a matching prefix used to
		# compare equal; compare lengths explicitly first.
		if not isinstance(E, self.__class__):
			return False
		if len(self._terms) != len(E._terms):
			return False
		return all(T == _T for (T, _T) in zip(self._terms, E._terms))
class CommutativeExpression(Expression):
	"""Expression whose operands may appear in any order (sum, product, ...).

	Equality is multiset equality of the term lists.
	"""
	def __init__(self):
		super().__init__()
	def __eq__(self, E):
		if not isinstance(E, self.__class__):
			return False
		# Match terms as a multiset: tick each of our terms off E's list.
		unmatched = list(E._terms)
		for term in self._terms:
			if term in unmatched:
				unmatched.remove(term)
			else:
				return False
		return not unmatched
class NoncommutativeExpression(Expression):
	"""Expression whose operand order matters (e.g. negation, fractions).

	Equality requires the same class and identical ordered terms.
	"""
	def __init__(self):
		super().__init__()
	def __eq__(self, E):
		# Bug fix: zip() silently truncates to the shorter list, so term
		# lists of different lengths with a matching prefix used to compare
		# equal; compare lengths explicitly first. (All current subclasses
		# have fixed arity, so this is defensive.)
		if not isinstance(E, self.__class__):
			return False
		if len(self._terms) != len(E._terms):
			return False
		return all(T == _T for (T, _T) in zip(self._terms, E._terms))
class SingleTermExpression():
	"""Marker mixin: the expression renders as a single term, so formatting
	code (e.g. FractionExpression.__repr__) may omit parentheses around it."""
	pass
class SampleSet(Expression):
	# NOTE(review): dead code -- this definition is shadowed by the
	# SampleSet(Expression, SingleTermExpression) class defined later in
	# this module, which is the one in effect at import time.
	def __init__(self, expression, condition=None):
		super().__init__()
		self.expression = expression
		self.condition = condition
class ConstantExpression(Expression, SingleTermExpression):
	"""Leaf node holding a literal numeric value.

	The `name` argument is accepted for interface symmetry but ignored;
	instances always receive a fresh auto-generated name ('c0', 'c1', ...).
	Constants compare equal by value alone.
	"""
	def __init__(self, name, value):
		super().__init__()
		self.name = get_constant_name()
		self.value = value
	def __repr__(self):
		return str(self.value)
	def __eq__(self, E):
		return isinstance(E, self.__class__) and self.value == E.value
class VariableExpression(Expression, SingleTermExpression):
	"""Leaf node referring to a named variable.

	A leading '#' marks an index variable: the '#' is stripped from the
	stored name and _special is set to 'index'; otherwise _special is None.
	"""
	def __init__(self, name):
		super().__init__()
		is_index = name.startswith('#')
		self.name = name[1:] if is_index else name
		self._special = 'index' if is_index else None
	def __repr__(self):
		return self.name
	def __eq__(self, E):
		if not isinstance(E, self.__class__):
			return False
		return (self.name, self._special) == (E.name, E._special)
class SampleSet(Expression, SingleTermExpression):
	"""Samples of `expression`, optionally restricted by `condition`.

	Rendered as '[expr]' or '[expr|cond]'. This definition replaces the
	earlier SampleSet class above.
	"""
	def __init__(self, expression, condition=None):
		super().__init__()
		inner = '%r' % expression
		if condition is not None:
			inner = '%s|%r' % (inner, condition)
		self.name = '[%s]' % inner
		self.expression = expression
		self.condition = condition
	def __repr__(self):
		return self.name
	def __eq__(self, E):
		if not isinstance(E, self.__class__):
			return False
		return self.expression == E.expression and self.condition == E.condition
class ExpectedValue(Expression, SingleTermExpression):
	"""Expectation of a sample set.

	When `is_func`/`is_expr` are given, the name records a wrapper function
	applied inside the expectation (the naming suggests importance sampling
	-- confirm with callers).
	"""
	def __init__(self, sample_set, is_func=None, is_expr=None):
		super().__init__()
		if is_func is None:
			self.name = 'E%s' % sample_set.name
		else:
			self.name = 'E{%s(%s)}%s' % (is_func, is_expr.name, sample_set.name)
		# self.name = 'E{%s}%s' % () + sample_set.name
		self.sample_set = sample_set
		self._is_func = is_func
		self._is_expr = is_expr
	def __repr__(self):
		return self.name
	def __eq__(self, E):
		if not(isinstance(E,self.__class__)):
			return False
		if not(self.sample_set == E.sample_set):
			return False
		# Two plain expectations over equal sample sets are equal; otherwise
		# the wrapper function and its argument expression must match too.
		if self._is_func is None and E._is_func is None:
			return True
		else:
			return (self._is_func == E._is_func) and (self._is_expr == E._is_expr)
class ComparatorExpression(VariableExpression):
	"""A comparison 'term1 comp term2', modeled as a variable whose name is
	the rendered comparison string."""
	def __init__(self, term1, comp, term2):
		rendered = f'{term1!r} {comp} {term2!r}'
		super().__init__(rendered)
		self.variable = term1
		self.comparator = comp
		self.value = term2
class NegativeExpression(NoncommutativeExpression, SingleTermExpression):
	"""Unary negation of a single wrapped expression (_terms[0])."""
	def __init__(self, expression):
		super().__init__()
		self._terms = [expression]
	def __repr__(self):
		# Sums need parentheses: -(a+b), not -a+b.
		if isinstance(self._terms[0], SumExpression):
			return '-(%r)' % self._terms[0]
		return '-%r' % self._terms[0]
	def __eq__(self, E):
		if isinstance(E,self.__class__) and (self._terms[0]==E._terms[0]):
			return True
		# Defer to SumExpression.__eq__, which knows how to compare a sum
		# against the negation of a term-wise-negated sum.
		if isinstance(E, SumExpression):
			return E == self
		return False
class NotExpression(NoncommutativeExpression, SingleTermExpression):
	"""Logical negation of a single wrapped expression, rendered '~(expr)'."""
	def __init__(self, expression):
		super().__init__()
		self._terms = [expression]
	def __repr__(self):
		return f'~({self._terms[0]!r})'
	def __eq__(self, E):
		return isinstance(E, self.__class__) and self._terms[0] == E._terms[0]
class AbsExpression(NoncommutativeExpression, SingleTermExpression):
	"""Absolute value of a single wrapped expression, rendered '|expr|'."""
	def __init__(self, expression):
		super().__init__()
		self._terms = [expression]
	def __repr__(self):
		return f'|{self._terms[0]!r}|'
	def __eq__(self, E):
		return isinstance(E, self.__class__) and self._terms[0] == E._terms[0]
class FractionExpression(NoncommutativeExpression):
	"""Ratio of two expressions: _terms[0] / _terms[1]."""
	def __init__(self, num, den):
		super().__init__()
		self._terms = [num, den]
	def __repr__(self):
		num, den = self._terms
		# Parenthesize a sum numerator, and any multi-term denominator.
		if isinstance(num, SumExpression):
			num_str = '(%r)' % num
		else:
			num_str = '%r' % num
		if isinstance(den, SingleTermExpression):
			den_str = '%r' % den
		else:
			den_str = '(%r)' % den
		return num_str + '/' + den_str
	def __eq__(self, E):
		if not isinstance(E, self.__class__):
			return False
		return self._terms[0] == E._terms[0] and self._terms[1] == E._terms[1]
class SumExpression(CommutativeExpression):
	"""Sum of a list of expressions; equality is order-insensitive."""
	def __init__(self, expressions):
		super().__init__()
		self._terms = list(expressions)
	def __repr__(self):
		string = '%r' % self._terms[0]
		# Negative terms already print their own '-'; others get a '+'.
		for t in self._terms[1:]:
			string += '%r'%t if isinstance(t, NegativeExpression) else '+%r'%t
		return string
	def __eq__(self, E):
		if super().__eq__(E):
			return True
		# Also recognize E as equal when E is a NegativeExpression matching
		# the negation of this sum with every term negated (delegates back
		# to NegativeExpression.__eq__).
		if isinstance(E, NegativeExpression):
			return E == RVFuncs.negative(SumExpression([ RVFuncs.negative(e) for e in self._terms ]))
		return False
class AndExpression(CommutativeExpression):
	"""Logical conjunction of comparison expressions, rendered with ','."""
	def __init__(self, comparisons):
		super().__init__()
		self._terms = list(comparisons)
		self.name = ','.join(str(c.name) for c in comparisons)
	def __repr__(self):
		parts = []
		for t in self._terms:
			# Parenthesize nested disjunctions to keep precedence readable.
			parts.append('(%r)' % t if isinstance(t, OrExpression) else '%r' % t)
		return ','.join(parts)
	def __eq__(self, E):
		return super().__eq__(E)
class OrExpression(CommutativeExpression):
	"""Logical disjunction of comparison expressions, rendered with '||'."""
	def __init__(self, comparisons):
		super().__init__()
		self._terms = list(comparisons)
		self.name = '||'.join(str(c.name) for c in comparisons)
	def __repr__(self):
		parts = []
		for t in self._terms:
			# Parenthesize nested conjunctions to keep precedence readable.
			parts.append('(%r)' % t if isinstance(t, AndExpression) else '%r' % t)
		return '||'.join(parts)
	def __eq__(self, E):
		return super().__eq__(E)
class ProductExpression(CommutativeExpression):
	"""Product of a list of expressions; equality is order-insensitive."""
	def __init__(self, expressions):
		super().__init__()
		self._terms = list(expressions)
	def __repr__(self):
		first = self._terms[0]
		# Parenthesize sum factors; a lone leading sum needs no parentheses.
		if isinstance(first, SumExpression) and len(self._terms) > 1:
			pieces = ['(%r)' % first]
		else:
			pieces = ['%r' % first]
		for t in self._terms[1:]:
			pieces.append('(%r)' % t if isinstance(t, SumExpression) else '%r' % t)
		return '*'.join(pieces)
class MaxExpression(CommutativeExpression, SingleTermExpression):
	"""Maximum over a list of expressions, rendered 'MAX{a, b, ...}'."""
	def __init__(self, expressions):
		super().__init__()
		self._terms = list(expressions)
	def __repr__(self):
		inner = ', '.join(repr(t) for t in self._terms)
		return 'MAX{%s}' % inner
class MaxRecipExpression(Expression, SingleTermExpression):
	"""Represents max(e, 1/e) for the single wrapped expression e."""
	def __init__(self, expression):
		super().__init__()
		self._terms = [expression]
	def __repr__(self):
		# The reciprocal is constructed on the fly via RVFuncs for display.
		return 'MAX{%s, %s}' % (self._terms[0], RVFuncs.fraction(RVFuncs.constant(1), self._terms[0]))
class NANMaxExpression(CommutativeExpression, SingleTermExpression):
	"""Maximum over a list of expressions, rendered 'NANMAX{a, b, ...}'.

	The name suggests NaN-ignoring (np.nanmax-style) semantics in the
	evaluator -- confirm there; this node only stores and renders terms.
	"""
	def __init__(self, expressions):
		super().__init__()
		self._terms = list(expressions)
	def __repr__(self):
		inner = ', '.join(repr(t) for t in self._terms)
		return 'NANMAX{%s}' % inner
def safesum(a, b):
	"""Add a and b, returning nan for undefined cases.

	nan operands propagate, and inf + (-inf) (opposite-signed infinities)
	yields nan instead of relying on IEEE semantics.
	"""
	if np.isnan(a) or np.isnan(b):
		return np.nan
	if np.isinf(a) and np.isinf(b) and np.sign(a) != np.sign(b):
		return np.nan
	return a + b
def safeprod(a, b):
	"""Multiply a and b with nan propagation, defining inf * 0 as 0.0."""
	if np.isnan(a) or np.isnan(b):
		return np.nan
	if (np.isinf(a) and b == 0) or (np.isinf(b) and a == 0):
		return 0.0
	return a * b
def safediv(a, b):
	"""Divide a by b, returning nan for undefined cases: nan operands,
	inf/inf (either signs), and any division by zero."""
	if np.isnan(a) or np.isnan(b):
		return np.nan
	if np.isinf(a) and np.isinf(b):
		return np.nan
	if b == 0:
		return np.nan
	return a / b
def parse_value(value):
	"""Coerce a numeric string to int (preferred) or float.

	Non-string inputs pass through unchanged. A non-numeric string raises
	ValueError (from the float() fallback).
	"""
	if not isinstance(value, str):
		return value
	try:
		return int(value)
	except ValueError:
		return float(value)
class RVFuncs():
	"""Factory ("smart constructor") namespace for expression nodes.

	Each method builds a node of the expression tree, applying algebraic
	simplifications as it goes (constant folding, sign reduction, factoring
	of shared terms), so returned trees are already in a reduced form.

	Fixes in this revision: removed a leftover debug print() inside
	fraction(), and added the missing @staticmethod on nanmax() for
	consistency with every sibling method.
	"""
	@staticmethod
	def constant(value_raw):
		"""Wrap a literal (or numeric string) in a ConstantExpression."""
		value = parse_value(value_raw)
		return ConstantExpression('c', value)
	@staticmethod
	def variable(name):
		return VariableExpression(name)
	@staticmethod
	def comparator_variable(term1, comp, term2):
		return ComparatorExpression(term1, comp, term2)
	@staticmethod
	def sample_set(variable, condition=None):
		return SampleSet(variable, condition)
	@staticmethod
	def expected_value(sampleset, is_func=None, is_expr=None):
		return ExpectedValue(sampleset, is_func=is_func, is_expr=is_expr)
	@staticmethod
	def negative(e):
		''' Returns the negative of <e>, reducing nested negatives. '''
		n_negatives = 1
		while isinstance(e, NegativeExpression):
			e = e._terms[0]
			n_negatives += 1
		if isinstance(e, ConstantExpression):
			return RVFuncs.constant(-e.value if (n_negatives % 2 == 1) else e.value)
		return NegativeExpression(e) if (n_negatives % 2 == 1) else e
	@staticmethod
	def logical_not(e):
		"""Logical negation: cancels double-nots and pushes the negation
		into comparators via COMPARATOR_NEGATIONS (e.g. NOT(a<b) -> a>=b)."""
		n_nots = 1
		while isinstance(e, NotExpression):
			e = e._terms[0]
			n_nots += 1
		if (n_nots % 2 == 0):
			return e
		if isinstance(e, ComparatorExpression):
			return ComparatorExpression(e.variable, COMPARATOR_NEGATIONS[e.comparator], e.value)
		return NotExpression(e)
	@staticmethod
	def sum(*expressions):
		''' Returns the sum of <expressions>, factoring out constants and shared factors. '''
		# Flatten terms that are sums themselves
		exps = []
		for e in expressions:
			if isinstance(e, SumExpression):
				exps.extend(e._terms)
			else:
				exps.append(e)
		expressions = exps
		# Fold all constant terms (including negated constants) into one
		cval = 0
		exps = []
		for e in expressions:
			if isinstance(e, ConstantExpression):
				cval += e.value
			elif isinstance(e, NegativeExpression) and isinstance(e._terms[0], ConstantExpression):
				cval -= e._terms[0].value
			else:
				exps.append(e)
		if cval != 0 or len(exps) == 0:
			const = RVFuncs.constant(cval)
			exps = [ const, *exps]
		expressions = exps
		if len(expressions) == 1:
			return expressions[0]
		# Check if all terms share a common denominator and factor it out
		def split_as_fraction(e):
			# Return [numerator, denominator-or-None] for e.
			if isinstance(e, FractionExpression):
				return [e._terms[0], e._terms[1]]
			elif isinstance(e, NegativeExpression) and isinstance(e._terms[0],FractionExpression):
				return [RVFuncs.negative(e._terms[0]._terms[0]), e._terms[0]._terms[1]]
			return [e, None]
		nums, dens = zip(*[ split_as_fraction(e) for e in exps ])
		if all([ not(dens[0] is None) and d==dens[0] for d in dens ]):
			exps = nums
			common_den = dens[0]
		else:
			common_den = None
		# Check if any terms have shared product factors and factor them out
		def extract_unsigned_terms(e):
			# Collect the product factors of e, ignoring sign and denominator.
			if isinstance(e, NegativeExpression) or isinstance(e, FractionExpression):
				return extract_unsigned_terms(e._terms[0])
			if isinstance(e, ProductExpression):
				return e._terms
			return [e]
		def remove_terms(e, terms):
			# Rebuild e with <terms> divided out of its product factors.
			if isinstance(e, NegativeExpression):
				return RVFuncs.negative(remove_terms(e._terms[0], terms))
			if isinstance(e, FractionExpression):
				return RVFuncs.fraction(remove_terms(e._terms[0], terms), e._terms[1])
			if isinstance(e, ProductExpression):
				remaining = e._terms.copy()
				for t in terms:
					remaining.remove(t)
				return RVFuncs.product(*remaining) if len(remaining) > 0 else RVFuncs.constant(1)
			return RVFuncs.constant(1) if len(terms) > 0 else e
		has_negative = [ isinstance(e,NegativeExpression) for e in exps ]
		unsigned_terms = [ extract_unsigned_terms(e) for e in exps ]
		unsigned_terms_tmp = deepcopy(unsigned_terms)
		shared_terms = []
		for st in unsigned_terms[0]:
			if isinstance(st, ConstantExpression) and (st.value == 1):
				continue
			if all([ (st in terms) for terms in unsigned_terms_tmp[1:] ]):
				shared_terms.append(st)
				for terms in unsigned_terms_tmp:
					terms.remove(st)
		if len(shared_terms) > 0:
			remainder = RVFuncs.sum(*[ remove_terms(e, shared_terms) for e in exps ])
		else:
			remainder = SumExpression(exps)
		# Return the product of the common factor and the remainder sum
		if len(shared_terms) > 0 and common_den is None:
			common_factor = RVFuncs.product(*shared_terms)
			return RVFuncs.product(common_factor, remainder)
		elif len(shared_terms) > 0:
			common_factor = RVFuncs.fraction(RVFuncs.product(*shared_terms), common_den)
			return RVFuncs.product(common_factor, remainder)
		return remainder
	@staticmethod
	def diff(e0, e1):
		"""e0 - e1, implemented as e0 + (-e1)."""
		return RVFuncs.sum(e0, RVFuncs.negative(e1))
	@staticmethod
	def max(*expressions):
		"""Max over expressions, flattening nested MaxExpressions and
		reducing the two-argument special cases Max(E,1/E) and Max(E,-E)."""
		if len(expressions) == 1:
			return expressions[0]
		exps = []
		for e in expressions:
			if isinstance(e, MaxExpression):
				exps.extend(e._terms)
			else:
				exps.append(e)
		if len(expressions) == 2:
			e1, e2 = expressions
			# If the max *happens* to be Max(E, 1/E) for some E, reduce to a MaxRecip
			if e1 == RVFuncs.fraction(RVFuncs.constant(1), e2):
				return MaxRecipExpression(e1)
			# If the max *happens* to be Max(E, -E) for some E, reduce to Abs
			elif e1 == RVFuncs.negative(e2):
				return AbsExpression(e1)
		return MaxExpression(exps)
	@staticmethod
	def nanmax(*expressions):
		"""NaN-variant max; flattens nested MaxExpressions.
		(Fix: was missing @staticmethod, unlike every sibling factory.)"""
		if len(expressions) == 1:
			return expressions[0]
		exps = []
		for e in expressions:
			if isinstance(e, MaxExpression):
				exps.extend(e._terms)
			else:
				exps.append(e)
		return NANMaxExpression(exps)
	@staticmethod
	def min(*expressions):
		"""Min over expressions via min(xs) == -max(-x for x in xs)."""
		if len(expressions) == 1:
			return expressions[0]
		exps = []
		for e in expressions:
			# NOTE(review): flattening MaxExpression arguments here changes
			# the value (min(max(a,b), c) != min(a,b,c)) -- confirm intent.
			if isinstance(e, MaxExpression):
				exps.extend(e._terms)
			else:
				exps.append(e)
		# Convert to a negative max
		exps = [ RVFuncs.negative(e) for e in exps ]
		return RVFuncs.negative(RVFuncs.max(*exps))
	@staticmethod
	def abs(e):
		"""Absolute value; a leading negation is stripped since |-e| == |e|."""
		if isinstance(e, NegativeExpression):
			e = e._terms[0]
		return AbsExpression(e)
	@staticmethod
	def pow(e, c):
		# TODO: exponent <c> is currently ignored (no power node type
		# exists); returns the base unchanged.
		return e # fix
	@staticmethod
	def logical_and(expressions):
		"""Conjunction, flattening nested AndExpressions."""
		events = []
		for e in expressions:
			if isinstance(e, AndExpression):
				events.extend(e._terms)
			else:
				events.append(e)
		return AndExpression(events)
	@staticmethod
	def logical_or(expressions):
		"""Disjunction, flattening nested OrExpressions."""
		events = []
		for e in expressions:
			if isinstance(e, OrExpression):
				events.extend(e._terms)
			else:
				events.append(e)
		return OrExpression(events)
	@staticmethod
	def product(*expressions):
		"""Product of expressions with sign/constant folding and
		fraction-aware aggregation."""
		# Strip negatives from input expressions
		n_negatives = 0
		exps = []
		for e in expressions:
			if isinstance(e, NegativeExpression):
				exps.append(e._terms[0])
				n_negatives += 1
			else:
				exps.append(e)
		expressions = exps
		# Remove any input expressions that are a constant 1
		exps = []
		for e in expressions:
			if not(isinstance(e, ConstantExpression) and (e.value == 1)):
				exps.append(e)
		expressions = exps
		# # If there is only one input expression remaining, just return it
		# if len(expressions) == 1:
		# 	return RVFuncs.negative(expressions[0]) if n_negatives % 2 == 1 else expressions[0]
		# If any of the input expressions are a constant equal to 0, return 0
		if any([ isinstance(e,ConstantExpression) and (e.value==0) for e in expressions ]):
			return RVFuncs.constant(0)
		# Aggregate input expressions that are products or fractions
		num_exps = []
		den_exps = []
		for e in expressions:
			if isinstance(e, ProductExpression):
				num_exps.extend(e._terms)
			elif isinstance(e, FractionExpression):
				num_exps.append(e._terms[0])
				den_exps.append(e._terms[1])
			else:
				num_exps.append(e)
		if len(den_exps) > 0:
			# We have a fraction
			num = RVFuncs.product(*num_exps) if len(num_exps) > 1 else num_exps[0]
			den = RVFuncs.product(*den_exps) if len(den_exps) > 1 else den_exps[0]
			expr = RVFuncs.fraction(num, den)
		else:
			# We have a non-fraction product
			# Fold all constant factors into one value (inf*0-safe)
			cval = 1
			_exps = []
			for e in num_exps:
				if isinstance(e, ConstantExpression):
					cval = safeprod(cval, e.value)
				else:
					_exps.append(e)
			if len(_exps) == 0:
				expr = RVFuncs.constant(cval)
			elif cval != 1:
				_exps.append(RVFuncs.constant(cval))
				expr = ProductExpression(_exps)
			elif len(_exps) > 1:
				expr = ProductExpression(_exps)
			else:
				expr = _exps[0]
		return expr if (n_negatives % 2 == 0) else RVFuncs.negative(expr)
	@staticmethod
	def fraction(num, den):
		''' Process the numerator and denominator to produce a reduced expression of one of the following forms, in this priority:
				Constant or Variable
				Negative(Product(PositiveConstant, Fraction))
				Product(PositiveConstant, Fraction)
				Negative(Fraction).
			Assumes that num and den are already processed into Negative(Product(Constant, Expression)) form. '''
		# Simplify negative signs in the numerator/denominator
		n_negatives = 0
		if isinstance(num, NegativeExpression):
			num = num._terms[0]
			n_negatives += 1
		if isinstance(den, NegativeExpression):
			den = den._terms[0]
			n_negatives += 1
		# Remove any constants in front of the numerator or denominator
		num_val = 1
		den_val = 1
		if isinstance(num, ProductExpression) and isinstance(num._terms[0], ConstantExpression):
			num_val = num._terms[0].value
			num = RVFuncs.product(*num._terms[1:]) if len(num._terms) > 1 else RVFuncs.constant(1)
		if isinstance(den, ProductExpression) and isinstance(den._terms[0], ConstantExpression):
			den_val = den._terms[0].value
			den = RVFuncs.product(*den._terms[1:]) if len(den._terms) > 1 else RVFuncs.constant(1)
		cval = safediv(num_val, den_val)
		if cval < 0:
			n_negatives += 1
			cval = -cval
		# Aggregate terms in the numerator/denominator if one or both are already a fraction
		if isinstance(num, FractionExpression) and isinstance(den, FractionExpression):
			_num = RVFuncs.product(num._terms[0], den._terms[1])
			_den = RVFuncs.product(num._terms[1], den._terms[0])
			num, den = _num, _den
		elif isinstance(num, FractionExpression):
			_num = num._terms[0]
			_den = RVFuncs.product(num._terms[1], den)
			num, den = _num, _den
		elif isinstance(den, FractionExpression):
			_num = RVFuncs.product(den._terms[1], num)
			_den = den._terms[0]
			num, den = _num, _den
		# Remove terms in products that are present in both the numerator and denominator
		expr = None
		if num == den:
			expr = RVFuncs.constant(1)
		elif isinstance(den, ConstantExpression) and den.value == 1:
			expr = num
		elif isinstance(num, ProductExpression) and isinstance(den, ProductExpression):
			nterms, dterms = copy(num._terms), copy(den._terms)
			for term in nterms:
				if term in den._terms:
					num._terms.remove(term)
					den._terms.remove(term)
			num = RVFuncs.constant(1) if len(num._terms) == 0 else RVFuncs.product(*num._terms)
			den = RVFuncs.constant(1) if len(den._terms) == 0 else RVFuncs.product(*den._terms)
			if isinstance(num, ConstantExpression) and isinstance(den, ConstantExpression):
				expr = RVFuncs.constant(safediv(num.value, den.value))
		elif isinstance(num, ProductExpression) and isinstance(den, SingleTermExpression):
			if den in num._terms:
				num._terms.remove(den)
				expr = RVFuncs.product(*num._terms)
		elif isinstance(den, ProductExpression) and isinstance(num, SingleTermExpression):
			if num in den._terms:
				den._terms.remove(num)
				den = RVFuncs.product(*den._terms)
				if isinstance(den, ConstantExpression):
					# (debug print removed here)
					expr = RVFuncs.constant(safediv(1,den.value))
				else:
					expr = FractionExpression(RVFuncs.constant(1), RVFuncs.product(*den._terms))
		if expr is None:
			expr = FractionExpression(num, den)
		# Add a constant scaling factor if it is not 1
		if cval != 1:
			constant = RVFuncs.constant(cval)
			expr = RVFuncs.product(constant, expr)
		return RVFuncs.negative(expr) if n_negatives % 2 == 1 else expr
| 29.980597 | 129 | 0.697366 | import numpy as np
from copy import copy, deepcopy
from utils.rvs.utils import COMPARATOR_NEGATIONS
def get_constant_name(counter={'c':0}):
name = 'c%d' % counter['c']
counter['c'] += 1
return name
def get_variable_name(counter={'v':0}):
name = 'v%d' % counter['v']
counter['v'] += 1
return name
def get_expression_name(counter={'e':0}):
name = 'e%d' % counter['c']
counter['e'] += 1
return name
class Expression():
def __init__(self):
self.trivial_bounds = None
self._terms = []
def __eq__(self, E):
return isinstance(E, self.__class__) and all([ T==_T for (T,_T) in zip(self._terms,E._terms)])
class CommutativeExpression(Expression):
def __init__(self):
super().__init__()
def __eq__(self,E):
if not(isinstance(E, self.__class__)):
return False
terms, _terms = copy(self._terms), copy(E._terms)
try:
for term in terms:
_terms.remove(term)
except ValueError:
return False
return len(_terms) == 0
class NoncommutativeExpression(Expression):
def __init__(self):
super().__init__()
def __eq__(self,E):
return isinstance(E, self.__class__) and all([ T==_T for (T,_T) in zip(self._terms,E._terms) ])
class SingleTermExpression():
pass
class SampleSet(Expression):
def __init__(self, expression, condition=None):
super().__init__()
self.expression = expression
self.condition = condition
class ConstantExpression(Expression, SingleTermExpression):
def __init__(self, name, value):
super().__init__()
self.name = get_constant_name()
self.value = value
def __repr__(self):
return str(self.value)
def __eq__(self, E):
return isinstance(E,self.__class__) and self.value == E.value
class VariableExpression(Expression, SingleTermExpression):
def __init__(self, name):
super().__init__()
if name.startswith('#'):
self.name = name[1:]
self._special = 'index'
else:
self.name = name
self._special = None
def __repr__(self):
return self.name
def __eq__(self, E):
return isinstance(E,self.__class__) and self.name == E.name and self._special == E._special
class SampleSet(Expression, SingleTermExpression):
def __init__(self, expression, condition=None):
super().__init__()
name = '%r' % expression
if not(condition is None):
name += '|%r' % condition
self.name = '[%s]' % name
self.expression = expression
self.condition = condition
def __repr__(self):
return self.name
def __eq__(self, E):
return isinstance(E,self.__class__) and (self.expression == E.expression) and (self.condition == E.condition)
class ExpectedValue(Expression, SingleTermExpression):
def __init__(self, sample_set, is_func=None, is_expr=None):
super().__init__()
if is_func is None:
self.name = 'E%s' % sample_set.name
else:
self.name = 'E{%s(%s)}%s' % (is_func, is_expr.name, sample_set.name)
# self.name = 'E{%s}%s' % () + sample_set.name
self.sample_set = sample_set
self._is_func = is_func
self._is_expr = is_expr
def __repr__(self):
return self.name
def __eq__(self, E):
if not(isinstance(E,self.__class__)):
return False
if not(self.sample_set == E.sample_set):
return False
if self._is_func is None and E._is_func is None:
return True
else:
return (self._is_func == E._is_func) and (self._is_expr == E._is_expr)
class ComparatorExpression(VariableExpression):
def __init__(self, term1, comp, term2):
name = '%r %s %r' % (term1, comp, term2)
super().__init__(name)
self.variable = term1
self.comparator = comp
self.value = term2
class NegativeExpression(NoncommutativeExpression, SingleTermExpression):
    ''' Unary negation of a single expression. '''
    def __init__(self, expression):
        super().__init__()
        self._terms = [expression]
    def __repr__(self):
        # Parenthesize sums so the sign applies to the whole term.
        if isinstance(self._terms[0], SumExpression):
            return '-(%r)' % self._terms[0]
        return '-%r' % self._terms[0]
    def __eq__(self, E):
        if isinstance(E,self.__class__) and (self._terms[0]==E._terms[0]):
            return True
        # Delegate to SumExpression.__eq__, which knows how to compare a sum
        # against the negation of the sum of its negated terms.
        if isinstance(E, SumExpression):
            return E == self
        return False
class NotExpression(NoncommutativeExpression, SingleTermExpression):
    ''' Logical negation of a single boolean expression, printed as "~(e)". '''
    def __init__(self, expression):
        super().__init__()
        self._terms = [expression]
    def __repr__(self):
        return '~(%r)' % self._terms[0]
    def __eq__(self, E):
        return isinstance(E, self.__class__) and self._terms[0] == E._terms[0]
class AbsExpression(NoncommutativeExpression, SingleTermExpression):
    ''' Absolute value of a single expression, printed as "|e|". '''
    def __init__(self, expression):
        super().__init__()
        self._terms = [expression]
    def __repr__(self):
        return '|%r|' % self._terms[0]
    def __eq__(self, E):
        return isinstance(E, self.__class__) and self._terms[0] == E._terms[0]
class FractionExpression(NoncommutativeExpression):
    ''' Ratio of two expressions; _terms holds [numerator, denominator]. '''
    def __init__(self, num, den):
        super().__init__()
        self._terms = [num, den]
    def __repr__(self):
        num, den = self._terms
        # Parenthesize sum numerators and any multi-term denominator.
        num_str = '(%r)'%num if isinstance(num, SumExpression) else '%r'%num
        den_str = '%r'%den if isinstance(den, SingleTermExpression) else '(%r)'%den
        return '%s/%s' % (num_str, den_str)
    def __eq__(self, E):
        # Ordered comparison: numerators and denominators must match in place.
        return isinstance(E, self.__class__) and (self._terms[0]==E._terms[0]) and (self._terms[1]==E._terms[1])
class SumExpression(CommutativeExpression):
    ''' Sum of expressions; negative terms print their own leading '-'. '''
    def __init__(self, expressions):
        super().__init__()
        self._terms = list(expressions)
    def __repr__(self):
        string = '%r' % self._terms[0]
        for t in self._terms[1:]:
            # Negative terms already carry a '-' so no '+' is inserted.
            string += '%r'%t if isinstance(t, NegativeExpression) else '+%r'%t
        return string
    def __eq__(self, E):
        if super().__eq__(E):
            return True
        # A sum also equals a NegativeExpression when negating every term
        # (via RVFuncs.negative, defined later in this module) matches it.
        if isinstance(E, NegativeExpression):
            return E == RVFuncs.negative(SumExpression([ RVFuncs.negative(e) for e in self._terms ]))
        return False
class AndExpression(CommutativeExpression):
    ''' Conjunction of boolean/comparison expressions, joined by commas. '''
    def __init__(self, comparisons):
        super().__init__()
        self._terms = list(comparisons)
        self.name = ','.join('%s' % c.name for c in comparisons)
    def __repr__(self):
        parts = []
        for t in self._terms:
            # Parenthesize nested disjunctions to keep precedence readable.
            template = '(%r)' if isinstance(t, OrExpression) else '%r'
            parts.append(template % t)
        return ','.join(parts)
    def __eq__(self, E):
        return super().__eq__(E)
class OrExpression(CommutativeExpression):
    ''' Disjunction of boolean/comparison expressions, joined by "||". '''
    def __init__(self, comparisons):
        super().__init__()
        self._terms = list(comparisons)
        self.name = '||'.join('%s' % c.name for c in comparisons)
    def __repr__(self):
        parts = []
        for t in self._terms:
            # Parenthesize nested conjunctions to keep precedence readable.
            template = '(%r)' if isinstance(t, AndExpression) else '%r'
            parts.append(template % t)
        return '||'.join(parts)
    def __eq__(self, E):
        return super().__eq__(E)
class ProductExpression(CommutativeExpression):
    ''' Product of expressions, printed with '*' between factors. '''
    def __init__(self, expressions):
        super().__init__()
        self._terms = list(expressions)
    def __repr__(self):
        head = self._terms[0]
        # Parenthesize a leading sum only when there are further factors.
        wrap_head = isinstance(head, SumExpression) and len(self._terms) > 1
        string = ('(%r)' if wrap_head else '%r') % head
        for t in self._terms[1:]:
            string += ('*(%r)' if isinstance(t, SumExpression) else '*%r') % t
        return string
class MaxExpression(CommutativeExpression, SingleTermExpression):
    ''' Maximum over a set of expressions, printed as "MAX{a, b, ...}". '''
    def __init__(self, expressions):
        super().__init__()
        self._terms = list(expressions)
    def __repr__(self):
        inner = ', '.join('%r' % t for t in self._terms)
        return 'MAX{%s}' % inner
class MaxRecipExpression(Expression, SingleTermExpression):
    ''' Compact form of MAX{e, 1/e} for a single expression e. '''
    def __init__(self, expression):
        super().__init__()
        self._terms = [expression]
    def __repr__(self):
        # The reciprocal is rebuilt on the fly purely for display.
        return 'MAX{%s, %s}' % (self._terms[0], RVFuncs.fraction(RVFuncs.constant(1), self._terms[0]))
class NANMaxExpression(CommutativeExpression, SingleTermExpression):
    ''' NANMAX variant of MaxExpression, printed as "NANMAX{a, b, ...}". '''
    def __init__(self, expressions):
        super().__init__()
        self._terms = list(expressions)
    def __repr__(self):
        inner = ', '.join('%r' % t for t in self._terms)
        return 'NANMAX{%s}' % inner
def safesum(a, b):
    ''' Addition that yields NaN for undefined cases instead of warning or
        raising: any NaN operand, or infinities of opposite sign. '''
    if np.isnan(a) or np.isnan(b):
        return np.nan
    if np.isinf(a) and np.isinf(b) and np.sign(a) != np.sign(b):
        return np.nan
    return a + b
def safeprod(a, b):
    ''' Multiplication that yields NaN for NaN operands and defines
        inf * 0 == 0 instead of NaN. '''
    if np.isnan(a) or np.isnan(b):
        return np.nan
    if (np.isinf(a) and b == 0) or (np.isinf(b) and a == 0):
        return 0.0
    return a * b
def safediv(a, b):
    ''' Division that yields NaN instead of raising/propagating for: NaN
        operands, inf/inf, or a zero denominator. '''
    if np.isnan(a) or np.isnan(b):
        return np.nan
    if np.isinf(a) and np.isinf(b):
        return np.nan
    if b == 0:
        return np.nan
    return a / b
def parse_value(value):
    ''' Attempts to interpret <value> as a number.

    Strings are parsed as int first, then float (a non-numeric string
    raises ValueError); non-string inputs are returned unchanged. '''
    if not isinstance(value, str):
        return value
    try:
        return int(value)
    except ValueError:
        return float(value)
class RVFuncs():
    ''' Static factory methods that build Expression trees in a partially
        simplified canonical form: constants are folded, nested negations and
        maxima are reduced, and common factors are extracted where possible.

        Fixes applied in this revision:
          * nanmax() is now a @staticmethod like every other factory here.
          * min() no longer flattens nested MaxExpression arguments
            (min(a, max(b, c)) != min(a, b, c) in general).
          * A leftover debug print() in fraction() was removed.
    '''
    @staticmethod
    def constant(value_raw):
        ''' Build a constant, parsing numeric strings into int/float. '''
        value = parse_value(value_raw)
        return ConstantExpression('c', value)
    @staticmethod
    def variable(name):
        return VariableExpression(name)
    @staticmethod
    def comparator_variable(term1, comp, term2):
        return ComparatorExpression(term1, comp, term2)
    @staticmethod
    def sample_set(variable, condition=None):
        return SampleSet(variable, condition)
    @staticmethod
    def expected_value(sampleset, is_func=None, is_expr=None):
        return ExpectedValue(sampleset, is_func=is_func, is_expr=is_expr)
    @staticmethod
    def negative(e):
        ''' Returns the negative of <e>, reducing nested negatives. '''
        n_negatives = 1
        while isinstance(e, NegativeExpression):
            e = e._terms[0]
            n_negatives += 1
        if isinstance(e, ConstantExpression):
            return RVFuncs.constant(-e.value if (n_negatives % 2 == 1) else e.value)
        return NegativeExpression(e) if (n_negatives % 2 == 1) else e
    @staticmethod
    def logical_not(e):
        ''' Logical negation; double negations cancel, and comparisons are
            negated by flipping the comparator instead of wrapping. '''
        n_nots = 1
        while isinstance(e, NotExpression):
            e = e._terms[0]
            n_nots += 1
        if (n_nots % 2 == 0):
            return e
        if isinstance(e, ComparatorExpression):
            return ComparatorExpression(e.variable, COMPARATOR_NEGATIONS[e.comparator], e.value)
        return NotExpression(e)
    @staticmethod
    def sum(*expressions):
        ''' Returns the sum of <expressions>, factoring out constants and shared factors. '''
        # Aggregate terms that are sums themselves
        exps = []
        for e in expressions:
            if isinstance(e, SumExpression):
                exps.extend(e._terms)
            else:
                exps.append(e)
        expressions = exps
        # Aggregate terms that are constants
        cval = 0
        exps = []
        for e in expressions:
            if isinstance(e, ConstantExpression):
                cval += e.value
            elif isinstance(e, NegativeExpression) and isinstance(e._terms[0], ConstantExpression):
                cval -= e._terms[0].value
            else:
                exps.append(e)
        if cval != 0 or len(exps) == 0:
            const = RVFuncs.constant(cval)
            exps = [ const, *exps]
        expressions = exps
        if len(expressions) == 1:
            return expressions[0]
        # Check if all terms share a common denominator and factor it out
        def split_as_fraction(e):
            # Returns [numerator, denominator-or-None] for a (possibly
            # negated) fraction; plain terms have denominator None.
            if isinstance(e, FractionExpression):
                return [e._terms[0], e._terms[1]]
            elif isinstance(e, NegativeExpression) and isinstance(e._terms[0],FractionExpression):
                return [RVFuncs.negative(e._terms[0]._terms[0]), e._terms[0]._terms[1]]
            return [e, None]
        nums, dens = zip(*[ split_as_fraction(e) for e in exps ])
        if all([ not(dens[0] is None) and d==dens[0] for d in dens ]):
            exps = nums
            common_den = dens[0]
        else:
            common_den = None
        # Check if any terms have shared product factors and factor them out
        def extract_unsigned_terms(e):
            # Strips signs/denominators and returns the list of product factors.
            if isinstance(e, NegativeExpression) or isinstance(e, FractionExpression):
                return extract_unsigned_terms(e._terms[0])
            if isinstance(e, ProductExpression):
                return e._terms
            return [e]
        def remove_terms(e, terms):
            # Divides <e> by the given factors, preserving sign and denominator.
            if isinstance(e, NegativeExpression):
                return RVFuncs.negative(remove_terms(e._terms[0], terms))
            if isinstance(e, FractionExpression):
                return RVFuncs.fraction(remove_terms(e._terms[0], terms), e._terms[1])
            if isinstance(e, ProductExpression):
                remaining = e._terms.copy()
                for t in terms:
                    remaining.remove(t)
                return RVFuncs.product(*remaining) if len(remaining) > 0 else RVFuncs.constant(1)
            return RVFuncs.constant(1) if len(terms) > 0 else e
        has_negative = [ isinstance(e,NegativeExpression) for e in exps ]
        unsigned_terms = [ extract_unsigned_terms(e) for e in exps ]
        unsigned_terms_tmp = deepcopy(unsigned_terms)
        shared_terms = []
        for st in unsigned_terms[0]:
            if isinstance(st, ConstantExpression) and (st.value == 1):
                continue
            if all([ (st in terms) for terms in unsigned_terms_tmp[1:] ]):
                shared_terms.append(st)
                for terms in unsigned_terms_tmp:
                    terms.remove(st)
        if len(shared_terms) > 0:
            remainder = RVFuncs.sum(*[ remove_terms(e, shared_terms) for e in exps ])
        else:
            remainder = SumExpression(exps)
        # Return the product of the common factor and the remainder sum
        if len(shared_terms) > 0 and common_den is None:
            common_factor = RVFuncs.product(*shared_terms)
            return RVFuncs.product(common_factor, remainder)
        elif len(shared_terms) > 0:
            common_factor = RVFuncs.fraction(RVFuncs.product(*shared_terms), common_den)
            return RVFuncs.product(common_factor, remainder)
        return remainder
    @staticmethod
    def diff(e0, e1):
        ''' e0 - e1. '''
        return RVFuncs.sum(e0, RVFuncs.negative(e1))
    @staticmethod
    def max(*expressions):
        ''' Maximum of <expressions>. Nested maxima are flattened; the
            two-argument forms MAX{E, 1/E} and MAX{E, -E} reduce to
            MaxRecipExpression and AbsExpression respectively. '''
        if len(expressions) == 1:
            return expressions[0]
        exps = []
        for e in expressions:
            if isinstance(e, MaxExpression):
                exps.extend(e._terms)
            else:
                exps.append(e)
        if len(expressions) == 2:
            e1, e2 = expressions
            # If the max *happens* to be Max(E, 1/E) for some E, reduce to a MaxRecip
            if e1 == RVFuncs.fraction(RVFuncs.constant(1), e2):
                return MaxRecipExpression(e1)
            # If the max *happens* to be Max(E, -E) for some E, reduce to Abs
            elif e1 == RVFuncs.negative(e2):
                return AbsExpression(e1)
        return MaxExpression(exps)
    @staticmethod
    def nanmax(*expressions):
        ''' NANMax variant of max(); nested MaxExpressions are flattened.

            BUGFIX: previously defined without @staticmethod, unlike every
            other factory on this class. '''
        if len(expressions) == 1:
            return expressions[0]
        exps = []
        for e in expressions:
            if isinstance(e, MaxExpression):
                exps.extend(e._terms)
            else:
                exps.append(e)
        return NANMaxExpression(exps)
    @staticmethod
    def min(*expressions):
        ''' Minimum of <expressions>, represented via min(e) == -max(-e). '''
        if len(expressions) == 1:
            return expressions[0]
        # BUGFIX: the old code flattened nested MaxExpression arguments the
        # way max() does, but min(a, max(b, c)) != min(a, b, c) in general
        # (e.g. a=1, b=0, c=2). Terms are now negated as-is; any valid
        # flattening happens inside max().
        exps = [ RVFuncs.negative(e) for e in expressions ]
        return RVFuncs.negative(RVFuncs.max(*exps))
    @staticmethod
    def abs(e):
        ''' Absolute value; |-e| reduces to |e|. '''
        if isinstance(e, NegativeExpression):
            e = e._terms[0]
        return AbsExpression(e)
    @staticmethod
    def pow(e, c):
        # TODO: exponentiation is not implemented; the base expression is
        # returned unchanged (pre-existing stub — behavior preserved).
        return e
    @staticmethod
    def logical_and(expressions):
        ''' Conjunction; nested AndExpressions are flattened. '''
        events = []
        for e in expressions:
            if isinstance(e, AndExpression):
                events.extend(e._terms)
            else:
                events.append(e)
        return AndExpression(events)
    @staticmethod
    def logical_or(expressions):
        ''' Disjunction; nested OrExpressions are flattened. '''
        events = []
        for e in expressions:
            if isinstance(e, OrExpression):
                events.extend(e._terms)
            else:
                events.append(e)
        return OrExpression(events)
    @staticmethod
    def product(*expressions):
        ''' Product of <expressions>: signs and constant factors are folded,
            nested products/fractions are aggregated, and factors of 1 or a
            factor of 0 are simplified away. '''
        # Strip negatives from input expressions
        n_negatives = 0
        exps = []
        for e in expressions:
            if isinstance(e, NegativeExpression):
                exps.append(e._terms[0])
                n_negatives += 1
            else:
                exps.append(e)
        expressions = exps
        # Remove any input expressions that are a constant 1
        exps = []
        for e in expressions:
            if not(isinstance(e, ConstantExpression) and (e.value == 1)):
                exps.append(e)
        expressions = exps
        # If any of the input expressions are a constant equal to 0, return 0
        if any([ isinstance(e,ConstantExpression) and (e.value==0) for e in expressions ]):
            return RVFuncs.constant(0)
        # Aggregate input expressions that are products or fractions
        num_exps = []
        den_exps = []
        for e in expressions:
            if isinstance(e, ProductExpression):
                num_exps.extend(e._terms)
            elif isinstance(e, FractionExpression):
                num_exps.append(e._terms[0])
                den_exps.append(e._terms[1])
            else:
                num_exps.append(e)
        if len(den_exps) > 0:
            # We have a fraction
            num = RVFuncs.product(*num_exps) if len(num_exps) > 1 else num_exps[0]
            den = RVFuncs.product(*den_exps) if len(den_exps) > 1 else den_exps[0]
            expr = RVFuncs.fraction(num, den)
        else:
            # We have a non-fraction product
            # Aggregate constants
            cval = 1
            _exps = []
            for e in num_exps:
                if isinstance(e, ConstantExpression):
                    cval = safeprod(cval, e.value)
                else:
                    _exps.append(e)
            if len(_exps) == 0:
                expr = RVFuncs.constant(cval)
            elif cval != 1:
                _exps.append(RVFuncs.constant(cval))
                expr = ProductExpression(_exps)
            elif len(_exps) > 1:
                expr = ProductExpression(_exps)
            else:
                expr = _exps[0]
        return expr if (n_negatives % 2 == 0) else RVFuncs.negative(expr)
    @staticmethod
    def fraction(num, den):
        ''' Process the numerator and denominator to produce a reduced expression of one of the following forms, in this priority:
                Constant or Variable
                Negative(Product(PositiveConstant, Fraction))
                Product(PositiveConstant, Fraction)
                Negative(Fraction).
            Assumes that num and den are already processed into Negative(Product(Constant, Expression)) form. '''
        # Simplify negative signs in the numerator/denominator
        n_negatives = 0
        if isinstance(num, NegativeExpression):
            num = num._terms[0]
            n_negatives += 1
        if isinstance(den, NegativeExpression):
            den = den._terms[0]
            n_negatives += 1
        # Remove any constants in front of the numerator or denominator
        num_val = 1
        den_val = 1
        if isinstance(num, ProductExpression) and isinstance(num._terms[0], ConstantExpression):
            num_val = num._terms[0].value
            num = RVFuncs.product(*num._terms[1:]) if len(num._terms) > 1 else RVFuncs.constant(1)
        if isinstance(den, ProductExpression) and isinstance(den._terms[0], ConstantExpression):
            den_val = den._terms[0].value
            den = RVFuncs.product(*den._terms[1:]) if len(den._terms) > 1 else RVFuncs.constant(1)
        cval = safediv(num_val, den_val)
        if cval < 0:
            n_negatives += 1
            cval = -cval
        # Aggregate terms in the numerator/denominator if one or both are already a fraction
        if isinstance(num, FractionExpression) and isinstance(den, FractionExpression):
            _num = RVFuncs.product(num._terms[0], den._terms[1])
            _den = RVFuncs.product(num._terms[1], den._terms[0])
            num, den = _num, _den
        elif isinstance(num, FractionExpression):
            _num = num._terms[0]
            _den = RVFuncs.product(num._terms[1], den)
            num, den = _num, _den
        elif isinstance(den, FractionExpression):
            _num = RVFuncs.product(den._terms[1], num)
            _den = den._terms[0]
            num, den = _num, _den
        # Remove terms in products that are present in both the numerator and denominator
        expr = None
        if num == den:
            expr = RVFuncs.constant(1)
        elif isinstance(den, ConstantExpression) and den.value == 1:
            expr = num
        elif isinstance(num, ProductExpression) and isinstance(den, ProductExpression):
            nterms, dterms = copy(num._terms), copy(den._terms)
            for term in nterms:
                if term in den._terms:
                    num._terms.remove(term)
                    den._terms.remove(term)
            num = RVFuncs.constant(1) if len(num._terms) == 0 else RVFuncs.product(*num._terms)
            den = RVFuncs.constant(1) if len(den._terms) == 0 else RVFuncs.product(*den._terms)
            if isinstance(num, ConstantExpression) and isinstance(den, ConstantExpression):
                expr = RVFuncs.constant(safediv(num.value, den.value))
        elif isinstance(num, ProductExpression) and isinstance(den, SingleTermExpression):
            if den in num._terms:
                num._terms.remove(den)
                expr = RVFuncs.product(*num._terms)
        elif isinstance(den, ProductExpression) and isinstance(num, SingleTermExpression):
            if num in den._terms:
                den._terms.remove(num)
                den = RVFuncs.product(*den._terms)
                # BUGFIX: removed a leftover debug print() here.
                if isinstance(den, ConstantExpression):
                    expr = RVFuncs.constant(safediv(1,den.value))
                else:
                    expr = FractionExpression(RVFuncs.constant(1), RVFuncs.product(*den._terms))
        if expr is None:
            expr = FractionExpression(num, den)
        # Add a constant scaling factor if it is not 1
        if cval != 1:
            constant = RVFuncs.constant(cval)
            expr = RVFuncs.product(constant, expr)
        return RVFuncs.negative(expr) if n_negatives % 2 == 1 else expr
| 0 | 0 |
bb849e15eb80277ce9b2f2a5c8794288fc0aacec | 1,134 | py | Python | formy_site/formy_app/forms.py | cmhedrick/formy | 778d98cf8a705514e1fbd9ba8b116fb667d749be | [
"MIT"
] | null | null | null | formy_site/formy_app/forms.py | cmhedrick/formy | 778d98cf8a705514e1fbd9ba8b116fb667d749be | [
"MIT"
] | 13 | 2020-05-30T22:30:04.000Z | 2021-09-22T19:04:40.000Z | formy_site/formy_app/forms.py | cmhedrick/formy | 778d98cf8a705514e1fbd9ba8b116fb667d749be | [
"MIT"
] | null | null | null | from django import forms
class LazyQuestionForm(forms.Form):
    ''' Minimal form exposing a single required free-text field. '''
    # NOTE(review): the label "email" looks inconsistent with the field name
    # "question" — confirm which is intended before changing either.
    question = forms.CharField(required=True, label="email")
class CustomForm(forms.Form):
    ''' Form whose fields are generated at runtime from the fields attached
        to a spreadsheet model instance. Field keys are the lower-cased
        field names with spaces replaced by underscores. '''
    def __init__(self, context, spreadsheet=None, *args, **kwargs):
        super(CustomForm, self).__init__(*args, **kwargs)
        # Map spreadsheet field types to (form field class, extra kwargs).
        type_map = {
            "INTEGER": (forms.IntegerField, {}),
            "STRING": (forms.CharField, {}),
            "AREA": (forms.CharField, {"widget": forms.Textarea}),
            "BOOL": (forms.BooleanField, {}),
        }
        for field in spreadsheet.spreadsheetfield_set.iterator():
            key = field.field_name.lower().replace(" ", "_")
            entry = type_map.get(field.field_type)
            if entry is None:
                # Unrecognized field types are skipped (same as before).
                continue
            field_cls, extra = entry
            self.fields[key] = field_cls(
                required=True, label=field.field_name, **extra
            )
| 37.8 | 80 | 0.563492 | from django import forms
class LazyQuestionForm(forms.Form):
question = forms.CharField(required=True, label="email")
class CustomForm(forms.Form):
def __init__(self, context, spreadsheet=None, *args, **kwargs):
super(CustomForm, self).__init__(*args, **kwargs)
for field in spreadsheet.spreadsheetfield_set.iterator():
key = field.field_name.lower().replace(" ", "_")
if field.field_type == "INTEGER":
self.fields[key] = forms.IntegerField(
required=True, label=field.field_name
)
elif field.field_type == "STRING":
self.fields[key] = forms.CharField(
required=True, label=field.field_name
)
elif field.field_type == "AREA":
self.fields[key] = forms.CharField(
required=True, label=field.field_name, widget=forms.Textarea
)
elif field.field_type == "BOOL":
self.fields[key] = forms.BooleanField(
required=True, label=field.field_name
)
| 0 | 0 |
aa30c170932818c11a70361a198633d3d3e3f96b | 1,659 | py | Python | docs/conf.py | Qiskit/qiskit-aqt-provider | 276b10bde45027e0d33c80a6942887f7de0204da | [
"Apache-2.0"
] | 5 | 2019-10-21T02:57:22.000Z | 2020-04-09T00:03:42.000Z | docs/conf.py | Qiskit/qiskit-aqt-provider | 276b10bde45027e0d33c80a6942887f7de0204da | [
"Apache-2.0"
] | 8 | 2019-09-25T19:48:34.000Z | 2020-02-27T16:30:41.000Z | docs/conf.py | Qiskit/qiskit-aqt-provider | 276b10bde45027e0d33c80a6942887f7de0204da | [
"Apache-2.0"
] | 10 | 2019-09-25T18:47:44.000Z | 2020-06-05T17:45:35.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Sphinx documentation builder
"""
project = 'Qiskit AQT Provider'
copyright = '2021, Qiskit and AQT development teams' # pylint: disable=redefined-builtin
author = 'Qiskit and AQT development teams'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'jupyter_sphinx',
]
templates_path = ["_templates"]
html_static_path = ['_static']
html_css_files = []
autosummary_generate = True
autosummary_generate_overwrite = False
autoclass_content = "both"
numfig = True
numfig_format = {
'table': 'Table %s'
}
language = None
exclude_patterns = ['_build', '**.ipynb_checkpoints']
pygments_style = 'colorful'
add_module_names = False
modindex_common_prefix = ['qiskit_aqt.']
html_theme = 'qiskit_sphinx_theme'
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
}
| 24.397059 | 89 | 0.724533 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Sphinx documentation builder
"""
project = 'Qiskit AQT Provider'
copyright = '2021, Qiskit and AQT development teams' # pylint: disable=redefined-builtin
author = 'Qiskit and AQT development teams'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'jupyter_sphinx',
]
templates_path = ["_templates"]
html_static_path = ['_static']
html_css_files = []
autosummary_generate = True
autosummary_generate_overwrite = False
autoclass_content = "both"
numfig = True
numfig_format = {
'table': 'Table %s'
}
language = None
exclude_patterns = ['_build', '**.ipynb_checkpoints']
pygments_style = 'colorful'
add_module_names = False
modindex_common_prefix = ['qiskit_aqt.']
html_theme = 'qiskit_sphinx_theme'
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
}
| 0 | 0 |
4af823cb5f863d54376d0a7984be2ad7dd1c341a | 448 | py | Python | backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py | eth-library-lab/open-image-search | 7be76cd4b7730dd76623e15f034f1c337ab99f84 | [
"MIT"
] | 5 | 2021-06-14T10:49:52.000Z | 2022-02-16T15:56:49.000Z | backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py | eth-library-lab/open-image-search | 7be76cd4b7730dd76623e15f034f1c337ab99f84 | [
"MIT"
] | 11 | 2021-06-11T16:12:49.000Z | 2021-12-03T16:41:13.000Z | backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py | eth-library-lab/open-image-search | 7be76cd4b7730dd76623e15f034f1c337ab99f84 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2021-09-20 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    ''' Adds the nullable ImageMetadata.image_fpath column (local path to
        the image file, max 300 chars). '''
    dependencies = [
        ('ImageSearch', '0008_auto_20210916_0909'),
    ]
    operations = [
        migrations.AddField(
            model_name='imagemetadata',
            name='image_fpath',
            field=models.CharField(max_length=300, null=True, verbose_name='local path to image'),
        ),
    ]
| 23.578947 | 98 | 0.629464 | # Generated by Django 3.1.2 on 2021-09-20 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ImageSearch', '0008_auto_20210916_0909'),
]
operations = [
migrations.AddField(
model_name='imagemetadata',
name='image_fpath',
field=models.CharField(max_length=300, null=True, verbose_name='local path to image'),
),
]
| 0 | 0 |
244d8e1d804d4ab001743ae24cbdc613ee9ca42f | 9,566 | py | Python | test_interest.py | castacks/interestingness | b614818ab11dcc15c5fe6b55fe993882add3e8e6 | [
"BSD-3-Clause"
] | 1 | 2021-07-20T14:58:36.000Z | 2021-07-20T14:58:36.000Z | test_interest.py | castacks/interestingness | b614818ab11dcc15c5fe6b55fe993882add3e8e6 | [
"BSD-3-Clause"
] | null | null | null | test_interest.py | castacks/interestingness | b614818ab11dcc15c5fe6b55fe993882add3e8e6 | [
"BSD-3-Clause"
] | 1 | 2021-04-17T08:25:05.000Z | 2021-04-17T08:25:05.000Z | #!/usr/bin/env python3
# Copyright <2019> <Chen Wang [https://chenwang.site], Carnegie Mellon University>
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
import cv2
import copy
import time
import math
import torch
import os.path
import argparse
import torchvision
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.utils.data as Data
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.models.vgg import VGG
import torchvision.transforms as transforms
from torchvision.datasets import CocoDetection
from torch.optim.lr_scheduler import ReduceLROnPlateau
from interestingness import AE, VAE, AutoEncoder, Interestingness
from dataset import ImageData, Dronefilm, DroneFilming, SubT, SubTF, PersonalVideo
from torchutil import count_parameters, show_batch, show_batch_origin, Timer, MovAvg
from torchutil import ConvLoss, CosineLoss, CorrelationLoss, Split2d, Merge2d, PearsonLoss, FiveSplit2d
class Interest():
    '''
    Maintain the top K highest-loss (most interesting) frames seen so far.

    Every reported (batch_idx, loss) pair is also appended to a text log so
    the full interestingness sequence can be analyzed offline.
    '''
    def __init__(self, K, filename):
        self.K = K
        self.interests = []
        self.filename = filename
        # Truncate/create the log file. Using 'with' (instead of the old
        # manual open()/close()) guarantees the handle is released even if
        # an error occurs.
        with open(self.filename, 'w'):
            pass
    def add_interest(self, tensor, loss, batch_idx, visualize_window=None):
        '''
        Record one frame: append (batch_idx, loss) to the log, insert the
        frame into the top-K list, and return the current top-K frames
        stacked horizontally (optionally shown in an OpenCV window).
        '''
        # BUGFIX: the previous open()/write()/close() sequence leaked the
        # file handle if the write raised; 'with' closes it unconditionally.
        with open(self.filename, 'a+') as f:
            f.write("%d %f\n" % (batch_idx, loss))
        self.interests.append((loss, tensor, batch_idx))
        self.interests.sort(key=self._sort_loss, reverse=True)
        self._maintain()
        interests = np.concatenate([entry[1] for entry in self.interests], axis=1)
        if visualize_window is not None:
            cv2.imshow(visualize_window, interests)
        return interests
    def _sort_loss(self, val):
        # Sort key: the loss component of a (loss, tensor, batch_idx) entry.
        return val[0]
    def _maintain(self):
        # Keep only the K highest-loss entries.
        if len(self.interests) > self.K:
            self.interests = self.interests[:self.K]
def performance(loader, net):
    ''' Run the interestingness network over a test loader, logging and
        visualizing per-frame losses; returns the mean loss.

        NOTE(review): relies on module-level globals created in __main__
        (args, timer, movavg, interest, drawbox, test_name) — only callable
        after that setup has run.
    '''
    test_loss, time_use = 0, 0
    with torch.no_grad():
        for batch_idx, inputs in enumerate(loader):
            # Optionally subsample the stream.
            if batch_idx % args.skip_frames !=0:
                continue
            if torch.cuda.is_available():
                inputs = inputs.cuda()
            timer.tic()
            inputs = Variable(inputs)
            outputs, loss = net(inputs)
            # Smooth the raw loss over a moving window.
            loss = movavg.append(loss)
            time_use += timer.end()
            if args.drawbox is True:
                drawbox(inputs, outputs)
            test_loss += loss.item()
            frame = show_batch_box(inputs, batch_idx, loss.item())
            top_interests = interest.add_interest(frame, loss, batch_idx, visualize_window='Top Interests')
            if args.debug is True:
                # Dump per-frame visualizations for offline inspection.
                image = show_batch(torch.cat([outputs], dim=0), 'reconstruction')
                recon = show_batch(torch.cat([(inputs-outputs).abs()], dim=0), 'difference')
                cv2.imwrite('images/%s-%d/%s-interestingness-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), frame*255)
                cv2.imwrite('images/%s-%d/%s-reconstruction-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), image*255)
                cv2.imwrite('images/%s-%d/%s-difference-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), recon*255)
            print('batch_idx:', batch_idx, 'loss:%.6f'%(loss.item()))
    print("Total time using: %.2f seconds, %.2f ms/frame"%(time_use, 1000*time_use/(batch_idx+1)))
    cv2.imwrite('results/%s.png'%(test_name), 255*top_interests)
    return test_loss/(batch_idx+1)
def level_height(bar, ranges=(0.02, 0.08)):
    ''' Map a raw interestingness value onto [0, 1].

    The value is normalized linearly over [ranges[0], ranges[1]] (clamped
    to [0, 1]), then passed through a tan/tanh squashing curve that
    sharpens the transition in the middle of the range.

    Args:
        bar: raw interestingness (loss) value.
        ranges: (low, high) normalization bounds; any indexable pair works.
            BUGFIX: default changed from a mutable list to a tuple (same
            values) to avoid the shared-mutable-default pitfall.
    Returns:
        A float in [0, 1].
    '''
    lo, hi = ranges[0], ranges[1]
    h = min(max(0, (bar - lo) / (hi - lo)), 1)
    return (np.tanh(np.tan(math.pi / 2 * (2 * h - 1)) - 0.8) + 1) / 2
def boxbar(height, bar, ranges=[0.02, 0.08], threshold=[0.05, 0.06]):
    ''' Render a vertical bar-indicator image for an interestingness value.

        The bar fills from the bottom up to level_height(bar); horizontal
        lines are drawn at each value in <threshold>.

        NOTE(review): pixel values are written as 0/1 into a uint8 image —
        presumably the caller rescales (e.g. *255) before saving; confirm.
    '''
    width = 15
    box = np.zeros((height,width,3), np.uint8)
    h = level_height(bar, ranges)
    # Filled green rectangle from the level line down to the bottom.
    x1, y1 = 0, int((1-h)*height)
    x2, y2 = int(width), int(height)
    cv2.rectangle(box,(x1,y1),(x2,y2),(0,1,0),-1)
    # One horizontal marker line per threshold value.
    for i in threshold:
        x1, y1 = 0, int((1.0-i/ranges[1])*height)
        x2, y2 = width, int((1.0-i/ranges[1])*height)
        cv2.line(box,(x1, y1), (x2, y2), (1,0,0), 3)
    return box
def show_batch_box(batch, batch_idx, loss, box_id=None, show_now=True):
    ''' Normalize a batch to [0, 1], tile it into a grid, append the
        interestingness bar, and (optionally) display the composite frame.

        Returns the composite image as a float numpy array (H, W, 3).
    '''
    # Min-max normalize; a constant batch maps to all zeros.
    min_v = torch.min(batch)
    range_v = torch.max(batch) - min_v
    if range_v > 0:
        batch = (batch - min_v) / range_v
    else:
        batch = torch.zeros(batch.size())
    grid = torchvision.utils.make_grid(batch).cpu()
    # Reverse channel order (RGB -> BGR for OpenCV) and move channels last.
    img = grid.numpy()[::-1].transpose((1, 2, 0))
    box = boxbar(grid.size(-2), loss, threshold=[])
    frame = np.hstack([img, box])
    if show_now:
        cv2.imshow('interestingness', frame)
        cv2.waitKey(1)
    return frame
if __name__ == "__main__":
# Arguements
parser = argparse.ArgumentParser(description='Test Interestingness Networks')
parser.add_argument("--data-root", type=str, default='/data/datasets', help="dataset root folder")
parser.add_argument("--model-save", type=str, default='saves/ae.pt.SubTF.n1000.mse', help="read model")
parser.add_argument("--test-data", type=int, default=2, help='test data ID.')
parser.add_argument("--seed", type=int, default=0, help='Random seed.')
parser.add_argument("--crop-size", type=int, default=320, help='crop size')
parser.add_argument("--num-interest", type=int, default=10, help='loss compute by grid')
parser.add_argument("--skip-frames", type=int, default=1, help='number of skip frame')
parser.add_argument("--window-size", type=int, default=1, help='smooth window size >=1')
parser.add_argument('--dataset', type=str, default='SubTF', help='dataset type (SubTF, DroneFilming')
parser.add_argument('--save-flag', type=str, default='n1000', help='save name flag')
parser.add_argument("--rr", type=float, default=5, help="reading rate")
parser.add_argument("--wr", type=float, default=5, help="writing rate")
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--drawbox', dest='drawbox', action='store_true')
parser.set_defaults(debug=False)
parser.set_defaults(drawbox=False)
args = parser.parse_args(); print(args)
torch.manual_seed(args.seed)
os.makedirs('results', exist_ok=True)
if args.debug is True and not os.path.exists('images/%s-%d'%(args.dataset,args.test_data)):
os.makedirs('images/%s-%d'%(args.dataset,args.test_data))
transform = transforms.Compose([
# transforms.CenterCrop(args.crop_size),
transforms.Resize((args.crop_size,args.crop_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
timer = Timer()
test_name = '%s-%d-%s-%s'%(args.dataset, args.test_data, time.strftime('%Y-%m-%d-%H:%M:%S'), args.save_flag)
if args.dataset == 'DroneFilming':
test_data = DroneFilming(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'SubTF':
test_data = SubTF(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'PersonalVideo':
test_data = PersonalVideo(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
test_loader = Data.DataLoader(dataset=test_data, batch_size=1, shuffle=False)
net = torch.load(args.model_save)
net.set_train(False)
net.memory.set_learning_rate(rr=args.rr, wr=args.wr)
interest = Interest(args.num_interest, 'results/%s.txt'%(test_name))
movavg = MovAvg(args.window_size)
if torch.cuda.is_available():
net = net.cuda()
drawbox = ConvLoss(input_size=args.crop_size, kernel_size=args.crop_size//2, stride=args.crop_size//4)
criterion = CorrelationLoss(args.crop_size//2, reduce=False, accept_translation=False)
fivecrop = FiveSplit2d(args.crop_size//2)
print('number of parameters:', count_parameters(net))
val_loss = performance(test_loader, net)
print('Done.')
| 44.493023 | 137 | 0.681685 | #!/usr/bin/env python3
# Copyright <2019> <Chen Wang [https://chenwang.site], Carnegie Mellon University>
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
import cv2
import copy
import time
import math
import torch
import os.path
import argparse
import torchvision
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.utils.data as Data
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.models.vgg import VGG
import torchvision.transforms as transforms
from torchvision.datasets import CocoDetection
from torch.optim.lr_scheduler import ReduceLROnPlateau
from interestingness import AE, VAE, AutoEncoder, Interestingness
from dataset import ImageData, Dronefilm, DroneFilming, SubT, SubTF, PersonalVideo
from torchutil import count_parameters, show_batch, show_batch_origin, Timer, MovAvg
from torchutil import ConvLoss, CosineLoss, CorrelationLoss, Split2d, Merge2d, PearsonLoss, FiveSplit2d
class Interest():
    '''
    Maintain the top-K most interesting frames seen so far.

    Each interest is stored as a tuple (loss, frame, batch_idx); the list
    is kept sorted by loss in descending order and truncated to K entries.
    Every reported interest is also appended to a log file.
    '''
    def __init__(self, K, filename):
        """K: number of top interests to keep; filename: path of the log file."""
        self.K = K
        self.interests = []
        self.filename = filename
        # Truncate (or create) the log file so each run starts clean.
        # `with` guarantees the handle is closed even on error.
        with open(self.filename, 'w'):
            pass

    def add_interest(self, tensor, loss, batch_idx, visualize_window=None):
        """Record one (loss, frame) pair and return the montage of the
        current top-K frames concatenated side by side.

        tensor: image frame (H x W x C array) to remember.
        loss: interestingness score; higher means more interesting.
        batch_idx: frame index, logged together with the loss.
        visualize_window: optional cv2 window name to display the montage.
        """
        # Append "<index> <loss>" to the log; 'a+' preserves prior entries.
        with open(self.filename, 'a+') as f:
            f.write("%d %f\n" % (batch_idx, loss))
        self.interests.append((loss, tensor, batch_idx))
        # Highest loss first, then drop everything beyond the top K.
        self.interests.sort(key=self._sort_loss, reverse=True)
        self._maintain()
        # Build one wide image out of the kept frames (axis=1: side by side).
        interests = np.concatenate([self.interests[i][1] for i in range(len(self.interests))], axis=1)
        if visualize_window is not None:
            cv2.imshow(visualize_window, interests)
        return interests

    def _sort_loss(self, val):
        # Sort key: the loss stored at position 0 of each tuple.
        return val[0]

    def _maintain(self):
        # Keep only the K highest-loss entries (list is already sorted).
        if len(self.interests) > self.K:
            self.interests = self.interests[:self.K]
def performance(loader, net):
    """Run `net` over every frame served by `loader` and report results.

    Relies on module-level globals set up in __main__: args, timer,
    movavg, interest, drawbox and test_name.

    For each (non-skipped) frame: forward pass, smooth the loss with the
    moving average, visualise the frame with its interestingness bar,
    update the top-K interests, and optionally dump debug images.

    Returns the accumulated smoothed loss divided by the number of
    loader batches (note: the divisor batch_idx+1 includes skipped frames).
    """
    test_loss, time_use = 0, 0
    # Inference only: no gradients needed.
    with torch.no_grad():
        for batch_idx, inputs in enumerate(loader):
            # Process only every args.skip_frames-th frame.
            if batch_idx % args.skip_frames !=0:
                continue
            if torch.cuda.is_available():
                inputs = inputs.cuda()
            timer.tic()
            inputs = Variable(inputs)
            outputs, loss = net(inputs)
            # Smooth the raw loss over a sliding window.
            loss = movavg.append(loss)
            time_use += timer.end()
            if args.drawbox is True:
                drawbox(inputs, outputs)
            test_loss += loss.item()
            # Compose frame + interestingness bar; also shown on screen.
            frame = show_batch_box(inputs, batch_idx, loss.item())
            # Track the K most interesting frames seen so far.
            top_interests = interest.add_interest(frame, loss, batch_idx, visualize_window='Top Interests')
            if args.debug is True:
                image = show_batch(torch.cat([outputs], dim=0), 'reconstruction')
                recon = show_batch(torch.cat([(inputs-outputs).abs()], dim=0), 'difference')
                # *255: images are assumed normalised to [0,1] (frame is
                # min-max normalised by show_batch_box) — confirm for show_batch.
                cv2.imwrite('images/%s-%d/%s-interestingness-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), frame*255)
                cv2.imwrite('images/%s-%d/%s-reconstruction-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), image*255)
                cv2.imwrite('images/%s-%d/%s-difference-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), recon*255)
            print('batch_idx:', batch_idx, 'loss:%.6f'%(loss.item()))
    print("Total time using: %.2f seconds, %.2f ms/frame"%(time_use, 1000*time_use/(batch_idx+1)))
    # Save the final montage of top interests for this run.
    cv2.imwrite('results/%s.png'%(test_name), 255*top_interests)
    return test_loss/(batch_idx+1)
def level_height(bar, ranges=(0.02, 0.08)):
    """Map a raw loss value to a bar height in [0, 1].

    bar: the raw interestingness loss.
    ranges: (low, high) losses mapped to an empty / full bar; values
        outside are clamped.  The default is a tuple rather than a list
        to avoid the shared-mutable-default-argument pitfall.

    The clamped linear position is warped through tan/tanh so the bar
    saturates smoothly at the extremes; the -0.8 offset shifts the curve.
    """
    # Linear position of `bar` inside `ranges`, clamped to [0, 1].
    h = min(max(0, (bar - ranges[0]) / (ranges[1] - ranges[0])), 1)
    return (np.tanh(np.tan(math.pi / 2 * (2 * h - 1)) - 0.8) + 1) / 2
def boxbar(height, bar, ranges=(0.02, 0.08), threshold=(0.05, 0.06)):
    """Render a vertical level bar of the given height for the loss `bar`.

    height: bar height in pixels (should match the frame it sits next to).
    bar: raw loss value; converted to a fill fraction via level_height().
    ranges: (low, high) losses mapped to empty / full bar.
    threshold: loss values to mark with horizontal lines on the bar.
        Defaults are tuples (not lists) to avoid the shared
        mutable-default-argument pitfall; behaviour is unchanged.

    Returns an (height x 15 x 3) uint8 image.
    """
    width = 15
    box = np.zeros((height, width, 3), np.uint8)
    h = level_height(bar, ranges)
    # Filled rectangle growing upwards from the bottom; colour (0,1,0)
    # is unit-intensity green in OpenCV's BGR order — frames are
    # presumably scaled by 255 before being written out.
    x1, y1 = 0, int((1 - h) * height)
    x2, y2 = int(width), int(height)
    cv2.rectangle(box, (x1, y1), (x2, y2), (0, 1, 0), -1)
    # Horizontal marker line for each threshold value.
    for i in threshold:
        x1, y1 = 0, int((1.0 - i / ranges[1]) * height)
        x2, y2 = width, int((1.0 - i / ranges[1]) * height)
        cv2.line(box, (x1, y1), (x2, y2), (1, 0, 0), 3)
    return box
def show_batch_box(batch, batch_idx, loss, box_id=None, show_now=True):
    """Compose (and optionally display) a frame: the min-max normalised
    batch grid with a loss level bar appended on the right.

    batch: image tensor batch to visualise.
    batch_idx: frame index (unused here; kept for interface parity).
    loss: loss value driving the level bar height.
    box_id: unused; kept for backward compatibility.
    show_now: if True, show the frame in the 'interestingness' window.

    Returns the composed frame as an H x W x 3 array.
    """
    lo = torch.min(batch)
    span = torch.max(batch) - lo
    if span > 0:
        scaled = (batch - lo) / span
    else:
        # Constant-valued batch: fall back to an all-zero image.
        scaled = torch.zeros(batch.size())
    grid = torchvision.utils.make_grid(scaled).cpu()
    # Reverse the channel axis and move it last
    # (presumably RGB -> BGR for OpenCV display — confirm).
    img = grid.numpy()[::-1].transpose((1, 2, 0))
    # Level bar matching the grid height, with no threshold markers.
    level_bar = boxbar(grid.size(-2), loss, threshold=[])
    frame = np.hstack([img, level_bar])
    if show_now:
        cv2.imshow('interestingness', frame)
        cv2.waitKey(1)
    return frame
if __name__ == "__main__":
    # Command-line arguments
    parser = argparse.ArgumentParser(description='Test Interestingness Networks')
    parser.add_argument("--data-root", type=str, default='/data/datasets', help="dataset root folder")
    parser.add_argument("--model-save", type=str, default='saves/ae.pt.SubTF.n1000.mse', help="read model")
    parser.add_argument("--test-data", type=int, default=2, help='test data ID.')
    parser.add_argument("--seed", type=int, default=0, help='Random seed.')
    parser.add_argument("--crop-size", type=int, default=320, help='crop size')
    parser.add_argument("--num-interest", type=int, default=10, help='loss compute by grid')
    parser.add_argument("--skip-frames", type=int, default=1, help='number of skip frame')
    parser.add_argument("--window-size", type=int, default=1, help='smooth window size >=1')
    parser.add_argument('--dataset', type=str, default='SubTF', help='dataset type (SubTF, DroneFilming')
    parser.add_argument('--save-flag', type=str, default='n1000', help='save name flag')
    parser.add_argument("--rr", type=float, default=5, help="reading rate")
    parser.add_argument("--wr", type=float, default=5, help="writing rate")
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.add_argument('--drawbox', dest='drawbox', action='store_true')
    parser.set_defaults(debug=False)
    parser.set_defaults(drawbox=False)
    args = parser.parse_args(); print(args)
    # Reproducibility and output directories.
    torch.manual_seed(args.seed)
    os.makedirs('results', exist_ok=True)
    if args.debug is True and not os.path.exists('images/%s-%d'%(args.dataset,args.test_data)):
        os.makedirs('images/%s-%d'%(args.dataset,args.test_data))
    # Resize + ImageNet-style normalisation applied to every frame.
    transform = transforms.Compose([
            # transforms.CenterCrop(args.crop_size),
            transforms.Resize((args.crop_size,args.crop_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])
    timer = Timer()
    # Unique run name: dataset, sequence id, timestamp and save flag.
    test_name = '%s-%d-%s-%s'%(args.dataset, args.test_data, time.strftime('%Y-%m-%d-%H:%M:%S'), args.save_flag)
    # Select the test sequence for the chosen dataset.
    if args.dataset == 'DroneFilming':
        test_data = DroneFilming(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
    elif args.dataset == 'SubTF':
        test_data = SubTF(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
    elif args.dataset == 'PersonalVideo':
        test_data = PersonalVideo(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
    # batch_size=1, no shuffling: frames are processed in sequence order.
    test_loader = Data.DataLoader(dataset=test_data, batch_size=1, shuffle=False)
    # Load the pre-trained interestingness network and put it in test mode.
    net = torch.load(args.model_save)
    net.set_train(False)
    net.memory.set_learning_rate(rr=args.rr, wr=args.wr)
    # Top-K interest tracker (logs to results/<test_name>.txt) and loss smoother.
    interest = Interest(args.num_interest, 'results/%s.txt'%(test_name))
    movavg = MovAvg(args.window_size)
    if torch.cuda.is_available():
        net = net.cuda()
    # Auxiliary visualisation/criterion modules used by performance().
    drawbox = ConvLoss(input_size=args.crop_size, kernel_size=args.crop_size//2, stride=args.crop_size//4)
    criterion = CorrelationLoss(args.crop_size//2, reduce=False, accept_translation=False)
    fivecrop = FiveSplit2d(args.crop_size//2)
    print('number of parameters:', count_parameters(net))
    # Run the full evaluation over the test sequence.
    val_loss = performance(test_loader, net)
    print('Done.')
| 0 | 0 |
6a1d39e1c5b43161354a2960c1c94b76e994dfc3 | 281 | py | Python | aix360/algorithms/rbm/__init__.py | Qingtian-Zou/AIX360 | cf25f58077ae002fb4542b680fd98db47758dae5 | [
"Apache-2.0"
] | 609 | 2019-08-02T17:55:18.000Z | 2020-07-11T18:11:09.000Z | aix360/algorithms/rbm/__init__.py | Qingtian-Zou/AIX360 | cf25f58077ae002fb4542b680fd98db47758dae5 | [
"Apache-2.0"
] | 47 | 2019-08-05T15:00:35.000Z | 2020-07-13T20:35:57.000Z | aix360/algorithms/rbm/__init__.py | Qingtian-Zou/AIX360 | cf25f58077ae002fb4542b680fd98db47758dae5 | [
"Apache-2.0"
] | 147 | 2019-07-12T11:30:31.000Z | 2020-07-04T19:18:49.000Z | from .features import FeatureBinarizer, FeatureBinarizerFromTrees
from .linear_regression import LinearRuleRegression
from .logistic_regression import LogisticRuleRegression
from .boolean_rule_cg import BooleanRuleCG
from .GLRM import GLRMExplainer
from .BRCG import BRCGExplainer
| 40.142857 | 65 | 0.886121 | from .features import FeatureBinarizer, FeatureBinarizerFromTrees
from .linear_regression import LinearRuleRegression
from .logistic_regression import LogisticRuleRegression
from .boolean_rule_cg import BooleanRuleCG
from .GLRM import GLRMExplainer
from .BRCG import BRCGExplainer
| 0 | 0 |
62792ed8eb6d2e940b91586e73b91e2349dbbaa8 | 2,325 | py | Python | python_actr/display/tk/core.py | osaaso1/python_actr | f7cb03bcf78310f1a6b68e72d2cef28bd8f83aab | [
"MIT"
] | 3 | 2021-12-11T02:51:51.000Z | 2022-01-23T02:33:18.000Z | python_actr/display/tk/core.py | osaaso1/python_actr | f7cb03bcf78310f1a6b68e72d2cef28bd8f83aab | [
"MIT"
] | 8 | 2022-01-17T22:51:27.000Z | 2022-03-15T00:44:20.000Z | python_actr/display/tk/core.py | osaaso1/python_actr | f7cb03bcf78310f1a6b68e72d2cef28bd8f83aab | [
"MIT"
] | 8 | 2021-12-06T20:16:35.000Z | 2022-03-14T07:21:56.000Z | import tkinter
import time
from . import render
class TkinterDisplay:
root=None
def get_root(self):
if TkinterDisplay.root is None:
TkinterDisplay.root=tkinter.Tk()
return TkinterDisplay.root
def __init__(self,obj,width=640,height=480,full=False,title='CCMSuite3',background='#CCCCCC'):
self.obj=obj
self.title=title
self.paused=False
self.skipped_frame=False
self.rate=1.0
root=self.get_root()
if full:
width, height = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (width, height))
self.canvas=tkinter.Canvas(root)
self.canvas.configure(width=width,height=height,background=background)
self.canvas.pack()
obj._get_scheduler().add(self.render_loop)
root.bind('<Escape>',self.on_escape)
root.bind('<Pause>',self.on_pause)
root.bind('<Prior>',self.on_pgup)
root.bind('<Next>',self.on_pgdown)
root.bind('<Key>',self.on_key)
root.protocol('WM_DELETE_WINDOW',self.obj.stop)
def update_title(self):
rateinfo=''
if self.rate!=1.0: rateinfo='[%1.3fx]'%self.rate
self.get_root().title('%s: time=%1.3f%s'%(self.title,self.obj.now(),rateinfo))
def on_escape(self,event):
self.obj.stop()
def on_pause(self,event):
self.paused=not self.paused
def on_pgup(self,event):
self.rate*=1.1
def on_pgdown(self,event):
self.rate/=1.1
def on_key(self,event):
if hasattr(self.obj,'key_pressed'):
self.obj.key_pressed(event.char)
def render_loop(self):
root=self.get_root()
obj=self.obj
root.update()
dt=0.01
while True:
next_time=time.time()+dt
yield dt*self.rate
render(obj,self.canvas)
root.update()
if time.time()>next_time and obj.now()>dt:
#print 'frame skipped at t=%1.3fs'%obj.now()
self.skipped_frame=True
while self.paused or time.time()<next_time:
root.update()
self.update_title()
| 27.678571 | 98 | 0.566452 | import tkinter
import time
from . import render
class TkinterDisplay:
root=None
def get_root(self):
if TkinterDisplay.root is None:
TkinterDisplay.root=tkinter.Tk()
return TkinterDisplay.root
def __init__(self,obj,width=640,height=480,full=False,title='CCMSuite3',background='#CCCCCC'):
self.obj=obj
self.title=title
self.paused=False
self.skipped_frame=False
self.rate=1.0
root=self.get_root()
if full:
width, height = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (width, height))
self.canvas=tkinter.Canvas(root)
self.canvas.configure(width=width,height=height,background=background)
self.canvas.pack()
obj._get_scheduler().add(self.render_loop)
root.bind('<Escape>',self.on_escape)
root.bind('<Pause>',self.on_pause)
root.bind('<Prior>',self.on_pgup)
root.bind('<Next>',self.on_pgdown)
root.bind('<Key>',self.on_key)
root.protocol('WM_DELETE_WINDOW',self.obj.stop)
def update_title(self):
rateinfo=''
if self.rate!=1.0: rateinfo='[%1.3fx]'%self.rate
self.get_root().title('%s: time=%1.3f%s'%(self.title,self.obj.now(),rateinfo))
def on_escape(self,event):
self.obj.stop()
def on_pause(self,event):
self.paused=not self.paused
def on_pgup(self,event):
self.rate*=1.1
def on_pgdown(self,event):
self.rate/=1.1
def on_key(self,event):
if hasattr(self.obj,'key_pressed'):
self.obj.key_pressed(event.char)
def render_loop(self):
root=self.get_root()
obj=self.obj
root.update()
dt=0.01
while True:
next_time=time.time()+dt
yield dt*self.rate
render(obj,self.canvas)
root.update()
if time.time()>next_time and obj.now()>dt:
#print 'frame skipped at t=%1.3fs'%obj.now()
self.skipped_frame=True
while self.paused or time.time()<next_time:
root.update()
self.update_title()
| 0 | 0 |
9a70fd2b8f51d98c7016e821a8b8f28b97a4e005 | 108,582 | py | Python | src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py | Ericsson/oss-automatic-bug-assignment | f4965babd0491118713d7b19bd7ddd30fa39254f | [
"MIT"
] | 3 | 2018-09-25T02:29:54.000Z | 2020-02-12T12:35:55.000Z | src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py | dbreddyAI/oss-automatic-bug-assignment | f4965babd0491118713d7b19bd7ddd30fa39254f | [
"MIT"
] | null | null | null | src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py | dbreddyAI/oss-automatic-bug-assignment | f4965babd0491118713d7b19bd7ddd30fa39254f | [
"MIT"
] | 3 | 2017-10-26T13:50:21.000Z | 2019-12-17T03:40:11.000Z | from numpy import array
import os
import inspect
import matplotlib.pyplot as plt
def plot_learning_curve(title, computed_score, train_sizes,
                        train_scores_mean, train_scores_std,
                        test_scores_mean, test_scores_std,
                        xlim=(0, 25000), ylim=(0.0, 1.0)):
    """Generate a plot of the test and training learning curves.

    Fixes the original docstring, which documented a `ylim` parameter
    that did not exist; the axis limits are now real keyword parameters
    whose defaults reproduce the previous hard-coded behaviour.

    Parameters
    ----------
    title: string
        Title of the chart.
    computed_score: string
        Name of the computed score (used for the y-axis label and legend).
    train_sizes: one-dimensional numpy.ndarray
        Training-set sizes for which the scores have been computed.
    train_scores_mean: one-dimensional numpy.ndarray
        Mean of the training-set scores, one entry per train_sizes element.
    train_scores_std: one-dimensional numpy.ndarray
        Standard deviation of the training-set scores.
    test_scores_mean: one-dimensional numpy.ndarray
        Mean of the test-set scores, one entry per train_sizes element.
    test_scores_std: one-dimensional numpy.ndarray
        Standard deviation of the test-set scores.
    xlim: tuple, shape (xmin, xmax), optional
        Horizontal plotting range (defaults to the historical 0..25000).
    ylim: tuple, shape (ymin, ymax), optional
        Vertical plotting range (defaults to 0..1).

    Returns
    -------
    matplotlib.figure.Figure
        The figure containing both learning curves.
    """
    fig = plt.figure(figsize=(20.0, 12.5))
    plt.title(title, size=31)
    plt.xlim(xmin=xlim[0], xmax=xlim[1])
    plt.ylim(ymin=ylim[0], ymax=ylim[1])
    plt.xlabel("Training examples", size=28)
    plt.ylabel(computed_score.capitalize(), size=28)
    plt.grid(linewidth=3)
    # Shaded bands: mean +/- one standard deviation.
    plt.fill_between(train_sizes, train_scores_mean -
        train_scores_std, train_scores_mean + train_scores_std,
        alpha=0.3, color="r")
    plt.fill_between(train_sizes, test_scores_mean -
        test_scores_std, test_scores_mean + test_scores_std,
        alpha=0.3, color="g")
    # Mean curves: red for training, green for test.
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
        label="Training {}".format(computed_score),
        linewidth=5.0, markersize=13.0)
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
        label="Test {}".format(computed_score),
        linewidth=5.0, markersize=13.0)
    plt.legend(loc="best", prop={'size': 26})
    plt.tick_params(axis='both', which='major', labelsize=22)
    return fig
def main():
current_dir = os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe())))
incremental_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565, 0.84031413612565442], 6112: [0.90068717277486909, 0.88890706806282727, 0.88219895287958117]}
incremental_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966, 0.224967277486911], 6112: [0.10091593065096501, 0.23707460732984292, 0.24803664921465968]}
incremental_train_sizes_4_folds = array([ 6112, 12224, 18336])
incremental_train_scores_mean_4_folds = array([ 0.89059773, 0.84685864, 0.82422557])
incremental_train_scores_std_4_folds = array([ 0.00764187, 0.0065445, 0. ])
incremental_test_scores_mean_4_folds = array([ 0.1953424, 0.16155135, 0.09421001])
incremental_test_scores_std_4_folds = array([ 0.0669194, 0.06341593, 0. ])
incremental_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952, 0.82993865030674852], 12225: [0.85824130879345606, 0.84932515337423309, 0.84040899795501023], 8150: [0.88981595092024535, 0.88110429447852756, 0.86932515337423311, 0.86687116564417177], 4075: [0.92122699386503071, 0.91533742331288348, 0.90625766871165647, 0.90134969325153369, 0.89447852760736202]}
incremental_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043, 0.21398773006134969], 12225: [0.05865030674846626, 0.22331288343558281, 0.24957055214723928], 8150: [0.063803680981595098, 0.23018404907975459, 0.26085889570552145, 0.27558282208588958], 4075: [0.075828220858895706, 0.22012269938650306, 0.25840490797546012, 0.2723926380368098, 0.23484662576687115]}
incremental_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
incremental_train_scores_mean_6_folds = array([ 0.90773006, 0.87677914, 0.84932515, 0.83276074, 0.8201227 ])
incremental_train_scores_std_6_folds = array([ 0.00957621, 0.00925196, 0.00728001, 0.00282209, 0. ])
incremental_test_scores_mean_6_folds = array([ 0.21231902, 0.20760736, 0.17717791, 0.13521472, 0.05496933])
incremental_test_scores_std_6_folds = array([ 0.07061286, 0.08462505, 0.08449442, 0.07877301, 0. ])
incremental_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754, 0.82422556719022688], 15280: [0.84332460732984293, 0.83776178010471203, 0.83082460732984298], 12224: [0.86338350785340312, 0.8534031413612565, 0.84710405759162299, 0.84031413612565442], 9168: [0.88568935427574169, 0.87565445026178013, 0.87041884816753923, 0.85907504363001741, 0.8586387434554974], 6112: [0.90085078534031415, 0.90068717277486909, 0.89725130890052351, 0.88890706806282727, 0.88334424083769636, 0.88219895287958117], 3056: [0.91819371727748689, 0.9240837696335078, 0.92702879581151831, 0.91852094240837701, 0.91819371727748689, 0.92833769633507857, 0.90150523560209428]}
incremental_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004, 0.17604712041884818], 15280: [0.043819489862655332, 0.17702879581151831, 0.2486910994764398], 12224: [0.048724656638325703, 0.18160994764397906, 0.25327225130890052, 0.26897905759162305], 9168: [0.055591890124264222, 0.18422774869109948, 0.26734293193717279, 0.27945026178010474, 0.30268324607329844], 6112: [0.072596468279921514, 0.18259162303664922, 0.27192408376963351, 0.28435863874345552, 0.29286649214659688, 0.26145287958115182], 3056: [0.11118378024852844, 0.1806282722513089, 0.2581806282722513, 0.27290575916230364, 0.27748691099476441, 0.26897905759162305, 0.25785340314136124]}
incremental_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
incremental_train_scores_mean_8_folds = array([ 0.91940912, 0.89220659, 0.86989529, 0.85105121, 0.83730366, 0.82646161,
0.81740838])
incremental_train_scores_std_8_folds = array([ 0.00831456, 0.00776394, 0.01026335, 0.00849238, 0.00511337, 0.00223604,
0. ])
incremental_test_scores_mean_8_folds = array([ 0.23245969, 0.2276317, 0.21785922, 0.18814648, 0.15651313, 0.10780773,
0.03924133])
incremental_test_scores_std_8_folds = array([ 0.05818413, 0.07814916, 0.09044222, 0.0869719, 0.08488723, 0.06823939,
0. ])
incremental_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444, 0.82249488752556232], 17115: [0.83651767455448434, 0.83102541630148996, 0.82810400233713122], 14670: [0.84935241990456711, 0.84226312201772324, 0.83974096796182685, 0.83278800272665299], 12225: [0.86633946830265851, 0.85758691206543969, 0.85251533742331287, 0.84621676891615538, 0.84040899795501023], 9780: [0.88548057259713697, 0.87525562372188137, 0.87300613496932511, 0.86124744376278117, 0.85429447852760731, 0.8535787321063395], 7335: [0.89788684389911388, 0.89570552147239269, 0.88943421949556922, 0.88657123381049763, 0.87607361963190189, 0.87075664621676896, 0.87457396046353097], 4890: [0.91942740286298563, 0.90817995910020455, 0.9122699386503067, 0.90817995910020455, 0.90122699386503069, 0.88568507157464216, 0.90081799591002043, 0.88670756646216764], 2445: [0.92842535787321068, 0.93006134969325149, 0.93660531697341509, 0.93047034764826175, 0.93987730061349695, 0.91574642126789363, 0.91983640081799589, 0.9235173824130879, 0.91002044989775055]}
incremental_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455, 0.095296523517382409], 17115: [0.12106339468302658, 0.10061349693251534, 0.25439672801635993], 14670: [0.12269938650306748, 0.10224948875255624, 0.2593047034764826, 0.23640081799591003], 12225: [0.12719836400817996, 0.11615541922290389, 0.25807770961145193, 0.23885480572597137, 0.29284253578732106], 9780: [0.14069529652351739, 0.11942740286298568, 0.26625766871165646, 0.24498977505112474, 0.30224948875255625, 0.31533742331288345], 7335: [0.16196319018404909, 0.12392638036809817, 0.2593047034764826, 0.2560327198364008, 0.31083844580777098, 0.31124744376278118, 0.2523517382413088], 4890: [0.17995910020449898, 0.1329243353783231, 0.2523517382413088, 0.26339468302658486, 0.29734151329243352, 0.29979550102249491, 0.26134969325153373, 0.26871165644171779], 2445: [0.19386503067484662, 0.13006134969325153, 0.2392638036809816, 0.24498977505112474, 0.29202453987730059, 0.27034764826175867, 0.24130879345603273, 0.24989775051124744, 0.25071574642126787]}
incremental_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
incremental_train_scores_mean_10_folds = array([ 0.92606226, 0.90281186, 0.88442886, 0.86714383, 0.8526135, 0.84103613,
0.83188236, 0.82433538, 0.81799591])
incremental_train_scores_std_10_folds = array([ 0.00914095, 0.0110811, 0.00994113, 0.01169251, 0.00897791, 0.005924,
0.00348791, 0.00184049, 0. ])
incremental_test_scores_mean_10_folds = array([ 0.23471938, 0.24447853, 0.23938066, 0.23149284, 0.20662577, 0.1801636,
0.15869121, 0.10633947, 0.11820041])
incremental_test_scores_std_10_folds = array([ 0.04451149, 0.05448996, 0.06594014, 0.07553149, 0.07157226, 0.06855415,
0.06818705, 0.01104294, 0. ])
incremental_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918, 0.81727229825389336], 19560: [0.83052147239263807, 0.82264826175869121, 0.82249488752556232], 17930: [0.83736754043502515, 0.830117122141662, 0.82861126603457891, 0.82381483547127721], 16300: [0.84699386503067486, 0.83619631901840485, 0.83668711656441719, 0.83073619631901841, 0.82993865030674852], 14670: [0.85405589638718471, 0.8477164280845263, 0.84226312201772324, 0.83871847307430125, 0.83653715064758005, 0.83278800272665299], 13040: [0.86618098159509205, 0.85552147239263798, 0.85421779141104293, 0.84739263803680986, 0.84631901840490797, 0.84072085889570547, 0.8377300613496933], 11410: [0.87765118317265556, 0.86958808063102544, 0.86126205083260299, 0.86038562664329532, 0.85723049956178787, 0.85021910604732687, 0.84469763365468886, 0.84522348816827342], 9780: [0.88588957055214723, 0.87924335378323104, 0.87525562372188137, 0.87044989775051129, 0.87157464212678937, 0.86124744376278117, 0.8563394683026585, 0.85685071574642124, 0.8535787321063395], 8150: [0.89447852760736202, 0.89042944785276079, 0.88858895705521468, 0.88638036809815945, 0.88110429447852756, 0.87754601226993867, 0.87067484662576689, 0.86907975460122699, 0.86453987730061355, 0.86687116564417177], 6520: [0.90567484662576692, 0.90000000000000002, 0.89892638036809813, 0.89984662576687113, 0.89309815950920246, 0.89401840490797546, 0.88696319018404912, 0.88312883435582823, 0.87423312883435578, 0.87944785276073623, 0.87898773006134967], 4890: [0.91390593047034763, 0.91451942740286296, 0.90817995910020455, 0.91267893660531696, 0.90899795501022496, 0.90817995910020455, 0.90061349693251536, 0.9002044989775051, 0.88568507157464216, 0.89836400817995909, 0.90040899795501017, 0.88670756646216764], 3260: [0.91932515337423315, 0.92638036809815949, 0.92699386503067482, 0.92791411042944782, 0.91748466257668715, 0.92392638036809815, 0.93006134969325149, 0.91625766871165648, 0.90490797546012269, 0.90674846625766869, 
0.91901840490797548, 0.90889570552147236, 0.89785276073619635], 1630: [0.93496932515337428, 0.94049079754601228, 0.94601226993865029, 0.94969325153374229, 0.93619631901840494, 0.92883435582822083, 0.93803680981595094, 0.94907975460122695, 0.91717791411042948, 0.92699386503067482, 0.9319018404907975, 0.94723926380368095, 0.91533742331288348, 0.92638036809815949]}
incremental_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773, 0.060122699386503067], 19560: [0.15337423312883436, 0.059509202453987733, 0.12331288343558282], 17930: [0.15214723926380369, 0.058282208588957052, 0.13190184049079753, 0.2607361963190184], 16300: [0.15398773006134969, 0.061963190184049083, 0.13190184049079753, 0.27116564417177913, 0.25398773006134967], 14670: [0.15460122699386503, 0.066257668711656448, 0.13374233128834356, 0.27116564417177913, 0.25398773006134967, 0.2411042944785276], 13040: [0.15766871165644172, 0.068711656441717797, 0.14294478527607363, 0.27300613496932513, 0.25398773006134967, 0.24049079754601227, 0.29877300613496932], 11410: [0.16809815950920245, 0.066871165644171782, 0.15337423312883436, 0.26625766871165646, 0.26380368098159507, 0.2460122699386503, 0.30858895705521472, 0.33128834355828218], 9780: [0.17668711656441718, 0.073619631901840496, 0.15398773006134969, 0.27484662576687119, 0.26503067484662579, 0.25644171779141106, 0.30613496932515338, 0.32822085889570551, 0.31411042944785278], 8150: [0.18466257668711655, 0.078527607361963195, 0.16073619631901839, 0.26687116564417179, 0.27423312883435585, 0.26932515337423313, 0.31226993865030672, 0.33742331288343558, 0.31963190184049078, 0.26748466257668713], 6520: [0.19570552147239265, 0.095092024539877307, 0.16809815950920245, 0.2588957055214724, 0.26993865030674846, 0.26564417177914113, 0.30797546012269938, 0.33680981595092024, 0.31963190184049078, 0.27361963190184047, 0.27975460122699386], 4890: [0.20429447852760735, 0.11288343558282209, 0.17055214723926379, 0.26503067484662579, 0.26319018404907973, 0.2822085889570552, 0.30184049079754599, 0.32699386503067485, 0.30306748466257671, 0.28650306748466259, 0.27607361963190186, 0.26625766871165646], 3260: [0.21717791411042944, 0.12208588957055215, 0.16380368098159509, 0.25950920245398773, 0.252760736196319, 0.27300613496932513, 0.29815950920245399, 0.33128834355828218, 0.29509202453987732, 
0.27975460122699386, 0.28343558282208586, 0.26503067484662579, 0.27300613496932513], 1630: [0.23803680981595093, 0.15889570552147239, 0.15705521472392639, 0.23251533742331287, 0.25521472392638039, 0.25582822085889573, 0.28159509202453986, 0.31901840490797545, 0.2570552147239264, 0.26196319018404907, 0.26319018404907973, 0.23558282208588957, 0.25214723926380367, 0.25950920245398773]}
incremental_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
incremental_train_scores_mean_15_folds = array([ 0.93488168, 0.91736668, 0.90320382, 0.8903932, 0.87896933, 0.86782549,
0.85828221, 0.84972612, 0.84201318, 0.83611043, 0.82997769, 0.82522154,
0.82012742, 0.81849255])
incremental_train_scores_std_15_folds = array([ 0.01074397, 0.00967823, 0.00931587, 0.00992714, 0.01023275, 0.01070537,
0.01082517, 0.00899664, 0.00711293, 0.00609528, 0.00485997, 0.00374814,
0.00285512, 0. ])
incremental_test_scores_mean_15_folds = array([ 0.2448291, 0.25493157, 0.25490798, 0.25192415, 0.24711656, 0.23878664,
0.22553681, 0.20508326, 0.18680982, 0.17460123, 0.15076687, 0.11206544,
0.10552147, 0.15276074])
incremental_test_scores_std_15_folds = array([ 0.04097555, 0.05470141, 0.05920192, 0.06843836, 0.07712113, 0.08085383,
0.08314464, 0.07722247, 0.07412044, 0.07818233, 0.07246458, 0.03913685,
0.04539877, 0. ])
incremental_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484, 0.81821819151773811], 21516: [0.824595649749024, 0.82041271611823763, 0.81692693809258221], 20538: [0.82753919563735512, 0.82520206446586819, 0.81892102444249681, 0.81862888304606096], 19560: [0.83195296523517381, 0.82878323108384455, 0.82356850715746421, 0.82014314928425358, 0.82249488752556232], 18582: [0.8366698955978904, 0.83397911957808635, 0.82741362608976432, 0.82692928640619956, 0.82348509310085027, 0.82396943278441503], 17604: [0.84003635537377863, 0.83702567598273114, 0.83242444898886614, 0.83094751192910699, 0.82969779595546467, 0.826630311292888, 0.82509656896159966], 16626: [0.84620473956453746, 0.84187417298207623, 0.83585949717310237, 0.83549861662456393, 0.8338145073980513, 0.83297245278479493, 0.82828100565379525, 0.8286418862023337], 15648: [0.85090746421267893, 0.84809560327198363, 0.84189672801635995, 0.83876533742331283, 0.83825408997955009, 0.83640081799591004, 0.8339723926380368, 0.83167177914110424, 0.83032975460122704], 14670: [0.85821404226312203, 0.85262440354464897, 0.84723926380368098, 0.84464894342194952, 0.84226312201772324, 0.84137695978186777, 0.83749147920927058, 0.83715064758009539, 0.83360599863667351, 0.83278800272665299], 13692: [0.86240140227870288, 0.86064855390008765, 0.8523225241016652, 0.85210341805433831, 0.84845165059888983, 0.84567630733274901, 0.8437043529068069, 0.84041776219690334, 0.83939526730937775, 0.83384458077709611, 0.83494011101373067], 12714: [0.871558911436212, 0.86644643699858426, 0.85921031933301872, 0.85614283467044205, 0.85456976561271036, 0.85189554821456659, 0.84867075664621672, 0.84827748938178382, 0.84481673745477426, 0.84135598552776469, 0.83687273871322954, 0.83852446122384772], 11736: [0.87585207907293794, 0.87491479209270617, 0.86699045671438313, 0.86255964553510567, 0.859492160872529, 0.859492160872529, 0.85506134969325154, 0.85565780504430811, 0.85216428084526241, 0.84688139059304701, 
0.84398432174505789, 0.83980913428766191, 0.84355828220858897], 10758: [0.88213422569250788, 0.8812046848856665, 0.87488380739914484, 0.87153746049451575, 0.86642498605688789, 0.86558839933073062, 0.8642870422011526, 0.86242796058746984, 0.85833798103736758, 0.85489868005205427, 0.85015802193716306, 0.84960029745305821, 0.84690462911321807, 0.84987915969511063], 9780: [0.88486707566462164, 0.88926380368098157, 0.88159509202453989, 0.87770961145194271, 0.87525562372188137, 0.87259713701431496, 0.87157464212678937, 0.87044989775051129, 0.86543967280163603, 0.86124744376278117, 0.85664621676891617, 0.85521472392638032, 0.85644171779141109, 0.85419222903885483, 0.8535787321063395], 8802: [0.88559418314019545, 0.89036582594864799, 0.88798000454442172, 0.88479890933878669, 0.8827539195637355, 0.87991365598727567, 0.87855032947057488, 0.87832310838445804, 0.87502840263576465, 0.86980231765507843, 0.86639400136332656, 0.86321290615769142, 0.86332651670074978, 0.8626448534423995, 0.85810043172006367, 0.86332651670074978], 7824: [0.89187116564417179, 0.89570552147239269, 0.89327709611451944, 0.89199897750511248, 0.89059304703476483, 0.88905930470347649, 0.88854805725971375, 0.8880368098159509, 0.88036809815950923, 0.88087934560327197, 0.87359406952965235, 0.87397750511247441, 0.87282719836400813, 0.86707566462167684, 0.86809815950920244, 0.87103783231083842, 0.86950408997955009], 6846: [0.89789658194566169, 0.90213263219398188, 0.89702015775635413, 0.89789658194566169, 0.89775051124744376, 0.89599766286882854, 0.89322231960268772, 0.89307624890446979, 0.89176161262050835, 0.88913234005258546, 0.88504236050248319, 0.88080631025416301, 0.88036809815950923, 0.87554776511831722, 0.87394098743791993, 0.87773882559158634, 0.87642418930762489, 0.87452527023079174], 5868: [0.90184049079754602, 0.91104294478527603, 0.90252215405589642, 0.90235173824130877, 0.90593047034764829, 0.9038854805725971, 0.90081799591002043, 0.8989434219495569, 0.8979209270620313, 0.8989434219495569, 
0.89383094751192915, 0.8933197000681663, 0.88957055214723924, 0.88599182004089982, 0.87917518745739609, 0.8887184730743013, 0.8873551465576005, 0.88650306748466257, 0.8834355828220859], 4890: [0.89959100204498976, 0.91676891615541922, 0.91554192229038855, 0.91083844580777096, 0.90817995910020455, 0.91206543967280163, 0.91349693251533748, 0.90531697341513295, 0.90490797546012269, 0.90817995910020455, 0.90081799591002043, 0.90368098159509203, 0.90163599182004095, 0.89345603271983642, 0.88568507157464216, 0.89345603271983642, 0.89938650306748469, 0.90000000000000002, 0.89775051124744376, 0.88670756646216764], 3912: [0.90030674846625769, 0.91768916155419222, 0.92075664621676889, 0.92382413087934556, 0.91589979550102252, 0.91641104294478526, 0.92663599182004086, 0.91385480572597133, 0.91155419222903888, 0.91794478527607359, 0.91641104294478526, 0.91308793456032722, 0.91411042944785281, 0.90720858895705525, 0.89800613496932513, 0.90030674846625769, 0.9066973415132924, 0.91206543967280163, 0.91359918200408996, 0.89979550102249484, 0.89340490797546013], 2934: [0.90149965916837083, 0.92638036809815949, 0.92092706203135655, 0.93387866394001362, 0.92876618950238587, 0.92706203135650989, 0.93626448534423989, 0.92842535787321068, 0.91990456714383095, 0.92603953646898429, 0.91922290388548056, 0.93456032719836402, 0.92160872528970683, 0.91717791411042948, 0.91240627130197682, 0.91717791411042948, 0.91342876618950242, 0.92058623040218135, 0.92808452624403548, 0.91615541922290389, 0.9035446489434219, 0.90558963871847309], 1956: [0.90286298568507162, 0.93762781186094069, 0.93098159509202449, 0.93711656441717794, 0.93813905930470343, 0.93865030674846628, 0.94734151329243355, 0.93762781186094069, 0.93302658486707568, 0.92944785276073616, 0.92484662576687116, 0.93404907975460127, 0.94529652351738236, 0.92331288343558282, 0.91666666666666663, 0.93456032719836402, 0.92995910020449901, 0.92586912065439675, 0.93404907975460127, 0.93660531697341509, 0.91922290388548056, 0.91768916155419222, 
0.9253578732106339], 978: [0.89059304703476483, 0.95194274028629855, 0.94887525562372188, 0.95194274028629855, 0.93762781186094069, 0.94478527607361962, 0.95910020449897748, 0.95501022494887522, 0.9253578732106339, 0.95092024539877296, 0.93456032719836402, 0.9468302658486708, 0.95705521472392641, 0.9468302658486708, 0.91411042944785281, 0.94478527607361962, 0.93149284253578735, 0.94069529652351735, 0.93456032719836402, 0.96319018404907975, 0.93865030674846628, 0.93558282208588961, 0.93149284253578735, 0.92126789366053174]}
incremental_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292, 0.22903885480572597], 21516: [0.10736196319018405, 0.22392638036809817, 0.070552147239263799], 20538: [0.10736196319018405, 0.22699386503067484, 0.07259713701431493, 0.13905930470347649], 19560: [0.10531697341513292, 0.22903885480572597, 0.068507157464212681, 0.13803680981595093, 0.1492842535787321], 18582: [0.10736196319018405, 0.22290388548057261, 0.06646216768916155, 0.14519427402862986, 0.15235173824130879, 0.26687116564417179], 17604: [0.11349693251533742, 0.22903885480572597, 0.06646216768916155, 0.14519427402862986, 0.15644171779141106, 0.26789366053169733, 0.28936605316973413], 16626: [0.11042944785276074, 0.22392638036809817, 0.07259713701431493, 0.14519427402862986, 0.15848670756646216, 0.27505112474437626, 0.29345603271983639, 0.254601226993865], 15648: [0.1165644171779141, 0.22494887525562371, 0.07259713701431493, 0.14621676891615543, 0.15950920245398773, 0.26993865030674846, 0.29550102249488752, 0.25153374233128833, 0.31083844580777098], 14670: [0.11349693251533742, 0.22699386503067484, 0.07259713701431493, 0.14621676891615543, 0.15848670756646216, 0.27402862985685073, 0.29038854805725972, 0.24846625766871167, 0.30981595092024539, 0.21881390593047034], 13692: [0.11451942740286299, 0.23210633946830267, 0.070552147239263799, 0.14723926380368099, 0.16666666666666666, 0.27198364008179959, 0.29550102249488752, 0.25869120654396727, 0.31799591002044991, 0.22392638036809817, 0.2822085889570552], 12714: [0.11758691206543967, 0.23210633946830267, 0.075664621676891614, 0.14826175869120656, 0.17484662576687116, 0.27505112474437626, 0.29652351738241312, 0.2658486707566462, 0.31901840490797545, 0.23517382413087934, 0.28629856850715746, 0.34049079754601225], 11736: [0.1196319018404908, 0.24028629856850717, 0.075664621676891614, 0.15337423312883436, 0.18609406952965235, 0.26993865030674846, 0.28732106339468305, 0.26278118609406953, 0.31186094069529652, 
0.22699386503067484, 0.28936605316973413, 0.34049079754601225, 0.34049079754601225], 10758: [0.130879345603272, 0.2607361963190184, 0.078732106339468297, 0.15848670756646216, 0.18711656441717792, 0.27607361963190186, 0.28936605316973413, 0.26278118609406953, 0.32106339468302658, 0.22801635991820041, 0.29141104294478526, 0.34458077709611451, 0.34151329243353784, 0.32617586912065438], 9780: [0.1329243353783231, 0.27709611451942739, 0.079754601226993863, 0.15439672801635992, 0.18813905930470348, 0.28527607361963192, 0.29141104294478526, 0.27198364008179959, 0.32515337423312884, 0.24335378323108384, 0.29243353783231085, 0.34662576687116564, 0.34458077709611451, 0.33435582822085891, 0.32515337423312884], 8802: [0.13190184049079753, 0.28629856850715746, 0.078732106339468297, 0.16564417177914109, 0.19325153374233128, 0.28118609406952966, 0.28834355828220859, 0.28834355828220859, 0.32924335378323111, 0.25153374233128833, 0.28834355828220859, 0.35276073619631904, 0.34253578732106338, 0.31697341513292432, 0.33333333333333331, 0.28323108384458079], 7824: [0.13496932515337423, 0.29550102249488752, 0.085889570552147243, 0.16871165644171779, 0.19836400817995911, 0.27300613496932513, 0.29243353783231085, 0.27811860940695299, 0.32822085889570551, 0.26482617586912066, 0.29447852760736198, 0.35787321063394684, 0.34867075664621677, 0.33128834355828218, 0.32310838445807771, 0.2822085889570552, 0.28732106339468305], 6846: [0.13701431492842536, 0.29447852760736198, 0.096114519427402859, 0.16973415132924335, 0.19836400817995911, 0.27402862985685073, 0.28527607361963192, 0.27402862985685073, 0.32719836400817998, 0.25153374233128833, 0.29345603271983639, 0.34969325153374231, 0.34969325153374231, 0.33026584867075665, 0.32617586912065438, 0.29243353783231085, 0.29550102249488752, 0.29141104294478526], 5868: [0.14723926380368099, 0.30572597137014312, 0.097137014314928424, 0.17177914110429449, 0.20449897750511248, 0.26380368098159507, 0.28118609406952966, 0.27607361963190186, 
0.32515337423312884, 0.27300613496932513, 0.29754601226993865, 0.34049079754601225, 0.34969325153374231, 0.32310838445807771, 0.32924335378323111, 0.29243353783231085, 0.28629856850715746, 0.30572597137014312, 0.27096114519427406], 4890: [0.15132924335378323, 0.31697341513292432, 0.11247443762781185, 0.17791411042944785, 0.21370143149284254, 0.26278118609406953, 0.28732106339468305, 0.2822085889570552, 0.32310838445807771, 0.27709611451942739, 0.28732106339468305, 0.35378323108384457, 0.32924335378323111, 0.31799591002044991, 0.31288343558282211, 0.28834355828220859, 0.29447852760736198, 0.31083844580777098, 0.27198364008179959, 0.27505112474437626], 3912: [0.15746421267893659, 0.32515337423312884, 0.12678936605316973, 0.20040899795501022, 0.21267893660531698, 0.25664621676891614, 0.28834355828220859, 0.26993865030674846, 0.30981595092024539, 0.28118609406952966, 0.28016359918200406, 0.34969325153374231, 0.33946830265848671, 0.30981595092024539, 0.31492842535787319, 0.28425357873210633, 0.29141104294478526, 0.31186094069529652, 0.2822085889570552, 0.27811860940695299, 0.30163599182004092], 2934: [0.17586912065439672, 0.33435582822085891, 0.14314928425357873, 0.21676891615541921, 0.19938650306748465, 0.25255623721881393, 0.28732106339468305, 0.27096114519427406, 0.29959100204498978, 0.27709611451942739, 0.26380368098159507, 0.35378323108384457, 0.34049079754601225, 0.29038854805725972, 0.30470347648261759, 0.27096114519427406, 0.2658486707566462, 0.32208588957055212, 0.28834355828220859, 0.26278118609406953, 0.31492842535787319, 0.25869120654396727], 1956: [0.19325153374233128, 0.34253578732106338, 0.15848670756646216, 0.24130879345603273, 0.17382413087934559, 0.22290388548057261, 0.31390593047034765, 0.26482617586912066, 0.30674846625766872, 0.2822085889570552, 0.26789366053169733, 0.34049079754601225, 0.33231083844580778, 0.28936605316973413, 0.28016359918200406, 0.28425357873210633, 0.25766871165644173, 0.30470347648261759, 0.28527607361963192, 
0.24335378323108384, 0.27198364008179959, 0.26482617586912066, 0.29243353783231085], 978: [0.19529652351738241, 0.34253578732106338, 0.18507157464212678, 0.24233128834355827, 0.21370143149284254, 0.20961145194274028, 0.27811860940695299, 0.27505112474437626, 0.25869120654396727, 0.2556237218813906, 0.2310838445807771, 0.29447852760736198, 0.29038854805725972, 0.27198364008179959, 0.24846625766871167, 0.24539877300613497, 0.20143149284253578, 0.25869120654396727, 0.25255623721881393, 0.26993865030674846, 0.18916155419222905, 0.22903885480572597, 0.28323108384458079, 0.23415132924335377]}
incremental_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
incremental_train_scores_mean_25_folds = array([ 0.93988582, 0.93044812, 0.9208496, 0.91140812, 0.90287321, 0.89537366,
0.88779336, 0.881556, 0.87438224, 0.86840491, 0.86273338, 0.85664753,
0.8515285, 0.84671872, 0.84274029, 0.83892155, 0.83539336, 0.8316941,
0.82874108, 0.82538855, 0.82257279, 0.8206451, 0.81868498, 0.81799591])
incremental_train_scores_std_25_folds = array([ 0.01574382, 0.00986344, 0.00941513, 0.0086523, 0.00859881, 0.00843904,
0.0094164, 0.00978435, 0.0102025, 0.01149665, 0.01132038, 0.011076,
0.01026567, 0.00910232, 0.00777419, 0.00661961, 0.00574061, 0.00496077,
0.00492852, 0.00433164, 0.00388806, 0.00313505, 0.00046679, 0. ])
incremental_test_scores_mean_25_folds = array([ 0.24816803, 0.27020539, 0.27244841, 0.27485636, 0.27234151, 0.27058444,
0.26868893, 0.26735234, 0.26322853, 0.2595092, 0.24978089, 0.23879188,
0.2305726, 0.21649005, 0.20593047, 0.20529425, 0.19171779, 0.18112767,
0.16019087, 0.13803681, 0.13650307, 0.13394683, 0.16717791, 0.10633947])
incremental_test_scores_std_25_folds = array([ 0.03709919, 0.0474123, 0.05100164, 0.0554582, 0.06067133, 0.06643791,
0.07039766, 0.07419473, 0.0772398, 0.08042228, 0.07961067, 0.08014752,
0.08023328, 0.07686982, 0.0755778, 0.07961578, 0.07611923, 0.07668308,
0.06730022, 0.05350297, 0.05728938, 0.06537574, 0.06186094, 0. ])
incremental_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894, 0.81799591002044991], 22983: [0.81877909759387368, 0.81821346212417878, 0.81890962885611107], 22494: [0.82177469547434867, 0.8191517738063484, 0.819196230105806, 0.81821819151773811], 22005: [0.82394910247670983, 0.82140422631220178, 0.81972279027493755, 0.81904112701658716, 0.81799591002044991], 21516: [0.82594348391894401, 0.824595649749024, 0.82250418293363081, 0.82041271611823763, 0.81911135898865961, 0.81692693809258221], 21027: [0.8270319113520711, 0.82679412184334422, 0.82455890046131164, 0.82260902648975132, 0.81966043658153798, 0.81832881533266755, 0.81766300470823228], 20538: [0.82958418541240631, 0.82753919563735512, 0.82753919563735512, 0.82520206446586819, 0.82232934073424868, 0.81892102444249681, 0.81819067095140718, 0.81862888304606096], 20049: [0.8316624270537184, 0.82891914808718636, 0.82822085889570551, 0.82762232530300761, 0.82448002394134368, 0.82168686717542017, 0.81874407701132224, 0.81944236620280309, 0.82058955558880742], 19560: [0.83333333333333337, 0.83195296523517381, 0.83067484662576685, 0.82878323108384455, 0.82617586912065444, 0.82356850715746421, 0.82249488752556232, 0.82014314928425358, 0.82055214723926384, 0.82249488752556232], 19071: [0.83682030307797184, 0.83320224424518907, 0.83377903623302396, 0.83141943264642648, 0.82858790834250962, 0.82612343348539663, 0.82476010696869595, 0.82344921608725286, 0.82093230559488228, 0.82240050338209847, 0.82318703791096426], 18582: [0.83785383704660421, 0.8366698955978904, 0.83435582822085885, 0.83397911957808635, 0.83118071251749004, 0.82741362608976432, 0.82800559681412123, 0.82692928640619956, 0.82488429663114837, 0.82348509310085027, 0.82386180174362289, 0.82396943278441503], 18093: [0.84115403747305584, 0.83722986790471454, 0.8366771679655115, 0.83474271817830104, 0.83280826839109046, 0.83087381860387999, 0.82998949870115513, 0.82860774885314759, 0.82706018902337919, 
0.82656275907809651, 0.82484938926656715, 0.82595478914497322, 0.8232465594428785], 17604: [0.84350147693705979, 0.84003635537377863, 0.83844580777096112, 0.83702567598273114, 0.83441263349238814, 0.83242444898886614, 0.83145875937286984, 0.83094751192910699, 0.83037945921381506, 0.82969779595546467, 0.82833446943876388, 0.826630311292888, 0.8259486480345376, 0.82509656896159966], 17115: [0.84691790826760149, 0.84317849839322234, 0.84224364592462753, 0.83827052293309967, 0.83651767455448434, 0.83447268477943326, 0.83342097575226415, 0.83353783231083844, 0.83196026877008478, 0.83102541630148996, 0.83219398188723337, 0.82921413964358748, 0.82629272567922873, 0.8269354367513877, 0.82810400233713122], 16626: [0.84885119692048594, 0.84620473956453746, 0.84536268495128108, 0.84187417298207623, 0.83844580777096112, 0.83585949717310237, 0.83489714904366652, 0.83549861662456393, 0.83417538794658963, 0.8338145073980513, 0.83267171899434622, 0.83297245278479493, 0.82966438108985929, 0.82828100565379525, 0.82972452784794903, 0.8286418862023337], 16137: [0.85077771580839068, 0.84792712400074366, 0.84817500154923464, 0.84513850158021941, 0.84166821590134477, 0.83881762409369776, 0.83695854248001489, 0.83708248125426044, 0.83621490983454172, 0.83584309351180519, 0.83454173638222717, 0.83435582822085885, 0.83274462415566708, 0.83156720580033461, 0.83125735886472085, 0.83038978744500214, 0.83082357315486155], 15648: [0.8539749488752556, 0.85090746421267893, 0.84924591002044991, 0.84809560327198363, 0.84502811860940696, 0.84189672801635995, 0.83946830265848671, 0.83876533742331283, 0.83819018404907975, 0.83825408997955009, 0.83780674846625769, 0.83640081799591004, 0.83416411042944782, 0.8339723926380368, 0.83467535787321068, 0.83167177914110424, 0.83141615541922287, 0.83032975460122704], 15159: [0.85744442245530705, 0.85421201926248436, 0.85196912725113794, 0.84959430041559469, 0.84814301734942943, 0.84444884227191763, 0.843723200738835, 0.84148030872748858, 0.84075466719440595, 
0.83956725377663433, 0.84002902566132331, 0.83818193812256747, 0.83659872023220527, 0.83574114387492582, 0.83587307869912264, 0.83448776304505579, 0.83277261033049677, 0.83217890362161095, 0.83125535985223298], 14670: [0.8597818677573279, 0.85821404226312203, 0.85555555555555551, 0.85262440354464897, 0.84935241990456711, 0.84723926380368098, 0.84689843217450578, 0.84464894342194952, 0.84294478527607364, 0.84226312201772324, 0.84192229038854804, 0.84137695978186777, 0.83871847307430125, 0.83749147920927058, 0.83974096796182685, 0.83715064758009539, 0.83640081799591004, 0.83360599863667351, 0.83251533742331285, 0.83278800272665299], 14181: [0.86150483040688242, 0.86002397574219025, 0.85995345885339536, 0.85508779352654962, 0.85057471264367812, 0.85050419575488334, 0.8488117904238065, 0.84860023975742194, 0.84648473309357586, 0.84648473309357586, 0.84380509131937098, 0.84458077709611457, 0.8404907975460123, 0.84161906776673012, 0.84098441576757632, 0.83978562865806361, 0.83886890910373035, 0.83752908821662786, 0.83329807488893592, 0.83414427755447429, 0.83160566955785908], 13692: [0.86634531113058721, 0.86240140227870288, 0.86364300321355536, 0.86064855390008765, 0.8555360794624598, 0.8523225241016652, 0.8507157464212679, 0.85210341805433831, 0.85159217061057557, 0.84845165059888983, 0.84786736780601812, 0.84567630733274901, 0.84421560035056964, 0.8437043529068069, 0.84341221151037105, 0.84041776219690334, 0.84290096406660819, 0.83939526730937775, 0.83822670172363423, 0.83384458077709611, 0.83333333333333337, 0.83494011101373067], 13203: [0.87002953874119515, 0.86737862606983263, 0.86609103991517078, 0.86344012724380825, 0.85942588805574494, 0.85465424524729228, 0.85442702416117544, 0.85435128379913661, 0.85359388017874727, 0.85344239945466938, 0.84965538135272289, 0.84995834280087856, 0.84511095963038707, 0.84647428614708775, 0.84556540180262063, 0.84397485419980312, 0.84382337347572522, 0.84382337347572522, 0.83958191320154507, 0.84071801863212903, 
0.83458304930697569, 0.83624933727183215, 0.83655229871998793], 12714: [0.8728173666823974, 0.871558911436212, 0.86967122856693413, 0.86644643699858426, 0.8619631901840491, 0.85921031933301872, 0.85779455718106024, 0.85614283467044205, 0.85677206229353464, 0.85456976561271036, 0.85472707251848357, 0.85189554821456659, 0.84977190498662891, 0.84867075664621672, 0.84945729117508262, 0.84827748938178382, 0.84812018247601073, 0.84481673745477426, 0.84324366839704268, 0.84135598552776469, 0.84143463898065129, 0.83687273871322954, 0.83600755073147714, 0.83852446122384772], 12225: [0.87427402862985681, 0.874601226993865, 0.87337423312883433, 0.87108384458077714, 0.86633946830265851, 0.86298568507157469, 0.86159509202453988, 0.85946830265848673, 0.85881390593047036, 0.85758691206543969, 0.8555419222903885, 0.85652351738241306, 0.85194274028629857, 0.85284253578732105, 0.85251533742331287, 0.8512883435582822, 0.85194274028629857, 0.8489979550102249, 0.84719836400817994, 0.84621676891615538, 0.84220858895705519, 0.84261758691206545, 0.83918200408997956, 0.84016359918200412, 0.84040899795501023], 11736: [0.87857873210633952, 0.87585207907293794, 0.87849352419904569, 0.87491479209270617, 0.87125085207907293, 0.86699045671438313, 0.86486025903203823, 0.86255964553510567, 0.86170756646216773, 0.859492160872529, 0.85932174505794134, 0.859492160872529, 0.85591342876618948, 0.85506134969325154, 0.85753237900477164, 0.85565780504430811, 0.85582822085889576, 0.85216428084526241, 0.85046012269938653, 0.84688139059304701, 0.84654055896387181, 0.84398432174505789, 0.84492160872528965, 0.83980913428766191, 0.84262099522835721, 0.84355828220858897], 11247: [0.88094603005245842, 0.88085711745354311, 0.88005690406330572, 0.87970125366764473, 0.87329954654574549, 0.87009869298479592, 0.87018760558371122, 0.86805370320974484, 0.86520850004445626, 0.8627189472748289, 0.86378589846181209, 0.86458611185204948, 0.86031830710411661, 0.86014048190628611, 0.85765092913665864, 0.85791766693340443, 
0.85889570552147243, 0.85622832755401446, 0.85373877478438698, 0.8513381346136748, 0.84689250466791144, 0.84787054325597933, 0.84680359206899614, 0.84635902907441984, 0.84431403929936877, 0.84573664088201295, 0.84493642749177555], 10758: [0.87999628183677259, 0.88213422569250788, 0.88315672058003347, 0.8812046848856665, 0.8789737869492471, 0.87488380739914484, 0.87265290946272545, 0.87153746049451575, 0.86921360847741214, 0.86642498605688789, 0.86605316973415136, 0.86558839933073062, 0.86335750139431122, 0.8642870422011526, 0.86475181260457334, 0.86242796058746984, 0.8608477412158394, 0.85833798103736758, 0.85796616471463094, 0.85489868005205427, 0.85201710355084592, 0.85015802193716306, 0.85127347090537275, 0.84960029745305821, 0.84848484848484851, 0.84690462911321807, 0.84885666480758504, 0.84987915969511063], 10269: [0.88207225630538511, 0.88178011490894925, 0.88723342097575231, 0.88479890933878669, 0.88109845165059886, 0.87730061349693256, 0.87642418930762489, 0.87525562372188137, 0.87321063394683029, 0.86951017625864246, 0.87136040510273638, 0.86853637160385622, 0.86542019670854031, 0.86892589346577076, 0.86814684974194178, 0.86512805531210435, 0.86629662089784787, 0.86220664134774561, 0.86064855390008765, 0.8603564125036518, 0.85470834550589148, 0.85597429155711369, 0.85353977992014807, 0.85305287759275494, 0.85042360502483205, 0.85315025805823352, 0.85052098549031063, 0.84983932223196024, 0.85032622455935336], 9780: [0.88466257668711656, 0.88486707566462164, 0.88670756646216764, 0.88926380368098157, 0.88548057259713697, 0.88159509202453989, 0.87985685071574637, 0.87770961145194271, 0.87934560327198363, 0.87525562372188137, 0.87300613496932511, 0.87259713701431496, 0.86912065439672803, 0.87157464212678937, 0.87300613496932511, 0.87044989775051129, 0.87106339468302663, 0.86543967280163603, 0.86431492842535784, 0.86124744376278117, 0.85899795501022491, 0.85664621676891617, 0.85838445807770958, 0.85521472392638032, 0.85429447852760731, 0.85644171779141109, 
0.85480572597137017, 0.85419222903885483, 0.85255623721881391, 0.8535787321063395], 9291: [0.884834786352384, 0.88612635884188995, 0.88806371757614899, 0.88870950382090197, 0.88849424173931757, 0.88440426218921542, 0.88257453449574852, 0.88085243784307399, 0.88268216553654077, 0.88009902055752876, 0.87708535141534816, 0.87697772037455601, 0.87278010978366161, 0.87460983747712839, 0.87471746851792054, 0.87568614788505006, 0.8743945753955441, 0.87073512000861053, 0.86729092670326124, 0.86761381982563768, 0.86126358841890005, 0.86169411258206863, 0.86072543321493922, 0.86061780217414707, 0.85911096760305672, 0.85954149176622541, 0.85857281239909589, 0.85749650199117422, 0.85609729846087612, 0.85502098805295446, 0.85642019158325255], 8802: [0.88604862531242901, 0.88559418314019545, 0.89184276300840715, 0.89036582594864799, 0.89059304703476483, 0.88798000454442172, 0.88809361508748008, 0.88479890933878669, 0.88570779368325381, 0.8827539195637355, 0.88457168825266985, 0.87991365598727567, 0.87843671892751651, 0.87855032947057488, 0.87809588729834132, 0.87832310838445804, 0.87752783458304928, 0.87502840263576465, 0.87321063394683029, 0.86980231765507843, 0.86628039082026809, 0.86639400136332656, 0.86537150647580097, 0.86321290615769142, 0.86275846398545786, 0.86332651670074978, 0.86366734832992498, 0.8626448534423995, 0.85969097932288119, 0.85810043172006367, 0.86025903203817311, 0.86332651670074978], 8313: [0.89125466137375198, 0.88812702995308557, 0.89281847708408513, 0.89305906411644409, 0.8923373030193672, 0.8923373030193672, 0.89101407434139301, 0.88920967159870079, 0.8898111391795982, 0.88776614940454712, 0.88848791050162401, 0.88632262721039334, 0.88271382172500901, 0.88235294117647056, 0.88379646337062434, 0.88295440875736797, 0.88151088656321419, 0.87934560327198363, 0.87922530975580415, 0.87561650427041982, 0.86960182846144596, 0.86948153494526648, 0.86815830626729218, 0.86767713220257425, 0.86743654517021529, 0.86839889329965114, 0.86659449055695903, 
0.86479008781426681, 0.86358715265247199, 0.86178274990977988, 0.86527126187898473, 0.86442920726572836, 0.86707566462167684], 7824: [0.89391615541922287, 0.89187116564417179, 0.8936605316973415, 0.89570552147239269, 0.89608895705521474, 0.89327709611451944, 0.89302147239263807, 0.89199897750511248, 0.89378834355828218, 0.89059304703476483, 0.89008179959100209, 0.88905930470347649, 0.88842024539877296, 0.88854805725971375, 0.8871421267893661, 0.8880368098159509, 0.8834355828220859, 0.88036809815950923, 0.88407464212678932, 0.88087934560327197, 0.8778118609406953, 0.87359406952965235, 0.87359406952965235, 0.87397750511247441, 0.87448875255623726, 0.87282719836400813, 0.86848159509202449, 0.86707566462167684, 0.86490286298568508, 0.86809815950920244, 0.86975971370143146, 0.87103783231083842, 0.86848159509202449, 0.86950408997955009], 7335: [0.89747784594410362, 0.89461486025903203, 0.89720518064076349, 0.89679618268575323, 0.89788684389911388, 0.8958418541240627, 0.89706884798909337, 0.89447852760736202, 0.89693251533742335, 0.89570552147239269, 0.89188820722563056, 0.89338786639400136, 0.89011588275391962, 0.89202453987730057, 0.88943421949556922, 0.88820722563053855, 0.8877982276755283, 0.88575323790047722, 0.88684389911383776, 0.88657123381049763, 0.88084526244035444, 0.88152692569870483, 0.87580095432856164, 0.8768916155419223, 0.87607361963190189, 0.87648261758691204, 0.87484662576687111, 0.87239263803680978, 0.86830265848670751, 0.87075664621676896, 0.87280163599182004, 0.87566462167689163, 0.87389229720518069, 0.87171097477845949, 0.87457396046353097], 6846: [0.90037978381536665, 0.89789658194566169, 0.90300905638328954, 0.90213263219398188, 0.90037978381536665, 0.89702015775635413, 0.9000876424189308, 0.89789658194566169, 0.89964943032427691, 0.89775051124744376, 0.89628980426526439, 0.89599766286882854, 0.8952673093777388, 0.89322231960268772, 0.89249196611159798, 0.89307624890446979, 0.89073911773298275, 0.89176161262050835, 0.89176161262050835, 
0.88913234005258546, 0.88635699678644464, 0.88504236050248319, 0.8834355828220859, 0.88080631025416301, 0.88124452234881678, 0.88036809815950923, 0.87934560327198363, 0.87554776511831722, 0.87306456324861237, 0.87394098743791993, 0.87832310838445804, 0.87773882559158634, 0.87730061349693256, 0.87642418930762489, 0.87583990651475319, 0.87452527023079174], 6357: [0.89948088721094854, 0.90184049079754602, 0.90671700487651408, 0.90593047034764829, 0.90593047034764829, 0.89806512505899005, 0.90073934245713383, 0.90073934245713383, 0.90215510460909232, 0.90136857008022653, 0.89995280792826804, 0.90136857008022653, 0.89806512505899005, 0.89712128362435106, 0.89633474909548527, 0.89664936290703157, 0.89476168003775369, 0.8933459178857952, 0.89633474909548527, 0.89444706622620729, 0.89004247286455873, 0.88831209690105395, 0.88799748308950766, 0.88878401761837345, 0.88469403806827118, 0.88563787950291017, 0.8823344344816737, 0.88060405851816892, 0.87399716847569608, 0.87871637564889093, 0.88186251376435421, 0.88500865187981748, 0.88060405851816892, 0.88186251376435421, 0.8834355828220859, 0.87808714802579835, 0.87777253421425205], 5868: [0.8979209270620313, 0.90184049079754602, 0.90933878663940015, 0.91104294478527603, 0.90814587593728702, 0.90252215405589642, 0.90320381731424682, 0.90235173824130877, 0.90490797546012269, 0.90593047034764829, 0.90405589638718475, 0.9038854805725971, 0.9038854805725971, 0.90081799591002043, 0.89655760054533062, 0.8989434219495569, 0.90013633265167003, 0.8979209270620313, 0.90115882753919563, 0.8989434219495569, 0.89468302658486709, 0.89383094751192915, 0.89417177914110424, 0.8933197000681663, 0.89246762099522836, 0.88957055214723924, 0.88786639400136336, 0.88599182004089982, 0.88019768234492157, 0.87917518745739609, 0.8873551465576005, 0.8887184730743013, 0.8876959781867757, 0.8873551465576005, 0.88752556237218816, 0.88650306748466257, 0.88394683026584864, 0.8834355828220859], 5379: [0.8947759806655512, 0.89960959286112663, 
0.91094999070459193, 0.912994980479643, 0.91318088864101132, 0.90797546012269936, 0.908347276445436, 0.90462911321807027, 0.90686001115448966, 0.9072318274772263, 0.908347276445436, 0.90760364379996283, 0.9072318274772263, 0.9094627254136457, 0.90258412344301919, 0.90016731734523148, 0.9038854805725971, 0.90295593976575572, 0.90407138873396542, 0.90425729689533374, 0.90128276631344117, 0.89719278676333891, 0.89644915411786574, 0.89998140918386316, 0.89459007250418299, 0.89607733779512921, 0.89105781743818557, 0.89105781743818557, 0.88417921546755907, 0.88455103179029559, 0.88622420524261014, 0.8947759806655512, 0.89273099089050012, 0.89533370514965605, 0.8936605316973415, 0.89459007250418299, 0.8925450827291318, 0.8903141847927124, 0.8823201338538762], 4890: [0.89427402862985683, 0.89959100204498976, 0.91186094069529655, 0.91676891615541922, 0.91942740286298563, 0.91554192229038855, 0.91165644171779137, 0.91083844580777096, 0.90961145194274029, 0.90817995910020455, 0.90756646216768921, 0.91206543967280163, 0.91329243353783229, 0.91349693251533748, 0.9122699386503067, 0.90531697341513295, 0.90531697341513295, 0.90490797546012269, 0.90920245398773003, 0.90817995910020455, 0.9057259713701431, 0.90081799591002043, 0.90163599182004095, 0.90368098159509203, 0.90122699386503069, 0.90163599182004095, 0.8991820040899795, 0.89345603271983642, 0.89161554192229042, 0.88568507157464216, 0.89284253578732109, 0.89345603271983642, 0.89877300613496935, 0.89938650306748469, 0.90081799591002043, 0.90000000000000002, 0.89959100204498976, 0.89775051124744376, 0.88936605316973416, 0.88670756646216764], 4401: [0.89502385821404229, 0.90024994319472851, 0.90956600772551688, 0.91660986139513745, 0.91820040899795496, 0.9177459668257214, 0.9177459668257214, 0.91638264030902072, 0.91660986139513745, 0.91092933424221767, 0.91070211315610083, 0.91433765053396954, 0.9184276300840718, 0.91933651442853892, 0.91274710293115202, 0.91229266075891846, 0.91161099750056807, 0.9082026812088162, 
0.91297432401726886, 0.91297432401726886, 0.91092933424221767, 0.90593047034764829, 0.90547602817541473, 0.90956600772551688, 0.9038854805725971, 0.91024767098386727, 0.90252215405589642, 0.90024994319472851, 0.89479663712792545, 0.8936605316973415, 0.89706884798909337, 0.89934105885026128, 0.89956827993637811, 0.90365825948648038, 0.90570324926153145, 0.90706657577823224, 0.90865712338104976, 0.9038854805725971, 0.89888661667802772, 0.8900249943194728, 0.88798000454442172], 3912: [0.89570552147239269, 0.90030674846625769, 0.90925357873210633, 0.91768916155419222, 0.92050102249488752, 0.92075664621676889, 0.92459100204498978, 0.92382413087934556, 0.92484662576687116, 0.91589979550102252, 0.91180981595092025, 0.91641104294478526, 0.92331288343558282, 0.92663599182004086, 0.91871165644171782, 0.91385480572597133, 0.91538854805725967, 0.91155419222903888, 0.91666666666666663, 0.91794478527607359, 0.91538854805725967, 0.91641104294478526, 0.91078732106339466, 0.91308793456032722, 0.91155419222903888, 0.91411042944785281, 0.91155419222903888, 0.90720858895705525, 0.89979550102249484, 0.89800613496932513, 0.90005112474437632, 0.90030674846625769, 0.90465235173824132, 0.9066973415132924, 0.91308793456032722, 0.91206543967280163, 0.91666666666666663, 0.91359918200408996, 0.90260736196319014, 0.89979550102249484, 0.8936605316973415, 0.89340490797546013], 3423: [0.89862693543675143, 0.90154834940111017, 0.91527899503359622, 0.9199532573765703, 0.92112182296231371, 0.91936897458369848, 0.92900964066608238, 0.92988606485539005, 0.92608822670172364, 0.92433537832310841, 0.92112182296231371, 0.91586327782646804, 0.92638036809815949, 0.93280747881974879, 0.9301782062518259, 0.91907683318726263, 0.91703184341221156, 0.91907683318726263, 0.920537540169442, 0.92229038854805723, 0.92112182296231371, 0.91703184341221156, 0.92345895413380075, 0.92141396435874967, 0.91381828805141685, 0.91820040899795496, 0.91732398480864741, 0.913526146654981, 0.90651475314051999, 0.90213263219398188, 
0.90739117732982766, 0.90885188431200703, 0.9038854805725971, 0.91177329827636577, 0.91557113643003218, 0.91878469179082678, 0.92112182296231371, 0.92550394390885193, 0.91323400525854515, 0.90534618755477647, 0.90271691498685369, 0.89628980426526439, 0.89775051124744376], 2934: [0.89604635310156777, 0.90149965916837083, 0.91547375596455349, 0.92638036809815949, 0.92263122017723243, 0.92092706203135655, 0.92638036809815949, 0.93387866394001362, 0.93353783231083842, 0.92876618950238587, 0.9253578732106339, 0.92706203135650989, 0.92569870483980909, 0.93626448534423989, 0.93490115882753921, 0.92842535787321068, 0.92092706203135655, 0.91990456714383095, 0.92842535787321068, 0.92603953646898429, 0.92126789366053174, 0.91922290388548056, 0.92672119972733469, 0.93456032719836402, 0.92229038854805723, 0.92160872528970683, 0.91956373551465576, 0.91717791411042948, 0.91240627130197682, 0.91240627130197682, 0.9147920927062031, 0.91717791411042948, 0.91513292433537829, 0.91342876618950242, 0.91717791411042948, 0.92058623040218135, 0.92808452624403548, 0.92808452624403548, 0.9253578732106339, 0.91615541922290389, 0.90865712338104976, 0.9035446489434219, 0.89843217450579416, 0.90558963871847309], 2445: [0.89775051124744376, 0.9038854805725971, 0.92229038854805723, 0.93047034764826175, 0.92842535787321068, 0.92147239263803682, 0.92801635991820042, 0.93742331288343561, 0.93946830265848669, 0.93006134969325149, 0.93210633946830268, 0.93251533742331283, 0.93537832310838442, 0.93701431492842535, 0.93660531697341509, 0.93047034764826175, 0.93251533742331283, 0.92842535787321068, 0.92760736196319016, 0.93047034764826175, 0.92801635991820042, 0.92106339468302656, 0.92638036809815949, 0.93333333333333335, 0.93987730061349695, 0.93333333333333335, 0.92515337423312882, 0.91901840490797548, 0.91411042944785281, 0.91574642126789363, 0.92515337423312882, 0.92678936605316975, 0.92229038854805723, 0.92024539877300615, 0.91983640081799589, 0.92433537832310841, 0.92760736196319016, 
0.93456032719836402, 0.93742331288343561, 0.9235173824130879, 0.91901840490797548, 0.91615541922290389, 0.90838445807770962, 0.90920245398773003, 0.91002044989775055], 1956: [0.89570552147239269, 0.90286298568507162, 0.92126789366053174, 0.93762781186094069, 0.93200408997955009, 0.93098159509202449, 0.93149284253578735, 0.93711656441717794, 0.94478527607361962, 0.93813905930470343, 0.93404907975460127, 0.93865030674846628, 0.94171779141104295, 0.94734151329243355, 0.94427402862985688, 0.93762781186094069, 0.93098159509202449, 0.93302658486707568, 0.93711656441717794, 0.92944785276073616, 0.93251533742331283, 0.92484662576687116, 0.9253578732106339, 0.93404907975460127, 0.93813905930470343, 0.94529652351738236, 0.93762781186094069, 0.92331288343558282, 0.91922290388548056, 0.91666666666666663, 0.93251533742331283, 0.93456032719836402, 0.92433537832310841, 0.92995910020449901, 0.92638036809815949, 0.92586912065439675, 0.92842535787321068, 0.93404907975460127, 0.93967280163599187, 0.93660531697341509, 0.92944785276073616, 0.91922290388548056, 0.91564417177914115, 0.91768916155419222, 0.91615541922290389, 0.9253578732106339], 1467: [0.89229720518064082, 0.89843217450579416, 0.92774369461486028, 0.94274028629856854, 0.94478527607361962, 0.94069529652351735, 0.9468302658486708, 0.93524199045671441, 0.94069529652351735, 0.94546693933197001, 0.93865030674846628, 0.93660531697341509, 0.95023858214042267, 0.95023858214042267, 0.95637355146557601, 0.94001363326516696, 0.93660531697341509, 0.92842535787321068, 0.94546693933197001, 0.92910702113156096, 0.93524199045671441, 0.93456032719836402, 0.92978868438991136, 0.93865030674846628, 0.93796864349011588, 0.95023858214042267, 0.95092024539877296, 0.93047034764826175, 0.92024539877300615, 0.92160872528970683, 0.93592365371506481, 0.94137695978186775, 0.93387866394001362, 0.92842535787321068, 0.94069529652351735, 0.93456032719836402, 0.93592365371506481, 0.92978868438991136, 0.94887525562372188, 0.94410361281526922, 
0.93933197000681667, 0.92774369461486028, 0.91002044989775055, 0.92706203135650989, 0.93183367416496254, 0.93319700068166322, 0.92978868438991136], 978: [0.91104294478527603, 0.89059304703476483, 0.92842535787321068, 0.95194274028629855, 0.95501022494887522, 0.94887525562372188, 0.95705521472392641, 0.95194274028629855, 0.94171779141104295, 0.93762781186094069, 0.94989775051124747, 0.94478527607361962, 0.94989775051124747, 0.95910020449897748, 0.94785276073619629, 0.95501022494887522, 0.93762781186094069, 0.9253578732106339, 0.94989775051124747, 0.95092024539877296, 0.92638036809815949, 0.93456032719836402, 0.94887525562372188, 0.9468302658486708, 0.94171779141104295, 0.95705521472392641, 0.95398773006134974, 0.9468302658486708, 0.92126789366053174, 0.91411042944785281, 0.93865030674846628, 0.94478527607361962, 0.94274028629856854, 0.93149284253578735, 0.93865030674846628, 0.94069529652351735, 0.94989775051124747, 0.93456032719836402, 0.94171779141104295, 0.96319018404907975, 0.94785276073619629, 0.93865030674846628, 0.92433537832310841, 0.93558282208588961, 0.93149284253578735, 0.93149284253578735, 0.93558282208588961, 0.92126789366053174], 489: [0.91820040899795496, 0.91002044989775055, 0.91206543967280163, 0.96932515337423308, 0.97750511247443761, 0.95501022494887522, 0.95910020449897748, 0.95296523517382414, 0.96114519427402867, 0.93660531697341509, 0.96523517382413093, 0.9468302658486708, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.95296523517382414, 0.96523517382413093, 0.90593047034764829, 0.96319018404907975, 0.95910020449897748, 0.95910020449897748, 0.91411042944785281, 0.94069529652351735, 0.97137014314928427, 0.93865030674846628, 0.95705521472392641, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.90184049079754602, 0.93660531697341509, 0.9468302658486708, 0.95501022494887522, 0.94069529652351735, 0.94274028629856854, 0.95296523517382414, 0.94069529652351735, 0.95296523517382414, 0.94069529652351735, 
0.96114519427402867, 0.9795501022494888, 0.94069529652351735, 0.92433537832310841, 0.93660531697341509, 0.93047034764826175, 0.93047034764826175, 0.96932515337423308, 0.92024539877300615, 0.94887525562372188]}
incremental_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859, 0.15132924335378323], 22983: [0.096114519427402859, 0.15132924335378323, 0.26993865030674846], 22494: [0.096114519427402859, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484], 22005: [0.09815950920245399, 0.15132924335378323, 0.27607361963190186, 0.22290388548057261, 0.16359918200408999], 21516: [0.096114519427402859, 0.15337423312883436, 0.27198364008179959, 0.22085889570552147, 0.16768916155419222, 0.096114519427402859], 21027: [0.09815950920245399, 0.1492842535787321, 0.27607361963190186, 0.22085889570552147, 0.16973415132924335, 0.096114519427402859, 0.15746421267893659], 20538: [0.096114519427402859, 0.15337423312883436, 0.27811860940695299, 0.22699386503067484, 0.16564417177914109, 0.10020449897750511, 0.15541922290388549, 0.15132924335378323], 20049: [0.096114519427402859, 0.15132924335378323, 0.27402862985685073, 0.22903885480572597, 0.16768916155419222, 0.096114519427402859, 0.15132924335378323, 0.15541922290388549, 0.20449897750511248], 19560: [0.09202453987730061, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484, 0.15950920245398773, 0.094069529652351741, 0.15132924335378323, 0.14723926380368099, 0.20245398773006135, 0.14723926380368099], 19071: [0.096114519427402859, 0.15337423312883436, 0.27402862985685073, 0.22494887525562371, 0.16359918200408999, 0.094069529652351741, 0.15132924335378323, 0.15950920245398773, 0.20245398773006135, 0.15132924335378323, 0.21676891615541921], 18582: [0.094069529652351741, 0.15337423312883436, 0.28016359918200406, 0.22290388548057261, 0.16359918200408999, 0.09202453987730061, 0.1492842535787321, 0.16155419222903886, 0.20040899795501022, 0.15337423312883436, 0.21472392638036811, 0.32310838445807771], 18093: [0.096114519427402859, 0.15950920245398773, 0.28016359918200406, 0.22699386503067484, 0.16359918200408999, 0.085889570552147243, 0.1492842535787321, 0.16155419222903886, 
0.20245398773006135, 0.15337423312883436, 0.21676891615541921, 0.32106339468302658, 0.28016359918200406], 17604: [0.09815950920245399, 0.16564417177914109, 0.2822085889570552, 0.22903885480572597, 0.17177914110429449, 0.087934560327198361, 0.14723926380368099, 0.16768916155419222, 0.20654396728016361, 0.15337423312883436, 0.21676891615541921, 0.32924335378323111, 0.29038854805725972, 0.30061349693251532], 17115: [0.096114519427402859, 0.16155419222903886, 0.27811860940695299, 0.22903885480572597, 0.17586912065439672, 0.094069529652351741, 0.14519427402862986, 0.16768916155419222, 0.21063394683026584, 0.15746421267893659, 0.21676891615541921, 0.32719836400817998, 0.28425357873210633, 0.30061349693251532, 0.27607361963190186], 16626: [0.09202453987730061, 0.15950920245398773, 0.28629856850715746, 0.22494887525562371, 0.16973415132924335, 0.094069529652351741, 0.14314928425357873, 0.16564417177914109, 0.20449897750511248, 0.16359918200408999, 0.22085889570552147, 0.33537832310838445, 0.28016359918200406, 0.29856850715746419, 0.2822085889570552, 0.24130879345603273], 16137: [0.094069529652351741, 0.15746421267893659, 0.2822085889570552, 0.2310838445807771, 0.17177914110429449, 0.096114519427402859, 0.14519427402862986, 0.17586912065439672, 0.21063394683026584, 0.16359918200408999, 0.22290388548057261, 0.32719836400817998, 0.28834355828220859, 0.30265848670756645, 0.28016359918200406, 0.2474437627811861, 0.33537832310838445], 15648: [0.09202453987730061, 0.16973415132924335, 0.2822085889570552, 0.22699386503067484, 0.17995910020449898, 0.094069529652351741, 0.14519427402862986, 0.17177914110429449, 0.21267893660531698, 0.16155419222903886, 0.22494887525562371, 0.32924335378323111, 0.29038854805725972, 0.30674846625766872, 0.29038854805725972, 0.24130879345603273, 0.33333333333333331, 0.28629856850715746], 15159: [0.096114519427402859, 0.16768916155419222, 0.28425357873210633, 0.22085889570552147, 0.17177914110429449, 0.09202453987730061, 0.1411042944785276, 
0.17791411042944785, 0.21676891615541921, 0.15746421267893659, 0.21676891615541921, 0.33128834355828218, 0.29243353783231085, 0.29856850715746419, 0.29038854805725972, 0.23517382413087934, 0.33946830265848671, 0.27811860940695299, 0.2392638036809816], 14670: [0.094069529652351741, 0.16359918200408999, 0.28629856850715746, 0.22494887525562371, 0.17177914110429449, 0.09202453987730061, 0.14519427402862986, 0.17586912065439672, 0.21881390593047034, 0.15950920245398773, 0.21881390593047034, 0.33128834355828218, 0.29038854805725972, 0.29652351738241312, 0.29447852760736198, 0.22903885480572597, 0.34969325153374231, 0.28834355828220859, 0.24130879345603273, 0.25153374233128833], 14181: [0.096114519427402859, 0.17177914110429449, 0.28834355828220859, 0.2310838445807771, 0.17382413087934559, 0.094069529652351741, 0.14519427402862986, 0.17586912065439672, 0.20654396728016361, 0.17177914110429449, 0.21267893660531698, 0.33537832310838445, 0.29447852760736198, 0.30265848670756645, 0.29243353783231085, 0.24539877300613497, 0.34764826175869118, 0.2822085889570552, 0.23517382413087934, 0.25971370143149286, 0.26789366053169733], 13692: [0.10020449897750511, 0.16564417177914109, 0.29038854805725972, 0.23517382413087934, 0.17995910020449898, 0.087934560327198361, 0.14519427402862986, 0.17791411042944785, 0.20858895705521471, 0.16973415132924335, 0.21881390593047034, 0.33128834355828218, 0.29856850715746419, 0.30061349693251532, 0.28834355828220859, 0.24130879345603273, 0.35173824130879344, 0.28834355828220859, 0.24130879345603273, 0.26175869120654399, 0.27402862985685073, 0.32310838445807771], 13203: [0.09815950920245399, 0.17382413087934559, 0.29038854805725972, 0.24539877300613497, 0.17995910020449898, 0.089979550102249492, 0.1492842535787321, 0.17791411042944785, 0.21472392638036811, 0.17177914110429449, 0.21676891615541921, 0.33128834355828218, 0.30674846625766872, 0.30061349693251532, 0.28834355828220859, 0.24539877300613497, 0.35378323108384457, 0.28425357873210633, 
0.24335378323108384, 0.26175869120654399, 0.27811860940695299, 0.32310838445807771, 0.3619631901840491], 12714: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.23312883435582821, 0.17177914110429449, 0.096114519427402859, 0.15950920245398773, 0.17177914110429449, 0.21267893660531698, 0.16768916155419222, 0.21676891615541921, 0.33946830265848671, 0.30061349693251532, 0.29856850715746419, 0.29243353783231085, 0.25153374233128833, 0.34355828220858897, 0.29038854805725972, 0.24539877300613497, 0.26380368098159507, 0.2822085889570552, 0.32310838445807771, 0.35787321063394684, 0.35378323108384457], 12225: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.2392638036809816, 0.17382413087934559, 0.094069529652351741, 0.15541922290388549, 0.17382413087934559, 0.21881390593047034, 0.17586912065439672, 0.21676891615541921, 0.33742331288343558, 0.29652351738241312, 0.29243353783231085, 0.28834355828220859, 0.25153374233128833, 0.35173824130879344, 0.27402862985685073, 0.25153374233128833, 0.26175869120654399, 0.28425357873210633, 0.32515337423312884, 0.36400817995910023, 0.34969325153374231, 0.33333333333333331], 11736: [0.10224948875255624, 0.17177914110429449, 0.29447852760736198, 0.24335378323108384, 0.17382413087934559, 0.10224948875255624, 0.14723926380368099, 0.17791411042944785, 0.22494887525562371, 0.17177914110429449, 0.22290388548057261, 0.33742331288343558, 0.29652351738241312, 0.29038854805725972, 0.29652351738241312, 0.25153374233128833, 0.34969325153374231, 0.27811860940695299, 0.25357873210633947, 0.25153374233128833, 0.28425357873210633, 0.32719836400817998, 0.35991820040899797, 0.35378323108384457, 0.34764826175869118, 0.3783231083844581], 11247: [0.10224948875255624, 0.18200408997955012, 0.30470347648261759, 0.24948875255623723, 0.17177914110429449, 0.10020449897750511, 0.14519427402862986, 0.16973415132924335, 0.21472392638036811, 0.16768916155419222, 0.22699386503067484, 0.33742331288343558, 0.29652351738241312, 
0.29038854805725972, 0.28629856850715746, 0.25766871165644173, 0.35582822085889571, 0.30265848670756645, 0.25153374233128833, 0.2556237218813906, 0.28016359918200406, 0.33537832310838445, 0.3721881390593047, 0.35991820040899797, 0.33742331288343558, 0.37423312883435583, 0.33742331288343558], 10758: [0.10633946830265849, 0.18813905930470348, 0.30470347648261759, 0.26175869120654399, 0.18404907975460122, 0.10429447852760736, 0.15132924335378323, 0.18609406952965235, 0.21063394683026584, 0.17586912065439672, 0.22903885480572597, 0.33537832310838445, 0.30265848670756645, 0.30265848670756645, 0.29652351738241312, 0.25971370143149286, 0.35787321063394684, 0.29038854805725972, 0.26789366053169733, 0.25153374233128833, 0.27811860940695299, 0.33128834355828218, 0.36809815950920244, 0.35787321063394684, 0.34969325153374231, 0.36400817995910023, 0.32719836400817998, 0.33333333333333331], 10269: [0.10838445807770961, 0.18200408997955012, 0.30470347648261759, 0.27607361963190186, 0.18200408997955012, 0.10224948875255624, 0.15950920245398773, 0.18609406952965235, 0.21881390593047034, 0.17586912065439672, 0.23721881390593047, 0.34151329243353784, 0.30061349693251532, 0.29447852760736198, 0.30061349693251532, 0.25357873210633947, 0.35173824130879344, 0.30265848670756645, 0.25971370143149286, 0.25766871165644173, 0.26993865030674846, 0.32515337423312884, 0.36605316973415131, 0.35787321063394684, 0.34969325153374231, 0.36605316973415131, 0.34151329243353784, 0.33742331288343558, 0.30265848670756645], 9780: [0.10838445807770961, 0.19427402862985685, 0.30674846625766872, 0.28425357873210633, 0.18200408997955012, 0.10633946830265849, 0.16564417177914109, 0.18404907975460122, 0.21881390593047034, 0.18609406952965235, 0.22699386503067484, 0.34969325153374231, 0.30470347648261759, 0.29038854805725972, 0.30674846625766872, 0.26175869120654399, 0.34969325153374231, 0.30470347648261759, 0.2658486707566462, 0.27402862985685073, 0.28629856850715746, 0.33537832310838445, 0.35173824130879344, 
0.35582822085889571, 0.34355828220858897, 0.36605316973415131, 0.32924335378323111, 0.33946830265848671, 0.30265848670756645, 0.36400817995910023], 9291: [0.11042944785276074, 0.18813905930470348, 0.31697341513292432, 0.29243353783231085, 0.18404907975460122, 0.10838445807770961, 0.17791411042944785, 0.18813905930470348, 0.21881390593047034, 0.18813905930470348, 0.22290388548057261, 0.34969325153374231, 0.30061349693251532, 0.29447852760736198, 0.31083844580777098, 0.2658486707566462, 0.3456032719836401, 0.31083844580777098, 0.26993865030674846, 0.26175869120654399, 0.28425357873210633, 0.32310838445807771, 0.35173824130879344, 0.35582822085889571, 0.35582822085889571, 0.36809815950920244, 0.32719836400817998, 0.32310838445807771, 0.31288343558282211, 0.36605316973415131, 0.29856850715746419], 8802: [0.11451942740286299, 0.19222903885480572, 0.32515337423312884, 0.29447852760736198, 0.19222903885480572, 0.10020449897750511, 0.18404907975460122, 0.19018404907975461, 0.21676891615541921, 0.18813905930470348, 0.22699386503067484, 0.34969325153374231, 0.30061349693251532, 0.28834355828220859, 0.30061349693251532, 0.26993865030674846, 0.35378323108384457, 0.31083844580777098, 0.2822085889570552, 0.27607361963190186, 0.29038854805725972, 0.32515337423312884, 0.35378323108384457, 0.37014314928425357, 0.35787321063394684, 0.35787321063394684, 0.33537832310838445, 0.33128834355828218, 0.30470347648261759, 0.3783231083844581, 0.30061349693251532, 0.2822085889570552], 8313: [0.1165644171779141, 0.19631901840490798, 0.32924335378323111, 0.29038854805725972, 0.19018404907975461, 0.10838445807770961, 0.19631901840490798, 0.18813905930470348, 0.22903885480572597, 0.18200408997955012, 0.22903885480572597, 0.34151329243353784, 0.29856850715746419, 0.30061349693251532, 0.29652351738241312, 0.26993865030674846, 0.35173824130879344, 0.30879345603271985, 0.28425357873210633, 0.27811860940695299, 0.29243353783231085, 0.31901840490797545, 0.35582822085889571, 0.3783231083844581, 
0.35378323108384457, 0.36605316973415131, 0.3456032719836401, 0.34355828220858897, 0.31492842535787319, 0.36605316973415131, 0.29447852760736198, 0.29038854805725972, 0.32719836400817998], 7824: [0.1165644171779141, 0.19427402862985685, 0.33742331288343558, 0.29652351738241312, 0.19427402862985685, 0.11042944785276074, 0.20040899795501022, 0.19222903885480572, 0.22494887525562371, 0.18813905930470348, 0.2310838445807771, 0.33742331288343558, 0.30265848670756645, 0.29038854805725972, 0.28629856850715746, 0.2658486707566462, 0.35582822085889571, 0.30061349693251532, 0.30061349693251532, 0.28834355828220859, 0.29243353783231085, 0.32515337423312884, 0.35991820040899797, 0.37423312883435583, 0.35787321063394684, 0.36605316973415131, 0.33537832310838445, 0.34355828220858897, 0.31492842535787319, 0.3619631901840491, 0.28834355828220859, 0.28834355828220859, 0.32106339468302658, 0.28425357873210633], 7335: [0.12269938650306748, 0.19222903885480572, 0.33537832310838445, 0.30061349693251532, 0.19427402862985685, 0.11451942740286299, 0.20245398773006135, 0.19631901840490798, 0.24539877300613497, 0.19222903885480572, 0.23517382413087934, 0.33333333333333331, 0.29652351738241312, 0.29856850715746419, 0.30674846625766872, 0.26380368098159507, 0.36809815950920244, 0.29652351738241312, 0.30061349693251532, 0.2822085889570552, 0.29652351738241312, 0.31901840490797545, 0.35787321063394684, 0.36400817995910023, 0.35378323108384457, 0.36400817995910023, 0.32924335378323111, 0.35378323108384457, 0.30061349693251532, 0.35991820040899797, 0.30061349693251532, 0.27811860940695299, 0.33742331288343558, 0.29652351738241312, 0.25971370143149286], 6846: [0.12883435582822086, 0.19631901840490798, 0.33946830265848671, 0.29038854805725972, 0.19836400817995911, 0.12678936605316973, 0.21472392638036811, 0.19222903885480572, 0.24335378323108384, 0.19222903885480572, 0.22903885480572597, 0.3456032719836401, 0.30470347648261759, 0.28629856850715746, 0.30470347648261759, 0.26380368098159507, 
0.35991820040899797, 0.29652351738241312, 0.31901840490797545, 0.26789366053169733, 0.30265848670756645, 0.32310838445807771, 0.35991820040899797, 0.35787321063394684, 0.35378323108384457, 0.36605316973415131, 0.33537832310838445, 0.34764826175869118, 0.30879345603271985, 0.36400817995910023, 0.29243353783231085, 0.28834355828220859, 0.32924335378323111, 0.29038854805725972, 0.26993865030674846, 0.35378323108384457], 6357: [0.1329243353783231, 0.19836400817995911, 0.33333333333333331, 0.30470347648261759, 0.20040899795501022, 0.12269938650306748, 0.21676891615541921, 0.18813905930470348, 0.24130879345603273, 0.19222903885480572, 0.22494887525562371, 0.34151329243353784, 0.30470347648261759, 0.28629856850715746, 0.29243353783231085, 0.2658486707566462, 0.3619631901840491, 0.27402862985685073, 0.32924335378323111, 0.28016359918200406, 0.29652351738241312, 0.31492842535787319, 0.34764826175869118, 0.36605316973415131, 0.34764826175869118, 0.37014314928425357, 0.32924335378323111, 0.35787321063394684, 0.30061349693251532, 0.37014314928425357, 0.30061349693251532, 0.29447852760736198, 0.33128834355828218, 0.30061349693251532, 0.27198364008179959, 0.34969325153374231, 0.29243353783231085], 5868: [0.1411042944785276, 0.20245398773006135, 0.35173824130879344, 0.30879345603271985, 0.21063394683026584, 0.12065439672801637, 0.21472392638036811, 0.19018404907975461, 0.23721881390593047, 0.19836400817995911, 0.21063394683026584, 0.33128834355828218, 0.31901840490797545, 0.28834355828220859, 0.28425357873210633, 0.27402862985685073, 0.35582822085889571, 0.29652351738241312, 0.31901840490797545, 0.26993865030674846, 0.2822085889570552, 0.33333333333333331, 0.35582822085889571, 0.35787321063394684, 0.35582822085889571, 0.3721881390593047, 0.32924335378323111, 0.34969325153374231, 0.28834355828220859, 0.36605316973415131, 0.30674846625766872, 0.30470347648261759, 0.33946830265848671, 0.29652351738241312, 0.2658486707566462, 0.35582822085889571, 0.2822085889570552, 
0.27607361963190186], 5379: [0.14314928425357873, 0.20449897750511248, 0.3456032719836401, 0.31492842535787319, 0.21676891615541921, 0.12678936605316973, 0.22085889570552147, 0.19018404907975461, 0.24335378323108384, 0.20449897750511248, 0.21063394683026584, 0.33333333333333331, 0.32719836400817998, 0.29243353783231085, 0.29038854805725972, 0.27811860940695299, 0.34969325153374231, 0.29856850715746419, 0.32515337423312884, 0.28629856850715746, 0.29038854805725972, 0.33742331288343558, 0.34969325153374231, 0.35173824130879344, 0.33946830265848671, 0.35378323108384457, 0.32106339468302658, 0.33537832310838445, 0.28016359918200406, 0.35378323108384457, 0.30265848670756645, 0.29038854805725972, 0.34151329243353784, 0.30879345603271985, 0.27607361963190186, 0.35173824130879344, 0.28425357873210633, 0.27402862985685073, 0.30674846625766872], 4890: [0.15132924335378323, 0.20858895705521471, 0.34969325153374231, 0.31697341513292432, 0.2310838445807771, 0.13905930470347649, 0.2310838445807771, 0.19427402862985685, 0.2474437627811861, 0.22085889570552147, 0.21881390593047034, 0.33742331288343558, 0.33742331288343558, 0.30470347648261759, 0.31083844580777098, 0.28016359918200406, 0.34764826175869118, 0.30061349693251532, 0.34151329243353784, 0.29038854805725972, 0.2822085889570552, 0.32106339468302658, 0.34764826175869118, 0.37627811860940696, 0.33946830265848671, 0.35378323108384457, 0.34764826175869118, 0.35173824130879344, 0.28425357873210633, 0.35582822085889571, 0.28629856850715746, 0.2822085889570552, 0.33128834355828218, 0.31083844580777098, 0.27402862985685073, 0.35991820040899797, 0.29038854805725972, 0.28834355828220859, 0.31083844580777098, 0.28016359918200406], 4401: [0.15337423312883436, 0.21472392638036811, 0.3619631901840491, 0.33128834355828218, 0.2474437627811861, 0.14723926380368099, 0.24539877300613497, 0.20858895705521471, 0.24335378323108384, 0.21881390593047034, 0.21881390593047034, 0.32310838445807771, 0.32515337423312884, 0.30265848670756645, 
0.30879345603271985, 0.28425357873210633, 0.33742331288343558, 0.2822085889570552, 0.32924335378323111, 0.28629856850715746, 0.28425357873210633, 0.31492842535787319, 0.35582822085889571, 0.3783231083844581, 0.34151329243353784, 0.36605316973415131, 0.34151329243353784, 0.3456032719836401, 0.28016359918200406, 0.3619631901840491, 0.30061349693251532, 0.27607361963190186, 0.33537832310838445, 0.31697341513292432, 0.27811860940695299, 0.34969325153374231, 0.27811860940695299, 0.28629856850715746, 0.30879345603271985, 0.2822085889570552, 0.32515337423312884], 3912: [0.17791411042944785, 0.21472392638036811, 0.36809815950920244, 0.32515337423312884, 0.24948875255623723, 0.15337423312883436, 0.22903885480572597, 0.21063394683026584, 0.24335378323108384, 0.21267893660531698, 0.22085889570552147, 0.31901840490797545, 0.31697341513292432, 0.29447852760736198, 0.29447852760736198, 0.2658486707566462, 0.35173824130879344, 0.28016359918200406, 0.33333333333333331, 0.29652351738241312, 0.28834355828220859, 0.31288343558282211, 0.34355828220858897, 0.37014314928425357, 0.34355828220858897, 0.35991820040899797, 0.33333333333333331, 0.33742331288343558, 0.27402862985685073, 0.36605316973415131, 0.31697341513292432, 0.28834355828220859, 0.32515337423312884, 0.30674846625766872, 0.26993865030674846, 0.36400817995910023, 0.28016359918200406, 0.29856850715746419, 0.29652351738241312, 0.28629856850715746, 0.33128834355828218, 0.30470347648261759], 3423: [0.19631901840490798, 0.21267893660531698, 0.3619631901840491, 0.33537832310838445, 0.2474437627811861, 0.1492842535787321, 0.24539877300613497, 0.23721881390593047, 0.2556237218813906, 0.21063394683026584, 0.22290388548057261, 0.33128834355828218, 0.32719836400817998, 0.29447852760736198, 0.31697341513292432, 0.24948875255623723, 0.34764826175869118, 0.2822085889570552, 0.31288343558282211, 0.29038854805725972, 0.30879345603271985, 0.30061349693251532, 0.35787321063394684, 0.3824130879345603, 0.34764826175869118, 0.35173824130879344, 
0.33333333333333331, 0.33128834355828218, 0.26380368098159507, 0.3619631901840491, 0.29447852760736198, 0.30061349693251532, 0.33742331288343558, 0.30061349693251532, 0.26789366053169733, 0.3783231083844581, 0.29243353783231085, 0.28425357873210633, 0.30470347648261759, 0.27811860940695299, 0.34969325153374231, 0.29038854805725972, 0.22290388548057261], 2934: [0.19222903885480572, 0.24539877300613497, 0.36605316973415131, 0.33946830265848671, 0.23517382413087934, 0.17586912065439672, 0.2556237218813906, 0.2556237218813906, 0.25153374233128833, 0.20654396728016361, 0.21472392638036811, 0.31288343558282211, 0.31288343558282211, 0.31288343558282211, 0.30470347648261759, 0.25971370143149286, 0.34764826175869118, 0.27607361963190186, 0.31492842535787319, 0.29038854805725972, 0.28425357873210633, 0.28834355828220859, 0.36400817995910023, 0.37423312883435583, 0.33946830265848671, 0.37627811860940696, 0.33333333333333331, 0.31901840490797545, 0.27402862985685073, 0.33946830265848671, 0.29038854805725972, 0.27811860940695299, 0.32106339468302658, 0.27607361963190186, 0.24130879345603273, 0.3783231083844581, 0.28016359918200406, 0.29652351738241312, 0.30879345603271985, 0.2658486707566462, 0.33537832310838445, 0.30470347648261759, 0.23517382413087934, 0.30879345603271985], 2445: [0.19222903885480572, 0.24335378323108384, 0.38036809815950923, 0.34151329243353784, 0.24539877300613497, 0.18813905930470348, 0.25153374233128833, 0.25153374233128833, 0.26175869120654399, 0.21676891615541921, 0.20654396728016361, 0.29447852760736198, 0.30879345603271985, 0.32515337423312884, 0.30879345603271985, 0.25153374233128833, 0.3456032719836401, 0.28425357873210633, 0.31697341513292432, 0.29652351738241312, 0.29856850715746419, 0.31288343558282211, 0.34355828220858897, 0.39468302658486709, 0.34151329243353784, 0.3721881390593047, 0.31901840490797545, 0.30674846625766872, 0.27402862985685073, 0.32106339468302658, 0.29447852760736198, 0.30265848670756645, 0.30879345603271985, 
0.25971370143149286, 0.23721881390593047, 0.37423312883435583, 0.28834355828220859, 0.30879345603271985, 0.30674846625766872, 0.26789366053169733, 0.31083844580777098, 0.28629856850715746, 0.21881390593047034, 0.28425357873210633, 0.29652351738241312], 1956: [0.20245398773006135, 0.26993865030674846, 0.35991820040899797, 0.33333333333333331, 0.25357873210633947, 0.17791411042944785, 0.2474437627811861, 0.28425357873210633, 0.29038854805725972, 0.17177914110429449, 0.19222903885480572, 0.29652351738241312, 0.29856850715746419, 0.33946830265848671, 0.32515337423312884, 0.25766871165644173, 0.33742331288343558, 0.29038854805725972, 0.34151329243353784, 0.28834355828220859, 0.28834355828220859, 0.29038854805725972, 0.33333333333333331, 0.35991820040899797, 0.31901840490797545, 0.36400817995910023, 0.33742331288343558, 0.31492842535787319, 0.26380368098159507, 0.31492842535787319, 0.29243353783231085, 0.28834355828220859, 0.30879345603271985, 0.2658486707566462, 0.2310838445807771, 0.3783231083844581, 0.27607361963190186, 0.30265848670756645, 0.31697341513292432, 0.24130879345603273, 0.26993865030674846, 0.2556237218813906, 0.19427402862985685, 0.31492842535787319, 0.31901840490797545, 0.31697341513292432], 1467: [0.21063394683026584, 0.26993865030674846, 0.33742331288343558, 0.32924335378323111, 0.26993865030674846, 0.19018404907975461, 0.26380368098159507, 0.28834355828220859, 0.31083844580777098, 0.19427402862985685, 0.17382413087934559, 0.28425357873210633, 0.28629856850715746, 0.32310838445807771, 0.28016359918200406, 0.24539877300613497, 0.32106339468302658, 0.26175869120654399, 0.32310838445807771, 0.28629856850715746, 0.26380368098159507, 0.28629856850715746, 0.31492842535787319, 0.3456032719836401, 0.32924335378323111, 0.34151329243353784, 0.35378323108384457, 0.30061349693251532, 0.24539877300613497, 0.31492842535787319, 0.28629856850715746, 0.28425357873210633, 0.31288343558282211, 0.25971370143149286, 0.24539877300613497, 0.34355828220858897, 
0.26175869120654399, 0.29243353783231085, 0.32310838445807771, 0.24948875255623723, 0.26380368098159507, 0.22903885480572597, 0.20858895705521471, 0.30061349693251532, 0.30674846625766872, 0.28629856850715746, 0.2822085889570552], 978: [0.22085889570552147, 0.26789366053169733, 0.33946830265848671, 0.35378323108384457, 0.27402862985685073, 0.19631901840490798, 0.26175869120654399, 0.28834355828220859, 0.29856850715746419, 0.20858895705521471, 0.17791411042944785, 0.29652351738241312, 0.28016359918200406, 0.31901840490797545, 0.25153374233128833, 0.26380368098159507, 0.31901840490797545, 0.24130879345603273, 0.31492842535787319, 0.27811860940695299, 0.26789366053169733, 0.25766871165644173, 0.30265848670756645, 0.32310838445807771, 0.30879345603271985, 0.31288343558282211, 0.32310838445807771, 0.29038854805725972, 0.23517382413087934, 0.29652351738241312, 0.2310838445807771, 0.26175869120654399, 0.29652351738241312, 0.22290388548057261, 0.20654396728016361, 0.30879345603271985, 0.24130879345603273, 0.2658486707566462, 0.29652351738241312, 0.26789366053169733, 0.2392638036809816, 0.18813905930470348, 0.19631901840490798, 0.27607361963190186, 0.25971370143149286, 0.30265848670756645, 0.25766871165644173, 0.23312883435582821], 489: [0.21472392638036811, 0.21267893660531698, 0.32924335378323111, 0.35173824130879344, 0.29652351738241312, 0.20245398773006135, 0.24130879345603273, 0.30061349693251532, 0.29038854805725972, 0.20449897750511248, 0.18813905930470348, 0.23721881390593047, 0.27198364008179959, 0.2822085889570552, 0.20040899795501022, 0.21676891615541921, 0.29652351738241312, 0.18609406952965235, 0.28425357873210633, 0.2474437627811861, 0.23517382413087934, 0.21267893660531698, 0.26380368098159507, 0.26789366053169733, 0.26380368098159507, 0.25766871165644173, 0.2658486707566462, 0.24130879345603273, 0.19631901840490798, 0.22903885480572597, 0.19427402862985685, 0.23312883435582821, 0.29038854805725972, 0.18813905930470348, 0.16973415132924335, 
0.25357873210633947, 0.17791411042944785, 0.20245398773006135, 0.25153374233128833, 0.23517382413087934, 0.2556237218813906, 0.13905930470347649, 0.15950920245398773, 0.24539877300613497, 0.22085889570552147, 0.23517382413087934, 0.25357873210633947, 0.17382413087934559, 0.39672801635991822]}
incremental_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
incremental_train_scores_mean_50_folds = array([ 0.94641292, 0.94018405, 0.93508245, 0.92998133, 0.92539877, 0.92040807,
0.91610107, 0.91143247, 0.90711645, 0.903318, 0.89938555, 0.89571898,
0.89192165, 0.88847908, 0.88510274, 0.8819883, 0.87861655, 0.87507101,
0.87178365, 0.8690559, 0.86611193, 0.8634239, 0.86069042, 0.85786338,
0.85518855, 0.85250511, 0.85012629, 0.84780429, 0.84546392, 0.84356169,
0.84149767, 0.83968132, 0.83789902, 0.83605873, 0.83428571, 0.83245285,
0.83075052, 0.82938238, 0.8276965, 0.82601738, 0.82459641, 0.82349182,
0.82237803, 0.82158239, 0.82042263, 0.81958522, 0.81863406, 0.81820893,
0.81770377])
incremental_train_scores_std_50_folds = array([ 0.01849963, 0.01378789, 0.01221196, 0.01036566, 0.00951653, 0.0095293,
0.00920113, 0.00879358, 0.00817018, 0.00829866, 0.00816914, 0.00840314,
0.00921439, 0.00955778, 0.00966554, 0.01008684, 0.01065165, 0.0106473,
0.0110683, 0.01155044, 0.01137906, 0.01131299, 0.01164905, 0.01127409,
0.01067386, 0.01048885, 0.00997217, 0.00943623, 0.00849798, 0.00799113,
0.00751208, 0.00688614, 0.00636693, 0.00622729, 0.00592422, 0.00536307,
0.00523293, 0.00500458, 0.0051211, 0.00461855, 0.00442537, 0.00428366,
0.00362497, 0.0031117, 0.00208263, 0.00132304, 0.00030215, 0.00021302,
0. ])
incremental_test_scores_mean_50_folds = array([ 0.24009849, 0.26921438, 0.28260018, 0.28949942, 0.29202454, 0.29280535,
0.2946212, 0.29418639, 0.29457828, 0.29335378, 0.28850087, 0.28796685,
0.28740397, 0.28732106, 0.28425358, 0.28317094, 0.28282828, 0.27952454,
0.27653539, 0.27484663, 0.26937452, 0.26701724, 0.26168295, 0.25727544,
0.25071575, 0.24710293, 0.24290922, 0.23545269, 0.23001266, 0.22617587,
0.22354967, 0.22438082, 0.21953567, 0.2101227, 0.20804363, 0.20333041,
0.19207173, 0.18404908, 0.17159323, 0.16462168, 0.16950693, 0.1658998,
0.16681274, 0.16768916, 0.18241309, 0.18711656, 0.1724608, 0.12372188,
0.09611452])
incremental_test_scores_std_50_folds = array([ 0.04942445, 0.04132467, 0.04251402, 0.04883771, 0.04769812, 0.04882399,
0.05244483, 0.05212685, 0.05434039, 0.05685099, 0.05870295, 0.06312129,
0.06397167, 0.06475891, 0.06619849, 0.06858608, 0.07052225, 0.07211568,
0.07247594, 0.07426551, 0.0754153, 0.07650368, 0.08005695, 0.07839793,
0.07575411, 0.07573291, 0.0747032, 0.07206771, 0.07047588, 0.07223881,
0.07240794, 0.07394336, 0.0739362, 0.07114794, 0.07094068, 0.07254348,
0.06833713, 0.06574783, 0.05170119, 0.05388267, 0.05510474, 0.05699836,
0.05972988, 0.06344963, 0.06136878, 0.06931128, 0.07251949, 0.02760736,
0. ])
normal_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565], 6112: [0.90068717277486909]}
normal_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966], 6112: [0.10091593065096501]}
normal_train_sizes_4_folds = array([ 6112, 12224, 18336])
normal_train_scores_mean_4_folds = array([ 0.90068717, 0.85340314, 0.82422557])
normal_train_scores_std_4_folds = array([ 0., 0., 0.])
normal_test_scores_mean_4_folds = array([ 0.10091593, 0.09813543, 0.09421001])
normal_test_scores_std_4_folds = array([ 0., 0., 0.])
normal_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952], 12225: [0.85824130879345606], 8150: [0.88981595092024535], 4075: [0.92122699386503071]}
normal_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043], 12225: [0.05865030674846626], 8150: [0.063803680981595098], 4075: [0.075828220858895706]}
normal_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
normal_train_scores_mean_6_folds = array([ 0.92122699, 0.88981595, 0.85824131, 0.83558282, 0.8201227 ])
normal_train_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_test_scores_mean_6_folds = array([ 0.07582822, 0.06380368, 0.05865031, 0.05644172, 0.05496933])
normal_test_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754], 15280: [0.84332460732984293], 12224: [0.86338350785340312], 9168: [0.88568935427574169], 6112: [0.90085078534031415], 3056: [0.91819371727748689]}
normal_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004], 15280: [0.043819489862655332], 12224: [0.048724656638325703], 9168: [0.055591890124264222], 6112: [0.072596468279921514], 3056: [0.11118378024852844]}
normal_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
normal_train_scores_mean_8_folds = array([ 0.91819372, 0.90085079, 0.88568935, 0.86338351, 0.84332461, 0.82869764,
0.81740838])
normal_train_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_8_folds = array([ 0.11118378, 0.07259647, 0.05559189, 0.04872466, 0.04381949, 0.03956835,
0.03924133])
normal_test_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444], 17115: [0.83651767455448434], 14670: [0.84935241990456711], 12225: [0.86633946830265851], 9780: [0.88548057259713697], 7335: [0.89788684389911388], 4890: [0.91942740286298563], 2445: [0.92842535787321068]}
normal_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455], 17115: [0.12106339468302658], 14670: [0.12269938650306748], 12225: [0.12719836400817996], 9780: [0.14069529652351739], 7335: [0.16196319018404909], 4890: [0.17995910020449898], 2445: [0.19386503067484662]}
normal_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
normal_train_scores_mean_10_folds = array([ 0.92842536, 0.9194274, 0.89788684, 0.88548057, 0.86633947, 0.84935242,
0.83651767, 0.82617587, 0.81799591])
normal_train_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_10_folds = array([ 0.19386503, 0.1799591, 0.16196319, 0.1406953, 0.12719836, 0.12269939,
0.12106339, 0.11738241, 0.11820041])
normal_test_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918], 19560: [0.83052147239263807], 17930: [0.83736754043502515], 16300: [0.84699386503067486], 14670: [0.85405589638718471], 13040: [0.86618098159509205], 11410: [0.87765118317265556], 9780: [0.88588957055214723], 8150: [0.89447852760736202], 6520: [0.90567484662576692], 4890: [0.91390593047034763], 3260: [0.91932515337423315], 1630: [0.93496932515337428]}
normal_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773], 19560: [0.15337423312883436], 17930: [0.15214723926380369], 16300: [0.15398773006134969], 14670: [0.15460122699386503], 13040: [0.15766871165644172], 11410: [0.16809815950920245], 9780: [0.17668711656441718], 8150: [0.18466257668711655], 6520: [0.19570552147239265], 4890: [0.20429447852760735], 3260: [0.21717791411042944], 1630: [0.23803680981595093]}
normal_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
normal_train_scores_mean_15_folds = array([ 0.93496933, 0.91932515, 0.91390593, 0.90567485, 0.89447853, 0.88588957,
0.87765118, 0.86618098, 0.8540559, 0.84699387, 0.83736754, 0.83052147,
0.82298254, 0.81849255])
normal_train_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_15_folds = array([ 0.23803681, 0.21717791, 0.20429448, 0.19570552, 0.18466258, 0.17668712,
0.16809816, 0.15766871, 0.15460123, 0.15398773, 0.15214724, 0.15337423,
0.15092025, 0.15276074])
normal_test_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484], 21516: [0.824595649749024], 20538: [0.82753919563735512], 19560: [0.83195296523517381], 18582: [0.8366698955978904], 17604: [0.84003635537377863], 16626: [0.84620473956453746], 15648: [0.85090746421267893], 14670: [0.85821404226312203], 13692: [0.86240140227870288], 12714: [0.871558911436212], 11736: [0.87585207907293794], 10758: [0.88213422569250788], 9780: [0.88486707566462164], 8802: [0.88559418314019545], 7824: [0.89187116564417179], 6846: [0.89789658194566169], 5868: [0.90184049079754602], 4890: [0.89959100204498976], 3912: [0.90030674846625769], 2934: [0.90149965916837083], 1956: [0.90286298568507162], 978: [0.89059304703476483]}
normal_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292], 21516: [0.10736196319018405], 20538: [0.10736196319018405], 19560: [0.10531697341513292], 18582: [0.10736196319018405], 17604: [0.11349693251533742], 16626: [0.11042944785276074], 15648: [0.1165644171779141], 14670: [0.11349693251533742], 13692: [0.11451942740286299], 12714: [0.11758691206543967], 11736: [0.1196319018404908], 10758: [0.130879345603272], 9780: [0.1329243353783231], 8802: [0.13190184049079753], 7824: [0.13496932515337423], 6846: [0.13701431492842536], 5868: [0.14723926380368099], 4890: [0.15132924335378323], 3912: [0.15746421267893659], 2934: [0.17586912065439672], 1956: [0.19325153374233128], 978: [0.19529652351738241]}
normal_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
normal_train_scores_mean_25_folds = array([ 0.89059305, 0.90286299, 0.90149966, 0.90030675, 0.899591, 0.90184049,
0.89789658, 0.89187117, 0.88559418, 0.88486708, 0.88213423, 0.87585208,
0.87155891, 0.8624014, 0.85821404, 0.85090746, 0.84620474, 0.84003636,
0.8366699, 0.83195297, 0.8275392, 0.82459565, 0.81915177, 0.81799591])
normal_train_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_25_folds = array([ 0.19529652, 0.19325153, 0.17586912, 0.15746421, 0.15132924, 0.14723926,
0.13701431, 0.13496933, 0.13190184, 0.13292434, 0.13087935, 0.1196319,
0.11758691, 0.11451943, 0.11349693, 0.11656442, 0.11042945, 0.11349693,
0.10736196, 0.10531697, 0.10736196, 0.10736196, 0.10531697, 0.10633947])
normal_test_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894], 22983: [0.81877909759387368], 22494: [0.82177469547434867], 22005: [0.82394910247670983], 21516: [0.82594348391894401], 21027: [0.8270319113520711], 20538: [0.82958418541240631], 20049: [0.8316624270537184], 19560: [0.83333333333333337], 19071: [0.83682030307797184], 18582: [0.83785383704660421], 18093: [0.84115403747305584], 17604: [0.84350147693705979], 17115: [0.84691790826760149], 16626: [0.84885119692048594], 16137: [0.85077771580839068], 15648: [0.8539749488752556], 15159: [0.85744442245530705], 14670: [0.8597818677573279], 14181: [0.86150483040688242], 13692: [0.86634531113058721], 13203: [0.87002953874119515], 12714: [0.8728173666823974], 12225: [0.87427402862985681], 11736: [0.87857873210633952], 11247: [0.88094603005245842], 10758: [0.87999628183677259], 10269: [0.88207225630538511], 9780: [0.88466257668711656], 9291: [0.884834786352384], 8802: [0.88604862531242901], 8313: [0.89125466137375198], 7824: [0.89391615541922287], 7335: [0.89747784594410362], 6846: [0.90037978381536665], 6357: [0.89948088721094854], 5868: [0.8979209270620313], 5379: [0.8947759806655512], 4890: [0.89427402862985683], 4401: [0.89502385821404229], 3912: [0.89570552147239269], 3423: [0.89862693543675143], 2934: [0.89604635310156777], 2445: [0.89775051124744376], 1956: [0.89570552147239269], 1467: [0.89229720518064082], 978: [0.91104294478527603], 489: [0.91820040899795496]}
normal_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859], 22983: [0.096114519427402859], 22494: [0.096114519427402859], 22005: [0.09815950920245399], 21516: [0.096114519427402859], 21027: [0.09815950920245399], 20538: [0.096114519427402859], 20049: [0.096114519427402859], 19560: [0.09202453987730061], 19071: [0.096114519427402859], 18582: [0.094069529652351741], 18093: [0.096114519427402859], 17604: [0.09815950920245399], 17115: [0.096114519427402859], 16626: [0.09202453987730061], 16137: [0.094069529652351741], 15648: [0.09202453987730061], 15159: [0.096114519427402859], 14670: [0.094069529652351741], 14181: [0.096114519427402859], 13692: [0.10020449897750511], 13203: [0.09815950920245399], 12714: [0.10224948875255624], 12225: [0.10224948875255624], 11736: [0.10224948875255624], 11247: [0.10224948875255624], 10758: [0.10633946830265849], 10269: [0.10838445807770961], 9780: [0.10838445807770961], 9291: [0.11042944785276074], 8802: [0.11451942740286299], 8313: [0.1165644171779141], 7824: [0.1165644171779141], 7335: [0.12269938650306748], 6846: [0.12883435582822086], 6357: [0.1329243353783231], 5868: [0.1411042944785276], 5379: [0.14314928425357873], 4890: [0.15132924335378323], 4401: [0.15337423312883436], 3912: [0.17791411042944785], 3423: [0.19631901840490798], 2934: [0.19222903885480572], 2445: [0.19222903885480572], 1956: [0.20245398773006135], 1467: [0.21063394683026584], 978: [0.22085889570552147], 489: [0.21472392638036811]}
normal_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
normal_train_scores_mean_50_folds = array([ 0.91820041, 0.91104294, 0.89229721, 0.89570552, 0.89775051, 0.89604635,
0.89862694, 0.89570552, 0.89502386, 0.89427403, 0.89477598, 0.89792093,
0.89948089, 0.90037978, 0.89747785, 0.89391616, 0.89125466, 0.88604863,
0.88483479, 0.88466258, 0.88207226, 0.87999628, 0.88094603, 0.87857873,
0.87427403, 0.87281737, 0.87002954, 0.86634531, 0.86150483, 0.85978187,
0.85744442, 0.85397495, 0.85077772, 0.8488512, 0.84691791, 0.84350148,
0.84115404, 0.83785384, 0.8368203, 0.83333333, 0.83166243, 0.82958419,
0.82703191, 0.82594348, 0.8239491, 0.8217747, 0.8187791, 0.81842195,
0.81770377])
normal_train_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_50_folds = array([ 0.21472393, 0.2208589, 0.21063395, 0.20245399, 0.19222904, 0.19222904,
0.19631902, 0.17791411, 0.15337423, 0.15132924, 0.14314928, 0.14110429,
0.13292434, 0.12883436, 0.12269939, 0.11656442, 0.11656442, 0.11451943,
0.11042945, 0.10838446, 0.10838446, 0.10633947, 0.10224949, 0.10224949,
0.10224949, 0.10224949, 0.09815951, 0.1002045, 0.09611452, 0.09406953,
0.09611452, 0.09202454, 0.09406953, 0.09202454, 0.09611452, 0.09815951,
0.09611452, 0.09406953, 0.09611452, 0.09202454, 0.09611452, 0.09611452,
0.09815951, 0.09611452, 0.09815951, 0.09611452, 0.09611452, 0.09611452,
0.09611452])
normal_test_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
data = {
"incremental": {
4: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_4_folds,
"train_sizes": incremental_train_sizes_4_folds,
"train_scores_mean": incremental_train_scores_mean_4_folds,
"train_scores_std": incremental_train_scores_std_4_folds,
"test_scores_mean": incremental_test_scores_mean_4_folds,
"test_scores_std": incremental_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_6_folds,
"train_sizes": incremental_train_sizes_6_folds,
"train_scores_mean": incremental_train_scores_mean_6_folds,
"train_scores_std": incremental_train_scores_std_6_folds,
"test_scores_mean": incremental_test_scores_mean_6_folds,
"test_scores_std": incremental_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_8_folds,
"train_sizes": incremental_train_sizes_8_folds,
"train_scores_mean": incremental_train_scores_mean_8_folds,
"train_scores_std": incremental_train_scores_std_8_folds,
"test_scores_mean": incremental_test_scores_mean_8_folds,
"test_scores_std": incremental_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_10_folds,
"train_sizes": incremental_train_sizes_10_folds,
"train_scores_mean": incremental_train_scores_mean_10_folds,
"train_scores_std": incremental_train_scores_std_10_folds,
"test_scores_mean": incremental_test_scores_mean_10_folds,
"test_scores_std": incremental_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_15_folds,
"train_sizes": incremental_train_sizes_15_folds,
"train_scores_mean": incremental_train_scores_mean_15_folds,
"train_scores_std": incremental_train_scores_std_15_folds,
"test_scores_mean": incremental_test_scores_mean_15_folds,
"test_scores_std": incremental_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_25_folds,
"train_sizes": incremental_train_sizes_25_folds,
"train_scores_mean": incremental_train_scores_mean_25_folds,
"train_scores_std": incremental_train_scores_std_25_folds,
"test_scores_mean": incremental_test_scores_mean_25_folds,
"test_scores_std": incremental_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_50_folds,
"train_sizes": incremental_train_sizes_50_folds,
"train_scores_mean": incremental_train_scores_mean_50_folds,
"train_scores_std": incremental_train_scores_std_50_folds,
"test_scores_mean": incremental_test_scores_mean_50_folds,
"test_scores_std": incremental_test_scores_std_50_folds
}
},
"normal": {
4: {
"train_accuracy_per_size": normal_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_4_folds,
"train_sizes": normal_train_sizes_4_folds,
"train_scores_mean": normal_train_scores_mean_4_folds,
"train_scores_std": normal_train_scores_std_4_folds,
"test_scores_mean": normal_test_scores_mean_4_folds,
"test_scores_std": normal_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": normal_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_6_folds,
"train_sizes": normal_train_sizes_6_folds,
"train_scores_mean": normal_train_scores_mean_6_folds,
"train_scores_std": normal_train_scores_std_6_folds,
"test_scores_mean": normal_test_scores_mean_6_folds,
"test_scores_std": normal_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": normal_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_8_folds,
"train_sizes": normal_train_sizes_8_folds,
"train_scores_mean": normal_train_scores_mean_8_folds,
"train_scores_std": normal_train_scores_std_8_folds,
"test_scores_mean": normal_test_scores_mean_8_folds,
"test_scores_std": normal_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": normal_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_10_folds,
"train_sizes": normal_train_sizes_10_folds,
"train_scores_mean": normal_train_scores_mean_10_folds,
"train_scores_std": normal_train_scores_std_10_folds,
"test_scores_mean": normal_test_scores_mean_10_folds,
"test_scores_std": normal_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": normal_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_15_folds,
"train_sizes": normal_train_sizes_15_folds,
"train_scores_mean": normal_train_scores_mean_15_folds,
"train_scores_std": normal_train_scores_std_15_folds,
"test_scores_mean": normal_test_scores_mean_15_folds,
"test_scores_std": normal_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": normal_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_25_folds,
"train_sizes": normal_train_sizes_25_folds,
"train_scores_mean": normal_train_scores_mean_25_folds,
"train_scores_std": normal_train_scores_std_25_folds,
"test_scores_mean": normal_test_scores_mean_25_folds,
"test_scores_std": normal_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": normal_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_50_folds,
"train_sizes": normal_train_sizes_50_folds,
"train_scores_mean": normal_train_scores_mean_50_folds,
"train_scores_std": normal_train_scores_std_50_folds,
"test_scores_mean": normal_test_scores_mean_50_folds,
"test_scores_std": normal_test_scores_std_50_folds
}
}
}
for key, value in data.items():
# print("{}: {}".format(key, value)) # Debug
for subKey, subValue in value.items():
# print("{}: {}".format(subKey, subValue)) # Debug
# Then, we plot the aforementioned learning curves
title = "Learning Curves (Linear SVM without tuning, " + \
key + \
" approach, {} folds)".format(subKey)
fig = plot_learning_curve(title, "accuracy", \
subValue["train_sizes"], \
subValue["train_scores_mean"], \
subValue["train_scores_std"], \
subValue["test_scores_mean"], \
subValue["test_scores_std"])
name_file = "{}_learning_curves_{}_folds.png".format( \
key, subKey)
# save_file = None if not save_file \
# else os.path.join(current_dir, name_file)
save_file = os.path.join(current_dir, name_file)
if save_file:
plt.savefig(save_file, bbox_inches="tight")
plt.close(fig)
else:
plt.show()
# Standard entry-point guard: generate the learning-curve plots only when
# this file is executed as a script, not when it is imported as a module.
if __name__ == "__main__":
    main()
import os
import inspect
import matplotlib.pyplot as plt
def plot_learning_curve(title, computed_score, train_sizes,
        train_scores_mean, train_scores_std, test_scores_mean,
        test_scores_std, xlim=(0, 25000), ylim=(0.0, 1.0)):
    """Generate a plot of the test and training learning curves.

    Parameters
    ----------
    title: string
        Contains the title of the chart.
    computed_score: string
        Contains the name of the computed score.
    train_sizes: a one dimension numpy.ndarray
        An array containing the various sizes of the training set for
        which the scores have been computed.
    train_scores_mean: a one dimension numpy.ndarray
        An array containing the various means of the scores related
        to each element in train_sizes. These scores should have been
        computed on the training set.
    train_scores_std: a one dimension numpy.ndarray
        An array containing the various standard deviations of the
        scores related to each element in train_sizes. These scores
        should have been computed on the training set.
    test_scores_mean: a one dimension numpy.ndarray
        An array containing the various means of the scores related
        to each element in train_sizes. These scores should have been
        computed on the test set.
    test_scores_std: a one dimension numpy.ndarray
        An array containing the various standard deviations of the
        scores related to each element in train_sizes. These scores
        should have been computed on the test set.
    xlim: tuple, shape (xmin, xmax), optional
        Defines minimum and maximum x values plotted. Defaults to the
        range used by the original experiments, (0, 25000).
    ylim: tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y values plotted. Defaults to the
        full accuracy range, (0.0, 1.0).

    Returns
    -------
    fig: matplotlib.figure.Figure
        The figure containing the learning curves; the caller is
        responsible for saving/showing and closing it.
    """
    fig = plt.figure(figsize=(20.0, 12.5))
    plt.title(title, size=31)
    # Pass the limits as (min, max) tuples: the xmin/xmax and ymin/ymax
    # keyword names were deprecated in matplotlib 2.2 and removed in 3.0,
    # so the previous plt.xlim(xmin=..., xmax=...) calls raise on any
    # modern matplotlib.
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.xlabel("Training examples", size=28)
    plt.ylabel(computed_score.capitalize(), size=28)
    plt.grid(linewidth=3)
    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - \
        train_scores_std, train_scores_mean + train_scores_std, \
        alpha=0.3, color="r")
    plt.fill_between(train_sizes, test_scores_mean - \
        test_scores_std, test_scores_mean + test_scores_std, \
        alpha=0.3, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r", \
        label="Training {}".format(computed_score), \
        linewidth=5.0, markersize=13.0)
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g", \
        label="Test {}".format(computed_score), \
        linewidth=5.0, markersize=13.0)
    plt.legend(loc="best", prop={'size': 26})
    plt.tick_params(axis='both', which='major', labelsize=22)
    return fig
def main():
current_dir = os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe())))
incremental_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565, 0.84031413612565442], 6112: [0.90068717277486909, 0.88890706806282727, 0.88219895287958117]}
incremental_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966, 0.224967277486911], 6112: [0.10091593065096501, 0.23707460732984292, 0.24803664921465968]}
incremental_train_sizes_4_folds = array([ 6112, 12224, 18336])
incremental_train_scores_mean_4_folds = array([ 0.89059773, 0.84685864, 0.82422557])
incremental_train_scores_std_4_folds = array([ 0.00764187, 0.0065445, 0. ])
incremental_test_scores_mean_4_folds = array([ 0.1953424, 0.16155135, 0.09421001])
incremental_test_scores_std_4_folds = array([ 0.0669194, 0.06341593, 0. ])
incremental_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952, 0.82993865030674852], 12225: [0.85824130879345606, 0.84932515337423309, 0.84040899795501023], 8150: [0.88981595092024535, 0.88110429447852756, 0.86932515337423311, 0.86687116564417177], 4075: [0.92122699386503071, 0.91533742331288348, 0.90625766871165647, 0.90134969325153369, 0.89447852760736202]}
incremental_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043, 0.21398773006134969], 12225: [0.05865030674846626, 0.22331288343558281, 0.24957055214723928], 8150: [0.063803680981595098, 0.23018404907975459, 0.26085889570552145, 0.27558282208588958], 4075: [0.075828220858895706, 0.22012269938650306, 0.25840490797546012, 0.2723926380368098, 0.23484662576687115]}
incremental_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
incremental_train_scores_mean_6_folds = array([ 0.90773006, 0.87677914, 0.84932515, 0.83276074, 0.8201227 ])
incremental_train_scores_std_6_folds = array([ 0.00957621, 0.00925196, 0.00728001, 0.00282209, 0. ])
incremental_test_scores_mean_6_folds = array([ 0.21231902, 0.20760736, 0.17717791, 0.13521472, 0.05496933])
incremental_test_scores_std_6_folds = array([ 0.07061286, 0.08462505, 0.08449442, 0.07877301, 0. ])
incremental_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754, 0.82422556719022688], 15280: [0.84332460732984293, 0.83776178010471203, 0.83082460732984298], 12224: [0.86338350785340312, 0.8534031413612565, 0.84710405759162299, 0.84031413612565442], 9168: [0.88568935427574169, 0.87565445026178013, 0.87041884816753923, 0.85907504363001741, 0.8586387434554974], 6112: [0.90085078534031415, 0.90068717277486909, 0.89725130890052351, 0.88890706806282727, 0.88334424083769636, 0.88219895287958117], 3056: [0.91819371727748689, 0.9240837696335078, 0.92702879581151831, 0.91852094240837701, 0.91819371727748689, 0.92833769633507857, 0.90150523560209428]}
incremental_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004, 0.17604712041884818], 15280: [0.043819489862655332, 0.17702879581151831, 0.2486910994764398], 12224: [0.048724656638325703, 0.18160994764397906, 0.25327225130890052, 0.26897905759162305], 9168: [0.055591890124264222, 0.18422774869109948, 0.26734293193717279, 0.27945026178010474, 0.30268324607329844], 6112: [0.072596468279921514, 0.18259162303664922, 0.27192408376963351, 0.28435863874345552, 0.29286649214659688, 0.26145287958115182], 3056: [0.11118378024852844, 0.1806282722513089, 0.2581806282722513, 0.27290575916230364, 0.27748691099476441, 0.26897905759162305, 0.25785340314136124]}
incremental_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
incremental_train_scores_mean_8_folds = array([ 0.91940912, 0.89220659, 0.86989529, 0.85105121, 0.83730366, 0.82646161,
0.81740838])
incremental_train_scores_std_8_folds = array([ 0.00831456, 0.00776394, 0.01026335, 0.00849238, 0.00511337, 0.00223604,
0. ])
incremental_test_scores_mean_8_folds = array([ 0.23245969, 0.2276317, 0.21785922, 0.18814648, 0.15651313, 0.10780773,
0.03924133])
incremental_test_scores_std_8_folds = array([ 0.05818413, 0.07814916, 0.09044222, 0.0869719, 0.08488723, 0.06823939,
0. ])
incremental_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444, 0.82249488752556232], 17115: [0.83651767455448434, 0.83102541630148996, 0.82810400233713122], 14670: [0.84935241990456711, 0.84226312201772324, 0.83974096796182685, 0.83278800272665299], 12225: [0.86633946830265851, 0.85758691206543969, 0.85251533742331287, 0.84621676891615538, 0.84040899795501023], 9780: [0.88548057259713697, 0.87525562372188137, 0.87300613496932511, 0.86124744376278117, 0.85429447852760731, 0.8535787321063395], 7335: [0.89788684389911388, 0.89570552147239269, 0.88943421949556922, 0.88657123381049763, 0.87607361963190189, 0.87075664621676896, 0.87457396046353097], 4890: [0.91942740286298563, 0.90817995910020455, 0.9122699386503067, 0.90817995910020455, 0.90122699386503069, 0.88568507157464216, 0.90081799591002043, 0.88670756646216764], 2445: [0.92842535787321068, 0.93006134969325149, 0.93660531697341509, 0.93047034764826175, 0.93987730061349695, 0.91574642126789363, 0.91983640081799589, 0.9235173824130879, 0.91002044989775055]}
incremental_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455, 0.095296523517382409], 17115: [0.12106339468302658, 0.10061349693251534, 0.25439672801635993], 14670: [0.12269938650306748, 0.10224948875255624, 0.2593047034764826, 0.23640081799591003], 12225: [0.12719836400817996, 0.11615541922290389, 0.25807770961145193, 0.23885480572597137, 0.29284253578732106], 9780: [0.14069529652351739, 0.11942740286298568, 0.26625766871165646, 0.24498977505112474, 0.30224948875255625, 0.31533742331288345], 7335: [0.16196319018404909, 0.12392638036809817, 0.2593047034764826, 0.2560327198364008, 0.31083844580777098, 0.31124744376278118, 0.2523517382413088], 4890: [0.17995910020449898, 0.1329243353783231, 0.2523517382413088, 0.26339468302658486, 0.29734151329243352, 0.29979550102249491, 0.26134969325153373, 0.26871165644171779], 2445: [0.19386503067484662, 0.13006134969325153, 0.2392638036809816, 0.24498977505112474, 0.29202453987730059, 0.27034764826175867, 0.24130879345603273, 0.24989775051124744, 0.25071574642126787]}
incremental_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
incremental_train_scores_mean_10_folds = array([ 0.92606226, 0.90281186, 0.88442886, 0.86714383, 0.8526135, 0.84103613,
0.83188236, 0.82433538, 0.81799591])
incremental_train_scores_std_10_folds = array([ 0.00914095, 0.0110811, 0.00994113, 0.01169251, 0.00897791, 0.005924,
0.00348791, 0.00184049, 0. ])
incremental_test_scores_mean_10_folds = array([ 0.23471938, 0.24447853, 0.23938066, 0.23149284, 0.20662577, 0.1801636,
0.15869121, 0.10633947, 0.11820041])
incremental_test_scores_std_10_folds = array([ 0.04451149, 0.05448996, 0.06594014, 0.07553149, 0.07157226, 0.06855415,
0.06818705, 0.01104294, 0. ])
incremental_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918, 0.81727229825389336], 19560: [0.83052147239263807, 0.82264826175869121, 0.82249488752556232], 17930: [0.83736754043502515, 0.830117122141662, 0.82861126603457891, 0.82381483547127721], 16300: [0.84699386503067486, 0.83619631901840485, 0.83668711656441719, 0.83073619631901841, 0.82993865030674852], 14670: [0.85405589638718471, 0.8477164280845263, 0.84226312201772324, 0.83871847307430125, 0.83653715064758005, 0.83278800272665299], 13040: [0.86618098159509205, 0.85552147239263798, 0.85421779141104293, 0.84739263803680986, 0.84631901840490797, 0.84072085889570547, 0.8377300613496933], 11410: [0.87765118317265556, 0.86958808063102544, 0.86126205083260299, 0.86038562664329532, 0.85723049956178787, 0.85021910604732687, 0.84469763365468886, 0.84522348816827342], 9780: [0.88588957055214723, 0.87924335378323104, 0.87525562372188137, 0.87044989775051129, 0.87157464212678937, 0.86124744376278117, 0.8563394683026585, 0.85685071574642124, 0.8535787321063395], 8150: [0.89447852760736202, 0.89042944785276079, 0.88858895705521468, 0.88638036809815945, 0.88110429447852756, 0.87754601226993867, 0.87067484662576689, 0.86907975460122699, 0.86453987730061355, 0.86687116564417177], 6520: [0.90567484662576692, 0.90000000000000002, 0.89892638036809813, 0.89984662576687113, 0.89309815950920246, 0.89401840490797546, 0.88696319018404912, 0.88312883435582823, 0.87423312883435578, 0.87944785276073623, 0.87898773006134967], 4890: [0.91390593047034763, 0.91451942740286296, 0.90817995910020455, 0.91267893660531696, 0.90899795501022496, 0.90817995910020455, 0.90061349693251536, 0.9002044989775051, 0.88568507157464216, 0.89836400817995909, 0.90040899795501017, 0.88670756646216764], 3260: [0.91932515337423315, 0.92638036809815949, 0.92699386503067482, 0.92791411042944782, 0.91748466257668715, 0.92392638036809815, 0.93006134969325149, 0.91625766871165648, 0.90490797546012269, 0.90674846625766869, 
0.91901840490797548, 0.90889570552147236, 0.89785276073619635], 1630: [0.93496932515337428, 0.94049079754601228, 0.94601226993865029, 0.94969325153374229, 0.93619631901840494, 0.92883435582822083, 0.93803680981595094, 0.94907975460122695, 0.91717791411042948, 0.92699386503067482, 0.9319018404907975, 0.94723926380368095, 0.91533742331288348, 0.92638036809815949]}
incremental_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773, 0.060122699386503067], 19560: [0.15337423312883436, 0.059509202453987733, 0.12331288343558282], 17930: [0.15214723926380369, 0.058282208588957052, 0.13190184049079753, 0.2607361963190184], 16300: [0.15398773006134969, 0.061963190184049083, 0.13190184049079753, 0.27116564417177913, 0.25398773006134967], 14670: [0.15460122699386503, 0.066257668711656448, 0.13374233128834356, 0.27116564417177913, 0.25398773006134967, 0.2411042944785276], 13040: [0.15766871165644172, 0.068711656441717797, 0.14294478527607363, 0.27300613496932513, 0.25398773006134967, 0.24049079754601227, 0.29877300613496932], 11410: [0.16809815950920245, 0.066871165644171782, 0.15337423312883436, 0.26625766871165646, 0.26380368098159507, 0.2460122699386503, 0.30858895705521472, 0.33128834355828218], 9780: [0.17668711656441718, 0.073619631901840496, 0.15398773006134969, 0.27484662576687119, 0.26503067484662579, 0.25644171779141106, 0.30613496932515338, 0.32822085889570551, 0.31411042944785278], 8150: [0.18466257668711655, 0.078527607361963195, 0.16073619631901839, 0.26687116564417179, 0.27423312883435585, 0.26932515337423313, 0.31226993865030672, 0.33742331288343558, 0.31963190184049078, 0.26748466257668713], 6520: [0.19570552147239265, 0.095092024539877307, 0.16809815950920245, 0.2588957055214724, 0.26993865030674846, 0.26564417177914113, 0.30797546012269938, 0.33680981595092024, 0.31963190184049078, 0.27361963190184047, 0.27975460122699386], 4890: [0.20429447852760735, 0.11288343558282209, 0.17055214723926379, 0.26503067484662579, 0.26319018404907973, 0.2822085889570552, 0.30184049079754599, 0.32699386503067485, 0.30306748466257671, 0.28650306748466259, 0.27607361963190186, 0.26625766871165646], 3260: [0.21717791411042944, 0.12208588957055215, 0.16380368098159509, 0.25950920245398773, 0.252760736196319, 0.27300613496932513, 0.29815950920245399, 0.33128834355828218, 0.29509202453987732, 
0.27975460122699386, 0.28343558282208586, 0.26503067484662579, 0.27300613496932513], 1630: [0.23803680981595093, 0.15889570552147239, 0.15705521472392639, 0.23251533742331287, 0.25521472392638039, 0.25582822085889573, 0.28159509202453986, 0.31901840490797545, 0.2570552147239264, 0.26196319018404907, 0.26319018404907973, 0.23558282208588957, 0.25214723926380367, 0.25950920245398773]}
incremental_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
incremental_train_scores_mean_15_folds = array([ 0.93488168, 0.91736668, 0.90320382, 0.8903932, 0.87896933, 0.86782549,
0.85828221, 0.84972612, 0.84201318, 0.83611043, 0.82997769, 0.82522154,
0.82012742, 0.81849255])
incremental_train_scores_std_15_folds = array([ 0.01074397, 0.00967823, 0.00931587, 0.00992714, 0.01023275, 0.01070537,
0.01082517, 0.00899664, 0.00711293, 0.00609528, 0.00485997, 0.00374814,
0.00285512, 0. ])
incremental_test_scores_mean_15_folds = array([ 0.2448291, 0.25493157, 0.25490798, 0.25192415, 0.24711656, 0.23878664,
0.22553681, 0.20508326, 0.18680982, 0.17460123, 0.15076687, 0.11206544,
0.10552147, 0.15276074])
incremental_test_scores_std_15_folds = array([ 0.04097555, 0.05470141, 0.05920192, 0.06843836, 0.07712113, 0.08085383,
0.08314464, 0.07722247, 0.07412044, 0.07818233, 0.07246458, 0.03913685,
0.04539877, 0. ])
incremental_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484, 0.81821819151773811], 21516: [0.824595649749024, 0.82041271611823763, 0.81692693809258221], 20538: [0.82753919563735512, 0.82520206446586819, 0.81892102444249681, 0.81862888304606096], 19560: [0.83195296523517381, 0.82878323108384455, 0.82356850715746421, 0.82014314928425358, 0.82249488752556232], 18582: [0.8366698955978904, 0.83397911957808635, 0.82741362608976432, 0.82692928640619956, 0.82348509310085027, 0.82396943278441503], 17604: [0.84003635537377863, 0.83702567598273114, 0.83242444898886614, 0.83094751192910699, 0.82969779595546467, 0.826630311292888, 0.82509656896159966], 16626: [0.84620473956453746, 0.84187417298207623, 0.83585949717310237, 0.83549861662456393, 0.8338145073980513, 0.83297245278479493, 0.82828100565379525, 0.8286418862023337], 15648: [0.85090746421267893, 0.84809560327198363, 0.84189672801635995, 0.83876533742331283, 0.83825408997955009, 0.83640081799591004, 0.8339723926380368, 0.83167177914110424, 0.83032975460122704], 14670: [0.85821404226312203, 0.85262440354464897, 0.84723926380368098, 0.84464894342194952, 0.84226312201772324, 0.84137695978186777, 0.83749147920927058, 0.83715064758009539, 0.83360599863667351, 0.83278800272665299], 13692: [0.86240140227870288, 0.86064855390008765, 0.8523225241016652, 0.85210341805433831, 0.84845165059888983, 0.84567630733274901, 0.8437043529068069, 0.84041776219690334, 0.83939526730937775, 0.83384458077709611, 0.83494011101373067], 12714: [0.871558911436212, 0.86644643699858426, 0.85921031933301872, 0.85614283467044205, 0.85456976561271036, 0.85189554821456659, 0.84867075664621672, 0.84827748938178382, 0.84481673745477426, 0.84135598552776469, 0.83687273871322954, 0.83852446122384772], 11736: [0.87585207907293794, 0.87491479209270617, 0.86699045671438313, 0.86255964553510567, 0.859492160872529, 0.859492160872529, 0.85506134969325154, 0.85565780504430811, 0.85216428084526241, 0.84688139059304701, 
0.84398432174505789, 0.83980913428766191, 0.84355828220858897], 10758: [0.88213422569250788, 0.8812046848856665, 0.87488380739914484, 0.87153746049451575, 0.86642498605688789, 0.86558839933073062, 0.8642870422011526, 0.86242796058746984, 0.85833798103736758, 0.85489868005205427, 0.85015802193716306, 0.84960029745305821, 0.84690462911321807, 0.84987915969511063], 9780: [0.88486707566462164, 0.88926380368098157, 0.88159509202453989, 0.87770961145194271, 0.87525562372188137, 0.87259713701431496, 0.87157464212678937, 0.87044989775051129, 0.86543967280163603, 0.86124744376278117, 0.85664621676891617, 0.85521472392638032, 0.85644171779141109, 0.85419222903885483, 0.8535787321063395], 8802: [0.88559418314019545, 0.89036582594864799, 0.88798000454442172, 0.88479890933878669, 0.8827539195637355, 0.87991365598727567, 0.87855032947057488, 0.87832310838445804, 0.87502840263576465, 0.86980231765507843, 0.86639400136332656, 0.86321290615769142, 0.86332651670074978, 0.8626448534423995, 0.85810043172006367, 0.86332651670074978], 7824: [0.89187116564417179, 0.89570552147239269, 0.89327709611451944, 0.89199897750511248, 0.89059304703476483, 0.88905930470347649, 0.88854805725971375, 0.8880368098159509, 0.88036809815950923, 0.88087934560327197, 0.87359406952965235, 0.87397750511247441, 0.87282719836400813, 0.86707566462167684, 0.86809815950920244, 0.87103783231083842, 0.86950408997955009], 6846: [0.89789658194566169, 0.90213263219398188, 0.89702015775635413, 0.89789658194566169, 0.89775051124744376, 0.89599766286882854, 0.89322231960268772, 0.89307624890446979, 0.89176161262050835, 0.88913234005258546, 0.88504236050248319, 0.88080631025416301, 0.88036809815950923, 0.87554776511831722, 0.87394098743791993, 0.87773882559158634, 0.87642418930762489, 0.87452527023079174], 5868: [0.90184049079754602, 0.91104294478527603, 0.90252215405589642, 0.90235173824130877, 0.90593047034764829, 0.9038854805725971, 0.90081799591002043, 0.8989434219495569, 0.8979209270620313, 0.8989434219495569, 
0.89383094751192915, 0.8933197000681663, 0.88957055214723924, 0.88599182004089982, 0.87917518745739609, 0.8887184730743013, 0.8873551465576005, 0.88650306748466257, 0.8834355828220859], 4890: [0.89959100204498976, 0.91676891615541922, 0.91554192229038855, 0.91083844580777096, 0.90817995910020455, 0.91206543967280163, 0.91349693251533748, 0.90531697341513295, 0.90490797546012269, 0.90817995910020455, 0.90081799591002043, 0.90368098159509203, 0.90163599182004095, 0.89345603271983642, 0.88568507157464216, 0.89345603271983642, 0.89938650306748469, 0.90000000000000002, 0.89775051124744376, 0.88670756646216764], 3912: [0.90030674846625769, 0.91768916155419222, 0.92075664621676889, 0.92382413087934556, 0.91589979550102252, 0.91641104294478526, 0.92663599182004086, 0.91385480572597133, 0.91155419222903888, 0.91794478527607359, 0.91641104294478526, 0.91308793456032722, 0.91411042944785281, 0.90720858895705525, 0.89800613496932513, 0.90030674846625769, 0.9066973415132924, 0.91206543967280163, 0.91359918200408996, 0.89979550102249484, 0.89340490797546013], 2934: [0.90149965916837083, 0.92638036809815949, 0.92092706203135655, 0.93387866394001362, 0.92876618950238587, 0.92706203135650989, 0.93626448534423989, 0.92842535787321068, 0.91990456714383095, 0.92603953646898429, 0.91922290388548056, 0.93456032719836402, 0.92160872528970683, 0.91717791411042948, 0.91240627130197682, 0.91717791411042948, 0.91342876618950242, 0.92058623040218135, 0.92808452624403548, 0.91615541922290389, 0.9035446489434219, 0.90558963871847309], 1956: [0.90286298568507162, 0.93762781186094069, 0.93098159509202449, 0.93711656441717794, 0.93813905930470343, 0.93865030674846628, 0.94734151329243355, 0.93762781186094069, 0.93302658486707568, 0.92944785276073616, 0.92484662576687116, 0.93404907975460127, 0.94529652351738236, 0.92331288343558282, 0.91666666666666663, 0.93456032719836402, 0.92995910020449901, 0.92586912065439675, 0.93404907975460127, 0.93660531697341509, 0.91922290388548056, 0.91768916155419222, 
0.9253578732106339], 978: [0.89059304703476483, 0.95194274028629855, 0.94887525562372188, 0.95194274028629855, 0.93762781186094069, 0.94478527607361962, 0.95910020449897748, 0.95501022494887522, 0.9253578732106339, 0.95092024539877296, 0.93456032719836402, 0.9468302658486708, 0.95705521472392641, 0.9468302658486708, 0.91411042944785281, 0.94478527607361962, 0.93149284253578735, 0.94069529652351735, 0.93456032719836402, 0.96319018404907975, 0.93865030674846628, 0.93558282208588961, 0.93149284253578735, 0.92126789366053174]}
incremental_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292, 0.22903885480572597], 21516: [0.10736196319018405, 0.22392638036809817, 0.070552147239263799], 20538: [0.10736196319018405, 0.22699386503067484, 0.07259713701431493, 0.13905930470347649], 19560: [0.10531697341513292, 0.22903885480572597, 0.068507157464212681, 0.13803680981595093, 0.1492842535787321], 18582: [0.10736196319018405, 0.22290388548057261, 0.06646216768916155, 0.14519427402862986, 0.15235173824130879, 0.26687116564417179], 17604: [0.11349693251533742, 0.22903885480572597, 0.06646216768916155, 0.14519427402862986, 0.15644171779141106, 0.26789366053169733, 0.28936605316973413], 16626: [0.11042944785276074, 0.22392638036809817, 0.07259713701431493, 0.14519427402862986, 0.15848670756646216, 0.27505112474437626, 0.29345603271983639, 0.254601226993865], 15648: [0.1165644171779141, 0.22494887525562371, 0.07259713701431493, 0.14621676891615543, 0.15950920245398773, 0.26993865030674846, 0.29550102249488752, 0.25153374233128833, 0.31083844580777098], 14670: [0.11349693251533742, 0.22699386503067484, 0.07259713701431493, 0.14621676891615543, 0.15848670756646216, 0.27402862985685073, 0.29038854805725972, 0.24846625766871167, 0.30981595092024539, 0.21881390593047034], 13692: [0.11451942740286299, 0.23210633946830267, 0.070552147239263799, 0.14723926380368099, 0.16666666666666666, 0.27198364008179959, 0.29550102249488752, 0.25869120654396727, 0.31799591002044991, 0.22392638036809817, 0.2822085889570552], 12714: [0.11758691206543967, 0.23210633946830267, 0.075664621676891614, 0.14826175869120656, 0.17484662576687116, 0.27505112474437626, 0.29652351738241312, 0.2658486707566462, 0.31901840490797545, 0.23517382413087934, 0.28629856850715746, 0.34049079754601225], 11736: [0.1196319018404908, 0.24028629856850717, 0.075664621676891614, 0.15337423312883436, 0.18609406952965235, 0.26993865030674846, 0.28732106339468305, 0.26278118609406953, 0.31186094069529652, 
0.22699386503067484, 0.28936605316973413, 0.34049079754601225, 0.34049079754601225], 10758: [0.130879345603272, 0.2607361963190184, 0.078732106339468297, 0.15848670756646216, 0.18711656441717792, 0.27607361963190186, 0.28936605316973413, 0.26278118609406953, 0.32106339468302658, 0.22801635991820041, 0.29141104294478526, 0.34458077709611451, 0.34151329243353784, 0.32617586912065438], 9780: [0.1329243353783231, 0.27709611451942739, 0.079754601226993863, 0.15439672801635992, 0.18813905930470348, 0.28527607361963192, 0.29141104294478526, 0.27198364008179959, 0.32515337423312884, 0.24335378323108384, 0.29243353783231085, 0.34662576687116564, 0.34458077709611451, 0.33435582822085891, 0.32515337423312884], 8802: [0.13190184049079753, 0.28629856850715746, 0.078732106339468297, 0.16564417177914109, 0.19325153374233128, 0.28118609406952966, 0.28834355828220859, 0.28834355828220859, 0.32924335378323111, 0.25153374233128833, 0.28834355828220859, 0.35276073619631904, 0.34253578732106338, 0.31697341513292432, 0.33333333333333331, 0.28323108384458079], 7824: [0.13496932515337423, 0.29550102249488752, 0.085889570552147243, 0.16871165644171779, 0.19836400817995911, 0.27300613496932513, 0.29243353783231085, 0.27811860940695299, 0.32822085889570551, 0.26482617586912066, 0.29447852760736198, 0.35787321063394684, 0.34867075664621677, 0.33128834355828218, 0.32310838445807771, 0.2822085889570552, 0.28732106339468305], 6846: [0.13701431492842536, 0.29447852760736198, 0.096114519427402859, 0.16973415132924335, 0.19836400817995911, 0.27402862985685073, 0.28527607361963192, 0.27402862985685073, 0.32719836400817998, 0.25153374233128833, 0.29345603271983639, 0.34969325153374231, 0.34969325153374231, 0.33026584867075665, 0.32617586912065438, 0.29243353783231085, 0.29550102249488752, 0.29141104294478526], 5868: [0.14723926380368099, 0.30572597137014312, 0.097137014314928424, 0.17177914110429449, 0.20449897750511248, 0.26380368098159507, 0.28118609406952966, 0.27607361963190186, 
0.32515337423312884, 0.27300613496932513, 0.29754601226993865, 0.34049079754601225, 0.34969325153374231, 0.32310838445807771, 0.32924335378323111, 0.29243353783231085, 0.28629856850715746, 0.30572597137014312, 0.27096114519427406], 4890: [0.15132924335378323, 0.31697341513292432, 0.11247443762781185, 0.17791411042944785, 0.21370143149284254, 0.26278118609406953, 0.28732106339468305, 0.2822085889570552, 0.32310838445807771, 0.27709611451942739, 0.28732106339468305, 0.35378323108384457, 0.32924335378323111, 0.31799591002044991, 0.31288343558282211, 0.28834355828220859, 0.29447852760736198, 0.31083844580777098, 0.27198364008179959, 0.27505112474437626], 3912: [0.15746421267893659, 0.32515337423312884, 0.12678936605316973, 0.20040899795501022, 0.21267893660531698, 0.25664621676891614, 0.28834355828220859, 0.26993865030674846, 0.30981595092024539, 0.28118609406952966, 0.28016359918200406, 0.34969325153374231, 0.33946830265848671, 0.30981595092024539, 0.31492842535787319, 0.28425357873210633, 0.29141104294478526, 0.31186094069529652, 0.2822085889570552, 0.27811860940695299, 0.30163599182004092], 2934: [0.17586912065439672, 0.33435582822085891, 0.14314928425357873, 0.21676891615541921, 0.19938650306748465, 0.25255623721881393, 0.28732106339468305, 0.27096114519427406, 0.29959100204498978, 0.27709611451942739, 0.26380368098159507, 0.35378323108384457, 0.34049079754601225, 0.29038854805725972, 0.30470347648261759, 0.27096114519427406, 0.2658486707566462, 0.32208588957055212, 0.28834355828220859, 0.26278118609406953, 0.31492842535787319, 0.25869120654396727], 1956: [0.19325153374233128, 0.34253578732106338, 0.15848670756646216, 0.24130879345603273, 0.17382413087934559, 0.22290388548057261, 0.31390593047034765, 0.26482617586912066, 0.30674846625766872, 0.2822085889570552, 0.26789366053169733, 0.34049079754601225, 0.33231083844580778, 0.28936605316973413, 0.28016359918200406, 0.28425357873210633, 0.25766871165644173, 0.30470347648261759, 0.28527607361963192, 
0.24335378323108384, 0.27198364008179959, 0.26482617586912066, 0.29243353783231085], 978: [0.19529652351738241, 0.34253578732106338, 0.18507157464212678, 0.24233128834355827, 0.21370143149284254, 0.20961145194274028, 0.27811860940695299, 0.27505112474437626, 0.25869120654396727, 0.2556237218813906, 0.2310838445807771, 0.29447852760736198, 0.29038854805725972, 0.27198364008179959, 0.24846625766871167, 0.24539877300613497, 0.20143149284253578, 0.25869120654396727, 0.25255623721881393, 0.26993865030674846, 0.18916155419222905, 0.22903885480572597, 0.28323108384458079, 0.23415132924335377]}
incremental_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
incremental_train_scores_mean_25_folds = array([ 0.93988582, 0.93044812, 0.9208496, 0.91140812, 0.90287321, 0.89537366,
0.88779336, 0.881556, 0.87438224, 0.86840491, 0.86273338, 0.85664753,
0.8515285, 0.84671872, 0.84274029, 0.83892155, 0.83539336, 0.8316941,
0.82874108, 0.82538855, 0.82257279, 0.8206451, 0.81868498, 0.81799591])
incremental_train_scores_std_25_folds = array([ 0.01574382, 0.00986344, 0.00941513, 0.0086523, 0.00859881, 0.00843904,
0.0094164, 0.00978435, 0.0102025, 0.01149665, 0.01132038, 0.011076,
0.01026567, 0.00910232, 0.00777419, 0.00661961, 0.00574061, 0.00496077,
0.00492852, 0.00433164, 0.00388806, 0.00313505, 0.00046679, 0. ])
incremental_test_scores_mean_25_folds = array([ 0.24816803, 0.27020539, 0.27244841, 0.27485636, 0.27234151, 0.27058444,
0.26868893, 0.26735234, 0.26322853, 0.2595092, 0.24978089, 0.23879188,
0.2305726, 0.21649005, 0.20593047, 0.20529425, 0.19171779, 0.18112767,
0.16019087, 0.13803681, 0.13650307, 0.13394683, 0.16717791, 0.10633947])
incremental_test_scores_std_25_folds = array([ 0.03709919, 0.0474123, 0.05100164, 0.0554582, 0.06067133, 0.06643791,
0.07039766, 0.07419473, 0.0772398, 0.08042228, 0.07961067, 0.08014752,
0.08023328, 0.07686982, 0.0755778, 0.07961578, 0.07611923, 0.07668308,
0.06730022, 0.05350297, 0.05728938, 0.06537574, 0.06186094, 0. ])
incremental_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894, 0.81799591002044991], 22983: [0.81877909759387368, 0.81821346212417878, 0.81890962885611107], 22494: [0.82177469547434867, 0.8191517738063484, 0.819196230105806, 0.81821819151773811], 22005: [0.82394910247670983, 0.82140422631220178, 0.81972279027493755, 0.81904112701658716, 0.81799591002044991], 21516: [0.82594348391894401, 0.824595649749024, 0.82250418293363081, 0.82041271611823763, 0.81911135898865961, 0.81692693809258221], 21027: [0.8270319113520711, 0.82679412184334422, 0.82455890046131164, 0.82260902648975132, 0.81966043658153798, 0.81832881533266755, 0.81766300470823228], 20538: [0.82958418541240631, 0.82753919563735512, 0.82753919563735512, 0.82520206446586819, 0.82232934073424868, 0.81892102444249681, 0.81819067095140718, 0.81862888304606096], 20049: [0.8316624270537184, 0.82891914808718636, 0.82822085889570551, 0.82762232530300761, 0.82448002394134368, 0.82168686717542017, 0.81874407701132224, 0.81944236620280309, 0.82058955558880742], 19560: [0.83333333333333337, 0.83195296523517381, 0.83067484662576685, 0.82878323108384455, 0.82617586912065444, 0.82356850715746421, 0.82249488752556232, 0.82014314928425358, 0.82055214723926384, 0.82249488752556232], 19071: [0.83682030307797184, 0.83320224424518907, 0.83377903623302396, 0.83141943264642648, 0.82858790834250962, 0.82612343348539663, 0.82476010696869595, 0.82344921608725286, 0.82093230559488228, 0.82240050338209847, 0.82318703791096426], 18582: [0.83785383704660421, 0.8366698955978904, 0.83435582822085885, 0.83397911957808635, 0.83118071251749004, 0.82741362608976432, 0.82800559681412123, 0.82692928640619956, 0.82488429663114837, 0.82348509310085027, 0.82386180174362289, 0.82396943278441503], 18093: [0.84115403747305584, 0.83722986790471454, 0.8366771679655115, 0.83474271817830104, 0.83280826839109046, 0.83087381860387999, 0.82998949870115513, 0.82860774885314759, 0.82706018902337919, 
0.82656275907809651, 0.82484938926656715, 0.82595478914497322, 0.8232465594428785], 17604: [0.84350147693705979, 0.84003635537377863, 0.83844580777096112, 0.83702567598273114, 0.83441263349238814, 0.83242444898886614, 0.83145875937286984, 0.83094751192910699, 0.83037945921381506, 0.82969779595546467, 0.82833446943876388, 0.826630311292888, 0.8259486480345376, 0.82509656896159966], 17115: [0.84691790826760149, 0.84317849839322234, 0.84224364592462753, 0.83827052293309967, 0.83651767455448434, 0.83447268477943326, 0.83342097575226415, 0.83353783231083844, 0.83196026877008478, 0.83102541630148996, 0.83219398188723337, 0.82921413964358748, 0.82629272567922873, 0.8269354367513877, 0.82810400233713122], 16626: [0.84885119692048594, 0.84620473956453746, 0.84536268495128108, 0.84187417298207623, 0.83844580777096112, 0.83585949717310237, 0.83489714904366652, 0.83549861662456393, 0.83417538794658963, 0.8338145073980513, 0.83267171899434622, 0.83297245278479493, 0.82966438108985929, 0.82828100565379525, 0.82972452784794903, 0.8286418862023337], 16137: [0.85077771580839068, 0.84792712400074366, 0.84817500154923464, 0.84513850158021941, 0.84166821590134477, 0.83881762409369776, 0.83695854248001489, 0.83708248125426044, 0.83621490983454172, 0.83584309351180519, 0.83454173638222717, 0.83435582822085885, 0.83274462415566708, 0.83156720580033461, 0.83125735886472085, 0.83038978744500214, 0.83082357315486155], 15648: [0.8539749488752556, 0.85090746421267893, 0.84924591002044991, 0.84809560327198363, 0.84502811860940696, 0.84189672801635995, 0.83946830265848671, 0.83876533742331283, 0.83819018404907975, 0.83825408997955009, 0.83780674846625769, 0.83640081799591004, 0.83416411042944782, 0.8339723926380368, 0.83467535787321068, 0.83167177914110424, 0.83141615541922287, 0.83032975460122704], 15159: [0.85744442245530705, 0.85421201926248436, 0.85196912725113794, 0.84959430041559469, 0.84814301734942943, 0.84444884227191763, 0.843723200738835, 0.84148030872748858, 0.84075466719440595, 
0.83956725377663433, 0.84002902566132331, 0.83818193812256747, 0.83659872023220527, 0.83574114387492582, 0.83587307869912264, 0.83448776304505579, 0.83277261033049677, 0.83217890362161095, 0.83125535985223298], 14670: [0.8597818677573279, 0.85821404226312203, 0.85555555555555551, 0.85262440354464897, 0.84935241990456711, 0.84723926380368098, 0.84689843217450578, 0.84464894342194952, 0.84294478527607364, 0.84226312201772324, 0.84192229038854804, 0.84137695978186777, 0.83871847307430125, 0.83749147920927058, 0.83974096796182685, 0.83715064758009539, 0.83640081799591004, 0.83360599863667351, 0.83251533742331285, 0.83278800272665299], 14181: [0.86150483040688242, 0.86002397574219025, 0.85995345885339536, 0.85508779352654962, 0.85057471264367812, 0.85050419575488334, 0.8488117904238065, 0.84860023975742194, 0.84648473309357586, 0.84648473309357586, 0.84380509131937098, 0.84458077709611457, 0.8404907975460123, 0.84161906776673012, 0.84098441576757632, 0.83978562865806361, 0.83886890910373035, 0.83752908821662786, 0.83329807488893592, 0.83414427755447429, 0.83160566955785908], 13692: [0.86634531113058721, 0.86240140227870288, 0.86364300321355536, 0.86064855390008765, 0.8555360794624598, 0.8523225241016652, 0.8507157464212679, 0.85210341805433831, 0.85159217061057557, 0.84845165059888983, 0.84786736780601812, 0.84567630733274901, 0.84421560035056964, 0.8437043529068069, 0.84341221151037105, 0.84041776219690334, 0.84290096406660819, 0.83939526730937775, 0.83822670172363423, 0.83384458077709611, 0.83333333333333337, 0.83494011101373067], 13203: [0.87002953874119515, 0.86737862606983263, 0.86609103991517078, 0.86344012724380825, 0.85942588805574494, 0.85465424524729228, 0.85442702416117544, 0.85435128379913661, 0.85359388017874727, 0.85344239945466938, 0.84965538135272289, 0.84995834280087856, 0.84511095963038707, 0.84647428614708775, 0.84556540180262063, 0.84397485419980312, 0.84382337347572522, 0.84382337347572522, 0.83958191320154507, 0.84071801863212903, 
0.83458304930697569, 0.83624933727183215, 0.83655229871998793], 12714: [0.8728173666823974, 0.871558911436212, 0.86967122856693413, 0.86644643699858426, 0.8619631901840491, 0.85921031933301872, 0.85779455718106024, 0.85614283467044205, 0.85677206229353464, 0.85456976561271036, 0.85472707251848357, 0.85189554821456659, 0.84977190498662891, 0.84867075664621672, 0.84945729117508262, 0.84827748938178382, 0.84812018247601073, 0.84481673745477426, 0.84324366839704268, 0.84135598552776469, 0.84143463898065129, 0.83687273871322954, 0.83600755073147714, 0.83852446122384772], 12225: [0.87427402862985681, 0.874601226993865, 0.87337423312883433, 0.87108384458077714, 0.86633946830265851, 0.86298568507157469, 0.86159509202453988, 0.85946830265848673, 0.85881390593047036, 0.85758691206543969, 0.8555419222903885, 0.85652351738241306, 0.85194274028629857, 0.85284253578732105, 0.85251533742331287, 0.8512883435582822, 0.85194274028629857, 0.8489979550102249, 0.84719836400817994, 0.84621676891615538, 0.84220858895705519, 0.84261758691206545, 0.83918200408997956, 0.84016359918200412, 0.84040899795501023], 11736: [0.87857873210633952, 0.87585207907293794, 0.87849352419904569, 0.87491479209270617, 0.87125085207907293, 0.86699045671438313, 0.86486025903203823, 0.86255964553510567, 0.86170756646216773, 0.859492160872529, 0.85932174505794134, 0.859492160872529, 0.85591342876618948, 0.85506134969325154, 0.85753237900477164, 0.85565780504430811, 0.85582822085889576, 0.85216428084526241, 0.85046012269938653, 0.84688139059304701, 0.84654055896387181, 0.84398432174505789, 0.84492160872528965, 0.83980913428766191, 0.84262099522835721, 0.84355828220858897], 11247: [0.88094603005245842, 0.88085711745354311, 0.88005690406330572, 0.87970125366764473, 0.87329954654574549, 0.87009869298479592, 0.87018760558371122, 0.86805370320974484, 0.86520850004445626, 0.8627189472748289, 0.86378589846181209, 0.86458611185204948, 0.86031830710411661, 0.86014048190628611, 0.85765092913665864, 0.85791766693340443, 
0.85889570552147243, 0.85622832755401446, 0.85373877478438698, 0.8513381346136748, 0.84689250466791144, 0.84787054325597933, 0.84680359206899614, 0.84635902907441984, 0.84431403929936877, 0.84573664088201295, 0.84493642749177555], 10758: [0.87999628183677259, 0.88213422569250788, 0.88315672058003347, 0.8812046848856665, 0.8789737869492471, 0.87488380739914484, 0.87265290946272545, 0.87153746049451575, 0.86921360847741214, 0.86642498605688789, 0.86605316973415136, 0.86558839933073062, 0.86335750139431122, 0.8642870422011526, 0.86475181260457334, 0.86242796058746984, 0.8608477412158394, 0.85833798103736758, 0.85796616471463094, 0.85489868005205427, 0.85201710355084592, 0.85015802193716306, 0.85127347090537275, 0.84960029745305821, 0.84848484848484851, 0.84690462911321807, 0.84885666480758504, 0.84987915969511063], 10269: [0.88207225630538511, 0.88178011490894925, 0.88723342097575231, 0.88479890933878669, 0.88109845165059886, 0.87730061349693256, 0.87642418930762489, 0.87525562372188137, 0.87321063394683029, 0.86951017625864246, 0.87136040510273638, 0.86853637160385622, 0.86542019670854031, 0.86892589346577076, 0.86814684974194178, 0.86512805531210435, 0.86629662089784787, 0.86220664134774561, 0.86064855390008765, 0.8603564125036518, 0.85470834550589148, 0.85597429155711369, 0.85353977992014807, 0.85305287759275494, 0.85042360502483205, 0.85315025805823352, 0.85052098549031063, 0.84983932223196024, 0.85032622455935336], 9780: [0.88466257668711656, 0.88486707566462164, 0.88670756646216764, 0.88926380368098157, 0.88548057259713697, 0.88159509202453989, 0.87985685071574637, 0.87770961145194271, 0.87934560327198363, 0.87525562372188137, 0.87300613496932511, 0.87259713701431496, 0.86912065439672803, 0.87157464212678937, 0.87300613496932511, 0.87044989775051129, 0.87106339468302663, 0.86543967280163603, 0.86431492842535784, 0.86124744376278117, 0.85899795501022491, 0.85664621676891617, 0.85838445807770958, 0.85521472392638032, 0.85429447852760731, 0.85644171779141109, 
0.85480572597137017, 0.85419222903885483, 0.85255623721881391, 0.8535787321063395], 9291: [0.884834786352384, 0.88612635884188995, 0.88806371757614899, 0.88870950382090197, 0.88849424173931757, 0.88440426218921542, 0.88257453449574852, 0.88085243784307399, 0.88268216553654077, 0.88009902055752876, 0.87708535141534816, 0.87697772037455601, 0.87278010978366161, 0.87460983747712839, 0.87471746851792054, 0.87568614788505006, 0.8743945753955441, 0.87073512000861053, 0.86729092670326124, 0.86761381982563768, 0.86126358841890005, 0.86169411258206863, 0.86072543321493922, 0.86061780217414707, 0.85911096760305672, 0.85954149176622541, 0.85857281239909589, 0.85749650199117422, 0.85609729846087612, 0.85502098805295446, 0.85642019158325255], 8802: [0.88604862531242901, 0.88559418314019545, 0.89184276300840715, 0.89036582594864799, 0.89059304703476483, 0.88798000454442172, 0.88809361508748008, 0.88479890933878669, 0.88570779368325381, 0.8827539195637355, 0.88457168825266985, 0.87991365598727567, 0.87843671892751651, 0.87855032947057488, 0.87809588729834132, 0.87832310838445804, 0.87752783458304928, 0.87502840263576465, 0.87321063394683029, 0.86980231765507843, 0.86628039082026809, 0.86639400136332656, 0.86537150647580097, 0.86321290615769142, 0.86275846398545786, 0.86332651670074978, 0.86366734832992498, 0.8626448534423995, 0.85969097932288119, 0.85810043172006367, 0.86025903203817311, 0.86332651670074978], 8313: [0.89125466137375198, 0.88812702995308557, 0.89281847708408513, 0.89305906411644409, 0.8923373030193672, 0.8923373030193672, 0.89101407434139301, 0.88920967159870079, 0.8898111391795982, 0.88776614940454712, 0.88848791050162401, 0.88632262721039334, 0.88271382172500901, 0.88235294117647056, 0.88379646337062434, 0.88295440875736797, 0.88151088656321419, 0.87934560327198363, 0.87922530975580415, 0.87561650427041982, 0.86960182846144596, 0.86948153494526648, 0.86815830626729218, 0.86767713220257425, 0.86743654517021529, 0.86839889329965114, 0.86659449055695903, 
0.86479008781426681, 0.86358715265247199, 0.86178274990977988, 0.86527126187898473, 0.86442920726572836, 0.86707566462167684], 7824: [0.89391615541922287, 0.89187116564417179, 0.8936605316973415, 0.89570552147239269, 0.89608895705521474, 0.89327709611451944, 0.89302147239263807, 0.89199897750511248, 0.89378834355828218, 0.89059304703476483, 0.89008179959100209, 0.88905930470347649, 0.88842024539877296, 0.88854805725971375, 0.8871421267893661, 0.8880368098159509, 0.8834355828220859, 0.88036809815950923, 0.88407464212678932, 0.88087934560327197, 0.8778118609406953, 0.87359406952965235, 0.87359406952965235, 0.87397750511247441, 0.87448875255623726, 0.87282719836400813, 0.86848159509202449, 0.86707566462167684, 0.86490286298568508, 0.86809815950920244, 0.86975971370143146, 0.87103783231083842, 0.86848159509202449, 0.86950408997955009], 7335: [0.89747784594410362, 0.89461486025903203, 0.89720518064076349, 0.89679618268575323, 0.89788684389911388, 0.8958418541240627, 0.89706884798909337, 0.89447852760736202, 0.89693251533742335, 0.89570552147239269, 0.89188820722563056, 0.89338786639400136, 0.89011588275391962, 0.89202453987730057, 0.88943421949556922, 0.88820722563053855, 0.8877982276755283, 0.88575323790047722, 0.88684389911383776, 0.88657123381049763, 0.88084526244035444, 0.88152692569870483, 0.87580095432856164, 0.8768916155419223, 0.87607361963190189, 0.87648261758691204, 0.87484662576687111, 0.87239263803680978, 0.86830265848670751, 0.87075664621676896, 0.87280163599182004, 0.87566462167689163, 0.87389229720518069, 0.87171097477845949, 0.87457396046353097], 6846: [0.90037978381536665, 0.89789658194566169, 0.90300905638328954, 0.90213263219398188, 0.90037978381536665, 0.89702015775635413, 0.9000876424189308, 0.89789658194566169, 0.89964943032427691, 0.89775051124744376, 0.89628980426526439, 0.89599766286882854, 0.8952673093777388, 0.89322231960268772, 0.89249196611159798, 0.89307624890446979, 0.89073911773298275, 0.89176161262050835, 0.89176161262050835, 
0.88913234005258546, 0.88635699678644464, 0.88504236050248319, 0.8834355828220859, 0.88080631025416301, 0.88124452234881678, 0.88036809815950923, 0.87934560327198363, 0.87554776511831722, 0.87306456324861237, 0.87394098743791993, 0.87832310838445804, 0.87773882559158634, 0.87730061349693256, 0.87642418930762489, 0.87583990651475319, 0.87452527023079174], 6357: [0.89948088721094854, 0.90184049079754602, 0.90671700487651408, 0.90593047034764829, 0.90593047034764829, 0.89806512505899005, 0.90073934245713383, 0.90073934245713383, 0.90215510460909232, 0.90136857008022653, 0.89995280792826804, 0.90136857008022653, 0.89806512505899005, 0.89712128362435106, 0.89633474909548527, 0.89664936290703157, 0.89476168003775369, 0.8933459178857952, 0.89633474909548527, 0.89444706622620729, 0.89004247286455873, 0.88831209690105395, 0.88799748308950766, 0.88878401761837345, 0.88469403806827118, 0.88563787950291017, 0.8823344344816737, 0.88060405851816892, 0.87399716847569608, 0.87871637564889093, 0.88186251376435421, 0.88500865187981748, 0.88060405851816892, 0.88186251376435421, 0.8834355828220859, 0.87808714802579835, 0.87777253421425205], 5868: [0.8979209270620313, 0.90184049079754602, 0.90933878663940015, 0.91104294478527603, 0.90814587593728702, 0.90252215405589642, 0.90320381731424682, 0.90235173824130877, 0.90490797546012269, 0.90593047034764829, 0.90405589638718475, 0.9038854805725971, 0.9038854805725971, 0.90081799591002043, 0.89655760054533062, 0.8989434219495569, 0.90013633265167003, 0.8979209270620313, 0.90115882753919563, 0.8989434219495569, 0.89468302658486709, 0.89383094751192915, 0.89417177914110424, 0.8933197000681663, 0.89246762099522836, 0.88957055214723924, 0.88786639400136336, 0.88599182004089982, 0.88019768234492157, 0.87917518745739609, 0.8873551465576005, 0.8887184730743013, 0.8876959781867757, 0.8873551465576005, 0.88752556237218816, 0.88650306748466257, 0.88394683026584864, 0.8834355828220859], 5379: [0.8947759806655512, 0.89960959286112663, 
0.91094999070459193, 0.912994980479643, 0.91318088864101132, 0.90797546012269936, 0.908347276445436, 0.90462911321807027, 0.90686001115448966, 0.9072318274772263, 0.908347276445436, 0.90760364379996283, 0.9072318274772263, 0.9094627254136457, 0.90258412344301919, 0.90016731734523148, 0.9038854805725971, 0.90295593976575572, 0.90407138873396542, 0.90425729689533374, 0.90128276631344117, 0.89719278676333891, 0.89644915411786574, 0.89998140918386316, 0.89459007250418299, 0.89607733779512921, 0.89105781743818557, 0.89105781743818557, 0.88417921546755907, 0.88455103179029559, 0.88622420524261014, 0.8947759806655512, 0.89273099089050012, 0.89533370514965605, 0.8936605316973415, 0.89459007250418299, 0.8925450827291318, 0.8903141847927124, 0.8823201338538762], 4890: [0.89427402862985683, 0.89959100204498976, 0.91186094069529655, 0.91676891615541922, 0.91942740286298563, 0.91554192229038855, 0.91165644171779137, 0.91083844580777096, 0.90961145194274029, 0.90817995910020455, 0.90756646216768921, 0.91206543967280163, 0.91329243353783229, 0.91349693251533748, 0.9122699386503067, 0.90531697341513295, 0.90531697341513295, 0.90490797546012269, 0.90920245398773003, 0.90817995910020455, 0.9057259713701431, 0.90081799591002043, 0.90163599182004095, 0.90368098159509203, 0.90122699386503069, 0.90163599182004095, 0.8991820040899795, 0.89345603271983642, 0.89161554192229042, 0.88568507157464216, 0.89284253578732109, 0.89345603271983642, 0.89877300613496935, 0.89938650306748469, 0.90081799591002043, 0.90000000000000002, 0.89959100204498976, 0.89775051124744376, 0.88936605316973416, 0.88670756646216764], 4401: [0.89502385821404229, 0.90024994319472851, 0.90956600772551688, 0.91660986139513745, 0.91820040899795496, 0.9177459668257214, 0.9177459668257214, 0.91638264030902072, 0.91660986139513745, 0.91092933424221767, 0.91070211315610083, 0.91433765053396954, 0.9184276300840718, 0.91933651442853892, 0.91274710293115202, 0.91229266075891846, 0.91161099750056807, 0.9082026812088162, 
0.91297432401726886, 0.91297432401726886, 0.91092933424221767, 0.90593047034764829, 0.90547602817541473, 0.90956600772551688, 0.9038854805725971, 0.91024767098386727, 0.90252215405589642, 0.90024994319472851, 0.89479663712792545, 0.8936605316973415, 0.89706884798909337, 0.89934105885026128, 0.89956827993637811, 0.90365825948648038, 0.90570324926153145, 0.90706657577823224, 0.90865712338104976, 0.9038854805725971, 0.89888661667802772, 0.8900249943194728, 0.88798000454442172], 3912: [0.89570552147239269, 0.90030674846625769, 0.90925357873210633, 0.91768916155419222, 0.92050102249488752, 0.92075664621676889, 0.92459100204498978, 0.92382413087934556, 0.92484662576687116, 0.91589979550102252, 0.91180981595092025, 0.91641104294478526, 0.92331288343558282, 0.92663599182004086, 0.91871165644171782, 0.91385480572597133, 0.91538854805725967, 0.91155419222903888, 0.91666666666666663, 0.91794478527607359, 0.91538854805725967, 0.91641104294478526, 0.91078732106339466, 0.91308793456032722, 0.91155419222903888, 0.91411042944785281, 0.91155419222903888, 0.90720858895705525, 0.89979550102249484, 0.89800613496932513, 0.90005112474437632, 0.90030674846625769, 0.90465235173824132, 0.9066973415132924, 0.91308793456032722, 0.91206543967280163, 0.91666666666666663, 0.91359918200408996, 0.90260736196319014, 0.89979550102249484, 0.8936605316973415, 0.89340490797546013], 3423: [0.89862693543675143, 0.90154834940111017, 0.91527899503359622, 0.9199532573765703, 0.92112182296231371, 0.91936897458369848, 0.92900964066608238, 0.92988606485539005, 0.92608822670172364, 0.92433537832310841, 0.92112182296231371, 0.91586327782646804, 0.92638036809815949, 0.93280747881974879, 0.9301782062518259, 0.91907683318726263, 0.91703184341221156, 0.91907683318726263, 0.920537540169442, 0.92229038854805723, 0.92112182296231371, 0.91703184341221156, 0.92345895413380075, 0.92141396435874967, 0.91381828805141685, 0.91820040899795496, 0.91732398480864741, 0.913526146654981, 0.90651475314051999, 0.90213263219398188, 
0.90739117732982766, 0.90885188431200703, 0.9038854805725971, 0.91177329827636577, 0.91557113643003218, 0.91878469179082678, 0.92112182296231371, 0.92550394390885193, 0.91323400525854515, 0.90534618755477647, 0.90271691498685369, 0.89628980426526439, 0.89775051124744376], 2934: [0.89604635310156777, 0.90149965916837083, 0.91547375596455349, 0.92638036809815949, 0.92263122017723243, 0.92092706203135655, 0.92638036809815949, 0.93387866394001362, 0.93353783231083842, 0.92876618950238587, 0.9253578732106339, 0.92706203135650989, 0.92569870483980909, 0.93626448534423989, 0.93490115882753921, 0.92842535787321068, 0.92092706203135655, 0.91990456714383095, 0.92842535787321068, 0.92603953646898429, 0.92126789366053174, 0.91922290388548056, 0.92672119972733469, 0.93456032719836402, 0.92229038854805723, 0.92160872528970683, 0.91956373551465576, 0.91717791411042948, 0.91240627130197682, 0.91240627130197682, 0.9147920927062031, 0.91717791411042948, 0.91513292433537829, 0.91342876618950242, 0.91717791411042948, 0.92058623040218135, 0.92808452624403548, 0.92808452624403548, 0.9253578732106339, 0.91615541922290389, 0.90865712338104976, 0.9035446489434219, 0.89843217450579416, 0.90558963871847309], 2445: [0.89775051124744376, 0.9038854805725971, 0.92229038854805723, 0.93047034764826175, 0.92842535787321068, 0.92147239263803682, 0.92801635991820042, 0.93742331288343561, 0.93946830265848669, 0.93006134969325149, 0.93210633946830268, 0.93251533742331283, 0.93537832310838442, 0.93701431492842535, 0.93660531697341509, 0.93047034764826175, 0.93251533742331283, 0.92842535787321068, 0.92760736196319016, 0.93047034764826175, 0.92801635991820042, 0.92106339468302656, 0.92638036809815949, 0.93333333333333335, 0.93987730061349695, 0.93333333333333335, 0.92515337423312882, 0.91901840490797548, 0.91411042944785281, 0.91574642126789363, 0.92515337423312882, 0.92678936605316975, 0.92229038854805723, 0.92024539877300615, 0.91983640081799589, 0.92433537832310841, 0.92760736196319016, 
0.93456032719836402, 0.93742331288343561, 0.9235173824130879, 0.91901840490797548, 0.91615541922290389, 0.90838445807770962, 0.90920245398773003, 0.91002044989775055], 1956: [0.89570552147239269, 0.90286298568507162, 0.92126789366053174, 0.93762781186094069, 0.93200408997955009, 0.93098159509202449, 0.93149284253578735, 0.93711656441717794, 0.94478527607361962, 0.93813905930470343, 0.93404907975460127, 0.93865030674846628, 0.94171779141104295, 0.94734151329243355, 0.94427402862985688, 0.93762781186094069, 0.93098159509202449, 0.93302658486707568, 0.93711656441717794, 0.92944785276073616, 0.93251533742331283, 0.92484662576687116, 0.9253578732106339, 0.93404907975460127, 0.93813905930470343, 0.94529652351738236, 0.93762781186094069, 0.92331288343558282, 0.91922290388548056, 0.91666666666666663, 0.93251533742331283, 0.93456032719836402, 0.92433537832310841, 0.92995910020449901, 0.92638036809815949, 0.92586912065439675, 0.92842535787321068, 0.93404907975460127, 0.93967280163599187, 0.93660531697341509, 0.92944785276073616, 0.91922290388548056, 0.91564417177914115, 0.91768916155419222, 0.91615541922290389, 0.9253578732106339], 1467: [0.89229720518064082, 0.89843217450579416, 0.92774369461486028, 0.94274028629856854, 0.94478527607361962, 0.94069529652351735, 0.9468302658486708, 0.93524199045671441, 0.94069529652351735, 0.94546693933197001, 0.93865030674846628, 0.93660531697341509, 0.95023858214042267, 0.95023858214042267, 0.95637355146557601, 0.94001363326516696, 0.93660531697341509, 0.92842535787321068, 0.94546693933197001, 0.92910702113156096, 0.93524199045671441, 0.93456032719836402, 0.92978868438991136, 0.93865030674846628, 0.93796864349011588, 0.95023858214042267, 0.95092024539877296, 0.93047034764826175, 0.92024539877300615, 0.92160872528970683, 0.93592365371506481, 0.94137695978186775, 0.93387866394001362, 0.92842535787321068, 0.94069529652351735, 0.93456032719836402, 0.93592365371506481, 0.92978868438991136, 0.94887525562372188, 0.94410361281526922, 
0.93933197000681667, 0.92774369461486028, 0.91002044989775055, 0.92706203135650989, 0.93183367416496254, 0.93319700068166322, 0.92978868438991136], 978: [0.91104294478527603, 0.89059304703476483, 0.92842535787321068, 0.95194274028629855, 0.95501022494887522, 0.94887525562372188, 0.95705521472392641, 0.95194274028629855, 0.94171779141104295, 0.93762781186094069, 0.94989775051124747, 0.94478527607361962, 0.94989775051124747, 0.95910020449897748, 0.94785276073619629, 0.95501022494887522, 0.93762781186094069, 0.9253578732106339, 0.94989775051124747, 0.95092024539877296, 0.92638036809815949, 0.93456032719836402, 0.94887525562372188, 0.9468302658486708, 0.94171779141104295, 0.95705521472392641, 0.95398773006134974, 0.9468302658486708, 0.92126789366053174, 0.91411042944785281, 0.93865030674846628, 0.94478527607361962, 0.94274028629856854, 0.93149284253578735, 0.93865030674846628, 0.94069529652351735, 0.94989775051124747, 0.93456032719836402, 0.94171779141104295, 0.96319018404907975, 0.94785276073619629, 0.93865030674846628, 0.92433537832310841, 0.93558282208588961, 0.93149284253578735, 0.93149284253578735, 0.93558282208588961, 0.92126789366053174], 489: [0.91820040899795496, 0.91002044989775055, 0.91206543967280163, 0.96932515337423308, 0.97750511247443761, 0.95501022494887522, 0.95910020449897748, 0.95296523517382414, 0.96114519427402867, 0.93660531697341509, 0.96523517382413093, 0.9468302658486708, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.95296523517382414, 0.96523517382413093, 0.90593047034764829, 0.96319018404907975, 0.95910020449897748, 0.95910020449897748, 0.91411042944785281, 0.94069529652351735, 0.97137014314928427, 0.93865030674846628, 0.95705521472392641, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.90184049079754602, 0.93660531697341509, 0.9468302658486708, 0.95501022494887522, 0.94069529652351735, 0.94274028629856854, 0.95296523517382414, 0.94069529652351735, 0.95296523517382414, 0.94069529652351735, 
0.96114519427402867, 0.9795501022494888, 0.94069529652351735, 0.92433537832310841, 0.93660531697341509, 0.93047034764826175, 0.93047034764826175, 0.96932515337423308, 0.92024539877300615, 0.94887525562372188]}
incremental_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859, 0.15132924335378323], 22983: [0.096114519427402859, 0.15132924335378323, 0.26993865030674846], 22494: [0.096114519427402859, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484], 22005: [0.09815950920245399, 0.15132924335378323, 0.27607361963190186, 0.22290388548057261, 0.16359918200408999], 21516: [0.096114519427402859, 0.15337423312883436, 0.27198364008179959, 0.22085889570552147, 0.16768916155419222, 0.096114519427402859], 21027: [0.09815950920245399, 0.1492842535787321, 0.27607361963190186, 0.22085889570552147, 0.16973415132924335, 0.096114519427402859, 0.15746421267893659], 20538: [0.096114519427402859, 0.15337423312883436, 0.27811860940695299, 0.22699386503067484, 0.16564417177914109, 0.10020449897750511, 0.15541922290388549, 0.15132924335378323], 20049: [0.096114519427402859, 0.15132924335378323, 0.27402862985685073, 0.22903885480572597, 0.16768916155419222, 0.096114519427402859, 0.15132924335378323, 0.15541922290388549, 0.20449897750511248], 19560: [0.09202453987730061, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484, 0.15950920245398773, 0.094069529652351741, 0.15132924335378323, 0.14723926380368099, 0.20245398773006135, 0.14723926380368099], 19071: [0.096114519427402859, 0.15337423312883436, 0.27402862985685073, 0.22494887525562371, 0.16359918200408999, 0.094069529652351741, 0.15132924335378323, 0.15950920245398773, 0.20245398773006135, 0.15132924335378323, 0.21676891615541921], 18582: [0.094069529652351741, 0.15337423312883436, 0.28016359918200406, 0.22290388548057261, 0.16359918200408999, 0.09202453987730061, 0.1492842535787321, 0.16155419222903886, 0.20040899795501022, 0.15337423312883436, 0.21472392638036811, 0.32310838445807771], 18093: [0.096114519427402859, 0.15950920245398773, 0.28016359918200406, 0.22699386503067484, 0.16359918200408999, 0.085889570552147243, 0.1492842535787321, 0.16155419222903886, 
0.20245398773006135, 0.15337423312883436, 0.21676891615541921, 0.32106339468302658, 0.28016359918200406], 17604: [0.09815950920245399, 0.16564417177914109, 0.2822085889570552, 0.22903885480572597, 0.17177914110429449, 0.087934560327198361, 0.14723926380368099, 0.16768916155419222, 0.20654396728016361, 0.15337423312883436, 0.21676891615541921, 0.32924335378323111, 0.29038854805725972, 0.30061349693251532], 17115: [0.096114519427402859, 0.16155419222903886, 0.27811860940695299, 0.22903885480572597, 0.17586912065439672, 0.094069529652351741, 0.14519427402862986, 0.16768916155419222, 0.21063394683026584, 0.15746421267893659, 0.21676891615541921, 0.32719836400817998, 0.28425357873210633, 0.30061349693251532, 0.27607361963190186], 16626: [0.09202453987730061, 0.15950920245398773, 0.28629856850715746, 0.22494887525562371, 0.16973415132924335, 0.094069529652351741, 0.14314928425357873, 0.16564417177914109, 0.20449897750511248, 0.16359918200408999, 0.22085889570552147, 0.33537832310838445, 0.28016359918200406, 0.29856850715746419, 0.2822085889570552, 0.24130879345603273], 16137: [0.094069529652351741, 0.15746421267893659, 0.2822085889570552, 0.2310838445807771, 0.17177914110429449, 0.096114519427402859, 0.14519427402862986, 0.17586912065439672, 0.21063394683026584, 0.16359918200408999, 0.22290388548057261, 0.32719836400817998, 0.28834355828220859, 0.30265848670756645, 0.28016359918200406, 0.2474437627811861, 0.33537832310838445], 15648: [0.09202453987730061, 0.16973415132924335, 0.2822085889570552, 0.22699386503067484, 0.17995910020449898, 0.094069529652351741, 0.14519427402862986, 0.17177914110429449, 0.21267893660531698, 0.16155419222903886, 0.22494887525562371, 0.32924335378323111, 0.29038854805725972, 0.30674846625766872, 0.29038854805725972, 0.24130879345603273, 0.33333333333333331, 0.28629856850715746], 15159: [0.096114519427402859, 0.16768916155419222, 0.28425357873210633, 0.22085889570552147, 0.17177914110429449, 0.09202453987730061, 0.1411042944785276, 
0.17791411042944785, 0.21676891615541921, 0.15746421267893659, 0.21676891615541921, 0.33128834355828218, 0.29243353783231085, 0.29856850715746419, 0.29038854805725972, 0.23517382413087934, 0.33946830265848671, 0.27811860940695299, 0.2392638036809816], 14670: [0.094069529652351741, 0.16359918200408999, 0.28629856850715746, 0.22494887525562371, 0.17177914110429449, 0.09202453987730061, 0.14519427402862986, 0.17586912065439672, 0.21881390593047034, 0.15950920245398773, 0.21881390593047034, 0.33128834355828218, 0.29038854805725972, 0.29652351738241312, 0.29447852760736198, 0.22903885480572597, 0.34969325153374231, 0.28834355828220859, 0.24130879345603273, 0.25153374233128833], 14181: [0.096114519427402859, 0.17177914110429449, 0.28834355828220859, 0.2310838445807771, 0.17382413087934559, 0.094069529652351741, 0.14519427402862986, 0.17586912065439672, 0.20654396728016361, 0.17177914110429449, 0.21267893660531698, 0.33537832310838445, 0.29447852760736198, 0.30265848670756645, 0.29243353783231085, 0.24539877300613497, 0.34764826175869118, 0.2822085889570552, 0.23517382413087934, 0.25971370143149286, 0.26789366053169733], 13692: [0.10020449897750511, 0.16564417177914109, 0.29038854805725972, 0.23517382413087934, 0.17995910020449898, 0.087934560327198361, 0.14519427402862986, 0.17791411042944785, 0.20858895705521471, 0.16973415132924335, 0.21881390593047034, 0.33128834355828218, 0.29856850715746419, 0.30061349693251532, 0.28834355828220859, 0.24130879345603273, 0.35173824130879344, 0.28834355828220859, 0.24130879345603273, 0.26175869120654399, 0.27402862985685073, 0.32310838445807771], 13203: [0.09815950920245399, 0.17382413087934559, 0.29038854805725972, 0.24539877300613497, 0.17995910020449898, 0.089979550102249492, 0.1492842535787321, 0.17791411042944785, 0.21472392638036811, 0.17177914110429449, 0.21676891615541921, 0.33128834355828218, 0.30674846625766872, 0.30061349693251532, 0.28834355828220859, 0.24539877300613497, 0.35378323108384457, 0.28425357873210633, 
0.24335378323108384, 0.26175869120654399, 0.27811860940695299, 0.32310838445807771, 0.3619631901840491], 12714: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.23312883435582821, 0.17177914110429449, 0.096114519427402859, 0.15950920245398773, 0.17177914110429449, 0.21267893660531698, 0.16768916155419222, 0.21676891615541921, 0.33946830265848671, 0.30061349693251532, 0.29856850715746419, 0.29243353783231085, 0.25153374233128833, 0.34355828220858897, 0.29038854805725972, 0.24539877300613497, 0.26380368098159507, 0.2822085889570552, 0.32310838445807771, 0.35787321063394684, 0.35378323108384457], 12225: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.2392638036809816, 0.17382413087934559, 0.094069529652351741, 0.15541922290388549, 0.17382413087934559, 0.21881390593047034, 0.17586912065439672, 0.21676891615541921, 0.33742331288343558, 0.29652351738241312, 0.29243353783231085, 0.28834355828220859, 0.25153374233128833, 0.35173824130879344, 0.27402862985685073, 0.25153374233128833, 0.26175869120654399, 0.28425357873210633, 0.32515337423312884, 0.36400817995910023, 0.34969325153374231, 0.33333333333333331], 11736: [0.10224948875255624, 0.17177914110429449, 0.29447852760736198, 0.24335378323108384, 0.17382413087934559, 0.10224948875255624, 0.14723926380368099, 0.17791411042944785, 0.22494887525562371, 0.17177914110429449, 0.22290388548057261, 0.33742331288343558, 0.29652351738241312, 0.29038854805725972, 0.29652351738241312, 0.25153374233128833, 0.34969325153374231, 0.27811860940695299, 0.25357873210633947, 0.25153374233128833, 0.28425357873210633, 0.32719836400817998, 0.35991820040899797, 0.35378323108384457, 0.34764826175869118, 0.3783231083844581], 11247: [0.10224948875255624, 0.18200408997955012, 0.30470347648261759, 0.24948875255623723, 0.17177914110429449, 0.10020449897750511, 0.14519427402862986, 0.16973415132924335, 0.21472392638036811, 0.16768916155419222, 0.22699386503067484, 0.33742331288343558, 0.29652351738241312, 
0.29038854805725972, 0.28629856850715746, 0.25766871165644173, 0.35582822085889571, 0.30265848670756645, 0.25153374233128833, 0.2556237218813906, 0.28016359918200406, 0.33537832310838445, 0.3721881390593047, 0.35991820040899797, 0.33742331288343558, 0.37423312883435583, 0.33742331288343558], 10758: [0.10633946830265849, 0.18813905930470348, 0.30470347648261759, 0.26175869120654399, 0.18404907975460122, 0.10429447852760736, 0.15132924335378323, 0.18609406952965235, 0.21063394683026584, 0.17586912065439672, 0.22903885480572597, 0.33537832310838445, 0.30265848670756645, 0.30265848670756645, 0.29652351738241312, 0.25971370143149286, 0.35787321063394684, 0.29038854805725972, 0.26789366053169733, 0.25153374233128833, 0.27811860940695299, 0.33128834355828218, 0.36809815950920244, 0.35787321063394684, 0.34969325153374231, 0.36400817995910023, 0.32719836400817998, 0.33333333333333331], 10269: [0.10838445807770961, 0.18200408997955012, 0.30470347648261759, 0.27607361963190186, 0.18200408997955012, 0.10224948875255624, 0.15950920245398773, 0.18609406952965235, 0.21881390593047034, 0.17586912065439672, 0.23721881390593047, 0.34151329243353784, 0.30061349693251532, 0.29447852760736198, 0.30061349693251532, 0.25357873210633947, 0.35173824130879344, 0.30265848670756645, 0.25971370143149286, 0.25766871165644173, 0.26993865030674846, 0.32515337423312884, 0.36605316973415131, 0.35787321063394684, 0.34969325153374231, 0.36605316973415131, 0.34151329243353784, 0.33742331288343558, 0.30265848670756645], 9780: [0.10838445807770961, 0.19427402862985685, 0.30674846625766872, 0.28425357873210633, 0.18200408997955012, 0.10633946830265849, 0.16564417177914109, 0.18404907975460122, 0.21881390593047034, 0.18609406952965235, 0.22699386503067484, 0.34969325153374231, 0.30470347648261759, 0.29038854805725972, 0.30674846625766872, 0.26175869120654399, 0.34969325153374231, 0.30470347648261759, 0.2658486707566462, 0.27402862985685073, 0.28629856850715746, 0.33537832310838445, 0.35173824130879344, 
0.35582822085889571, 0.34355828220858897, 0.36605316973415131, 0.32924335378323111, 0.33946830265848671, 0.30265848670756645, 0.36400817995910023], 9291: [0.11042944785276074, 0.18813905930470348, 0.31697341513292432, 0.29243353783231085, 0.18404907975460122, 0.10838445807770961, 0.17791411042944785, 0.18813905930470348, 0.21881390593047034, 0.18813905930470348, 0.22290388548057261, 0.34969325153374231, 0.30061349693251532, 0.29447852760736198, 0.31083844580777098, 0.2658486707566462, 0.3456032719836401, 0.31083844580777098, 0.26993865030674846, 0.26175869120654399, 0.28425357873210633, 0.32310838445807771, 0.35173824130879344, 0.35582822085889571, 0.35582822085889571, 0.36809815950920244, 0.32719836400817998, 0.32310838445807771, 0.31288343558282211, 0.36605316973415131, 0.29856850715746419], 8802: [0.11451942740286299, 0.19222903885480572, 0.32515337423312884, 0.29447852760736198, 0.19222903885480572, 0.10020449897750511, 0.18404907975460122, 0.19018404907975461, 0.21676891615541921, 0.18813905930470348, 0.22699386503067484, 0.34969325153374231, 0.30061349693251532, 0.28834355828220859, 0.30061349693251532, 0.26993865030674846, 0.35378323108384457, 0.31083844580777098, 0.2822085889570552, 0.27607361963190186, 0.29038854805725972, 0.32515337423312884, 0.35378323108384457, 0.37014314928425357, 0.35787321063394684, 0.35787321063394684, 0.33537832310838445, 0.33128834355828218, 0.30470347648261759, 0.3783231083844581, 0.30061349693251532, 0.2822085889570552], 8313: [0.1165644171779141, 0.19631901840490798, 0.32924335378323111, 0.29038854805725972, 0.19018404907975461, 0.10838445807770961, 0.19631901840490798, 0.18813905930470348, 0.22903885480572597, 0.18200408997955012, 0.22903885480572597, 0.34151329243353784, 0.29856850715746419, 0.30061349693251532, 0.29652351738241312, 0.26993865030674846, 0.35173824130879344, 0.30879345603271985, 0.28425357873210633, 0.27811860940695299, 0.29243353783231085, 0.31901840490797545, 0.35582822085889571, 0.3783231083844581, 
0.35378323108384457, 0.36605316973415131, 0.3456032719836401, 0.34355828220858897, 0.31492842535787319, 0.36605316973415131, 0.29447852760736198, 0.29038854805725972, 0.32719836400817998], 7824: [0.1165644171779141, 0.19427402862985685, 0.33742331288343558, 0.29652351738241312, 0.19427402862985685, 0.11042944785276074, 0.20040899795501022, 0.19222903885480572, 0.22494887525562371, 0.18813905930470348, 0.2310838445807771, 0.33742331288343558, 0.30265848670756645, 0.29038854805725972, 0.28629856850715746, 0.2658486707566462, 0.35582822085889571, 0.30061349693251532, 0.30061349693251532, 0.28834355828220859, 0.29243353783231085, 0.32515337423312884, 0.35991820040899797, 0.37423312883435583, 0.35787321063394684, 0.36605316973415131, 0.33537832310838445, 0.34355828220858897, 0.31492842535787319, 0.3619631901840491, 0.28834355828220859, 0.28834355828220859, 0.32106339468302658, 0.28425357873210633], 7335: [0.12269938650306748, 0.19222903885480572, 0.33537832310838445, 0.30061349693251532, 0.19427402862985685, 0.11451942740286299, 0.20245398773006135, 0.19631901840490798, 0.24539877300613497, 0.19222903885480572, 0.23517382413087934, 0.33333333333333331, 0.29652351738241312, 0.29856850715746419, 0.30674846625766872, 0.26380368098159507, 0.36809815950920244, 0.29652351738241312, 0.30061349693251532, 0.2822085889570552, 0.29652351738241312, 0.31901840490797545, 0.35787321063394684, 0.36400817995910023, 0.35378323108384457, 0.36400817995910023, 0.32924335378323111, 0.35378323108384457, 0.30061349693251532, 0.35991820040899797, 0.30061349693251532, 0.27811860940695299, 0.33742331288343558, 0.29652351738241312, 0.25971370143149286], 6846: [0.12883435582822086, 0.19631901840490798, 0.33946830265848671, 0.29038854805725972, 0.19836400817995911, 0.12678936605316973, 0.21472392638036811, 0.19222903885480572, 0.24335378323108384, 0.19222903885480572, 0.22903885480572597, 0.3456032719836401, 0.30470347648261759, 0.28629856850715746, 0.30470347648261759, 0.26380368098159507, 
0.35991820040899797, 0.29652351738241312, 0.31901840490797545, 0.26789366053169733, 0.30265848670756645, 0.32310838445807771, 0.35991820040899797, 0.35787321063394684, 0.35378323108384457, 0.36605316973415131, 0.33537832310838445, 0.34764826175869118, 0.30879345603271985, 0.36400817995910023, 0.29243353783231085, 0.28834355828220859, 0.32924335378323111, 0.29038854805725972, 0.26993865030674846, 0.35378323108384457], 6357: [0.1329243353783231, 0.19836400817995911, 0.33333333333333331, 0.30470347648261759, 0.20040899795501022, 0.12269938650306748, 0.21676891615541921, 0.18813905930470348, 0.24130879345603273, 0.19222903885480572, 0.22494887525562371, 0.34151329243353784, 0.30470347648261759, 0.28629856850715746, 0.29243353783231085, 0.2658486707566462, 0.3619631901840491, 0.27402862985685073, 0.32924335378323111, 0.28016359918200406, 0.29652351738241312, 0.31492842535787319, 0.34764826175869118, 0.36605316973415131, 0.34764826175869118, 0.37014314928425357, 0.32924335378323111, 0.35787321063394684, 0.30061349693251532, 0.37014314928425357, 0.30061349693251532, 0.29447852760736198, 0.33128834355828218, 0.30061349693251532, 0.27198364008179959, 0.34969325153374231, 0.29243353783231085], 5868: [0.1411042944785276, 0.20245398773006135, 0.35173824130879344, 0.30879345603271985, 0.21063394683026584, 0.12065439672801637, 0.21472392638036811, 0.19018404907975461, 0.23721881390593047, 0.19836400817995911, 0.21063394683026584, 0.33128834355828218, 0.31901840490797545, 0.28834355828220859, 0.28425357873210633, 0.27402862985685073, 0.35582822085889571, 0.29652351738241312, 0.31901840490797545, 0.26993865030674846, 0.2822085889570552, 0.33333333333333331, 0.35582822085889571, 0.35787321063394684, 0.35582822085889571, 0.3721881390593047, 0.32924335378323111, 0.34969325153374231, 0.28834355828220859, 0.36605316973415131, 0.30674846625766872, 0.30470347648261759, 0.33946830265848671, 0.29652351738241312, 0.2658486707566462, 0.35582822085889571, 0.2822085889570552, 
0.27607361963190186], 5379: [0.14314928425357873, 0.20449897750511248, 0.3456032719836401, 0.31492842535787319, 0.21676891615541921, 0.12678936605316973, 0.22085889570552147, 0.19018404907975461, 0.24335378323108384, 0.20449897750511248, 0.21063394683026584, 0.33333333333333331, 0.32719836400817998, 0.29243353783231085, 0.29038854805725972, 0.27811860940695299, 0.34969325153374231, 0.29856850715746419, 0.32515337423312884, 0.28629856850715746, 0.29038854805725972, 0.33742331288343558, 0.34969325153374231, 0.35173824130879344, 0.33946830265848671, 0.35378323108384457, 0.32106339468302658, 0.33537832310838445, 0.28016359918200406, 0.35378323108384457, 0.30265848670756645, 0.29038854805725972, 0.34151329243353784, 0.30879345603271985, 0.27607361963190186, 0.35173824130879344, 0.28425357873210633, 0.27402862985685073, 0.30674846625766872], 4890: [0.15132924335378323, 0.20858895705521471, 0.34969325153374231, 0.31697341513292432, 0.2310838445807771, 0.13905930470347649, 0.2310838445807771, 0.19427402862985685, 0.2474437627811861, 0.22085889570552147, 0.21881390593047034, 0.33742331288343558, 0.33742331288343558, 0.30470347648261759, 0.31083844580777098, 0.28016359918200406, 0.34764826175869118, 0.30061349693251532, 0.34151329243353784, 0.29038854805725972, 0.2822085889570552, 0.32106339468302658, 0.34764826175869118, 0.37627811860940696, 0.33946830265848671, 0.35378323108384457, 0.34764826175869118, 0.35173824130879344, 0.28425357873210633, 0.35582822085889571, 0.28629856850715746, 0.2822085889570552, 0.33128834355828218, 0.31083844580777098, 0.27402862985685073, 0.35991820040899797, 0.29038854805725972, 0.28834355828220859, 0.31083844580777098, 0.28016359918200406], 4401: [0.15337423312883436, 0.21472392638036811, 0.3619631901840491, 0.33128834355828218, 0.2474437627811861, 0.14723926380368099, 0.24539877300613497, 0.20858895705521471, 0.24335378323108384, 0.21881390593047034, 0.21881390593047034, 0.32310838445807771, 0.32515337423312884, 0.30265848670756645, 
0.30879345603271985, 0.28425357873210633, 0.33742331288343558, 0.2822085889570552, 0.32924335378323111, 0.28629856850715746, 0.28425357873210633, 0.31492842535787319, 0.35582822085889571, 0.3783231083844581, 0.34151329243353784, 0.36605316973415131, 0.34151329243353784, 0.3456032719836401, 0.28016359918200406, 0.3619631901840491, 0.30061349693251532, 0.27607361963190186, 0.33537832310838445, 0.31697341513292432, 0.27811860940695299, 0.34969325153374231, 0.27811860940695299, 0.28629856850715746, 0.30879345603271985, 0.2822085889570552, 0.32515337423312884], 3912: [0.17791411042944785, 0.21472392638036811, 0.36809815950920244, 0.32515337423312884, 0.24948875255623723, 0.15337423312883436, 0.22903885480572597, 0.21063394683026584, 0.24335378323108384, 0.21267893660531698, 0.22085889570552147, 0.31901840490797545, 0.31697341513292432, 0.29447852760736198, 0.29447852760736198, 0.2658486707566462, 0.35173824130879344, 0.28016359918200406, 0.33333333333333331, 0.29652351738241312, 0.28834355828220859, 0.31288343558282211, 0.34355828220858897, 0.37014314928425357, 0.34355828220858897, 0.35991820040899797, 0.33333333333333331, 0.33742331288343558, 0.27402862985685073, 0.36605316973415131, 0.31697341513292432, 0.28834355828220859, 0.32515337423312884, 0.30674846625766872, 0.26993865030674846, 0.36400817995910023, 0.28016359918200406, 0.29856850715746419, 0.29652351738241312, 0.28629856850715746, 0.33128834355828218, 0.30470347648261759], 3423: [0.19631901840490798, 0.21267893660531698, 0.3619631901840491, 0.33537832310838445, 0.2474437627811861, 0.1492842535787321, 0.24539877300613497, 0.23721881390593047, 0.2556237218813906, 0.21063394683026584, 0.22290388548057261, 0.33128834355828218, 0.32719836400817998, 0.29447852760736198, 0.31697341513292432, 0.24948875255623723, 0.34764826175869118, 0.2822085889570552, 0.31288343558282211, 0.29038854805725972, 0.30879345603271985, 0.30061349693251532, 0.35787321063394684, 0.3824130879345603, 0.34764826175869118, 0.35173824130879344, 
0.33333333333333331, 0.33128834355828218, 0.26380368098159507, 0.3619631901840491, 0.29447852760736198, 0.30061349693251532, 0.33742331288343558, 0.30061349693251532, 0.26789366053169733, 0.3783231083844581, 0.29243353783231085, 0.28425357873210633, 0.30470347648261759, 0.27811860940695299, 0.34969325153374231, 0.29038854805725972, 0.22290388548057261], 2934: [0.19222903885480572, 0.24539877300613497, 0.36605316973415131, 0.33946830265848671, 0.23517382413087934, 0.17586912065439672, 0.2556237218813906, 0.2556237218813906, 0.25153374233128833, 0.20654396728016361, 0.21472392638036811, 0.31288343558282211, 0.31288343558282211, 0.31288343558282211, 0.30470347648261759, 0.25971370143149286, 0.34764826175869118, 0.27607361963190186, 0.31492842535787319, 0.29038854805725972, 0.28425357873210633, 0.28834355828220859, 0.36400817995910023, 0.37423312883435583, 0.33946830265848671, 0.37627811860940696, 0.33333333333333331, 0.31901840490797545, 0.27402862985685073, 0.33946830265848671, 0.29038854805725972, 0.27811860940695299, 0.32106339468302658, 0.27607361963190186, 0.24130879345603273, 0.3783231083844581, 0.28016359918200406, 0.29652351738241312, 0.30879345603271985, 0.2658486707566462, 0.33537832310838445, 0.30470347648261759, 0.23517382413087934, 0.30879345603271985], 2445: [0.19222903885480572, 0.24335378323108384, 0.38036809815950923, 0.34151329243353784, 0.24539877300613497, 0.18813905930470348, 0.25153374233128833, 0.25153374233128833, 0.26175869120654399, 0.21676891615541921, 0.20654396728016361, 0.29447852760736198, 0.30879345603271985, 0.32515337423312884, 0.30879345603271985, 0.25153374233128833, 0.3456032719836401, 0.28425357873210633, 0.31697341513292432, 0.29652351738241312, 0.29856850715746419, 0.31288343558282211, 0.34355828220858897, 0.39468302658486709, 0.34151329243353784, 0.3721881390593047, 0.31901840490797545, 0.30674846625766872, 0.27402862985685073, 0.32106339468302658, 0.29447852760736198, 0.30265848670756645, 0.30879345603271985, 
0.25971370143149286, 0.23721881390593047, 0.37423312883435583, 0.28834355828220859, 0.30879345603271985, 0.30674846625766872, 0.26789366053169733, 0.31083844580777098, 0.28629856850715746, 0.21881390593047034, 0.28425357873210633, 0.29652351738241312], 1956: [0.20245398773006135, 0.26993865030674846, 0.35991820040899797, 0.33333333333333331, 0.25357873210633947, 0.17791411042944785, 0.2474437627811861, 0.28425357873210633, 0.29038854805725972, 0.17177914110429449, 0.19222903885480572, 0.29652351738241312, 0.29856850715746419, 0.33946830265848671, 0.32515337423312884, 0.25766871165644173, 0.33742331288343558, 0.29038854805725972, 0.34151329243353784, 0.28834355828220859, 0.28834355828220859, 0.29038854805725972, 0.33333333333333331, 0.35991820040899797, 0.31901840490797545, 0.36400817995910023, 0.33742331288343558, 0.31492842535787319, 0.26380368098159507, 0.31492842535787319, 0.29243353783231085, 0.28834355828220859, 0.30879345603271985, 0.2658486707566462, 0.2310838445807771, 0.3783231083844581, 0.27607361963190186, 0.30265848670756645, 0.31697341513292432, 0.24130879345603273, 0.26993865030674846, 0.2556237218813906, 0.19427402862985685, 0.31492842535787319, 0.31901840490797545, 0.31697341513292432], 1467: [0.21063394683026584, 0.26993865030674846, 0.33742331288343558, 0.32924335378323111, 0.26993865030674846, 0.19018404907975461, 0.26380368098159507, 0.28834355828220859, 0.31083844580777098, 0.19427402862985685, 0.17382413087934559, 0.28425357873210633, 0.28629856850715746, 0.32310838445807771, 0.28016359918200406, 0.24539877300613497, 0.32106339468302658, 0.26175869120654399, 0.32310838445807771, 0.28629856850715746, 0.26380368098159507, 0.28629856850715746, 0.31492842535787319, 0.3456032719836401, 0.32924335378323111, 0.34151329243353784, 0.35378323108384457, 0.30061349693251532, 0.24539877300613497, 0.31492842535787319, 0.28629856850715746, 0.28425357873210633, 0.31288343558282211, 0.25971370143149286, 0.24539877300613497, 0.34355828220858897, 
0.26175869120654399, 0.29243353783231085, 0.32310838445807771, 0.24948875255623723, 0.26380368098159507, 0.22903885480572597, 0.20858895705521471, 0.30061349693251532, 0.30674846625766872, 0.28629856850715746, 0.2822085889570552], 978: [0.22085889570552147, 0.26789366053169733, 0.33946830265848671, 0.35378323108384457, 0.27402862985685073, 0.19631901840490798, 0.26175869120654399, 0.28834355828220859, 0.29856850715746419, 0.20858895705521471, 0.17791411042944785, 0.29652351738241312, 0.28016359918200406, 0.31901840490797545, 0.25153374233128833, 0.26380368098159507, 0.31901840490797545, 0.24130879345603273, 0.31492842535787319, 0.27811860940695299, 0.26789366053169733, 0.25766871165644173, 0.30265848670756645, 0.32310838445807771, 0.30879345603271985, 0.31288343558282211, 0.32310838445807771, 0.29038854805725972, 0.23517382413087934, 0.29652351738241312, 0.2310838445807771, 0.26175869120654399, 0.29652351738241312, 0.22290388548057261, 0.20654396728016361, 0.30879345603271985, 0.24130879345603273, 0.2658486707566462, 0.29652351738241312, 0.26789366053169733, 0.2392638036809816, 0.18813905930470348, 0.19631901840490798, 0.27607361963190186, 0.25971370143149286, 0.30265848670756645, 0.25766871165644173, 0.23312883435582821], 489: [0.21472392638036811, 0.21267893660531698, 0.32924335378323111, 0.35173824130879344, 0.29652351738241312, 0.20245398773006135, 0.24130879345603273, 0.30061349693251532, 0.29038854805725972, 0.20449897750511248, 0.18813905930470348, 0.23721881390593047, 0.27198364008179959, 0.2822085889570552, 0.20040899795501022, 0.21676891615541921, 0.29652351738241312, 0.18609406952965235, 0.28425357873210633, 0.2474437627811861, 0.23517382413087934, 0.21267893660531698, 0.26380368098159507, 0.26789366053169733, 0.26380368098159507, 0.25766871165644173, 0.2658486707566462, 0.24130879345603273, 0.19631901840490798, 0.22903885480572597, 0.19427402862985685, 0.23312883435582821, 0.29038854805725972, 0.18813905930470348, 0.16973415132924335, 
0.25357873210633947, 0.17791411042944785, 0.20245398773006135, 0.25153374233128833, 0.23517382413087934, 0.2556237218813906, 0.13905930470347649, 0.15950920245398773, 0.24539877300613497, 0.22085889570552147, 0.23517382413087934, 0.25357873210633947, 0.17382413087934559, 0.39672801635991822]}
incremental_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
incremental_train_scores_mean_50_folds = array([ 0.94641292, 0.94018405, 0.93508245, 0.92998133, 0.92539877, 0.92040807,
0.91610107, 0.91143247, 0.90711645, 0.903318, 0.89938555, 0.89571898,
0.89192165, 0.88847908, 0.88510274, 0.8819883, 0.87861655, 0.87507101,
0.87178365, 0.8690559, 0.86611193, 0.8634239, 0.86069042, 0.85786338,
0.85518855, 0.85250511, 0.85012629, 0.84780429, 0.84546392, 0.84356169,
0.84149767, 0.83968132, 0.83789902, 0.83605873, 0.83428571, 0.83245285,
0.83075052, 0.82938238, 0.8276965, 0.82601738, 0.82459641, 0.82349182,
0.82237803, 0.82158239, 0.82042263, 0.81958522, 0.81863406, 0.81820893,
0.81770377])
incremental_train_scores_std_50_folds = array([ 0.01849963, 0.01378789, 0.01221196, 0.01036566, 0.00951653, 0.0095293,
0.00920113, 0.00879358, 0.00817018, 0.00829866, 0.00816914, 0.00840314,
0.00921439, 0.00955778, 0.00966554, 0.01008684, 0.01065165, 0.0106473,
0.0110683, 0.01155044, 0.01137906, 0.01131299, 0.01164905, 0.01127409,
0.01067386, 0.01048885, 0.00997217, 0.00943623, 0.00849798, 0.00799113,
0.00751208, 0.00688614, 0.00636693, 0.00622729, 0.00592422, 0.00536307,
0.00523293, 0.00500458, 0.0051211, 0.00461855, 0.00442537, 0.00428366,
0.00362497, 0.0031117, 0.00208263, 0.00132304, 0.00030215, 0.00021302,
0. ])
incremental_test_scores_mean_50_folds = array([ 0.24009849, 0.26921438, 0.28260018, 0.28949942, 0.29202454, 0.29280535,
0.2946212, 0.29418639, 0.29457828, 0.29335378, 0.28850087, 0.28796685,
0.28740397, 0.28732106, 0.28425358, 0.28317094, 0.28282828, 0.27952454,
0.27653539, 0.27484663, 0.26937452, 0.26701724, 0.26168295, 0.25727544,
0.25071575, 0.24710293, 0.24290922, 0.23545269, 0.23001266, 0.22617587,
0.22354967, 0.22438082, 0.21953567, 0.2101227, 0.20804363, 0.20333041,
0.19207173, 0.18404908, 0.17159323, 0.16462168, 0.16950693, 0.1658998,
0.16681274, 0.16768916, 0.18241309, 0.18711656, 0.1724608, 0.12372188,
0.09611452])
incremental_test_scores_std_50_folds = array([ 0.04942445, 0.04132467, 0.04251402, 0.04883771, 0.04769812, 0.04882399,
0.05244483, 0.05212685, 0.05434039, 0.05685099, 0.05870295, 0.06312129,
0.06397167, 0.06475891, 0.06619849, 0.06858608, 0.07052225, 0.07211568,
0.07247594, 0.07426551, 0.0754153, 0.07650368, 0.08005695, 0.07839793,
0.07575411, 0.07573291, 0.0747032, 0.07206771, 0.07047588, 0.07223881,
0.07240794, 0.07394336, 0.0739362, 0.07114794, 0.07094068, 0.07254348,
0.06833713, 0.06574783, 0.05170119, 0.05388267, 0.05510474, 0.05699836,
0.05972988, 0.06344963, 0.06136878, 0.06931128, 0.07251949, 0.02760736,
0. ])
normal_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565], 6112: [0.90068717277486909]}
normal_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966], 6112: [0.10091593065096501]}
normal_train_sizes_4_folds = array([ 6112, 12224, 18336])
normal_train_scores_mean_4_folds = array([ 0.90068717, 0.85340314, 0.82422557])
normal_train_scores_std_4_folds = array([ 0., 0., 0.])
normal_test_scores_mean_4_folds = array([ 0.10091593, 0.09813543, 0.09421001])
normal_test_scores_std_4_folds = array([ 0., 0., 0.])
normal_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952], 12225: [0.85824130879345606], 8150: [0.88981595092024535], 4075: [0.92122699386503071]}
normal_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043], 12225: [0.05865030674846626], 8150: [0.063803680981595098], 4075: [0.075828220858895706]}
normal_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
normal_train_scores_mean_6_folds = array([ 0.92122699, 0.88981595, 0.85824131, 0.83558282, 0.8201227 ])
normal_train_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_test_scores_mean_6_folds = array([ 0.07582822, 0.06380368, 0.05865031, 0.05644172, 0.05496933])
normal_test_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754], 15280: [0.84332460732984293], 12224: [0.86338350785340312], 9168: [0.88568935427574169], 6112: [0.90085078534031415], 3056: [0.91819371727748689]}
normal_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004], 15280: [0.043819489862655332], 12224: [0.048724656638325703], 9168: [0.055591890124264222], 6112: [0.072596468279921514], 3056: [0.11118378024852844]}
normal_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
normal_train_scores_mean_8_folds = array([ 0.91819372, 0.90085079, 0.88568935, 0.86338351, 0.84332461, 0.82869764,
0.81740838])
normal_train_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_8_folds = array([ 0.11118378, 0.07259647, 0.05559189, 0.04872466, 0.04381949, 0.03956835,
0.03924133])
normal_test_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444], 17115: [0.83651767455448434], 14670: [0.84935241990456711], 12225: [0.86633946830265851], 9780: [0.88548057259713697], 7335: [0.89788684389911388], 4890: [0.91942740286298563], 2445: [0.92842535787321068]}
normal_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455], 17115: [0.12106339468302658], 14670: [0.12269938650306748], 12225: [0.12719836400817996], 9780: [0.14069529652351739], 7335: [0.16196319018404909], 4890: [0.17995910020449898], 2445: [0.19386503067484662]}
normal_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
normal_train_scores_mean_10_folds = array([ 0.92842536, 0.9194274, 0.89788684, 0.88548057, 0.86633947, 0.84935242,
0.83651767, 0.82617587, 0.81799591])
normal_train_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_10_folds = array([ 0.19386503, 0.1799591, 0.16196319, 0.1406953, 0.12719836, 0.12269939,
0.12106339, 0.11738241, 0.11820041])
normal_test_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918], 19560: [0.83052147239263807], 17930: [0.83736754043502515], 16300: [0.84699386503067486], 14670: [0.85405589638718471], 13040: [0.86618098159509205], 11410: [0.87765118317265556], 9780: [0.88588957055214723], 8150: [0.89447852760736202], 6520: [0.90567484662576692], 4890: [0.91390593047034763], 3260: [0.91932515337423315], 1630: [0.93496932515337428]}
normal_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773], 19560: [0.15337423312883436], 17930: [0.15214723926380369], 16300: [0.15398773006134969], 14670: [0.15460122699386503], 13040: [0.15766871165644172], 11410: [0.16809815950920245], 9780: [0.17668711656441718], 8150: [0.18466257668711655], 6520: [0.19570552147239265], 4890: [0.20429447852760735], 3260: [0.21717791411042944], 1630: [0.23803680981595093]}
normal_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
normal_train_scores_mean_15_folds = array([ 0.93496933, 0.91932515, 0.91390593, 0.90567485, 0.89447853, 0.88588957,
0.87765118, 0.86618098, 0.8540559, 0.84699387, 0.83736754, 0.83052147,
0.82298254, 0.81849255])
normal_train_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_15_folds = array([ 0.23803681, 0.21717791, 0.20429448, 0.19570552, 0.18466258, 0.17668712,
0.16809816, 0.15766871, 0.15460123, 0.15398773, 0.15214724, 0.15337423,
0.15092025, 0.15276074])
normal_test_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484], 21516: [0.824595649749024], 20538: [0.82753919563735512], 19560: [0.83195296523517381], 18582: [0.8366698955978904], 17604: [0.84003635537377863], 16626: [0.84620473956453746], 15648: [0.85090746421267893], 14670: [0.85821404226312203], 13692: [0.86240140227870288], 12714: [0.871558911436212], 11736: [0.87585207907293794], 10758: [0.88213422569250788], 9780: [0.88486707566462164], 8802: [0.88559418314019545], 7824: [0.89187116564417179], 6846: [0.89789658194566169], 5868: [0.90184049079754602], 4890: [0.89959100204498976], 3912: [0.90030674846625769], 2934: [0.90149965916837083], 1956: [0.90286298568507162], 978: [0.89059304703476483]}
normal_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292], 21516: [0.10736196319018405], 20538: [0.10736196319018405], 19560: [0.10531697341513292], 18582: [0.10736196319018405], 17604: [0.11349693251533742], 16626: [0.11042944785276074], 15648: [0.1165644171779141], 14670: [0.11349693251533742], 13692: [0.11451942740286299], 12714: [0.11758691206543967], 11736: [0.1196319018404908], 10758: [0.130879345603272], 9780: [0.1329243353783231], 8802: [0.13190184049079753], 7824: [0.13496932515337423], 6846: [0.13701431492842536], 5868: [0.14723926380368099], 4890: [0.15132924335378323], 3912: [0.15746421267893659], 2934: [0.17586912065439672], 1956: [0.19325153374233128], 978: [0.19529652351738241]}
normal_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
normal_train_scores_mean_25_folds = array([ 0.89059305, 0.90286299, 0.90149966, 0.90030675, 0.899591, 0.90184049,
0.89789658, 0.89187117, 0.88559418, 0.88486708, 0.88213423, 0.87585208,
0.87155891, 0.8624014, 0.85821404, 0.85090746, 0.84620474, 0.84003636,
0.8366699, 0.83195297, 0.8275392, 0.82459565, 0.81915177, 0.81799591])
normal_train_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_25_folds = array([ 0.19529652, 0.19325153, 0.17586912, 0.15746421, 0.15132924, 0.14723926,
0.13701431, 0.13496933, 0.13190184, 0.13292434, 0.13087935, 0.1196319,
0.11758691, 0.11451943, 0.11349693, 0.11656442, 0.11042945, 0.11349693,
0.10736196, 0.10531697, 0.10736196, 0.10736196, 0.10531697, 0.10633947])
normal_test_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894], 22983: [0.81877909759387368], 22494: [0.82177469547434867], 22005: [0.82394910247670983], 21516: [0.82594348391894401], 21027: [0.8270319113520711], 20538: [0.82958418541240631], 20049: [0.8316624270537184], 19560: [0.83333333333333337], 19071: [0.83682030307797184], 18582: [0.83785383704660421], 18093: [0.84115403747305584], 17604: [0.84350147693705979], 17115: [0.84691790826760149], 16626: [0.84885119692048594], 16137: [0.85077771580839068], 15648: [0.8539749488752556], 15159: [0.85744442245530705], 14670: [0.8597818677573279], 14181: [0.86150483040688242], 13692: [0.86634531113058721], 13203: [0.87002953874119515], 12714: [0.8728173666823974], 12225: [0.87427402862985681], 11736: [0.87857873210633952], 11247: [0.88094603005245842], 10758: [0.87999628183677259], 10269: [0.88207225630538511], 9780: [0.88466257668711656], 9291: [0.884834786352384], 8802: [0.88604862531242901], 8313: [0.89125466137375198], 7824: [0.89391615541922287], 7335: [0.89747784594410362], 6846: [0.90037978381536665], 6357: [0.89948088721094854], 5868: [0.8979209270620313], 5379: [0.8947759806655512], 4890: [0.89427402862985683], 4401: [0.89502385821404229], 3912: [0.89570552147239269], 3423: [0.89862693543675143], 2934: [0.89604635310156777], 2445: [0.89775051124744376], 1956: [0.89570552147239269], 1467: [0.89229720518064082], 978: [0.91104294478527603], 489: [0.91820040899795496]}
normal_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859], 22983: [0.096114519427402859], 22494: [0.096114519427402859], 22005: [0.09815950920245399], 21516: [0.096114519427402859], 21027: [0.09815950920245399], 20538: [0.096114519427402859], 20049: [0.096114519427402859], 19560: [0.09202453987730061], 19071: [0.096114519427402859], 18582: [0.094069529652351741], 18093: [0.096114519427402859], 17604: [0.09815950920245399], 17115: [0.096114519427402859], 16626: [0.09202453987730061], 16137: [0.094069529652351741], 15648: [0.09202453987730061], 15159: [0.096114519427402859], 14670: [0.094069529652351741], 14181: [0.096114519427402859], 13692: [0.10020449897750511], 13203: [0.09815950920245399], 12714: [0.10224948875255624], 12225: [0.10224948875255624], 11736: [0.10224948875255624], 11247: [0.10224948875255624], 10758: [0.10633946830265849], 10269: [0.10838445807770961], 9780: [0.10838445807770961], 9291: [0.11042944785276074], 8802: [0.11451942740286299], 8313: [0.1165644171779141], 7824: [0.1165644171779141], 7335: [0.12269938650306748], 6846: [0.12883435582822086], 6357: [0.1329243353783231], 5868: [0.1411042944785276], 5379: [0.14314928425357873], 4890: [0.15132924335378323], 4401: [0.15337423312883436], 3912: [0.17791411042944785], 3423: [0.19631901840490798], 2934: [0.19222903885480572], 2445: [0.19222903885480572], 1956: [0.20245398773006135], 1467: [0.21063394683026584], 978: [0.22085889570552147], 489: [0.21472392638036811]}
normal_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
normal_train_scores_mean_50_folds = array([ 0.91820041, 0.91104294, 0.89229721, 0.89570552, 0.89775051, 0.89604635,
0.89862694, 0.89570552, 0.89502386, 0.89427403, 0.89477598, 0.89792093,
0.89948089, 0.90037978, 0.89747785, 0.89391616, 0.89125466, 0.88604863,
0.88483479, 0.88466258, 0.88207226, 0.87999628, 0.88094603, 0.87857873,
0.87427403, 0.87281737, 0.87002954, 0.86634531, 0.86150483, 0.85978187,
0.85744442, 0.85397495, 0.85077772, 0.8488512, 0.84691791, 0.84350148,
0.84115404, 0.83785384, 0.8368203, 0.83333333, 0.83166243, 0.82958419,
0.82703191, 0.82594348, 0.8239491, 0.8217747, 0.8187791, 0.81842195,
0.81770377])
normal_train_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_50_folds = array([ 0.21472393, 0.2208589, 0.21063395, 0.20245399, 0.19222904, 0.19222904,
0.19631902, 0.17791411, 0.15337423, 0.15132924, 0.14314928, 0.14110429,
0.13292434, 0.12883436, 0.12269939, 0.11656442, 0.11656442, 0.11451943,
0.11042945, 0.10838446, 0.10838446, 0.10633947, 0.10224949, 0.10224949,
0.10224949, 0.10224949, 0.09815951, 0.1002045, 0.09611452, 0.09406953,
0.09611452, 0.09202454, 0.09406953, 0.09202454, 0.09611452, 0.09815951,
0.09611452, 0.09406953, 0.09611452, 0.09202454, 0.09611452, 0.09611452,
0.09815951, 0.09611452, 0.09815951, 0.09611452, 0.09611452, 0.09611452,
0.09611452])
normal_test_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
data = {
"incremental": {
4: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_4_folds,
"train_sizes": incremental_train_sizes_4_folds,
"train_scores_mean": incremental_train_scores_mean_4_folds,
"train_scores_std": incremental_train_scores_std_4_folds,
"test_scores_mean": incremental_test_scores_mean_4_folds,
"test_scores_std": incremental_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_6_folds,
"train_sizes": incremental_train_sizes_6_folds,
"train_scores_mean": incremental_train_scores_mean_6_folds,
"train_scores_std": incremental_train_scores_std_6_folds,
"test_scores_mean": incremental_test_scores_mean_6_folds,
"test_scores_std": incremental_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_8_folds,
"train_sizes": incremental_train_sizes_8_folds,
"train_scores_mean": incremental_train_scores_mean_8_folds,
"train_scores_std": incremental_train_scores_std_8_folds,
"test_scores_mean": incremental_test_scores_mean_8_folds,
"test_scores_std": incremental_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_10_folds,
"train_sizes": incremental_train_sizes_10_folds,
"train_scores_mean": incremental_train_scores_mean_10_folds,
"train_scores_std": incremental_train_scores_std_10_folds,
"test_scores_mean": incremental_test_scores_mean_10_folds,
"test_scores_std": incremental_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_15_folds,
"train_sizes": incremental_train_sizes_15_folds,
"train_scores_mean": incremental_train_scores_mean_15_folds,
"train_scores_std": incremental_train_scores_std_15_folds,
"test_scores_mean": incremental_test_scores_mean_15_folds,
"test_scores_std": incremental_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_25_folds,
"train_sizes": incremental_train_sizes_25_folds,
"train_scores_mean": incremental_train_scores_mean_25_folds,
"train_scores_std": incremental_train_scores_std_25_folds,
"test_scores_mean": incremental_test_scores_mean_25_folds,
"test_scores_std": incremental_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_50_folds,
"train_sizes": incremental_train_sizes_50_folds,
"train_scores_mean": incremental_train_scores_mean_50_folds,
"train_scores_std": incremental_train_scores_std_50_folds,
"test_scores_mean": incremental_test_scores_mean_50_folds,
"test_scores_std": incremental_test_scores_std_50_folds
}
},
"normal": {
4: {
"train_accuracy_per_size": normal_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_4_folds,
"train_sizes": normal_train_sizes_4_folds,
"train_scores_mean": normal_train_scores_mean_4_folds,
"train_scores_std": normal_train_scores_std_4_folds,
"test_scores_mean": normal_test_scores_mean_4_folds,
"test_scores_std": normal_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": normal_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_6_folds,
"train_sizes": normal_train_sizes_6_folds,
"train_scores_mean": normal_train_scores_mean_6_folds,
"train_scores_std": normal_train_scores_std_6_folds,
"test_scores_mean": normal_test_scores_mean_6_folds,
"test_scores_std": normal_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": normal_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_8_folds,
"train_sizes": normal_train_sizes_8_folds,
"train_scores_mean": normal_train_scores_mean_8_folds,
"train_scores_std": normal_train_scores_std_8_folds,
"test_scores_mean": normal_test_scores_mean_8_folds,
"test_scores_std": normal_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": normal_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_10_folds,
"train_sizes": normal_train_sizes_10_folds,
"train_scores_mean": normal_train_scores_mean_10_folds,
"train_scores_std": normal_train_scores_std_10_folds,
"test_scores_mean": normal_test_scores_mean_10_folds,
"test_scores_std": normal_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": normal_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_15_folds,
"train_sizes": normal_train_sizes_15_folds,
"train_scores_mean": normal_train_scores_mean_15_folds,
"train_scores_std": normal_train_scores_std_15_folds,
"test_scores_mean": normal_test_scores_mean_15_folds,
"test_scores_std": normal_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": normal_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_25_folds,
"train_sizes": normal_train_sizes_25_folds,
"train_scores_mean": normal_train_scores_mean_25_folds,
"train_scores_std": normal_train_scores_std_25_folds,
"test_scores_mean": normal_test_scores_mean_25_folds,
"test_scores_std": normal_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": normal_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_50_folds,
"train_sizes": normal_train_sizes_50_folds,
"train_scores_mean": normal_train_scores_mean_50_folds,
"train_scores_std": normal_train_scores_std_50_folds,
"test_scores_mean": normal_test_scores_mean_50_folds,
"test_scores_std": normal_test_scores_std_50_folds
}
}
}
# Iterate over each approach ("incremental"/"normal") and each fold count in
# the `data` dict built above, plotting one learning-curve figure per
# combination and saving it next to this script.
for key, value in data.items():
    # print("{}: {}".format(key, value)) # Debug
    for subKey, subValue in value.items():
        # print("{}: {}".format(subKey, subValue)) # Debug
        # Then, we plot the aforementioned learning curves
        title = "Learning Curves (Linear SVM without tuning, " + \
            key + \
            " approach, {} folds)".format(subKey)
        # plot_learning_curve is expected to return a matplotlib figure
        # built from the mean/std train and test scores.
        fig = plot_learning_curve(title, "accuracy", \
            subValue["train_sizes"], \
            subValue["train_scores_mean"], \
            subValue["train_scores_std"], \
            subValue["test_scores_mean"], \
            subValue["test_scores_std"])
        name_file = "{}_learning_curves_{}_folds.png".format( \
            key, subKey)
        # save_file = None if not save_file \
        # else os.path.join(current_dir, name_file)
        # NOTE(review): save_file is unconditionally assigned here, so the
        # plt.show() branch below is currently unreachable.
        save_file = os.path.join(current_dir, name_file)
        if save_file:
            plt.savefig(save_file, bbox_inches="tight")
            # Close the figure to free memory across the many iterations.
            plt.close(fig)
        else:
            plt.show()
if __name__ == "__main__":
    main()
cf398266bba39f44a35399478e373fb7c8895c96 | 735 | py | Python | pycopyql/cli.py | elazar/pycopyql | 4c8384b847fcd9ef2811c12375fc5e9e63094b3e | [
"MIT"
] | 1 | 2018-08-02T18:42:34.000Z | 2018-08-02T18:42:34.000Z | pycopyql/cli.py | elazar/pycopyql | 4c8384b847fcd9ef2811c12375fc5e9e63094b3e | [
"MIT"
] | null | null | null | pycopyql/cli.py | elazar/pycopyql | 4c8384b847fcd9ef2811c12375fc5e9e63094b3e | [
"MIT"
] | null | null | null | from .args import get_args
from .config import get_config, get_connection_config, get_engine, get_meta
from .query import query
from .export import get_exporter
def main():
"""
Provides a CLI entrypoint to access a database and export a subset of its
data in a specified format.
"""
args = get_args()
config = get_config(args.config)
connection_config = get_connection_config(config, args.connection)
engine = get_engine(connection_config)
export = get_exporter(args.format, config['exporters'])
connection = engine.connect()
meta = get_meta(engine)
resolver = connection_config['resolver']
data = query(connection, meta, resolver, args.query)
export(meta, data, args.output)
| 31.956522 | 77 | 0.726531 | from .args import get_args
from .config import get_config, get_connection_config, get_engine, get_meta
from .query import query
from .export import get_exporter
def main():
"""
Provides a CLI entrypoint to access a database and export a subset of its
data in a specified format.
"""
args = get_args()
config = get_config(args.config)
connection_config = get_connection_config(config, args.connection)
engine = get_engine(connection_config)
export = get_exporter(args.format, config['exporters'])
connection = engine.connect()
meta = get_meta(engine)
resolver = connection_config['resolver']
data = query(connection, meta, resolver, args.query)
export(meta, data, args.output)
| 0 | 0 |
aeb45e871f3a30dab24f8b244ee9dacf414f769a | 258 | py | Python | cride/circles/urls.py | daecazu/platziride | 79782770d05e71823c7eb27fec76a3870c737689 | [
"MIT"
] | null | null | null | cride/circles/urls.py | daecazu/platziride | 79782770d05e71823c7eb27fec76a3870c737689 | [
"MIT"
] | 2 | 2020-03-03T20:29:18.000Z | 2020-03-03T20:29:19.000Z | cride/circles/urls.py | daecazu/platziride | 79782770d05e71823c7eb27fec76a3870c737689 | [
"MIT"
] | null | null | null | """Circles URLs"""
# Django
from django.urls import path
# Views
from cride.circles.views import list_circles
from cride.circles.views import create_circle
urlpatterns = [
path ('circles/', list_circles),
path ('circles/create/', create_circle),
] | 19.846154 | 45 | 0.732558 | """Circles URLs"""
# Django
from django.urls import path
# Views
from cride.circles.views import list_circles
from cride.circles.views import create_circle
urlpatterns = [
path ('circles/', list_circles),
path ('circles/create/', create_circle),
] | 0 | 0 |
4cbc553ee05900bc18bb23ac67ed68a847770fd8 | 150 | py | Python | billing/__init__.py | xprilion/django-customer-billing | 82f8147d74ff62e84d9e57465b4d521434c48e49 | [
"MIT"
] | null | null | null | billing/__init__.py | xprilion/django-customer-billing | 82f8147d74ff62e84d9e57465b4d521434c48e49 | [
"MIT"
] | null | null | null | billing/__init__.py | xprilion/django-customer-billing | 82f8147d74ff62e84d9e57465b4d521434c48e49 | [
"MIT"
] | 1 | 2020-06-25T22:55:48.000Z | 2020-06-25T22:55:48.000Z | __version__ = '1.5.4'
__copyright__ = 'Copyright (c) 2018, Skioo SA'
__licence__ = 'MIT'
__URL__ = 'https://github.com/skioo/django-customer-billing'
| 30 | 60 | 0.726667 | __version__ = '1.5.4'
__copyright__ = 'Copyright (c) 2018, Skioo SA'
__licence__ = 'MIT'
__URL__ = 'https://github.com/skioo/django-customer-billing'
| 0 | 0 |
2a6cfbfefd1436bb7d9f94ae6539e54bf036f4d7 | 48,716 | py | Python | ShotgunORM/SgFields.py | jonykalavera/python-shotgunorm | 3b0a2b433030815631588ff709c8ffd3e9660476 | [
"BSD-3-Clause"
] | null | null | null | ShotgunORM/SgFields.py | jonykalavera/python-shotgunorm | 3b0a2b433030815631588ff709c8ffd3e9660476 | [
"BSD-3-Clause"
] | null | null | null | ShotgunORM/SgFields.py | jonykalavera/python-shotgunorm | 3b0a2b433030815631588ff709c8ffd3e9660476 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__all__ = [
'SgFieldCheckbox',
'SgFieldColor',
'SgFieldColor2',
'SgFieldDate',
'SgFieldDateTime',
'SgFieldEntity',
'SgFieldEntityMulti',
'SgFieldFloat',
'SgFieldID',
'SgFieldImage',
'SgFieldInt',
'SgFieldSelectionList',
'SgFieldTagList',
'SgFieldText',
'SgFieldType',
'SgFieldUrl'
]
# Python imports
import calendar
import copy
import datetime
import os
import re
import threading
import urllib2
import webbrowser
# This module imports
import ShotgunORM
class SgFieldCheckbox(ShotgunORM.SgField):
  '''
  Entity field that stores a bool value for a checkbox.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from Shotgun data coerced to a bool.

    Returns True when the stored value changed.
    '''
    try:
      newValue = bool(sgData)
    except:
      raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def returnType(self):
    return self.RETURN_TYPE_CHECKBOX

  def _setValue(self, sgData):
    '''
    Sets the field to the bool coercion of sgData.

    Returns True when the stored value changed.
    '''
    try:
      newValue = bool(sgData)
    except:
      raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))

    if newValue == self._value:
      return False

    self._value = newValue
    return True
class SgFieldColor(ShotgunORM.SgField):
  '''
  Entity field that stores a list of 3 ints that represent a rgb color 0-255.

  Example: [128, 128, 128]
  '''

  REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun "R,G,B" string (or None).

    Returns True when the stored value changed.

    Raises ValueError when sgData is not a valid color string.
    '''
    if sgData == None:
      # Bug fix: the change flag was inverted here; it reported True only
      # when the value was already None and never cleared a set value.
      result = self._value != None
      self._value = None
      return result

    try:
      # A non-string sgData makes match() raise, which is also rejected below.
      if not self.REGEXP_COLOR.match(sgData):
        raise ValueError('invalid value %s' % sgData)
    except Exception as e:
      ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
        'field': self,
        'error': e
      })
      raise ValueError('%s invalid data from Shotgun "%s", expected a list of ints' % (self, sgData))

    if self._value == sgData:
      return False

    self._value = sgData
    return True

  def returnType(self):
    return self.RETURN_TYPE_COLOR

  def _setValue(self, sgData):
    '''
    Sets the field from an "R,G,B" string or a sequence of three ints.

    Returns True when the stored value changed.

    Raises TypeError when sgData is neither a valid string nor a 3 item
    sequence.
    '''
    if sgData == None:
      # Bug fix: inverted change flag (see _fromFieldData).
      result = self._value != None
      self._value = None
      return result

    try:
      if isinstance(sgData, str):
        if not self.REGEXP_COLOR.match(sgData):
          raise ValueError('invalid value %s' % sgData)
      else:
        # Bug fix: this was "len(sgData != 3)", which always raised and
        # caused every list/tuple value to be rejected with a TypeError.
        if len(sgData) != 3:
          raise ValueError('invalid value %s' % sgData)

        # Normalize sequences to the internal "R,G,B" string form.
        sgData = '%d,%d,%d' % (sgData[0], sgData[1], sgData[2])
    except:
      raise TypeError('%s invalid value "%s", expected a list of three ints' % (self, sgData))

    if self._value == sgData:
      return False

    self._value = sgData
    return True

  def _Value(self):
    # Convert the internally stored "R,G,B" string into a list of ints.
    if self._value == None:
      return None

    return [int(i) for i in self._value.split(',')]
class SgFieldColor2(ShotgunORM.SgField):
  '''
  Entity field that stores a list of 3 ints that represent a rgb color 0-255.

  Fix the color return value for Task and Phase Entities color field.

  Task and Phase Entities can have their color field set to a value that points
  to the color field of the pipeline step or project they belong to, while
  still reporting the field type as "color".
  '''

  REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')
  REGEXP_TASK_COLOR = re.compile(r'(\d+,\d+,\d+)|(pipeline_step)')
  REGEXP_PHASE_COLOR = re.compile(r'(\d+,\d+,\d+)|(project)')

  def __init__(self, name, label=None, sgFieldSchemaInfo=None):
    super(SgFieldColor2, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)

    # The active regexp and link info depend on the parent Entity type and
    # are reconfigured by parentChanged().
    self._regexp = self.REGEXP_COLOR
    self._linkString = None
    self._linkField = None

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun color string (or the link string).

    Returns True when the stored value changed.
    '''
    if sgData == None:
      # Bug fix: the change flag was inverted here; it reported True only
      # when the value was already None and never cleared a set value.
      result = self._value != None
      self._value = None
      return result

    if not self._regexp.match(sgData):
      raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))

    if self._value == sgData:
      return False

    self._value = sgData
    return True

  def returnType(self):
    return self.RETURN_TYPE_COLOR2

  def _setValue(self, sgData):
    '''
    Sets the field from a color string, the link string or a 3 item list.

    Returns True when the stored value changed.
    '''
    if sgData == None:
      result = self._value != None
      self._value = None
      return result

    if isinstance(sgData, str):
      if not self._regexp.match(sgData):
        raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))
    else:
      if not isinstance(sgData, (tuple, list)):
        raise TypeError('%s invalid value type "%s", expected a list' % (self, type(sgData).__name__))

      if len(sgData) != 3:
        raise ValueError('%s list len is not 3' % self)

      # Normalize sequences to the internal "R,G,B" string form.
      try:
        sgData = '%d,%d,%d' % tuple(sgData)
      except:
        raise ValueError('%s invalid color values %s' % (self, sgData))

    if self._value == sgData:
      return False

    self._value = sgData
    return True

  def linkField(self):
    '''
    Returns the link field this color field can possibly link to.
    '''
    return self._linkField

  def parentChanged(self):
    '''
    Configures the validation regexp and link info for the parent type.
    '''
    parent = self.parentEntity()

    if parent == None:
      return

    pType = parent.schemaInfo().name()

    if pType == 'Task':
      self._regexp = self.REGEXP_TASK_COLOR
      self._linkString = 'pipeline_step'
      self._linkField = 'step'
    elif pType == 'Phase':
      self._regexp = self.REGEXP_PHASE_COLOR
      self._linkString = 'project'
      self._linkField = 'project'
    else:
      self._regexp = self.REGEXP_COLOR

  def value(self, linkEvaluate=True):
    '''
    Returns the color as a list of three ints.

    Args:
      * (bool) linkEvaluate:
        When True and the color field is a link to another Entity's color field
        the value of the linked color field will be returned.

        If linkEvaluate is False a string may be returned instead of a list.
    '''
    result = super(SgFieldColor2, self).value()

    if result == None:
      return None

    if result == self._linkString:
      if not linkEvaluate:
        return result

      # Follow the link to the pipeline step / project color field.
      parent = self.parentEntity()

      if parent == None:
        return None

      linkObj = parent[self._linkField]

      if linkObj == None:
        return None

      return linkObj['color']

    # Bug fix: the int list was previously built but never returned, making
    # value() always return None for explicitly set colors.
    return [int(i) for i in result.split(',')]
class SgFieldDate(ShotgunORM.SgField):
  '''
  Entity field that stores a date string

  Example: "1980-01-30".
  '''

  REGEXP = re.compile(r'^\d{4}-\d{2}-\d{2}')

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun "YYYY-MM-DD" value.

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      newValue = str(newValue)
      if self.REGEXP.match(newValue) == None:
        raise ValueError('%s invalid date string from Shotgun "%s"' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def returnType(self):
    return self.RETURN_TYPE_DATE

  def _setValue(self, sgData):
    '''
    Sets the field to a "YYYY-MM-DD" string (or None).

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      if not isinstance(newValue, (str, unicode)):
        raise TypeError('%s invalid type "%s", expected a string' % (self, type(newValue).__name__))

      newValue = str(newValue)
      if self.REGEXP.match(newValue) == None:
        raise ValueError('%s invalid date string "%s"' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue
    return True
class SgFieldDateTime(ShotgunORM.SgField):
  '''
  Entity field that stores a python datetime object.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun datetime value.

    Returns True when the stored value changed.
    '''
    newValue = None
    if sgData != None:
      # Store a copy so callers can not mutate internal state; timetuple()
      # drops sub-second precision, the tzinfo is kept.
      newValue = datetime.datetime(*sgData.timetuple()[:6], tzinfo=sgData.tzinfo)

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def returnType(self):
    return self.RETURN_TYPE_DATE_TIME

  def _setValue(self, sgData):
    '''
    Sets the field to a copy of the passed datetime (or None).

    Returns True when the stored value changed.
    '''
    newValue = None
    if sgData != None:
      if not isinstance(sgData, datetime.datetime):
        raise TypeError('%s invalid type "%s", expected a datetime obj' % (self, type(sgData).__name__))

      newValue = datetime.datetime(*sgData.timetuple()[:6], tzinfo=sgData.tzinfo)

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def _toFieldData(self):
    # Hand out a copy so the stored value can not be mutated by callers.
    if self._value == None:
      return None

    return datetime.datetime(*self._value.timetuple()[:6], tzinfo=self._value.tzinfo)

  def _Value(self):
    return self._toFieldData()
class SgFieldEntity(ShotgunORM.SgField):
  '''
  Entity field that stores a link to another Entity.
  '''
  ##############################################################################
  #
  # IMPORTANT!!!!
  #
  # Any changes to _fromFieldData, _setValue, _toFieldData, value functions
  # should also be applied to the SgUserFieldAbstractEntity class.
  #
  ##############################################################################
  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun Entity dict ({"type": ..., "id": ...}).

    Returns True when the stored value changed.
    '''
    if sgData == None:
      result = self._value != None
      self._value = None
      return result
    try:
      # Only keep the type/id keys; any extra keys in sgData are dropped.
      newValue = {
        'type': sgData['type'],
        'id': sgData['id']
      }
      # This fixes the two Entities as their name field is only available when
      # returned as another Entities field value.
      if newValue['type'] in ['AppWelcome', 'Banner'] and sgData.has_key('name'):
        newValue['name'] = sgData['name']
    except Exception, e:
      ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
        'field': self,
        'error': e
      })
      raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
    if newValue == self._value:
      return False
    # NOTE(review): "parent" is assigned but never used here.
    parent = self.parentEntity()
    self._value = newValue
    return True
  def returnType(self):
    return self.RETURN_TYPE_ENTITY
  def _setValue(self, sgData):
    '''
    Sets the field to link to the passed SgEntity (or None to clear).

    The Entity must already exist in Shotgun and must come from the same
    connection URL as this field's parent.

    Returns True when the stored value changed.
    '''
    if sgData == None:
      result = self._value != None
      self._value = None
      return result
    if not isinstance(sgData, ShotgunORM.SgEntity):
      raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(sgData).__name__))
    # When the schema restricts the linkable Entity types, enforce it.
    valueTypes = self.valueTypes()
    if valueTypes != None:
      if len(valueTypes) > 0:
        if not sgData.type in valueTypes:
          raise ValueError('not a valid value Entiy type: %s, valid=%s' % (sgData.type, valueTypes))
    if sgData['id'] == None:
      raise RuntimeError('can not set field value to a Entity that has not been created in Shotgun yet')
    parent = self.parentEntity()
    if parent == None:
      raise RuntimeError('field does not have a parent')
    connection = parent.connection()
    # Lord knows you shouldn't do this but if you build it people will try!
    if connection.url() != sgData.connection().url():
      raise ValueError('%s passed an Entity from another url' % self)
    if self._value == sgData:
      return False
    # Store only the Shotgun link dict, not the SgEntity object itself.
    self._value = sgData.toEntityFieldData()
    return True
  def _toFieldData(self):
    # Return a shallow copy of the stored link dict.
    if self._value == None:
      return None
    return dict(self._value)
  def value(self, sgSyncFields=None):
    '''
    Returns the fields value as a Entity object.
    Args:
      * (list) sgSyncFields:
        List of field names to populate the returned Entity with.
    '''
    value = super(SgFieldEntity, self).value()
    parent = self.parentEntity()
    if value == None or parent == None:
      return None
    connection = parent.connection()
    # sgSyncFields may be a dict keyed by parent type or a single field name.
    if isinstance(sgSyncFields, dict):
      sgSyncFields = sgSyncFields.get(parent.type, None)
    elif isinstance(sgSyncFields, str):
      sgSyncFields = [sgSyncFields]
    if sgSyncFields == None:
      # No explicit fields requested: fall back to the connection defaults.
      sgSyncFields = connection.defaultEntityQueryFields(value['type'])
      if len(sgSyncFields) <= 0:
        sgSyncFields = None
    else:
      # Expand the special "all" and "default" tokens into concrete fields.
      pullFields = set(sgSyncFields)
      extraFields = []
      if 'all' in pullFields:
        pullFields.remove('all')
        extraFields = parent.fieldNames()
        if 'default' in pullFields:
          pullFields.remove('default')
      elif 'default' in pullFields:
        pullFields.remove('default')
        extraFields = connection.defaultEntityQueryFields(value['type'])
      pullFields.update(extraFields)
      if len(pullFields) >= 1:
        sgSyncFields = list(pullFields)
      else:
        sgSyncFields = None
    result = connection._createEntity(
      value['type'],
      value,
      sgSyncFields=sgSyncFields
    )
    return result
class SgFieldEntityMulti(ShotgunORM.SgField):
  '''
  Entity field that stores a list of links to other Entities.
  Example: [Entity01, Entity02, ...]
  '''
  ##############################################################################
  #
  # IMPORTANT!!!!
  #
  # Any changes to _fromFieldData, _setValue, _toFieldData, value functions
  # should also be applied to the SgUserFieldAbstractMultiEntity class.
  #
  ##############################################################################
  def _fromFieldData(self, sgData):
    '''
    Updates the field from a list of Shotgun Entity dicts.

    Returns True when the stored value changed.
    '''
    if isinstance(sgData, (tuple, set)):
      sgData = list(sgData)
    if sgData in [None, []]:
      # NOTE(review): this change flag looks inverted - it returns True only
      # when the value was already empty, and a non-empty value is never
      # cleared here. Compare with SgFieldEntity._fromFieldData. Confirm
      # before changing, the base class relies on the returned flag.
      result = self._value in [None, []]
      if result:
        self._value = self.defaultValue()
      return result
    newValue = []
    try:
      for i in sgData:
        # Only keep the type/id keys; duplicates are silently dropped.
        e = {
          'type': i['type'],
          'id': i['id']
        }
        if e in newValue:
          continue
        # This fixes the two Entities as their name field is only available when
        # returned as another Entities field value.
        if e['type'] in ['AppWelcome', 'Banner'] and i.has_key('name'):
          e['name'] = i['name']
        newValue.append(e)
    except Exception, e:
      ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
        'field': self,
        'error': e
      })
      raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
    if self._value == newValue:
      return False
    self._value = newValue
    return True
  def returnType(self):
    return self.RETURN_TYPE_MULTI_ENTITY
  def _setValue(self, sgData):
    '''
    Sets the field to link to the passed SgEntity objects (a single SgEntity
    is accepted and treated as a one item list).

    Every Entity must already exist in Shotgun and, when the field has a
    parent, come from the same connection URL.

    Returns True when the stored value changed.
    '''
    if isinstance(sgData, (tuple, set)):
      sgData = list(sgData)
    if sgData in [None, []]:
      # NOTE(review): same suspected inverted change flag as in
      # _fromFieldData above.
      result = self._value in [None, []]
      if result:
        self._value = self.defaultValue()
      return result
    if isinstance(sgData, ShotgunORM.SgEntity):
      sgData = [sgData]
    elif not isinstance(sgData, list):
      raise TypeError('%s invalid value type "%s", expected a SgEntity or list' % (self, type(sgData).__name__))
    else:
      for i in sgData:
        if not isinstance(i, ShotgunORM.SgEntity):
          raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(i).__name__))
    # When the schema restricts the linkable Entity types, enforce it.
    valueTypes = self.valueTypes()
    if valueTypes != None:
      if len(valueTypes) > 0:
        for i in sgData:
          if not i.type in valueTypes:
            raise ValueError('not a valid value type: %s, valid=%s' % (i.type, valueTypes))
    parent = self.parentEntity()
    newValue = []
    if parent == None:
      for i in sgData:
        if i['id'] == None:
          raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
        edata = i.toEntityFieldData()
        if edata in newValue:
          continue
        newValue.append(edata)
    else:
      connection = parent.connection()
      for i in sgData:
        if i['id'] == None:
          raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
        # Lord knows you shouldn't do this but if you build it people will try!
        if connection.url() != i.connection().url():
          raise ValueError('%s passed an Entity from another url' % self)
        edata = i.toEntityFieldData()
        if edata in newValue:
          continue
        newValue.append(edata)
    if self._value == newValue:
      return False
    self._value = newValue
    return True
  def _toFieldData(self):
    # Return shallow copies of the stored link dicts.
    if self._value == None:
      return None
    result = []
    for i in self._value:
      result.append(dict(i))
    return result
  def value(self, sgSyncFields=None):
    '''
    Returns the fields value as a list of Entity objects.
    Args:
      * (dict) sgSyncFields:
        Dict of entity types and field names to populate the returned Entities
        with.
    '''
    result = super(SgFieldEntityMulti, self).value()
    if result in [None, []]:
      return result
    parent = self.parentEntity()
    if parent == None:
      return copy.deepcopy(result)
    connection = parent.connection()
    schema = connection.schema()
    tmp = []
    qEng = connection.queryEngine()
    # Block the query engine so the per-Entity creation below can be batched;
    # unblocked in the finally clause.
    qEng.block()
    try:
      for i in result:
        t = i['type']
        iSyncFields = None
        # Resolve which fields to sync for this Entity type, expanding the
        # special "all" and "default" tokens into concrete field names.
        if sgSyncFields != None:
          if sgSyncFields.has_key(t):
            iFields = sgSyncFields[t]
            if iFields == None:
              iSyncFields = connection.defaultEntityQueryFields(t)
              if len(iSyncFields) <= 0:
                iSyncFields = None
            else:
              pullFields = []
              if isinstance(iFields, str):
                pullFields = set([iFields])
              else:
                pullFields = set(iFields)
              extraFields = []
              if 'all' in pullFields:
                pullFields.remove('all')
                extraFields = schema.entityInfo(t).fieldNames()
                if 'default' in pullFields:
                  pullFields.remove('default')
              elif 'default' in pullFields:
                pullFields.remove('default')
                extraFields = connection.defaultEntityQueryFields(t)
              pullFields.update(extraFields)
              if len(pullFields) >= 1:
                iSyncFields = list(pullFields)
              else:
                iSyncFields = None
          else:
            iSyncFields = connection.defaultEntityQueryFields(t)
            if len(iSyncFields) <= 0:
              iSyncFields = None
        else:
          iSyncFields = connection.defaultEntityQueryFields(t)
        entity = connection._createEntity(t, i, sgSyncFields=iSyncFields)
        tmp.append(entity)
    finally:
      qEng.unblock()
    return tmp
class SgFieldFloat(ShotgunORM.SgField):
  '''
  Entity field that stores a float.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from Shotgun data coerced to a float.

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      try:
        newValue = float(newValue)
      except:
        raise ValueError('%s invalid data from Shotgun "%s", expected a float' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def returnType(self):
    return self.RETURN_TYPE_FLOAT

  def _setValue(self, sgData):
    '''
    Sets the field to the float coercion of sgData (or None).

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      try:
        newValue = float(newValue)
      except:
        raise TypeError('%s invalid value type "%s", expected a float' % (self, type(newValue).__name__))

    if newValue == self._value:
      return False

    self._value = newValue
    return True
class SgFieldInt(ShotgunORM.SgField):
  '''
  Entity field that stores an integer.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from Shotgun data coerced to an int.

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      try:
        newValue = int(newValue)
      except:
        raise ValueError('%s invalid data from Shotgun "%s", expected a int' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue
    return True

  def returnType(self):
    return self.RETURN_TYPE_INT

  def _setValue(self, sgData):
    '''
    Sets the field to the int coercion of sgData (or None).

    Returns True when the stored value changed.
    '''
    newValue = sgData
    if newValue != None:
      try:
        newValue = int(newValue)
      except:
        raise TypeError('%s invalid value type "%s", expected a int' % (self, type(newValue).__name__))

    if newValue == self._value:
      return False

    self._value = newValue
    return True
class SgFieldSelectionList(ShotgunORM.SgField):
  '''
  Entity field that stores a text string that is from a list selection.

  The field may contain a list of valid values which when the field is set are
  compared and an Exception thrown when the value is not a valid one.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun selection string (or None).

    Returns True when the stored value changed.
    '''
    if sgData == None:
      # Bug fix: the change flag was inverted here; it reported True only
      # when the value was already None (matching the True-on-change
      # convention used by the other field classes).
      result = self._value != None
      self._value = None
      return result

    if self._value == sgData:
      return False

    self._value = sgData
    return True

  def returnType(self):
    return self.RETURN_TYPE_LIST

  def _setValue(self, sgData):
    '''
    Sets the field, validating against validValues() when any are defined.

    Returns True when the stored value changed.

    Raises ValueError when sgData is not one of the valid values.
    '''
    if sgData == None:
      # Bug fix: previously the stored value was only cleared when it was
      # already None (the condition was inverted), so setting None never
      # actually cleared the field, and the change flag was inverted too.
      result = self._value != None
      self._value = None
      return result

    if not isinstance(sgData, (str, unicode)):
      raise TypeError('%s invalid type "%s", expected a string' % (self, type(sgData).__name__))

    sgData = str(sgData)

    if self._value == sgData:
      return False

    # Only validate when the schema actually defines a value list.
    validValues = self.validValues()
    if len(validValues) > 0:
      if not sgData in validValues:
        raise ValueError('%s invalid value "%s"' % (self, sgData))

    self._value = sgData
    return True
class SgFieldSerializable(ShotgunORM.SgField):
  '''
  Entity field that stores serializable data.
  '''

  def _fromFieldData(self, sgData):
    '''
    Updates the field from a Shotgun dict; empty data normalizes to None.

    Returns True when the stored value changed.
    '''
    if sgData in [None, {}]:
      # Bug fix: previously the field was only cleared (and the change flag
      # only returned True) when the stored value was already empty, so a
      # non-empty value could never be cleared from Shotgun data.
      result = self._value not in [None, {}]
      self._value = None
      return result

    if not isinstance(sgData, dict):
      raise ValueError('%s invalid data from Shotgun "%s", expected a dict' % (self, sgData))

    if self._value == sgData:
      return False

    # Deep copy so later mutation of the caller's dict can not affect us.
    self._value = copy.deepcopy(sgData)
    return True

  def returnType(self):
    return self.RETURN_TYPE_SERIALIZABLE

  def _setValue(self, sgData):
    '''
    Sets the field to a deep copy of the passed dict (or None to clear).

    Returns True when the stored value changed.
    '''
    if sgData == None:
      # Bug fix: inverted clear condition / change flag (see _fromFieldData).
      result = self._value != None
      self._value = None
      return result

    if not isinstance(sgData, dict):
      raise TypeError('%s invalid value type "%s", expected a dict' % (self, type(sgData).__name__))

    if self._value == sgData:
      return False

    self._value = copy.deepcopy(sgData)
    return True

  def _toFieldData(self):
    # Return a deep copy so callers can not mutate internal state.
    if self._value == None:
      return None

    return copy.deepcopy(self._value)

  def _Value(self):
    return self._toFieldData()
class SgFieldSummary(ShotgunORM.SgField):
'''
Entity field that returns an Entity or list of Entities based on a search
expression.
Summary fields.
'''
DATE_REGEXP = re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) UTC')
  def __init__(self, name, label=None, sgFieldSchemaInfo=None):
    '''
    Caches the summary configuration from the field schema info.

    Raises RuntimeError when the schema info carries no summary info.
    '''
    super(SgFieldSummary, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)
    # Guards lazy construction of the search filter in _valueSg().
    self.__buildLock = threading.Lock()
    summaryInfo = self.schemaInfo().summaryInfo()
    if summaryInfo == None:
      raise RuntimeError('invalid field schema info for summary info')
    self._entityType = summaryInfo['entity_type']
    self._filtersRaw = summaryInfo['filters']
    self._summaryType = summaryInfo['summary_type']
    self._summaryField = summaryInfo['summary_field']
    self._summaryValue = summaryInfo['summary_value']
    # Built lazily by _buildSearchFilter(); reset by _invalidate().
    self._searchFilter = None
def _buildLogicalOp(self, conditions, info):
'''
Builds the logical operator search pattern and returns it.
'''
result = []
parent = self.parentEntity()
connection = parent.connection()
for c in conditions:
if c.has_key('logical_operator'):
logicalOp = {
'conditions': self._buildLogicalOp(c['conditions'], info),
'logical_operator': c['logical_operator']
}
result.append(logicalOp)
else:
newValues = []
cInfo = info.fieldInfo(c['path'])
cType = cInfo.returnType()
########################################################################
#
# Date and Date Time fields
#
########################################################################
if cType in [ShotgunORM.SgField.RETURN_TYPE_DATE, ShotgunORM.SgField.RETURN_TYPE_DATE_TIME]:
# http://stackoverflow.com/a/13287083
def utc_to_local(utc_dt):
# get integer timestamp to avoid precision lost
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
for v in c['values']:
if isinstance(v, dict):
if v.has_key('relative_day'):
time = datetime.time(*v['time'])
date = datetime.date.today()
rd = v['relative_day']
if rd == 'tomorrow':
date = date.replace(day=date.day + 1)
elif rd == 'yesterday':
date = date.replace(day=date.day - 1)
dt = datetime.datetime.combine(date, time)
# Relative day calcs use utc time!
dt.replace(tzinfo=None)
newValues.append(dt)
else:
newValues.append(v)
elif isinstance(v, str):
search = DATE_REGEXP.match(v)
if search:
time = datetime.time(search.group(4), search.group(5), search.group(6))
date = datetime.date(search.group(1), search.group(2), search.group(3))
dt = datetime.datetime.combine(date, time)
dt.replace(tzinfo=None)
newValues.append(utc_to_local(dt))
else:
newValues.append(v)
########################################################################
#
# Entity and Multi-Entity fields
#
########################################################################
elif cType in [ShotgunORM.SgField.RETURN_TYPE_ENTITY, ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY]:
for v in c['values']:
if v['name'] == 'Current %s' % parent.type:
newValues.append(parent.toEntityFieldData())
elif v['name'] == 'Me':
login = os.getenv('USERNAME')
user = connection.findOne('HumanUser', [['login', 'is', login]], ['login'])
if user == None:
raise RuntimError('summary field unable to find user "%s" in Shotgun' % login)
newValues.append(user.toEntityFieldData())
else:
newValues.append(v)
else:
# Do nothing
newValues = c['values']
c['values'] = newValues
del c['active']
result.append(c)
return result
def _buildSearchFilter(self):
  '''
  Builds the Shotgun logical-operator search filter from the raw summary
  filters and caches it on self._searchFilter.
  '''
  rawFilters = copy.deepcopy(self._filtersRaw)

  entityInfo = self.parentEntity().connection().schema().entityInfo(
    self.entityType()
  )

  self._searchFilter = {
    'conditions': self._buildLogicalOp(rawFilters['conditions'], entityInfo),
    'logical_operator': rawFilters['logical_operator']
  }
def _fromFieldData(self, sgData):
  '''
  Stores the summary data returned by Shotgun.

  Returns True when the stored value changed, False when the incoming data
  equals the current value.
  '''
  if self._value == sgData:
    return False

  self._value = sgData

  return True
def returnType(self):
  '''
  Returns the field return type (RETURN_TYPE_SUMMARY).
  '''
  return self.RETURN_TYPE_SUMMARY
def _toFieldData(self):
  '''
  Returns the raw field data.

  Dict values are deep copied so callers can not mutate the cached value;
  all other values (including None) are returned as-is.
  '''
  value = self._value

  if isinstance(value, dict):
    return copy.deepcopy(value)

  return value
def entityType(self):
  '''
  Returns the type (string) of Entity the summary field will search and
  summarize.
  '''
  return self._entityType
def hasCommit(self):
  '''
  Always returns False for summary fields.

  Summary values are computed from a search and are never pushed back to
  Shotgun.
  '''
  return False
def _invalidate(self):
  '''
  Deletes the cached search filter so it is built again on the next query.
  '''
  self._searchFilter = None
def isEditable(self):
  '''
  Always return False for summary fields; their value is computed and can
  not be edited.
  '''
  return False
def isQueryable(self):
  '''
  Even though summary fields can be queried from Shotgun return False.
  '''
  return False
def setHasCommit(self, valid):
  '''
  Summary fields can't be committed, always returns False.
  '''
  return False
def setHasSyncUpdate(self, valid):
  '''
  Summary fields can't be queried so thus they can not be background
  pulled.  Always returns False.
  '''
  return False
def _setValue(self, value):
  '''
  Always return False for summary fields, they can not be set.
  The "value" argument is ignored.
  '''
  return False
def _valueSg(self):
  '''
  Computes and returns the summary value by querying Shotgun.

  Returns None when the field has no parent Entity or the parent does not
  exist in Shotgun.  Otherwise the result depends on self._summaryType:
  single_record, status_percentage, status_list, record_count, count,
  sum, min, max, avg or percentage.
  '''
  parent = self.parentEntity()

  if parent == None or not parent.exists():
    return None

  connection = parent.connection()

  # Lazily build (and cache) the search filter the summary is computed from.
  with self.__buildLock:
    if self._searchFilter == None:
      self._buildSearchFilter()

    searchExp = self._searchFilter

  result = None

  ############################################################################
  #
  # Single record
  #
  ############################################################################
  if self._summaryType == 'single_record':
    # Sort by the configured column/direction and return the first record.
    order = [
      {
        'field_name': self._summaryValue['column'],
        'direction': self._summaryValue['direction']
      }
    ]

    result = connection._sg_find_one(self.entityType(), searchExp, order=order)

  ############################################################################
  #
  # Status percentage and list
  #
  ############################################################################
  elif self._summaryType.startswith('status_'):
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if self._summaryType == 'status_percentage':
      # Fraction of found Entities whose status equals self._summaryValue.
      if len(sgSearch) <= 0:
        result = 0
      else:
        validCount = 0

        for e in sgSearch:
          value = e.field(self._summaryField).value()

          if value == self._summaryValue:
            validCount += 1

        if validCount <= 0:
          result = 0.0
        else:
          result = float(validCount) / len(sgSearch)
    elif self._summaryType == 'status_list':
      # The common status of all found Entities, or 'ip' when they differ.
      if len(sgSearch) <= 0:
        result = 'ip'
      else:
        value = sgSearch[0].field(self._summaryField).value()

        for e in sgSearch[1:]:
          v = e.field(self._summaryField).value()

          if v != value:
            # I have no clue why Shotgun always defaults this result to ip
            # but whatevs yo.
            value = 'ip'

            break

        result = value

  ############################################################################
  #
  # Record count
  #
  ############################################################################
  elif self._summaryType == 'record_count':
    # Dont use the orm for this search, waste to build the classes when all
    # we are doing is getting a len on the search result.
    sgSearch = connection._sg_find(self.entityType(), searchExp)

    result = len(sgSearch)
  elif self._summaryType == 'count':
    # Count only records whose summary field is not empty by and-ing an
    # "is_not None" condition onto the cached search filter.
    searchExp = {
      'conditions': [
        searchExp,
        {
          #'active': 'true',
          'path': self._summaryField,
          'relation': 'is_not',
          'values': [None]
        }
      ],
      'logical_operator': 'and'
    }

    # Dont use the orm for this search, waste to build the classes when all
    # we are doing is getting a len on the search result.
    sgSearch = connection._sg_find(self.entityType(), searchExp, fields=[])

    result = len(sgSearch)

  ############################################################################
  #
  # Sum
  #
  ############################################################################
  elif self._summaryType == 'sum':
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if len(sgSearch) <= 0:
      result = 0
    else:
      value = 0

      for e in sgSearch:
        v = e.field(self._summaryField).value()

        if v != None:
          value += v

      result = value

  ############################################################################
  #
  # Min
  #
  ############################################################################
  elif self._summaryType == 'min':
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if len(sgSearch) <= 0:
      result = None
    else:
      # NOTE(review): assumes the first record's value is not None -- a None
      # seed would make min() return None for the whole set.  Confirm.
      value = sgSearch[0].field(self._summaryField).value()

      for e in sgSearch[1:]:
        v = e.field(self._summaryField).value()

        if v != None:
          value = min(v, value)

      result = value

  ############################################################################
  #
  # Max
  #
  ############################################################################
  elif self._summaryType == 'max':
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if len(sgSearch) <= 0:
      result = None
    else:
      value = sgSearch[0].field(self._summaryField).value()

      for e in sgSearch[1:]:
        v = e.field(self._summaryField).value()

        if v != None:
          value = max(v, value)

      result = value

  ############################################################################
  #
  # Average
  #
  ############################################################################
  elif self._summaryType == 'avg':
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if len(sgSearch) <= 0:
      result = 0
    else:
      # NOTE(review): a None field value here raises on "+=" -- unlike the
      # 'sum' branch this does not skip None values.  Confirm intended.
      value = sgSearch[0].field(self._summaryField).value()

      for e in sgSearch[1:]:
        v = e.field(self._summaryField).value()

        if v != None:
          value += v

      value = float(value) / len(sgSearch)

      result = value

  ############################################################################
  #
  # Percentage
  #
  ############################################################################
  elif self._summaryType == 'percentage':
    sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

    if len(sgSearch) <= 0:
      result = 0
    else:
      value = 0

      for e in sgSearch:
        if e.field(self._summaryField).value() == self._summaryValue:
          value += 1

      if value >= 1:
        value = float(value) / len(sgSearch)

      result = value

  return result
def _Value(self):
  '''
  Returns the field's value.

  single_record summaries are converted into an Entity object when the
  field has a parent; otherwise a deep copy of the raw data is returned.
  '''
  value = self._value

  if value == None:
    return None

  if self._summaryType == 'single_record':
    parent = self.parentEntity()

    if parent != None:
      return parent.connection()._createEntity(value['type'], value)

  return copy.deepcopy(value)
class SgFieldTagList(ShotgunORM.SgField):
  '''
  Entity field that stores a list of strings.

  The field may contain a list of valid values which when the field is set
  are compared and an Exception thrown when the value is not a valid one.
  '''

  def _fromFieldData(self, sgData):
    '''
    Sets the field from Shotgun data (a list of strings or None).

    Returns True when the stored value changed.

    Raises TypeError for non-string items and ValueError for items not in
    validValues().
    '''
    if isinstance(sgData, (tuple, set)):
      sgData = list(sgData)

    if sgData in [None, []]:
      # NOTE(review): returns True when the field was *already* empty and
      # False otherwise -- this mirrors the pattern used by the multi-entity
      # field in this file; confirm the intended commit semantics.
      result = self._value in [None, []]

      if result:
        self._value = self.defaultValue()

      return result

    for i in sgData:
      if not isinstance(i, str):
        raise TypeError('%s invalid type "%s" in value "%s", expected a string' % (self, type(i).__name__, sgData))

    # De-dupe the incoming values.
    sgData = list(set(sgData))

    validValues = self.validValues()

    if len(validValues) > 0:
      for i in sgData:
        if not i in validValues:
          # Bug fix: the ValueError used to be constructed but never raised,
          # silently allowing invalid values through.
          raise ValueError('%s invalid value "%s", valid %s' % (self, i, validValues))

    if self._value == sgData:
      return False

    self._value = sgData

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_TAG_LIST).
    '''
    return self.RETURN_TYPE_TAG_LIST

  def _setValue(self, sgData):
    '''
    Sets the field value.  See _fromFieldData() for the validation rules.
    '''
    if isinstance(sgData, (tuple, set)):
      sgData = list(sgData)

    if sgData in [None, []]:
      result = self._value in [None, []]

      if result:
        self._value = self.defaultValue()

      return result

    for i in sgData:
      if not isinstance(i, str):
        raise TypeError('%s invalid type "%s" in value "%s", expected a string' % (self, type(i).__name__, sgData))

    sgData = list(set(sgData))

    validValues = self.validValues()

    if len(validValues) > 0:
      for i in sgData:
        if not i in validValues:
          # Bug fix: the ValueError used to be constructed but never raised.
          raise ValueError('%s invalid value "%s", valid %s' % (self, i, validValues))

    if self._value == sgData:
      return False

    self._value = sgData

    return True

  def _toFieldData(self):
    '''
    Returns the value as a new list, or None when un-set.
    '''
    result = self._value

    if result == None:
      return None

    return list(result)

  def _Value(self):
    '''
    Returns the field value (a copy of the tag list).
    '''
    return self._toFieldData()
class SgFieldText(ShotgunORM.SgField):
  '''
  Entity field that stores a str.
  '''

  def _fromFieldData(self, sgData):
    '''
    Sets the field from Shotgun data.

    Returns True when the stored value changed.
    '''
    # Bug fix: previously a None value from Shotgun was run through str()
    # and stored as the literal string "None" instead of clearing the field.
    if sgData != None:
      sgData = str(sgData)

    if self._value == sgData:
      return False

    self._value = sgData

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_TEXT).
    '''
    return self.RETURN_TYPE_TEXT

  def _setValue(self, sgData):
    '''
    Sets the field value from a str/unicode or None.

    Raises TypeError for any other type.  Returns True when the stored
    value changed.
    '''
    if sgData != None:
      if not isinstance(sgData, (str, unicode)):
        raise TypeError('%s invalid value type "%s", expected a str' % (self, type(sgData).__name__))

      sgData = str(sgData)

    if self._value == sgData:
      return False

    self._value = sgData

    return True
class SgFieldImage(SgFieldText):
'''
See SgFieldText.
'''
def downloadThumbnail(self, path):
'''
Downloads the image to the specified path.
'''
url = self.value()
if url == None or url == '':
raise ValueError('%s value is empty' % self)
if os.path.exists(path) and os.path.isdir(path):
raise OSError('output path "%s" is a directory' % path)
try:
data = urllib2.urlopen(url)
f = open(path, 'w')
f.write(data.read())
f.close()
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise RuntimeError('%s an error occured while downloading the file' % self)
return True
def openInBrowser(self):
'''
Opens the image in a web-browser
'''
url = self.value()
if url == None:
url = ''
webbrowser.open(url)
def returnType(self):
return self.RETURN_TYPE_IMAGE
def uploadThumbnail(self, path):
'''
Uploads the specified image file and sets it as the Entities thumbnail.
Returns the Attachment id.
'''
parent = self.parentEntity()
if not parent.exists():
raise RuntimeError('parent entity does not exist')
with self:
if self.hasCommit():
raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
parent = self.parentEntity()
if parent == None or not parent.exist():
raise RuntimeError('parent entity does not exists')
sgconnection = parent.connection().connection()
with ShotgunORM.SHOTGUN_API_LOCK:
sgResult = sgconnection.upload_thumbnail(parent.type, parent['id'], path)
parent.sync([self.name()])
return sgResult
def uploadFilmstripThumbnail(self, path):
'''
Uploads the specified image file and sets it as the Entities flimstrip
thumbnail.
Returns the Attachment id.
Note:
This function is only valid for Version Entities.
'''
with self:
if self.hasCommit():
raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
parent = self.parentEntity()
if not parent.type == 'Version':
raise RuntimeError('only valid on Version Entities')
if parent == None or not parent.exist():
raise RuntimeError('parent entity does not exists')
sgconnection = parent.connection().connection()
sgResult = sgconnection.upload_filmstrip_thumbnail(parent.type, parent['id'], path)
parent.sync([self.name()])
return sgResult
class SgFieldUrl(ShotgunORM.SgField):
'''
Entity field that stores a url.
Example URL: {
'content_type': 'image/jpeg',
'link_type': 'upload',
'name': 'bob.jpg',
'url': 'http://www.owned.com/bob.jpg'
}
Example Local: {
'content_type': 'image/jpeg',
'link_type': 'local',
'name': 'bob.jpg',
'local_storage': 'c:/temp/bob.jpg'
}
'''
def _fromFieldData(self, sgData):
result = {}
if sgData == None:
result = self._value == None
if not result:
self._value = None
return result
if not isinstance(sgData, dict):
raise TypeError('%s invalid sgData "%s", expected a dict or string' % (self, sgData))
try:
result['link_type'] = sgData['link_type'].lower()
if result['link_type'] in ['upload', 'web']:
result['url'] = sgData['url']
else:
result['local_storage'] = sgData['local_storage']
result['name'] = sgData['name']
result['content_type'] = sgData.get('content_type', None)
except Exception, e:
ShotgunORM.LoggerField.warn(e)
raise TypeError('%s invalid sgData dict "%s"' % (self, sgData))
if not result['link_type'] in ['local', 'upload', 'web']:
raise ValueError('%s invalid link_type "%s"' % (self, result['link_type']))
if self._value == result:
return False
self._value = result
return True
def returnType(self):
return self.RETURN_TYPE_URL
def setValue(self, sgData):
return self.fromFieldData(sgData)
def _toFieldData(self):
if self._value == None:
return None
return copy.deepcopy(self._value)
def _Value(self):
return self._toFieldData()
def url(self, openInBrowser=False):
'''
Returns the url value.
When the arg "openInBrowser" is set to True then the returned URL will
also be opened in the operating systems default web-browser.
'''
data = self.value()
result = ''
if data == None:
result = ''
else:
try:
result = data['url']
except:
pass
if openInBrowser:
webbrowser.open(url)
return result
# Register the default field classes with ShotgunORM so schema parsing can
# construct the correct SgField subclass for each Shotgun return type.
# Note: RETURN_TYPE_LIST and RETURN_TYPE_STATUS_LIST both map to
# SgFieldSelectionList.
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_CHECKBOX, SgFieldCheckbox)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR, SgFieldColor)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR2, SgFieldColor2)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE, SgFieldDate)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE_TIME, SgFieldDateTime)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_ENTITY, SgFieldEntity)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_FLOAT, SgFieldFloat)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_IMAGE, SgFieldImage)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_INT, SgFieldInt)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY, SgFieldEntityMulti)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SERIALIZABLE, SgFieldSerializable)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_STATUS_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SUMMARY, SgFieldSummary)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TAG_LIST, SgFieldTagList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TEXT, SgFieldText)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_URL, SgFieldUrl)
################################################################################
#
# Custom fields
#
################################################################################
class SgFieldID(SgFieldInt):
  '''
  Field that returns the parent Entities id.

  The field is created valid and never expires, commits or queries Shotgun.
  (The previous docstring incorrectly said "Type".)
  '''

  # Do not allow the field to lock, no point in it.
  def __enter__(self):
    pass

  def __exit__(self, exc_type, exc_value, traceback):
    # Returning False propagates any exception raised inside the with block.
    return False

  def __init__(self, parentEntity, sgFieldSchemaInfo):
    super(SgFieldID, self).__init__(None, None, sgFieldSchemaInfo)

    # Bypass the public setters; attach the parent and mark the field valid
    # so it never pulls from Shotgun.
    self._SgField__setParentEntity(parentEntity)

    self._SgField__valid = True

  def invalidate(self):
    '''
    Does nothing for ID fields.
    '''
    return False

  def isCacheable(self):
    '''
    Always returns False for ID fields.
    '''
    return False

  def setHasSyncUpdate(self, valid):
    '''
    Always returns False for ID fields.
    '''
    return False

  def setValid(self, valid):
    '''
    Always returns False for ID fields.
    '''
    return False

  def setValueFromShotgun(self):
    '''
    Always returns False for ID fields.
    '''
    return False

  def validate(self, forReal=False, force=False):
    '''
    Always returns False for ID fields.
    '''
    return False

  def value(self):
    '''
    Returns the value of the ID field.
    '''
    return self._value

  def _valueSg(self):
    '''
    Returns the value of the ID field.

    For ID fields this will never query Shotgun.
    '''
    return self._value
class SgFieldType(SgFieldText):
  '''
  Field that returns the parent Entities Type.

  The field is created valid and never expires, commits or queries Shotgun.
  '''

  # Do not allow the field to lock, no point in it.
  def __enter__(self):
    pass

  def __exit__(self, exc_type, exc_value, traceback):
    # Returning False propagates any exception raised inside the with block.
    return False

  def __init__(self, parentEntity, sgFieldSchemaInfo):
    super(SgFieldType, self).__init__(None, None, sgFieldSchemaInfo)

    # Bypass the public setters; attach the parent and mark the field valid
    # so it never pulls from Shotgun.
    self._SgField__setParentEntity(parentEntity)

    self._SgField__valid = True

  def invalidate(self):
    '''
    Always returns False for Type fields.
    '''
    return False

  def isCacheable(self):
    '''
    Always returns False for Type fields.
    '''
    return False

  def setHasSyncUpdate(self, valid):
    '''
    Always returns False for Type fields.
    '''
    return False

  def setValid(self, valid):
    '''
    Always returns False for Type fields.
    '''
    return False

  def setValueFromShotgun(self):
    '''
    Always returns False for Type fields.
    '''
    return False

  def validate(self, forReal=False, force=False):
    '''
    Always returns False for Type fields.
    '''
    return False

  def value(self):
    '''
    Returns the Entity type the field belongs to.
    '''
    return self._value

  def _valueSg(self):
    '''
    Returns the Entity type the field belongs to.

    For Type fields this will never query Shotgun.
    '''
    return self._value
| 24.956967 | 132 | 0.59812 | # Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Names exported by "from <module> import *".  Some classes defined in this
# module (e.g. SgFieldSummary, SgFieldSerializable) are not part of the
# export list.
__all__ = [
  'SgFieldCheckbox',
  'SgFieldColor',
  'SgFieldColor2',
  'SgFieldDate',
  'SgFieldDateTime',
  'SgFieldEntity',
  'SgFieldEntityMulti',
  'SgFieldFloat',
  'SgFieldID',
  'SgFieldImage',
  'SgFieldInt',
  'SgFieldSelectionList',
  'SgFieldTagList',
  'SgFieldText',
  'SgFieldType',
  'SgFieldUrl'
]
# Python imports
import copy
import datetime
import os
import re
import threading
import urllib2
import webbrowser
# This module imports
import ShotgunORM
class SgFieldCheckbox(ShotgunORM.SgField):
  '''
  Entity field that stores a bool value for a checkbox.
  '''

  def _fromFieldData(self, sgData):
    '''
    Sets the field from Shotgun data, coercing it to a bool.

    Returns True when the stored value changed.
    '''
    try:
      newValue = bool(sgData)
    except:
      raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))

    if newValue == self._value:
      return False

    self._value = newValue

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_CHECKBOX).
    '''
    return self.RETURN_TYPE_CHECKBOX

  def _setValue(self, sgData):
    '''
    Sets the field value, coercing it to a bool.

    Returns True when the stored value changed.
    '''
    try:
      newValue = bool(sgData)
    except:
      raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))

    if newValue == self._value:
      return False

    self._value = newValue

    return True
class SgFieldColor(ShotgunORM.SgField):
'''
Entity field that stores a list of 3 ints that represent a rgb color 0-255.
Example: [128, 128, 128]
'''
REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = None
return result
try:
if not self.REGEXP_COLOR.match(sgData):
raise ValueError('invalid value %s' % sgData)
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a list of ints' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_COLOR
def _setValue(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = sgData
return result
try:
if isinstance(sgData, str):
if not self.REGEXP_COLOR.match(sgData):
raise ValueError('invalid value %s' % sgData)
else:
if len(sgData != 3):
raise ValueError('invalid value %s' % sgData)
sgData = '%d,%d,%d' % (sgData[0], sgData[1], sgData[2])
except:
raise TypeError('%s invalid value "%s", expected a list of three ints' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def _Value(self):
if self._value == None:
return None
result = []
for i in self._value.split(','):
result.append(int(i))
return result
class SgFieldColor2(ShotgunORM.SgField):
  '''
  Entity field that stores a list of 3 ints that represent a rgb color 0-255.

  Fix the color return value for Task and Phase Entities color field.

  Task and Phase Entities can have their color field set to a value that
  points to the color field of the pipeline step or project they belong to.

  Brilliant engineering to still call the return type "color" and not
  differentiate the two I know right?
  '''

  REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')
  REGEXP_TASK_COLOR = re.compile(r'(\d+,\d+,\d+)|(pipeline_step)')
  REGEXP_PHASE_COLOR = re.compile(r'(\d+,\d+,\d+)|(project)')

  def __init__(self, name, label=None, sgFieldSchemaInfo=None):
    super(SgFieldColor2, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)

    # These are adjusted in parentChanged() once the parent Entity type is
    # known (Task links to its pipeline step, Phase to its project).
    self._regexp = self.REGEXP_COLOR
    self._linkString = None
    self._linkField = None

  def _fromFieldData(self, sgData):
    '''
    Sets the field from Shotgun data ("r,g,b", the link string or None).

    Returns True when the stored value changed.
    '''
    if sgData == None:
      # Bug fix: the change flag used to be inverted for None values
      # (returned True when nothing changed and False when it did).
      result = self._value != None

      self._value = None

      return result

    if not self._regexp.match(sgData):
      raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))

    if self._value == sgData:
      return False

    self._value = sgData

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_COLOR2).
    '''
    return self.RETURN_TYPE_COLOR2

  def _setValue(self, sgData):
    '''
    Sets the field value from an "r,g,b" string, the link string, a
    sequence of three ints or None.
    '''
    if sgData == None:
      result = self._value != None

      self._value = None

      return result

    if isinstance(sgData, str):
      if not self._regexp.match(sgData):
        raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))
    else:
      if not isinstance(sgData, (tuple, list)):
        raise TypeError('%s invalid value type "%s", expected a list' % (self, type(sgData).__name__))

      if len(sgData) != 3:
        raise ValueError('%s list len is not 3' % self)

      try:
        sgData = '%d,%d,%d' % tuple(sgData)
      except:
        raise ValueError('%s invalid color values %s' % (self, sgData))

    if self._value == sgData:
      return False

    self._value = sgData

    return True

  def linkField(self):
    '''
    Returns the link field this color field can possibly link to.
    '''
    return self._linkField

  def parentChanged(self):
    '''
    Updates the accepted value pattern and link field based on the parent
    Entity type (Task -> pipeline_step/step, Phase -> project/project).
    '''
    parent = self.parentEntity()

    if parent == None:
      return

    pType = parent.schemaInfo().name()

    if pType == 'Task':
      self._regexp = self.REGEXP_TASK_COLOR
      self._linkString = 'pipeline_step'
      self._linkField = 'step'
    elif pType == 'Phase':
      self._regexp = self.REGEXP_PHASE_COLOR
      self._linkString = 'project'
      self._linkField = 'project'
    else:
      # Bug fix: also clear the link attributes so a stale link string from
      # a previous parent type is not kept around.
      self._regexp = self.REGEXP_COLOR
      self._linkString = None
      self._linkField = None

  def value(self, linkEvaluate=True):
    '''
    Returns the color as a list of three ints, or None.

    Args:
      * (bool) linkEvaluate:
        When True and the color field is a link to another Entity's color field
        the value of the linked color field will be returned.

        If linkEvaluate is False a string may be returned instead of a list.
    '''
    result = super(SgFieldColor2, self).value()

    if result == None:
      return None

    if not linkEvaluate and result == self._linkString:
      return result

    parent = self.parentEntity()

    if parent == None:
      if result == self._linkString:
        return None

      # Bug fix: these parsed color lists were previously built but never
      # returned, making the function fall through and return None.
      return [int(i) for i in result.split(',')]

    if result == self._linkString:
      linkObj = parent[self._linkField]

      if linkObj == None:
        return None

      return linkObj['color']

    return [int(i) for i in result.split(',')]
class SgFieldDate(ShotgunORM.SgField):
  '''
  Entity field that stores a date string

  Example: "1980-01-30".
  '''

  REGEXP = re.compile(r'^\d{4}-\d{2}-\d{2}')

  def _fromFieldData(self, sgData):
    '''
    Sets the field from a Shotgun date string or None.

    Returns True when the stored value changed.
    '''
    newValue = sgData

    if newValue != None:
      newValue = str(newValue)

      if not self.REGEXP.match(newValue):
        raise ValueError('%s invalid date string from Shotgun "%s"' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_DATE).
    '''
    return self.RETURN_TYPE_DATE

  def _setValue(self, sgData):
    '''
    Sets the field value from a "YYYY-MM-DD" string or None.

    Raises TypeError for non-string values and ValueError for strings that
    are not in date format.
    '''
    newValue = sgData

    if newValue != None:
      if not isinstance(newValue, (str, unicode)):
        raise TypeError('%s invalid type "%s", expected a string' % (self, type(newValue).__name__))

      newValue = str(newValue)

      if not self.REGEXP.match(newValue):
        raise ValueError('%s invalid date string "%s"' % (self, newValue))

    if newValue == self._value:
      return False

    self._value = newValue

    return True
class SgFieldDateTime(ShotgunORM.SgField):
  '''
  Entity field that stores a python datetime object.
  '''

  @staticmethod
  def _copyDateTime(value):
    '''
    Returns a copy of the datetime "value" truncated to whole seconds,
    preserving its tzinfo.
    '''
    return datetime.datetime(*value.timetuple()[:6], tzinfo=value.tzinfo)

  def _fromFieldData(self, sgData):
    '''
    Sets the field from a Shotgun datetime value or None.

    Returns True when the stored value changed.
    '''
    newValue = sgData

    if newValue != None:
      newValue = self._copyDateTime(newValue)

    if newValue == self._value:
      return False

    self._value = newValue

    return True

  def returnType(self):
    '''
    Returns the field return type (RETURN_TYPE_DATE_TIME).
    '''
    return self.RETURN_TYPE_DATE_TIME

  def _setValue(self, sgData):
    '''
    Sets the field value from a datetime object or None.

    Raises TypeError for any other type.
    '''
    newValue = sgData

    if newValue != None:
      if not isinstance(newValue, datetime.datetime):
        raise TypeError('%s invalid type "%s", expected a datetime obj' % (self, type(newValue).__name__))

      newValue = self._copyDateTime(newValue)

    if newValue == self._value:
      return False

    self._value = newValue

    return True

  def _toFieldData(self):
    '''
    Returns a copy of the stored datetime, or None when un-set.
    '''
    if self._value == None:
      return None

    return self._copyDateTime(self._value)

  def _Value(self):
    '''
    Returns the field value (a copy of the stored datetime).
    '''
    return self._toFieldData()
class SgFieldEntity(ShotgunORM.SgField):
'''
Entity field that stores a link to another Entity.
'''
##############################################################################
#
# IMPORTANT!!!!
#
# Any changes to _fromFieldData, _setValue, _toFieldData, value functions
# should also be applied to the SgUserFieldAbstractEntity class.
#
##############################################################################
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value != None
self._value = None
return result
try:
newValue = {
'type': sgData['type'],
'id': sgData['id']
}
# This fixes the two Entities as their name field is only available when
# returned as another Entities field value.
if newValue['type'] in ['AppWelcome', 'Banner'] and sgData.has_key('name'):
newValue['name'] = sgData['name']
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
if newValue == self._value:
return False
parent = self.parentEntity()
self._value = newValue
return True
def returnType(self):
return self.RETURN_TYPE_ENTITY
def _setValue(self, sgData):
if sgData == None:
result = self._value != None
self._value = None
return result
if not isinstance(sgData, ShotgunORM.SgEntity):
raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(sgData).__name__))
valueTypes = self.valueTypes()
if valueTypes != None:
if len(valueTypes) > 0:
if not sgData.type in valueTypes:
raise ValueError('not a valid value Entiy type: %s, valid=%s' % (sgData.type, valueTypes))
if sgData['id'] == None:
raise RuntimeError('can not set field value to a Entity that has not been created in Shotgun yet')
parent = self.parentEntity()
if parent == None:
raise RuntimeError('field does not have a parent')
connection = parent.connection()
# Lord knows you shouldn't do this but if you build it people will try!
if connection.url() != sgData.connection().url():
raise ValueError('%s passed an Entity from another url' % self)
if self._value == sgData:
return False
self._value = sgData.toEntityFieldData()
return True
def _toFieldData(self):
if self._value == None:
return None
return dict(self._value)
def value(self, sgSyncFields=None):
'''
Returns the fields value as a Entity object.
Args:
* (list) sgSyncFields:
List of field names to populate the returned Entity with.
'''
value = super(SgFieldEntity, self).value()
parent = self.parentEntity()
if value == None or parent == None:
return None
connection = parent.connection()
if isinstance(sgSyncFields, dict):
sgSyncFields = sgSyncFields.get(parent.type, None)
elif isinstance(sgSyncFields, str):
sgSyncFields = [sgSyncFields]
if sgSyncFields == None:
sgSyncFields = connection.defaultEntityQueryFields(value['type'])
if len(sgSyncFields) <= 0:
sgSyncFields = None
else:
pullFields = set(sgSyncFields)
extraFields = []
if 'all' in pullFields:
pullFields.remove('all')
extraFields = parent.fieldNames()
if 'default' in pullFields:
pullFields.remove('default')
elif 'default' in pullFields:
pullFields.remove('default')
extraFields = connection.defaultEntityQueryFields(value['type'])
pullFields.update(extraFields)
if len(pullFields) >= 1:
sgSyncFields = list(pullFields)
else:
sgSyncFields = None
result = connection._createEntity(
value['type'],
value,
sgSyncFields=sgSyncFields
)
return result
class SgFieldEntityMulti(ShotgunORM.SgField):
'''
Entity field that stores a list of links to other Entities.
Example: [Entity01, Entity02, ...]
'''
##############################################################################
#
# IMPORTANT!!!!
#
# Any changes to _fromFieldData, _setValue, _toFieldData, value functions
# should also be applied to the SgUserFieldAbstractMultiEntity class.
#
##############################################################################
def _fromFieldData(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
newValue = []
try:
for i in sgData:
e = {
'type': i['type'],
'id': i['id']
}
if e in newValue:
continue
# This fixes the two Entities as their name field is only available when
# returned as another Entities field value.
if e['type'] in ['AppWelcome', 'Banner'] and i.has_key('name'):
e['name'] = i['name']
newValue.append(e)
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
if self._value == newValue:
return False
self._value = newValue
return True
def returnType(self):
return self.RETURN_TYPE_MULTI_ENTITY
def _setValue(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
if isinstance(sgData, ShotgunORM.SgEntity):
sgData = [sgData]
elif not isinstance(sgData, list):
raise TypeError('%s invalid value type "%s", expected a SgEntity or list' % (self, type(sgData).__name__))
else:
for i in sgData:
if not isinstance(i, ShotgunORM.SgEntity):
raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(i).__name__))
valueTypes = self.valueTypes()
if valueTypes != None:
if len(valueTypes) > 0:
for i in sgData:
if not i.type in valueTypes:
raise ValueError('not a valid value type: %s, valid=%s' % (i.type, valueTypes))
parent = self.parentEntity()
newValue = []
if parent == None:
for i in sgData:
if i['id'] == None:
raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
edata = i.toEntityFieldData()
if edata in newValue:
continue
newValue.append(edata)
else:
connection = parent.connection()
for i in sgData:
if i['id'] == None:
raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
# Lord knows you shouldn't do this but if you build it people will try!
if connection.url() != i.connection().url():
raise ValueError('%s passed an Entity from another url' % self)
edata = i.toEntityFieldData()
if edata in newValue:
continue
newValue.append(edata)
if self._value == newValue:
return False
self._value = newValue
return True
def _toFieldData(self):
    '''
    Returns the field value as a list of plain Entity link dicts, or None
    when the field is unset. Each dict is a shallow copy so callers can not
    mutate the stored value.
    '''
    if self._value is None:
        return None
    return [dict(entityData) for entityData in self._value]
def value(self, sgSyncFields=None):
    '''
    Returns the fields value as a list of Entity objects.

    Args:
      * (dict) sgSyncFields:
        Dict of entity types and field names to populate the returned Entities
        with.
    '''
    result = super(SgFieldEntityMulti, self).value()
    if result in [None, []]:
        return result
    parent = self.parentEntity()
    if parent == None:
        # No connection available; hand back a detached deep copy of the
        # raw link dicts instead of Entity objects.
        return copy.deepcopy(result)
    connection = parent.connection()
    schema = connection.schema()
    tmp = []
    qEng = connection.queryEngine()
    # Block the query engine so the Entities created below are not each
    # immediately background-pulled; unblocked in the finally clause.
    qEng.block()
    try:
        for i in result:
            t = i['type']
            iSyncFields = None
            if sgSyncFields != None:
                if sgSyncFields.has_key(t):
                    iFields = sgSyncFields[t]
                    if iFields == None:
                        iSyncFields = connection.defaultEntityQueryFields(t)
                        if len(iSyncFields) <= 0:
                            iSyncFields = None
                    else:
                        pullFields = []
                        if isinstance(iFields, str):
                            pullFields = set([iFields])
                        else:
                            pullFields = set(iFields)
                        extraFields = []
                        # The special names 'all' and 'default' expand to
                        # every schema field / the connection's default
                        # query fields respectively.
                        if 'all' in pullFields:
                            pullFields.remove('all')
                            extraFields = schema.entityInfo(t).fieldNames()
                            if 'default' in pullFields:
                                pullFields.remove('default')
                        elif 'default' in pullFields:
                            pullFields.remove('default')
                            extraFields = connection.defaultEntityQueryFields(t)
                        pullFields.update(extraFields)
                        if len(pullFields) >= 1:
                            iSyncFields = list(pullFields)
                        else:
                            iSyncFields = None
                else:
                    iSyncFields = connection.defaultEntityQueryFields(t)
                    if len(iSyncFields) <= 0:
                        iSyncFields = None
            else:
                iSyncFields = connection.defaultEntityQueryFields(t)
            entity = connection._createEntity(t, i, sgSyncFields=iSyncFields)
            tmp.append(entity)
    finally:
        qEng.unblock()
    return tmp
class SgFieldFloat(ShotgunORM.SgField):
    '''
    Entity field that stores a float.
    '''

    def _fromFieldData(self, sgData):
        '''
        Sets the field value from data returned by Shotgun, coercing to float.

        Returns True when the stored value changed.

        Raises ValueError when sgData can not be converted to a float.
        '''
        if sgData != None:
            try:
                sgData = float(sgData)
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed; float() raises only these.
            except (TypeError, ValueError, OverflowError):
                raise ValueError('%s invalid data from Shotgun "%s", expected a float' % (self, sgData))
        if self._value == sgData:
            return False
        self._value = sgData
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_FLOAT.
        '''
        return self.RETURN_TYPE_FLOAT

    def _setValue(self, sgData):
        '''
        Sets the field value, coercing to float.

        Returns True when the stored value changed.

        Raises TypeError when sgData can not be converted to a float.
        '''
        if sgData != None:
            try:
                sgData = float(sgData)
            # Narrowed from a bare "except:"; see _fromFieldData().
            except (TypeError, ValueError, OverflowError):
                raise TypeError('%s invalid value type "%s", expected a float' % (self, type(sgData).__name__))
        if self._value == sgData:
            return False
        self._value = sgData
        return True
class SgFieldInt(ShotgunORM.SgField):
    '''
    Entity field that stores an integer.
    '''

    def _fromFieldData(self, sgData):
        '''
        Sets the field value from data returned by Shotgun, coercing to int.

        Returns True when the stored value changed.

        Raises ValueError when sgData can not be converted to an int.
        '''
        if sgData != None:
            try:
                sgData = int(sgData)
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed; int() raises only these.
            except (TypeError, ValueError):
                raise ValueError('%s invalid data from Shotgun "%s", expected a int' % (self, sgData))
        if self._value == sgData:
            return False
        self._value = sgData
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_INT.
        '''
        return self.RETURN_TYPE_INT

    def _setValue(self, sgData):
        '''
        Sets the field value, coercing to int.

        Returns True when the stored value changed.

        Raises TypeError when sgData can not be converted to an int.
        '''
        if sgData != None:
            try:
                sgData = int(sgData)
            # Narrowed from a bare "except:"; see _fromFieldData().
            except (TypeError, ValueError):
                raise TypeError('%s invalid value type "%s", expected a int' % (self, type(sgData).__name__))
        if self._value == sgData:
            return False
        self._value = sgData
        return True
class SgFieldSelectionList(ShotgunORM.SgField):
    '''
    Entity field that stores a text string that is from a list selection.

    The field may contain a list of valid values which when the field is set are
    compared and an Exception thrown when the value is not a valid one.
    '''

    def _fromFieldData(self, sgData):
        '''
        Stores the selection value returned by Shotgun.
        '''
        if sgData is None:
            # When clearing, the flag mirrors whether the field was already
            # empty (original semantics preserved).
            wasEmpty = self._value is None
            if not wasEmpty:
                self._value = None
            return wasEmpty
        if self._value == sgData:
            return False
        self._value = sgData
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_LIST.
        '''
        return self.RETURN_TYPE_LIST

    def _setValue(self, sgData):
        '''
        Sets the selection value, validating against validValues() when that
        list is non-empty.
        '''
        if sgData is None:
            alreadyEmpty = self._value is None
            if alreadyEmpty:
                self._value = None
            return alreadyEmpty
        if not isinstance(sgData, (str, unicode)):
            raise TypeError('%s invalid type "%s", expected a string' % (self, type(sgData).__name__))
        newValue = str(sgData)
        if self._value == newValue:
            return False
        allowed = self.validValues()
        if len(allowed) > 0 and newValue not in allowed:
            raise ValueError('%s invalid value "%s"' % (self, newValue))
        self._value = newValue
        return True
class SgFieldSerializable(ShotgunORM.SgField):
    '''
    Entity field that stores serializable data.
    '''

    def _fromFieldData(self, sgData):
        '''
        Stores a dict of serializable data returned by Shotgun. The stored
        value is a deep copy so the caller's dict can not mutate it.
        '''
        if sgData in [None, {}]:
            isEmpty = self._value in [None, {}]
            if isEmpty:
                self._value = None
            return isEmpty
        if not isinstance(sgData, dict):
            raise ValueError('%s invalid data from Shotgun "%s", expected a dict' % (self, sgData))
        if self._value == sgData:
            return False
        self._value = copy.deepcopy(sgData)
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_SERIALIZABLE.
        '''
        return self.RETURN_TYPE_SERIALIZABLE

    def _setValue(self, sgData):
        '''
        Sets the field value to a deep copy of the supplied dict.
        '''
        if sgData is None:
            alreadyEmpty = self._value is None
            if alreadyEmpty:
                self._value = None
            return alreadyEmpty
        if not isinstance(sgData, dict):
            raise TypeError('%s invalid value type "%s", expected a dict' % (self, type(sgData).__name__))
        if self._value == sgData:
            return False
        self._value = copy.deepcopy(sgData)
        return True

    def _toFieldData(self):
        '''
        Returns a deep copy of the stored dict, or None when unset.
        '''
        stored = self._value
        return None if stored is None else copy.deepcopy(stored)

    def _Value(self):
        return self._toFieldData()
class SgFieldSummary(ShotgunORM.SgField):
    '''
    Entity field that returns an Entity or list of Entities based on a search
    expression.

    Summary fields.
    '''

    # Matches Shotgun UTC timestamp strings, e.g. "2014-06-01 12:30:00 UTC".
    DATE_REGEXP = re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) UTC')

    def __init__(self, name, label=None, sgFieldSchemaInfo=None):
        super(SgFieldSummary, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)

        # Guards the lazy construction of self._searchFilter.
        self.__buildLock = threading.Lock()

        summaryInfo = self.schemaInfo().summaryInfo()

        if summaryInfo == None:
            raise RuntimeError('invalid field schema info for summary info')

        self._entityType = summaryInfo['entity_type']
        self._filtersRaw = summaryInfo['filters']
        self._summaryType = summaryInfo['summary_type']
        self._summaryField = summaryInfo['summary_field']
        self._summaryValue = summaryInfo['summary_value']

        # Built lazily by _buildSearchFilter().
        self._searchFilter = None

    def _buildLogicalOp(self, conditions, info):
        '''
        Builds the logical operator search pattern and returns it.

        Recursively expands nested logical operators and converts the special
        condition values (relative days, UTC date strings, "Current <type>",
        "Me") into concrete filter values.
        '''
        result = []

        parent = self.parentEntity()
        connection = parent.connection()

        for c in conditions:
            if c.has_key('logical_operator'):
                result.append({
                    'conditions': self._buildLogicalOp(c['conditions'], info),
                    'logical_operator': c['logical_operator']
                })
            else:
                newValues = []

                cInfo = info.fieldInfo(c['path'])
                cType = cInfo.returnType()

                # Date and Date Time fields.
                if cType in [ShotgunORM.SgField.RETURN_TYPE_DATE, ShotgunORM.SgField.RETURN_TYPE_DATE_TIME]:
                    # http://stackoverflow.com/a/13287083
                    def utc_to_local(utc_dt):
                        # get integer timestamp to avoid precision lost
                        timestamp = calendar.timegm(utc_dt.timetuple())
                        # BUGFIX: qualify through the datetime module like the
                        # rest of this method (datetime.date.today(),
                        # datetime.datetime.combine); the bare
                        # "datetime.fromtimestamp" / "timedelta" names failed.
                        local_dt = datetime.datetime.fromtimestamp(timestamp)
                        assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
                        return local_dt.replace(microsecond=utc_dt.microsecond)

                    for v in c['values']:
                        if isinstance(v, dict):
                            if v.has_key('relative_day'):
                                time = datetime.time(*v['time'])
                                date = datetime.date.today()

                                rd = v['relative_day']

                                # BUGFIX: use timedelta arithmetic; the old
                                # date.replace(day=date.day +/- 1) raised
                                # ValueError at month boundaries.
                                if rd == 'tomorrow':
                                    date = date + datetime.timedelta(days=1)
                                elif rd == 'yesterday':
                                    date = date - datetime.timedelta(days=1)

                                dt = datetime.datetime.combine(date, time)

                                # Relative day calcs use utc time!
                                # NOTE(review): dt is naive so this replace()
                                # is a no-op and its result is discarded;
                                # kept as-is from the original.
                                dt.replace(tzinfo=None)

                                newValues.append(dt)
                            else:
                                newValues.append(v)
                        elif isinstance(v, str):
                            # BUGFIX: DATE_REGEXP is a class attribute so it
                            # must be reached through self; the unqualified
                            # name was a NameError here.
                            search = self.DATE_REGEXP.match(v)

                            if search:
                                # BUGFIX: regexp groups are strings; convert
                                # to int before building time/date objects.
                                time = datetime.time(int(search.group(4)), int(search.group(5)), int(search.group(6)))
                                date = datetime.date(int(search.group(1)), int(search.group(2)), int(search.group(3)))

                                dt = datetime.datetime.combine(date, time)

                                dt.replace(tzinfo=None)

                                newValues.append(utc_to_local(dt))
                            else:
                                newValues.append(v)

                # Entity and Multi-Entity fields.
                elif cType in [ShotgunORM.SgField.RETURN_TYPE_ENTITY, ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY]:
                    for v in c['values']:
                        if v['name'] == 'Current %s' % parent.type:
                            newValues.append(parent.toEntityFieldData())
                        elif v['name'] == 'Me':
                            # NOTE(review): USERNAME is a Windows environment
                            # variable; confirm behavior on posix hosts.
                            login = os.getenv('USERNAME')

                            user = connection.findOne('HumanUser', [['login', 'is', login]], ['login'])

                            if user == None:
                                # BUGFIX: was the misspelled name "RuntimError".
                                raise RuntimeError('summary field unable to find user "%s" in Shotgun' % login)

                            newValues.append(user.toEntityFieldData())
                        else:
                            newValues.append(v)
                else:
                    # Do nothing
                    newValues = c['values']

                c['values'] = newValues

                del c['active']

                result.append(c)

        return result

    def _buildSearchFilter(self):
        '''
        Expands the raw summary filters into a Shotgun search expression and
        caches it in self._searchFilter.
        '''
        opsRaw = copy.deepcopy(self._filtersRaw)

        self._searchFilter = {
            'conditions': self._buildLogicalOp(
                opsRaw['conditions'],
                self.parentEntity().connection().schema().entityInfo(self.entityType())
            ),
            'logical_operator': opsRaw['logical_operator']
        }

    def _fromFieldData(self, sgData):
        '''
        Stores raw summary data; returns True when the stored value changed.
        '''
        if self._value == sgData:
            return False

        self._value = sgData

        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_SUMMARY.
        '''
        return self.RETURN_TYPE_SUMMARY

    def _toFieldData(self):
        '''
        Returns the raw summary value; dict results are deep copied.
        '''
        result = self._value

        if result == None:
            return None

        if isinstance(result, dict):
            return copy.deepcopy(result)

        return result

    def entityType(self):
        '''
        Returns the type of Entity the summary field will return.
        '''
        return self._entityType

    def hasCommit(self):
        '''
        Always returns False for summary fields.
        '''
        return False

    def _invalidate(self):
        '''
        Deletes the search filter so its built again.
        '''
        self._searchFilter = None

    def isEditable(self):
        '''
        Always return False for summary fields.
        '''
        return False

    def isQueryable(self):
        '''
        Even though summary fields can be queried from Shotgun return False.
        '''
        return False

    def setHasCommit(self, valid):
        '''
        Summary fields can't be committed, always returns False.
        '''
        return False

    def setHasSyncUpdate(self, valid):
        '''
        Summary fields cant be queried so thus they can not be background pulled.
        Always returns False.
        '''
        return False

    def _setValue(self, value):
        '''
        Always return False for summary fields, they can not be set.
        '''
        return False

    def _valueSg(self):
        '''
        Performs the summary search in Shotgun and returns the aggregated
        value for this field's summary type.
        '''
        parent = self.parentEntity()

        if parent == None or not parent.exists():
            return None

        connection = parent.connection()

        with self.__buildLock:
            if self._searchFilter == None:
                self._buildSearchFilter()

        searchExp = self._searchFilter

        result = None

        # Single record.
        if self._summaryType == 'single_record':
            order = [
                {
                    'field_name': self._summaryValue['column'],
                    'direction': self._summaryValue['direction']
                }
            ]

            result = connection._sg_find_one(self.entityType(), searchExp, order=order)

        # Status percentage and list.
        elif self._summaryType.startswith('status_'):
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if self._summaryType == 'status_percentage':
                if len(sgSearch) <= 0:
                    result = 0
                else:
                    validCount = 0

                    for e in sgSearch:
                        value = e.field(self._summaryField).value()

                        if value == self._summaryValue:
                            validCount += 1

                    if validCount <= 0:
                        result = 0.0
                    else:
                        result = float(validCount) / len(sgSearch)
            elif self._summaryType == 'status_list':
                if len(sgSearch) <= 0:
                    result = 'ip'
                else:
                    value = sgSearch[0].field(self._summaryField).value()

                    for e in sgSearch[1:]:
                        v = e.field(self._summaryField).value()

                        if v != value:
                            # I have no clue why Shotgun always defaults this
                            # result to ip but whatevs yo.
                            value = 'ip'

                            break

                    result = value

        # Record count.
        elif self._summaryType == 'record_count':
            # Dont use the orm for this search, waste to build the classes when
            # all we are doing is getting a len on the search result.
            sgSearch = connection._sg_find(self.entityType(), searchExp)

            result = len(sgSearch)
        elif self._summaryType == 'count':
            # Count only records where the summarized field is non-None.
            searchExp = {
                'conditions': [
                    searchExp,
                    {
                        #'active': 'true',
                        'path': self._summaryField,
                        'relation': 'is_not',
                        'values': [None]
                    }
                ],
                'logical_operator': 'and'
            }

            # Dont use the orm for this search, waste to build the classes when
            # all we are doing is getting a len on the search result.
            sgSearch = connection._sg_find(self.entityType(), searchExp, fields=[])

            result = len(sgSearch)

        # Sum.
        elif self._summaryType == 'sum':
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if len(sgSearch) <= 0:
                result = 0
            else:
                value = 0

                for e in sgSearch:
                    v = e.field(self._summaryField).value()

                    if v != None:
                        value += v

                result = value

        # Min.
        elif self._summaryType == 'min':
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if len(sgSearch) <= 0:
                result = None
            else:
                value = sgSearch[0].field(self._summaryField).value()

                for e in sgSearch[1:]:
                    v = e.field(self._summaryField).value()

                    if v != None:
                        value = min(v, value)

                result = value

        # Max.
        elif self._summaryType == 'max':
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if len(sgSearch) <= 0:
                result = None
            else:
                value = sgSearch[0].field(self._summaryField).value()

                for e in sgSearch[1:]:
                    v = e.field(self._summaryField).value()

                    if v != None:
                        value = max(v, value)

                result = value

        # Average.
        elif self._summaryType == 'avg':
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if len(sgSearch) <= 0:
                result = 0
            else:
                # NOTE(review): the first record's value seeds the sum without
                # a None check while later Nones are skipped; kept as-is.
                value = sgSearch[0].field(self._summaryField).value()

                for e in sgSearch[1:]:
                    v = e.field(self._summaryField).value()

                    if v != None:
                        value += v

                value = float(value) / len(sgSearch)

                result = value

        # Percentage.
        elif self._summaryType == 'percentage':
            sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])

            if len(sgSearch) <= 0:
                result = 0
            else:
                value = 0

                for e in sgSearch:
                    if e.field(self._summaryField).value() == self._summaryValue:
                        value += 1

                if value >= 1:
                    value = float(value) / len(sgSearch)

                result = value

        return result

    def _Value(self):
        '''
        Returns the summary value; single_record summaries are wrapped in an
        Entity object when a parent connection is available.
        '''
        if self._value == None:
            return None

        if self._summaryType == 'single_record':
            parent = self.parentEntity()

            if parent == None:
                return copy.deepcopy(self._value)

            connection = parent.connection()

            return connection._createEntity(self._value['type'], self._value)

        return copy.deepcopy(self._value)
class SgFieldTagList(ShotgunORM.SgField):
    '''
    Entity field that stores a list of strings.

    The field may contain a list of valid values which when the field is set are
    compared and an Exception thrown when the value is not a valid one.
    '''

    def _storeTagList(self, sgData):
        '''
        Shared implementation for _fromFieldData()/_setValue(): normalizes,
        validates and stores a tag list.

        Returns True when the stored value changed (empty input mirrors the
        previous "already empty" flag semantics).

        Raises:
          * TypeError when an element is not a string.
          * ValueError when an element is not in validValues().
        '''
        if isinstance(sgData, (tuple, set)):
            sgData = list(sgData)
        if sgData in [None, []]:
            result = self._value in [None, []]
            if result:
                self._value = self.defaultValue()
            return result
        for i in sgData:
            # NOTE(review): only str is accepted here while SgFieldText also
            # accepts unicode; confirm whether unicode tags should pass.
            if not isinstance(i, str):
                raise TypeError('%s invalid type "%s" in value "%s", expected a string' % (self, type(i).__name__, sgData))
        # De-duplicate; note set() does not preserve the incoming order.
        sgData = list(set(sgData))
        validValues = self.validValues()
        if len(validValues) > 0:
            for i in sgData:
                if not i in validValues:
                    # BUGFIX: the ValueError was constructed but never raised,
                    # silently accepting invalid tags.
                    raise ValueError('%s invalid value "%s", valid %s' % (self, i, validValues))
        if self._value == sgData:
            return False
        self._value = sgData
        return True

    def _fromFieldData(self, sgData):
        '''
        Sets the tag list from data returned by Shotgun.
        '''
        return self._storeTagList(sgData)

    def returnType(self):
        '''
        Returns RETURN_TYPE_TAG_LIST.
        '''
        return self.RETURN_TYPE_TAG_LIST

    def _setValue(self, sgData):
        '''
        Sets the tag list; same validation as _fromFieldData().
        '''
        return self._storeTagList(sgData)

    def _toFieldData(self):
        '''
        Returns a copy of the stored tag list, or None when unset.
        '''
        result = self._value
        if result == None:
            return None
        return list(result)

    def _Value(self):
        return self._toFieldData()
class SgFieldText(ShotgunORM.SgField):
    '''
    Entity field that stores a str.
    '''

    def _fromFieldData(self, sgData):
        '''
        Sets the field value from data returned by Shotgun.

        Returns True when the stored value changed.
        '''
        if self._value == sgData:
            return False
        # BUGFIX: a None from Shotgun was previously stored as the literal
        # string 'None' via str(None); keep it as None like every other
        # field class in this module does.
        if sgData == None:
            self._value = None
        else:
            self._value = str(sgData)
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_TEXT.
        '''
        return self.RETURN_TYPE_TEXT

    def _setValue(self, sgData):
        '''
        Sets the field value, coercing str/unicode input to str.

        Raises TypeError for any other type.
        '''
        if sgData != None:
            if not isinstance(sgData, (str, unicode)):
                raise TypeError('%s invalid value type "%s", expected a str' % (self, type(sgData).__name__))
            sgData = str(sgData)
        if self._value == sgData:
            return False
        self._value = sgData
        return True
class SgFieldImage(SgFieldText):
    '''
    See SgFieldText.
    '''

    def downloadThumbnail(self, path):
        '''
        Downloads the image to the specified path.

        Raises:
          * ValueError when the field has no url value.
          * OSError when path is an existing directory.
          * RuntimeError when the download or write fails.
        '''
        url = self.value()
        if url == None or url == '':
            raise ValueError('%s value is empty' % self)
        if os.path.exists(path) and os.path.isdir(path):
            raise OSError('output path "%s" is a directory' % path)
        try:
            data = urllib2.urlopen(url)
            # BUGFIX: open in binary mode ("wb", was "w"); image data is
            # bytes and text mode corrupts it. The handle is also closed on
            # write failure now (was leaked).
            f = open(path, 'wb')
            try:
                f.write(data.read())
            finally:
                f.close()
        except Exception as e:
            ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
                'field': self,
                'error': e
            })
            raise RuntimeError('%s an error occured while downloading the file' % self)
        return True

    def openInBrowser(self):
        '''
        Opens the image in a web-browser
        '''
        url = self.value()
        if url == None:
            url = ''
        webbrowser.open(url)

    def returnType(self):
        '''
        Returns RETURN_TYPE_IMAGE.
        '''
        return self.RETURN_TYPE_IMAGE

    def uploadThumbnail(self, path):
        '''
        Uploads the specified image file and sets it as the Entities thumbnail.

        Returns the Attachment id.
        '''
        parent = self.parentEntity()
        # BUGFIX: single consolidated guard; the old second check called the
        # non-existent parent.exist() (typo for exists()).
        if parent == None or not parent.exists():
            raise RuntimeError('parent entity does not exist')
        with self:
            if self.hasCommit():
                raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
            sgconnection = parent.connection().connection()
            with ShotgunORM.SHOTGUN_API_LOCK:
                sgResult = sgconnection.upload_thumbnail(parent.type, parent['id'], path)
            parent.sync([self.name()])
            return sgResult

    def uploadFilmstripThumbnail(self, path):
        '''
        Uploads the specified image file and sets it as the Entities flimstrip
        thumbnail.

        Returns the Attachment id.

        Note:
          This function is only valid for Version Entities.
        '''
        with self:
            if self.hasCommit():
                raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
            parent = self.parentEntity()
            # BUGFIX: validate the parent before touching parent.type (the
            # old order raised AttributeError for a None parent) and call
            # exists() instead of the non-existent exist().
            if parent == None or not parent.exists():
                raise RuntimeError('parent entity does not exists')
            if not parent.type == 'Version':
                raise RuntimeError('only valid on Version Entities')
            sgconnection = parent.connection().connection()
            # Guard the raw API call like uploadThumbnail() does.
            with ShotgunORM.SHOTGUN_API_LOCK:
                sgResult = sgconnection.upload_filmstrip_thumbnail(parent.type, parent['id'], path)
            parent.sync([self.name()])
            return sgResult
class SgFieldUrl(ShotgunORM.SgField):
    '''
    Entity field that stores a url.

    Example URL: {
      'content_type': 'image/jpeg',
      'link_type': 'upload',
      'name': 'bob.jpg',
      'url': 'http://www.owned.com/bob.jpg'
    }

    Example Local: {
      'content_type': 'image/jpeg',
      'link_type': 'local',
      'name': 'bob.jpg',
      'local_storage': 'c:/temp/bob.jpg'
    }
    '''

    def _fromFieldData(self, sgData):
        '''
        Sets the field value from a Shotgun url dict.

        Returns True when the stored value changed.

        Raises:
          * TypeError when sgData is not a dict or lacks required keys.
          * ValueError when link_type is not local/upload/web.
        '''
        result = {}
        if sgData == None:
            result = self._value == None
            if not result:
                self._value = None
            return result
        if not isinstance(sgData, dict):
            raise TypeError('%s invalid sgData "%s", expected a dict or string' % (self, sgData))
        try:
            result['link_type'] = sgData['link_type'].lower()
            # upload/web links carry a url; local links carry a storage path.
            if result['link_type'] in ['upload', 'web']:
                result['url'] = sgData['url']
            else:
                result['local_storage'] = sgData['local_storage']
            result['name'] = sgData['name']
            result['content_type'] = sgData.get('content_type', None)
        except Exception as e:
            ShotgunORM.LoggerField.warn(e)
            raise TypeError('%s invalid sgData dict "%s"' % (self, sgData))
        if not result['link_type'] in ['local', 'upload', 'web']:
            raise ValueError('%s invalid link_type "%s"' % (self, result['link_type']))
        if self._value == result:
            return False
        self._value = result
        return True

    def returnType(self):
        '''
        Returns RETURN_TYPE_URL.
        '''
        return self.RETURN_TYPE_URL

    def setValue(self, sgData):
        '''
        Url fields accept the same data as Shotgun returns; delegate to
        fromFieldData().
        '''
        return self.fromFieldData(sgData)

    def _toFieldData(self):
        '''
        Returns a deep copy of the stored url dict, or None when unset.
        '''
        if self._value == None:
            return None
        return copy.deepcopy(self._value)

    def _Value(self):
        return self._toFieldData()

    def url(self, openInBrowser=False):
        '''
        Returns the url value.

        When the arg "openInBrowser" is set to True then the returned URL will
        also be opened in the operating systems default web-browser.
        '''
        data = self.value()
        result = ''
        if data != None:
            try:
                result = data['url']
            # Narrowed from a bare "except:": local links have no 'url' key.
            except (KeyError, TypeError):
                pass
        if openInBrowser:
            # BUGFIX: was webbrowser.open(url) with "url" undefined here.
            webbrowser.open(result)
        return result
# Register the built-in field classes so ShotgunORM can map each Shotgun
# return type to its implementation. Note LIST and STATUS_LIST both map to
# SgFieldSelectionList.
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_CHECKBOX, SgFieldCheckbox)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR, SgFieldColor)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR2, SgFieldColor2)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE, SgFieldDate)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE_TIME, SgFieldDateTime)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_ENTITY, SgFieldEntity)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_FLOAT, SgFieldFloat)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_IMAGE, SgFieldImage)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_INT, SgFieldInt)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY, SgFieldEntityMulti)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SERIALIZABLE, SgFieldSerializable)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_STATUS_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SUMMARY, SgFieldSummary)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TAG_LIST, SgFieldTagList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TEXT, SgFieldText)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_URL, SgFieldUrl)
################################################################################
#
# Custom fields
#
################################################################################
class SgFieldID(SgFieldInt):
    '''
    Field that returns the parent Entities id.

    (The original docstring said "Type" — a copy-paste from SgFieldType; this
    class extends SgFieldInt and _valueSg() returns the stored integer id.)
    '''

    # Do not allow the field to lock, no point in it.
    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        return False

    def __init__(self, parentEntity, sgFieldSchemaInfo):
        super(SgFieldID, self).__init__(None, None, sgFieldSchemaInfo)

        # The id field is bound to its parent and always valid.
        self._SgField__setParentEntity(parentEntity)
        self._SgField__valid = True

    def invalidate(self):
        '''
        Does nothing for ID fields.
        '''
        return False

    def isCacheable(self):
        '''
        Always returns False for ID fields.
        '''
        return False

    def setHasSyncUpdate(self, valid):
        '''
        Always returns False for ID fields.
        '''
        return False

    def setValid(self, valid):
        '''
        Always returns False for ID fields.
        '''
        return False

    def setValueFromShotgun(self):
        '''
        Always returns False for ID fields.
        '''
        return False

    def validate(self, forReal=False, force=False):
        '''
        Always returns False for ID fields.
        '''
        return False

    def value(self):
        '''
        Returns the value of the ID field.
        '''
        return self._value

    def _valueSg(self):
        '''
        Returns the value of the ID field.

        For ID fields this will never query Shotgun.
        '''
        return self._value
class SgFieldType(SgFieldText):
    '''
    Field that returns the parent Entities Type.
    '''

    # Do not allow the field to lock, no point in it.
    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        return False

    def __init__(self, parentEntity, sgFieldSchemaInfo):
        super(SgFieldType, self).__init__(None, None, sgFieldSchemaInfo)

        # The type field is bound to its parent and always valid.
        self._SgField__setParentEntity(parentEntity)
        self._SgField__valid = True

    def invalidate(self):
        '''
        Always returns False for Type fields.
        '''
        return False

    def isCacheable(self):
        '''
        Always returns False for Type fields.
        '''
        return False

    def setHasSyncUpdate(self, valid):
        '''
        Always returns False for Type fields.
        '''
        return False

    def setValid(self, valid):
        '''
        Always returns False for Type fields.
        '''
        return False

    def setValueFromShotgun(self):
        '''
        Always returns False for Type fields.
        '''
        return False

    def validate(self, forReal=False, force=False):
        '''
        Always returns False for Type fields.
        '''
        return False

    def value(self):
        '''
        Returns the Entity type the field belongs to.
        '''
        return self._value

    def _valueSg(self):
        '''
        Returns the Entity type the field belongs to.

        For Type fields this will never query Shotgun.
        '''
        return self._value
| 0 | 0 |
680326ae1a078627805cf6749a493e054c55d195 | 6,129 | py | Python | idealized_experiments/idealized_isopycnal_field.py | daanreijnders/isoneutral-dispersion | 88259ba658d15611609f52e1615aff37a54e7289 | [
"MIT"
] | 1 | 2022-03-18T08:47:41.000Z | 2022-03-18T08:47:41.000Z | idealized_experiments/idealized_isopycnal_field.py | daanreijnders/isoneutral-dispersion | 88259ba658d15611609f52e1615aff37a54e7289 | [
"MIT"
] | null | null | null | idealized_experiments/idealized_isopycnal_field.py | daanreijnders/isoneutral-dispersion | 88259ba658d15611609f52e1615aff37a54e7289 | [
"MIT"
] | 1 | 2022-03-18T08:47:42.000Z | 2022-03-18T08:47:42.000Z | import numpy as np
class densityField:
    '''
    Idealized density field

        rho(x, y, z) = rho0 * (1 - N^2 * z / g
                               + alphax * sin(kappax * x)
                               + alphay * sin(kappay * y))

    with helpers to sample it on a regular grid (point values or analytic
    cell averages) and to derive isopycnal slopes by finite differences on
    staggered grids.
    '''

    def __init__(self, g=10, Nsquared=1e-5, alphax=1.1e-3, alphay=1e-3, kappax=2e-5/np.pi, kappay=2e-5/np.pi, rho0=1025):
        self.g = g                # gravitational acceleration [ms^-2]
        self.Nsquared = Nsquared  # Square of buoyancy frequency [s^-2]
        self.alphax = alphax      # zonal amplitude
        self.alphay = alphay      # meridional amplitude
        self.kappax = kappax      # zonal wavenumber [m^-1]
        self.kappay = kappay      # meridional wavenumber [m^-1]
        self.rho0 = rho0          # background density [kg m^-3]

    def create_interpolated_density_grid(self, nx=101, ny=201, nz=301, Lx=1e6, Ly=2e6, H=3000, rhotype='averaged'):
        '''
        Builds the (X, Y, Z) grid and samples the density field on it.

        rhotype='interpolated' stores point samples in self.rho_interpolated;
        rhotype='averaged' stores analytic cell averages (triple difference of
        the antiderivative on a staggered grid) in self.rho_averaged.
        '''
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.Lx = Lx
        self.Ly = Ly
        self.H = H
        self.X = np.linspace(0, Lx, nx)
        self.Y = np.linspace(0, Ly, ny)
        self.Z = np.linspace(0, -H, nz)
        self.dX = np.abs(self.X[1] - self.X[0])
        self.dY = np.abs(self.Y[1] - self.Y[0])
        self.dZ = np.abs(self.Z[1] - self.Z[0])
        self.XXX, self.YYY, self.ZZZ = np.meshgrid(self.X, self.Y, self.Z, indexing='ij')
        if rhotype == 'interpolated':
            # BUGFIX: assign the computed field to the attribute; the original
            # left it in a local and then evaluated a bare
            # "self.rho_interpolated", raising AttributeError.
            self.rho_interpolated = self.rho0 * (
                1
                - self.Nsquared * self.ZZZ / self.g
                + self.alphax * np.sin(self.kappax * self.XXX)
                + self.alphay * np.sin(self.kappay * self.YYY)
            )
        if rhotype == 'averaged':
            # Create staggered versions of original grid.
            self.create_staggered_grid()
            integrated_rho = self.rho0 * (
                self.XXX_stag * self.YYY_stag * self.ZZZ_stag
                - self.Nsquared * self.ZZZ_stag**2 * self.XXX_stag * self.YYY_stag / (2 * self.g)
                - self.alphax * np.cos(self.kappax * self.XXX_stag) * self.YYY_stag * self.ZZZ_stag / self.kappax
                - self.alphay * np.cos(self.kappay * self.YYY_stag) * self.XXX_stag * self.ZZZ_stag / self.kappay
            )
            # Triple difference of the antiderivative / cell volume = average.
            self.rho_averaged = np.diff(np.diff(np.diff(integrated_rho, axis=0), axis=1), axis=2) / (self.dX * self.dY * self.dZ)

    def create_staggered_grid(self):
        '''
        Creates cell-corner (staggered) coordinates around the main grid; each
        staggered axis has one more point than the main axis.
        '''
        if not hasattr(self, "XXX"):
            raise RuntimeError("Cannot create a stagered grid before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
        self.X_stag = np.linspace(-self.dX, self.Lx, self.nx+1) + self.dX/2
        self.Y_stag = np.linspace(-self.dY, self.Ly, self.ny+1) + self.dY/2
        # Due to the definition of Z in Parcels!
        self.Z_stag = np.linspace(self.dZ, -self.H, self.nz+1) - self.dZ/2
        self.XXX_stag, self.YYY_stag, self.ZZZ_stag = np.meshgrid(self.X_stag, self.Y_stag, self.Z_stag, indexing='ij')

    def compute_slope(self):
        '''
        Computes isopycnal slopes Sx = drho/dx / drho/dz and
        Sy = drho/dy / drho/dz on the main grid, sampling the analytic density
        on axis-staggered grids and differencing along the staggered axis.
        Also stores Sabs2 = Sx^2 + Sy^2.
        '''
        if not hasattr(self, "XXX"):
            raise RuntimeError("Cannot compute a staggered rho field before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
        if not hasattr(self, 'X_stag'):
            self.create_staggered_grid()
        XXX_stagX, YYY_stagX, ZZZ_stagX = np.meshgrid(self.X_stag, self.Y, self.Z, indexing='ij')
        XXX_stagY, YYY_stagY, ZZZ_stagY = np.meshgrid(self.X, self.Y_stag, self.Z, indexing='ij')
        XXX_stagZ, YYY_stagZ, ZZZ_stagZ = np.meshgrid(self.X, self.Y, self.Z_stag, indexing='ij')
        rho_stagX = self.rho0 * (
            1
            - self.Nsquared * ZZZ_stagX / self.g
            + self.alphax * np.sin(self.kappax * XXX_stagX)
            + self.alphay * np.sin(self.kappay * YYY_stagX)
        )
        rho_stagY = self.rho0 * (
            1
            - self.Nsquared * ZZZ_stagY / self.g
            + self.alphax * np.sin(self.kappax * XXX_stagY)
            + self.alphay * np.sin(self.kappay * YYY_stagY)
        )
        rho_stagZ = self.rho0 * (
            1
            - self.Nsquared * ZZZ_stagZ / self.g
            + self.alphax * np.sin(self.kappax * XXX_stagZ)
            + self.alphay * np.sin(self.kappay * YYY_stagZ)
        )
        drho_dx = np.diff(rho_stagX, axis=0)/self.dX
        drho_dy = np.diff(rho_stagY, axis=1)/self.dY
        drho_dz = np.diff(rho_stagZ, axis=2)/self.dZ
        # Due to the definition depth in Parcels! Normally these should be negative
        self.Sx = drho_dx/drho_dz
        self.Sy = drho_dy/drho_dz
        self.Sabs2 = self.Sx**2 + self.Sy**2

    def compute_slope_deriv(self):
        '''
        Computes one-sided finite-difference derivatives of the slope fields
        (dSxdx, dSxdy, dSxdz, dSydx, dSydy, dSydz); each array loses one point
        along its differenced axis.
        '''
        if not hasattr(self, "XXX"):
            raise RuntimeError("Cannot compute a staggered rho field before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
        if not hasattr(self, 'X_stag'):
            self.create_staggered_grid()
        if not hasattr(self, 'Sx'):
            self.compute_slope()
        self.dSxdx = np.diff(self.Sx, axis=0)/self.dX
        self.dSxdy = np.diff(self.Sx, axis=1)/self.dY
        self.dSxdz = np.diff(self.Sx, axis=2)/self.dZ
        self.dSydx = np.diff(self.Sy, axis=0)/self.dX
        self.dSydy = np.diff(self.Sy, axis=1)/self.dY
        self.dSydz = np.diff(self.Sy, axis=2)/self.dZ

    def isopycnal_array(self, x, y, rho_iso=1040):
        '''
        Returns the depth z of the rho_iso isopycnal at position(s) (x, y),
        inverting the analytic density expression.
        '''
        z_iso = self.g / self.Nsquared * (
            1 - rho_iso / self.rho0
            + self.alphax * np.sin(self.kappax * x)
            + self.alphay * np.sin(self.kappay * y)
        )
        return z_iso

    def isopycnal_grid(self, rho, x_iso, y_iso):
        '''
        Returns (ZZ, XX, YY): the depth of the rho isopycnal on the meshgrid
        spanned by x_iso and y_iso ('ij' indexing).
        '''
        XX, YY = np.meshgrid(x_iso, y_iso, indexing='ij')
        ZZ = (
            self.g
            / self.Nsquared
            * (
                (1 - rho / self.rho0)
                + self.alphax * np.sin(self.kappax * XX)
                + self.alphay * np.sin(self.kappay * YY)
            )
        )
        return ZZ, XX, YY
return ZZ, XX, YY | 46.082707 | 169 | 0.56355 | import numpy as np
class densityField:
def __init__(self, g=10, Nsquared=1e-5, alphax=1.1e-3, alphay=1e-3, kappax=2e-5/np.pi, kappay=2e-5/np.pi, rho0=1025):
self.g = g # gravitational acceleration [ms^-2]
self.Nsquared = Nsquared # Square of buoyancy frequency [s^-2]
self.alphax = alphax # zonal amplitude
self.alphay = alphay # meridional amplitude
self.kappax = kappax # zonal wavenumber [m^-1]
self.kappay = kappay # meridional wavenumber [m^-1]
self.rho0 = rho0 # background density [kg m^-3]
def create_interpolated_density_grid(self, nx=101, ny=201, nz=301, Lx=1e6, Ly=2e6, H=3000, rhotype='averaged'):
self.nx = nx
self.ny = ny
self.nz = nz
self.Lx = Lx
self.Ly = Ly
self.H = H
self.X = np.linspace(0, Lx, nx)
self.Y = np.linspace(0, Ly, ny)
self.Z = np.linspace(0, -H, nz)
self.dX = np.abs(self.X[1] - self.X[0])
self.dY = np.abs(self.Y[1] - self.Y[0])
self.dZ = np.abs(self.Z[1] - self.Z[0])
self.XXX, self.YYY, self.ZZZ = np.meshgrid(self.X, self.Y, self.Z, indexing='ij')
if rhotype == 'interpolated':
rho_interpolated = self.rho0 * (
1
- self.Nsquared * self.ZZZ / self.g
+ self.alphax * np.sin(self.kappax * self.XXX)
+ self.alphay * np.sin(self.kappay * self.YYY)
)
self.rho_interpolated
if rhotype == 'averaged':
# Create staggered versions of original grid.
self.create_staggered_grid()
integrated_rho = self.rho0 * (self.XXX_stag * self.YYY_stag * self.ZZZ_stag
- self.Nsquared * self.ZZZ_stag **2 * self.XXX_stag * self.YYY_stag / (2 * self.g)
- self.alphax * np.cos(self.kappax * self.XXX_stag) * self.YYY_stag * self.ZZZ_stag / self.kappax
- self.alphay * np.cos(self.kappay * self.YYY_stag) * self.XXX_stag * self.ZZZ_stag / self.kappay)
self.rho_averaged = np.diff(np.diff(np.diff(integrated_rho, axis=0), axis=1), axis=2)/(self.dX * self.dY * self.dZ)
def create_staggered_grid(self):
if not hasattr(self, "XXX"):
raise RuntimeError("Cannot create a stagered grid before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
self.X_stag = np.linspace(-self.dX, self.Lx, self.nx+1) + self.dX/2
self.Y_stag = np.linspace(-self.dY, self.Ly, self.ny+1) + self.dY/2
# Due to the definition of Z in Parcels!
self.Z_stag = np.linspace(self.dZ, -self.H, self.nz+1) - self.dZ/2
self.XXX_stag, self.YYY_stag, self.ZZZ_stag = np.meshgrid(self.X_stag, self.Y_stag, self.Z_stag, indexing='ij')
def compute_slope(self):
if not hasattr(self, "XXX"):
raise RuntimeError("Cannot compute a staggered rho field before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
if not hasattr(self, 'X_stag'):
self.create_staggered_grid()
XXX_stagX, YYY_stagX, ZZZ_stagX = np.meshgrid(self.X_stag, self.Y, self.Z, indexing='ij')
XXX_stagY, YYY_stagY, ZZZ_stagY = np.meshgrid(self.X, self.Y_stag, self.Z, indexing='ij')
XXX_stagZ, YYY_stagZ, ZZZ_stagZ = np.meshgrid(self.X, self.Y, self.Z_stag, indexing='ij')
rho_stagX = self.rho0 * (
1
- self.Nsquared * ZZZ_stagX / self.g
+ self.alphax * np.sin(self.kappax * XXX_stagX)
+ self.alphay * np.sin(self.kappay * YYY_stagX)
)
rho_stagY = self.rho0 * (
1
- self.Nsquared * ZZZ_stagY / self.g
+ self.alphax * np.sin(self.kappax * XXX_stagY)
+ self.alphay * np.sin(self.kappay * YYY_stagY)
)
rho_stagZ = self.rho0 * (
1
- self.Nsquared * ZZZ_stagZ / self.g
+ self.alphax * np.sin(self.kappax * XXX_stagZ)
+ self.alphay * np.sin(self.kappay * YYY_stagZ)
)
drho_dx = np.diff(rho_stagX, axis=0)/self.dX # np.gradient(rho, axis=1)
drho_dy = np.diff(rho_stagY, axis=1)/self.dY # np.gradient(rho, axis=1)
drho_dz = np.diff(rho_stagZ, axis=2)/self.dZ # np.gradient(rho, axis=0)
# Due to the definition depth in Parcels! Normally these should be negative
self.Sx = drho_dx/drho_dz
self.Sy = drho_dy/drho_dz
self.Sabs2 = self.Sx**2 + self.Sy**2
def compute_slope_deriv(self):
if not hasattr(self, "XXX"):
raise RuntimeError("Cannot compute a staggered rho field before initializing an interpolated density grid (use `create_interpolated_density_grid()` first).")
if not hasattr(self, 'X_stag'):
self.create_staggered_grid()
if not hasattr(self, 'Sx'):
self.compute_slope()
self.dSxdx = np.diff(self.Sx, axis=0)/self.dX
self.dSxdy = np.diff(self.Sx, axis=1)/self.dY
self.dSxdz = np.diff(self.Sx, axis=2)/self.dZ
self.dSydx = np.diff(self.Sy, axis=0)/self.dX
self.dSydy = np.diff(self.Sy, axis=1)/self.dY
self.dSydz = np.diff(self.Sy, axis=2)/self.dZ
def isopycnal_array(self, x, y, rho_iso=1040):
z_iso = self.g / self.Nsquared* (1 - rho_iso / self.rho0 + self.alphax * np.sin(self.kappax * x)+ self.alphay * np.sin(self.kappay * y))
return z_iso
def isopycnal_grid(self, rho, x_iso, y_iso):
XX, YY = np.meshgrid(x_iso, y_iso, indexing='ij')
ZZ = (
self.g
/ self.Nsquared
* (
(1 - rho / self.rho0)
+ self.alphax * np.sin(self.kappax * XX)
+ self.alphay * np.sin(self.kappay * YY)
)
)
return ZZ, XX, YY | 0 | 0 |
b5961ce174a5e07a6544a64153caec0e5af3facd | 3,763 | py | Python | src/day16.py | blu3r4y/AdventOfCode2018 | 5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5 | [
"MIT"
] | 2 | 2019-01-02T22:57:13.000Z | 2019-05-07T23:13:25.000Z | src/day16.py | blu3r4y/AdventOfCode2018 | 5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5 | [
"MIT"
] | null | null | null | src/day16.py | blu3r4y/AdventOfCode2018 | 5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5 | [
"MIT"
] | 1 | 2021-12-06T12:38:26.000Z | 2021-12-06T12:38:26.000Z | # Advent of Code 2018, Day 16
# (c) blu3r4y
from collections import namedtuple
from parse import parse
OPERATIONS = ['addr', 'addi', 'mulr', 'muli', 'banr', 'bani', 'borr', 'bori',
'setr', 'seti', 'gtir', 'gtri', 'gtrr', 'eqir', 'eqri', 'eqrr']
Observation = namedtuple("Observation", ["instruction", "before", "after"])
def part1(observations):
three_or_more = 0
for obsv in observations:
# execute all possible candidates
num_matches = 0
for op in OPERATIONS:
if obsv.after == execute(obsv.instruction, obsv.before, op):
num_matches += 1
# count observations with three or more possible operations
if num_matches >= 3:
three_or_more += 1
return three_or_more
def part2(observations, program):
# store possible candidates for every opcode
operations = {i: set(OPERATIONS) for i in range(len(OPERATIONS))}
for obsv in observations:
matching_operations = set()
opcode = obsv.instruction[0]
# execute all possible candidates
for op in operations[opcode]:
if obsv.after == execute(obsv.instruction, obsv.before, op):
matching_operations.add(op)
# keep only the matching operations
operations[opcode] = matching_operations
# if we uniquely identified an operation ...
if len(matching_operations) == 1:
unique_op = next(iter(matching_operations))
# ... remove it from the other mappings
for key in set(operations.keys()) - {opcode}:
operations[key].discard(unique_op)
# map set values to scalar
operations = {i: ops.pop() for i, ops in operations.items()}
# interpret the program
reg = [0, 0, 0, 0]
for instruction in program:
reg = execute(instruction, reg, operations[instruction[0]])
return reg[0]
def execute(instruction, reg, op):
_, a, b, c = instruction
reg = list(reg) # copy register
if op == 'addr':
reg[c] = reg[a] + reg[b]
elif op == 'addi':
reg[c] = reg[a] + b
elif op == 'mulr':
reg[c] = reg[a] * reg[b]
elif op == 'muli':
reg[c] = reg[a] * b
elif op == 'banr':
reg[c] = reg[a] & reg[b]
elif op == 'bani':
reg[c] = reg[a] & b
elif op == 'borr':
reg[c] = reg[a] | reg[b]
elif op == 'bori':
reg[c] = reg[a] | b
elif op == 'setr':
reg[c] = reg[a]
elif op == 'seti':
reg[c] = a
elif op == 'gtir':
reg[c] = 1 if a > reg[b] else 0
elif op == 'gtri':
reg[c] = 1 if reg[a] > b else 0
elif op == 'gtrr':
reg[c] = 1 if reg[a] > reg[b] else 0
elif op == 'eqir':
reg[c] = 1 if a == reg[b] else 0
elif op == 'eqri':
reg[c] = 1 if reg[a] == b else 0
elif op == 'eqrr':
reg[c] = 1 if reg[a] == reg[b] else 0
return reg
def _parse(lines):
observations, program, i = [], [], 0
# parse observations
while i < len(lines):
before = parse("Before: [{:d}, {:d}, {:d}, {:d}]", lines[i].strip())
instruction = parse("{:d} {:d} {:d} {:d}", lines[i + 1].strip())
after = parse("After: [{:d}, {:d}, {:d}, {:d}]", lines[i + 2].strip())
i += 4
if not (before and after and instruction):
break
observations.append(Observation([*instruction], [*before], [*after]))
# parse program
for line in lines[i - 2:]:
program.append(list(map(int, line.strip().split(' '))))
return observations, program
if __name__ == "__main__":
print(part1(_parse(open(r"../assets/day16.txt").readlines())[0]))
print(part2(*_parse(open(r"../assets/day16.txt").readlines())))
| 28.08209 | 79 | 0.549296 | # Advent of Code 2018, Day 16
# (c) blu3r4y
from collections import namedtuple
from parse import parse
OPERATIONS = ['addr', 'addi', 'mulr', 'muli', 'banr', 'bani', 'borr', 'bori',
'setr', 'seti', 'gtir', 'gtri', 'gtrr', 'eqir', 'eqri', 'eqrr']
Observation = namedtuple("Observation", ["instruction", "before", "after"])
def part1(observations):
three_or_more = 0
for obsv in observations:
# execute all possible candidates
num_matches = 0
for op in OPERATIONS:
if obsv.after == execute(obsv.instruction, obsv.before, op):
num_matches += 1
# count observations with three or more possible operations
if num_matches >= 3:
three_or_more += 1
return three_or_more
def part2(observations, program):
# store possible candidates for every opcode
operations = {i: set(OPERATIONS) for i in range(len(OPERATIONS))}
for obsv in observations:
matching_operations = set()
opcode = obsv.instruction[0]
# execute all possible candidates
for op in operations[opcode]:
if obsv.after == execute(obsv.instruction, obsv.before, op):
matching_operations.add(op)
# keep only the matching operations
operations[opcode] = matching_operations
# if we uniquely identified an operation ...
if len(matching_operations) == 1:
unique_op = next(iter(matching_operations))
# ... remove it from the other mappings
for key in set(operations.keys()) - {opcode}:
operations[key].discard(unique_op)
# map set values to scalar
operations = {i: ops.pop() for i, ops in operations.items()}
# interpret the program
reg = [0, 0, 0, 0]
for instruction in program:
reg = execute(instruction, reg, operations[instruction[0]])
return reg[0]
def execute(instruction, reg, op):
_, a, b, c = instruction
reg = list(reg) # copy register
if op == 'addr':
reg[c] = reg[a] + reg[b]
elif op == 'addi':
reg[c] = reg[a] + b
elif op == 'mulr':
reg[c] = reg[a] * reg[b]
elif op == 'muli':
reg[c] = reg[a] * b
elif op == 'banr':
reg[c] = reg[a] & reg[b]
elif op == 'bani':
reg[c] = reg[a] & b
elif op == 'borr':
reg[c] = reg[a] | reg[b]
elif op == 'bori':
reg[c] = reg[a] | b
elif op == 'setr':
reg[c] = reg[a]
elif op == 'seti':
reg[c] = a
elif op == 'gtir':
reg[c] = 1 if a > reg[b] else 0
elif op == 'gtri':
reg[c] = 1 if reg[a] > b else 0
elif op == 'gtrr':
reg[c] = 1 if reg[a] > reg[b] else 0
elif op == 'eqir':
reg[c] = 1 if a == reg[b] else 0
elif op == 'eqri':
reg[c] = 1 if reg[a] == b else 0
elif op == 'eqrr':
reg[c] = 1 if reg[a] == reg[b] else 0
return reg
def _parse(lines):
observations, program, i = [], [], 0
# parse observations
while i < len(lines):
before = parse("Before: [{:d}, {:d}, {:d}, {:d}]", lines[i].strip())
instruction = parse("{:d} {:d} {:d} {:d}", lines[i + 1].strip())
after = parse("After: [{:d}, {:d}, {:d}, {:d}]", lines[i + 2].strip())
i += 4
if not (before and after and instruction):
break
observations.append(Observation([*instruction], [*before], [*after]))
# parse program
for line in lines[i - 2:]:
program.append(list(map(int, line.strip().split(' '))))
return observations, program
if __name__ == "__main__":
print(part1(_parse(open(r"../assets/day16.txt").readlines())[0]))
print(part2(*_parse(open(r"../assets/day16.txt").readlines())))
| 0 | 0 |
d31ca228a7c49cb870496d2465087e6fff372030 | 2,685 | py | Python | programming_languages_classification/test.py | contimatteo/Programming-Languages-Classification | 34ccf1bd403f55226ed5131d57265df45d314b6f | [
"MIT"
] | 1 | 2022-03-18T12:54:52.000Z | 2022-03-18T12:54:52.000Z | programming_languages_classification/test.py | contimatteo/Programming-Languages-Classification | 34ccf1bd403f55226ed5131d57265df45d314b6f | [
"MIT"
] | 5 | 2021-11-10T19:58:25.000Z | 2022-03-19T18:17:41.000Z | programming_languages_classification/test.py | contimatteo/programming-language-classifier | 60847ab91cff4dc20ded1a024d272c75956194a0 | [
"MIT"
] | null | null | null | from keras.models import load_model
import keras.preprocessing.text as kpt
from keras.preprocessing.sequence import pad_sequences
import sys
import os
import json
import numpy as np
from utils import ConfigurationManager, FileManager
##
global dictionary
global model
dictionaryUrl = os.path.join(FileManager.getRootUrl(), 'tmp/wordindex.json')
dictionary = json.loads(FileManager.readFile(dictionaryUrl))
modelUrl = os.path.join(FileManager.getRootUrl(), 'tmp/code_model.h5')
model = load_model(modelUrl)
def convert_text_to_index_array(text):
# one really important thing that `text_to_word_sequence` does
# is make all texts the same length -- in this case, the length
# of the longest text in the set.
wordvec = []
for word in kpt.text_to_word_sequence(text):
if word in dictionary:
if dictionary[word] <= 100000:
wordvec.append([dictionary[word]])
else:
wordvec.append([0])
else:
wordvec.append([0])
return wordvec
##
def main():
data = {"success": False}
languages = ConfigurationManager.getLanguages()
matched = 0
totalExamples = 0
for languageFolder in FileManager.getLanguagesFolders(FileManager.datasets['testing']['url']):
language = str(languageFolder.name).lower()
for exampleFolder in FileManager.getExamplesFolders(languageFolder.path):
totalExamples += 1
X_test = []
originalFileContent = FileManager.readFile(FileManager.getOriginalFileUrl(exampleFolder.path))
code_snip = originalFileContent
# print(code_snip, file=sys.stdout)
word_vec = convert_text_to_index_array(code_snip)
X_test.append(word_vec)
X_test = pad_sequences(X_test, maxlen=100)
# print(X_test[0].reshape(1,X_test.shape[1]), file=sys.stdout)
y_prob = model.predict(X_test[0].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]
a = np.array(y_prob)
idx = np.argmax(a)
if str(languages[idx]) == language:
matched += 1
# data["predictions"] = []
# for i in range(len(languages)):
# # print(languages[i], file=sys.stdout)
# r = {"label": languages[i], "probability": format(y_prob[i] * 100, '.2f')}
# data["predictions"].append(r)
print('')
print('')
print('totalExamples = ' + str(totalExamples))
print('matched = ' + str(matched))
print('matched / totalExamples = ' + str(matched / totalExamples))
print('')
print('')
##
if __name__ == "__main__":
main()
| 29.833333 | 106 | 0.633147 | from keras.models import load_model
import keras.preprocessing.text as kpt
from keras.preprocessing.sequence import pad_sequences
import sys
import os
import json
import numpy as np
from utils import ConfigurationManager, FileManager
##
global dictionary
global model
dictionaryUrl = os.path.join(FileManager.getRootUrl(), 'tmp/wordindex.json')
dictionary = json.loads(FileManager.readFile(dictionaryUrl))
modelUrl = os.path.join(FileManager.getRootUrl(), 'tmp/code_model.h5')
model = load_model(modelUrl)
def convert_text_to_index_array(text):
# one really important thing that `text_to_word_sequence` does
# is make all texts the same length -- in this case, the length
# of the longest text in the set.
wordvec = []
for word in kpt.text_to_word_sequence(text):
if word in dictionary:
if dictionary[word] <= 100000:
wordvec.append([dictionary[word]])
else:
wordvec.append([0])
else:
wordvec.append([0])
return wordvec
##
def main():
data = {"success": False}
languages = ConfigurationManager.getLanguages()
matched = 0
totalExamples = 0
for languageFolder in FileManager.getLanguagesFolders(FileManager.datasets['testing']['url']):
language = str(languageFolder.name).lower()
for exampleFolder in FileManager.getExamplesFolders(languageFolder.path):
totalExamples += 1
X_test = []
originalFileContent = FileManager.readFile(FileManager.getOriginalFileUrl(exampleFolder.path))
code_snip = originalFileContent
# print(code_snip, file=sys.stdout)
word_vec = convert_text_to_index_array(code_snip)
X_test.append(word_vec)
X_test = pad_sequences(X_test, maxlen=100)
# print(X_test[0].reshape(1,X_test.shape[1]), file=sys.stdout)
y_prob = model.predict(X_test[0].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]
a = np.array(y_prob)
idx = np.argmax(a)
if str(languages[idx]) == language:
matched += 1
# data["predictions"] = []
# for i in range(len(languages)):
# # print(languages[i], file=sys.stdout)
# r = {"label": languages[i], "probability": format(y_prob[i] * 100, '.2f')}
# data["predictions"].append(r)
print('')
print('')
print('totalExamples = ' + str(totalExamples))
print('matched = ' + str(matched))
print('matched / totalExamples = ' + str(matched / totalExamples))
print('')
print('')
##
if __name__ == "__main__":
main()
| 0 | 0 |
2e93f597b4ad68b69e4599e8fc30321be3c05d7a | 31,071 | py | Python | src/Animate/Scripts.py | henkjannl/py-animate | dbc93c8a264ef008954901ea76286331ad1737ee | [
"MIT"
] | null | null | null | src/Animate/Scripts.py | henkjannl/py-animate | dbc93c8a264ef008954901ea76286331ad1737ee | [
"MIT"
] | null | null | null | src/Animate/Scripts.py | henkjannl/py-animate | dbc93c8a264ef008954901ea76286331ad1737ee | [
"MIT"
] | null | null | null | import pandas as pd
from PIL import Image # www.pythonware.com/library/pil/handbook
from PIL import ImageFont, ImageDraw, ImageEnhance
from PIL import ImageFilter
import os
#import time
import logging
from Animate.Items import *
from Animate.Properties import *
from Animate.Constants import *
# Debug log file, written into the current working directory.
# DEBUG level captures the per-frame tracing emitted throughout this module.
LOG_FILENAME = '__logfile.txt'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
def SelectFont(Directories, Fonts):
    """Return the path of the first loadable font file, or None.

    Tries every font name in *Fonts* (in order of preference) against every
    directory in *Directories*, and returns the full path of the first
    combination that PIL's ImageFont.truetype() accepts.

    :param Directories: candidate directories to search, most preferred first
    :param Fonts: candidate font file names, most preferred first
    :return: full path of a loadable font file, or None when all attempts fail
    """
    for Font in Fonts:
        for Path in Directories:
            FontName = os.path.join(Path, Font)
            try:
                # Probe only; the size used here is irrelevant to the caller.
                ImageFont.truetype(FontName, 20)
                return FontName
            except OSError:
                # Missing file or unreadable font: log it and try the next
                # candidate.  (Narrowed from a bare `except`, which also
                # swallowed KeyboardInterrupt and programming errors.)
                logging.debug('%s not successful' % FontName)
    print('All attempts to load fonts failed')
    return None
def isNumber(somePandasValue):
    """Return True when *somePandasValue* holds a usable number.

    Cells read with pandas may be missing (None/NaN) or contain text; only
    real int/float values count.  Numpy scalar types are accepted as well,
    because pandas may hand back e.g. np.int64 for numeric cells, and
    np.int64 is *not* an instance of the Python ``int`` type.

    :param somePandasValue: a single cell value taken from a DataFrame
    :return: True for a non-null int/float (or numpy equivalent), else False
    """
    import numpy as np  # local import: numpy always ships with pandas

    if pd.isnull(somePandasValue):
        return False
    # np.floating is largely redundant (np.float64 subclasses float) but is
    # included for completeness; np.integer is the case that matters.
    return isinstance(somePandasValue, (int, float, np.integer, np.floating))
def isString(somePandasValue):
    """Return True when the cell value is a non-null string.

    :param somePandasValue: a single cell value taken from a DataFrame
    :return: True for a string, False for missing cells or any other type
    """
    # Missing cells (None/NaN) never count, whatever their nominal type.
    return (not pd.isnull(somePandasValue)) and isinstance(somePandasValue, str)
class Script():
    def __init__(self, FileName, SheetName, ScriptList):
        """Set up an empty script bound to one sheet of the Excel workbook.

        :param FileName: path of the Excel workbook this script is read from
        :param SheetName: name of the sheet inside the workbook
        :param ScriptList: shared dict of all scripts, keyed by sheet name
        """
        logging.debug(' Script.__init__(%s, %s)' % (FileName, SheetName) )
        # Identity of this script within the workbook
        self.FileName = FileName
        self.SheetName = SheetName
        self.ScriptList = ScriptList
        self.IsCanvas = False  # a canvas keeps its previous frame as background
        self.FirstImage = True  # True until the first frame has been rendered
        self.ImageDir = 'Pictures'  # directory where item images are looked up
        self.FirstFrame = 0  # Allows the processing of a subset of frames
        self.LastFrame = -1
        self.FramesPerSecond = 10
        self.ShowTime = False  # Display the time in each frame
        self.Movie = False  # Can be overridden by filename of movie
        self.AnimatedGIF = False  # Can be overridden by filename of animated gif
        self.MaxTime = 0  # Largest time, retrieved from the parser
        self.TimeOffset = 0.0  # Script, assembly or canvas can be run with an offset to the global time
        self.Width = 800  # Width of the output image
        self.Height = 600  # Height of the output image
        self.Items = ItemDict()  # Dictionary of items
        # List of (time, item, back/front) tuples
        self.Zbuffer = []
        self.ZbufferIndex = 0
        # List of Items (current back-to-front drawing order)
        self.Zorder = []
        # Picture that was processed last (False until a frame is rendered)
        self.Picture = False
        self.PictureFrame = -1
    def ParseScript(self, FileName, SheetName):
        """Parse one script sheet of the workbook into this Script object.

        Each valid row starts with a number (the time in seconds) followed by
        a command keyword and its arguments.  Commands either configure the
        output (WIDTH, MOVIE, ...), pull in other sheets (TABLE, SCRIPT,
        ASSEMBLY, CANVAS) or schedule changes to item properties over time.

        :param FileName: path of the Excel workbook
        :param SheetName: name of the sheet to parse
        :raises AssertionError: when a command's arguments have the wrong type
            or reference a file that does not exist
        """
        logging.debug(' Script.ParseScript(%s, %s)' % (FileName, SheetName))
        # Open excel file with frame data
        df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
        print(' - parsing script %s' % SheetName)
        for Row in range(df.shape[0]):
            # A row contains valid data if the first cell contains a number
            if isNumber(df.loc[Row,0]):
                time = df.loc[Row,0]
                command = df.loc[Row,1].upper().strip()
                # Track the largest time seen; used later to deploy properties
                if self.MaxTime<time: self.MaxTime=time
                if command == 'WIDTH':
                    # Determine the width of the output frames
                    assert isNumber(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
                    self.Width = int(df.loc[Row,2])
                elif command == 'HEIGHT':
                    # Determine the height of the output frames
                    assert isNumber(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
                    self.Height = int(df.loc[Row,2])
                elif command == 'FRAMESPERSECOND':
                    # Sets the number of frames per second for the whole movie
                    assert isNumber(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
                    assert (df.loc[Row,2] >0 ), \
                        "Frames per second in sheet %s at row %d should be larger than 0" % (SheetName, Row+1)
                    self.FramesPerSecond = df.loc[Row,2]
                elif command == 'FIRSTFRAME':
                    # Determine the first frame to be processed,
                    # if not all frames must be processed. For debugging
                    assert isNumber(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
                    self.FirstFrame = int(df.loc[Row,2])
                elif command == 'LASTFRAME':
                    # Determine the last frame to be processed,
                    # if not all frames must be processed. For debugging
                    assert isNumber(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
                    self.LastFrame = int(df.loc[Row,2])
                elif command == 'SHOWTIME':
                    # Write the time in the lower left corner of the frames, for debug purposes
                    self.ShowTime = True
                elif command == 'HIDETIME':
                    # Do not write the time
                    self.ShowTime = False
                elif command == 'MOVIE':
                    # Filename of the movie to render after generating frames
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a filename for the movie" % (command, Row+1, SheetName)
                    self.Movie= df.loc[Row,2]
                    print(" - movie {movie} will be created after generating the frames".format(movie=self.Movie))
                elif command == 'ANIMATEDGIF':
                    # Filename of the animated GIF to render after generating frames
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a filename for the animated gif" % (command, Row+1, SheetName)
                    self.AnimatedGIF= df.loc[Row,2]
                    print("- animated GIF {gif} will be created after generating the frames".format(gif=self.AnimatedGIF))
                elif command == 'TABLE':
                    # Do not create a new script object, but import the commands in the current script
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the table name" % (command, Row+1, SheetName)
                    sheetname = df.loc[Row,2].strip()
                    self.ParseTable(self.FileName, sheetname)
                elif command == 'SCRIPT':
                    # Do not create a new script object, but import the commands in the current script
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the script name" % (command, Row+1, SheetName)
                    sheetname = df.loc[Row,2].strip()
                    self.ParseScript(self.FileName, sheetname)
                elif command == 'ASSEMBLY':
                    # Create a new script object and use the image created by this
                    # script as feed for this item
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the assembly name" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    sheetname = df.loc[Row,3]
                    # If the script is not yet in the list, create and parse it
                    if not sheetname in self.ScriptList:
                        NewScript = Script(FileName, sheetname, self.ScriptList)
                        self.ScriptList[sheetname] = NewScript
                        NewScript.ParseScript(FileName, sheetname)
                    # Assign the script to the item
                    # ToDo: Implement item type directly
                    # ToDo: Implement change of script as function of time
                    self.Items[itemname].AddScript( time, sheetname )
                elif command == 'CANVAS':
                    # A canvas is an assembly of which the background is not reset for a new frame
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    sheetname = df.loc[Row,3]
                    # If the script is not yet in the list, create and parse it
                    if not sheetname in self.ScriptList:
                        NewScript = Script(FileName, sheetname, self.ScriptList)
                        NewScript.IsCanvas = True
                        self.ScriptList[sheetname] = NewScript
                        NewScript.ParseScript(FileName, sheetname)
                    # Assign the script to the item
                    # ToDo: Implement item type directly
                    # ToDo: Implement change of script as function of time
                    self.Items[itemname].AddCanvas( time, sheetname )
                elif command == 'IMAGE':
                    # Assign a new filename for an image item
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the item name" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    filename = os.path.join(self.ImageDir, df.loc[Row,3])
                    assert os.path.isfile(filename), \
                        "%s at row %d could not find file %s" % (command, Row+1, filename)
                    self.Items[itemname].AddImage( time, filename )
                elif command == 'MASK':
                    # Assign a new filename for a mask item
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    filename = os.path.join(self.ImageDir, df.loc[Row,3])
                    assert os.path.isfile(filename), \
                        "%s at row %d could not find file %s" % (command, Row+1, filename)
                    self.Items[itemname].AddMask( time, filename )
                elif command == 'TEXT':
                    # Assign a new title for a text item
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    title = df.loc[Row,3]
                    self.Items[itemname].AddText( time, title )
                elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
                                 'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
                    # Schedule a new value for a numeric item property
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects an item name in column C" % (command, Row+1, SheetName)
                    assert isNumber(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a number in column D" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    value = df.loc[Row,3]
                    self.Items[itemname].Properties[command].Append(time, value)
                elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
                    # Determine linear or cycloid movement
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    move = df.loc[Row,3].strip().upper()
                    # CheckMove maps the movement keyword to its internal constant
                    if move in CheckMove:
                        self.Items[itemname].Properties[command].Append(time, CheckMove[move])
                    else:
                        print("Did not recognize type of movement on row %d." % (Row+1))
                elif command in ['TEXTCOLOR', 'FONT']:
                    # Schedule a new text color or font for a text item
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
                    assert isString(df.loc[Row,3]), \
                        "%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    textcolor = df.loc[Row,3].strip()
                    self.Items[itemname].Properties[command].Append(time, textcolor)
                elif command == 'BRINGTOFRONT':
                    # Bring the item to front at this time position
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    self.Zbuffer.append( ( time, itemname, FRONT) )
                elif command == 'SENDTOBACK':
                    # Send the item to back at this time position
                    assert isString(df.loc[Row,2]), \
                        "%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
                    itemname = df.loc[Row,2].upper().strip()
                    self.Zbuffer.append( ( time, itemname, BACK) )
                else:
                    print("Command %s not recognized on row %d." % (command, Row+1))
def ParseTable(self, FileName, SheetName):
logging.debug(' Script.ParseTable(%s, %s)' % (FileName, SheetName))
# Open excel file with frame data
df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
# Investigate which data each column contains
print(' - parsing table %s' % SheetName)
for Row in range(2, df.shape[0]):
# Only process rows with a time in the first column
if isNumber(df.loc[Row,0]):
time = df.loc[Row,0]
# Increase time if the table exceeds the maximum
if self.MaxTime<time: self.MaxTime=time
for Col in range(1, df.shape[1]):
# Only process columns with an existing object in the first row and a command in the second row
if isString(df.loc[0,Col]) and isString(df.loc[1,Col]) and\
len(df.loc[0,Col])>0 and len(df.loc[1,Col])>0:
itemname = df.loc[0,Col].upper().strip()
command = df.loc[1,Col].upper().strip()
# Only process items that have already been created in another script
if itemname in self.Items:
item = self.Items[itemname]
if command == 'IMAGE':
if item.ItemType == IT_IMAGE:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddImage( time, filename )
elif command == 'MASK':
if self.Items[item].ItemType == IT_MASK:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddMask( time, filename )
elif command == 'TEXT':
if item.ItemType == IT_TEXT:
# Assign a new title for a text item
if isString(df.loc[Row,Col]):
text = df.loc[Row,Col]
self.Items[itemname].AddText( time, text )
elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
# Set a new float property
if isNumber(df.loc[Row,Col]):
val = df.loc[Row,Col]
self.Items[itemname].Properties[command].Append(time, val)
elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
# Determine type of movement
if isString(df.loc[Row,Col]):
move = df.loc[Row,Col].strip().upper()
if move in CheckMove:
self.Items[itemname].Properties[command].Append(time, CheckMove[move])
else:
print("Did not recognize type of movement on row %d." % (Row+1))
elif command in ['TEXTCOLOR', 'FONT']:
if isString(df.loc[Row,Col]):
textcolor = df.loc[Row,Col].strip()
self.Items[itemname].Properties[command].Append(time, textcolor)
else:
print('Command: ', command)
print('Column: ', Col+1)
print("Command %s not recognized on col %d." % (command, Col+1))
def StandardChecks(self):
print(' - checking script %s which has %d items' % (self.SheetName, len(self.Items) ))
# Do some standard checks after parsing
OK = True
self.TimeOffsetUsed=False
for i in self.Items.values():
i.StandardChecks()
if i.TimeOffsetUsed:
self.TimeOffsetUsed=True
if (i.ItemType == IT_IMAGE):
if len(i.Properties['IMAGE'].Sequence)==0:
print('ERROR: %s has NO images' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['IMAGE'].Sequence:
if not os.path.isfile(filename):
print('Image not found: %s at tim %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_MASK):
if len(i.Properties['MASK'].Sequence)==0:
print('ERROR: %s has NO mask' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['MASK'].Sequence:
if not os.path.isfile(filename):
print('Mask not found: %s at tim %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_TEXT):
if len(i.Properties['TEXT'].Sequence)==0:
print('ERROR: %s has NO lines of text' % i.ItemName)
OK=False
return OK
def Deploy(self, MaxTime):
logging.debug('')
logging.debug('* DEPLOYING SCRIPT %s' % self.SheetName)
for item in self.Items.values():
item.Deploy(MaxTime)
if not self.Zbuffer:
# The Zbuffer has no items because the user did not specify
# any BRINGTOFRONT or SENDTOBACK commands
# Get the name of a random item
itemname = list(self.Items.keys())[0]
self.Zbuffer.append( ( 0, itemname, FRONT) )
self.Zbuffer.sort()
time, item, direction = self.Zbuffer[-1]
self.Zbuffer.append( (MaxTime, item, direction) )
self.Zbuffer.sort()
# Determine the order of the items at time = 0
self.ZbufferIndex = 0
# list() means we create a copy
self.Zorder = list(self.Items.keys())
def GetPicture(self, Time, Frame):
# If exactly the same image was calculated before,
# use that image
#if Frame != self.PictureFrame and not self.TimeOffsetUsed:
if True:
logging.debug('')
logging.debug('* SCRIPT %s IS GENERATING FRAME %.5d at time %.2f' % (self.SheetName, Frame, Time ))
# Start with a transparent image
if (not self.IsCanvas) or self.FirstImage:
self.Picture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
self.FirstImage=False
# Determine the Z-order at the desired time
while True:
time, item, direction = self.Zbuffer[self.ZbufferIndex]
if item not in self.Zorder:
print('Z-order failure: item %s not in script %s' % (item, self.SheetName) )
self.Zorder.remove(item)
if direction == FRONT:
self.Zorder.append(item)
else:
self.Zorder.insert(0, item)
if (self.Zbuffer[self.ZbufferIndex+1][0])>Time:
break
else:
self.ZbufferIndex+=1
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
# Draw each item
for itemname in self.Zorder:
Item = self.Items[itemname]
move = Item.Properties['OMOVE' ].Value(Time)
opacity = Item.Properties['OPACITY' ].Value(Time, move)
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
try:
logging.debug(' - Item %s:%s xpos= %.2f ypos= %.2f xscale= %.3f yscale= %.3f rot= %.3f opacity= %.3f' % (self.SheetName, itemname, xpos, ypos, sx, sy, rot, opacity))
except:
print('opacity', opacity)
print('xpos', xpos)
print('ypos', ypos)
print('sx', sx)
print('sy', sy)
print('rot', rot)
if opacity>0:
if Item.ItemType == IT_ASSY:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Assembly %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
logging.debug(' Script %s not in scriptlist!!:'% (script))
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
logging.debug(' Assembly %s continues:'% (self.SheetName))
if Item.ItemType == IT_CANVAS:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Canvas %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
elif Item.ItemType == IT_IMAGE:
image = Item.Properties['IMAGE'].Value(Time)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_MASK:
image = Item.Properties['MASK'].Value(Time)
logging.debug('Line 585 mask is %s' % image)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_TEXT:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
text = Item.Properties['TEXT' ].Value(Time)
textsize = int(Item.Properties['TEXTSIZE' ].Value(Time, LINEAR))
textcolor = Item.Properties['TEXTCOLOR'].Value(Time)
fontname = Item.Properties['FONT' ].Value(Time)
Directories = [ 'C:\\WINDOWS\\Fonts\\' ]
Fonts = [fontname, 'calibri.ttf', 'YanoneKaffeesatz-Regular.ttf', 'ARIALN.TTF', 'verdana.ttf', 'YanoneKaffeesatz-Light.ttf']
Face = ImageFont.truetype(SelectFont(Directories, Fonts), textsize)
Draw = ImageDraw.Draw(ItemPicture)
Draw.text( (0,0), text, fill=textcolor, font=Face)
# Retrieve the general properties
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
xpole = Item.Properties['XPOLE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
ypole = Item.Properties['YPOLE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
fi = math.pi/180*rot
sinfi = math.sin(fi)
cosfi = math.cos(fi)
w,h = ItemPicture.size
# Resize and rotate the ItemPicture
try:
ItemPicture=ItemPicture.resize( (int(sx*w+0.5), int(sy*h+0.5) ), Image.ANTIALIAS)
ItemPicture=ItemPicture.rotate(rot, expand=1)
except:
print('ERROR Script 663: Item %s:%s sx= %.2f sy= %.2f' % (self.SheetName, itemname, sx, sy))
break
wr,hr = ItemPicture.size
xt = xpos + xpole - ypole*sy*sinfi - xpole*sx*cosfi +0.5*w*sx*cosfi +0.5*h*sy*sinfi -0.5*wr
yt = ypos + ypole - ypole*sy*cosfi + xpole*sx*sinfi -0.5*w*sx*sinfi +0.5*h*sy*cosfi -0.5*hr
Mask = ItemPicture.convert("RGBA")
Mask = Image.blend(Image.new(ItemPicture.mode, ItemPicture.size, 0), ItemPicture, opacity)
if Item.ItemType != IT_MASK:
# Item is picture, assembly or canvas
self.Picture.paste( ItemPicture, (int(xt),int(yt)), Mask )
else:
# Item is mask
logging.debug(' - Applying mask for %s' % itemname)
# Start with a clean image with transparent background
CleanImage = Image.new("RGBA", (self.Width, self.Height), (0,0,0,0) )
# Use the mask rotated and translated
Mask = Image.new("L", (self.Width, self.Height), 0 )
Mask.paste( ItemPicture, (int(xt),int(yt)))
# Copy the image as-is with rotation and translation set to zero
CleanImage.paste( self.Picture, (0,0), Mask )
self.Picture = CleanImage.copy()
self.PictureFrame = Frame
return self.Picture.copy()
| 41.931174 | 186 | 0.491101 | import pandas as pd
from PIL import Image # www.pythonware.com/library/pil/handbook
from PIL import ImageFont, ImageDraw, ImageEnhance
from PIL import ImageFilter
import os
#import time
import logging
from Animate.Items import *
from Animate.Properties import *
from Animate.Constants import *
LOG_FILENAME = '__logfile.txt'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
def SelectFont(Directories, Fonts):
for Font in Fonts:
for Path in Directories:
try:
FontName=os.path.join(Path,Font)
SelectedFont = ImageFont.truetype(FontName, 20)
return FontName
except:
logging.debug('%s not successful' % FontName )
print('All attempts to load fonts failed')
def isNumber(somePandasValue):
if pd.isnull(somePandasValue):
return False
elif isinstance(somePandasValue, int):
return True
elif isinstance(somePandasValue, float):
return True
else:
return False
def isString(somePandasValue):
if pd.isnull(somePandasValue):
return False
elif isinstance(somePandasValue, str):
return True
else:
return False
class Script():
def __init__(self, FileName, SheetName, ScriptList):
logging.debug(' Script.__init__(%s, %s)' % (FileName, SheetName) )
self.FileName = FileName
self.SheetName = SheetName
self.ScriptList = ScriptList
self.IsCanvas = False
self.FirstImage = True
self.ImageDir = 'Pictures'
self.FirstFrame = 0 # Allows the processing of a subset of frames
self.LastFrame = -1
self.FramesPerSecond = 10
self.ShowTime = False # Display the time in each frame
self.Movie = False # Can be overridden by filename of movie
self.AnimatedGIF = False # Can be overridden by filename of animated gif
self.MaxTime = 0 # Largest time, retrieved from the parser
self.TimeOffset = 0.0 # Script, assembly or canvas can be run with an offset to the global time
self.Width = 800 # Width of the output image
self.Height = 600 # Height of the output image
self.Items = ItemDict() # Dictionary of items
# List of (time, item, back/front) tuples
self.Zbuffer = []
self.ZbufferIndex = 0
# List of Items
self.Zorder = []
# Picture that was processed last
self.Picture = False
self.PictureFrame = -1
def ParseScript(self, FileName, SheetName):
logging.debug(' Script.ParseScript(%s, %s)' % (FileName, SheetName))
# Open excel file with frame data
df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
print(' - parsing script %s' % SheetName)
for Row in range(df.shape[0]):
# A row contains valid data if the first cell contains a number
if isNumber(df.loc[Row,0]):
time = df.loc[Row,0]
command = df.loc[Row,1].upper().strip()
if self.MaxTime<time: self.MaxTime=time
if command == 'WIDTH':
# Determine the width of the output frames
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
self.Width = int(df.loc[Row,2])
elif command == 'HEIGHT':
# Determine the height of the output frames
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
self.Height = int(df.loc[Row,2])
elif command == 'FRAMESPERSECOND':
# Sets the number of frames per second for the whole movie
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
assert (df.loc[Row,2] >0 ), \
"Frames per second in sheet %s at row %d should be larger than 0" % (SheetName, Row+1)
self.FramesPerSecond = df.loc[Row,2]
elif command == 'FIRSTFRAME':
# Determine the first frame to be processed,
# if not all frames must be processed. For debugging
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
self.FirstFrame = int(df.loc[Row,2])
elif command == 'LASTFRAME':
# Determine the last frame to be processed,
# if not all frames must be processed. For debugging
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
self.LastFrame = int(df.loc[Row,2])
elif command == 'SHOWTIME':
# Write the time in the lower left corner of the frames, for debug purposes
self.ShowTime = True
elif command == 'HIDETIME':
# Do not write the time
self.ShowTime = False
elif command == 'MOVIE':
# Sets the number of frames per second for the whole movie
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a filename for the movie" % (command, Row+1, SheetName)
self.Movie= df.loc[Row,2]
print(" - movie {movie} will be created after generating the frames".format(movie=self.Movie))
elif command == 'ANIMATEDGIF':
# Sets the number of frames per second for the whole movie
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a filename for the animated gif" % (command, Row+1, SheetName)
self.AnimatedGIF= df.loc[Row,2]
print("- animated GIF {gif} will be created after generating the frames".format(gif=self.AnimatedGIF))
elif command == 'TABLE':
# Do not create a new script object, but import the commands in the current script
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the table name" % (command, Row+1, SheetName)
sheetname = df.loc[Row,2].strip()
self.ParseTable(self.FileName, sheetname)
elif command == 'SCRIPT':
# Do not create a new script object, but import the commands in the current script
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the script name" % (command, Row+1, SheetName)
sheetname = df.loc[Row,2].strip()
self.ParseScript(self.FileName, sheetname)
elif command == 'ASSEMBLY':
# Create a new script object and use the image created by this
# script as feed for this item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the assembly name" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
sheetname = df.loc[Row,3]
# If the script is not yet in the list, create it
if not sheetname in self.ScriptList:
NewScript = Script(FileName, sheetname, self.ScriptList)
self.ScriptList[sheetname] = NewScript
NewScript.ParseScript(FileName, sheetname)
# Assign the script to the
# ToDo: Implement item type directly
# ToDo: Implement change of script as function of time
self.Items[itemname].AddScript( time, sheetname )
elif command == 'CANVAS':
# A canvas is an assembly of which the background is not reset for a new frame
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
sheetname = df.loc[Row,3]
# If the script is not yet in the list, create it
if not sheetname in self.ScriptList:
NewScript = Script(FileName, sheetname, self.ScriptList)
NewScript.IsCanvas = True
self.ScriptList[sheetname] = NewScript
NewScript.ParseScript(FileName, sheetname)
# Assign the script to the
# ToDo: Implement item type directly
# ToDo: Implement change of script as function of time
self.Items[itemname].AddCanvas( time, sheetname )
elif command == 'IMAGE':
# Assign a new filename for an image item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item name" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
filename = os.path.join(self.ImageDir, df.loc[Row,3])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddImage( time, filename )
elif command == 'MASK':
# Assign a new filename for a mask item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
filename = os.path.join(self.ImageDir, df.loc[Row,3])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddMask( time, filename )
elif command == 'TEXT':
# Assign a new title for a text item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
title = df.loc[Row,3]
self.Items[itemname].AddText( time, title )
elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
# Set a new x position
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects an item name in column C" % (command, Row+1, SheetName)
assert isNumber(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a number in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
value = df.loc[Row,3]
self.Items[itemname].Properties[command].Append(time, value)
elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
# Determine linear or cycloid movement
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
move = df.loc[Row,3].strip().upper()
if move in CheckMove:
self.Items[itemname].Properties[command].Append(time, CheckMove[move])
else:
print("Did not recognize type of movement on row %d." % (Row+1))
elif command in ['TEXTCOLOR', 'FONT']:
# Set a new text color
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
textcolor = df.loc[Row,3].strip()
self.Items[itemname].Properties[command].Append(time, textcolor)
elif command == 'BRINGTOFRONT':
# Bring the item to front at this time position
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
self.Zbuffer.append( ( time, itemname, FRONT) )
elif command == 'SENDTOBACK':
# Send the item to back at this time position
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
self.Zbuffer.append( ( time, itemname, BACK) )
else:
print("Command %s not recognized on row %d." % (command, Row+1))
def ParseTable(self, FileName, SheetName):
logging.debug(' Script.ParseTable(%s, %s)' % (FileName, SheetName))
# Open excel file with frame data
df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
# Investigate which data each column contains
print(' - parsing table %s' % SheetName)
for Row in range(2, df.shape[0]):
# Only process rows with a time in the first column
if isNumber(df.loc[Row,0]):
time = df.loc[Row,0]
# Increase time if the table exceeds the maximum
if self.MaxTime<time: self.MaxTime=time
for Col in range(1, df.shape[1]):
# Only process columns with an existing object in the first row and a command in the second row
if isString(df.loc[0,Col]) and isString(df.loc[1,Col]) and\
len(df.loc[0,Col])>0 and len(df.loc[1,Col])>0:
itemname = df.loc[0,Col].upper().strip()
command = df.loc[1,Col].upper().strip()
# Only process items that have already been created in another script
if itemname in self.Items:
item = self.Items[itemname]
if command == 'IMAGE':
if item.ItemType == IT_IMAGE:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddImage( time, filename )
elif command == 'MASK':
if self.Items[item].ItemType == IT_MASK:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddMask( time, filename )
elif command == 'TEXT':
if item.ItemType == IT_TEXT:
# Assign a new title for a text item
if isString(df.loc[Row,Col]):
text = df.loc[Row,Col]
self.Items[itemname].AddText( time, text )
elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
# Set a new float property
if isNumber(df.loc[Row,Col]):
val = df.loc[Row,Col]
self.Items[itemname].Properties[command].Append(time, val)
elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
# Determine type of movement
if isString(df.loc[Row,Col]):
move = df.loc[Row,Col].strip().upper()
if move in CheckMove:
self.Items[itemname].Properties[command].Append(time, CheckMove[move])
else:
print("Did not recognize type of movement on row %d." % (Row+1))
elif command in ['TEXTCOLOR', 'FONT']:
if isString(df.loc[Row,Col]):
textcolor = df.loc[Row,Col].strip()
self.Items[itemname].Properties[command].Append(time, textcolor)
else:
print('Command: ', command)
print('Column: ', Col+1)
print("Command %s not recognized on col %d." % (command, Col+1))
def StandardChecks(self):
print(' - checking script %s which has %d items' % (self.SheetName, len(self.Items) ))
# Do some standard checks after parsing
OK = True
self.TimeOffsetUsed=False
for i in self.Items.values():
i.StandardChecks()
if i.TimeOffsetUsed:
self.TimeOffsetUsed=True
if (i.ItemType == IT_IMAGE):
if len(i.Properties['IMAGE'].Sequence)==0:
print('ERROR: %s has NO images' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['IMAGE'].Sequence:
if not os.path.isfile(filename):
print('Image not found: %s at tim %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_MASK):
if len(i.Properties['MASK'].Sequence)==0:
print('ERROR: %s has NO mask' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['MASK'].Sequence:
if not os.path.isfile(filename):
print('Mask not found: %s at tim %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_TEXT):
if len(i.Properties['TEXT'].Sequence)==0:
print('ERROR: %s has NO lines of text' % i.ItemName)
OK=False
return OK
def Deploy(self, MaxTime):
logging.debug('')
logging.debug('* DEPLOYING SCRIPT %s' % self.SheetName)
for item in self.Items.values():
item.Deploy(MaxTime)
if not self.Zbuffer:
# The Zbuffer has no items because the user did not specify
# any BRINGTOFRONT or SENDTOBACK commands
# Get the name of a random item
itemname = list(self.Items.keys())[0]
self.Zbuffer.append( ( 0, itemname, FRONT) )
self.Zbuffer.sort()
time, item, direction = self.Zbuffer[-1]
self.Zbuffer.append( (MaxTime, item, direction) )
self.Zbuffer.sort()
# Determine the order of the items at time = 0
self.ZbufferIndex = 0
# list() means we create a copy
self.Zorder = list(self.Items.keys())
def GetPicture(self, Time, Frame):
# If exactly the same image was calculated before,
# use that image
#if Frame != self.PictureFrame and not self.TimeOffsetUsed:
if True:
logging.debug('')
logging.debug('* SCRIPT %s IS GENERATING FRAME %.5d at time %.2f' % (self.SheetName, Frame, Time ))
# Start with a transparent image
if (not self.IsCanvas) or self.FirstImage:
self.Picture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
self.FirstImage=False
# Determine the Z-order at the desired time
while True:
time, item, direction = self.Zbuffer[self.ZbufferIndex]
if item not in self.Zorder:
print('Z-order failure: item %s not in script %s' % (item, self.SheetName) )
self.Zorder.remove(item)
if direction == FRONT:
self.Zorder.append(item)
else:
self.Zorder.insert(0, item)
if (self.Zbuffer[self.ZbufferIndex+1][0])>Time:
break
else:
self.ZbufferIndex+=1
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
# Draw each item
for itemname in self.Zorder:
Item = self.Items[itemname]
move = Item.Properties['OMOVE' ].Value(Time)
opacity = Item.Properties['OPACITY' ].Value(Time, move)
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
try:
logging.debug(' - Item %s:%s xpos= %.2f ypos= %.2f xscale= %.3f yscale= %.3f rot= %.3f opacity= %.3f' % (self.SheetName, itemname, xpos, ypos, sx, sy, rot, opacity))
except:
print('opacity', opacity)
print('xpos', xpos)
print('ypos', ypos)
print('sx', sx)
print('sy', sy)
print('rot', rot)
if opacity>0:
if Item.ItemType == IT_ASSY:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Assembly %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
logging.debug(' Script %s not in scriptlist!!:'% (script))
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
logging.debug(' Assembly %s continues:'% (self.SheetName))
if Item.ItemType == IT_CANVAS:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Canvas %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
elif Item.ItemType == IT_IMAGE:
image = Item.Properties['IMAGE'].Value(Time)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_MASK:
image = Item.Properties['MASK'].Value(Time)
logging.debug('Line 585 mask is %s' % image)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_TEXT:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
text = Item.Properties['TEXT' ].Value(Time)
textsize = int(Item.Properties['TEXTSIZE' ].Value(Time, LINEAR))
textcolor = Item.Properties['TEXTCOLOR'].Value(Time)
fontname = Item.Properties['FONT' ].Value(Time)
Directories = [ 'C:\\WINDOWS\\Fonts\\' ]
Fonts = [fontname, 'calibri.ttf', 'YanoneKaffeesatz-Regular.ttf', 'ARIALN.TTF', 'verdana.ttf', 'YanoneKaffeesatz-Light.ttf']
Face = ImageFont.truetype(SelectFont(Directories, Fonts), textsize)
Draw = ImageDraw.Draw(ItemPicture)
Draw.text( (0,0), text, fill=textcolor, font=Face)
# Retrieve the general properties
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
xpole = Item.Properties['XPOLE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
ypole = Item.Properties['YPOLE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
fi = math.pi/180*rot
sinfi = math.sin(fi)
cosfi = math.cos(fi)
w,h = ItemPicture.size
# Resize and rotate the ItemPicture
try:
ItemPicture=ItemPicture.resize( (int(sx*w+0.5), int(sy*h+0.5) ), Image.ANTIALIAS)
ItemPicture=ItemPicture.rotate(rot, expand=1)
except:
print('ERROR Script 663: Item %s:%s sx= %.2f sy= %.2f' % (self.SheetName, itemname, sx, sy))
break
wr,hr = ItemPicture.size
xt = xpos + xpole - ypole*sy*sinfi - xpole*sx*cosfi +0.5*w*sx*cosfi +0.5*h*sy*sinfi -0.5*wr
yt = ypos + ypole - ypole*sy*cosfi + xpole*sx*sinfi -0.5*w*sx*sinfi +0.5*h*sy*cosfi -0.5*hr
Mask = ItemPicture.convert("RGBA")
Mask = Image.blend(Image.new(ItemPicture.mode, ItemPicture.size, 0), ItemPicture, opacity)
if Item.ItemType != IT_MASK:
# Item is picture, assembly or canvas
self.Picture.paste( ItemPicture, (int(xt),int(yt)), Mask )
else:
# Item is mask
logging.debug(' - Applying mask for %s' % itemname)
# Start with a clean image with transparent background
CleanImage = Image.new("RGBA", (self.Width, self.Height), (0,0,0,0) )
# Use the mask rotated and translated
Mask = Image.new("L", (self.Width, self.Height), 0 )
Mask.paste( ItemPicture, (int(xt),int(yt)))
# Copy the image as-is with rotation and translation set to zero
CleanImage.paste( self.Picture, (0,0), Mask )
self.Picture = CleanImage.copy()
self.PictureFrame = Frame
return self.Picture.copy()
| 0 | 0 |
da91ac6418297ed4e02987a76d7e459a1c8dc944 | 2,510 | py | Python | DatasetHandler/FileHelperFunc.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | DatasetHandler/FileHelperFunc.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | DatasetHandler/FileHelperFunc.py | previtus/MGR-Project-Code | 1126215059eb3f731dcf78ec24d9a480e73abce6 | [
"MIT"
] | null | null | null | import os
def get_project_folder():
'''
Gives us the path to MGR-Project-Code from a list of allowed folders.
:return:
'''
PATH_ALTERNATIVES = ['/home/ekmek/Project II/MGR-Project-Code/', '/storage/brno2/home/previtus/MGR-Project-Code/', '/home/ekmek/Vitek/MGR-Project-Code/']
ABS_PATH_TO_PRJ = use_path_which_exists(PATH_ALTERNATIVES)
return ABS_PATH_TO_PRJ
def get_geojson_path():
'''
Gives us the path directly to attractivity_previtus_data_1_edges.geojson from a list of allowed paths
:return:
'''
folders = ['/home/ekmek/Desktop/Project II/graph_new_data/',
'/home/ekmek/Vitek/graph_new_data/',
'/storage/brno2/home/previtus/important_files/']
folder = use_path_which_exists(folders)
return folder+'attractivity_previtus_data_1_edges.geojson'
def use_path_which_exists(list_of_possible_paths):
'''
From a list of possible paths choose the one which exists.
:param list_of_possible_paths: possible paths
:return: working path
'''
used_path = ''
for path in list_of_possible_paths:
if os.path.exists(path):
used_path = path
if used_path == '':
print "Error, cannot locate the path of project, will likely fail!"
return used_path
def file_exists(fname):
''' Does file exist, returns boolean.'''
return os.path.isfile(fname)
def get_folder_from_file(fname):
''' Get folder name from path to a file.'''
return os.path.dirname(fname) + '/'
def folder_exists(directory):
''' Does folder with this name exist, returns boolean'''
return os.path.exists(directory)
def make_folder_ifItDoesntExist(directory):
''' Make a new directory, if it didn't previously exist.'''
if not os.path.exists(directory):
os.makedirs(directory)
import shutil, errno
def copy_folder(src, dst):
''' Copy and paste folders. Used for dataset augmentation.'''
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def copy_file(src, dst):
''' Copy and paste file.'''
try:
shutil.copy(src, dst)
except OSError as exc:
raise
import hashlib
def md5(fname):
''' Get md5 hash of a file.'''
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
| 30.240964 | 157 | 0.661753 | import os
def get_project_folder():
'''
Gives us the path to MGR-Project-Code from a list of allowed folders.
:return:
'''
PATH_ALTERNATIVES = ['/home/ekmek/Project II/MGR-Project-Code/', '/storage/brno2/home/previtus/MGR-Project-Code/', '/home/ekmek/Vitek/MGR-Project-Code/']
ABS_PATH_TO_PRJ = use_path_which_exists(PATH_ALTERNATIVES)
return ABS_PATH_TO_PRJ
def get_geojson_path():
'''
Gives us the path directly to attractivity_previtus_data_1_edges.geojson from a list of allowed paths
:return:
'''
folders = ['/home/ekmek/Desktop/Project II/graph_new_data/',
'/home/ekmek/Vitek/graph_new_data/',
'/storage/brno2/home/previtus/important_files/']
folder = use_path_which_exists(folders)
return folder+'attractivity_previtus_data_1_edges.geojson'
def use_path_which_exists(list_of_possible_paths):
'''
From a list of possible paths choose the one which exists.
:param list_of_possible_paths: possible paths
:return: working path
'''
used_path = ''
for path in list_of_possible_paths:
if os.path.exists(path):
used_path = path
if used_path == '':
print "Error, cannot locate the path of project, will likely fail!"
return used_path
def file_exists(fname):
''' Does file exist, returns boolean.'''
return os.path.isfile(fname)
def get_folder_from_file(fname):
''' Get folder name from path to a file.'''
return os.path.dirname(fname) + '/'
def folder_exists(directory):
''' Does folder with this name exist, returns boolean'''
return os.path.exists(directory)
def make_folder_ifItDoesntExist(directory):
''' Make a new directory, if it didn't previously exist.'''
if not os.path.exists(directory):
os.makedirs(directory)
import shutil, errno
def copy_folder(src, dst):
''' Copy and paste folders. Used for dataset augmentation.'''
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def copy_file(src, dst):
''' Copy and paste file.'''
try:
shutil.copy(src, dst)
except OSError as exc:
raise
import hashlib
def md5(fname):
    ''' Hex md5 digest of the file's contents, read in 4 KiB chunks so
    large files are never loaded whole into memory.'''
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        while True:
            block = fh.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
| 0 | 0 |
ca53a879e6d706ed4d4875884504a13f91f27b99 | 1,575 | py | Python | trajectory_executor/Parameters.py | WPI-MMR/trajectory_generator | 61a3c61d37e674cfa81ec4e67fd56bb825e56ab8 | [
"MIT"
] | null | null | null | trajectory_executor/Parameters.py | WPI-MMR/trajectory_generator | 61a3c61d37e674cfa81ec4e67fd56bb825e56ab8 | [
"MIT"
] | null | null | null | trajectory_executor/Parameters.py | WPI-MMR/trajectory_generator | 61a3c61d37e674cfa81ec4e67fd56bb825e56ab8 | [
"MIT"
] | null | null | null | # from sympy import *
# ---- Robot chassis dimensions ----
# NOTE(review): units are not stated anywhere in this module; the values
# look like millimetres -- confirm against the robot spec.
l = 370      # hip-to-hip length of the robot
b = 210.1    # hip-to-hip breadth of the robot
h = 44       # height of the robot

# Length naming convention: first digit = segment, second digit = leg
# type, e.g. l11 = hip-to-knee of leg type 1 (rear), l22 = knee-to-ankle
# of leg type 2 (front), and so on.

# ---- Leg type 1: rear ----
l11 = 160    # hip to knee length
l21 = 160    # knee to ankle length
l3 = 39      # ankle to toe length
d1 = 37      # hip offset
d2 = 12.95   # knee offset

# Angle naming convention: first digit = joint, second digit = leg
# number, e.g. theta11 = hip rotation of leg 1, theta21 = knee rotation
# of leg 1, theta14 = hip rotation of leg 4, and so on.
# theta11, alpha11, theta21, alpha21, theta31, alpha31 = symbols("theta11 alpha11 theta21 alpha21 theta31 alpha31")
# theta14, alpha14, theta24, alpha24, theta34, alpha34 = symbols("theta14 alpha14 theta24 alpha24 theta34 alpha34")

# ---- Leg type 2: front ----
l12 = 160    # hip to knee length
l22 = 173.5  # knee to ankle length
# theta12, alpha12, theta22, alpha22 = symbols("theta12 alpha12 theta22 alpha22")
# theta13, alpha13, theta23, alpha23 = symbols("theta13 alpha13 theta23 alpha23")
| 28.125 | 116 | 0.690794 | # from sympy import *
# Robot Chassis Parameters
l = 370 #hip to hip length of the robot
b = 210.1 #hip to hip breadth of the robot
h = 44 #height of the robot
## Leg Type 1: Rear
'''
Variable name convention as follows:
The first number represents the length and the second number represents the Leg Type
l11 is the hip to knee length of Leg Type 1
l21 is the knee to ankle length of Leg Type 1
l22 is the knee to ankle length of Leg Type 2
and so on...
'''
# Defining lengths and offsets
l11 = 160 #hip to knee length
l21 = 160 #knee to ankle length
l3 = 39 #ankle to toe length
d1 = 37 #hip offset
d2 = 12.95 #knee offset
'''
Variable name convention as follows:
The first number represents the angle and the second number represents the Leg #
theta11 is the hip rotation angle of Leg 1
theta21 is the knee roation angle of Leg 1
theta31 is the ankle roation angle of Leg 1
theta14 is the hip rotation angle of Leg 4
and so on...
'''
# theta11, alpha11, theta21, alpha21, theta31, alpha31 = symbols("theta11 alpha11 theta21 alpha21 theta31 alpha31")
# theta14, alpha14, theta24, alpha24, theta34, alpha34 = symbols("theta14 alpha14 theta24 alpha24 theta34 alpha34")
## Leg Type 2: Front
# Defining lengths and offsets
l12 = 160 #hip to knee length
l22 = 173.5 #knee to ankle length
# theta12, alpha12, theta22, alpha22 = symbols("theta12 alpha12 theta22 alpha22")
# theta13, alpha13, theta23, alpha23 = symbols("theta13 alpha13 theta23 alpha23")
| 0 | 0 |
26e42554fb75ae570d805b94fb43bd82a761275e | 1,622 | py | Python | vedicpy/squareroot.py | utkarsh0702/vedicpy | 56a3228a8caea0976570c119e02516600f26e88b | [
"BSD-3-Clause"
] | 1 | 2021-02-14T16:22:17.000Z | 2021-02-14T16:22:17.000Z | vedicpy/squareroot.py | utkarsh0702/vedicpy | 56a3228a8caea0976570c119e02516600f26e88b | [
"BSD-3-Clause"
] | null | null | null | vedicpy/squareroot.py | utkarsh0702/vedicpy | 56a3228a8caea0976570c119e02516600f26e88b | [
"BSD-3-Clause"
] | null | null | null | from math import floor, log10
def squareroot_check(a: int) -> bool:
    """Quick Vedic-math filter for perfect squares.

    Returns False for numbers that provably cannot be perfect squares,
    using three necessary conditions:

      * the last digit must be 0, 1, 4, 5, 6 or 9;
      * the digital root must be 1, 4, 7 or 9 (squares are 0, 1, 4 or 7
        mod 9);
      * a digit rule: squares ending in 6 have an odd tens digit, those
        ending in 1/4/5/9 an even tens digit, and those ending in 0 end
        in an even number of zeros.

    May return True for some non-squares (the conditions are necessary,
    not sufficient).

    BUG FIX: the original rejected digital root 9 as well, wrongly
    classifying squares such as 9, 36, 81 and 144 as non-squares; it
    also raised on a == 0 (log10(0)).

    :param a: integer to screen
    :return: False if a is certainly not a perfect square
    """
    a = int(a)
    if a < 0:
        return False  # negatives are never perfect squares
    if a == 0:
        return True   # 0 == 0**2 (the original crashed here)
    last = a % 10
    # No square ends in 2, 3, 7 or 8.
    if last in (2, 3, 7, 8):
        return False
    # Digital root (repeated digit sum) == a % 9, with 0 mapped to 9.
    digital_root = a % 9 or 9
    if digital_root not in (1, 4, 7, 9):
        return False
    tens = (a // 10) % 10
    if last == 0:
        # A square ending in 0 must end in an even count of zeros
        # (100, 400, 10000, ... but never 10, 1000, ...).
        zeros = 0
        rest = a
        while rest % 10 == 0:
            zeros += 1
            rest //= 10
        return zeros % 2 == 0
    if last == 6:
        # Squares ending in 6 have an odd tens digit (16, 36, 196, ...).
        return tens % 2 == 1
    # Squares ending in 1, 4, 5 or 9 have an even tens digit
    # (81, 64, 25, 49, 121, ...).
    return tens % 2 == 0
def perfect_sqrt_under_sqof100(a: int) -> int:
    """Square root of a perfect square by the Vedic bracketing method.

    Screens a with squareroot_check(), then finds i such that
    (10*i)**2 <= a <= (10*(i+1))**2 and derives the units digit of the
    root from a's last digit and the distances to the bracket squares.
    The range(100) bracket search handles a up to (100*10)**2.

    Prints a message and terminates the process via exit(0) when a fails
    the screen (behaviour kept from the original interface).

    BUG FIX: when a's last digit is 0 the root is one of the two bracket
    endpoints, but the original always returned the lower one (e.g.
    400 -> 10 instead of 20, 100 -> 0 instead of 10).

    :param a: a perfect square
    :return: its integer square root
    """
    a = int(a)
    if squareroot_check(a) == True:
        b = a % 10  # last digit of a, determines the root's units digit
        c = 0       # lower bracket: largest multiple of 10 whose square <= a
        for i in range(100):
            if (i * 10) ** 2 <= a:
                if a <= ((i + 1) * 10) ** 2:
                    c = i * 10
                    break
        d1 = a - (i * 10) ** 2          # distance above the lower bracket square
        d2 = ((i + 1) * 10) ** 2 - a    # distance below the upper bracket square
        if b == 0:
            # Root is a multiple of 10: the lower endpoint when a sits
            # exactly on it (d1 == 0), otherwise the upper one.
            b = 0 if d1 == 0 else 10
        elif b == 1:
            # Candidate units digits 1 and 9 (1**2 and 9**2 end in 1);
            # pick by which bracket square a is closer to.
            b = 1 if d2 > d1 else 9
        elif b == 4:
            b = 2 if d2 > d1 else 8
        elif b == 6:
            b = 4 if d2 > d1 else 6
        elif b == 9:
            b = 3 if d2 > d1 else 7
        elif b == 5:
            b = 5  # only 5**2 ends in 5
        else:
            print("Error: Not a perfect square."); exit(0)
        return (c + b)
    else:
        print('The number is not a perfect square.'); exit(0)
def squareroot_check(a: int) -> bool:
a= int(a) ; b=a%10 ; x=a; a//=10
if(b==2 or b==3 or b==7 or b==8):
return False
else:
n= floor(log10(abs(x))) + 1
while(n!=1):
sum=0
while(x!=0):
sum+=(x%10) ; x//=10
x= sum
n= floor(log10(abs(x))) + 1
if(x==2 or x==3 or x==5 or x==6 or x==8 or x==9):
return False
else:
if b==0:
sum=0; i=0
while(sum==0):
i+=1; sum= a%(10**i)
if(i%2==0):
return True
else:
return False
elif(b==6):
return True if((a%10)%2==1) else False
else:
return True if((a%10)%2==0) else False
def perfect_sqrt_under_sqof100(a: int) -> int:
a= int(a)
if(squareroot_check(a)==True):
c=0; b= a%10
for i in range(100):
if((i*10)**2 <= a ):
if(a <= ((i+1)*10)**2):
c= i*10; break
d1= a- ((i*10)**2); d2= (((i+1)*10)**2)- a
if(b==0): b=0
elif(b==1): b= 1 if (d2>d1) else 9
elif(b==4): b= 2 if (d2>d1) else 8
elif(b==6): b= 4 if (d2>d1) else 6
elif(b==9): b= 3 if (d2>d1) else 7
elif(b==5): b= 5
else:
print("Error: Not a perfect square."); exit(0)
return (c+b)
else:
print('The number is not a perfect square.'); exit(0) | 0 | 0 |
41e9c7b6bd0603988abf0a0263e4f16fab3ff22b | 716 | py | Python | plbmng/lib/ssh_map.py | xxMAKMAKxx/plbmng | 64bbe70424801092c7429d5e73ecaf5466b6c437 | [
"MIT"
] | null | null | null | plbmng/lib/ssh_map.py | xxMAKMAKxx/plbmng | 64bbe70424801092c7429d5e73ecaf5466b6c437 | [
"MIT"
] | null | null | null | plbmng/lib/ssh_map.py | xxMAKMAKxx/plbmng | 64bbe70424801092c7429d5e73ecaf5466b6c437 | [
"MIT"
] | null | null | null | import folium
import csv
from folium.plugins import MarkerCluster
def main():
    """Plot every node with an available SSH connection on a world map.

    Reads tab-separated rows (name, lat, lon, ...) from
    lib/base_data.txt, skipping rows whose coordinates are not valid
    floats, and writes the result to map_ssh.html.
    """
    world = folium.Map(location=[45.523, -122.675],
                       zoom_start=2)
    with open('lib/base_data.txt') as tsv:
        for record in csv.reader(tsv, delimiter='\t'):
            label = record[0]
            try:
                lat = float(record[1])
                lon = float(record[2])
            except ValueError:
                continue  # malformed coordinates -> skip the row
            print(" %s " % label)
            folium.Marker([lat, lon], popup=label).add_to(world)
    world.save('map_ssh.html')
map_ssh.save('map_ssh.html')
if __name__ == "__main__":
main()
| 23.866667 | 65 | 0.523743 | import folium
import csv
from folium.plugins import MarkerCluster
def main():
"""
Creates a map of nodes with available SSH connection.\n
:return: map_ssh.html file
"""
map_ssh = folium.Map(location=[45.523, -122.675],
zoom_start=2)
with open('lib/base_data.txt') as tsv:
for row in csv.reader(tsv, delimiter='\t'):
name = row[0]
try:
x = float(row[1])
y = float(row[2])
print(" %s " % name)
folium.Marker([x, y], popup=name).add_to(map_ssh)
except ValueError:
pass
map_ssh.save('map_ssh.html')
if __name__ == "__main__":
main()
| 0 | 0 |
2bb73ddf9d4ab92638002a38558adbe2794f6be1 | 146 | py | Python | day08/part2.py | mtn/advent15 | b23bcf5761363596336d5361218c52db0b078793 | [
"MIT"
] | null | null | null | day08/part2.py | mtn/advent15 | b23bcf5761363596336d5361218c52db0b078793 | [
"MIT"
] | null | null | null | day08/part2.py | mtn/advent15 | b23bcf5761363596336d5361218c52db0b078793 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Advent of Code 2015, day 8 part 2: total growth from re-encoding each
# string literal -- two new surrounding quotes per line plus one extra
# character for every backslash or double quote that must be escaped.
with open("input.txt") as src:
    growth = sum(2 + row.count("\\") + row.count("\"") for row in src)
print(growth)
| 18.25 | 54 | 0.541096 | #!/usr/bin/env python3
ans = 0
with open("input.txt") as f:
for line in f:
ans += 2 + line.count("\\") + line.count("\"")
print(ans)
| 0 | 0 |
b39083aacc0b8fea019f95a80c7e48ff65c4cb4a | 818 | py | Python | opttrack/lib/ui/obs_menu.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | [
"MIT"
] | null | null | null | opttrack/lib/ui/obs_menu.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | [
"MIT"
] | 2 | 2016-03-30T02:50:31.000Z | 2016-03-30T16:18:23.000Z | opttrack/lib/ui/obs_menu.py | aisthesis/opttrack | 17e0c7740ea43e0f07166e30d689b106d0319d0b | [
"MIT"
] | null | null | null | """
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/edit_menu.py
Content for interactive editor
"""
from functools import partial
from .obs_handlers import ObsHandlers
from .menu import Menu
from .spread_selector import SpreadSelector
class ObsMenu(Menu):
    """Interactive menu for observing spreads.

    Builds its menu tree from a SpreadSelector and dispatches choices to
    an ObsHandlers instance; the 'main' entry is overridden to quit.
    """
    def __init__(self, logger, tz):
        # Menu.__init__ presumably stores logger/tz as self.logger and
        # self.tz (both read below) -- confirm in lib/ui/menu.py.
        super(ObsMenu, self).__init__(logger, tz=tz)
        self.spread_sel = SpreadSelector()
        self._handlers = ObsHandlers(self.logger, self.tz)
        # A 'do' callable returning False ends the menu loop, so the
        # overridden 'main' entry acts as "Quit".
        overrides = {'main': {'desc': 'Quit', 'do': lambda: False}}
        # Wires the 'dgb' spread choice to ObsHandlers.obs('dgb');
        # NOTE(review): the meaning of the 'dgb' key should be confirmed
        # against ObsHandlers.
        self._menus = {'main': self.spread_sel.get(overrides,
                dgb=partial(self.handlers.obs, 'dgb'))}
    @property
    def menus(self):
        # Mapping of menu name -> menu definition consumed by the Menu base.
        return self._menus
    @property
    def handlers(self):
        # ObsHandlers instance that performs the observation actions.
        return self._handlers
| 24.787879 | 67 | 0.665037 | """
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/edit_menu.py
Content for interactive editor
"""
from functools import partial
from .obs_handlers import ObsHandlers
from .menu import Menu
from .spread_selector import SpreadSelector
class ObsMenu(Menu):
def __init__(self, logger, tz):
super(ObsMenu, self).__init__(logger, tz=tz)
self.spread_sel = SpreadSelector()
self._handlers = ObsHandlers(self.logger, self.tz)
overrides = {'main': {'desc': 'Quit', 'do': lambda: False}}
self._menus = {'main': self.spread_sel.get(overrides,
dgb=partial(self.handlers.obs, 'dgb'))}
@property
def menus(self):
return self._menus
@property
def handlers(self):
return self._handlers
| 0 | 0 |