Dataset schema (⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 24 to 287k |
| ext | string | 2 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 7 to 126 |
| max_stars_repo_name | string | lengths 8 to 97 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | float64 ⌀ | 1 to 15.9k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 7 to 126 |
| max_issues_repo_name | string | lengths 8 to 97 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | float64 ⌀ | 1 to 14.6k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 7 to 126 |
| max_forks_repo_name | string | lengths 8 to 97 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | float64 ⌀ | 1 to 8.43k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24 to 24 |
| content | string | lengths 24 to 287k |
| avg_line_length | float64 | 12.3 to 530 |
| max_line_length | int64 | 24 to 10.2k |
| alphanum_fraction | float64 | 0.41 to 0.88 |
hexsha: 4f49aa977ab9bbd0f8065ca2b090584768f9022c
size: 1,736 | ext: py | lang: Python
max_stars_repo: nebiutadele/2022-02-28-Alta3-Python @ 9c065540bfdf432103bfffac6eae4972c9f9061a (path: color/color2.py, licenses: ["MIT"], stars: null)
max_issues_repo: nebiutadele/2022-02-28-Alta3-Python @ 9c065540bfdf432103bfffac6eae4972c9f9061a (path: color/color2.py, licenses: ["MIT"], issues: null)
max_forks_repo: nebiutadele/2022-02-28-Alta3-Python @ 9c065540bfdf432103bfffac6eae4972c9f9061a (path: color/color2.py, licenses: ["MIT"], forks: null)
content:

#!/usr/bin/env python3
"""Alta3 Research || Author RZFeeser@alta3.com
Learning how to use functions"""
## Installs the crayons package.
## python3 -m pip install crayons
## import statements ALWAYS go up top
import crayons
def main():
"""run time code. Always indent under function"""
# print 'red string' in red
print(crayons.red('red string'))
# Red White and Blue text
#print('{} white {}'.format(crayons.red('red'), crayons.blue('blue'))) # format string (old ver of str templating)
print(f"{crayons.red('red')} white {crayons.blue('blue')}") # f-string (newest version of str templating)
crayons.disable() # disables the crayons package
# this line should NOT have color as crayons is disabled
print(f"{crayons.red('red')} white {crayons.blue('blue')}") # f-string (newest version of string templating)
crayons.DISABLE_COLOR = False # enable the crayons package
# This line will print in color because color is enabled
print(f"{crayons.red('red')} white {crayons.blue('blue')}") # f-string (newest version of string templating)
# print 'red string' in red
print(crayons.red('red string', bold=True))
# print 'yellow string' in yellow
print(crayons.yellow('yellow string', bold=True))
# print 'magenta string' in magenta
print(crayons.magenta('magenta string', bold=True))
# print 'white string' in white
print(crayons.white('white string', bold=True))
print(crayons.green('Nebiu Tadele in green'))
print(crayons.blue('Nebiu Tadele in blue and in bold', bold=True))
# this condition is only true if our script is run directly
# it is NOT true if our code is imported into another script
if __name__ == "__main__":
main()
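
# Example invocation from a shell (assumes crayons is installed; see the
# pip note at the top of this file):
#   python3 color2.py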
avg_line_length: 34.039216 | max_line_length: 118 | alphanum_fraction: 0.68894

hexsha: 4f4aa9f6cde34245c22f658c223f7f879a4f102e
size: 30,096 | ext: py | lang: Python
max_stars_repo: mattfeel/Scalable-PaQL-Queries @ 993733f6d8afb607e2789d7b5ffb6535e0b5c8ce (path: src/paql/package_query.py, licenses: ["MIT"], stars: 6, 2016-10-04T05:35:24.000Z to 2020-11-12T09:31:42.000Z)
max_issues_repo: matteo-brucato/Scalable-PaQL-Queries @ 993733f6d8afb607e2789d7b5ffb6535e0b5c8ce (path: src/paql/package_query.py, licenses: ["MIT"], issues: null)
max_forks_repo: matteo-brucato/Scalable-PaQL-Queries @ 993733f6d8afb607e2789d7b5ffb6535e0b5c8ce (path: src/paql/package_query.py, licenses: ["MIT"], forks: 3, 2017-08-02T23:55:23.000Z to 2020-05-17T19:46:04.000Z)
content:

import gc
import hashlib
import itertools
import logging
import math
import sys
import traceback
from logging import warning, debug
import numpy as np
from pulp import LpProblem, LpMinimize, LpVariable, LpInteger, CPLEX, LpStatus
from src.dbms.utils import sql_get_all_attributes, sql_table_column_data_type
from src.paql.constraints import *
from src.paql.expression_trees.expression_trees import ArithmeticExpression
from src.paql.expression_trees.syntax_tree import Expression
from src.paql.objectives import *
from src.utils.utils import op_to_opstr

class NotPackageQueryException(Exception):
    pass


class PaQLParserError(Exception):
    pass

class PackageQuery(object):
    allowed_dbms_data_types = {
        "integer",
        "bigint",
        "double precision",
        # "numeric",
        # "numeric(15,2)"
    }

    @property
    def table_name(self):
        assert len(self.rel_namespace.values()) == 1
        return self.rel_namespace.itervalues().next()

    @table_name.setter
    def table_name(self, table_name):
        assert len(self.rel_namespace.values()) == 1
        if self.table_name is not None and self.rel_namespace is not None:
            for rel, relname in self.rel_namespace.iteritems():
                if relname.lower() == self.table_name.lower():
                    self.rel_namespace[rel] = table_name
                    self._paql_query_str_stale = True

    @property
    def bc_query(self):
        bc_query = "SELECT * FROM {}".format(
            ','.join([
                rel_name + " " + rel_alias for rel_alias, rel_name in self.rel_namespace.iteritems()
            ]))
        where_clause_str = self.where_expr.get_str()
        if where_clause_str:
            bc_query += " WHERE {}".format(where_clause_str)
        if self.limit is not None and self.limit["TYPE"] == "INPUT":
            bc_query += " LIMIT {}".format(self.limit["LIMIT"])
        return bc_query
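
    # For example (hypothetical names): with rel_namespace == {"R": "recipes"},
    # an empty WHERE expression, and limit == {"TYPE": "INPUT", "LIMIT": 10},
    # bc_query evaluates to "SELECT * FROM recipes R LIMIT 10".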
    def __init__(self, d):
        assert isinstance(d, dict)
        self._paql_query_str = None
        self._paql_query_str_stale = True
        # self.package_rel_names = d["package rel names"]
        self.rel_namespace = d["namespace"]
        self.rel_repeats = d["repeats"]
        self.where_expr = d["where expr"]
        self.such_that_expr = d["such that expr"]
        if d["objective expr"] is not None:
            self.objective = PackageQueryObjective(
                sqlquery_expr=d["objective expr"].get_sql_arithmetic_expression(),
                sense=d["objective sense"])
        else:
            self.objective = None
        self.limit = d["limit"]

        # NOTE: For now, assuming that the query is single-table.
        # TODO: We need to take into account REPEAT! It's not implemented yet!
        # rel_names = self.rel_namespace.values()
        assert len(self.rel_namespace.values()) == 1, "Not a single-table package query!"
        # self.table_name = self.bc_query.lower().split("from")[1].split("where")[0].split()[0].strip()
        # self.table_name = rel_names[0]

    def __str__(self):
        raise NotImplementedError

    def md5(self):
        return hashlib.md5(str(self)).hexdigest()
    @classmethod
    def get_json_from_paql(cls, paql_str):
        from subprocess import Popen, PIPE
        p = Popen(["PaQL_Parser"], stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
        json_str, err = p.communicate(input=paql_str)
        p.wait()
        if err != "":
            raise PaQLParserError(err)
        return json_str

    @classmethod
    def from_paql(cls, paql_str):
        """
        Returns a new PackageQuery object from a PaQL query string. This is the method that you would call most
        often.

        :param paql_str: A string containing a PaQL query
        :rtype: PackageQuery
        """
        json_str = PackageQuery.get_json_from_paql(paql_str)
        try:
            package_query = cls.from_json(json_str)
        except ValueError as e:
            traceback.print_exc(file=sys.stdout)
            raise PaQLParserError(e)
        else:
            package_query._paql_query_str = paql_str
            package_query._paql_query_str_stale = False
            return package_query
        finally:
            gc.collect()
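
    # A hypothetical usage sketch (the table and attribute names are invented,
    # and the PaQL_Parser binary must be on the PATH):
    #
    #   query = PackageQuery.from_paql(
    #       "SELECT PACKAGE(R) FROM recipes R WHERE R.kcal >= 100 "
    #       "SUCH THAT SUM(R.kcal) <= 2000 MINIMIZE SUM(R.fat);")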
    @classmethod
    def from_json(cls, json_str):
        """
        Returns a new PackageQuery object from a JSON string. This method is usually called by from_paql() to
        transform the PaQL parser output (which is JSON) into a PackageQuery object.
        This is the main entry point from the direct output of the PaQL parser.

        :param json_str: A string containing a JSON structure for a parsed PaQL query
        """
        import json
        q = json.loads(json_str)

        # The namespace of relations defined by the query. A dictionary alias -> relation-name.
        # This way, all references to relations can be just made based on the alias names, and we can avoid confusion
        # when nested queries contain the same relation names, etc.
        rel_namespace = { }

        # The mapping from relation aliases into their corresponding REPEAT values.
        rel_repeats = { }

        # The list of relation aliases which form the PACKAGE.
        package_rel_names = []

        # TODO: Ideally, if the query is not a package query we may want to just execute it as it is...
        # TODO: If it doesn't contain the PACKAGE clause, we should make sure it does not contain SUCH THAT either.

        # Check if it's a package query and store references to the relation names
        for select_item in q["SELECT"]:
            assert type(select_item) == dict
            if select_item["NODE_TYPE"] == "*":
                raise NotPackageQueryException()
            elif select_item["NODE_TYPE"] == "COL_REF":
                raise NotPackageQueryException()
            elif select_item["NODE_TYPE"] == "PACKAGE":
                package_rel_names.extend(r["REL_NAME"] for r in select_item["PACKAGE_RELS"])
            else:
                raise Exception("Problem in SELECT clause, NODE_TYPE not recognized: " + select_item["NODE_TYPE"])

        # Store relation names and aliases, and the repeat constraint for each of them.
        # These are stored in a dictionary rel_namespace(key=rel_alias, val=rel_name)
        for from_ in q["FROM"]:
            assert type(from_) == dict
            rel_name = from_["REL_NAME"]
            rel_alias = from_.get("REL_ALIAS", rel_name)
            repeat = from_.get("REPEAT", -1)
            rel_namespace[rel_alias] = rel_name
            rel_repeats[rel_alias] = repeat

        # Make sure that all relation aliases referred to in PACKAGE(...) are in the FROM clause as well
        assert all(p_rel_name in rel_namespace for p_rel_name in package_rel_names)
        # Stricter (for now): Make sure that they are exactly the same relation references
        assert set(package_rel_names) == set(rel_namespace.iterkeys())

        # Create WHERE clause expression tree
        where_clause = Expression(q["WHERE"])

        # Create SUCH THAT clause expression tree
        such_that_clause = Expression(q["SUCH-THAT"])

        # Create objective clause expression tree
        if q["OBJECTIVE"] is not None:
            objective_expr = Expression(q["OBJECTIVE"]["EXPR"])
            if q["OBJECTIVE"]["TYPE"] == "MAXIMIZE":
                # objective = { "type": "maximize", "expr": objective_expr }
                objective_sense = ObjectiveSenseMAX()
            elif q["OBJECTIVE"]["TYPE"] == "MINIMIZE":
                # objective = { "type": "minimize", "expr": objective_expr }
                objective_sense = ObjectiveSenseMIN()
            else:
                raise Exception("Unsupported objective type: `{}'".format(q["OBJECTIVE"]["TYPE"]))
        else:
            objective_expr = objective_sense = None

        query_dict = {
            # "package rel names": package_rel_names,
            "where expr": where_clause,
            "such that expr": such_that_clause,
            "objective expr": objective_expr,
            "objective sense": objective_sense,
            "namespace": rel_namespace,
            "repeats": rel_repeats,
            "limit": q["LIMIT"],
        }

        if such_that_clause.is_conjunctive() and where_clause.is_conjunctive():
            return ConjunctivePackageQuery(query_dict)
        else:
            return cls(query_dict)
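
    # Sketch of the parser JSON shape consumed above (inferred from the code
    # in this method; the field values are invented):
    #
    #   {
    #     "SELECT": [{"NODE_TYPE": "PACKAGE", "PACKAGE_RELS": [{"REL_NAME": "R"}]}],
    #     "FROM": [{"REL_NAME": "recipes", "REL_ALIAS": "R", "REPEAT": -1}],
    #     "WHERE": ..., "SUCH-THAT": ...,
    #     "OBJECTIVE": {"TYPE": "MINIMIZE", "EXPR": ...},
    #     "LIMIT": null
    #   }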
    @staticmethod
    def from_uncoalesced_constraints(table_name, unc_bcs, unc_gcs, objective):
        """
        This method creates a new PackageQuery from sets of uncoalesced constraints and an objective.
        """
        bc_query = "SELECT * FROM {} {}".format(table_name, "WHERE true" if len(unc_bcs) > 0 else "")
        for attr, op, n in unc_bcs:
            bc_query += " AND {a} {o} {b}".format(a=attr, o=op_to_opstr(op), b=n)

        gc_queries = []
        gc_ranges = []
        for (aggr, attr), op, n in unc_gcs:
            gc_query = "SELECT {aggr}({attr}) FROM memory_representations".format(aggr=aggr, attr=attr)
            if op == operator.le:
                # gc_range = (-sys.maxint, n)
                gc_range = (-float("inf"), n)
            elif op == operator.ge:
                # gc_range = (n, sys.maxint)
                gc_range = (n, float("inf"))
            elif op == operator.eq:
                gc_range = (n, n)
            else:
                raise Exception("Operator '{}' not supported yet.".format(op))
            gc_queries.append(gc_query)
            gc_ranges.append(gc_range)

        return PackageQuery({
            "bc": bc_query,
            "gc": map(lambda x: (x[0], x[1][0], x[1][1]), zip(gc_queries, gc_ranges)),
            "objective": objective,
        })
    def get_objective_attributes(self):
        attrs = set()
        if self.objective is not None:
            for attr in self.objective.get_attributes():
                if attr != "*":
                    attrs.add(attr)
        return attrs

    def get_bcs_attributes(self):
        return set(attr for attr in self.coalesced_bcs) - {"*"}

    def get_gcs_attributes(self):
        gcs_attrs = set()
        for gc in self.coalesced_gcs:
            assert isinstance(gc, CGlobalConstraint)
            gcs_attrs.update(gc.get_attributes())
        return gcs_attrs

    def get_attributes(self):
        # FIXME: If this is a relaxed query, you should return all attributes including those of the original query.
        return self.get_bcs_attributes() | self.get_gcs_attributes() | self.get_objective_attributes()

    def get_data_attributes(self, db):
        all_data_attributes = sql_get_all_attributes(db, self.table_name)
        # Only pick the data attributes of the allowed data type
        data_attributes = set()
        for data_attr in all_data_attributes:
            attribute_type = sql_table_column_data_type(db, self.table_name, data_attr)
            if attribute_type in self.allowed_dbms_data_types:
                data_attributes.add(data_attr)
        return sorted(data_attributes)
    def get_paql_str(self, redo=False, recompute_gcs=True, coalesced=False):
        raise NotImplementedError

    def abs_ugc_errors(self, gc_scores, attrs=None):
        """
        Returns absolute errors for each (uncoalesced) global constraint.
        """
        if attrs is None:
            use_attrs = self.get_attributes()
        else:
            use_attrs = set(attrs)
        return {
            (aggr, attr): max(0, c - gc_scores[aggr, attr] if op == operator.ge else gc_scores[aggr, attr] - c)
            for (aggr, attr), op, c in self.uncoalesced_gcs if attr == "*" or attr in use_attrs
        }
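
    # Worked example (hypothetical numbers): for an uncoalesced constraint
    # (("sum", "kcal"), operator.le, 2000.0) with gc_scores[("sum", "kcal")] == 2100.0,
    # the returned dict maps ("sum", "kcal") to max(0, 2100.0 - 2000.0) == 100.0.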
    def error_mape(self, u_gc_scores, u_bc_scores):
        errorsum = .0
        n_gcs = 0
        n_bcs = 0
        for i, ((aggr, attr), op, c) in enumerate(self.uncoalesced_gcs):
            score = u_gc_scores[i]
            if not op(score, c):
                errorsum += abs((c - score) / c)
                n_gcs += 1
        for bscores in u_bc_scores:
            for i, (attr, op, c) in enumerate(self.uncoalesced_bcs):
                score = bscores[i]
                if not op(score, c):
                    errorsum += abs((c - score) / c)
                    n_bcs += 1
        if n_gcs + n_bcs > 0:
            return errorsum / (n_gcs + n_bcs)
        else:
            assert errorsum == 0
            return 0
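
    # Worked example (hypothetical numbers): given the counting above, one
    # violated global constraint SUM(kcal) <= 2000 with an achieved score of
    # 2100 and no violated base constraints yields
    # abs((2000 - 2100) / 2000) / 1 == 0.05.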
    def generate_data_for_selectivity(self, selectivity, n_tuples):
        """
        NOTE: This is currently unused. Not even sure if I completed it. But give a look at it again because
        there were some interesting ideas.
        """
        def generate_valid_and_invalid_subsets(n_vars, n_subsets, n_valid):
            # TODO: Read again this function. There's some interesting logic
            n_subsets = int(math.ceil(n_subsets))
            n_valid = int(math.ceil(n_valid))
            assert n_valid <= n_subsets == 2**n_vars

            valid = []
            invalid = []

            # This must be always valid (it is the sum of no tuples)
            # valid.append( (0,)*n_vars )
            valid.append(0)

            # Generate half of the vars valid and half invalid
            for i in range(n_vars):
                if len(valid) < n_valid/2.:
                    # valid.append(tuple( bit for bit in ('{:0%db}' % n_tuples).format(2**i) ))
                    valid.append(2**i)
                elif len(invalid) < (n_subsets - n_valid)/2.:
                    # invalid.append(tuple( bit for bit in ('{:0%db}' % n_tuples).format(2**i) ))
                    invalid.append(2**i)
                else:
                    valid.append(2**i)

            # Generate more invalid (up to n_subsets-n_valid) by combining invalid + invalid
            while len(invalid) < n_subsets-n_valid:
                found = False
                for i in range(len(invalid)):
                    for j in range(len(invalid)):
                        new_invalid = invalid[i] | invalid[j]
                        if new_invalid not in invalid:
                            invalid.append(new_invalid)
                            found = True
                            break
                    if found:
                        break
                if not found:
                    break

            # If more invalid are needed, generate them by combining invalid + valid
            while len(invalid) < n_subsets-n_valid:
                found = False
                for i in range(len(invalid)):
                    for j in range(len(valid)):
                        new_invalid = invalid[i] | valid[j]
                        if new_invalid not in invalid:
                            invalid.append(new_invalid)
                            found = True
                            break
                    if found:
                        break
                if not found:
                    raise Exception

            # All the remaining ones are valid
            valid = set(range(n_subsets)) - set(invalid)
            assert len(valid) == n_valid
            assert len(valid) + len(invalid) == n_subsets

            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                debug("n invalid = {}".format(n_subsets - n_valid))
                debug("{}".format(valid))
                debug("{}".format(invalid))
                debug("{}".format([ tuple( bit for bit in ('{:0%db}' % n_tuples).format(i) ) for i in valid ]))
                debug("{}".format([ tuple( bit for bit in ('{:0%db}' % n_tuples).format(i) ) for i in invalid ]))

            return valid, invalid

        def generate_set_of_problems(base_prob, vars, total_n_constraints, n_valid_constraints, a, b):
            problems = []
            for valid in itertools.combinations(range(total_n_constraints), int(math.ceil(n_valid_constraints))):
                valid = set(valid)
                invalid = set(range(total_n_constraints)) - valid
                assert set(valid) | invalid == set(range(total_n_constraints))

                # The empty package must always be valid. TODO: Really?
                # valid = [0] + list(valid)

                prob = base_prob.copy()
                # valid = generate_valid_and_invalid_subsets(n_tuples, total_n_constraints, n_valid_constraints)[0]
                # valid = np.random.choice(range(total_n_constraints), size=n_valid_constraints, replace=False)

                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                    debug("VALID: {}".format(valid))
                    debug("INVALID: {}".format(sorted(set(range(total_n_constraints)) - set(valid))))

                # Add valid constraints to the problem
                n_valid_added = 0
                for i in valid:
                    package_bitmap = [ int(bit) for bit in ('{:0%db}' % n_tuples).format(i) ]
                    assert len(package_bitmap) == len(vars)
                    # Add a VALID constraint for this combination of tuples
                    prob += np.dot(vars, package_bitmap) >= a
                    prob += np.dot(vars, package_bitmap) <= b
                    n_valid_added += 1
                assert n_valid_added == len(valid)

                # Add invalid constraints to the problem
                n_invalid_added = 0
                if float(a) > -float("inf") and float(b) < float("inf"):
                    # In this case, we produce 2**(len(invalid)) new sub-problems, each for a different set of ways
                    # to break the constraints a <= sum() <= b
                    pairs_of_invalid_constraints = []
                    for i in invalid:
                        package_bitmap = [ int(bit) for bit in ('{:0%db}' % n_tuples).format(i) ]
                        pairs_of_invalid_constraints.append((
                            (package_bitmap, operator.le, a-1),
                            (package_bitmap, operator.ge, b+1),
                        ))
                    orig_prob = prob.copy()
                    for set_of_invalid in itertools.product(*pairs_of_invalid_constraints):
                        new_prob = orig_prob.copy()
                        for invalid_bitmap, op, c in set_of_invalid:
                            new_prob += op(np.dot(vars, invalid_bitmap), c)
                        problems.append(new_prob)
                else:
                    # In this case, we only generate one sub-problem by adding all invalid constraints
                    for i in invalid:
                        package_bitmap = [ int(bit) for bit in ('{:0%db}' % n_tuples).format(i) ]
                        assert len(package_bitmap) == len(vars)
                        # Add an INVALID (out of range) constraint for this combination of tuples
                        if float(a) > -float("inf") and float(b) < float("inf"):
                            raise Exception("Should never happen!")
                            # prob += np.dot(vars, package_bitmap) <= a-1
                        elif float(a) > -float("inf"):
                            prob += np.dot(vars, package_bitmap) <= a-1
                        elif float(b) < float("inf"):
                            prob += np.dot(vars, package_bitmap) >= b+1
                        else:
                            raise Exception
                        n_invalid_added += 1
                    assert n_invalid_added == len(invalid)
                    problems.append(prob)

            return problems

        assert 0 <= selectivity <= 1
        assert n_tuples >= 0

        table_name_start = self.bc_query.lower().find("from ")
        table_name_end = self.bc_query[table_name_start+5:].lower().find(" ")
        table_name = self.bc_query[table_name_start+5:table_name_start+5+table_name_end]

        attribute_names = []
        ranges = []
        for j in range(len(self.gc_queries)):
            if 'sum(' in self.gc_queries[j].lower():
                attr_start = self.gc_queries[j].lower().find('sum(')
                attr_end = self.gc_queries[j][attr_start+4:].lower().find(')')
                attribute_names.append(self.gc_queries[j][attr_start+4:attr_start+4+attr_end])
                ranges.append(self.gc_ranges[j])
        debug("{} {}".format(attribute_names, ranges))
        assert len(attribute_names) == len(ranges)

        # Generate the data via CPLEX
        data_columns = []

        # Generate one column at a time. Each column is generated with a CPLEX problem
        for j in range(len(attribute_names)):
            a, b = ranges[j]
            total_n_constraints = 2**n_tuples
            n_valid_constraints = (1-selectivity) * total_n_constraints

            # Check satisfiability of requirements
            if n_valid_constraints == 0 and a <= 0 <= b:
                warning("Since a<=0<=b there is always at least one valid package, i.e. the empty package, "
                        "therefore selectivity=1 (where no package is valid) is impossible.")
                return None
            if n_valid_constraints == total_n_constraints and not a <= 0 <= b:
                warning("Since not a<=0<=b, the empty package may never be a valid package, "
                        "therefore selectivity=0 (where all packages are valid) is impossible.")
                return None

            # Create the base problem
            base_prob = LpProblem("package-builder", LpMinimize)
            base_prob += 0  # no objective

            # Add the variables of the problem
            vars = [
                LpVariable("{}_{}".format(attribute_names[j], i), -float("inf"), float("inf"), LpInteger)
                for i in range(n_tuples)
            ]

            # Generate all possible combinations of problem constraints.
            # One of them will be feasible and will give us the dataset
            problems = generate_set_of_problems(base_prob, vars, total_n_constraints, n_valid_constraints, a, b)

            # Now try to find one feasible problem
            for prob in problems:
                # Solve the problem
                debug("{}".format(prob))
                solver = CPLEX(msg=True, timeLimit=None)
                solver.solve(prob)

                # Check the problem status
                if LpStatus[prob.status] == 'Infeasible':
                    debug("@@@@@@@@@@@@@@@@@ INFEASIBLE: CONTINUE")
                    continue
                elif LpStatus[prob.status] == 'Undefined':
                    raise Exception("Problem is undefined.")
                elif LpStatus[prob.status] == 'Optimal':
                    debug("################## OPTIMAL")
                    prob.roundSolution()
                    sol = [ v.varValue for v in prob.tuple_variables() if type(v.varValue) is float ]
                    data_columns.append(sol)
                    break
                else:
                    raise Exception("LP status: {}".format(LpStatus[prob.status]))
            else:
                raise Exception("Could not find feasible combination of constraints "
                                "for selectivity {} and {} tuples.".format(selectivity, n_tuples))

        tuples = np.array(data_columns).transpose()
        return table_name, attribute_names, tuples

class ConjunctivePackageQuery(PackageQuery):
    # TODO: later on, move the two staticmethods from_... outside. Make them just functions.
    # TODO: IMPORTANT! All base and gc queries MUST be instances of some class SQL_Query instead of just strings

    def __init__(self, query_dict):
        super(ConjunctivePackageQuery, self).__init__(query_dict)

        # Store the base and global constraints as coalesced and un-coalesced constraints
        gc_constraint_trees = []
        gc_ranges = []
        gcs = self.such_that_expr.get_ANDed_gc_list()
        for sqlquery_expr, gc_range_a, gc_range_b in gcs:
            if isinstance(sqlquery_expr, SQLQueryExpression):
                # Note: Technically, you'll get an expression tree of "constraint trees" (query plans). So you
                # should actually try to combine them into one single constraint tree. Right now I'm simplifying
                # by assuming that the expression tree is always a simple leaf (so directly a constraint tree).
                operator_tree_expr = sqlquery_expr.traverse_leaf_func(leaf_func="get_constraint_tree")
                assert isinstance(operator_tree_expr, ArithmeticExpression)
            else:
                raise Exception
            gc_constraint_trees.append(operator_tree_expr)
            gc_ranges.append((np.float64(gc_range_a), np.float64(gc_range_b)))
        self.coalesced_gcs = get_coalesced_global_constraints(gc_constraint_trees, gc_ranges)
        self.uncoalesced_gcs = get_uncoalesced_global_constraints(self.coalesced_gcs)
        self.coalesced_bcs = get_coalesced_base_constraints(self.bc_query)
        self.uncoalesced_bcs = get_uncoalesced_base_constraints(self.coalesced_bcs)

    def __str__(self):
        return (
            "/-------------------------------------------- PaQL Query ---------------------------------------------\\\n"
            "| PaQL query:\n"
            "| " + str(self._paql_query_str) + "\n"
            "| Base SQL query:\n"
            "| " + str(self.bc_query) + "\n"
            "| Global SQL queries:\n"
            "| " + ("| ".join([ str(q) + "\n" for q in self.gc_queries ]) if self.gc_queries else "None\n") + ""
            "| Global constraint ranges:\n"
            "| " + ("| ".join([ str(q) + "\n" for q in self.gc_ranges ]) if self.gc_ranges else "None\n") + ""
            "| Optimization objective:\n"
            "| " + (str(self.objective) if self.objective else "None") + "\n"
            "\-----------------------------------------------------------------------------------------------------/"
        )

    def get_paql_str(self, redo=False, recompute_gcs=True, coalesced=False):
        if redo or self._paql_query_str is None or self._paql_query_str_stale:
            if recompute_gcs:
                self.coalesced_gcs = get_coalesced_global_constraints(self.gc_queries, self.gc_ranges)
                self.uncoalesced_gcs = get_uncoalesced_global_constraints(self.coalesced_gcs)
                self.coalesced_bcs = get_coalesced_base_constraints(self.bc_query)
                self.uncoalesced_bcs = get_uncoalesced_base_constraints(self.coalesced_bcs)

            if self.rel_namespace is None:
                # raise Exception("rel_namespace is None")
                # return ""
                self.rel_namespace = { "R": self.table_name }

            bcs_str = []
            gcs_str = []
            obj_str = None
            if not coalesced:
                if len(self.uncoalesced_bcs) > 0:
                    for attr, op, n in self.uncoalesced_bcs:
                        bcs_str.append("{} {} {}".format(attr, op_to_opstr(op), n))
                if len(self.uncoalesced_gcs) > 0:
                    for (aggr, attr), op, n in self.uncoalesced_gcs:
                        gcs_str.append("{}({}) {} {}".format(aggr, attr, op_to_opstr(op), n))
            else:
                if len(self.coalesced_bcs) > 0:
                    for attr, (lb, ub) in self.coalesced_bcs.iteritems():
                        if float(lb) == -float("inf") and float(ub) == float("inf"):
                            continue
                        elif float(ub) == float("inf"):
                            bcs_str.append("{} {} {}".format(attr, op_to_opstr(operator.ge), lb))
                        elif float(lb) == -float("inf"):
                            bcs_str.append("{} {} {}".format(attr, op_to_opstr(operator.le), ub))
                        elif lb == ub:
                            bcs_str.append("{} {} {}".format(attr, op_to_opstr(operator.eq), ub))
                        else:
                            bcs_str.append("{} BETWEEN {} AND {}".format(attr, lb, ub))
                if len(self.coalesced_gcs) > 0:
                    for (aggr, attr), (lb, ub) in self.coalesced_gcs.iteritems():
                        if aggr.lower() == "count":
                            lb, ub = int(lb), int(ub)
                        uaggr = aggr.upper()
                        if float(lb) == -float("inf") and float(ub) == float("inf"):
                            continue
                        elif float(ub) == float("inf"):
                            gcs_str.append("{}({}) {} {}".format(uaggr, attr, op_to_opstr(operator.ge), lb))
                        elif float(lb) == -float("inf"):
                            gcs_str.append("{}({}) {} {}".format(uaggr, attr, op_to_opstr(operator.le), ub))
                        elif lb == ub:
                            gcs_str.append("{}({}) {} {}".format(uaggr, attr, op_to_opstr(operator.eq), ub))
                        else:
                            gcs_str.append("{}({}) BETWEEN {} AND {}".format(uaggr, attr, lb, ub))

            if self.objective is not None:
                if self.objective["type"] == "maximize":
                    obj_str = "MAXIMIZE "
                elif self.objective["type"] == "minimize":
                    obj_str = "MINIMIZE "
                else:
                    raise
                obj_str += self.objective["func"].get_str()

            self._paql_query_str = \
                "SELECT \n\tPACKAGE({pack}) \n" \
                "FROM \n\t{tables} {bcs}{gcs}{obj};".format(
                    pack=", ".join(self.rel_namespace.keys()),
                    tables=", ".join("{} {}".format(name, alias) for alias, name in self.rel_namespace.iteritems()),
                    bcs="\nWHERE \n\t{} ".format(" AND\n\t".join(bcs_str)) if bcs_str else "",
                    gcs="\nSUCH THAT \n\t{} ".format(" AND\n\t".join(gcs_str)) if gcs_str else "",
                    obj="\n{}".format(obj_str) if obj_str is not None else "")

            self._paql_query_str_stale = False
        return self._paql_query_str
avg_line_length: 41.283951 | max_line_length: 120 | alphanum_fraction: 0.554326

hexsha: 4f4a97e35757ebe49c3484c56a9ee824088c0411
size: 116,081 | ext: py | lang: Python
max_stars_repo: jfoley-yw/scrabble @ 049a69572138b06341af163ec69e18a1eb20b737 (path: plotting.py, licenses: ["MIT"], stars: 1, 2021-04-01T14:24:17.000Z to 2021-04-01T14:24:17.000Z)
max_issues_repo: jfoley-yw/scrabble @ 049a69572138b06341af163ec69e18a1eb20b737 (path: plotting.py, licenses: ["MIT"], issues: 3, 2021-04-05T01:09:16.000Z to 2021-04-19T20:19:30.000Z)
max_forks_repo: jfoley-yw/scrabble @ 049a69572138b06341af163ec69e18a1eb20b737 (path: plotting.py, licenses: ["MIT"], forks: null)
content:

import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import statistics
# Scores for ABPruning Depth 2 on rack size 5
# nodes = [5, 101, 56, 64, 100, 71, 135, 55, 39, 14, 85, 12, 21, 48, 17, 21, 64, 214, 93, 9, 14, 6, 16, 12, 42, 17, 25, 117, 35, 37, 35, 89, 6, 70, 22, 80, 16, 64, 70, 51, 21, 39, 46, 31, 30, 82, 18, 32, 71, 9, 59, 4, 74, 49, 1, 55, 30, 2, 50, 61, 62, 86, 26, 116, 75, 21, 52, 19, 146, 83, 57, 65, 3, 74, 14, 59, 24, 3, 23, 62, 53, 57, 69, 90, 9, 26, 72, 59, 19, 35, 17, 31, 68, 16, 26, 13, 130, 78, 13, 29, 17, 44, 54, 14, 50, 43, 60, 1, 7, 5, 10, 9, 42, 22, 97, 11, 89, 40, 134, 7, 49, 140, 190, 42, 52, 12, 72, 27, 16, 48, 21, 30, 14, 31, 92, 42, 26, 59, 15, 87, 42, 101, 67, 49, 22, 15, 222, 66, 26, 72, 12, 14, 42, 15, 36, 10, 1, 43, 8, 64, 89, 74, 19, 21, 12, 10, 9, 39, 98, 14, 24, 45, 49, 3, 13, 55, 18, 103, 10, 7, 54]
# p1scores = [204, 167, 283, 162, 240, 250, 301, 167, 221, 193, 307, 199, 208, 187, 146, 219, 197, 149, 229, 187, 262, 15, 183, 294, 139, 249, 230, 178, 198, 237, 281, 198, 201, 307, 188, 253, 257, 239, 315, 249, 154, 231, 222, 226, 250, 248, 200, 175, 234, 261, 227, 154, 290, 250, 190, 270, 227, 208, 155, 232, 271, 285, 168, 242, 226, 181, 217, 248, 192, 179, 237, 238, 215, 191, 252, 293, 248, 232, 190, 193, 255, 164, 252, 221, 220, 153, 239, 0, 205, 0, 190, 149, 238, 284, 194, 52, 249, 227, 264, 195, 200, 246, 207, 0, 253, 271, 176, 246, 189, 216, 210, 169, 273, 190, 285, 281, 179, 207, 255, 248, 283, 173, 210, 261, 196, 241, 0, 217, 212, 205, 200, 208, 205, 267, 206, 230, 138, 231, 181, 109, 0, 255, 259, 229, 175, 234, 356, 275, 280, 273, 255, 27, 199, 220, 289, 188, 299, 291, 211, 246, 267, 192, 206, 278, 242, 253, 266, 182, 162, 230, 225, 250, 199, 0, 243, 337, 170, 175, 260, 145, 207, 231, 203, 235, 169, 232, 194, 177, 258, 194, 216, 267, 14, 186, 221, 199, 281, 293, 121, 0]
# p2scores =[219, 293, 237, 328, 267, 275, 235, 305, 265, 237, 276, 165, 247, 262, 154, 174, 163, 288, 199, 288, 206, 49, 362, 195, 262, 265, 265, 215, 206, 195, 262, 240, 310, 173, 194, 256, 251, 166, 226, 231, 295, 309, 226, 260, 201, 259, 266, 252, 305, 164, 293, 211, 269, 177, 274, 180, 161, 242, 264, 142, 229, 276, 226, 281, 196, 169, 185, 202, 249, 329, 208, 178, 225, 183, 265, 185, 292, 224, 284, 223, 201, 331, 158, 296, 118, 318, 239, 0, 208, 0, 197, 283, 237, 277, 229, 81, 271, 274, 206, 281, 138, 209, 290, 0, 255, 205, 180, 181, 271, 227, 201, 268, 151, 283, 232, 202, 165, 165, 213, 234, 227, 267, 181, 213, 226, 240, 0, 278, 214, 260, 185, 286, 235, 236, 223, 278, 235, 210, 156, 215, 0, 249, 309, 249, 155, 215, 197, 205, 211, 263, 240, 18, 303, 301, 247, 282, 206, 191, 246, 284, 195, 191, 246, 300, 216, 220, 201, 364, 183, 223, 273, 212, 183, 0, 275, 179, 294, 241, 138, 91, 202, 188, 273, 211, 299, 207, 282, 252, 238, 153, 221, 176, 21, 330, 221, 198, 233, 256, 67, 0]
# p1endgamescores = [204, 155, 268, 103, 193, 240, 232, 151, 162, 179, 281, 199, 190, 127, 136, 207, 175, 149, 206, 163, 258, None, 153, 251, 139, 204, 185, 168, 180, 237, 239, 184, 165, 293, 155, 193, 230, 199, 236, 196, 139, 204, 207, 215, 185, 186, 189, 163, 212, 220, 215, 136, 278, 234, 190, 254, 209, 208, 131, 232, 234, 271, 151, 174, 174, 161, 193, 177, 178, 129, 187, 196, 184, 179, 174, 248, 224, 222, 165, 171, 210, 153, 210, 188, None, 136, 158, None, 188, None, 190, 137, 193, 256, 182, None, 236, 212, 224, 174, 200, 233, 207, None, 207, 185, 176, 179, 181, 197, 186, 163, 209, 173, 251, 281, 179, 202, 209, 206, 233, 160, 171, 213, 166, 136, None, 160, 167, 185, 182, 164, 157, 198, 153, 170, 131, 231, None, 100, None, 231, 210, 185, None, 192, 284, 259, 223, 222, 203, None, 188, 200, 267, 165, 228, 219, 203, 206, 198, 164, 188, 264, 189, 253, 213, 175, 118, 230, 225, 212, 199, None, 223, 226, 143, 165, 248, None, 202, 190, 162, 224, 160, 191, 186, 166, 173, 192, 164, 203, None, 184, 204, 194, 226, 245, None, None]
# p2endgamescores = [197, 237, 197, 281, 254, 202, 223, 292, 252, 133, 223, 147, 188, 244, 141, 167, 156, 170, 143, 249, 184, None, 279, 177, 262, 235, 242, 156, 195, 195, 219, 164, 160, 168, 176, 204, 210, 148, 217, 201, 229, 258, 197, 216, 185, 234, 194, 205, 205, 149, 233, 166, 255, 171, 253, 170, 134, 203, 211, 142, 224, 185, 140, 247, 174, 152, 170, 197, 208, 304, 197, 156, 171, 123, 244, 172, 220, 171, 262, 216, 173, 283, 128, 219, None, 250, 201, None, 185, None, 180, 240, 188, 189, 157, None, 196, 211, 149, 198, 138, 145, 224, None, 224, 205, 180, 167, 185, 210, 194, 196, 103, 223, 170, 190, 165, 148, 188, 220, 202, 251, 165, 187, 167, 197, None, 235, 181, 190, 178, 232, 188, 222, 180, 242, 223, 147, None, 203, None, 174, 286, 230, None, 181, 158, 181, 161, 243, 180, None, 228, 253, 171, 205, 191, 179, 214, 258, 193, 173, 190, 237, 196, 204, 181, 336, 169, 193, 254, 156, 181, None, 204, 168, 285, 207, 106, None, 195, 180, 259, 168, 258, 186, 193, 200, 233, 146, 187, 148, None, 242, 221, 175, 206, 207, None, None]
# times =[0.392374, 1.087435, 0.7119709999999997, 0.810673, 1.126067, 0.7669739999999994, 1.994154, 0.6895319999999998, 0.6009550000000008, 0.3275289999999984, 0.9863210000000002, 0.2606470000000005, 0.34875400000000134, 0.7456220000000009, 0.32181700000000113, 0.4138010000000012, 0.8067349999999998, 0.15606599999999915, 2.6905530000000013, 0.9456589999999991, 0.31708599999999976, 0.02612799999999993, 0.3128700000000002, 0.18505099999999786, 0.30555900000000236, 0.2620560000000012, 0.6745239999999981, 0.3066670000000009, 0.47260400000000047, 0.17609900000000067, 1.093862999999999, 0.45542199999999866, 0.7054580000000001, 0.5520420000000001, 1.0170379999999994, 0.2309340000000013, 0.733039999999999, 0.3513699999999993, 1.010028000000002, 0.29272900000000135, 0.8606179999999988, 0.7247850000000007, 0.6421530000000004, 0.2927980000000012, 0.7698590000000003, 0.6178729999999995, 0.3698689999999978, 0.43626899999999935, 0.8837449999999976, 0.3141510000000025, 0.4212889999999945, 0.7093589999999992, 0.2769800000000018, 0.7990300000000019, 0.21445099999999684, 0.8646510000000021, 0.6168930000000046, 0.1808240000000012, 0.6278900000000007, 0.4272070000000028, 0.21817000000000064, 0.7489089999999976, 0.6385280000000009, 0.9075630000000032, 0.8847120000000004, 0.5543809999999993, 1.1678890000000024, 0.9268649999999994, 0.32570499999999925, 0.7548699999999968, 0.3216419999999971, 1.9017319999999955, 1.4462270000000004, 0.6572630000000004, 1.060724999999998, 0.20168500000000478, 0.8210849999999965, 0.44043299999999874, 0.7265659999999983, 0.46925099999999986, 0.19774300000000267, 0.4200649999999939, 0.7009690000000006, 0.6478700000000046, 0.11564000000000618, 0.8002919999999989, 0.8857590000000002, 0.00432199999999483, 1.0995959999999982, 0.002073000000002878, 0.251300999999998, 0.39348400000000083, 0.8867929999999973, 0.9020439999999965, 0.33812700000000007, 0.07212599999999725, 0.48321200000000175, 0.3076520000000045, 0.5212119999999985, 0.7779639999999972, 0.3196629999999985, 0.37090200000000095, 0.2648180000000053, 0.0021559999999993806, 1.3325860000000063, 0.9182589999999919, 0.26353699999999947, 0.6415129999999891, 0.30544600000000344, 0.4823680000000081, 0.7752869999999916, 0.28730600000000095, 0.5652749999999997, 0.7506249999999994, 0.8220110000000034, 0.2295310000000086, 0.20372600000000318, 0.1945779999999928, 0.32604299999999853, 0.24623400000000117, 0.5504200000000026, 0.39636099999999885, 1.0773339999999934, 0.30486299999999744, 0.9997989999999959, 0.6598670000000055, 0.00277499999999975, 1.401699999999991, 0.24062299999999937, 0.612363000000002, 1.5137590000000074, 2.308454999999995, 0.5395869999999974, 1.1261659999999978, 0.3013700000000057, 0.8219319999999897, 0.46814299999999776, 0.19157900000000438, 0.15418999999999983, 0.2853580000000022, 0.0024219999999957054, 0.5605540000000104, 0.43711700000000064, 0.47679000000000826, 0.13257300000000782, 0.3474680000000063, 0.45118300000000033, 0.9176939999999973, 0.6302490000000063, 0.44485699999999895, 0.6749839999999949, 0.02986500000000092, 0.3542879999999968, 1.1250870000000077, 0.48420600000000036, 1.1846189999999979, 0.9359439999999921, 0.9131380000000036, 0.35106799999999794, 0.36182700000000523, 2.052135000000007, 0.8193789999999979, 0.3867840000000058, 0.8020479999999992, 0.3208309999999983, 0.351657000000003, 0.6312570000000051, 0.36063900000000615, 0.5858340000000055, 0.3393300000000039, 0.23335600000000056, 0.545345999999995, 0.263898999999995, 0.002562999999994986, 0.7251380000000012, 1.133899999999997, 0.8232259999999911, 
# 0.38127900000000636, 0.40733799999999576, 0.08812200000001269, 0.2706400000000002, 0.29463900000000365, 0.28798100000000204, 0.5042630000000088, 0.9467050000000086, 0.3245540000000062, 0.35645500000001107, 0.4888389999999987, 0.5706250000000068, 0.1804039999999958, 0.40680500000000563, 0.709456000000003, 0.015135000000000787, 0.2789380000000108, 1.0135240000000039, 0.2699709999999982, 0.32936300000000074, 0.7083930000000009, 0.059475999999989426, 0.00197799999999404]
# AB Pruning Depth 3
# nodes = [86, 5, 16, 5, 24, 1001, 55, 221, 4, 1027, 124, 77, 335, 137, 211, 403, 4, 11, 41, 27, 21, 1486, 654, 65, 49, 772, 22, 63, 216, 48, 364, 11605, 537, 23, 42, 14, 147, 146, 61, 95, 458, 1625, 55, 672, 271, 91, 49, 757, 4, 127, 29, 63, 1719, 71, 100, 127, 103, 76, 103, 12, 127, 64, 1, 160, 18, 97, 8, 143, 127, 292, 704, 367, 13, 52, 8, 63, 148, 8, 225, 107, 61, 18, 226, 4333, 48, 510, 28, 356, 646, 182, 44, 109, 245, 589, 33, 257, 7, 531, 1125, 35, 9, 441, 3, 54, 79, 87, 328, 31, 391, 413, 383, 11, 1058, 18, 183, 69, 56, 74, 74, 1, 22, 711, 8, 196, 374, 45, 106, 2, 480, 32, 821, 675, 55, 1881, 590, 203, 23, 243, 574, 155, 29, 69, 54, 900, 27, 340, 405, 24, 39, 307, 139, 4, 77, 789, 182, 90, 252, 426, 10, 219, 26, 73, 1, 183, 370, 26, 1, 2, 90, 2866, 6, 51, 4, 989, 436, 34, 534, 717, 4, 99] + [3, 30, 28, 62, 14, 32, 13, 577, 1, 396, 41, 59, 59, 447, 36, 117, 31, 219, 40, 11, 5, 21, 38, 2, 92, 477, 56, 177, 158, 1277, 9, 27, 429, 2447, 23, 403, 45, 14, 35, 799, 310, 77, 72, 25, 449, 61, 1, 7, 835, 5, 72, 1, 18, 85, 159, 95, 133, 84, 177, 79, 36, 118, 301, 23, 51, 1, 85, 11, 483, 68, 135, 26, 33, 97, 155, 33, 12, 329, 133, 1, 22, 212, 10, 501, 439, 186, 33, 740, 27, 107, 4, 1151, 333, 23, 14, 25, 109, 168, 209, 83, 41, 239, 301, 305, 65, 69, 183, 597, 717, 69, 291, 250, 48, 17, 176, 53, 65, 72, 1040, 64, 6, 71, 1033, 10, 6, 303, 117, 192, 103, 80, 159, 121, 105, 113, 184, 8, 27, 114, 439, 89, 21, 22, 759, 1073, 10, 41, 21, 301, 5, 653, 9, 2595, 165, 59, 190, 158, 70, 7, 35, 2, 7, 327, 126, 296, 42, 574, 241, 335, 893, 102, 24, 279, 229, 1172, 73, 1457, 116]
# p1scores =[228, 140, 230, 155, 308, 305, 273, 222, 364, 0, 183, 280, 228, 167, 278, 216, 316, 124, 228, 123, 305, 245, 200, 275, 175, 251, 320, 217, 281, 47, 276, 88, 240, 207, 194, 243, 234, 408, 275, 227, 197, 234, 247, 366, 245, 156, 198, 225, 234, 193, 281, 221, 191, 202, 222, 193, 374, 256, 153, 200, 331, 252, 270, 218, 228, 245, 224, 266, 165, 331, 208, 208, 0, 272, 209, 224, 223, 300, 0, 245, 216, 296, 274, 164, 253, 290, 228, 283, 257, 222, 163, 251, 220, 110, 225, 307, 187, 248, 214, 226, 232, 249, 170, 213, 165, 186, 208, 174, 223, 135, 237, 231, 238, 251, 174, 231, 198, 311, 249, 213, 218, 192, 179, 202, 174, 311, 273, 185, 263, 333, 321, 233, 141, 260, 180, 180, 263, 183, 187, 180, 230, 143, 229, 253, 93, 254, 184, 241, 190, 257, 66, 218, 153, 360, 280, 192, 181, 214, 214, 207, 213, 162, 257, 239, 262, 189, 185, 272, 300, 292, 180, 181, 228, 271, 278, 190, 151, 174, 257, 250, 163, 207, 212, 275, 173, 251, 328, 245, 282, 219, 265, 278, 157, 205, 177, 196, 0, 0, 260, 203] + [155, 231, 171, 247, 245, 0, 202, 235, 159, 283, 209, 265, 269, 243, 273, 163, 192, 176, 0, 164, 220, 195, 216, 211, 154, 275, 192, 244, 192, 191, 181, 231, 229, 253, 213, 316, 170, 162, 307, 231, 161, 163, 178, 193, 249, 317, 356, 203, 234, 244, 190, 177, 256, 0, 234, 230, 172, 182, 265, 152, 160, 0, 174, 170, 284, 222, 226, 258, 139, 190, 139, 212, 174, 226, 160, 54, 152, 315, 136, 158, 320, 196, 280, 168, 124, 81, 178, 167, 174, 171, 220, 253, 186, 154, 209, 272, 230, 255, 272, 309, 368, 260, 165, 169, 294, 234, 231, 328, 168, 151, 171, 221, 207, 252, 359, 48, 257, 269, 231, 221, 0, 290, 220, 66, 289, 194, 236, 115, 162, 156, 228, 207, 208, 318, 206, 230, 221, 230, 202, 207, 330, 252, 271, 218, 242, 302, 257, 221, 296, 241, 194, 174, 306, 145, 233, 252, 286, 297, 231, 169, 247, 0, 284, 272, 162, 300, 235, 268, 258, 0, 193, 257, 353, 176, 234, 172, 182, 256, 160, 258, 186, 178, 191, 58, 159, 306, 238, 308, 208, 210, 239, 252, 0, 212, 235, 114, 252, 176, 307, 222]
# p2scores = [234, 234, 251, 241, 175, 253, 229, 235, 204, 0, 268, 228, 319, 282, 181, 195, 207, 133, 238, 87, 156, 223, 217, 170, 276, 186, 191, 183, 209, 41, 150, 195, 267, 207, 228, 301, 275, 176, 273, 290, 255, 221, 233, 218, 208, 173, 274, 295, 219, 246, 197, 212, 202, 119, 243, 282, 114, 229, 221, 193, 235, 228, 193, 182, 319, 199, 221, 242, 197, 150, 211, 166, 0, 179, 225, 238, 193, 182, 0, 276, 270, 192, 202, 191, 241, 184, 217, 115, 167, 220, 230, 226, 180, 223, 252, 209, 345, 137, 230, 222, 280, 248, 327, 226, 309, 207, 192, 264, 247, 252, 246, 222, 244, 170, 283, 249, 233, 171, 241, 253, 245, 217, 243, 289, 202, 143, 173, 200, 239, 266, 245, 293, 239, 151, 236, 151, 185, 190, 227, 225, 223, 206, 171, 283, 67, 238, 219, 267, 265, 210, 99, 283, 214, 146, 256, 328, 268, 358, 218, 252, 179, 116, 301, 260, 239, 282, 314, 197, 234, 222, 209, 124, 223, 190, 211, 224, 224, 235, 197, 142, 206, 178, 262, 228, 183, 185, 212, 280, 155, 233, 249, 252, 348, 315, 217, 205, 0, 0, 307, 225] + [227, 197, 196, 129, 291, 0, 210, 247, 187, 245, 256, 190, 184, 214, 201, 144, 270, 224, 0, 186, 235, 204, 209, 202, 343, 124, 194, 229, 179, 184, 259, 265, 239, 166, 202, 274, 249, 265, 358, 252, 242, 250, 299, 367, 223, 196, 194, 320, 259, 247, 356, 203, 184, 0, 253, 272, 228, 262, 241, 230, 271, 0, 299, 184, 273, 156, 237, 147, 99, 183, 273, 242, 265, 240, 310, 35, 210, 250, 168, 230, 154, 210, 222, 167, 149, 55, 222, 263, 256, 214, 148, 251, 278, 221, 267, 184, 208, 176, 244, 253, 154, 291, 178, 314, 126, 281, 214, 243, 192, 199, 242, 193, 208, 238, 246, 62, 198, 279, 209, 296, 0, 210, 238, 138, 173, 207, 250, 113, 162, 319, 350, 221, 196, 170, 222, 194, 222, 279, 242, 224, 163, 183, 224, 275, 187, 191, 230, 312, 259, 230, 240, 327, 252, 205, 162, 110, 179, 176, 279, 285, 255, 0, 278, 298, 291, 269, 222, 237, 209, 0, 191, 181, 220, 211, 185, 335, 297, 258, 183, 174, 179, 238, 164, 86, 285, 197, 276, 189, 255, 337, 212, 190, 0, 269, 244, 143, 227, 205, 199, 237]
# p1endgamescores = [209, 140, 219, 155, 308, 264, 209, 210, 259, None, 183, 240, 158, 109, 254, 203, 270, None, 211, None, 241, 203, 200, 275, 167, 201, 243, 150, 263, None, 221, None, 177, 160, 181, 188, 188, 333, 259, 173, 173, 222, 186, 321, 239, 147, 174, 158, 162, 193, 254, 208, 182, None, 182, 174, 374, 249, 153, 192, 331, 188, 266, 159, 192, 186, 215, 203, 165, 271, 168, 208, None, 221, 209, 179, 185, 283, None, 192, 169, 224, 220, 164, 208, 238, 228, 217, 203, 177, 121, 236, 210, 110, 178, 244, 183, None, 168, 175, 224, 193, 159, 172, 158, 162, 190, 166, 192, 121, 226, 205, 194, 210, 146, 182, 194, 257, 227, 187, 205, 170, 157, 169, 174, 265, 271, 183, 251, 278, 271, 184, 141, 260, 110, 180, 211, 165, 160, 161, 192, 129, 184, 179, None, 233, 174, 241, 167, 200, None, 203, 147, 304, 203, 173, 137, 201, 156, 143, 207, None, 180, 169, 219, 180, 158, 220, 260, 210, 143, None, 213, 230, 260, 180, 151, 153, 208, 247, 163, 203, 174, 270, 173, 212, 274, 220, 282, 176, 222, 173, 145, 186, 162, 193, None, None, 216, 192] + [155, 196, 171, None, 234, None, 146, 187, 159, 283, 168, 265, 216, 228, 225, None, 156, 157, None, 159, 202, 189, 149, 209, 142, 275, 143, 199, 192, 182, 172, 222, 199, 211, 164, 273, 159, 144, 212, 222, 125, 154, 178, 188, 197, 250, 307, 158, 217, 182, 175, 177, 256, None, 163, 191, 160, 182, 259, None, 144, None, 144, 145, 226, 222, 164, 243, None, 171, 139, 188, 156, 216, 131, None, 152, 289, None, 149, 233, 189, 207, 162, None, None, 164, 137, 144, 148, 220, 199, 170, 154, 179, 228, 230, 196, 203, 281, 288, 248, 165, 156, 252, 225, 202, 300, 168, 141, 160, 212, 180, 193, 291, None, 183, 209, 214, 210, None, 229, 150, None, 232, 130, 214, None, 149, 135, 187, 185, 178, 304, 193, 216, 166, 214, 162, 166, 267, 208, 227, 162, 176, 246, 247, 156, 274, 214, 147, 159, 289, 145, 229, 252, 219, 268, 198, 156, 194, None, 233, 171, 162, 292, 235, 202, 220, None, 173, 257, 293, 164, 230, 158, 166, 214, 160, 246, 186, 178, 177, None, 139, 257, 204, 255, 178, 178, 179, 188, None, 206, 168, 102, 181, 160, 234, 208]
# p2endgamescores = [158, 189, 184, 176, 141, 214, 229, 158, 189, None, 209, 198, 277, 260, 121, 184, 204, None, 183, None, 145, 200, 197, 154, 197, 172, 171, 163, 190, None, 124, None, 230, 181, 159, 272, 233, 168, 175, 230, 180, 176, 187, 194, 202, 163, 227, 268, 210, 246, 187, 197, 187, None, 227, 214, 111, 174, 175, 185, 235, 214, 159, 164, 233, 194, 205, 192, 182, 142, 186, 152, None, 172, 225, 167, 167, 164, None, 266, 233, 157, 185, 191, 221, 149, 194, 86, 146, 191, 182, 159, 151, 223, 219, 190, 305, None, 215, 201, 229, 233, 278, 202, 282, 186, 189, 259, 193, 196, 199, 202, 202, 153, 243, 225, 189, 149, 191, 234, 201, 169, 228, 242, 202, 127, 145, 192, 215, 213, 209, 260, 221, 130, 213, 142, 143, 181, 148, 183, 196, 193, 149, 269, None, 168, 152, 180, 198, 189, None, 275, 212, 142, 226, 275, 232, 312, 182, 205, 157, None, 258, 250, 208, 224, 204, 175, 210, 183, 156, None, 132, 142, 195, 207, 217, 158, 155, 111, 189, 154, 210, 182, 153, 165, 176, 199, 123, 208, 222, 244, 288, 274, 156, 142, None, None, 271, 216] + [216, 159, 196, None, 238, None, 172, 230, 187, 229, 199, 163, 178, 144, 148, None, 187, 209, None, 168, 193, 173, 180, 184, 279, 124, 164, 176, 153, 171, 210, 208, 195, 157, 202, 250, 198, 213, 345, 193, 228, 193, 283, 291, 172, 188, 134, 280, 171, 235, 283, 184, 163, None, 245, 231, 220, 214, 176, None, 222, None, 259, 179, 243, 156, 210, 112, None, 173, 200, 196, 223, 174, 270, None, 196, 210, None, 172, 139, 193, 220, 151, None, None, 174, 251, 227, 149, 132, 218, 262, 195, 188, 132, 208, 169, 229, 193, 132, 225, 148, 269, 96, 228, 165, 174, 175, 195, 238, 193, 166, 213, 246, None, 178, 270, 168, 252, None, 171, 208, None, 150, 200, 176, None, 147, 311, 332, 219, 139, 140, 203, 150, 211, 190, 220, 200, 149, 168, 205, 227, 176, 170, 166, 295, 212, 188, 198, 280, 171, 202, 151, 110, 161, 171, 236, 205, 233, None, 256, 236, 252, 191, 208, 215, 183, None, 176, 116, 197, 165, 176, 262, 188, 228, 161, 124, 179, 217, 157, None, 236, 155, 189, 171, 191, 268, 192, 177, None, 231, 211, 140, 216, 194, 180, 197]
# times = [0.302287, 0.48822600000000005, 0.5163570000000002, 0.16153499999999976, 0.7776640000000001, 0.001535000000000064, 0.29301699999999986, 0.5065280000000003, 0.12990400000000024, 0.32921500000000004, 4.679038, 0.1681329999999992, 3.2305550000000007, 0.5520459999999989, 0.7192220000000002, 0.12025600000000125, 0.6981760000000001, 3.6961950000000012, 0.003185000000001992, 0.48873000000000033, 1.2964649999999978, 0.5333119999999987, 1.792968000000002, 0.6345550000000024, 0.28365699999999805, 0.20615100000000197, 0.3406500000000001, 0.4964759999999977, 0.18752800000000036, 1.0494940000000028, 3.761049, 0.9204159999999995, 1.9074659999999959, 1.5012680000000032, 9.198293, 0.24091200000000157, 0.4045929999999984, 3.628905000000003, 17.396524999999997, 0.3690509999999989, 3.187155000000004, 0.6000769999999989, 0.29019800000000373, 0.48729500000000314, 6.386953000000005, 2.8630199999999917, 0.7748610000000014, 0.7580719999999985, 0.3651010000000099, 3.6552050000000094, 0.7605970000000042, 0.19398900000000197, 0.24668499999999938, 0.0033159999999980982, 6.240561999999997, 0.1838510000000042, 0.8707640000000083, 0.16168700000000058, 0.4301110000000108, 0.16683499999999185, 0.9311519999999973, 0.0026849999999996044, 1.4847460000000012, 1.142407999999989, 1.4954979999999978, 1.0758420000000086, 1.8791650000000004, 0.9196019999999976, 0.10925000000000296, 0.5226569999999953, 0.1593209999999914, 1.157032000000001, 2.5614479999999986, 0.3541659999999922, 0.6173730000000006, 0.059522000000001185, 0.160381000000001, 0.9406049999999908, 0.13094700000000614, 0.25168399999999735, 3.846960999999993, 0.7361259999999987, 1.356601000000012, 0.6552250000000015, 0.11782300000000134, 0.04031500000000676, 0.5775300000000101, 1.1632939999999934, 1.4485340000000093, 0.45620300000000213, 0.27908299999999997, 3.0335510000000028, 1.2548239999999993, 0.17242399999999236, 0.36704799999999693, 2.1102300000000014, 0.29303199999999663, 4.164985999999985, 4.567004999999995, 2.086742000000015, 0.4733879999999999, 5.844267000000002, 0.4267099999999857, 1.209859999999992, 0.2015480000000025, 8.292562000000004, 2.92284699999999, 0.4104629999999929, 0.29586899999998195, 0.46127000000001317, 1.0170789999999954, 1.455286000000001, 1.8845369999999946, 0.8482099999999946, 0.5651830000000189, 0.04415199999999686, 2.0751889999999946, 2.479348000000016, 2.5295759999999916, 0.7797919999999863, 0.0026730000000156906, 0.9183069999999987, 1.6649270000000058, 0.1019019999999955, 4.529155000000003, 5.846455999999989, 0.7741430000000094, 0.08540899999999851, 2.5175160000000005, 2.068481999999989, 0.5640789999999924, 0.428260999999992, 1.7390479999999968, 1.0019539999999836, 0.8226879999999994, 0.9755020000000059, 7.529112999999995, 0.7808420000000069, 0.21134399999999687, 0.7513040000000046, 7.666158999999993, 0.26195699999999533, 0.23442700000001082, 2.399527000000006, 1.2790499999999838, 1.7537809999999752, 1.2455859999999745, 1.0338150000000041, 1.7212069999999926, 1.1864989999999977, 1.219680000000011, 1.1028200000000083, 2.04205300000001, 0.26001099999999155, 0.38013599999999315, 0.15817499999999995, 1.6904879999999878, 4.912279000000012, 0.9157529999999952, 0.3886739999999804, 0.4158909999999878, 0.0027560000000050877, 6.362789000000021, 7.658319000000006, 0.28317899999998986, 0.5404159999999933, 0.47652000000002204, 3.7486769999999865, 0.2283069999999725, 0.0037100000000123146, 4.584168000000034, 0.28847100000001547, 19.02055999999999, 1.7636289999999804, 0.7426980000000185, 2.4264119999999707, 1.9503730000000132, 
# 0.8652650000000222, 0.20177099999995107, 0.5929290000000265, 0.1720860000000357, 0.2188199999999938, 3.2740200000000073, 0.050792999999998756, 1.3619659999999953, 3.1773059999999873, 0.6073489999999993, 4.746735000000001, 2.2632360000000062, 3.4399270000000115, 8.308117999999979, 1.0250589999999988, 0.00169000000005326, 0.39543000000003303, 2.3666280000000484, 1.9741940000000113, 9.370889999999974, 0.8959370000000035, 10.932597000000044, 1.146158000000014] + [0.9818600000000001, 0.18684599999999985, 0.284651, 0.1931339999999997, 0.20349899999999987, 0.37138899999999975, 7.124136999999999, 0.6058829999999986, 2.4041420000000002, 0.003247, 0.1637550000000001, 0.23646800000000034, 7.939729000000002, 1.4012209999999996, 0.877122, 3.031236, 1.5146709999999999, 0.11898400000000109, 1.9030850000000008, 0.12234899999999982, 3.3588829999999987, 0.20842400000000083, 0.2950750000000042, 0.5302830000000043, 0.45972799999999836, 0.3606710000000035, 12.203933, 5.492877, 0.770862000000001, 0.03264899999999926, 0.6282549999999958, 0.17952399999999358, 6.333936999999999, 0.37348300000000023, 0.7375720000000001, 1.793454000000004, 0.5641670000000119, 3.2574770000000086, 74.67717799999998, 4.1415079999999875, 0.4001869999999883, 0.6712049999999863, 0.3119820000000004, 1.284636000000006, 1.3202970000000107, 0.6841909999999984, 1.0290500000000122, 3.697091999999998, 11.638582000000014, 0.6672000000000082, 5.086490999999995, 2.5516369999999995, 0.8975179999999909, 0.1515009999999961, 0.5307719999999847, 5.827793000000014, 0.22040800000002037, 1.4703389999999956, 0.528538999999995, 0.684363999999988, 0.15716900000001033, 11.696953000000008, 0.893558000000013, 0.9925029999999992, 1.231281999999993, 1.0381110000000149, 0.8575850000000003, 1.1173339999999996, 0.29615900000001716, 1.1849320000000034, 0.6783920000000023, 0.1940620000000024, 0.0026720000000182154, 1.5418269999999836, 0.3530099999999834, 1.0412879999999802, 0.26709900000000175, 1.358899000000008, 0.0024579999999900792, 1.183667999999983, 2.0924819999999897, 5.934335000000004, 3.029453999999987, 0.1766280000000222, 0.31619799999998577, 0.5998119999999858, 0.28447099999999637, 0.7415009999999995, 1.376227, 0.248722000000015, 1.985258000000016, 1.0290329999999983, 0.7684800000000109, 0.2983030000000042, 1.9575689999999781, 28.328948000000025, 0.573664000000008, 0.17980899999997746, 4.1246980000000235, 0.5455279999999902, 2.5830419999999776, 5.1338309999999865, 1.8075769999999807, 0.5125810000000115, 1.0577020000000061, 2.2284500000000094, 4.102599999999995, 0.4566859999999906, 2.302823999999987, 0.2938520000000153, 4.157145000000014, 8.024020999999948, 0.5287420000000225, 0.2804760000000215, 3.1981030000000032, 0.22229199999998173, 0.5656019999999558, 1.203959999999995, 1.0350630000000365, 3.2405449999999973, 0.41684399999996913, 2.891042000000027, 3.298088000000007, 3.5374330000000214, 0.2688280000000418, 7.112251000000015, 0.3323740000000157, 1.5135099999999966, 0.675389999999993, 0.7517609999999877, 0.7041050000000268, 0.6652470000000221, 0.19704200000001038, 0.3577319999999986, 5.477946999999972, 0.2305769999999825, 1.974019999999996, 2.7916529999999966, 0.49781999999999016, 1.2191030000000183, 0.17617500000000064, 3.7758499999999913, 0.4338819999999828, 5.4692380000000185, 0.07212999999995873, 5.095369000000005, 0.5802400000000034, 0.20575300000001562, 12.511234000000002, 4.129910999999993, 0.06967699999995602, 1.6817480000000273, 0.39475400000003447, 1.9287759999999707, 5.909084000000007, 1.5200940000000287, 0.4394580000000019, 0.6964069999999651,
# 0.5813059999999837, 6.31139300000001, 0.40707700000001523, 0.1473760000000084, 2.8344220000000178, 3.150712999999996, 0.4260699999999815, 0.4825100000000475, 2.814973000000009, 1.2284329999999954, 0.19369899999998097, 0.7573419999999942, 6.188214000000016, 0.11435000000000173, 1.5439799999999764, 0.8929660000000013, 2.1043359999999893, 3.371122000000014, 0.27641800000003514, 1.863973000000044, 0.36803000000003294, 0.6705519999999865, 0.15709199999997736, 1.5896569999999883, 2.8591299999999933, 0.36724600000002283, 0.17984000000001288, 0.21805399999999509, 0.9198010000000068, 18.44721400000003, 0.21354800000000296, 0.6144419999999968, 0.17110600000000886, 6.6795529999999985, 3.6269309999999564, 0.42886500000003025, 4.119304, 5.221270000000004, 0.002066000000013446, 0.0019340000000056534, 0.2663430000000062, 0.9184209999999666]
# Scores for ABPruning Depth 4 on rack size 5
# p1scores = [175, 14, 220, 184, 201, 199, 248, 187, 259, 241, 292, 142, 261, 284, 193, 0, 196, 149, 227, 247, 170, 210, 284, 240, 353, 247, 214, 333, 210, 0, 230, 165, 207, 193, 259, 183, 247, 164, 250, 301, 193, 149, 223, 211, 193, 223, 287, 123, 240, 276, 222, 308, 246, 154, 173, 221, 158, 199, 147, 121, 0, 77, 153, 185, 218, 200, 207, 211, 171, 234, 198, 222, 175, 159, 180, 280, 219, 172, 277, 254, 238, 226, 200, 209, 258, 303, 242, 210, 244, 249, 300, 107, 174, 330, 268, 220, 252, 243, 196, 370, 213, 167, 168, 94, 202, 125, 175, 211, 253, 0, 125, 201, 0, 219, 211, 176, 209, 200, 223, 312, 197, 240, 274, 261, 212, 165, 258, 223, 204, 282, 238, 232, 200, 229, 292, 234, 248, 269, 227, 225, 150, 203, 253, 251, 127, 235, 229, 174, 268, 179, 189, 206, 262, 0, 186, 138, 274, 207, 213, 241, 237, 283, 223, 190, 325, 252, 211, 0, 205, 317, 145, 332, 217, 199, 273, 294, 240, 210, 218, 312, 211, 176, 230, 162, 249, 149, 166, 186, 131, 179, 174, 198, 208, 359, 273, 168, 225, 275, 406, 236]
# p2scores = [169, 11, 225, 226, 253, 198, 214, 280, 297, 206, 203, 209, 200, 206, 267, 0, 219, 273, 297, 189, 245, 218, 117, 170, 236, 238, 138, 181, 261, 0, 261, 191, 245, 236, 218, 225, 206, 270, 293, 198, 220, 253, 174, 288, 253, 240, 194, 202, 280, 254, 211, 138, 258, 262, 159, 191, 320, 170, 264, 188, 0, 60, 199, 340, 295, 167, 254, 311, 263, 160, 169, 221, 187, 224, 205, 163, 190, 208, 226, 213, 199, 234, 144, 182, 300, 216, 233, 234, 267, 300, 219, 248, 274, 158, 238, 237, 224, 284, 270, 214, 241, 225, 249, 178, 274, 189, 268, 282, 157, 0, 129, 257, 0, 95, 222, 135, 244, 212, 188, 178, 200, 250, 226, 292, 235, 187, 159, 210, 187, 188, 264, 174, 241, 230, 242, 193, 257, 252, 290, 227, 204, 233, 189, 194, 44, 143, 214, 217, 233, 145, 226, 195, 231, 0, 203, 236, 208, 202, 247, 205, 274, 140, 238, 224, 245, 274, 174, 0, 204, 232, 260, 189, 266, 225, 226, 228, 276, 169, 288, 156, 280, 243, 200, 200, 216, 188, 181, 259, 183, 270, 253, 247, 272, 182, 227, 199, 231, 239, 154, 241]
# p1endgamescores = [166, None, 157, 170, 201, 192, 211, 180, 239, 198, 243, 142, 188, 210, 171, None, 160, 149, 173, 184, 159, 207, 284, 240, 299, 185, 212, 267, 180, None, 182, 165, 141, 139, 194, 174, 247, 152, 221, 240, 193, 142, 223, 211, 181, 212, 248, 123, 228, 208, 222, 255, 191, 154, 173, 154, 150, 199, 124, 121, None, None, 153, 181, 183, 194, 185, 183, 112, 234, 198, 198, 175, 129, 180, 211, 188, 172, 277, 227, 238, 172, 193, 209, 214, 247, 198, 180, 209, 213, 244, None, 138, 330, 187, 180, 237, 178, 187, 304, 157, 155, 152, None, 148, 125, 160, 175, 196, None, None, 187, None, None, 211, None, 174, 200, 184, 255, 159, 157, 229, 247, 212, None, 258, 161, 189, 204, 202, 211, 182, 209, 243, 234, 248, 247, 188, 225, 150, 199, 253, 174, None, 235, 175, 174, 214, None, 189, 206, 249, None, 183, 138, 200, 145, 173, 230, 227, 283, 193, 153, 280, 153, 211, None, 142, 257, None, 278, 191, 199, 228, 249, 179, 200, 204, 258, 178, 152, 170, 148, 197, 149, 151, 171, 124, 146, 162, 186, 208, 301, 223, 168, 164, 257, 340, 190]
# p2endgamescores = [163, None, 196, 209, 207, 177, 192, 192, 191, 180, 147, 209, 193, 194, 205, None, 182, 251, 261, 177, 239, 190, 117, 153, 185, 222, 136, 171, 223, None, 219, 185, 214, 202, 200, 158, 206, 163, 245, 190, 212, 238, 151, 267, 210, 232, 179, 202, 222, 219, 197, 114, 246, 238, 159, 185, 269, 159, 225, 188, None, None, 199, 259, 221, 153, 245, 260, 249, 160, 155, 180, 187, 181, 205, 153, 186, 193, 226, 173, 199, 206, 111, 164, 272, 209, 208, 219, 217, 287, 204, None, 204, 158, 179, 211, 160, 262, 178, 183, 230, 206, 203, None, 258, 172, 193, 235, 143, None, None, 179, None, None, 222, None, 194, 212, 170, 161, 188, 239, 196, 249, 211, None, 159, 180, 187, 181, 195, 160, 184, 183, 213, 166, 206, 180, 241, 206, 204, 218, 119, 178, None, 127, 174, 217, 175, None, 176, 192, 172, None, 193, 236, 187, 180, 194, 193, 131, 140, 189, 152, 232, 260, 174, None, 184, 210, None, 159, 225, 208, 190, 178, 268, 162, 216, 143, 230, 222, 155, 180, 192, 188, 173, 218, 165, 238, 213, 170, 212, 154, 200, 199, 199, 180, 135, 182]
# times = [2.732856, 0.013799000000000117, 0.6212740000000005, 17.461609000000003, 0.15544099999999972, 3.888458, 0.1793350000000018, 0.3636580000000009, 0.7966540000000002, 0.4364050000000006, 0.3475600000000014, 0.4426419999999993, 16.792683999999998, 45.095144999999995, 0.8019570000000016, 0.002502999999990152, 0.7269470000000098, 0.25820200000001137, 1.8612710000000021, 19.421993, 3.292968000000002, 0.6188310000000001, 0.418879000000004, 1.749019000000004, 3.5704350000000034, 7.313006999999999, 0.5017420000000072, 6.908141999999998, 0.8737129999999809, 0.0025660000000016225, 0.5404660000000092, 0.3335399999999993, 5.45119600000001, 0.9017589999999984, 20.26504, 0.703519, 0.4969130000000064, 0.4349410000000091, 1.9195970000000102, 8.028055999999992, 0.28790599999999245, 0.5483049999999992, 0.2805319999999938, 0.29201000000000477, 0.49781300000000783, 4.297392000000002, 2.5112029999999947, 3.0092320000000257, 2.238361999999995, 11.072942000000012, 1.7760710000000017, 0.48068399999999656, 2.9702339999999765, 0.24950999999998658, 0.4291249999999991, 12.07982100000001, 0.5462039999999888, 0.22472299999998313, 0.9820869999999786, 1.1886100000000113, 0.003675000000015416, 0.07956299999997896, 2.8142090000000053, 0.388623999999993, 10.462159000000014, 38.91616700000003, 1.1222400000000334, 4.5918779999999515, 1.1467499999999973, 0.21945599999997967, 0.38036699999997836, 1.5123520000000212, 0.5903450000000134, 0.8667050000000245, 0.48585700000001, 26.411121999999978, 1.29780599999998, 0.2938289999999597, 1.8478240000000028, 0.7368250000000103, 1.1077379999999835, 21.91255000000001, 1.8880800000000022, 2.213452000000018, 3.029463000000021, 3.320968999999991, 0.2863209999999867, 2.8904580000000237, 1.3423369999999863, 7.960946000000035, 6.179254999999955, 0.10425200000003088, 2.6533749999999827, 0.1375290000000291, 0.47113200000001143, 0.993741, 13.20896399999998, 23.886505999999997, 0.38961600000004637, 3.5794860000000313, 3.537679, 5.111327000000001, 53.177969000000004, 0.1302570000000003, 1.8522600000000011, 0.6265039999999971, 4.604827999999998, 0.5654570000000092, 1.4373130000000032, 0.002815999999995711, 0.09244599999999537, 0.40296200000000226, 0.002415999999996643, 0.11714700000000278, 0.24759699999999896, 0.0949499999999972, 1.584856000000002, 2.4273259999999937, 0.18289699999999698, 1.5296809999999965, 14.213572999999997, 4.0379059999999924, 0.2696860000000072, 2.9077939999999955, 2.107904000000005, 0.16219599999999446, 0.5316650000000038, 6.9160169999999965, 0.41877599999999404, 8.157394999999994, 4.251577000000012, 4.231014000000002, 9.600237000000007, 7.453451000000001, 0.5010110000000054, 0.9992290000000139, 0.20955200000000218, 0.9690920000000176, 0.5488789999999995, 0.24547300000000405, 0.6523559999999975, 2.037618999999978, 0.18045399999999745, 18.83436499999999, 0.06877599999998552, 0.38777899999999477, 0.9994989999999859, 3.7605120000000056, 0.8266940000000034, 0.09906100000000606, 0.14770200000000955, 0.14975399999997308, 0.5807059999999922, 0.002221999999989066, 1.9387200000000178, 0.7457660000000033, 11.175792999999999, 0.6206999999999994, 3.6378159999999866, 3.3504609999999957, 4.76576399999999, 1.0963680000000124, 14.936075000000017, 1.338105000000013, 0.45994799999999714, 1.2010209999999972, 0.35091800000000717, 0.002596000000011145, 9.615527000000014, 15.125898999999976, 0.14351299999998446, 8.531500999999992, 11.296021999999994, 0.23752799999999752, 0.43902700000001005, 0.405838000000017, 2.0098749999999654, 9.548711000000026, 1.9105750000000512, 5.05331000000001, 
# 0.6292460000000233, 0.34711999999996124, 0.917568000000017, 2.972601999999995, 0.4131250000000364, 0.9787170000000174, 3.019792999999993, 0.3610109999999622, 1.3573409999999626, 0.3707560000000285, 8.371876000000043, 6.125440000000026, 0.1835360000000037, 19.582659999999976, 9.010859000000039, 0.3678770000000213, 3.2456080000000043, 0.36412599999999884, 3.174400999999989, 1.2992909999999824]
# Scores for AB pruning, depth 5, rack size 5
# nodes1 = [2030, 15, 51, 67, 94, 27, 868, 862, 37, 8, 561, 2, 439, 375, 930, 136, 898, 20, 91, 21, 39, 27, 285, 219, 38, 52, 109, 4, 63, 628, 929, 237, 2169, 13800, 151, 1184, 12, 71, 11, 417, 3589, 2811, 225, 32, 14, 31, 11, 95, 15, 46, 29, 67, 71, 2097, 619, 588, 39, 2600, 624, 50, 41, 169, 5, 39, 765, 48, 44, 4, 81, 8, 92, 1, 72, 84, 73, 201, 66, 58, 59, 372, 153, 146, 17, 28, 16, 711, 610, 11]
# p1scores1 = [170, 188, 237, 232, 214, 253, 279, 239, 203, 171, 167, 284, 304, 225, 214, 201, 152, 252, 365, 193, 219, 169, 252, 190, 267, 251, 222, 211, 290, 163, 272, 253, 195, 174, 64, 212, 312, 247, 192, 146, 208, 200, 230, 274, 0, 228, 286, 183, 178, 180, 376, 258, 150, 180, 200, 124, 249, 266, 291, 227, 222, 153, 182, 135, 190, 201, 275, 196, 317, 0, 0, 220, 238, 186, 209, 236, 255, 219, 205, 228, 22, 0, 191, 202, 253, 218, 200, 305, 285, 249, 256, 233, 255, 0, 155, 126, 287, 203, 169, 256]
# p2scores1 =[260, 184, 203, 231, 269, 195, 237, 266, 255, 222, 165, 181, 191, 251, 225, 244, 264, 283, 188, 220, 180, 187, 252, 225, 271, 221, 150, 259, 202, 193, 217, 230, 173, 288, 113, 247, 225, 189, 245, 192, 175, 287, 243, 267, 0, 213, 218, 236, 205, 253, 156, 184, 304, 315, 275, 160, 168, 272, 175, 170, 266, 190, 230, 363, 243, 273, 227, 183, 184, 0, 0, 246, 203, 303, 281, 240, 215, 189, 213, 302, 20, 0, 184, 194, 184, 169, 178, 196, 278, 247, 223, 173, 184, 0, 224, 178, 175, 217, 208, 165]
# p1endgamescores1 = [133, 188, 237, 176, 186, 243, 279, 213, 147, 162, 167, 216, 258, 189, 214, 182, 152, 252, 305, 193, 219, 169, 239, 162, 256, 230, 222, 203, 253, 163, 229, 230, 148, 165, None, 186, 235, 208, 126, 146, 208, 200, 230, 264, None, 166, 227, 166, 178, 180, 322, 258, 150, 160, 200, 124, 203, 242, 200, 168, 204, 153, 182, 114, 169, 166, 214, 196, 280, None, None, 204, 232, 170, 158, 236, 215, 219, 205, 210, None, None, 191, 156, 253, 218, 194, 254, 218, 192, 207, 222, 255, None, 155, 126, 249, 125, 164, 214]
# p2endgamescores1 = [185, 167, 203, 227, 196, 132, 190, 240, 204, 192, 165, 169, 174, 233, 225, 224, 264, 283, 160, 208, 180, 170, 183, 167, 228, 164, 120, 196, 185, 158, 188, 173, 167, 243, None, 179, 195, 143, 202, 192, 163, 287, 189, 235, None, 196, 176, 187, 178, 227, 138, 148, 230, 272, 275, 160, 149, 225, 173, 149, 219, 190, 230, 284, 184, 230, 199, 162, 160, None, None, 196, 203, 206, 232, 178, 198, 189, 213, 229, None, None, 160, 166, 184, 152, 151, 149, 268, 243, 195, 173, 184, None, 224, 178, 143, 177, 182, 138]
# times1 =[16.112638, 0.31204600000000227, 0.6796920000000028, 0.7734100000000019, 0.9705629999999985, 0.46329000000000065, 0.17553400000000252, 8.127403000000001, 7.779152, 0.5737299999999976, 0.23147000000000162, 4.792618999999995, 0.2497279999999975, 5.353186999999998, 3.664079000000001, 8.297562, 1.6148019999999974, 0.2074479999999994, 7.665702000000003, 0.3802950000000038, 1.2186629999999923, 0.39734699999999634, 0.5429369999999949, 0.3928499999999957, 2.7380150000000043, 2.2847989999999925, 0.5711359999999956, 0.6492369999999994, 1.601081999999991, 0.23464300000000549, 0.7281010000000094, 5.839645000000004, 8.009985, 2.2360089999999957, 0.03794600000000514, 18.62865500000001, 106.599591, 1.4785849999999812, 10.312135000000012, 0.2718900000000133, 0.8596039999999903, 0.19486899999998286, 0.24861400000000344, 3.8706320000000005, 0.0023579999999867596, 29.138779999999997, 23.555926999999997, 2.274694000000011, 0.5276250000000005, 0.27002799999996796, 0.41291000000001077, 0.25529799999998204, 0.17174199999999473, 0.9664579999999887, 0.3229390000000194, 0.6177310000000489, 0.3732360000000199, 0.7111439999999902, 0.8822620000000256, 16.29657199999997, 5.067601999999965, 5.20061800000002, 0.5240180000000123, 21.185518000000002, 5.6123850000000175, 0.6041490000000067, 0.5517729999999688, 1.9459439999999972, 0.21777700000001232, 0.0031510000000025684, 0.0023370000000113578, 0.5712520000000154, 6.16274999999996, 0.5592800000000011, 0.4912060000000338, 0.1607930000000124, 0.1942860000000337, 0.847802999999999, 0.2699929999999995, 0.8795339999999783, 0.025317999999970198, 0.0020590000000311193, 0.17303100000003724, 0.7059210000000462, 1.0945910000000367, 0.877018000000021, 1.789291999999989, 0.7205609999999751, 0.6656090000000177, 0.8197440000000142, 3.1443270000000325, 1.449218999999971, 1.6580999999999904, 0.003945999999984906, 0.365415999999982, 0.46404300000000376, 0.31528900000000704, 6.125376000000017, 5.259367999999995, 0.25710000000003674]
# nodes2 = [1, 1, 20, 7433, 21, 10733, 357, 186, 231, 990, 421, 5195, 33, 400, 20, 197, 81, 157, 30, 84, 1, 14, 5147, 838, 289, 80, 361, 699, 8385, 374, 846, 683, 6, 88, 43, 215, 193, 11, 8, 37, 1780]
# p1scores2 =[147, 129, 0, 147, 316, 194, 234, 237, 316, 240, 207, 251, 199, 197, 0, 271, 242, 254, 231, 277, 233, 146, 305, 181, 186, 266, 232, 214, 78, 247, 0, 281, 217, 200, 149, 177, 0, 248, 228, 229, 174, 212, 0, 223, 186, 250, 0, 186, 331, 266]
# p2scores2 =[218, 278, 0, 274, 200, 144, 173, 182, 178, 236, 286, 312, 158, 189, 0, 246, 235, 161, 245, 214, 237, 221, 144, 162, 136, 162, 137, 241, 113, 232, 0, 178, 303, 313, 62, 225, 0, 212, 186, 225, 206, 262, 0, 159, 219, 197, 0, 240, 228, 168]
# p1endgamescores2 =[147, 129, None, 136, 253, 194, 169, 165, 260, 176, 138, 240, None, 138, None, 255, 182, 197, 228, 197, 153, 146, 258, 181, 186, 256, 154, 202, None, 236, None, 211, 198, 166, None, 169, None, 188, 177, 184, 169, 161, None, 177, 145, 194, None, 186, 311, 211]
# p2endgamescores2 =[203, 270, None, 207, 187, 144, 167, 168, 128, 223, 269, 305, None, 171, None, 155, 214, 146, 219, 196, 231, 191, 117, 146, 125, 133, 129, 182, None, 199, None, 150, 230, 251, None, 211, None, 205, 165, 189, 198, 223, None, 137, 174, 165, None, 226, 177, 154]
# times2= [0.27102499999999996, 0.206704, 0.003219000000000083, 0.339874, 73.975264, 0.32704999999999984, 80.912725, 3.3703629999999976, 2.0692709999999863, 2.395210999999989, 10.872302999999988, 4.510770000000008, 0.1395480000000191, 48.527783, 0.0015649999999993724, 0.4957629999999824, 5.709429999999998, 0.3313050000000146, 2.196942000000007, 0.9056879999999978, 1.7203229999999792, 0.46023599999998055, 0.83063199999998, 0.17995000000001937, 0.3007690000000025, 44.54222200000001, 7.979903999999976, 2.9357029999999895, 0.0614140000000134, 0.9921439999999961, 0.0038780000000429027, 3.3685120000000097, 6.164650999999992, 62.014320999999995, 0.0810989999999947, 4.158075999999994, 0.0031409999999709726, 7.932039000000032, 5.6581540000000246, 0.31328999999999496, 0.9587760000000003, 0.5008419999999774, 0.0027319999999804168, 2.3414569999999912, 1.9283489999999688, 0.2215190000000007, 0.002453000000002703, 0.2446790000000192, 0.4796670000000063, 15.450380999999993]
# nodes = nodes1 + nodes2
# p1scores = p1scores1 + p1scores2
# p2scores = p2scores1 + p2scores2
# p1endgamescores = p1endgamescores1 + p1endgamescores2
# p2endgamescores = p2endgamescores1 + p2endgamescores2
# times = times1 + times2
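# Each block above is a set of parallel, game-indexed lists. A hedged reading
# of the dumps: a 0 in p1scores/p2scores marks an aborted game, and a None in
# the endgame lists marks a game whose endgame phase was never recorded. Under
# those assumptions, here is a minimal summary sketch (summarize_block is an
# assumed helper name, not part of the original experiment code; the node
# lists appear shorter than the score lists, so they are aggregated
# independently rather than zipped):
from statistics import mean, median

def summarize_block(nodes, p1scores, p2scores, times):
    """Summarize one experiment block under the conventions assumed above."""
    # Drop aborted games (assumption: a score of 0 on either side = aborted).
    played = [(a, b) for a, b in zip(p1scores, p2scores) if a > 0 and b > 0]
    wins = sum(1 for a, b in played if a > b)
    return {
        "games": len(played),
        "p1_win_rate": wins / len(played) if played else None,
        "mean_nodes": mean(nodes) if nodes else None,
        "median_time_s": median(times) if times else None,
    }

# Example (uncomment one data block first):
# print(summarize_block(nodes, p1scores, p2scores, times))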
# AB pruning with no depth limit on rack size 5
# nodes1 = [2918, 19, 20, 167, 223, 24, 66, 239, 901, 34, 5, 439, 14, 34, 667, 170, 79, 670, 308, 66, 62, 253, 343, 4, 79, 2440, 54, 2283, 92, 206, 433, 3, 43, 938, 54, 10, 197, 2857, 75, 13, 105, 44, 119, 19, 69, 4487, 1236, 25, 10, 2, 199, 981, 96, 12, 815, 53, 726, 61, 3301, 69, 1665, 1018, 2148, 120432, 49, 262, 27, 528, 75, 2508, 65, 43, 60, 109, 32, 1664, 287, 2759, 1428, 246, 128, 90, 898, 20, 371, 56, 13, 8, 1196, 6033, 129, 13, 1399]
# p1scores1 = [180, 215, 196, 195, 195, 235, 184, 220, 231, 195, 197, 212, 263, 30, 187, 161, 293, 218, 268, 273, 173, 280, 203, 243, 235, 305, 255, 197, 191, 255, 170, 228, 213, 234, 163, 189, 355, 214, 212, 167, 225, 214, 206, 158, 145, 264, 239, 259, 224, 182]
# p2scores1 = [261, 276, 286, 203, 278, 182, 178, 177, 198, 209, 189, 200, 187, 22, 112, 229, 206, 188, 268, 220, 194, 171, 158, 260, 179, 224, 253, 256, 186, 186, 288, 267, 323, 274, 202, 230, 211, 268, 208, 178, 171, 255, 161, 184, 53, 144, 197, 220, 262, 251]
# p1endgamescores1 = [104, 170, 196, 181, 184, 190, 184, 210, 220, 195, 190, 158, 263, None, None, 161, 222, 215, 256, 264, 173, 229, 203, 221, 165, 265, 216, 177, 191, 200, 158, 213, 206, 195, 163, 165, 297, 214, 212, 167, 211, 206, 206, 158, None, 264, 234, 196, 170, 156]
# p2endgamescores1 = [230, 258, 228, 160, 222, 149, 178, 147, 189, 200, 150, 187, 156, None, None, 229, 200, 188, 178, 172, 194, 157, 133, 196, 169, 210, 220, 184, 186, 180, 213, 147, 277, 236, 202, 183, 205, 244, 179, 178, 129, 178, 161, 184, None, 123, 180, 172, 252, 232]
# times1 = [28.457569, 0.33411299999999855, 0.3422729999999987, 1.7242899999999999, 1.9751229999999964, 0.34948599999999885, 0.8178440000000009, 2.284306000000001, 8.470917, 0.49538099999999474, 0.2556449999999941, 4.728405000000002, 0.34807999999999595, 0.023735999999999535, 0.12552500000000322, 0.483984999999997, 7.080171, 1.9386399999999995, 0.9097670000000022, 6.730280999999998, 2.9818090000000126, 0.7033690000000092, 0.72840699999999, 2.370058, 4.470228999999989, 0.2786049999999989, 0.8592219999999884, 20.823213999999993, 0.6710889999999949, 17.893317999999994, 0.8816439999999943, 1.7517409999999956, 3.9330260000000123, 0.20288299999999992, 0.6357670000000013, 7.664194999999992, 0.7840810000000147, 0.3089580000000183, 1.8615410000000168, 24.267165000000006, 0.848608999999982, 0.27916300000001115, 1.231020000000001, 0.5714190000000201, 0.04297500000001264, 1.3442199999999787, 0.3977549999999894, 0.8133920000000217, 36.14156200000002, 10.697302999999977, 0.5683670000000001, 0.26924099999999984, 0.17733, 2.02914, 8.453273, 0.8671249999999997, 0.2741490000000013, 8.405273000000001, 0.003088000000001756, 0.7832660000000011, 6.770484, 0.001788000000001233, 0.7870349999999995, 32.759317, 0.8810690000000037, 15.959770999999996, 9.082476, 23.287146000000007, 899.159576, 0.6069360000000188, 2.3854630000000725, 0.4569099999999935, 0.13301100000001043, 4.110847000000035, 0.874349000000052, 20.135546999999974, 0.7187790000000405, 0.4951149999999416, 0.675617000000102, 1.151836000000003, 0.4612999999999374, 13.440222000000176, 0.0017380000001594453, 2.7643929999999273, 22.713453000000072, 11.432960999999978, 2.4568299999998544, 1.368257000000085, 0.9997370000000956, 7.1294339999999465, 0.3538969999999608, 3.9655290000000605, 0.6393659999998818, 0.26812500000005457, 0.23288300000012896, 9.49111599999992, 55.96718499999997, 1.2186229999999796, 0.28220200000009754, 10.691080000000056]
# nodes2 = [499, 1628, 990, 2, 13, 133, 17, 1173, 60, 5, 1573, 333, 12296, 31539, 51, 35, 229, 61, 1644, 129, 52, 55, 2175, 41, 137, 73, 7, 12, 41, 993, 14, 23, 15, 670, 505, 7139, 23, 17, 492, 2, 7498, 17, 4, 859, 4331, 8, 3, 165, 3847]
# p1scores2 = [226, 198, 227, 200, 237, 209, 153, 247, 177, 143, 222, 230, 184, 321, 220, 229, 261, 185, 226, 233, 258, 145, 154, 258, 250, 186, 178, 127, 240, 266, 246, 205, 289, 224, 256, 246, 224, 244, 198, 147, 241, 244, 235, 308, 192, 261, 202, 274, 295, 267]
# p2scores2 = [225, 190, 209, 284, 179, 162, 169, 211, 202, 212, 258, 230, 213, 229, 220, 133, 169, 295, 206, 233, 320, 267, 276, 253, 305, 229, 216, 237, 178, 318, 319, 240, 195, 190, 200, 175, 210, 201, 206, 238, 205, 218, 226, 199, 231, 206, 149, 204, 234, 255]
# p1endgamescores2 = [167, 190, 167, 200, 237, 209, 153, 198, 177, 143, 147, 202, 184, 260, 200, 229, 261, 155, 164, 233, 243, 115, 150, 173, 240, 168, 178, 127, 240, 249, 227, 195, 248, 224, 248, 240, 167, 184, 198, 120, 204, 175, 190, 265, 192, 203, 202, 230, 280, 171]
# p2endgamescores2 = [219, 185, 195, 266, 163, 162, 169, 194, 202, 200, 233, 175, 213, 215, 178, 119, 145, 201, 178, 155, 234, 226, 215, 204, 238, 185, 172, 183, 157, 252, 204, 154, 175, 170, 133, 167, 175, 168, 194, 184, 169, 188, 206, 183, 231, 184, 149, 185, 154, 186]
# times2 = [5.462273, 13.628348999999998, 9.027452999999998, 0.20035800000000137, 0.350676, 1.452566000000001, 0.3063469999999988, 9.485683000000002, 0.7841099999999983, 0.20266799999999563, 17.036028, 3.118584999999996, 102.818501, 242.39040999999997, 0.5522730000000138, 0.5206039999999916, 2.3233899999999608, 0.7285350000000221, 14.949749999999995, 0.17539299999998548, 1.184301000000005, 0.577241000000015, 0.6432050000000231, 20.644333999999958, 0.5160099999999943, 1.323853999999983, 0.8926519999999982, 0.2601789999999937, 0.33121899999997595, 0.4770679999999743, 8.132326000000035, 0.31665900000001557, 0.39193999999997686, 0.3116600000000176, 6.508721000000037, 5.085165000000018, 55.82077000000004, 0.3661329999999907, 0.4054830000000038, 4.384490000000028, 0.16655200000002424, 70.42404600000009, 0.35272199999997156, 0.2003789999999981, 8.768035000000054, 35.55618399999992, 0.24955899999997655, 0.1856559999999945, 1.568037000000004, 29.536572999999976]
# nodes = nodes1 + nodes2
# p1scores = p1scores1 + p1scores2
# p2scores = p2scores1 + p2scores2
# p1endgamescores = p1endgamescores1 + p1endgamescores2
# p2endgamescores = p2endgamescores1 + p2endgamescores2
# times = times1 + times2
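# The endgame lists use None where no endgame snapshot exists, so aggregates
# over them must drop indices pairwise. A small sketch under the same assumed
# conventions (endgame_margin is an assumed helper name):
from statistics import mean

def endgame_margin(p1end, p2end):
    """Mean (p1 - p2) score gap at the recorded endgame point, skipping
    games where either side's snapshot is None."""
    pairs = [(a, b) for a, b in zip(p1end, p2end)
             if a is not None and b is not None]
    return mean(a - b for a, b in pairs) if pairs else None

# Example: endgame_margin(p1endgamescores, p2endgamescores)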
# PVS (Principal Variation Search)
# nodes = [1, 13, 3, 101, 3, 4, 16, 11, 26, 6, 44, 43, 23, 3, 3, 9, 133, 69, 24, 13, 1098, 22, 51, 3, 282, 19, 22, 93, 12, 9, 139, 307, 4, 11, 16, 3, 10, 5, 34, 153, 15, 11, 47, 14, 76, 25, 6, 69, 18, 6, 22, 14, 141, 7, 3, 12, 286, 31, 45, 4, 39, 78, 21, 8, 12, 11, 20, 6, 3, 468, 177, 17, 215, 6, 32, 20, 4, 15, 55, 8, 7, 60, 301, 3, 6, 12, 1542, 111, 11, 19, 8, 3, 269, 44, 82, 88, 5, 11, 1368, 6, 39, 12, 13, 49, 55, 11, 3, 19, 6, 36, 12, 35, 23, 29, 213, 22, 23, 9, 6, 6, 455, 3, 8, 2, 83, 12, 137, 74, 38, 15, 4, 3, 6, 11, 3, 3, 34, 28, 21, 17, 51, 3, 7, 29, 67, 11, 6, 30, 35, 476, 3, 3, 49, 17, 6, 3, 224, 9, 57, 3, 10, 43, 164, 33, 13, 138, 41, 12, 3, 20, 169, 167, 3, 3, 13, 14, 20, 3, 50, 3, 146, 77, 3, 3, 194, 7, 11, 3, 8, 8, 6, 181, 11, 11, 15, 28, 40, 14, 261, 3, 18, 47, 10, 14, 4, 236, 6, 6, 6, 77, 57, 3, 18, 54, 41, 3, 3, 592, 15, 62, 29, 47, 1, 5, 55, 31, 7182, 283, 167, 18, 62, 14, 233, 32, 11, 30, 149, 3, 22849, 141, 130, 6, 24, 31, 11, 83, 262, 204, 5, 9, 71, 74, 3, 12, 61, 41, 38, 4, 49, 15, 21, 4, 123, 14, 3, 4, 15, 284, 49, 553, 15, 41, 5, 37, 29, 92, 11, 13, 39, 5, 57, 129, 21, 104, 25, 66, 13, 81, 3, 3, 8, 3, 192, 28, 93, 17107, 53, 135, 53, 89, 7, 6, 23, 9, 80, 408, 125, 36, 56, 1, 204, 7, 13, 3, 12, 18, 3, 30, 25, 7, 22, 581, 6, 2, 3, 110, 6, 164, 57, 6, 10, 3, 3, 6, 3, 7, 3, 25, 3, 3, 1, 102, 1, 1, 13, 5, 15, 3, 250, 11, 223, 101, 16, 4, 90, 19, 75, 9, 25, 3, 40, 5, 7, 16, 69, 5, 6, 3, 6, 22, 3, 3, 54, 8, 69, 6, 3, 3, 524, 17, 27, 9, 56, 6, 16, 17, 48, 3, 376, 1, 56, 30, 21, 378, 48, 27, 23, 3, 821, 12, 6, 31, 12, 8, 284, 35, 172, 59, 36, 1652, 122, 19, 41, 27, 20, 11, 325, 13, 13, 133, 27, 3, 3, 91, 3, 3, 1, 385, 27, 47, 7, 127, 3, 26, 232, 20, 18, 3, 9, 84, 11, 3, 3, 116, 14, 53, 32, 22, 15]
# p1scores =[218, 166, 280, 234, 259, 315, 0, 261, 282, 258, 224, 198, 246, 0, 185, 204, 144, 153, 214, 235, 188, 108, 223, 227, 161, 260, 138, 220, 321, 287, 240, 73, 250, 203, 260, 191, 225, 182, 188, 228, 240, 171, 183, 216, 235, 180, 273, 165, 229, 226, 209, 137, 213, 299, 254, 241, 205, 320, 230, 227, 227, 214, 299, 187, 181, 251, 309, 140, 181, 195, 286, 139, 201, 281, 224, 212, 230, 152, 174, 267, 165, 115, 208, 156, 0, 262, 279, 202, 267, 197, 220, 263, 246, 118, 216, 157, 150, 337, 172, 49, 276, 193, 145, 217, 200, 276, 227, 219, 290, 189, 243, 251, 200, 39, 286, 318, 326, 268, 214, 169, 197, 235, 249, 338, 176, 168, 226, 212, 248, 257, 255, 0, 0, 172, 278, 192, 244, 182, 243, 200, 189, 191, 268, 215, 270, 259, 156, 304, 196, 271, 194, 255, 309, 286, 121, 229, 293, 226, 176, 165, 305, 376, 150, 227, 235, 112, 31, 202, 219, 152, 337, 269, 195, 240, 16, 241, 237, 163, 257, 169, 210, 298, 257, 232, 268, 290, 316, 256, 202, 174, 181, 244, 165, 284, 238, 193, 194, 264, 207, 233, 280, 187, 257, 212, 239, 258, 189, 235, 229, 226, 270, 208, 0, 0, 278, 229, 278, 184, 215, 263, 209, 188, 222, 166, 272, 0, 286, 223, 343, 325, 171, 246, 313, 164, 381, 311, 242, 238, 214, 141, 284, 196, 310, 297, 202, 195, 226, 249, 243, 163, 224, 263, 172, 161, 288, 205, 179, 156, 380, 260, 269, 223, 208, 0, 252, 189, 0, 257, 0, 190, 313, 234, 186, 187, 164, 272, 154, 168, 148, 60, 187, 0, 250, 194, 31, 325, 174, 261, 0, 232, 227, 279, 233, 166, 225, 284, 346, 196, 234, 0, 209, 180, 258, 207, 175, 215, 172, 233, 189, 260, 190, 174, 248, 256, 255, 126, 228, 229, 164, 260, 0, 288, 279, 44, 190, 218, 226, 135, 273, 229, 237, 250, 233, 254, 203, 288, 188, 171, 191, 280, 258, 196, 238, 160, 294, 203, 231, 265, 180, 223, 265, 108, 321, 0, 270, 144, 212, 272, 230, 290, 197, 249, 300, 149, 198, 191, 221, 280, 200, 224, 198, 168, 146, 207, 0, 241, 9, 191, 144, 290, 293, 213, 269, 215, 199, 195, 254, 202, 20, 204, 259, 235, 153, 161, 202, 264, 208, 280, 142, 250, 141, 142, 99, 190, 232, 190, 218, 329, 201, 185, 35, 181, 257, 218, 192, 213, 262, 242, 160, 149, 268, 214, 182, 166, 275, 205, 289, 216, 256, 249, 202, 204, 277, 307, 233, 273, 200, 294, 0, 198, 220, 123, 181, 147, 257, 251, 178, 250, 203, 205, 136, 0, 230, 289, 221, 197, 212, 207, 274, 293, 152, 178, 323, 202, 134, 241, 180, 191, 170, 222, 149, 249, 242, 204, 251, 309, 301, 322, 136, 224, 176, 204, 291, 279, 203, 238, 252, 283, 285, 238, 323, 241, 260, 194, 210, 248, 0, 238, 247, 185]
# p2scores =[189, 196, 243, 218, 227, 220, 0, 259, 216, 248, 316, 289, 184, 0, 146, 141, 184, 198, 186, 261, 170, 94, 184, 367, 178, 332, 233, 200, 254, 238, 195, 95, 192, 305, 263, 156, 306, 248, 203, 215, 148, 246, 248, 118, 247, 276, 195, 243, 309, 273, 335, 174, 255, 268, 215, 140, 252, 221, 205, 226, 243, 149, 112, 269, 199, 219, 221, 96, 290, 143, 164, 222, 209, 171, 250, 264, 264, 263, 221, 215, 265, 158, 219, 201, 0, 240, 246, 195, 196, 290, 250, 264, 267, 192, 271, 275, 240, 222, 193, 12, 177, 178, 184, 160, 248, 190, 262, 200, 140, 208, 298, 215, 250, 24, 173, 219, 197, 256, 386, 200, 218, 279, 196, 179, 292, 215, 225, 186, 201, 173, 266, 0, 0, 196, 160, 212, 154, 143, 312, 170, 212, 120, 222, 202, 195, 239, 208, 186, 195, 177, 206, 153, 168, 153, 188, 191, 256, 255, 261, 314, 297, 181, 195, 284, 179, 98, 33, 155, 246, 159, 186, 274, 226, 184, 35, 224, 189, 232, 203, 192, 220, 159, 232, 206, 237, 274, 182, 134, 187, 185, 217, 163, 197, 197, 184, 180, 248, 186, 232, 261, 181, 198, 258, 151, 206, 190, 221, 238, 235, 209, 193, 266, 0, 0, 129, 280, 196, 202, 225, 218, 259, 239, 164, 242, 282, 0, 219, 198, 193, 249, 301, 176, 245, 215, 203, 187, 220, 261, 230, 239, 189, 217, 261, 168, 321, 195, 242, 263, 268, 224, 208, 171, 332, 258, 209, 238, 223, 274, 179, 192, 214, 250, 121, 0, 223, 316, 0, 290, 0, 294, 204, 215, 311, 184, 317, 157, 276, 230, 145, 136, 218, 0, 195, 205, 25, 167, 197, 192, 0, 242, 171, 290, 195, 160, 193, 330, 258, 249, 293, 0, 196, 206, 198, 217, 223, 160, 222, 247, 178, 212, 177, 227, 250, 196, 190, 261, 271, 283, 215, 237, 0, 203, 233, 153, 184, 202, 284, 227, 186, 210, 194, 277, 225, 176, 170, 245, 369, 181, 227, 230, 251, 330, 199, 205, 252, 308, 197, 195, 244, 230, 171, 274, 206, 0, 136, 178, 220, 294, 221, 125, 257, 304, 303, 252, 274, 179, 268, 173, 173, 191, 236, 229, 214, 278, 0, 197, 17, 327, 129, 186, 208, 230, 223, 160, 139, 191, 183, 252, 11, 238, 207, 126, 186, 263, 187, 209, 190, 263, 305, 241, 291, 202, 148, 214, 246, 209, 188, 271, 213, 240, 46, 305, 275, 315, 230, 125, 214, 223, 199, 204, 237, 209, 169, 351, 191, 238, 219, 234, 311, 220, 245, 192, 224, 160, 233, 170, 200, 249, 0, 167, 188, 175, 231, 238, 252, 243, 139, 285, 281, 283, 185, 0, 260, 181, 204, 278, 312, 233, 156, 215, 348, 230, 146, 212, 205, 286, 197, 300, 249, 222, 266, 166, 166, 225, 217, 192, 205, 231, 161, 221, 360, 250, 170, 198, 184, 255, 250, 222, 206, 169, 235, 159, 301, 184, 296, 215, 0, 149, 218, 345]
# p1endgamescores =[218, 166, 234, 166, 212, 270, None, 229, 211, 194, 199, 166, 201, None, 185, 204, 144, 153, 214, 178, 133, None, 223, 165, 161, 210, 138, 220, 305, 194, 240, None, 203, 203, 216, 191, 183, 182, 188, 184, 231, 158, 183, None, 202, 168, 222, 165, 219, 152, 201, 137, 206, 203, 225, 241, 156, 250, 230, 146, 188, 214, 299, 158, 181, 180, 269, None, 112, 195, 205, 139, 150, 226, 208, 194, 171, 152, 174, 193, 154, None, 195, 156, None, 262, 230, 158, 241, 189, 206, 189, 181, 118, 204, 157, 136, 329, 172, None, 276, 193, 145, 217, 190, 212, 164, 219, 246, 189, 213, 251, 191, None, 207, 246, 278, 251, 150, 169, 150, 219, 188, 287, 160, 168, 154, 143, 197, 202, 236, None, None, 172, 278, 178, 244, 182, 201, 200, 173, None, 218, 205, 207, 188, 145, 263, 196, 231, 176, 255, 309, 264, 121, 229, 285, 201, 176, 165, 290, 293, 150, 182, 219, None, None, 202, 210, 152, 255, 258, 167, 191, None, 196, 237, 163, 218, 169, 210, 207, 212, 221, 166, 267, 305, 209, 202, 174, 145, 181, 165, 231, 189, 193, 194, 212, 149, 193, 210, 158, 210, 212, 176, 188, 176, 188, 182, 226, 206, 185, None, None, 278, 229, 207, 184, 182, 224, 196, 188, 222, 136, 266, None, 276, 223, 275, 308, 134, 246, 251, 149, 340, 311, 179, 227, 167, 141, 193, 149, 257, 211, 163, 195, 202, 225, 189, 163, 157, 198, 167, 151, 226, 183, 150, 135, 335, 195, 269, 210, 208, None, 192, 189, None, 241, None, 155, 246, 223, 164, 134, 161, 231, 139, 153, 148, None, 187, None, 199, 194, None, 263, 174, 190, None, 164, 227, 236, 168, 166, 160, 239, 303, 160, 218, None, 189, 166, 209, 137, 175, 215, 172, 200, 189, 178, 190, 164, 232, 216, 235, 126, 218, 203, 164, 236, None, 232, 279, None, 190, 173, 142, 135, 273, 214, 163, 162, 187, 254, 203, 271, 177, 171, 175, 222, 211, 154, 238, 160, 241, 195, 206, 221, 180, 223, 223, 94, 296, None, 201, 144, 212, 251, 187, 252, 156, 149, 253, 149, 160, 191, 221, 217, 200, 224, 178, 168, 146, 207, None, 197, None, 181, None, 290, 237, 213, 193, 215, 199, 195, 254, 182, None, 204, 259, 222, 153, 155, 188, 181, 148, 223, 142, 237, 141, 142, None, 144, 218, 178, 206, 305, 184, 161, None, 163, 208, 215, 156, 213, 216, 228, 160, 139, 229, 168, 182, 145, 224, 192, 244, 194, 236, 249, 202, 204, 208, 267, 183, 273, 139, 248, None, 187, 203, 123, 152, 136, 210, 190, 178, 248, 184, 157, 136, None, 192, 210, 215, 183, 186, 174, 223, 279, 145, 168, 258, 202, 134, 227, 180, 191, 170, 222, 134, 201, 242, 183, 187, 262, 301, 251, 136, 197, 143, 195, 291, 207, 203, 217, 163, 239, 237, 189, 263, 241, 218, 194, 157, 202, None, 238, 236, 158]
# p2endgamescores= [183, 196, 222, 205, 206, 191, None, 216, 202, 219, 277, 225, 165, None, 124, 141, 184, 198, 175, 233, 161, None, 184, 354, 178, 293, 233, 200, 184, 227, 177, None, 167, 305, 181, 144, 277, 248, 179, 170, 139, 179, 203, None, 194, 254, 158, 225, 187, 249, 278, 174, 182, 223, 158, 128, 199, 207, 178, 167, 201, 127, 112, 226, 199, 203, 187, None, 278, 143, 146, 205, 187, 151, 223, 224, 237, 263, 221, 175, 212, None, 146, 187, None, 220, 228, 190, 144, 224, 172, 228, 254, 169, 214, 261, 170, 153, 166, None, 157, 178, 184, 160, 207, 176, 233, 200, 114, 197, 240, 170, 166, None, 159, 192, 186, 216, 295, 200, 160, 189, 146, 134, 230, 188, 208, 182, 189, 166, 207, None, None, 177, 160, 141, 154, 136, 270, 164, 167, None, 217, 162, 169, 219, 197, 176, 162, 162, 157, 135, 155, 147, 188, 153, 196, 213, 261, 282, 234, 158, 195, 259, 121, None, None, 155, 221, 139, 151, 259, 172, 170, None, 199, 189, 215, 156, 169, 187, 149, 183, 189, 230, 227, 167, 123, 171, 169, 193, 135, 197, 179, 155, 180, 248, 170, 187, 239, 144, 193, 239, 141, 164, 138, 183, 219, 235, 209, 193, 223, None, None, 129, 256, 177, 202, 178, 174, 259, 239, 164, 157, 212, None, 151, 185, 169, 196, 255, 176, 226, 162, 181, 187, 210, 199, 203, 233, 180, 193, 230, 153, 278, 189, 186, 187, 211, 224, 195, 158, 271, 215, 189, 223, 173, 201, 132, 163, 200, 182, 109, None, 201, 257, None, 222, None, 237, 155, 179, 266, 168, 305, 132, 212, 203, 145, None, 192, None, 155, 205, None, 144, 184, 192, None, 230, 171, 263, 166, 151, 135, 305, 196, 205, 245, None, 128, 199, 131, 205, 190, 160, 183, 204, 158, 199, 177, 125, 169, 134, 156, 261, 191, 187, 194, 195, None, 180, 233, None, 184, 167, 257, 227, 186, 199, 187, 261, 187, 176, 166, 206, 261, 181, 153, 223, 228, 289, 154, 205, 223, 269, 134, 161, 219, 198, 151, 223, 157, None, 111, 161, 170, 249, 194, 115, 209, 268, 249, 252, 223, 160, 164, 162, 157, 155, 162, 214, 214, 278, None, 176, None, 276, None, 153, 172, 224, 190, 160, 134, 191, 167, 215, None, 238, 195, 79, 186, 198, 144, 199, 190, 221, 286, 214, 291, 202, None, 188, 200, 147, 150, 188, 175, 164, None, 256, 208, 253, 183, 125, 190, 141, 199, 199, 211, 173, 157, 304, 169, 189, 177, 226, 268, 173, 229, 192, 195, 149, 175, 143, 193, 221, None, 130, 178, 175, 180, 225, 225, 208, 139, 206, 209, 271, 165, None, 208, 161, 197, 237, 238, 188, 123, 208, 254, 168, 137, 212, 168, 231, 175, 269, 249, 222, 223, 146, 159, 175, 201, 165, 192, 225, 161, 160, 317, 223, 170, 183, 169, 211, 205, 195, 181, 140, 209, 148, 280, 184, 254, 178, None, 149, 155, 296]
# times= [0.2928059999999999, 0.3718410000000001, 0.23194399999999993, 1.0388579999999998, 0.22879199999999988, 0.22002699999999997, 0.0020039999999998948, 0.4194810000000002, 0.3165870000000002, 0.37523799999999996, 0.21161799999999964, 0.8742600000000005, 0.6539440000000001, 0.0025639999999995666, 0.33535300000000046, 0.19672800000000024, 0.19727399999999928, 0.21439200000000014, 1.375591000000001, 0.8091880000000007, 0.38516199999999934, 0.09082000000000079, 0.2991969999999995, 9.217161, 0.37808399999999764, 0.8026850000000003, 0.18621900000000124, 2.9802820000000025, 0.3490499999999983, 0.43283699999999925, 1.1114730000000002, 0.08380700000000019, 0.3125120000000017, 0.25763999999999854, 1.3630969999999998, 2.764902000000003, 0.23528799999999706, 0.3022879999999972, 0.4018209999999982, 0.1808000000000014, 0.26575300000000013, 0.24835099999999954, 0.1446050000000021, 0.15552600000000183, 0.5358790000000013, 1.543016999999999, 0.3358879999999971, 0.3114440000000016, 0.6193889999999982, 0.3618939999999995, 0.7966959999999972, 0.390703000000002, 0.25773399999999924, 0.6757259999999974, 0.35827700000000107, 0.22720100000000087, 0.3796840000000046, 0.43254400000000004, 1.4931490000000025, 0.2548719999999989, 0.23152199999999823, 0.2505860000000055, 0.1444189999999992, 2.4351270000000014, 0.5021939999999958, 0.6229390000000024, 0.2047719999999984, 0.07404299999999608, 0.473507000000005, 0.9960200000000015, 0.37344699999999875, 0.2536440000000013, 0.32034300000000115, 0.29288700000000034, 0.4103250000000003, 0.25149100000000146, 0.1767630000000011, 4.083317000000001, 1.9761140000000026, 0.35448799999999636, 1.6717980000000026, 0.09816299999999956, 0.23800300000000618, 0.4695199999999957, 0.003132999999998276, 0.437342000000001, 0.26709399999999306, 0.3666509999999974, 0.5937780000000004, 0.23120199999999613, 0.24429899999999805, 0.7964970000000022, 3.20188499999999, 0.25137300000000096, 0.2140629999999959, 0.2290739999999971, 12.218799000000004, 0.989570999999998, 0.2993850000000009, 0.03139299999999423, 0.34039199999999425, 0.2431930000000051, 0.2137830000000065, 0.13565599999999733, 2.187706999999989, 0.6617399999999947, 1.391360000000006, 0.9645450000000011, 0.20681100000000185, 0.33145700000000033, 9.58070099999999, 0.30060399999999277, 0.512715, 0.021703999999999724, 0.3306620000000038, 0.3271529999999956, 0.614806999999999, 0.6048430000000025, 0.3124660000000006, 0.22258399999999767, 0.3527690000000092, 0.21717099999999334, 0.5492429999999899, 0.3776319999999913, 0.4154920000000004, 0.42213999999999885, 0.4515840000000111, 2.5750829999999922, 0.401961, 0.43810599999999056, 0.22623299999999347, 0.002532000000002199, 0.0018150000000076716, 0.22750400000001036, 0.23555100000000095, 0.2394079999999974, 4.218062000000003, 0.18922299999999836, 0.2122399999999942, 0.19388100000000463, 0.7154629999999997, 0.11671599999999671, 0.31569100000000105, 1.2622329999999948, 0.7974519999999927, 0.6517769999999956, 0.41394299999998907, 0.20967999999999165, 0.234185999999994, 0.251410000000007, 0.2803170000000108, 0.18839300000000492, 0.2053939999999983, 0.534396000000001, 0.4496340000000032, 0.37912299999999277, 0.3112829999999889, 0.6531530000000032, 0.29095900000000086, 0.2679389999999984, 0.5020150000000001, 0.8538089999999983, 0.2451690000000042, 0.24101299999999526, 0.40490699999999435, 0.08170499999999947, 0.03884700000000407, 0.5336039999999969, 5.059089999999998, 0.18274100000000715, 0.2251499999999993, 0.8296859999999953, 0.31562900000000127, 0.21862600000000043, 0.026416000000011763, 
# 0.21508499999998776, 2.6393170000000055, 0.2953679999999963, 0.669667000000004, 0.19704600000000028, 0.2758139999999969, 0.6253249999999753, 1.6880829999999776, 0.5045389999999941, 0.41301500000000146, 1.3947259999999915, 0.5800529999999924, 0.27186100000000124, 0.16846100000000774, 0.36898900000002754, 1.8655620000000113, 1.9216669999999851, 0.22315499999999133, 0.22424100000000635, 0.2909649999999999, 0.3366570000000024, 0.37276799999997934, 0.20635099999998374, 0.6916360000000168, 0.22340499999998542, 1.4546670000000006, 0.9543570000000159, 0.2264419999999916, 0.23100800000000277, 1.7468560000000082, 0.2247030000000052, 0.38132199999998306, 0.208044000000001, 0.22038499999999317, 0.192748000000023, 0.2522610000000043, 2.0404780000000073, 0.0028300000000172076, 0.0027790000000038617, 0.24784099999999398, 0.3263469999999984, 0.3013249999999914, 0.4446060000000216, 0.41555499999998347, 0.3310480000000098, 2.386485999999991, 0.1714550000000088, 0.3242869999999982, 0.7600260000000105, 0.27437799999998447, 0.002240000000000464, 0.2850119999999947, 0.17968399999998041, 2.786428000000001, 0.20285200000000714, 0.20589999999998554, 0.1930720000000008, 0.8937139999999886, 0.8421769999999924, 0.2545039999999972, 0.40410099999999716, 0.8030490000000157, 0.5610989999999845, 0.21402100000000246, 0.2232050000000072, 5.460230999999993, 0.28734200000002375, 0.8302300000000002, 0.45365300000000275, 0.554003999999992, 0.17290600000001177, 0.22391000000001782, 0.7349100000000135, 0.4988390000000038, 57.867542000000014, 2.9327009999999802, 1.8384949999999947, 0.34806199999999876, 0.6671989999999823, 0.3419749999999908, 2.6556659999999965, 0.40598099999999704, 0.3016930000000002, 0.7211619999999925, 1.6560149999999965, 0.2520900000000097, 172.473804, 1.5781190000000151, 0.003030000000023847, 1.5507789999999773, 0.1987359999999967, 0.0020370000000298205, 0.2470360000000369, 0.0019209999999816318, 0.46808499999997366, 0.5146919999999682, 0.30661500000002206, 1.0912050000000022, 3.103079999999977, 2.1348110000000133, 0.20253500000001168, 0.22418199999998478, 0.8674279999999612, 0.8155019999999809, 0.04360199999996439, 0.16060300000003735, 0.0019020000000296022, 0.3572400000000471, 0.6463150000000155, 0.02232800000001589, 0.5644740000000183, 0.5229150000000118, 0.17267799999996214, 0.0018010000000003856, 0.6582479999999578, 0.3232359999999517, 0.42014299999999594, 0.17136499999998023, 1.1740439999999808, 0.3454179999999951, 0.24046300000003384, 0.22762199999999666, 0.3469519999999875, 2.7941959999999995, 0.0023160000000075343, 0.5626589999999965, 5.594690000000014, 0.3335419999999658, 0.5099799999999846, 0.20556300000004057, 0.5425690000000145, 0.4355209999999943, 0.8707939999999894, 0.24418299999996407, 0.380628999999999, 0.5869670000000156, 0.21521200000000817, 0.6054320000000075, 1.3143350000000282, 0.42159899999995787, 1.293295999999998, 0.38424699999995937, 0.6967149999999833, 0.300407000000007, 0.8768420000000106, 0.0034870000000069012, 0.16064799999998058, 0.2565319999999929, 0.07622500000002219, 0.2552289999999857, 0.16474999999996953, 1.8532679999999573, 0.4848479999999995, 1.3019820000000095, 117.35243199999996, 0.7084909999999809, 1.5091749999999138, 0.5793330000000196, 1.1138200000000325, 0.24734999999998308, 0.2551379999999881, 0.41855899999995927, 0.23927900000001046, 0.8112929999999778, 4.130955999999969, 1.4083500000000413, 0.5017380000000458, 0.6638189999999895, 0.13171699999998054, 2.0982510000000048, 0.23331199999995533, 0.3367120000000341, 0.18857300000001942, 0.22771699999998418,
# 0.36383999999998196, 0.2500719999999319, 0.33536500000002434, 0.5049139999999852, 0.0017800000000534055, 0.2386900000000196, 0.3896989999999505, 0.16270300000007865, 5.211135000000013, 0.22361899999998514, 0.17212899999992715, 0.1858239999999114, 1.361674999999991, 0.21281499999997777, 1.8029099999999971, 0.6559100000000626, 0.2045899999999392, 0.1893720000000485, 0.26809000000002925, 0.18401700000003984, 0.20749799999998686, 0.20333000000005086, 0.19717500000001564, 0.2518370000000232, 0.17771000000004733, 0.004097000000001572, 0.20062699999994038, 0.016684999999938555, 0.3801069999999527, 0.12811999999996715, 0.23352599999998347, 0.23732299999994666, 0.17410899999993035, 1.1341130000000703, 0.15998000000001866, 0.16210699999999179, 0.17891899999995076, 0.3379569999999603, 0.26499699999999393, 0.01535799999999199, 0.314029000000005, 0.19228199999997742, 2.368088000000057, 0.2382149999999683, 2.0576879999999846, 1.0398030000000063, 0.37048200000003817, 0.2978219999999965, 0.8121879999999919, 0.40711000000010245, 0.9477319999999736, 0.2424389999999903, 0.4584310000000187, 0.07188800000005813, 0.19823200000007546, 0.4595449999999346, 0.2064699999999675, 0.24890899999991234, 0.31812999999999647, 0.7636250000000473, 0.20206900000005135, 0.028891999999927975, 0.18949299999997038, 0.27459399999997913, 0.2852930000000242, 0.3780209999999897, 0.18278799999995954, 0.22277599999995346, 0.5774390000000267, 0.23634100000003855, 0.9100349999999935, 0.26783899999998084, 0.1856520000000046, 0.1749169999999367, 4.234426999999982, 0.375171000000023, 0.4404510000000528, 0.30652699999996, 0.8563239999999723, 0.26012500000001637, 0.15538699999990513, 0.3218699999999899, 0.3836069999999836, 0.7139559999999392, 0.16673899999989317, 3.1698390000000245, 0.177866999999992, 0.773089999999911, 0.47497200000009343, 0.0037820000000010623, 0.37524600000006103, 4.157304999999951, 0.6040100000000166, 0.36641099999997095, 0.3943050000000312, 0.23496799999998075, 7.371562999999924, 0.28099299999996674, 0.23972500000002128, 0.41920299999992494, 0.26016200000003664, 0.2959040000000641, 0.0020480000000588916, 2.8170840000000226, 0.5177230000000463, 1.8271009999999706, 0.6777199999999084, 0.5227209999999332, 11.963899999999967, 1.501141999999959, 0.3560229999999365, 0.4501699999999573, 0.3957470000000285, 0.3981499999999869, 0.17331400000000485, 0.26129900000000816, 2.6427649999999403, 0.2815819999999576, 0.29509600000005776, 0.1560309999999845, 1.2763760000000275, 0.3734460000000581, 0.16770899999994526, 0.21281999999996515, 0.9043809999999439, 0.1593480000000227, 0.18587000000002263, 0.2203460000000632, 4.136911999999938, 0.48137499999995725, 0.7381710000000794, 0.2286619999999857, 1.283792999999946, 0.19774400000005699, 0.4337709999999788, 2.3513019999999187, 0.35244299999999384, 0.38877899999999954, 0.25251300000002175, 0.30437899999992624, 0.9129359999999451, 0.33933600000000297, 0.18143799999995736, 0.23576900000000478, 1.2049879999999575, 0.3232110000000148, 0.6946389999999383, 0.002425000000016553, 0.4352480000000014, 0.36690199999998185, 0.30720100000007733]
# nodes = [584, 137, 33, 106, 5, 25, 6, 4, 255, 15, 54, 455, 27, 332, 735, 55, 1233, 278, 33, 8, 4, 4, 6, 3, 5, 13, 159, 39, 3, 33, 39, 335, 57, 6, 779, 146, 9, 328, 9, 11, 47, 367, 6, 1592, 12, 11, 634, 42, 128, 3, 21, 7, 22, 3, 33, 248, 58, 10, 5, 13, 23, 6, 2, 305, 24, 615, 3, 54, 27, 51, 1, 71, 4, 222, 6, 191, 203, 5, 11, 33, 105, 5, 90, 65, 3, 10, 15, 256, 70, 1118, 31, 4, 23, 20, 12, 63, 42, 3, 7, 13, 28, 3, 61, 77, 18, 83, 22, 45, 7894, 128, 247, 18, 313, 6, 9, 299, 11, 303, 18, 4, 31, 32, 3, 65, 3, 15, 18, 16, 3, 6, 15, 69, 27, 129, 539, 2, 1939, 17, 3, 3, 37, 12, 193, 3, 14, 52, 21, 53, 421, 1, 6, 263, 535, 20, 5, 347, 676, 3, 53, 3, 3, 16, 4, 14, 121, 152, 10, 9, 12, 29, 11, 24, 14, 7, 34, 1, 14, 49, 397, 16, 3, 8, 178, 13, 18, 256, 472, 11, 3, 262, 3, 11, 103, 123, 4, 15, 3, 49, 8, 252, 16, 30, 10, 315, 12, 211, 12, 589, 47, 5, 210, 6, 4, 6, 33, 6, 174, 180, 5, 4, 29, 25, 48, 11, 1, 69, 6, 47, 3, 25, 14, 3, 6, 6, 9, 3, 3, 3, 13883, 8, 31, 20, 40, 9, 6, 29, 965, 4, 155, 6, 16, 6, 11, 656, 166, 238, 5, 20, 9, 35, 118, 47, 5, 2, 139, 17, 17, 49, 830, 3, 5, 12, 34, 39, 247, 9, 408, 14, 9, 52, 14, 7, 3, 2370, 3, 133, 6, 237, 6, 73, 6, 1373, 163, 9, 1, 11, 1, 13, 35, 14, 11, 96, 3, 73, 71, 21, 230, 35, 15, 13, 61, 260, 5, 249, 3, 55, 41, 18, 14, 9, 1, 94, 11, 22, 28, 1, 29, 95, 3, 3, 40, 27, 5, 106, 13, 41, 83, 25, 19, 5, 11, 1, 6, 14, 39, 36, 3, 51, 11, 6, 18, 3360, 43, 419, 5, 24, 60, 20, 70, 325, 6, 10, 6, 159, 19, 25, 12, 32, 5, 3, 4, 11, 8, 119, 1, 8, 3, 11, 77, 3, 10, 11, 11, 244, 8, 18, 29, 26, 19, 69, 3, 100, 21, 3, 45, 177, 10, 6, 25, 277, 1185, 19, 25, 3, 3, 10, 6, 6, 130, 71, 9, 81, 282, 4, 4, 15, 6, 126, 327, 5, 21, 23, 3, 7, 6, 116, 147, 14, 3, 19, 22, 65, 12, 30, 23, 85, 38, 6195, 349, 37, 276, 27, 3, 168]
# p1scores = [184, 236, 90, 177, 291, 288, 291, 159, 235, 211, 289, 325, 182, 0, 264, 136, 172, 180, 304, 191, 259, 316, 187, 194, 321, 219, 186, 190, 0, 171, 251, 287, 136, 192, 124, 0, 317, 216, 135, 161, 269, 183, 0, 155, 188, 271, 242, 260, 284, 223, 183, 215, 255, 239, 259, 167, 227, 243, 186, 246, 186, 0, 199, 216, 215, 180, 254, 189, 275, 179, 192, 134, 230, 186, 346, 208, 247, 312, 216, 173, 248, 137, 237, 201, 204, 299, 282, 150, 246, 256, 189, 140, 193, 255, 240, 0, 205, 217, 250, 301, 133, 214, 0, 284, 182, 202, 218, 168, 0, 302, 168, 201, 230, 196, 73, 327, 215, 173, 228, 275, 226, 128, 232, 228, 229, 282, 298, 192, 296, 214, 140, 285, 226, 249, 247, 256, 304, 0, 230, 279, 264, 222, 158, 321, 282, 200, 209, 199, 253, 343, 199, 223, 177, 239, 0, 244, 161, 0, 215, 236, 268, 211, 279, 180, 223, 308, 262, 140, 160, 207, 172, 211, 189, 236, 265, 104, 167, 288, 216, 281, 270, 256, 178, 240, 134, 261, 260, 180, 222, 0, 215, 203, 327, 251, 174, 256, 109, 235, 181, 214, 258, 215, 0, 297, 220, 149, 292, 241, 193, 178, 343, 271, 208, 195, 2, 299, 221, 192, 312, 343, 220, 190, 293, 257, 0, 154, 201, 166, 211, 165, 121, 262, 312, 229, 221, 296, 283, 294, 251, 200, 227, 210, 173, 151, 299, 165, 287, 126, 208, 158, 268, 120, 132, 205, 200, 225, 163, 193, 153, 245, 231, 150, 241, 264, 182, 261, 277, 216, 189, 261, 0, 294, 217, 262, 229, 203, 228, 233, 258, 185, 231, 163, 325, 164, 342, 184, 243, 223, 228, 170, 203, 230, 145, 242, 90, 287, 199, 288, 267, 222, 272, 240, 181, 0, 300, 0, 366, 286, 180, 161, 188, 203, 270, 181, 261, 151, 185, 214, 181, 186, 212, 221, 192, 270, 208, 270, 173, 222, 223, 193, 192, 190, 304, 187, 252, 0, 269, 297, 202, 239, 172, 0, 243, 193, 34, 225, 241, 251, 304, 265, 280, 225, 0, 315, 185, 196, 265, 232, 0, 221, 121, 176, 211, 132, 0, 218, 102, 198, 44, 233, 282, 169, 263, 242, 263, 254, 246, 241, 218, 224, 272, 297, 169, 306, 0, 347, 167, 261, 0, 296, 365, 183, 246, 195, 206, 202, 139, 279, 153, 211, 280, 155, 190, 203, 154, 216, 254, 190, 197, 218, 210, 187, 226, 234, 178, 228, 139, 192, 147, 246, 195, 251, 206, 209, 9, 291, 222, 61, 280, 228, 186, 180, 217, 195, 201, 174, 207, 198, 282, 198, 246, 169, 225, 190, 188, 184, 261, 215, 206, 148, 217, 253, 290, 316, 233, 243, 147, 189, 171, 184, 233, 200, 170, 180, 123, 257, 232, 247, 358, 202, 145, 0, 224, 0, 211, 257, 271, 216, 262, 200, 192, 244, 241, 286, 364, 213, 245, 195, 213, 220, 173, 201, 331, 164, 223, 271, 195, 266, 206, 130]
# p2scores = [222, 201, 56, 162, 279, 200, 212, 309, 234, 203, 169, 255, 147, 0, 237, 187, 287, 282, 225, 363, 251, 177, 185, 243, 210, 255, 217, 151, 0, 193, 216, 218, 194, 212, 57, 0, 208, 174, 229, 246, 242, 212, 0, 210, 257, 218, 222, 218, 257, 123, 216, 195, 199, 188, 279, 191, 188, 260, 295, 265, 223, 0, 190, 203, 178, 162, 229, 283, 260, 252, 134, 226, 217, 293, 190, 176, 178, 267, 297, 265, 202, 85, 178, 254, 183, 273, 242, 200, 197, 238, 203, 71, 169, 207, 232, 0, 153, 173, 276, 210, 305, 216, 0, 225, 283, 168, 279, 188, 0, 235, 338, 261, 205, 395, 131, 210, 181, 205, 299, 258, 217, 208, 303, 193, 173, 255, 178, 182, 215, 212, 276, 180, 258, 225, 213, 194, 209, 0, 213, 194, 243, 272, 212, 256, 292, 249, 207, 213, 236, 198, 216, 310, 241, 147, 0, 282, 258, 0, 229, 238, 287, 203, 187, 245, 285, 267, 223, 274, 321, 241, 171, 270, 169, 315, 289, 212, 294, 230, 259, 195, 173, 276, 184, 205, 225, 203, 349, 255, 76, 0, 313, 174, 190, 235, 184, 268, 129, 244, 261, 246, 230, 172, 0, 161, 323, 181, 188, 192, 251, 194, 236, 193, 258, 135, 11, 206, 285, 289, 206, 214, 270, 236, 221, 206, 0, 254, 215, 253, 184, 144, 151, 159, 200, 232, 204, 222, 228, 267, 204, 188, 292, 221, 240, 195, 205, 150, 164, 216, 167, 196, 223, 224, 205, 202, 160, 257, 211, 259, 139, 253, 199, 241, 268, 188, 225, 201, 193, 181, 211, 211, 0, 203, 217, 197, 178, 281, 242, 169, 211, 168, 155, 246, 166, 313, 195, 254, 191, 218, 224, 189, 168, 201, 178, 207, 70, 179, 142, 241, 226, 212, 254, 243, 298, 0, 118, 0, 211, 223, 194, 214, 270, 212, 213, 328, 268, 227, 177, 292, 245, 197, 229, 226, 219, 162, 281, 167, 269, 225, 234, 243, 225, 198, 196, 272, 170, 0, 207, 263, 221, 205, 240, 0, 304, 174, 50, 281, 196, 206, 204, 183, 175, 192, 0, 221, 258, 172, 212, 237, 0, 215, 223, 206, 232, 315, 0, 285, 161, 208, 72, 247, 305, 260, 314, 174, 152, 182, 225, 201, 186, 168, 262, 158, 219, 197, 0, 165, 300, 221, 0, 180, 213, 257, 278, 163, 166, 230, 214, 182, 207, 246, 186, 215, 233, 281, 184, 329, 325, 280, 240, 207, 246, 260, 198, 217, 185, 287, 248, 213, 173, 198, 241, 160, 179, 243, 14, 225, 257, 88, 203, 299, 262, 199, 263, 340, 195, 166, 268, 274, 228, 177, 243, 160, 245, 213, 232, 113, 210, 241, 251, 208, 219, 242, 259, 211, 163, 206, 200, 259, 196, 217, 312, 150, 336, 224, 164, 170, 156, 265, 182, 240, 278, 0, 198, 0, 175, 208, 173, 172, 267, 241, 281, 261, 234, 196, 174, 139, 188, 252, 142, 263, 304, 292, 209, 142, 172, 296, 249, 137, 188, 282]
# p1endgamescores= [184, 174, None, 177, 266, 267, 273, 146, 190, 171, 280, 303, None, None, 252, 136, 161, 172, 234, 178, 179, 256, 187, 182, 283, 219, 186, 190, None, 171, 204, 201, None, 192, None, None, 317, 216, 135, 149, 217, 183, None, 155, 126, 254, 212, 178, 267, 223, 183, 215, 248, 173, 180, 167, 164, 176, 186, 207, 167, None, 199, 205, 215, 180, 232, 174, 213, 173, 192, 134, 210, 147, 281, 149, 223, 272, 216, 146, 197, None, 237, 153, 204, 253, 282, 150, 231, 244, 189, None, None, 233, 173, None, 205, 169, 236, 258, 133, 214, None, 258, 172, 202, 185, 168, None, 250, 157, 174, 230, 181, None, 253, 204, 173, 224, 232, 219, 128, 221, 183, 229, 235, 298, 180, 238, 214, None, 285, 150, 249, 239, 179, 258, None, 163, 189, 214, 172, 127, 277, 240, 200, 206, 181, 193, 293, 199, 178, 172, 239, None, 182, 135, None, 215, 219, 203, 173, 279, 157, 205, 234, 198, 135, 145, 207, 172, 211, 189, 223, 182, None, 167, 244, 210, 219, 206, 189, 170, 197, 134, 211, 212, 180, None, None, 160, None, 274, 241, 167, 186, None, 224, 181, 171, 215, 215, None, 242, 213, 149, 240, 235, 182, 167, 301, 265, 185, 195, None, 242, 206, 185, 235, 299, 136, 153, 293, 257, None, 146, 153, 148, 211, 165, None, 221, 237, 207, 166, 239, 260, 252, 171, 200, 166, 152, 152, 129, 299, None, 241, 126, 208, 158, 213, 120, 132, 205, 190, 185, 150, 134, 153, 166, 192, 143, 198, 264, 174, 261, 227, 216, 189, 208, None, 249, 217, 181, 229, 182, 219, 233, 203, 185, 231, 149, 258, 147, 301, 184, 192, 206, 191, 163, 186, 230, 145, 242, None, 199, 199, 223, 214, 183, 214, 228, 175, None, 300, None, 335, 241, 180, 161, 165, 203, 217, 163, 172, 151, 185, 178, 157, 186, 212, 192, 192, 254, 186, 218, 165, 156, 203, 193, 178, 190, 304, 129, 252, None, 269, 285, 202, 233, 158, None, 202, 193, None, 214, 180, 171, 266, 216, 280, 225, None, 253, 179, 196, 224, 161, None, 206, 121, 176, 157, 132, None, 207, None, 127, None, 200, 194, 169, 260, 200, 205, 212, 179, 185, 218, 224, 217, 212, 169, 253, None, 281, 157, 249, None, 296, 311, 183, 153, 182, 165, 202, 139, 240, 153, 176, 194, 155, 190, 197, 154, 201, 214, 170, 169, 218, 196, 167, 210, 185, 178, 187, 129, 192, 147, 239, 189, 181, 206, 209, None, 235, 172, None, 216, 174, 162, 164, 217, 147, 152, 174, 168, 186, 183, 198, 189, None, 169, 143, 188, 184, 206, 215, 206, 145, 151, 253, 262, 249, 181, 243, 147, 135, 171, 178, 203, 200, 170, 180, 123, 196, 184, 186, 307, 147, 137, None, 164, None, 199, 233, 217, 216, 244, 190, 144, 187, 167, 239, 310, 202, 209, 195, 213, 178, 160, 143, 302, None, 223, 250, 184, 248, 164, 130]
# p2endgamescores = [222, 184, None, 162, 236, 135, 150, 240, 195, 175, 155, 151, None, None, 149, 187, 220, 230, 207, 310, 213, 143, 185, 176, 156, 181, 210, 138, None, 193, 186, 205, None, 189, None, None, 190, 174, 213, 205, 202, 212, None, 210, 239, 161, 151, 193, 184, 123, 165, 195, 154, 172, 279, 158, 174, 215, 275, 238, 209, None, 190, 177, 157, 162, 181, 227, 233, 196, 134, 226, 147, 185, 174, 162, 168, 251, 297, 216, 158, None, 151, 176, 183, 257, 209, 200, 192, 188, 203, None, None, 164, 182, None, 153, 149, 230, 171, 283, 216, None, 206, 226, 168, 236, 149, None, 210, 275, 183, 191, 338, None, 186, 147, 205, 264, 231, 190, 208, 209, 168, 173, 193, 178, 161, 188, 196, None, 180, 165, 209, 153, 186, 187, None, 186, 165, 222, 241, 151, 196, 233, 228, 182, 150, 204, 185, 199, 261, 204, 135, None, 243, 211, None, 210, 174, 270, 183, 180, 199, 237, 241, 223, 243, 264, 221, 166, 246, 169, 243, 270, None, 294, 187, 174, 180, 153, 239, 156, 145, 225, 197, 328, 255, None, None, 286, None, 168, 150, 160, 225, None, 201, 261, 229, 214, 155, None, 140, 277, 173, 165, 120, 242, 175, 221, 138, 170, 135, None, 180, 202, 236, 181, 193, 263, 206, 172, 206, None, 184, 173, 209, 178, 144, None, 133, 179, 203, 183, 203, 170, 211, 184, 159, 273, 170, 171, 174, 184, None, 148, 216, 143, 196, 207, 224, 190, 178, 152, 213, 141, 204, 137, 221, 163, 226, 225, 188, 207, 189, 193, 174, 199, 211, None, 189, 205, 171, 161, 238, 168, 154, 158, 156, 155, 204, 143, 236, 179, 226, 173, 168, 204, 142, 151, 174, 178, 196, None, 168, 142, 226, 193, 196, 230, 237, 227, None, 118, None, 154, 193, 194, 190, 225, 212, 207, 231, 252, 227, 177, 208, 181, 197, 208, 137, 219, 145, 234, 150, 211, 212, 180, 243, 172, 180, 154, 249, 159, None, 175, 190, 221, 152, 193, None, 235, 138, None, 192, 178, 145, 177, 144, 175, 192, None, 165, 245, 172, 155, 212, None, 193, 223, 206, 212, 299, None, 198, None, 197, None, 173, 292, 255, 239, 174, 152, 156, 169, 196, 176, 168, 255, 155, 219, 194, None, 144, 259, 146, None, 163, 170, 257, 260, 143, 152, 230, 214, 127, 183, 194, 150, 215, 217, 194, 184, 229, 272, 237, 212, 207, 196, 244, 171, 191, 185, 263, 201, 213, 173, 141, 176, 116, 141, 182, None, 209, 230, None, 184, 299, 216, 152, 263, 277, 177, 166, 226, 262, 197, 177, 207, None, 210, 168, 194, 113, 189, 226, 226, 176, 169, 185, 204, 171, 153, 164, 200, 245, 159, 202, 227, 150, 287, 224, 164, 151, 141, 246, 154, 208, 234, None, 183, None, 150, 200, 151, 172, 188, 201, 193, 194, 201, 180, 151, 127, 149, 252, 142, 210, 223, 273, 160, None, 172, 283, 198, 135, 165, 282]
# times =[0.2743589999999999, 5.318681, 0.07979300000000045, 1.5507600000000004, 0.5014630000000011, 1.1186679999999996, 0.27624600000000044, 0.34914800000000135, 0.20126000000000133, 0.19063099999999977, 2.79533, 0.3795719999999996, 0.11689600000000056, 0.0023640000000000327, 0.6355559999999993, 4.5787379999999995, 0.3549609999999994, 3.3291059999999995, 6.6117750000000015, 0.6707800000000006, 11.804835, 3.248521999999994, 0.5565570000000051, 0.26883400000000535, 0.20992000000000388, 0.2048680000000047, 0.21727899999999778, 0.20856299999999806, 0.0026269999999968263, 0.20617599999999925, 0.2120870000000039, 0.33939900000000023, 0.17759799999999615, 1.487210999999995, 0.0785849999999968, 0.002582000000003859, 0.6452020000000047, 0.1866330000000005, 0.47352700000000425, 0.5345780000000033, 3.126418000000001, 0.7590050000000019, 0.0023320000000026653, 0.26359099999999813, 7.4764960000000045, 1.4632659999999973, 0.2262970000000024, 4.835604999999987, 0.27355199999999513, 0.30192799999998954, 0.5614940000000104, 4.153507000000005, 0.25050199999999734, 13.877615999999989, 0.30518599999999196, 0.27388400000000956, 6.8312329999999974, 0.6253849999999943, 1.385463999999999, 0.2206780000000066, 0.39123200000000224, 0.0026289999999988822, 0.2288730000000072, 0.37665399999998783, 0.25503500000000656, 0.5221849999999932, 2.5710949999999997, 0.7062860000000057, 0.30631700000000706, 0.2537200000000013, 0.27305599999999686, 0.36915000000000475, 0.19596199999999442, 0.1919990000000098, 3.1551429999999954, 0.716202999999993, 5.582977999999997, 0.17267300000000319, 0.7769669999999991, 0.37520699999998897, 0.5890309999999914, 0.12458099999999206, 0.15832800000001157, 0.8142780000000016, 0.18010799999998994, 2.2336070000000063, 0.2858439999999973, 2.0616249999999923, 2.2583609999999936, 0.21343000000000245, 0.29611099999999624, 0.07873299999999972, 0.10656600000000083, 0.5221829999999983, 1.1246090000000066, 0.0034959999999983893, 0.19683299999999804, 0.9667299999999841, 0.6986190000000079, 0.23166599999998994, 0.28642899999999827, 0.2941720000000032, 0.0021700000000066666, 3.1485469999999793, 0.7726910000000089, 10.74784600000001, 0.47506500000000074, 0.2107340000000022, 0.004694000000000642, 0.4216910000000098, 0.384254999999996, 0.9270379999999818, 0.8823179999999979, 0.6307809999999847, 0.07757700000001932, 0.25545099999999366, 0.23527200000000903, 0.27938999999997804, 0.5966109999999958, 0.23215700000000083, 0.7871729999999957, 0.9395379999999989, 0.31039200000000733, 0.8570550000000026, 0.3962099999999964, 0.6121700000000203, 74.240038, 1.2719389999999748, 2.398614999999978, 0.4113229999999817, 0.130693999999977, 0.16336599999999635, 3.3271819999999934, 0.21936999999999784, 0.2399300000000153, 3.271817999999996, 0.3502979999999809, 0.0021349999999813463, 2.8564560000000085, 0.4366709999999898, 0.21518299999999613, 0.47966900000000123, 0.511070999999987, 0.21172500000000127, 0.7042129999999815, 0.22021899999998595, 0.3094350000000077, 0.35768600000000106, 0.3170769999999834, 0.20976599999997347, 0.2586960000000147, 0.3194780000000037, 0.7840920000000153, 0.4228789999999947, 0.0018149999999934607, 1.4951489999999978, 4.571793999999983, 0.0028519999999900847, 0.19428400000001034, 16.458572000000004, 0.36880500000000893, 0.2092370000000301, 0.17441700000000537, 0.5177160000000072, 0.3180580000000077, 1.9738370000000032, 0.14592999999996437, 0.3311919999999873, 0.5329450000000406, 0.3922040000000493, 0.7291119999999864, 3.838661000000002, 0.18346299999996063, 0.30108599999999797, 2.617703000000006, 
# 0.11662799999999152, 5.350319999999954, 0.4023660000000291, 0.20773600000001124, 3.0374439999999936, 6.259878000000015, 0.20349399999997786, 0.7443569999999795, 0.20455600000002505, 0.20005799999995588, 0.33098400000000083, 0.28242800000003854, 0.1911269999999945, 0.16996500000004744, 0.00200700000004872, 0.35800599999998894, 0.1151039999999739, 1.2366170000000238, 1.463862000000006, 0.24819899999999961, 0.28080200000005107, 0.1446510000000103, 0.2656059999999911, 0.49560800000000427, 0.3013970000000086, 0.41183500000005324, 0.32828000000000657, 0.0023799999999596366, 0.18572399999999334, 0.4779570000000035, 0.16291100000000824, 0.3611760000000004, 0.48043000000001257, 3.6279910000000086, 0.34387399999997115, 0.21615400000001728, 0.2522140000000377, 1.64122100000003, 0.3578430000000026, 0.012416999999970812, 0.3818390000000136, 2.794548999999961, 4.532386000000031, 0.25787900000000263, 0.28167300000001205, 3.2173520000000053, 0.16719399999999496, 0.26954000000000633, 1.2589529999999627, 0.0030679999999847496, 1.2137920000000122, 0.18453900000002932, 0.26699800000000096, 0.1593520000000126, 0.6906700000000114, 0.11297799999999825, 0.23153299999995625, 2.6802209999999604, 0.332396000000017, 0.4681940000000395, 0.3165329999999926, 3.07279299999999, 0.24047500000000355, 2.0580280000000357, 0.3123619999999505, 5.243685999999968, 0.7146920000000136, 0.21976000000000795, 3.076519000000019, 0.25107799999994995, 0.12842000000000553, 0.24390399999998635, 0.22460699999999179, 0.4590610000000197, 0.2646139999999946, 1.7169870000000174, 1.5391970000000015, 0.1926499999999578, 0.18852800000001935, 0.42942499999998063, 0.46656999999999016, 0.5425439999999639, 0.2884779999999978, 0.16632099999998218, 0.7489209999999957, 0.25986500000004753, 0.6344950000000154, 0.17514399999998886, 0.3713470000000143, 0.43203099999999495, 0.22101200000003018, 0.24162699999999404, 0.28059899999999516, 0.26426200000003064, 0.18813199999999597, 0.002047000000004573, 0.22939300000001595, 0.22065400000002455, 112.07626800000003, 0.25548199999997223, 0.5102570000000242, 0.3294389999999794, 0.48925700000000916, 0.2741060000000175, 0.22553599999997687, 0.4796089999999822, 9.269835999999998, 0.21005400000001373, 1.567997000000048, 0.24786600000004455, 0.37319400000001224, 0.2031189999999583, 0.29812600000002476, 6.961295000000007, 1.6417099999999891, 2.217209999999966, 0.22646800000001122, 0.34776600000003555, 0.27075999999999567, 0.04555599999997639, 0.559359000000029, 1.4431720000000041, 0.6163859999999772, 0.1976860000000329, 0.1663120000000049, 1.4424380000000383, 0.4179849999999874, 0.3047669999999698, 0.0022060000000010405, 0.6744509999999764, 0.0025359999999636784, 7.280429999999967, 0.19869099999993978, 0.2118520000000217, 0.3106629999999768, 0.4921759999999722, 0.5397970000000214, 2.563126000000011, 0.23918699999990167, 4.168202999999949, 0.25603000000000975, 0.25373100000001614, 0.5347769999999628, 0.4362109999999575, 0.248826999999892, 0.2005619999999908, 20.58092499999998, 0.16772800000001098, 1.2809089999999514, 0.23856100000000424, 2.6518559999999525, 0.2241759999999431, 0.807495999999901, 0.25628199999994195, 15.08129299999996, 1.718539000000078, 0.2800760000000082, 0.15305699999998978, 0.24920399999996334, 0.19556299999999283, 0.002608999999893058, 0.3430789999999888, 0.5016770000000861, 0.27557999999999083, 0.24524400000007063, 1.1488299999999754, 0.001960999999937485, 0.18066000000010263, 1.0189860000000408, 0.046310999999946034, 0.8147239999999556, 0.4319380000000592, 2.463842999999997, 0.5028310000000147, 
# 0.28361099999995076, 0.3403039999999464, 0.7582350000000133, 0.002641999999923428, 2.6517499999999927, 0.20867100000009486, 2.8328109999999924, 0.21719999999993433, 0.8316340000000082, 0.004023999999958505, 0.862070000000017, 0.3754100000001017, 0.3171740000000227, 0.28037300000005416, 0.15669800000000578, 0.0025540000000319196, 1.0522500000000719, 0.13599099999998998, 0.3395850000000564, 0.03323299999999563, 0.3468769999999495, 0.5886510000000271, 0.15522699999996803, 0.4459910000000491, 0.9823129999999765, 0.16250299999990148, 0.2060279999999466, 1.0837969999998904, 0.4193809999999303, 0.26057700000001205, 1.1498530000000073, 0.2932789999999841, 0.5540510000000722, 1.00759400000004, 0.5449180000000524, 0.001852999999982785, 0.3948650000000953, 0.19931499999995594, 0.24875999999994747, 0.0020850000000791624, 0.19165899999995872, 0.2945310000000063, 0.2972780000000057, 0.5189850000000433, 0.6321240000000898, 0.20359499999995023, 0.7282339999999294, 0.2336639999999761, 0.22433399999999892, 0.3277249999999867, 25.414679999999976, 1.1345509999999877, 3.627480999999989, 0.17990399999996498, 0.3493879999999763, 0.8598930000000564, 0.4006809999999632, 0.7420379999999795, 3.5034540000000334, 0.34112900000002355, 0.2840160000000651, 0.3063030000000708, 1.7751930000000584, 0.33825100000001385, 0.4618129999998928, 0.3032819999999674, 0.4490490000000591, 0.1971369999999979, 0.18943799999999555, 0.1644260000000486, 0.2644780000000537, 0.25672099999997045, 1.1970969999999852, 0.17178499999999985, 0.18757900000002792, 0.011530999999990854, 0.18386999999995624, 0.2971830000000182, 0.061786000000097374, 0.8652479999999514, 0.18300199999998767, 0.23523599999998623, 0.2565879999999652, 0.28473799999994753, 2.536852000000067, 0.25889299999994364, 0.37905799999998635, 0.38067899999998644, 0.481068999999934, 0.32650599999999486, 0.8703060000000278, 0.2168890000000374, 0.1374399999999696, 1.190992999999935, 0.3806890000000749, 0.23123199999997723, 0.6082290000000512, 1.8757859999999482, 0.3063209999999117, 0.28667799999993804, 0.3929840000000695, 2.716554999999971, 0.18356700000003912, 9.648217000000045, 0.3845440000000053, 0.46393799999998464, 0.17559099999994032, 0.1838070000000016, 0.29988399999990634, 0.20619599999997718, 0.24680100000000493, 1.222122000000013, 0.9158440000001065, 0.15998100000001614, 0.24987899999996444, 0.8780479999999216, 2.5886379999999463, 0.1621099999999842, 0.20714399999997113, 0.3633859999999913, 0.2887719999999945, 1.257669000000078, 0.002312999999958265, 3.205654999999979, 0.002686000000039712, 0.1995879999999488, 0.4251120000000128, 0.4229799999999386, 0.18300399999998263, 0.1944580000000542, 0.21475999999995565, 1.0064419999999927, 1.523727000000008, 0.35039299999994, 0.21240199999999732, 0.36000000000001364, 0.3293019999999842, 0.5997019999999793, 0.34401500000001306, 0.45804899999995996, 0.3713300000000572, 0.9313150000000405, 0.49069199999996727, 46.48023999999998, 0.09830699999997705, 3.6500310000000127, 0.5836429999999382, 2.2442889999999807, 0.5207999999998947, 0.22033799999996972, 2.03454099999999]
""""""""""""""""" Data for evaluation functions """""""""""""""
# Rack Eval Depth 4
# p1scores =[146, 184, 274, 288, 0, 222, 223, 304, 157, 347, 0, 300, 173, 201, 182, 222, 170, 160, 189, 230, 223, 181, 117, 247, 221, 229, 284, 169, 268, 241, 0, 235, 215, 178, 197, 222, 145, 197, 215, 258, 0, 216, 0, 194, 213, 241, 196, 217, 272, 177, 288, 269, 232, 0, 277, 212, 336, 201, 249, 144, 321, 121, 236, 265, 191, 192, 222, 204, 242, 197, 317, 0, 220, 144, 182, 225, 178, 248, 272, 221, 116, 7, 219, 148, 220, 226, 166, 280, 236, 215, 263, 236, 250, 154, 227, 260, 296, 226, 219, 216]
# p2scores =[212, 230, 180, 226, 0, 231, 220, 316, 233, 206, 0, 146, 239, 207, 180, 173, 191, 159, 233, 175, 228, 211, 165, 178, 196, 243, 221, 218, 258, 235, 0, 291, 151, 228, 185, 179, 219, 286, 203, 231, 0, 194, 0, 213, 233, 202, 232, 309, 155, 161, 245, 213, 205, 0, 272, 305, 169, 217, 201, 136, 178, 190, 221, 195, 244, 236, 178, 238, 238, 284, 189, 0, 239, 193, 180, 189, 304, 250, 221, 212, 147, 16, 198, 255, 299, 237, 313, 206, 258, 243, 201, 242, 263, 218, 206, 255, 172, 290, 300, 176]
# p1endgamescores= [146, 141, 232, 229, None, 217, 178, 291, 157, 269, None, 257, 161, 201, 182, 171, 170, 160, 180, 187, 161, 158, None, 244, 204, 181, 230, 169, 218, 183, None, 198, None, 178, 150, 181, 135, 197, 209, 200, None, 216, None, 194, 193, 192, 190, 164, 272, 177, 214, 257, 184, None, 249, 195, 267, 183, 186, 141, 259, None, 172, 207, 191, 176, 172, 139, 171, 178, 267, None, 178, 144, 125, 157, 146, 200, 219, 221, None, None, 173, 148, 207, 215, 137, 205, 193, 170, 218, 177, 218, 154, 168, 185, 252, 182, 211, 190]
# p2endgamescores =[168, 200, 168, 184, None, 171, 216, 242, 214, 183, None, 133, 217, 195, 180, 169, 191, 137, 171, 151, 220, 151, None, 178, 185, 220, 193, 197, 215, 221, None, 252, None, 191, 154, 146, 134, 230, 172, 214, None, 177, None, 213, 182, 179, 205, 289, 123, 161, 224, 152, 182, None, 204, 240, 155, 202, 176, 136, 147, None, 213, 165, 244, 183, 157, 228, 223, 211, 153, None, 215, 165, 157, 173, 222, 175, 200, 190, None, None, 171, 255, 259, 170, 239, 187, 208, 196, 173, 242, 192, 218, 195, 249, 156, 211, 284, 169]
# times =[0.3045979999999999, 0.31411900000000004, 1.028207, 0.7278799999999999, 0.002391000000000254, 0.8223549999999995, 0.5361419999999999, 0.30885799999999985, 0.4713649999999996, 8.040690000000001, 0.0017460000000006914, 4.453520000000001, 3.8119639999999997, 0.15258000000000038, 2.6539730000000006, 4.876311000000001, 1.587686999999999, 0.22876500000000277, 0.37653800000000004, 1.014126000000001, 15.079270000000001, 2.490597000000001, 0.09578199999999981, 0.58981, 5.331534000000005, 14.039788000000009, 0.8188509999999951, 0.9263629999999949, 1.0293779999999941, 30.377004, 0.0019709999999975025, 0.21263400000000843, 0.13112600000000896, 0.6971180000000032, 1.796717000000001, 0.29961399999999117, 0.2729439999999954, 0.17386500000000638, 1.2968099999999936, 21.760698000000005, 0.002534999999994625, 0.3060159999999996, 0.002262999999999238, 4.769763999999981, 13.430371000000008, 7.867820000000023, 0.4319200000000194, 10.286172999999991, 0.5875459999999748, 0.8725009999999997, 17.543420999999995, 0.3250749999999982, 0.3641499999999951, 0.003634000000005244, 14.963843999999995, 2.745355999999987, 4.397753000000023, 1.3617279999999994, 5.664902999999981, 0.8148960000000045, 6.911541, 0.1422369999999944, 34.491161999999974, 1.352770000000021, 0.2054809999999634, 1.0123800000000074, 0.3612370000000169, 1.0711959999999863, 12.283149999999978, 0.9499319999999898, 0.8136440000000107, 0.0021970000000237633, 0.24760899999995445, 0.20240000000001146, 10.06759199999999, 17.460323999999957, 7.640714000000003, 0.28121299999997973, 5.4325359999999705, 1.2148369999999886, 0.07670100000001412, 0.013754000000005817, 0.3093770000000404, 0.8976590000000328, 0.6152389999999741, 0.2235469999999964, 8.328934000000004, 5.629072000000008, 7.482176999999979, 1.0071859999999901, 0.3191290000000322, 1.495381000000009, 1.3220549999999776, 3.6810449999999832, 3.060097999999982, 27.271862, 0.2244190000000117, 0.594159999999988, 1.149567999999988, 15.241562999999985]
# 2x Eval Function Depth 4
# p1scores= [202, 223, 274, 184, 263, 204, 215, 187, 133, 205, 278, 296, 241, 192, 181, 31, 161, 250, 159, 325, 347, 295, 329, 174, 254, 263, 266, 189, 341, 288, 163, 204, 201, 228, 207, 301, 186, 150, 166, 0, 0, 210, 212, 219, 161, 139, 269, 253, 73, 271, 237, 243, 226, 293, 157, 268, 215, 150, 223, 184, 239, 248, 121, 253, 0, 225, 297, 289, 182, 219, 190, 116, 212, 0, 265, 175, 307, 257, 215, 0, 334, 224, 182, 202, 149, 246, 150, 190, 268, 177, 201, 259, 203, 246, 166, 253, 241, 233, 171, 187]
# p2scores= [163, 261, 187, 200, 144, 141, 188, 224, 266, 270, 219, 179, 310, 287, 241, 34, 164, 185, 249, 188, 210, 270, 223, 250, 203, 260, 291, 273, 200, 206, 323, 194, 227, 238, 206, 181, 337, 237, 259, 0, 0, 221, 168, 272, 325, 133, 230, 219, 72, 211, 175, 281, 236, 181, 319, 228, 219, 240, 222, 204, 267, 222, 242, 246, 0, 241, 146, 225, 210, 178, 205, 207, 260, 0, 236, 173, 175, 253, 250, 0, 203, 220, 359, 327, 280, 184, 232, 236, 163, 154, 210, 184, 173, 221, 242, 195, 218, 275, 222, 175]
# p1endgamescores =[202, 209, 226, 184, 263, 204, 163, 187, 128, 192, 229, 220, 208, 184, 181, None, 135, 188, 128, 311, 302, 213, 257, 150, 187, 182, 174, 175, 286, 262, 138, 204, 201, 228, 201, 240, 156, 138, 154, None, None, 189, 191, 197, 143, None, 228, 246, None, 200, 228, 228, 176, 252, 157, 220, 204, 150, 166, 137, 190, 178, 121, 243, None, 164, 229, 197, 182, 203, 190, 116, 196, None, 199, 175, 254, 225, 139, None, 253, 224, 125, 172, 149, 192, 150, 190, 238, 172, 185, 193, 201, 202, 144, 192, 179, 220, 171, 187]
# p2endgamescores = [163, 201, 187, 177, 144, 141, 158, 196, 254, 190, 184, 152, 268, 239, 224, None, 154, 161, 196, 171, 189, 253, 173, 227, 185, 254, 267, 205, 173, 162, 258, 194, 227, 206, 177, 178, 297, 229, 195, None, None, 141, 126, 228, 288, None, 199, 185, None, 198, 155, 193, 209, 154, 304, 194, 159, 183, 222, 188, 238, 204, 242, 183, None, 233, 134, 198, 210, 138, 205, 207, 200, None, 199, 173, 158, 206, 221, None, 188, 220, 339, 288, 260, 146, 232, 236, 151, 106, 161, 153, 161, 209, 188, 160, 198, 206, 222, 152]
# times = [1.6513579999999999, 0.6924669999999997, 1.2346110000000001, 0.44587600000000016, 0.2932700000000006, 0.21223100000000006, 2.0563260000000003, 0.3351480000000002, 0.4891210000000008, 1.4405210000000004, 0.2486669999999993, 1.874815, 1.0169619999999995, 0.8398979999999998, 0.526726, 0.03356199999999987, 2.181731000000001, 1.1180710000000005, 1.1629669999999983, 10.324406, 41.16270399999999, 14.29225000000001, 2.9749949999999927, 3.5038200000000046, 13.185907999999998, 1.687308999999999, 17.269787000000008, 0.6310780000000022, 0.33241999999999905, 1.520972999999998, 1.9234519999999975, 2.1628419999999977, 0.2926080000000013, 0.3611009999999908, 0.35834699999998065, 9.21249400000002, 1.1236370000000022, 1.434044, 0.9033249999999953, 0.002450999999979331, 0.0024689999999907286, 2.669511, 1.0052470000000255, 0.4205469999999991, 0.5798229999999762, 0.1314869999999928, 1.144127999999995, 1.239783000000017, 0.06410700000000702, 0.3739930000000129, 12.927264000000008, 1.9154200000000117, 1.834779999999995, 0.47640100000000984, 0.36341600000000085, 0.4595600000000104, 0.3548640000000205, 0.26604800000001205, 13.487632999999988, 10.89629099999999, 1.0695749999999862, 34.55716100000001, 0.8232450000000142, 0.3374949999999899, 0.002517000000011649, 13.29813900000002, 4.015976999999992, 1.9313290000000052, 10.20299399999999, 0.31741999999997006, 0.3866659999999911, 0.40405199999997876, 0.3988350000000196, 0.0027299999999854663, 0.47522499999996626, 0.7034429999999929, 1.440870000000018, 2.0513310000000047, 5.061823000000004, 0.001909000000011929, 17.014077000000043, 0.5752730000000383, 5.345965000000035, 0.9250749999999925, 0.20375300000000607, 0.3181369999999788, 0.5623110000000224, 1.655349000000001, 5.229088000000047, 1.2716040000000248, 1.0718870000000038, 1.7607150000000047, 0.3929019999999923, 0.7842210000000023, 27.87901800000003, 4.229108999999994, 9.242232999999999, 0.567216999999971, 1.0133329999999887, 0.3096429999999941]
#Baseline Eval Depth 4
# nodes = [52, 288, 52, 249, 966, 77, 34, 18, 111, 26, 41, 197, 34, 84, 4303, 108, 580, 27, 1, 36, 6, 515, 915, 266, 37, 2, 108, 594, 126, 803, 4793, 365, 1045, 100, 17, 403, 8, 35, 45, 5, 225, 113, 10, 111, 92, 49, 17, 3275, 175, 16, 1156, 214, 169, 25, 3, 507, 1970, 163, 234, 1, 205, 149, 55, 87, 986, 2, 176, 202, 3, 8, 77, 10, 1236, 1, 311, 262, 2700, 3879, 22, 77, 462, 472, 63, 59, 212, 3, 1683, 591, 1113, 3615, 2494] + [43, 8, 38, 30, 4, 64, 487, 506, 1472, 87, 13, 88, 5273, 50, 725, 63, 269, 33, 77, 64, 13, 93, 1293, 2, 106, 21, 154, 33, 33, 6, 3622, 11, 3483, 67, 657, 159, 276, 167, 231, 958, 50, 71, 200, 236, 1754, 1371, 8, 6, 82, 23, 89, 108, 248, 589, 3, 46, 2188, 41, 7, 418, 82, 326, 307, 23, 19, 61, 1318, 83, 1364, 505, 634, 43, 295, 218, 41, 79, 371, 213, 607, 37, 39, 636, 1507, 25, 109, 571, 1236, 1144, 719, 23, 581, 2407, 10, 697, 58, 171, 135, 905, 56]
# p1scores =[292, 202, 285, 193, 238, 152, 255, 227, 209, 192, 143, 196, 231, 182, 220, 215, 305, 237, 213, 308, 279, 206, 280, 202, 312, 196, 269, 179, 142, 202, 277, 221, 326, 229, 272, 248, 267, 278, 232, 202, 154, 347, 267, 260, 293, 347, 156, 184, 258, 190, 214, 179, 195, 261, 238, 212, 201, 246, 215, 215, 180, 227, 209, 207, 188, 200, 218, 133, 245, 292, 272, 149, 147, 229, 175, 187, 165, 238, 159, 214, 267, 208, 269, 241, 213, 273, 215, 233, 224, 156, 289, 369, 157, 262, 261, 307, 193, 0, 291, 199] + [182, 169, 192, 207, 199, 200, 224, 217, 223, 0, 157, 237, 216, 214, 168, 266, 149, 209, 192, 193, 148, 223, 279, 248, 228, 218, 233, 210, 199, 241, 292, 0, 202, 214, 249, 213, 218, 0, 268, 169, 186, 179, 150, 261, 255, 220, 0, 274, 265, 294, 230, 151, 188, 288, 268, 238, 239, 265, 207, 160, 293, 225, 254, 199, 11, 257, 261, 193, 264, 0, 251, 166, 213, 146, 189, 185, 278, 223, 199, 197, 276, 188, 234, 216, 257, 246, 237, 225, 169, 177, 253, 280, 160, 164, 239, 0, 388, 239, 226, 186]
# p2scores =[159, 255, 300, 211, 183, 199, 214, 194, 294, 224, 197, 259, 196, 321, 229, 250, 200, 147, 281, 179, 182, 238, 152, 254, 151, 269, 214, 206, 176, 172, 205, 218, 287, 227, 246, 217, 197, 233, 222, 197, 263, 259, 267, 241, 173, 142, 150, 235, 224, 282, 228, 308, 213, 298, 134, 286, 262, 211, 266, 217, 194, 207, 196, 236, 266, 292, 314, 303, 219, 244, 225, 187, 275, 204, 227, 333, 157, 185, 197, 242, 246, 259, 203, 234, 286, 212, 213, 201, 150, 207, 247, 162, 254, 183, 162, 228, 364, 0, 234, 168, 178, 231, 241, 145, 247, 277, 314, 149, 223, 0, 259, 203, 205, 240, 104, 251, 243, 230, 249, 247, 235, 228, 180, 212, 274, 280, 225, 166, 281, 230, 160, 0, 277, 247, 259, 259, 261, 0, 185, 312, 229, 196, 198, 178, 251, 274, 0, 188, 218, 288, 222, 239, 219, 179, 230, 242, 201, 240, 234, 221, 234, 186, 201, 154, 21, 211, 180, 234, 200, 0, 227, 265, 212, 213, 220, 210, 181, 219, 272, 176, 210, 128, 267, 215, 192, 219, 228, 261, 215, 221, 189, 182, 234, 219, 177, 0, 134, 243, 269, 283]
# p1endgamescores= [241, 160, 263, 193, 193, 152, 228, 165, 170, 174, 143, 178, 178, 166, 156, 197, 250, 233, 196, 259, 234, 158, 222, 156, 312, 189, 223, 179, 142, 202, 211, 221, 227, 180, 249, 197, 206, 212, 170, 148, 154, 302, 261, 199, 233, 286, 156, 180, 237, 180, 169, 161, 162, 171, 238, 195, 167, 197, 213, 168, 178, 172, 209, 200, 145, 144, 200, 109, 186, 218, 256, 149, 147, 175, 149, 175, 165, 187, 143, 198, 252, 154, 244, 230, 182, 210, 187, 174, 219, 156, 224, 276, 157, 256, 248, 240, 166, None, 226, 199] + [182, 155, 178, 151, 143, 181, 206, 214, 216, None, 148, 188, 200, 172, 168, 185, 149, 209, 185, 158, 148, 183, 279, 158, 204, 166, 226, 210, 181, 222, 292, None, 140, 208, 180, 155, 138, None, 268, 169, 180, 179, 150, 208, 208, 207, None, 230, 265, 263, 199, 133, 177, 194, 196, 184, 179, 245, 146, 157, 253, 221, 254, 199, None, 213, 261, 183, 256, None, 221, 166, 171, 146, 180, 174, 239, 223, 142, 154, 224, 188, 190, 163, 206, 159, 192, 212, 142, 160, 253, 236, 145, 164, 176, None, 318, 230, 145, 149]
# p2endgamescores =[137, 228, 216, 211, 160, 199, 157, 194, 226, 155, 175, 202, 163, 283, 214, 169, 196, 121, 237, 156, 165, 207, 136, 203, 151, 199, 178, 187, 176, 147, 177, 159, 279, 189, 189, 188, 167, 212, 216, 184, 229, 199, 214, 194, 173, 134, 127, 175, 174, 207, 186, 249, 169, 280, 134, 241, 210, 183, 199, 196, 159, 174, 196, 223, 224, 229, 195, 252, 207, 228, 167, 187, 275, 192, 157, 267, 157, 167, 184, 177, 179, 185, 146, 159, 216, 183, 193, 201, 137, 207, 208, 151, 240, 180, 144, 201, 312, None, 166, 168] + [178, 216, 169, 133, 229, 213, 231, 118, 202, None, 180, 179, 163, 189, 104, 227, 243, 167, 189, 223, 222, 206, 166, 202, 207, 257, 191, 148, 268, 170, 160, None, 242, 241, 235, 220, 247, None, 164, 288, 219, 145, 198, 151, 216, 223, None, 157, 202, 249, 171, 198, 168, 148, 192, 216, 194, 188, 222, 172, 209, 170, 201, 154, None, 180, 170, 162, 150, None, 184, 265, 185, 190, 204, 194, 172, 208, 272, 160, 183, 110, 196, 151, 174, 190, 180, 198, 193, 197, 143, 159, 225, 212, 165, None, 124, 231, 266, 194]
# times= [0.679628, 0.25508600000000015, 0.43363600000000013, 0.46399999999999997, 0.16895599999999966, 0.8028839999999997, 4.356975, 4.766655000000001, 13.009975999999998, 0.842490999999999, 0.28075099999999864, 0.9972959999999986, 48.782015, 0.5818909999999988, 6.880387999999996, 0.8260039999999975, 3.160426000000001, 0.48574299999999937, 0.8762100000000004, 0.8821650000000005, 0.4071739999999977, 1.005681999999993, 12.369773999999992, 0.17704700000000173, 1.269368, 0.37784999999999513, 1.5121890000000064, 0.5563420000000008, 0.47250099999999406, 0.23128400000000227, 26.600381999999996, 0.2803899999999828, 29.685314000000005, 0.7725399999999922, 6.294240000000002, 1.6343379999999854, 2.923070999999993, 1.9321170000000052, 2.941325000000006, 8.602185999999989, 0.6906869999999969, 0.7905009999999777, 1.866658000000001, 2.6730459999999994, 15.083501999999982, 14.42065199999999, 0.3328260000000114, 0.2328860000000077, 0.8476809999999944, 0.37371799999999666, 0.9207979999999907, 1.137835999999993, 2.7452109999999834, 6.731470999999999, 0.1816129999999987, 0.5684990000000028, 19.516876999999994, 0.6207129999999665, 0.2729100000000244, 3.6562700000000063, 0.8647639999999797, 2.9245769999999993, 3.6845069999999964, 0.446596999999997, 0.3615979999999581, 0.7543730000000437, 12.700273000000038, 0.9589679999999703, 12.965763999999979, 5.987181000000021, 5.982330000000047, 0.6078249999999912, 3.409419000000014, 2.7159429999999816, 0.5523069999999848, 1.0286279999999692, 4.084158000000002, 2.0033220000000256, 5.545056000000045, 0.4627570000000105, 0.5283400000000142, 6.723304999999982, 13.699906999999996, 0.4170040000000199, 1.166246000000001, 5.239834999999971, 11.313789999999983, 9.967671999999993, 6.971202000000005, 0.433037000000013, 5.9265589999999975, 29.824003000000005, 0.2711219999999912, 7.5386910000000285, 0.7378659999999968, 1.8787570000000073, 1.4262469999999894, 0.0019649999999842294, 8.404497000000049, 0.6995429999999487] + [0.8239759999999999, 2.8693069999999996, 0.6537499999999996, 2.7714349999999994, 8.983509, 0.8282300000000014, 0.47210600000000014, 0.38156700000000043, 1.2638299999999987, 0.0023620000000015295, 0.39808499999999825, 0.4976900000000022, 2.021621999999997, 0.5062250000000006, 1.0995509999999982, 44.594679000000006, 1.1106079999999992, 0.1901060000000001, 4.9927550000000025, 0.4491119999999995, 0.16447800000000257, 0.4472559999999959, 0.2007289999999955, 6.015974, 8.65227200000001, 2.798123000000004, 0.5117840000000058, 0.20239599999999314, 1.181874999999991, 6.446624, 0.2352120000000042, 0.0025700000000057344, 2.038622999999987, 8.662423999999987, 40.825494000000006, 3.801616999999993, 9.708668000000017, 0.003264999999998963, 1.3517200000000003, 0.3241820000000075, 4.665531999999985, 0.22598800000000097, 0.47939200000001847, 0.6513869999999997, 0.23165700000001266, 2.067575000000005, 0.002983000000000402, 1.1652159999999867, 0.2645569999999964, 1.0616219999999998, 1.220879999999994, 0.5819160000000068, 0.3263249999999971, 31.597102000000007, 1.9335740000000214, 0.31726299999999696, 9.984741999999983, 2.2907459999999844, 2.25433799999999, 0.3571900000000028, 0.2136199999999917, 4.687556999999998, 18.58814799999999, 1.8963210000000004, 0.01300999999995156, 2.2318690000000174, 0.21198600000002443, 1.8818820000000187, 1.4707750000000033, 0.002824000000032356, 0.660618999999997, 0.9758380000000102, 8.486921999999993, 0.17451399999998785, 1.6599539999999706, 2.7566459999999893, 0.20125600000000077, 0.2870110000000068, 0.9532130000000052, 0.268195999999989, 
# 11.179752999999948, 0.2229910000000359, 3.120221000000015, 2.5662949999999682, 23.72242, 39.422394999999995, 0.36750200000000177, 0.8205089999999586, 4.176756000000012, 4.312453000000005, 0.8300380000000018, 0.7237190000000169, 2.1888329999999883, 0.21207099999998036, 15.47395499999999, 0.0029519999999934043, 8.639047000000005, 11.690905000000043, 30.171132999999998, 19.65263299999998]
# NOTE: exactly one of the commented data blocks above must be uncommented
# before running this summary (otherwise the names below are undefined).
print(statistics.median(nodes))
p1wins = 0
p2wins = 0
ties = 0
final_times = 0
for i in range(len(p1scores)):
    # Only count games with recorded endgame scores (a None endgame score
    # marks a game that never reached the endgame).
    if p1endgamescores[i] is not None and p2endgamescores[i] is not None:
        if p1scores[i] > p2scores[i]:
            p1wins += 1
        elif p1scores[i] < p2scores[i]:
            p2wins += 1
        else:
            ties += 1
        final_times += times[i]
median_time = statistics.median(times)
print(median_time)
print(p1wins)
print(p2wins)
print(ties)
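
# A reusable version of the tally above (a sketch; it assumes the same
# parallel score lists used throughout this file, where a None endgame score
# marks a game that never reached the endgame).
def tally_results(p1, p2, p1end, p2end):
    """Count player-1 wins, player-2 wins, and ties over completed games."""
    wins1 = wins2 = tie = 0
    for s1, s2, e1, e2 in zip(p1, p2, p1end, p2end):
        if e1 is None or e2 is None:
            continue  # skip games without endgame data
        if s1 > s2:
            wins1 += 1
        elif s1 < s2:
            wins2 += 1
        else:
            tie += 1
    return wins1, wins2, tie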
"""""""""""""""""""""""""""NODE SORTING Evaluation"""""""""""""""""""""""""""""""""
# smoves = [499, 1628, 990, 2, 13, 133, 17, 1173, 60, 5, 1573, 333, 12296, 31539, 51, 35, 229, 61, 1644, 129, 52, 55, 2175, 41, 137, 73, 7, 12, 41, 993, 14, 23, 15, 670, 505, 7139, 23, 17, 492, 2, 7498, 17, 4, 859, 4331, 8, 3, 165, 3847]
# rmoves = [4, 866, 39, 487, 28, 838, 45, 5786, 14, 320, 869, 6, 16965, 322, 2250, 2670, 495, 87, 1490, 2, 473, 66, 35, 165, 25, 86, 19, 214, 87, 99, 361, 49, 17, 19382, 8629, 169, 754, 5, 734, 9616, 94, 2669, 309, 1071, 23607, 2763, 15]
# len_sorted_moves = [2918, 19, 20, 167, 223, 24, 66, 239, 901, 34, 5, 439, 14, 34, 667, 170, 79, 670, 308, 66, 62, 253, 343, 4, 79, 2440, 54, 2283, 92, 206, 433, 3, 43, 938, 54, 10, 197, 2857, 75, 13, 105, 44, 119, 19, 69, 4487, 1236, 25, 10, 2, 199, 981, 96, 12, 815, 53, 726, 61, 3301, 69, 1665, 1018, 2148, 120432, 49, 262, 27, 528, 75, 2508, 65, 43, 60, 109, 32, 1664, 287, 2759, 1428, 246, 128, 90, 898, 20, 371, 56, 13, 8, 1196, 6033, 129, 13, 1399]
# len_sorted_times = [28.457569, 0.33411299999999855, 0.3422729999999987, 1.7242899999999999, 1.9751229999999964, 0.34948599999999885, 0.8178440000000009, 2.284306000000001, 8.470917, 0.49538099999999474, 0.2556449999999941, 4.728405000000002, 0.34807999999999595, 0.023735999999999535, 0.12552500000000322, 0.483984999999997, 7.080171, 1.9386399999999995, 0.9097670000000022, 6.730280999999998, 2.9818090000000126, 0.7033690000000092, 0.72840699999999, 2.370058, 4.470228999999989, 0.2786049999999989, 0.8592219999999884, 20.823213999999993, 0.6710889999999949, 17.893317999999994, 0.8816439999999943, 1.7517409999999956, 3.9330260000000123, 0.20288299999999992, 0.6357670000000013, 7.664194999999992, 0.7840810000000147, 0.3089580000000183, 1.8615410000000168, 24.267165000000006, 0.848608999999982, 0.27916300000001115, 1.231020000000001, 0.5714190000000201, 0.04297500000001264, 1.3442199999999787, 0.3977549999999894, 0.8133920000000217, 36.14156200000002, 10.697302999999977, 28.457569, 0.33411299999999855, 0.3422729999999987, 1.7242899999999999, 1.9751229999999964, 0.34948599999999885, 0.8178440000000009, 2.284306000000001, 8.470917, 0.49538099999999474, 0.2556449999999941, 4.728405000000002, 0.34807999999999595, 0.023735999999999535, 0.12552500000000322, 0.483984999999997, 7.080171, 1.9386399999999995, 0.9097670000000022, 6.730280999999998, 2.9818090000000126, 0.7033690000000092, 0.72840699999999, 2.370058, 4.470228999999989, 0.2786049999999989, 0.8592219999999884, 20.823213999999993, 0.6710889999999949, 17.893317999999994, 0.8816439999999943, 1.7517409999999956, 3.9330260000000123, 0.20288299999999992, 0.6357670000000013, 7.664194999999992, 0.7840810000000147, 0.3089580000000183, 1.8615410000000168, 24.267165000000006, 0.848608999999982, 0.27916300000001115, 1.231020000000001, 0.5714190000000201, 0.04297500000001264, 1.3442199999999787, 0.3977549999999894, 0.8133920000000217, 36.14156200000002, 10.697302999999977, 0.5683670000000001, 0.26924099999999984, 0.17733, 2.02914, 8.453273, 0.8671249999999997, 0.2741490000000013, 8.405273000000001, 0.003088000000001756, 0.7832660000000011, 6.770484, 0.001788000000001233, 0.7870349999999995, 32.759317, 0.8810690000000037, 15.959770999999996, 9.082476, 23.287146000000007, 899.159576, 0.6069360000000188, 2.3854630000000725, 0.4569099999999935, 0.13301100000001043, 4.110847000000035, 0.874349000000052, 20.135546999999974, 0.7187790000000405, 0.4951149999999416, 0.675617000000102, 1.151836000000003, 0.4612999999999374, 13.440222000000176, 0.0017380000001594453, 2.7643929999999273, 22.713453000000072, 11.432960999999978, 2.4568299999998544, 1.368257000000085, 0.9997370000000956, 7.1294339999999465, 0.3538969999999608, 3.9655290000000605, 0.6393659999998818, 0.26812500000005457, 0.23288300000012896, 9.49111599999992, 55.96718499999997, 1.2186229999999796, 0.28220200000009754, 10.691080000000056]
# stimes = [5.462273, 13.628348999999998, 9.027452999999998, 0.20035800000000137, 0.350676, 1.452566000000001, 0.3063469999999988, 9.485683000000002, 0.7841099999999983, 0.20266799999999563, 17.036028, 3.118584999999996, 102.818501, 242.39040999999997, 0.5522730000000138, 0.5206039999999916, 2.3233899999999608, 0.7285350000000221, 14.949749999999995, 0.17539299999998548, 1.184301000000005, 0.577241000000015, 0.6432050000000231, 20.644333999999958, 0.5160099999999943, 1.323853999999983, 0.8926519999999982, 0.2601789999999937, 0.33121899999997595, 0.4770679999999743, 8.132326000000035, 0.31665900000001557, 0.39193999999997686, 0.3116600000000176, 6.508721000000037, 5.085165000000018, 55.82077000000004, 0.3661329999999907, 0.4054830000000038, 4.384490000000028, 0.16655200000002424, 70.42404600000009, 0.35272199999997156, 0.2003789999999981, 8.768035000000054, 35.55618399999992, 0.24955899999997655, 0.1856559999999945, 1.568037000000004, 29.536572999999976]
# rtimes = [0.338896, 8.730683, 0.4996229999999997, 4.7850139999999985, 0.37907200000000074, 7.887321, 0.5963290000000008, 47.081212, 0.29992200000000935, 2.7501099999999923, 7.412319000000011, 0.2718089999999904, 167.70644700000003, 3.072586000000001, 21.200469, 20.699720999999954, 4.870582000000013, 0.763321000000019, 13.35970900000001, 0.21506199999998898, 4.463702000000012, 0.7095789999999624, 0.539273000000037, 1.8219100000000026, 0.0038090000000465807, 0.36327099999999746, 1.0180799999999977, 0.3082679999999982, 2.0309389999999894, 0.7468609999999671, 0.9124090000000251, 2.8971290000000067, 0.584384, 0.28475700000001325, 163.11071900000002, 76.13204299999995, 2.0015580000000455, 6.309083999999984, 0.19872700000007626, 6.9720570000000635, 101.70751199999995, 0.002196000000026288, 0.14134500000000116, 0.9927730000000565, 26.648521000000073, 2.640781000000061, 11.751337000000035, 197.13955399999998, 26.002137999999945, 0.29406200000005356]
# lmoves = len_sorted_moves[:50]
# ltimes = len_sorted_times[:50]
# print(statistics.mean(smoves))
# print(statistics.median(rmoves))
# print(statistics.median(lmoves))
# print(statistics.median(stimes))
# print(statistics.median(rtimes))
# print(statistics.median(ltimes))
# print(len(baselinep1))
# print(len(baselinep2))
# p1wins = 0
# p2wins = 0
# for i in range(len(baselinep1)):
# if baselinep1[i] >= baselinep2[i]:
# p1wins += 1
# else:
# p2wins += 1
# print()
# sns.set_palette(["cadetblue", "gold", "tomato"])
# sns.barplot(x=["AB Pruning", "Baseline"], y = [p1wins, p2wins])
# plt.title("Wins after 200 Simulations")
# plt.xlabel("Agents")
# plt.ylabel("# of Wins")
# plt.show()
# print(p1wins)
# p1wins = 0
# p2wins = 0
# for i in range(len(randomp1)):
# if randomp1[i] >= randomp2[i]:
# p1wins += 1
# else:
# p2wins += 1
# print(p1wins)
# print(p1wins + p2wins)
# sns.set_palette(["cadetblue", "orangered"])
# sns.barplot(x=["AB Pruning", "Baseline"], y = [p1wins, p2wins])
# plt.title("Wins after 55 Simulations")
# plt.xlabel("Agents")
# plt.ylabel("# of Wins")
# plt.show()
| 530.050228 | 10,170 | 0.711874 |
4f4aaad52aa59474e76d982060300703501fbd44 | 160 | py | Python | core/bodyparts/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z | core/bodyparts/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | null | null | null | core/bodyparts/__init__.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | ["MIT"] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z |
from core.bodyparts.base import (
Torso, Teeth, Tail, Nose, Neck,
Muzzle, Mouth, Heart, Lungs, Head, Hand, Foot, Fangs, Eye,
Ear, Arm, Brain, Leg
)
| 26.666667 | 62 | 0.65 |
4f4a24ec2ae4972532fb3b29c2aaddff0748d2ea | 719 | py | Python | destinator/util/listener.py | PJUllrich/distributed-systems | b362c1c6783fbd1659448277aab6c158485d7c3c | ["MIT"] | null | null | null | destinator/util/listener.py | PJUllrich/distributed-systems | b362c1c6783fbd1659448277aab6c158485d7c3c | ["MIT"] | null | null | null | destinator/util/listener.py | PJUllrich/distributed-systems | b362c1c6783fbd1659448277aab6c158485d7c3c | ["MIT"] | null | null | null |
import logging
import threading
MESSAGE_SIZE = 1024
logger = logging.getLogger(__name__)
class Listener(threading.Thread):
def __init__(self, sock, queue):
super().__init__()
self.daemon = True
self.cancelled = False
self.sock = sock
self.queue = queue
    def run(self):
        """
        Starts the receiving loop, which receives packets from the socket.
        """
self.receive()
def receive(self):
logger.debug(f"Thread {threading.get_ident()}: "
f"Socket {self.sock}: Listener is now receiving.")
while not self.cancelled:
message = self.sock.recv(MESSAGE_SIZE)
self.queue.put(message)
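
if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the destinator package):
    # push one UDP datagram through a Listener and read it back off the queue.
    import queue
    import socket

    recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    recv_sock.bind(("127.0.0.1", 0))  # bind to any free local port
    send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    messages = queue.Queue()
    Listener(recv_sock, messages).start()

    send_sock.sendto(b"hello", recv_sock.getsockname())
    print(messages.get(timeout=5))  # -> b'hello'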
| 23.966667 | 75 | 0.600834 |
4f4c168083a04a814702809e1d88fdaa878b4426 | 2,212 | py | Python | tests/helpers/__init__.py | Aetf/cc-modern-cmake | 4421755acf85a30a42f3f48b6d2e1a5130fd3f46 | ["BSD-2-Clause"] | null | null | null | tests/helpers/__init__.py | Aetf/cc-modern-cmake | 4421755acf85a30a42f3f48b6d2e1a5130fd3f46 | ["BSD-2-Clause"] | 3 | 2021-03-29T18:28:19.000Z | 2021-03-29T18:28:20.000Z | tests/helpers/__init__.py | Aetf/cc-modern-cmake | 4421755acf85a30a42f3f48b6d2e1a5130fd3f46 | ["BSD-2-Clause"] | null | null | null |
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
import os
import difflib
import pytest
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
@contextmanager
def inside_dir(dirpath):
    """
    Execute code from inside the given directory.
    :param dirpath: String, path of the directory in which to run the code.
    """
old_path = os.getcwd()
try:
os.chdir(str(dirpath))
yield
finally:
os.chdir(old_path)
def cast_path(anypath):
"""Cast a py.path.local or a str or a pathlib.Path to a pathlib.Path
"""
return Path(str(anypath))
def assertMultiLineEqual(first, second, msg=None):
"""Assert that two multi-line strings are equal.
If they aren't, show a nice diff.
"""
__tracebackhide__ = True
if first != second:
message = ''.join(difflib.ndiff(first.splitlines(True),
second.splitlines(True)))
if msg:
message += " : " + msg
pytest.fail("Multi-line strings are unequal:\n" + message)
def assertFileHeadLines(filename, lines):
    """Assert that the first few lines of the file with the given name are
    equal to the given lines.
    If they aren't, show a nice diff.
    A None entry in lines is skipped (the matching file line is used instead).
    """
__tracebackhide__ = True
if not lines:
return
orig_len = len(lines)
with Path(filename).open() as f:
def skip_none(fline, line):
if line is None:
line = fline
return (fline, line)
flines, lines = zip(*[skip_none(fline, line) for fline, line in zip(f, lines)])
assert len(flines) == orig_len
return assertMultiLineEqual(''.join(flines), ''.join(lines))
def assertFileStructure(basedir, manifest):
"""Assert that the file system structure starting from basedir follows the manifest.
Only paths in the manifest are checked.
"""
__tracebackhide__ = True
basedir = Path(basedir)
for relpath in manifest:
path = basedir / relpath
if relpath.endswith('/'):
assert path.is_dir()
else:
assert path.is_file()
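
if __name__ == "__main__":
    # Self-check sketch: build a tiny tree in a temporary directory and
    # verify it with the helpers above (the paths are made up for the demo).
    import tempfile

    base = Path(tempfile.mkdtemp())
    (base / "src").mkdir()
    (base / "src" / "main.py").touch()
    assertFileStructure(base, ["src/", "src/main.py"])
    with inside_dir(base):
        print(os.getcwd())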
| 26.97561 | 88 | 0.633816 |
4f4b54754cf61f654d775bce346bcefeea43bb24 | 4,718 | py | Python | solutions/rank-5/predict_code/predict_model.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | ["MIT"] | 48 | 2020-03-18T11:34:49.000Z | 2022-03-31T18:30:00.000Z | solutions/rank-5/predict_code/predict_model.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | ["MIT"] | 40 | 2020-03-24T18:17:51.000Z | 2022-03-12T00:30:30.000Z | solutions/rank-5/predict_code/predict_model.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | ["MIT"] | 24 | 2020-04-18T02:52:47.000Z | 2022-01-22T19:13:16.000Z |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import _pickle as cPickle
import argparse
from copy import deepcopy
import japanize_matplotlib
import lightgbm as lgb
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import mean_squared_error
import time
from tqdm import tqdm
import os
code_path = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('seed', type=int)
arg('iteration_mul', type=float)
arg('train_file', type=str)
arg('test_file', type=str)
arg('--learning_rate', type=float, default=0.05)
arg('--num_leaves', type=int, default=31)
arg('--n_estimators', type=int, default=500)
args = parser.parse_args()  # e.g. args=['1', '0.5', 'train_fe.ftr', 'test_fe.ftr']
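# Invocation sketch (same positional order as defined above):
#   python predict_model.py 1 0.5 train_fe.ftr test_fe.ftr --learning_rate 0.05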
# print(args)
train_fe = pd.read_feather(f'{code_path}/../prepare_data/{args.train_file}')
test_fe = pd.read_feather(f'{code_path}/../prepare_data/{args.test_file}')
target_fe = train_fe['meter_reading']
train_fe = train_fe.drop('meter_reading', axis=1)
X_train = train_fe.query('20160115 <= timestamp < 20160601 & site_id != 0')
X_valid = train_fe.query('20160901 <= timestamp < 20170101 & site_id != 0')
X_test = test_fe
y_train = target_fe.loc[X_train.index]
y_valid = target_fe.loc[X_valid.index]
# y_train = np.log1p(y_train)
# y_valid = np.log1p(y_valid)
X_train = X_train.drop('timestamp', axis=1)
X_valid = X_valid.drop('timestamp', axis=1)
X_test = X_test.drop('timestamp', axis=1)
# print(X_train.shape)
def meter_predict(meter, model, X_test, best_iteration, iteration_mul=1.5):
    """Predict one meter type, building by building.

    The number of boosting rounds used for each building is capped at
    iteration_mul times the best iteration found on the validation split.
    """
    X_test_m = X_test.query('meter == {}'.format(meter)).drop('meter', axis=1)
    g = X_test_m.groupby('building_id')
    y_pred = []
    for building_id in tqdm(sorted(X_test_m['building_id'].unique())):
        X_building = g.get_group(building_id)
        # Use the passed-in model (the global models_all is only defined
        # further down in this script).
        n_iter = min(model.n_estimators,
                     int(best_iteration[meter][building_id] * iteration_mul))
        y_pred.append(pd.Series(model.predict(X_building, n_jobs=4,
                                              num_iteration=n_iter),
                                index=X_building.index))
    return pd.concat(y_pred).sort_index()
# load model
load_name = '{}/../model/model_use_{}_seed{}_leave{}_lr{}_tree{}.pkl'.format(code_path, args.train_file.replace('.ftr', ''),args.seed, args.num_leaves, str(args.learning_rate).replace('.', ''), args.n_estimators)
with open(load_name, 'rb') as f:
models = pickle.load(f)
# with open(f'{code_path}/../model/model_5_95_hokan_cleaning_50000tree_seed{}.pkl'.format(args.seed), 'wb') as f:
# pickle.dump(models, f)
# Best number of boosting iterations for each building and meter
best_iteration = dict()
for meter in [0,1,2,3]:
best_iteration[meter] = dict()
# for i in range(1448):
# best_iteration[meter][i] = 200
for i in tqdm(sorted(X_valid.query('meter == {}'.format(meter))['building_id'].unique())):
best_iteration[meter][i] = max(20, np.argmin(np.array(models[meter].evals_result_[i]['rmse'])) + 1)
# best_iteration[meter][i] = np.argmin(np.array(models[meter].evals_result_[i]['rmse'])) + 1
del_list = [list(), list(), list(), list()]
for meter in [0,1,2,3]:
for buildingID, itr in best_iteration[meter].items():
if itr<=20:
del_list[meter].append(buildingID)
if itr<=100:
best_iteration[meter][buildingID] = 100
# if itr>=int(models[0].n_estimators * 0.98):
# best_iteration[meter][buildingID] = models[0].n_estimatorss
for meter in [0,1,2,3]:
for i in range(1448):
if i not in best_iteration[meter]:
best_iteration[meter][i] = 200
# load the second model set (models_all)
load_name = '{}/../model/model_all_use_{}_seed{}_leave{}_lr{}_tree{}.pkl'.format(code_path, args.train_file.replace('.ftr', ''),args.seed, args.num_leaves, str(args.learning_rate).replace('.', ''), args.n_estimators)
with open(load_name, 'rb') as f:
models_all = pickle.load(f)
# Test-set predictions for each meter type
preds = list()
for i in tqdm([3,2,1,0]):
preds.append(meter_predict(i, models_all[i], X_test, best_iteration, iteration_mul=args.iteration_mul))
y_preds = pd.concat(preds).sort_index()
# lgb.plot_importance(models_all[0], importance_type='gain', figsize=(10,20))
# lgb.plot_importance(models_all[0], importance_type='split', figsize=(10,20))
submission = pd.read_csv(f'{code_path}/../input/sample_submission.csv')
submission['meter_reading'] = (np.expm1(y_preds))
submission.loc[submission['meter_reading']<0, 'meter_reading'] = 0
save_name = '{}/../output/use_{}_seed{}_leave{}_lr{}_tree{}_mul{}.csv'.format(code_path, args.train_file.replace('.ftr', ''), args.seed, args.num_leaves, str(args.learning_rate).replace('.', ''), args.n_estimators, str(args.iteration_mul).replace('.', ''))
submission.to_csv(save_name, index=False)
submission.head()
| 37.444444 | 256 | 0.701357 |
4f4bdab7e7c433d49e00c470d188df5e8e4d786e | 357 | py | Python | app/api/v2/models/rsvp.py | jmusila/Questioner-v2 | 54110d8233311862ccfadb32b5ced557c33c4a0f | ["MIT"] | 1 | 2019-02-13T10:02:16.000Z | 2019-02-13T10:02:16.000Z | app/api/v2/models/rsvp.py | jmusila/Questioner-v2 | 54110d8233311862ccfadb32b5ced557c33c4a0f | ["MIT"] | 6 | 2019-01-21T19:18:14.000Z | 2019-01-24T19:46:40.000Z | app/api/v2/models/rsvp.py | jmusila/Questioner-v2 | 54110d8233311862ccfadb32b5ced557c33c4a0f | ["MIT"] | null | null | null |
from datetime import datetime
class Responds:
    """Model for a meetup RSVP response."""

    def __init__(self, r_id, meetup_id, topic, status):
        """Respond constructor"""
        self.r_id = r_id
        self.meetup_id = meetup_id
        self.topic = topic
        self.status = status

    def get_single_response(self, r_id):
        """Fetch a single RSVP response by id (not implemented yet)."""
        pass
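
if __name__ == '__main__':
    # Quick construction check (hypothetical values, for illustration only).
    rsvp = Responds(1, 42, 'Python meetup', 'yes')
    print(rsvp.topic, rsvp.status)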
| 21 | 55 | 0.62465 |
4f4a004a9be01e0c89f6305d96270831fd4b2262 | 18,040 | py | Python | scf/uhf_symm.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | ["BSD-2-Clause"] | null | null | null | scf/uhf_symm.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | ["BSD-2-Clause"] | null | null | null | scf/uhf_symm.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import time
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf.scf import hf_symm
from pyscf.scf import uhf
from pyscf.scf import chkfile
def analyze(mf, verbose=logger.DEBUG, **kwargs):
from pyscf.lo import orth
from pyscf.tools import dump_mat
mol = mf.mol
if not mol.symmetry:
return uhf.analyze(mf, verbose, **kwargs)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
log = logger.Logger(mf.stdout, verbose)
nirrep = len(mol.irrep_id)
ovlp_ao = mf.get_ovlp()
orbsyma = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[0], ovlp_ao, False)
orbsymb = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[1], ovlp_ao, False)
orbsyma = numpy.array(orbsyma)
orbsymb = numpy.array(orbsymb)
tot_sym = 0
noccsa = [sum(orbsyma[mo_occ[0]>0]==ir) for ir in mol.irrep_id]
noccsb = [sum(orbsymb[mo_occ[1]>0]==ir) for ir in mol.irrep_id]
for i, ir in enumerate(mol.irrep_id):
if (noccsa[i]+noccsb[i]) % 2:
tot_sym ^= ir
if mol.groupname in ('Dooh', 'Coov', 'SO3'):
log.note('TODO: total symmetry for %s', mol.groupname)
else:
log.note('total symmetry = %s',
symm.irrep_id2name(mol.groupname, tot_sym))
log.note('alpha occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsa)
log.note('beta occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsb)
ss, s = mf.spin_square((mo_coeff[0][:,mo_occ[0]>0],
mo_coeff[1][:,mo_occ[1]>0]), ovlp_ao)
log.note('multiplicity <S^2> = %.8g 2S+1 = %.8g', ss, s)
if verbose >= logger.NOTE:
log.note('**** MO energy ****')
irname_full = {}
for k, ir in enumerate(mol.irrep_id):
irname_full[ir] = mol.irrep_name[k]
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('alpha MO #%d (%s #%d), energy= %.15g occ= %g',
k+1, irname_full[j], irorbcnt[j], mo_energy[0][k], mo_occ[0][k])
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('beta MO #%d (%s #%d), energy= %.15g occ= %g',
k+1, irname_full[j], irorbcnt[j], mo_energy[1][k], mo_occ[1][k])
ovlp_ao = mf.get_ovlp()
if mf.verbose >= logger.DEBUG:
label = mol.spheric_labels(True)
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' % (k+1, irname_full[j], irorbcnt[j]))
log.debug(' ** alpha MO coefficients (expansion on meta-Lowdin AOs) **')
orth_coeff = orth.orth_ao(mol, 'meta_lowdin', s=ovlp_ao)
c_inv = numpy.dot(orth_coeff.T, ovlp_ao)
dump_mat.dump_rec(mol.stdout, c_inv.dot(mo_coeff[0]), label, molabel,
start=1, **kwargs)
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' % (k+1, irname_full[j], irorbcnt[j]))
log.debug(' ** beta MO coefficients (expansion on meta-Lowdin AOs) **')
dump_mat.dump_rec(mol.stdout, c_inv.dot(mo_coeff[1]), label, molabel,
start=1, **kwargs)
dm = mf.make_rdm1(mo_coeff, mo_occ)
return mf.mulliken_meta(mol, dm, s=ovlp_ao, verbose=log)
def get_irrep_nelec(mol, mo_coeff, mo_occ, s=None):
'''Alpha/beta electron numbers for each irreducible representation.
Args:
mol : an instance of :class:`Mole`
To provide irrep_id, and spin-adapted basis
mo_occ : a list of 1D ndarray
Regular occupancy, without grouping for irreps
mo_coeff : a list of 2D ndarray
Regular orbital coefficients, without grouping for irreps
Returns:
irrep_nelec : dict
The number of alpha/beta electrons for each irrep {'ir_name':(int,int), ...}.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
-75.623975516256721
>>> scf.uhf_symm.get_irrep_nelec(mol, mf.mo_coeff, mf.mo_occ)
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
'''
orbsyma = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[0], s, False)
orbsymb = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[1], s, False)
orbsyma = numpy.array(orbsyma)
orbsymb = numpy.array(orbsymb)
irrep_nelec = dict([(mol.irrep_name[k], (int(sum(mo_occ[0][orbsyma==ir])),
int(sum(mo_occ[1][orbsymb==ir]))))
for k, ir in enumerate(mol.irrep_id)])
return irrep_nelec
map_rhf_to_uhf = uhf.map_rhf_to_uhf
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
    '''Canonicalization diagonalizes the UHF Fock matrix in occupied and
    virtual subspaces separately (without changing the occupancy).
    '''
if not mf.mol.symmetry:
return uhf.canonicalize(mf, mo_coeff, mo_occ, fock)
mo_occ = numpy.asarray(mo_occ)
assert(mo_occ.ndim == 2)
if fock is None:
dm = mf.make_rdm1(mo_coeff, mo_occ)
        # `mol` was undefined here and get_jk returns the (J, K) pair;
        # build the Fock matrix from the effective potential instead,
        # as get_grad does below.
        fock = mf.get_hcore() + mf.get_veff(mf.mol, dm)
occidxa = mo_occ[0] == 1
occidxb = mo_occ[1] == 1
viridxa = mo_occ[0] == 0
viridxb = mo_occ[1] == 0
s = mf.get_ovlp()
def eig_(fock, mo_coeff, idx, es, cs):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.T.conj(), fock, orb))
e, c = scipy.linalg.eigh(f1)
es[idx] = e
c = numpy.dot(mo_coeff[:,idx], c)
cs[:,idx] = hf_symm._symmetrize_canonicalization_(mf.mol, e, c, s)
mo = numpy.empty_like(mo_coeff)
mo_e = numpy.empty(mo_occ.shape)
eig_(fock[0], mo_coeff[0], occidxa, mo_e[0], mo[0])
eig_(fock[0], mo_coeff[0], viridxa, mo_e[0], mo[0])
eig_(fock[1], mo_coeff[1], occidxb, mo_e[1], mo[1])
eig_(fock[1], mo_coeff[1], viridxb, mo_e[1], mo[1])
return mo_e, mo
class UHF(uhf.UHF):
__doc__ = uhf.UHF.__doc__ + '''
Attributes for symmetry allowed UHF:
irrep_nelec : dict
Specify the number of alpha/beta electrons for particular irrep
{'ir_name':(int,int), ...}.
For the irreps not listed in these dicts, the program will choose the
occupancy based on the orbital energies.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
>>> mf = scf.RHF(mol)
>>> mf.scf()
-75.623975516256692
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
>>> mf.irrep_nelec = {'B1': (1, 0)}
>>> mf.scf()
-75.429189192031131
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 0), 'B2': (1, 1)}
'''
def __init__(self, mol):
uhf.UHF.__init__(self, mol)
# number of electrons for each irreps
self.irrep_nelec = {}
self._keys = self._keys.union(['irrep_nelec'])
def dump_flags(self):
uhf.UHF.dump_flags(self)
hf_symm.check_irrep_nelec(self.mol, self.irrep_nelec, self.nelec)
def build(self, mol=None):
for irname in self.irrep_nelec:
if irname not in self.mol.irrep_name:
logger.warn(self, '!! No irrep %s', irname)
return uhf.UHF.build(self, mol)
def eig(self, h, s):
if not self.mol.symmetry:
return uhf.UHF.eig(self, h, s)
        nirrep = len(self.mol.symm_orb)
s = symm.symmetrize_matrix(s, self.mol.symm_orb)
ha = symm.symmetrize_matrix(h[0], self.mol.symm_orb)
cs = []
es = []
for ir in range(nirrep):
e, c = hf.SCF.eig(self, ha[ir], s[ir])
cs.append(c)
es.append(e)
ea = numpy.hstack(es)
ca = hf_symm.so2ao_mo_coeff(self.mol.symm_orb, cs)
hb = symm.symmetrize_matrix(h[1], self.mol.symm_orb)
cs = []
es = []
for ir in range(nirrep):
e, c = scipy.linalg.eigh(hb[ir], s[ir])
cs.append(c)
es.append(e)
eb = numpy.hstack(es)
cb = hf_symm.so2ao_mo_coeff(self.mol.symm_orb, cs)
return numpy.array((ea,eb)), (ca,cb)
def get_grad(self, mo_coeff, mo_occ, fock=None):
mol = self.mol
if not mol.symmetry:
return uhf.UHF.get_grad(self, mo_coeff, mo_occ, fock)
if fock is None:
dm1 = self.make_rdm1(mo_coeff, mo_occ)
fock = self.get_hcore(mol) + self.get_veff(self.mol, dm1)
ovlp_ao = self.get_ovlp()
orbsyma = symm.label_orb_symm(self, mol.irrep_id, mol.symm_orb,
mo_coeff[0], ovlp_ao, False)
orbsymb = symm.label_orb_symm(self, mol.irrep_id, mol.symm_orb,
mo_coeff[1], ovlp_ao, False)
orbsyma = numpy.asarray(orbsyma)
orbsymb = numpy.asarray(orbsymb)
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
ga = reduce(numpy.dot, (mo_coeff[0][:,viridxa].T.conj(), fock[0],
mo_coeff[0][:,occidxa]))
ga[orbsyma[viridxa].reshape(-1,1)!=orbsyma[occidxa]] = 0
gb = reduce(numpy.dot, (mo_coeff[1][:,viridxb].T.conj(), fock[1],
mo_coeff[1][:,occidxb]))
gb[orbsymb[viridxb].reshape(-1,1)!=orbsymb[occidxb]] = 0
return numpy.hstack((ga.ravel(), gb.ravel()))
def get_occ(self, mo_energy=None, mo_coeff=None, orbsym=None):
        '''We assume mo_energy is grouped by symmetry irreps (see function
        self.eig). The orbitals are sorted after SCF.
        '''
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
if not mol.symmetry:
return uhf.UHF.get_occ(self, mo_energy, mo_coeff)
if orbsym is None:
if mo_coeff is not None: # due to linear-dep
ovlp_ao = self.get_ovlp()
orbsyma = symm.label_orb_symm(self, mol.irrep_id, mol.symm_orb,
mo_coeff[0], ovlp_ao, False)
orbsymb = symm.label_orb_symm(self, mol.irrep_id, mol.symm_orb,
mo_coeff[1], ovlp_ao, False)
orbsyma = numpy.asarray(orbsyma)
orbsymb = numpy.asarray(orbsymb)
else:
ovlp_ao = None
orbsyma = [numpy.repeat(ir, mol.symm_orb[i].shape[1])
for i, ir in enumerate(mol.irrep_id)]
orbsyma = orbsymb = numpy.hstack(orbsyma)
else:
orbsyma = numpy.asarray(orbsym[0])
orbsymb = numpy.asarray(orbsym[1])
assert(mo_energy[0].size == orbsyma.size)
mo_occ = numpy.zeros_like(mo_energy)
idx_ea_left = []
idx_eb_left = []
neleca_fix = nelecb_fix = 0
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = numpy.where(orbsyma == ir)[0]
ir_idxb = numpy.where(orbsymb == ir)[0]
if irname in self.irrep_nelec:
if isinstance(self.irrep_nelec[irname], (int, numpy.integer)):
nelecb = self.irrep_nelec[irname] // 2
neleca = self.irrep_nelec[irname] - nelecb
else:
neleca, nelecb = self.irrep_nelec[irname]
ea_idx = numpy.argsort(mo_energy[0][ir_idxa].round(9))
eb_idx = numpy.argsort(mo_energy[1][ir_idxb].round(9))
mo_occ[0,ir_idxa[ea_idx[:neleca]]] = 1
mo_occ[1,ir_idxb[eb_idx[:nelecb]]] = 1
neleca_fix += neleca
nelecb_fix += nelecb
else:
idx_ea_left.append(ir_idxa)
idx_eb_left.append(ir_idxb)
neleca_float = self.nelec[0] - neleca_fix
nelecb_float = self.nelec[1] - nelecb_fix
assert(neleca_float >= 0)
assert(nelecb_float >= 0)
if len(idx_ea_left) > 0:
idx_ea_left = numpy.hstack(idx_ea_left)
ea_left = mo_energy[0][idx_ea_left]
ea_sort = numpy.argsort(ea_left.round(9))
occ_idx = idx_ea_left[ea_sort][:neleca_float]
mo_occ[0][occ_idx] = 1
if len(idx_eb_left) > 0:
idx_eb_left = numpy.hstack(idx_eb_left)
eb_left = mo_energy[1][idx_eb_left]
eb_sort = numpy.argsort(eb_left.round(9))
occ_idx = idx_eb_left[eb_sort][:nelecb_float]
mo_occ[1][occ_idx] = 1
vir_idx = (mo_occ[0]==0)
if self.verbose >= logger.INFO and numpy.count_nonzero(vir_idx) > 0:
ehomoa = max(mo_energy[0][mo_occ[0]>0 ])
elumoa = min(mo_energy[0][mo_occ[0]==0])
ehomob = max(mo_energy[1][mo_occ[1]>0 ])
elumob = min(mo_energy[1][mo_occ[1]==0])
noccsa = []
noccsb = []
p0 = 0
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = orbsyma == ir
ir_idxb = orbsymb == ir
noccsa.append(numpy.count_nonzero(mo_occ[0][ir_idxa]))
noccsb.append(numpy.count_nonzero(mo_occ[1][ir_idxb]))
if ehomoa in mo_energy[0][ir_idxa]:
irhomoa = irname
if elumoa in mo_energy[0][ir_idxa]:
irlumoa = irname
if ehomob in mo_energy[1][ir_idxb]:
irhomob = irname
if elumob in mo_energy[1][ir_idxb]:
irlumob = irname
logger.info(self, 'alpha HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomoa, ehomoa, irlumoa, elumoa)
logger.info(self, 'beta HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomob, ehomob, irlumob, elumob)
ehomo = max(ehomoa,ehomob)
elumo = min(elumoa,elumob)
logger.debug(self, 'alpha irrep_nelec = %s', noccsa)
logger.debug(self, 'beta irrep_nelec = %s', noccsb)
hf_symm._dump_mo_energy(mol, mo_energy[0], mo_occ[0], ehomo, elumo,
orbsyma, 'alpha-', verbose=self.verbose)
hf_symm._dump_mo_energy(mol, mo_energy[1], mo_occ[1], ehomo, elumo,
orbsymb, 'beta-', verbose=self.verbose)
if mo_coeff is not None and self.verbose >= logger.DEBUG:
if ovlp_ao is None:
ovlp_ao = self.get_ovlp()
ss, s = self.spin_square((mo_coeff[0][:,mo_occ[0]>0],
mo_coeff[1][:,mo_occ[1]>0]), ovlp_ao)
logger.debug(self, 'multiplicity <S^2> = %.8g 2S+1 = %.8g', ss, s)
return mo_occ
def _finalize(self):
uhf.UHF._finalize(self)
ea = numpy.hstack(self.mo_energy[0])
eb = numpy.hstack(self.mo_energy[1])
oa_sort = numpy.argsort(ea[self.mo_occ[0]>0 ].round(9))
va_sort = numpy.argsort(ea[self.mo_occ[0]==0].round(9))
ob_sort = numpy.argsort(eb[self.mo_occ[1]>0 ].round(9))
vb_sort = numpy.argsort(eb[self.mo_occ[1]==0].round(9))
self.mo_energy = (numpy.hstack((ea[self.mo_occ[0]>0 ][oa_sort],
ea[self.mo_occ[0]==0][va_sort])),
numpy.hstack((eb[self.mo_occ[1]>0 ][ob_sort],
eb[self.mo_occ[1]==0][vb_sort])))
ca = self.mo_coeff[0]
cb = self.mo_coeff[1]
self.mo_coeff = (numpy.hstack((ca[:,self.mo_occ[0]>0 ].take(oa_sort, axis=1),
ca[:,self.mo_occ[0]==0].take(va_sort, axis=1))),
numpy.hstack((cb[:,self.mo_occ[1]>0 ].take(ob_sort, axis=1),
cb[:,self.mo_occ[1]==0].take(vb_sort, axis=1))))
nocc_a = int(self.mo_occ[0].sum())
nocc_b = int(self.mo_occ[1].sum())
self.mo_occ[0][:nocc_a] = 1
self.mo_occ[0][nocc_a:] = 0
self.mo_occ[1][:nocc_b] = 1
self.mo_occ[1][nocc_b:] = 0
if self.chkfile:
chkfile.dump_scf(self.mol, self.chkfile, self.e_tot, self.mo_energy,
self.mo_coeff, self.mo_occ, overwrite_mol=True)
return self
def analyze(self, verbose=None, **kwargs):
if verbose is None: verbose = self.verbose
return analyze(self, verbose, **kwargs)
@lib.with_doc(get_irrep_nelec.__doc__)
def get_irrep_nelec(self, mol=None, mo_coeff=None, mo_occ=None, s=None):
if mol is None: mol = self.mol
if mo_occ is None: mo_occ = self.mo_occ
if mo_coeff is None: mo_coeff = self.mo_coeff
if s is None: s = self.get_ovlp()
return get_irrep_nelec(mol, mo_coeff, mo_occ, s)
canonicalize = canonicalize
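
if __name__ == '__main__':
    # Usage sketch, mirroring the example in the UHF class docstring above.
    from pyscf import gto
    mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz',
                symmetry=True, charge=1, spin=1, verbose=0)
    mf = UHF(mol)
    print(mf.scf())              # expected ~ -75.6239755 (see docstring)
    print(mf.get_irrep_nelec())  # e.g. {'A1': (3, 3), 'A2': (0, 0), ...}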
| 40.907029 | 113 | 0.542295 |
4f4a37537510da8d441ad30b15f70773fd2c8364 | 3,612 | py | Python | hatasmota/fan.py | emontnemery/hatasmota | c1d1629ee2e88d027741a474c9648de8d67ba817 | ["MIT"] | 19 | 2020-09-03T19:12:43.000Z | 2022-01-01T07:50:32.000Z | hatasmota/fan.py | emontnemery/hatasmota | c1d1629ee2e88d027741a474c9648de8d67ba817 | ["MIT"] | 41 | 2020-10-08T20:35:58.000Z | 2022-03-30T00:02:57.000Z | hatasmota/fan.py | emontnemery/hatasmota | c1d1629ee2e88d027741a474c9648de8d67ba817 | ["MIT"] | 11 | 2020-10-27T21:22:14.000Z | 2022-01-06T11:19:55.000Z |
"""Tasmota fan."""
from __future__ import annotations
import logging
from typing import Any
import attr
from .const import (
COMMAND_FANSPEED,
CONF_DEVICENAME,
CONF_MAC,
FAN_SPEED_HIGH,
FAN_SPEED_LOW,
FAN_SPEED_MEDIUM,
FAN_SPEED_OFF,
)
from .entity import (
TasmotaAvailability,
TasmotaAvailabilityConfig,
TasmotaEntity,
TasmotaEntityConfig,
)
from .mqtt import ReceiveMessage
from .utils import (
config_get_state_offline,
config_get_state_online,
get_topic_command,
get_topic_command_state,
get_topic_stat_result,
get_topic_tele_state,
get_topic_tele_will,
get_value_by_path,
)
SUPPORTED_FAN_SPEEDS = [FAN_SPEED_OFF, FAN_SPEED_LOW, FAN_SPEED_MEDIUM, FAN_SPEED_HIGH]
_LOGGER = logging.getLogger(__name__)
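
# Readable labels for the integer fan speeds (a local convenience sketch for
# illustration; not part of the upstream hatasmota API):
FAN_SPEED_LABELS = {
    FAN_SPEED_OFF: "off",
    FAN_SPEED_LOW: "low",
    FAN_SPEED_MEDIUM: "medium",
    FAN_SPEED_HIGH: "high",
}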
@attr.s(slots=True, frozen=True)
class TasmotaFanConfig(TasmotaAvailabilityConfig, TasmotaEntityConfig):
    """Tasmota fan configuration."""
command_topic: str = attr.ib()
result_topic: str = attr.ib()
state_topic: str = attr.ib()
@classmethod
def from_discovery_message(cls, config: dict, platform: str) -> TasmotaFanConfig:
"""Instantiate from discovery message."""
return cls(
endpoint="fan",
idx="ifan",
friendly_name=config[CONF_DEVICENAME],
mac=config[CONF_MAC],
platform=platform,
poll_payload="",
poll_topic=get_topic_command_state(config),
availability_topic=get_topic_tele_will(config),
availability_offline=config_get_state_offline(config),
availability_online=config_get_state_online(config),
command_topic=get_topic_command(config),
result_topic=get_topic_stat_result(config),
state_topic=get_topic_tele_state(config),
)
class TasmotaFan(TasmotaAvailability, TasmotaEntity):
"""Representation of a Tasmota fan."""
_cfg: TasmotaFanConfig
def __init__(self, **kwds: Any):
"""Initialize."""
self._sub_state: dict | None = None
super().__init__(**kwds)
async def subscribe_topics(self) -> None:
"""Subscribe to topics."""
def state_message_received(msg: ReceiveMessage) -> None:
"""Handle new MQTT state messages."""
if not self._on_state_callback:
return
fanspeed: int = get_value_by_path(msg.payload, [COMMAND_FANSPEED])
if fanspeed in SUPPORTED_FAN_SPEEDS:
self._on_state_callback(fanspeed)
availability_topics = self.get_availability_topics()
topics = {
"result_topic": {
"event_loop_safe": True,
"topic": self._cfg.result_topic,
"msg_callback": state_message_received,
},
"state_topic": {
"event_loop_safe": True,
"topic": self._cfg.state_topic,
"msg_callback": state_message_received,
},
}
topics = {**topics, **availability_topics}
self._sub_state = await self._mqtt_client.subscribe(
self._sub_state,
topics,
)
async def unsubscribe_topics(self) -> None:
"""Unsubscribe to all MQTT topics."""
self._sub_state = await self._mqtt_client.unsubscribe(self._sub_state)
def set_speed(self, fanspeed: int) -> None:
"""Set the fan's speed."""
payload = fanspeed
command = COMMAND_FANSPEED
self._mqtt_client.publish(
self._cfg.command_topic + command,
payload,
)
| 29.606557 | 87 | 0.637874 |
4f4b2b222baf4299a6b0082bb48f1904faeb627e | 181,816 | py | Python | NBA Project/venv/Lib/site-packages/plotly/graph_objs/scatter3d/__init__.py | EnriqueGambra/Most-Efficient-NBA-Players | ea67c28b5294dbc9713200a937deb9f4211ba754 | ["MIT"] | 1 | 2020-08-08T21:56:11.000Z | 2020-08-08T21:56:11.000Z | NBA Project/venv/Lib/site-packages/plotly/graph_objs/scatter3d/__init__.py | EnriqueGambra/Most-Efficient-NBA-Players | ea67c28b5294dbc9713200a937deb9f4211ba754 | ["MIT"] | 2 | 2021-03-31T19:54:17.000Z | 2021-06-02T02:33:56.000Z | NBA Project/venv/Lib/site-packages/plotly/graph_objs/scatter3d/__init__.py | EnriqueGambra/Most-Efficient-NBA-Players | ea67c28b5294dbc9713200a937deb9f4211ba754 | ["MIT"] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["colorsrc"] = v_textfont.ColorsrcValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["size"] = v_textfont.SizeValidator()
self._validators["sizesrc"] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
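# Usage sketch (added for illustration; the values are arbitrary, but every
# property used here is documented above). Kept as comments because live code
# at this point would import the package into itself:
#
#     import plotly.graph_objects as go
#     fig = go.Figure(
#         go.Scatter3d(
#             x=[0, 1], y=[0, 1], z=[0, 1],
#             mode="markers+text",
#             text=["start", "end"],
#             textfont=dict(family="Open Sans", size=14, color="crimson"),
#         )
#     )
#     fig.show()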
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Stream
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
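# Usage sketch (illustrative; the token below is a placeholder for a real
# stream id from https://plot.ly/settings):
#
#     import plotly.graph_objects as go
#     trace = go.Scatter3d(
#         x=[], y=[], z=[],
#         stream=dict(token="your-stream-token", maxpoints=50),
#     )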
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Projection(_BaseTraceHierarchyType):
# x
# -
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.projection.X
- A dict of string/value properties that will be passed
to the X constructor
Supported dict properties:
opacity
                    Sets the projection opacity.
scale
Sets the scale factor determining the size of
the projection marker points.
show
Sets whether or not projections are shown along
the x axis.
Returns
-------
plotly.graph_objs.scatter3d.projection.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.projection.Y
- A dict of string/value properties that will be passed
to the Y constructor
Supported dict properties:
opacity
                    Sets the projection opacity.
scale
Sets the scale factor determining the size of
the projection marker points.
show
Sets whether or not projections are shown along
the y axis.
Returns
-------
plotly.graph_objs.scatter3d.projection.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# z
# -
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.projection.Z
- A dict of string/value properties that will be passed
to the Z constructor
Supported dict properties:
opacity
                    Sets the projection opacity.
scale
Sets the scale factor determining the size of
the projection marker points.
show
Sets whether or not projections are shown along
the z axis.
Returns
-------
plotly.graph_objs.scatter3d.projection.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
x
plotly.graph_objects.scatter3d.projection.X instance or
dict with compatible properties
y
plotly.graph_objects.scatter3d.projection.Y instance or
dict with compatible properties
z
plotly.graph_objects.scatter3d.projection.Z instance or
dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Projection
x
plotly.graph_objects.scatter3d.projection.X instance or
dict with compatible properties
y
plotly.graph_objects.scatter3d.projection.Y instance or
dict with compatible properties
z
plotly.graph_objects.scatter3d.projection.Z instance or
dict with compatible properties
Returns
-------
Projection
"""
super(Projection, self).__init__("projection")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Projection
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Projection"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import projection as v_projection
# Initialize validators
# ---------------------
self._validators["x"] = v_projection.XValidator()
self._validators["y"] = v_projection.YValidator()
self._validators["z"] = v_projection.ZValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("z", None)
self["z"] = z if z is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
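# Usage sketch (illustrative values): project the markers onto the x and z
# walls of the scene.
#
#     import plotly.graph_objects as go
#     trace = go.Scatter3d(
#         x=[1, 2, 3], y=[1, 2, 3], z=[0, 1, 4],
#         mode="markers",
#         projection=dict(
#             x=dict(show=True, opacity=0.4, scale=0.7),
#             z=dict(show=True),
#         ),
#     )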
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
        `marker.colorscale`. Has an effect only if `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
        bounds set in `marker.cmin` and `marker.cmax`. Has an effect
        only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
        `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
        effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
        `marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
        Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scatter3d.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                    Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                    Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                    last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                    Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.scatter3d.marke
r.colorbar.Tickformatstop instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scatter3d.marker.colorbar.tickformatstopdefau
lts), sets the default property values to use
for elements of
scatter3d.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.scatter3d.marker.colorbar.
Title instance or dict with compatible
properties
titlefont
Deprecated: Please use
scatter3d.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter3d.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                    Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scatter3d.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
        Sets the colorscale. Has an effect only if `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
        use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
        may be a palette name string of the following list: Greys,
        YlGnBu, Greens, YlOrRd, Bluered, RdBu, Reds, Blues, Picnic,
        Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric,
        Viridis, Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
                    effect only if `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
                    `marker.line.cmin` and `marker.line.cmax`. Has
                    an effect only if `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
                    an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
                    point. Has an effect only if
                    `marker.line.color` is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
                    an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
                    Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
                    Sets the colorscale. Has an effect only if
                    `marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
                    color space, use `marker.line.cmin` and
                    `marker.line.cmax`. Alternatively, `colorscale`
                    may be a palette name string of the following
                    list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
                    RdBu, Reds, Blues, Picnic, Rainbow, Portland,
                    Jet, Hot, Blackbody, Earth, Electric, Viridis,
                    Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
reversescale
Reverses the color mapping if true. Has an
                    effect only if `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
Returns
-------
plotly.graph_objs.scatter3d.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity. Note that the marker opacity for
scatter3d traces must be a scalar value for performance
reasons. To set a blending opacity value (i.e. which is not
transparent), set "marker.color" to an rgba color and use its
alpha channel.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
        Reverses the color mapping if true. Has an effect only if
        `marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
        trace. Has an effect only if `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'circle-open', 'square', 'square-open',
'diamond', 'diamond-open', 'cross', 'x']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on plot.ly for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if
            `marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`.
            Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.scatter3d.marker.ColorBar instance
or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on plot.ly for color .
line
plotly.graph_objects.scatter3d.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity. Note that the marker opacity
for scatter3d traces must be a scalar value for
performance reasons. To set a blending opacity value
(i.e. which is not transparent), set "marker.color" to
an rgba color and use its alpha channel.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type.
symbolsrc
Sets the source reference on plot.ly for symbol .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if
            `marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`.
            Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.scatter3d.marker.ColorBar instance
or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
            use `marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys, YlGnBu, Greens, YlOrRd, Bluered,
            RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot,
            Blackbody, Earth, Electric, Viridis, Cividis.
colorsrc
Sets the source reference on plot.ly for color .
line
plotly.graph_objects.scatter3d.marker.Line instance or
dict with compatible properties
opacity
Sets the marker opacity. Note that the marker opacity
for scatter3d traces must be a scalar value for
performance reasons. To set a blending opacity value
(i.e. which is not transparent), set "marker.color" to
an rgba color and use its alpha channel.
reversescale
Reverses the color mapping if true. Has an effect only
            if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size .
symbol
Sets the marker symbol type.
symbolsrc
Sets the source reference on plot.ly for symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Marker
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import marker as v_marker
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_marker.AutocolorscaleValidator()
self._validators["cauto"] = v_marker.CautoValidator()
self._validators["cmax"] = v_marker.CmaxValidator()
self._validators["cmid"] = v_marker.CmidValidator()
self._validators["cmin"] = v_marker.CminValidator()
self._validators["color"] = v_marker.ColorValidator()
self._validators["coloraxis"] = v_marker.ColoraxisValidator()
self._validators["colorbar"] = v_marker.ColorBarValidator()
self._validators["colorscale"] = v_marker.ColorscaleValidator()
self._validators["colorsrc"] = v_marker.ColorsrcValidator()
self._validators["line"] = v_marker.LineValidator()
self._validators["opacity"] = v_marker.OpacityValidator()
self._validators["reversescale"] = v_marker.ReversescaleValidator()
self._validators["showscale"] = v_marker.ShowscaleValidator()
self._validators["size"] = v_marker.SizeValidator()
self._validators["sizemin"] = v_marker.SizeminValidator()
self._validators["sizemode"] = v_marker.SizemodeValidator()
self._validators["sizeref"] = v_marker.SizerefValidator()
self._validators["sizesrc"] = v_marker.SizesrcValidator()
self._validators["symbol"] = v_marker.SymbolValidator()
self._validators["symbolsrc"] = v_marker.SymbolsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("opacity", None)
self["opacity"] = opacity if opacity is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizemin", None)
self["sizemin"] = sizemin if sizemin is not None else _v
_v = arg.pop("sizemode", None)
self["sizemode"] = sizemode if sizemode is not None else _v
_v = arg.pop("sizeref", None)
self["sizeref"] = sizeref if sizeref is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
_v = arg.pop("symbol", None)
self["symbol"] = symbol if symbol is not None else _v
_v = arg.pop("symbolsrc", None)
self["symbolsrc"] = symbolsrc if symbolsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
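# Usage sketch (illustrative values): a numeric `color` array mapped through
# a named colorscale, with the colorbar displayed.
#
#     import plotly.graph_objects as go
#     trace = go.Scatter3d(
#         x=[0, 1, 2], y=[0, 1, 2], z=[0, 1, 2],
#         mode="markers",
#         marker=dict(
#             size=6,
#             color=[0.1, 0.5, 0.9],   # numeric array -> colorscale mapping
#             colorscale="Viridis",
#             cmin=0.0,
#             cmax=1.0,
#             showscale=True,
#         ),
#     )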
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
        `line.colorscale`. Has an effect only if `line.color` is set
to a numerical array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `line.color`) or the bounds
        set in `line.cmin` and `line.cmax`. Has an effect only if
        `line.color` is set to a numerical array. Defaults to `false`
when `line.cmin` and `line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
        `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmin` must
be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `line.cmin`
and/or `line.cmax` to be equidistant to this point. Has an
        effect only if `line.color` is set to a numerical array.
Value should have the same units as in `line.color`. Has no
effect when `line.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
        `line.color` is set to a numerical array. Value should have
the same units as in `line.color` and if set, `line.cmax` must
be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
        Sets the line color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to `line.cmin`
and `line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scatter3d.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.line.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                    Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                    last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                    Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.scatter3d.line.
colorbar.Tickformatstop instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scatter3d.line.colorbar.tickformatstopdefault
s), sets the default property values to use for
elements of
scatter3d.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
                    plotly.graph_objects.scatter3d.line.colorbar.Title
                    instance or dict with compatible properties
titlefont
Deprecated: Please use
scatter3d.line.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter3d.line.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                    Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scatter3d.line.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
        Sets the colorscale. Has an effect only if `line.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use`line.cmin` and `line.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list: Greys,YlGnBu,Gr
eens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet
,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the dash style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
Returns
-------
Any
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
        Reverses the color mapping if true. Has an effect only if
        `line.color` is set to a numerical array. If true, `line.cmin`
will correspond to the last color in the array and `line.cmax`
will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
        trace. Has an effect only if `line.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `line.colorscale`. Has an effect only if
            `line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
            or the bounds set in `line.cmin` and `line.cmax`. Has
            an effect only if `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
            this point. Has an effect only if `line.color` is set
to a numerical array. Value should have the same units
as in `line.color`. Has no effect when `line.cauto` is
`false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
            Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.scatter3d.line.ColorBar instance
or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `line.color` is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named color
string. At minimum, a mapping for the lowest (0) and
highest (1) values are required. For example, `[[0,
'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`line.cmin`
and `line.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorsrc
Sets the source reference on plot.ly for color .
dash
Sets the dash style of the lines.
reversescale
Reverses the color mapping if true. Has an effect only
            if `line.color` is set to a numerical array. If true,
`line.cmin` will correspond to the last color in the
array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `line.color` is set
to a numerical array.
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
dash=None,
reversescale=None,
showscale=None,
width=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Line
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `line.colorscale`. Has an effect only if
            `line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `line.color`)
            or the bounds set in `line.cmin` and `line.cmax`. Has
            an effect only if `line.color` is set to a numerical
array. Defaults to `false` when `line.cmin` and
`line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`line.cmin` and/or `line.cmax` to be equidistant to
            this point. Has an effect only if `line.color` is set
to a numerical array. Value should have the same units
as in `line.color`. Has no effect when `line.cauto` is
`false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if `line.color` is set to a numerical array.
Value should have the same units as in `line.color` and
if set, `line.cmax` must be set as well.
color
            Sets the line color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `line.cmin` and `line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.scatter3d.line.ColorBar instance
or dict with compatible properties
colorscale
            Sets the colorscale. Has an effect only if
            `line.color` is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named color
string. At minimum, a mapping for the lowest (0) and
highest (1) values are required. For example, `[[0,
'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`line.cmin`
and `line.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorsrc
Sets the source reference on plot.ly for color .
dash
Sets the dash style of the lines.
reversescale
Reverses the color mapping if true. Has an effect only
            if `line.color` is set to a numerical array. If true,
`line.cmin` will correspond to the last color in the
array and `line.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if `line.color` is set
to a numerical array.
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Line
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import line as v_line
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_line.AutocolorscaleValidator()
self._validators["cauto"] = v_line.CautoValidator()
self._validators["cmax"] = v_line.CmaxValidator()
self._validators["cmid"] = v_line.CmidValidator()
self._validators["cmin"] = v_line.CminValidator()
self._validators["color"] = v_line.ColorValidator()
self._validators["coloraxis"] = v_line.ColoraxisValidator()
self._validators["colorbar"] = v_line.ColorBarValidator()
self._validators["colorscale"] = v_line.ColorscaleValidator()
self._validators["colorsrc"] = v_line.ColorsrcValidator()
self._validators["dash"] = v_line.DashValidator()
self._validators["reversescale"] = v_line.ReversescaleValidator()
self._validators["showscale"] = v_line.ShowscaleValidator()
self._validators["width"] = v_line.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("dash", None)
self["dash"] = dash if dash is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
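# Illustrative usage sketch (not part of the generated module): the Line
# object above can be constructed directly instead of via a dict; both
# forms are accepted by the Scatter3d constructor.
#
#     import plotly.graph_objects as go
#     line = go.scatter3d.Line(color="royalblue", width=6, dash="dot")
#     fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1],
#                                  mode="lines", line=line))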
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
        two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.scatter3d.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.scatter3d.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.Hoverlabel
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
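# Illustrative usage sketch (not part of the generated module): styling
# hover labels on a trace via this class's dict form. Values are made up.
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scatter3d(
#         x=[0, 1], y=[0, 1], z=[0, 1], mode="markers",
#         hoverlabel=dict(bgcolor="white", bordercolor="black",
#                         font=dict(size=14), align="left"),
#     ))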
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorZ(_BaseTraceHierarchyType):
# array
# -----
@property
def array(self):
"""
        Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
        Sets the data corresponding to the length of each error bar in
        the bottom (left) direction for vertical (horizontal) bars.
        Values are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on plot.ly for arrayminus .
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on plot.ly for array .
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
        Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
        in both directions (top/bottom for vertical bars, left/right
        for horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
        The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
        The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
        "constant", the bar lengths are of a constant value. Set this
        constant in `value`. If "percent", the bar lengths correspond
        to a percentage of underlying data. Set this percentage in
        `value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical
            bars, left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorZ object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.ErrorZ
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical
            bars, left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorZ
"""
super(ErrorZ, self).__init__("error_z")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.ErrorZ
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.ErrorZ"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import error_z as v_error_z
# Initialize validators
# ---------------------
self._validators["array"] = v_error_z.ArrayValidator()
self._validators["arrayminus"] = v_error_z.ArrayminusValidator()
self._validators["arrayminussrc"] = v_error_z.ArrayminussrcValidator()
self._validators["arraysrc"] = v_error_z.ArraysrcValidator()
self._validators["color"] = v_error_z.ColorValidator()
self._validators["symmetric"] = v_error_z.SymmetricValidator()
self._validators["thickness"] = v_error_z.ThicknessValidator()
self._validators["traceref"] = v_error_z.TracerefValidator()
self._validators["tracerefminus"] = v_error_z.TracerefminusValidator()
self._validators["type"] = v_error_z.TypeValidator()
self._validators["value"] = v_error_z.ValueValidator()
self._validators["valueminus"] = v_error_z.ValueminusValidator()
self._validators["visible"] = v_error_z.VisibleValidator()
self._validators["width"] = v_error_z.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
self["array"] = array if array is not None else _v
_v = arg.pop("arrayminus", None)
self["arrayminus"] = arrayminus if arrayminus is not None else _v
_v = arg.pop("arrayminussrc", None)
self["arrayminussrc"] = arrayminussrc if arrayminussrc is not None else _v
_v = arg.pop("arraysrc", None)
self["arraysrc"] = arraysrc if arraysrc is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("symmetric", None)
self["symmetric"] = symmetric if symmetric is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("traceref", None)
self["traceref"] = traceref if traceref is not None else _v
_v = arg.pop("tracerefminus", None)
self["tracerefminus"] = tracerefminus if tracerefminus is not None else _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valueminus", None)
self["valueminus"] = valueminus if valueminus is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
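# Illustrative usage sketch (not part of the generated module): attaching
# asymmetric z error bars to a trace. With symmetric=False, `array` gives
# the upward lengths and `arrayminus` the downward ones. Values are made up.
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scatter3d(
#         x=[0, 1], y=[0, 1], z=[1.0, 2.0], mode="markers",
#         error_z=dict(type="data", symmetric=False,
#                      array=[0.2, 0.3], arrayminus=[0.1, 0.1]),
#     ))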
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorY(_BaseTraceHierarchyType):
# array
# -----
@property
def array(self):
"""
        Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
        Sets the data corresponding to the length of each error bar in
        the bottom (left) direction for vertical (horizontal) bars.
        Values are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on plot.ly for arrayminus .
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on plot.ly for array .
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
        Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# copy_zstyle
# -----------
@property
def copy_zstyle(self):
"""
The 'copy_zstyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_zstyle"]
@copy_zstyle.setter
def copy_zstyle(self, val):
self["copy_zstyle"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
        in both directions (top/bottom for vertical bars, left/right
        for horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
        The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
        The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
        "constant", the bar lengths are of a constant value. Set this
        constant in `value`. If "percent", the bar lengths correspond
        to a percentage of underlying data. Set this percentage in
        `value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
copy_zstyle
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
            data. If "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_zstyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.ErrorY
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
copy_zstyle
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
            data. If "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super(ErrorY, self).__init__("error_y")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.ErrorY
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.ErrorY"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import error_y as v_error_y
# Initialize validators
# ---------------------
self._validators["array"] = v_error_y.ArrayValidator()
self._validators["arrayminus"] = v_error_y.ArrayminusValidator()
self._validators["arrayminussrc"] = v_error_y.ArrayminussrcValidator()
self._validators["arraysrc"] = v_error_y.ArraysrcValidator()
self._validators["color"] = v_error_y.ColorValidator()
self._validators["copy_zstyle"] = v_error_y.CopyZstyleValidator()
self._validators["symmetric"] = v_error_y.SymmetricValidator()
self._validators["thickness"] = v_error_y.ThicknessValidator()
self._validators["traceref"] = v_error_y.TracerefValidator()
self._validators["tracerefminus"] = v_error_y.TracerefminusValidator()
self._validators["type"] = v_error_y.TypeValidator()
self._validators["value"] = v_error_y.ValueValidator()
self._validators["valueminus"] = v_error_y.ValueminusValidator()
self._validators["visible"] = v_error_y.VisibleValidator()
self._validators["width"] = v_error_y.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
self["array"] = array if array is not None else _v
_v = arg.pop("arrayminus", None)
self["arrayminus"] = arrayminus if arrayminus is not None else _v
_v = arg.pop("arrayminussrc", None)
self["arrayminussrc"] = arrayminussrc if arrayminussrc is not None else _v
_v = arg.pop("arraysrc", None)
self["arraysrc"] = arraysrc if arraysrc is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("copy_zstyle", None)
self["copy_zstyle"] = copy_zstyle if copy_zstyle is not None else _v
_v = arg.pop("symmetric", None)
self["symmetric"] = symmetric if symmetric is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("traceref", None)
self["traceref"] = traceref if traceref is not None else _v
_v = arg.pop("tracerefminus", None)
self["tracerefminus"] = tracerefminus if tracerefminus is not None else _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valueminus", None)
self["valueminus"] = valueminus if valueminus is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
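# A minimal usage sketch (not part of the generated module). ErrorY is
# normally attached to a parent trace rather than used standalone; the
# data values below are illustrative only:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(
#       go.Scatter3d(
#           x=[1, 2], y=[3, 4], z=[5, 6],
#           error_y=go.scatter3d.ErrorY(type="data", array=[0.1, 0.2]),
#       )
#   )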
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorX(_BaseTraceHierarchyType):
# array
# -----
@property
def array(self):
"""
Sets the data corresponding the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on plot.ly for arrayminus .
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on plot.ly for array .
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
        Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# copy_zstyle
# -----------
@property
def copy_zstyle(self):
"""
The 'copy_zstyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_zstyle"]
@copy_zstyle.setter
def copy_zstyle(self, val):
self["copy_zstyle"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
        Determines whether or not the error bars have the same length
        in both directions (top/bottom for vertical bars, left/right for
        horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
        The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
        The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
        Determines the rule used to generate the error bars. If
        "constant", the bar lengths are of a constant value. Set this
        constant in `value`. If "percent", the bar lengths correspond
        to a percentage of underlying data. Set this percentage in
        `value`. If "sqrt", the bar lengths correspond to the square of
        the underlying data. If "data", the bar lengths are set with
        data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "scatter3d"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
copy_zstyle
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
            data. If "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_zstyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorX object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.scatter3d.ErrorX
array
Sets the data corresponding the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on plot.ly for arrayminus .
arraysrc
Sets the source reference on plot.ly for array .
color
            Sets the stroke color of the error bars.
copy_zstyle
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square of the underlying
            data. If "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorX
"""
super(ErrorX, self).__init__("error_x")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.ErrorX
constructor must be a dict or
an instance of plotly.graph_objs.scatter3d.ErrorX"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.scatter3d import error_x as v_error_x
# Initialize validators
# ---------------------
self._validators["array"] = v_error_x.ArrayValidator()
self._validators["arrayminus"] = v_error_x.ArrayminusValidator()
self._validators["arrayminussrc"] = v_error_x.ArrayminussrcValidator()
self._validators["arraysrc"] = v_error_x.ArraysrcValidator()
self._validators["color"] = v_error_x.ColorValidator()
self._validators["copy_zstyle"] = v_error_x.CopyZstyleValidator()
self._validators["symmetric"] = v_error_x.SymmetricValidator()
self._validators["thickness"] = v_error_x.ThicknessValidator()
self._validators["traceref"] = v_error_x.TracerefValidator()
self._validators["tracerefminus"] = v_error_x.TracerefminusValidator()
self._validators["type"] = v_error_x.TypeValidator()
self._validators["value"] = v_error_x.ValueValidator()
self._validators["valueminus"] = v_error_x.ValueminusValidator()
self._validators["visible"] = v_error_x.VisibleValidator()
self._validators["width"] = v_error_x.WidthValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
self["array"] = array if array is not None else _v
_v = arg.pop("arrayminus", None)
self["arrayminus"] = arrayminus if arrayminus is not None else _v
_v = arg.pop("arrayminussrc", None)
self["arrayminussrc"] = arrayminussrc if arrayminussrc is not None else _v
_v = arg.pop("arraysrc", None)
self["arraysrc"] = arraysrc if arraysrc is not None else _v
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("copy_zstyle", None)
self["copy_zstyle"] = copy_zstyle if copy_zstyle is not None else _v
_v = arg.pop("symmetric", None)
self["symmetric"] = symmetric if symmetric is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("traceref", None)
self["traceref"] = traceref if traceref is not None else _v
_v = arg.pop("tracerefminus", None)
self["tracerefminus"] = tracerefminus if tracerefminus is not None else _v
_v = arg.pop("type", None)
self["type"] = type if type is not None else _v
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
_v = arg.pop("valueminus", None)
self["valueminus"] = valueminus if valueminus is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.scatter3d import projection
from plotly.graph_objs.scatter3d import marker
from plotly.graph_objs.scatter3d import line
from plotly.graph_objs.scatter3d import hoverlabel
| 36.406888 | 87 | 0.556442 |
4f49d6efff3b5cee84e7ecd9219bb654ee2406ab | 1,202 | py | Python | src/visions/visualisation/plot_circular_packing.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | [
"MIT"
] | null | null | null | src/visions/visualisation/plot_circular_packing.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | [
"MIT"
] | null | null | null | src/visions/visualisation/plot_circular_packing.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | [
"MIT"
] | null | null | null | import json
import re
from pathlib import Path
import networkx as nx
from networkx.readwrite import json_graph
from visions.core.implementations.typesets import visions_complete_set
def update(data):
data["name"] = data.pop("id")
if "children" not in data:
data["size"] = 1
else:
data["children"] = [update(child) for child in data["children"]]
return data
def write_json(data):
with Path("typesets/typeset_complete_base.json").open("w") as f:
json.dump(data, f)
def write_html(data):
string = f"\n\troot = {json.dumps(data)}\n\t"
file_name = Path("circular_packing.html")
fc = file_name.read_text()
fc = re.sub(
r"// START-REPLACE(.*)// END-REPLACE",
f"// START-REPLACE{string}// END-REPLACE",
fc,
flags=re.MULTILINE | re.DOTALL,
)
file_name.write_text(fc)
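# Note: write_html assumes circular_packing.html already contains a marker
# block of the form below; the regex replaces the whole block with the
# freshly serialized tree (the JSON shown is illustrative):
#
#   // START-REPLACE
#   root = {"name": "visions_generic", "children": [...]}
#   // END-REPLACE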
def main():
typeset = visions_complete_set()
graph = typeset.base_graph.copy()
nx.relabel_nodes(graph, {n: str(n) for n in graph.nodes}, copy=False)
data = json_graph.tree_data(graph, root="visions_generic")
data = update(data)
write_json(data)
write_html(data)
if __name__ == "__main__":
main()
| 22.259259 | 73 | 0.648087 |
4f49c3d081e63f390fbd919d518203ec78ef6c39 | 142 | py | Python | src/world/world_listener.py | vt-sailbot/sailbot-19 | a9252e9415d3fef62e4baf813306bdc07fb69781 | [
"MIT"
] | 1 | 2019-05-29T19:32:48.000Z | 2019-05-29T19:32:48.000Z | src/world/world_listener.py | vt-sailbot/sailbot-20 | b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52 | [
"MIT"
] | 5 | 2019-08-25T21:01:18.000Z | 2020-09-04T02:56:40.000Z | src/world/world_listener.py | vt-sailbot/sailbot-20 | b5d75cb82e4bc3e9c4e428a288c6ac98a4aa2c52 | [
"MIT"
] | null | null | null | from threading import Thread
"""Listens for asynchronous sensor data and creates new World states"""
class WorldListener(Thread):
pass
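# A hypothetical concrete listener might look like the sketch below; the
# sensor queue and World APIs are assumptions for illustration, not part
# of this module:
#
#   class SensorWorldListener(WorldListener):
#       def __init__(self, sensor_queue, world):
#           super().__init__(daemon=True)
#           self.sensor_queue = sensor_queue
#           self.world = world
#
#       def run(self):
#           while True:
#               reading = self.sensor_queue.get()  # blocks until data arrives
#               self.world.update(reading)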
| 17.75 | 71 | 0.767606 |
4f4d2058ef6ed1b434831351c0c126a4fc18e829 | 2,923 | py | Python | tests/unit/testplan/testing/multitest/driver/test_http.py | armarti/testplan | 5dcfe5840c0c99e9535cc223230f400fa62802f2 | [
"Apache-2.0"
] | null | null | null | tests/unit/testplan/testing/multitest/driver/test_http.py | armarti/testplan | 5dcfe5840c0c99e9535cc223230f400fa62802f2 | [
"Apache-2.0"
] | 64 | 2019-04-15T20:56:40.000Z | 2021-03-23T01:00:30.000Z | tests/unit/testplan/testing/multitest/driver/test_http.py | armarti/testplan | 5dcfe5840c0c99e9535cc223230f400fa62802f2 | [
"Apache-2.0"
] | null | null | null | """Unit tests for the HTTPServer and HTTPClient drivers."""
import uuid
import time
import requests
import pytest
from testplan.testing.multitest.driver.http import HTTPServer, HTTPResponse, HTTPClient
def create_server(name, host, port):
server = HTTPServer(name=name,
host=host,
port=port)
server.start()
server._wait_started()
return server
def create_client(name, host, port, timeout):
client = HTTPClient(name=name,
host=host,
port=port,
timeout=timeout)
client.start()
client._wait_started()
return client
class TestHTTP(object):
def setup_method(self, method):
self.server = create_server('http_server', 'localhost', 0)
self.client = create_client(
'http_client',
self.server.host,
self.server.port,
10
)
def teardown_method(self, method):
for device in [self.server, self.client]:
device.stop()
device._wait_stopped()
@pytest.mark.parametrize(
'method',
('get', 'post', 'put', 'delete', 'patch', 'options')
)
def test_server_method_request(self, method):
text = str(uuid.uuid4())
res = HTTPResponse(content=[text])
self.server.queue_response(res)
method_request = getattr(requests, method)
url = 'http://{}:{}/'.format(self.server.host, self.server.port)
r = method_request(url)
assert requests.codes.ok == r.status_code
assert 'text/plain' == r.headers['content-type']
assert text == r.text.strip('\n')
def test_wait_for_response(self):
# Send HTTP request
self.client.get('random/text')
# Send response
wait = 0.2
time.sleep(wait)
text = str(uuid.uuid4())
res = HTTPResponse(content=[text])
self.server.respond(res)
# Receive response
r = self.client.receive()
# Verify response
assert requests.codes.ok == r.status_code
assert 'text/plain' == r.headers['Content-type']
assert text == r.text
@pytest.mark.parametrize(
'method',
('get', 'post', 'put', 'delete', 'patch', 'options')
)
def test_client_method_request(self, method):
method_request = getattr(self.client, method)
method_request('random/text')
text = str(uuid.uuid4())
res = HTTPResponse(content=[text])
self.server.queue_response(res)
r = self.client.receive()
assert requests.codes.ok == r.status_code
assert 'text/plain' == r.headers['content-type']
assert text == r.text.strip('\n')
def test_client_flush(self):
self.client.get('random/text')
self.client.flush()
msg = self.client.receive()
        assert msg is None
| 27.838095 | 87 | 0.583989 |
4f49da3fda428bc7b5ff13ea5b9c08a4557abdf0 | 780 | py | Python | Chapter05/mobilenet_keras.py | Wenhan-Zhang-327/hands-on-deep-learning-code | 61ae6aea8618093c7abf44c2fe00b3d1e6e2d3c8 | [
"MIT"
] | 18 | 2019-05-19T03:18:39.000Z | 2022-01-30T16:40:20.000Z | Chapter05/mobilenet_keras.py | Wenhan-Zhang-327/hands-on-deep-learning-code | 61ae6aea8618093c7abf44c2fe00b3d1e6e2d3c8 | [
"MIT"
] | null | null | null | Chapter05/mobilenet_keras.py | Wenhan-Zhang-327/hands-on-deep-learning-code | 61ae6aea8618093c7abf44c2fe00b3d1e6e2d3c8 | [
"MIT"
] | 9 | 2019-08-10T06:49:33.000Z | 2022-02-19T13:21:55.000Z | # MobileNEt
import keras
from keras.preprocessing import image
from keras.applications import imagenet_utils
from keras.applications.mobilenet import preprocess_input
from keras.models import Model
import numpy as np
import argparse
import matplotlib.pyplot as plt
model = keras.applications.mobilenet.MobileNet(weights = 'imagenet')
parser = argparse.ArgumentParser()
parser.add_argument('--im_path', type = str, help = 'path to the image')
args = parser.parse_args()
# adding the path to image
IM_PATH = args.im_path
img = image.load_img(IM_PATH, target_size = (224, 224))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
img = preprocess_input(img)
prediction = model.predict(img)
output = imagenet_utils.decode_predictions(prediction)
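# decode_predictions returns one list per batch image of
# (class_id, class_name, probability) tuples; for example (values are
# illustrative): [[('n02123045', 'tabby', 0.52), ...]]
# Example invocation: python mobilenet_keras.py --im_path cat.jpg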
print(output) | 23.636364 | 72 | 0.784615 |
4f4d742e55cbc44ec6a8cbd370757f185e7e22a5 | 19,503 | py | Python | tests/functional/coercers/test_coercer_list_with_default_non_null_int_field.py | davestone/tartiflette | 7aa91f77852455c214413de6b0384102dfbb9b47 | [
"MIT"
] | 530 | 2019-06-04T11:45:36.000Z | 2022-03-31T09:29:56.000Z | tests/functional/coercers/test_coercer_list_with_default_non_null_int_field.py | davestone/tartiflette | 7aa91f77852455c214413de6b0384102dfbb9b47 | [
"MIT"
] | 242 | 2019-06-04T11:53:08.000Z | 2022-03-28T07:06:27.000Z | tests/functional/coercers/test_coercer_list_with_default_non_null_int_field.py | mkniewallner/tartiflette | e292c28ed4fa279ecedb8980fc3741965bd28c87 | [
"MIT"
] | 36 | 2019-06-21T06:40:27.000Z | 2021-11-04T13:11:16.000Z | import pytest
from tests.functional.coercers.common import resolve_list_field
@pytest.mark.asyncio
@pytest.mark.ttftt_engine(
name="coercion",
resolvers={"Query.listWithDefaultNonNullIntField": resolve_list_field},
)
@pytest.mark.parametrize(
"query,variables,expected",
[
(
"""query { listWithDefaultNonNullIntField }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[123459]"}},
),
(
"""query { listWithDefaultNonNullIntField(param: null) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query { listWithDefaultNonNullIntField(param: [null]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Int! > must not be null.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 40}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query { listWithDefaultNonNullIntField(param: 10) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13]"}},
),
(
"""query { listWithDefaultNonNullIntField(param: [10]) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13]"}},
),
(
"""query { listWithDefaultNonNullIntField(param: [10, null]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < param > of non-null type < Int! > must not be null.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 40}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[123459]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = null) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = null) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = null) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = null) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [null]) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < [null] >.",
"path": None,
"locations": [{"line": 1, "column": 25}],
}
],
},
),
(
"""query ($param: [Int!] = [null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = 30) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[33]"}},
),
(
"""query ($param: [Int!] = 30) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = 30) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = 30) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30]) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[33]"}},
),
(
"""query ($param: [Int!] = [30]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [30]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid default value < [30, null] >.",
"path": None,
"locations": [{"line": 1, "column": 25}],
}
],
},
),
(
"""query ($param: [Int!] = [30, null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!] = [30, null]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [Int!]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [Int!]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[123459]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[None]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < Int! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20, None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [20, None] >; Expected non-nullable type < Int! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $param > of required type < [Int!]! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": None},
{
"data": None,
"errors": [
{
"message": "Variable < $param > of non-null type < [Int!]! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [None] >; Expected non-nullable type < Int! > not to be null at value[0].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20]},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[23]"}},
),
(
"""query ($param: [Int!]!) { listWithDefaultNonNullIntField(param: $param) }""",
{"param": [20, None]},
{
"data": None,
"errors": [
{
"message": "Variable < $param > got invalid value < [20, None] >; Expected non-nullable type < Int! > not to be null at value[1].",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
None,
{
"data": {"listWithDefaultNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 60}],
}
],
},
),
(
"""query ($item: Int) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listWithDefaultNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 60}],
}
],
},
),
(
"""query ($item: Int) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int = null) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
None,
{
"data": {"listWithDefaultNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 67}],
}
],
},
),
(
"""query ($item: Int = null) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listWithDefaultNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 67}],
}
],
},
),
(
"""query ($item: Int = null) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int = 30) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
None,
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13-33]"}},
),
(
"""query ($item: Int = 30) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": {"listWithDefaultNonNullIntField": None},
"errors": [
{
"message": "Argument < param > has invalid value < [10, $item] >.",
"path": ["listWithDefaultNonNullIntField"],
"locations": [{"line": 1, "column": 65}],
}
],
},
),
(
"""query ($item: Int = 30) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13-23]"}},
),
(
"""query ($item: Int!) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
None,
{
"data": None,
"errors": [
{
"message": "Variable < $item > of required type < Int! > was not provided.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int!) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": None},
{
"data": None,
"errors": [
{
"message": "Variable < $item > of non-null type < Int! > must not be null.",
"path": None,
"locations": [{"line": 1, "column": 8}],
}
],
},
),
(
"""query ($item: Int!) { listWithDefaultNonNullIntField(param: [10, $item]) }""",
{"item": 20},
{"data": {"listWithDefaultNonNullIntField": "SUCCESS-[13-23]"}},
),
],
)
async def test_coercion_list_with_default_non_null_int_field(
engine, query, variables, expected
):
assert await engine.execute(query, variables=variables) == expected
| 39.559838 | 155 | 0.409783 |
4f4cd28ef2e3c3f7a7abb5edebe11be92260b98e | 8,272 | py | Python | development/OCCUtils/types_lut.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | 7 | 2021-04-07T06:31:58.000Z | 2022-01-27T09:49:51.000Z | development/OCCUtils/types_lut.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | null | null | null | development/OCCUtils/types_lut.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | 2 | 2021-05-19T03:39:04.000Z | 2021-08-12T08:20:19.000Z | ##Copyright 2008-2015 Jelle Feringa (jelleferinga@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>
from OCC.Core.BRepCheck import *
from OCC.Core.GeomAbs import *
from OCC.Core.TopoDS import topods, TopoDS_Shape
from OCC.Core.BRep import BRep_Tool_Surface
from OCC.Core.TopAbs import *
from OCC.Core.Geom import Geom_CylindricalSurface, Geom_Plane
class ShapeToTopology(object):
'''
looks up the topology type and returns the corresponding topological entity
'''
def __init__(self):
self.topoTypes = {TopAbs_VERTEX: topods.Vertex,
TopAbs_EDGE: topods.Edge,
TopAbs_FACE: topods.Face,
TopAbs_WIRE: topods.Wire,
TopAbs_SHELL: topods.Shell,
TopAbs_SOLID: topods.Solid,
TopAbs_COMPOUND: topods.Compound,
TopAbs_COMPSOLID: topods.CompSolid,
}
def __call__(self, shape):
if isinstance(shape, TopoDS_Shape):
return self.topoTypes[shape.ShapeType()](shape)
else:
raise AttributeError('shape has not method `ShapeType`')
def __getitem__(self, item):
return self(item)
class EnumLookup(object):
"""
perform bi-directional lookup of Enums'...
"""
def __init__(self, li_in, li_out):
self.d = {}
for a, b in zip(li_in, li_out):
self.d[a] = b
self.d[b] = a
def __getitem__(self, item):
return self.d[item]
_curve_typesA = (GeomAbs_Line, GeomAbs_Circle, GeomAbs_Ellipse,
GeomAbs_Hyperbola, GeomAbs_Parabola,
GeomAbs_BezierCurve, GeomAbs_BSplineCurve, GeomAbs_OtherCurve)
_curve_typesB = ('line', 'circle', 'ellipse', 'hyperbola', 'parabola',
'bezier', 'spline', 'other')
_surface_typesA = (GeomAbs_Plane, GeomAbs_Cylinder, GeomAbs_Cone,
GeomAbs_Sphere, GeomAbs_Torus, GeomAbs_BezierSurface,
GeomAbs_BSplineSurface, GeomAbs_SurfaceOfRevolution,
GeomAbs_SurfaceOfExtrusion,
GeomAbs_OffsetSurface, GeomAbs_OtherSurface)
_surface_typesB = ('plane', 'cylinder', 'cone', 'sphere', 'torus', 'bezier',
'spline', 'revolution', 'extrusion', 'offset', 'other')
_stateA = ('in', 'out', 'on', 'unknown')
_stateB = (TopAbs_IN, TopAbs_OUT, TopAbs_ON, TopAbs_UNKNOWN)
_orientA = ['TopAbs_FORWARD', 'TopAbs_REVERSED', 'TopAbs_INTERNAL',
'TopAbs_EXTERNAL']
_orientB = [TopAbs_FORWARD, TopAbs_REVERSED, TopAbs_INTERNAL,
TopAbs_EXTERNAL]
_topoTypesA = ['vertex', 'edge', 'wire', 'face', 'shell',
'solid', 'compsolid', 'compound', 'shape']
_topoTypesB = [TopAbs_VERTEX, TopAbs_EDGE, TopAbs_WIRE, TopAbs_FACE,
TopAbs_SHELL, TopAbs_SOLID,
TopAbs_COMPSOLID, TopAbs_COMPOUND, TopAbs_SHAPE]
_geom_types_a = ['line', 'circle', 'ellipse', 'hyperbola', 'parabola',
'beziercurve', 'bsplinecurve', 'othercurve']
_geom_types_b = [GeomAbs_Line, GeomAbs_Circle, GeomAbs_Ellipse,
GeomAbs_Hyperbola, GeomAbs_Parabola, GeomAbs_BezierCurve,
GeomAbs_BSplineCurve, GeomAbs_OtherCurve]
# TODO: make a function that generalizes this, there is absolutely
# no need for 2 lists to define an EnumLookup
def fix_formatting(_str):
return [i.strip() for i in _str.split(',')]
_brep_check_a = fix_formatting("NoError, InvalidPointOnCurve,\
InvalidPointOnCurveOnSurface, InvalidPointOnSurface,\
No3DCurve, Multiple3DCurve, Invalid3DCurve, NoCurveOnSurface,\
InvalidCurveOnSurface, InvalidCurveOnClosedSurface, InvalidSameRangeFlag,\
InvalidSameParameterFlag,\
InvalidDegeneratedFlag, FreeEdge, InvalidMultiConnexity, InvalidRange,\
EmptyWire, RedundantEdge, SelfIntersectingWire, NoSurface,\
InvalidWire, RedundantWire, IntersectingWires, InvalidImbricationOfWires,\
EmptyShell, RedundantFace, UnorientableShape, NotClosed,\
NotConnected, SubshapeNotInShape, BadOrientation, BadOrientationOfSubshape,\
InvalidToleranceValue, CheckFail")
_brep_check_b = [BRepCheck_NoError, BRepCheck_InvalidPointOnCurve,
BRepCheck_InvalidPointOnCurveOnSurface,
BRepCheck_InvalidPointOnSurface,
BRepCheck_No3DCurve, BRepCheck_Multiple3DCurve,
BRepCheck_Invalid3DCurve, BRepCheck_NoCurveOnSurface,
BRepCheck_InvalidCurveOnSurface,
BRepCheck_InvalidCurveOnClosedSurface,
BRepCheck_InvalidSameRangeFlag,
BRepCheck_InvalidSameParameterFlag,
BRepCheck_InvalidDegeneratedFlag, BRepCheck_FreeEdge,
BRepCheck_InvalidMultiConnexity, BRepCheck_InvalidRange,
BRepCheck_EmptyWire, BRepCheck_RedundantEdge,
BRepCheck_SelfIntersectingWire, BRepCheck_NoSurface,
BRepCheck_InvalidWire, BRepCheck_RedundantWire,
BRepCheck_IntersectingWires,
BRepCheck_InvalidImbricationOfWires,
BRepCheck_EmptyShell, BRepCheck_RedundantFace,
BRepCheck_UnorientableShape, BRepCheck_NotClosed,
BRepCheck_NotConnected, BRepCheck_SubshapeNotInShape,
BRepCheck_BadOrientation, BRepCheck_BadOrientationOfSubshape,
BRepCheck_InvalidToleranceValue, BRepCheck_CheckFail]
brepcheck_lut = EnumLookup(_brep_check_a, _brep_check_b)
curve_lut = EnumLookup(_curve_typesA, _curve_typesB)
surface_lut = EnumLookup(_surface_typesA, _surface_typesB)
state_lut = EnumLookup(_stateA, _stateB)
orient_lut = EnumLookup(_orientA, _orientB)
topo_lut = EnumLookup(_topoTypesA, _topoTypesB)
shape_lut = ShapeToTopology()
geom_lut = EnumLookup(_geom_types_a, _geom_types_b)
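# Minimal usage sketch of the lookup tables above. EnumLookup is
# bi-directional, so an enum value resolves to its readable name and the
# name resolves back to the enum:
#
#   >>> curve_lut[GeomAbs_Line]
#   'line'
#   >>> curve_lut['line'] == GeomAbs_Line
#   True
#   >>> topo_lut['face'] == TopAbs_FACE
#   True
#   >>> shape_lut[a_shape]  # 'a_shape' is any TopoDS_Shape; returns the downcast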
# todo: refactor, these classes have been moved from the "Topology" directory
# which had too many overlapping methods & classes, that are
# now part of the KBE module...
# still need to think what to do with these...
# what_is_face should surely become a lut [ geom_lut? ]
# i'm not sure whether casting to a gp_* is useful...
classes = dir()
geom_classes = []
for elem in classes:
if elem.startswith('Geom') and not 'swig' in elem:
geom_classes.append(elem)
def what_is_face(face):
    ''' Returns all Geom* class names to which this face's surface can be
    downcast
    '''
    if not face.ShapeType() == TopAbs_FACE:
        print('%s is not a TopAbs_FACE. Conversion impossible' % face)
        return None
    # with the handle-free OCC.Core API, BRep_Tool_Surface returns the
    # Geom_Surface instance directly, so no GetObject() call is needed
    obj = BRep_Tool_Surface(face)
    result = []
    # run the kind test against each Geom* class collected at module level;
    # re-scanning dir() here would only append duplicates to geom_classes
    for geom_class in geom_classes:
        if obj.IsKind(geom_class) and geom_class not in result:
            result.append(geom_class)
    return result
def face_is_plane(face):
    ''' Returns True if the TopoDS_Shape is a plane, False otherwise
    '''
    hs = BRep_Tool_Surface(face)
    # DownCast is a static method; with the handle-free API it yields
    # None when the cast fails, i.e. when the face is not planar
    downcast_result = Geom_Plane.DownCast(hs)
    return downcast_result is not None
def shape_is_cylinder(face):
    ''' Returns True if the TopoDS_Shape is a cylinder, False otherwise
    '''
    hs = BRep_Tool_Surface(face)
    downcast_result = Geom_CylindricalSurface.DownCast(hs)
    return downcast_result is not None
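# Hypothetical usage of the face helpers above; `a_face` stands for any
# TopoDS_Face obtained elsewhere (e.g. from a topology explorer):
#
#   if face_is_plane(a_face):
#       print('planar face')
#   print(what_is_face(a_face))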
| 39.961353 | 79 | 0.684478 |
4f4ab8bb262815afd6a15272a76699efa066e554 | 3,032 | py | Python | rescene/unrar.py | zordtk/pyReScene | fe20ee6c72dcbd62e92d53678016321705795871 | [
"MIT"
] | 18 | 2020-08-09T02:17:46.000Z | 2022-02-18T09:17:25.000Z | rescene/unrar.py | EchterAgo/pyrescene | 78e9c0c880b1ba89a85772765cdac78d0ce93f79 | [
"MIT"
] | 1 | 2021-11-23T21:13:37.000Z | 2021-11-23T21:13:37.000Z | rescene/unrar.py | EchterAgo/pyrescene | 78e9c0c880b1ba89a85772765cdac78d0ce93f79 | [
"MIT"
] | 9 | 2020-10-15T11:02:49.000Z | 2022-03-15T10:36:14.000Z | #!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2013 pyReScene
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
from distutils.spawn import find_executable
try:
# renamed to winreg in Python 3
from _winreg import *
except ImportError:
try:
from winreg import *
except ImportError:
pass
def locate_unrar():
"""locating installed unrar"""
if(os.name == "nt"):
unrar = locate_windows()
else:
unrar = locate_in_path()
return unrar
def locate_windows():
unrar = ""
try:
unrar = os.environ["ProgramW6432"] + "\\WinRAR\\UnRAR.exe"
if not os.path.exists(unrar):
unrar = os.environ["ProgramW6432"] + "\\Unrar\\UnRAR.exe"
if not os.path.exists(unrar):
raise KeyError
except KeyError:
try:
unrar = os.environ["ProgramFiles(x86)"] + "\\WinRAR\\UnRAR.exe"
if not os.path.exists(unrar):
unrar = os.environ["ProgramFiles(x86)"] + "\\Unrar\\UnRAR.exe"
if not os.path.exists(unrar):
raise KeyError
except KeyError:
unrar = try_registry()
if not unrar:
unrar = locate_in_path()
if not unrar:
# makes it work anyway when located in the same directory
unrar = "UnRAR.exe"
if not os.path.exists(unrar):
print("-----------------------------------------------")
print("Install WinRAR to use all the functionalities.")
print("Freeware 'UnRAR for Windows' is already enough.")
print("http://www.rarlab.com/rar_add.htm")
print("-----------------------------------------------")
return unrar
def try_registry():
    """try grabbing location from the Windows registry"""
    try:
        regpath = ("SOFTWARE\\Microsoft\\Windows\\" +
                   "CurrentVersion\\App Paths\\WinRAR.exe")
        key = OpenKey(HKEY_LOCAL_MACHINE, regpath, 0, KEY_READ)
        unrar = os.path.join(QueryValueEx(key, "Path")[0], "UnRAR.exe")
        if os.path.isfile(unrar):
            return unrar
        return None
    except Exception:
        # registry lookup failed or winreg is unavailable on this platform
        return None
def locate_in_path():
return find_executable("unrar")
if __name__ == '__main__':
print(locate_unrar())
print(try_registry())
| 30.938776 | 67 | 0.689974 |
4f49eb70c4fc7e6fa544959c85d13254dabdc9a1 | 12,476 | py | Python | world/fashion/tests.py | PWBaboons/Ithir | 3a65ab1e866b10ffd568f6b15a66576e1853f47d | [
"MIT"
] | null | null | null | world/fashion/tests.py | PWBaboons/Ithir | 3a65ab1e866b10ffd568f6b15a66576e1853f47d | [
"MIT"
] | null | null | null | world/fashion/tests.py | PWBaboons/Ithir | 3a65ab1e866b10ffd568f6b15a66576e1853f47d | [
"MIT"
] | 1 | 2018-09-12T02:06:04.000Z | 2018-09-12T02:06:04.000Z | # -*- coding: utf-8 -*-
"""
Tests for fashion app
"""
from __future__ import unicode_literals
from server.utils.test_utils import ArxCommandTest, TestEquipmentMixins
from world.fashion import fashion_commands
from mock import patch, Mock
class FashionCommandTests(TestEquipmentMixins, ArxCommandTest):
@patch('world.dominion.models.get_week')
@patch('world.stats_and_skills.do_dice_check')
def test_outfit_cmd(self, mock_dice_check, mock_get_week):
mock_get_week.return_value = 1
fake_dt = self.fake_datetime
self.mask1.db.quality_level = 11
to_be_worn = [self.top2, self.catsuit1, self.purse1, self.sword1, self.hairpins1, self.mask1]
self.setup_cmd(fashion_commands.CmdFashionOutfit, self.char2)
self.call_cmd("", "No outfits to display! Try creating one, or 'outfits/archives' instead.")
self.call_cmd("Friendly Shadows", "'Friendly Shadows' not found in your collection of outfits.")
self.call_cmd("/create", "Cannot create your shiny new outfit without a name.")
self.call_cmd("/create Friendly Shadows", "Emperor Testaccount2's New Clothes? Put something on "
"and try again.")
self.knife1.wield(self.char2)
for item in to_be_worn:
item.wear(self.char2)
to_be_removed = to_be_worn[1:] + [self.knife1]
with patch('django.utils.timezone.now', Mock(return_value=fake_dt)):
self.call_cmd("/create Friendly Shadows", "Created [Friendly Shadows] weapons: Lickyknife1 and Sword1\n"
"attire: Top2, Slinkity1, Purse1, Hairpins1, and A Fox Mask")
for item in to_be_removed:
item.remove(self.char2)
self.call_cmd("/create Unfriendly Shadows", "Created [Unfriendly Shadows]\nattire: Top2")
outfit1 = fashion_commands.get_caller_outfit_from_args(self.caller, "Friendly Shadows")
outfit2 = fashion_commands.get_caller_outfit_from_args(self.caller, "Unfriendly Shadows")
self.call_cmd("/create Friendly Shadows", "You own an outfit named 'Friendly Shadows' already.")
self.call_cmd("", "Created Outfit Appraisal/Buzz \n"
"1978/08/27 Friendly Shadows 27290 "
"1978/08/27 Unfriendly Shadows 500")
self.call_cmd("/archive", "No archived outfits to display! Try regular 'outfits' instead.")
self.call_cmd("/archive friendly Shadows", "Friendly Shadows is added to your outfit archives.")
self.assertTrue(outfit1.archived)
self.call_cmd("", "Created Outfit Appraisal/Buzz \n"
"1978/08/27 Unfriendly Shadows 500")
self.call_cmd("/archive friendly shadows", "Friendly Shadows is restored from your outfit archives.")
self.assertFalse(outfit1.archived)
# Modeling outfit2 means outfit1's appraisal goes down, since they have one item overlapping.
self.roster_entry2.action_points = 200
with patch('django.utils.timezone.now', Mock(return_value=fake_dt)):
outfit2.model_outfit_for_fashion(self.org)
self.assertTrue(self.top2.modeled_by)
self.call_cmd("", "Created Outfit Appraisal/Buzz \n"
"1978/08/27 Friendly Shadows 26790 "
"1978/08/27 Unfriendly Shadows little")
self.call_cmd("Unfriendly shadows", "Unfriendly Shadows Slot Location \n"
"Top2 chest Char2 "
"\nModeled by Testaccount2 for Orgtest, generating little buzz on "
"1978/08/27.")
self.call_cmd("/delete", "Requires an outfit's name.")
top2_snapshot = self.top2.fashion_snapshots.first()
self.assertEqual(self.top2.ndb.snapshots_cache, [top2_snapshot])
self.call_cmd("/delete Friendly shadows", "Deleting Friendly Shadows.") # :'(
self.assertEqual(self.top2.ndb.snapshots_cache, None)
outfit2.owner_character.msg = Mock()
self.top2.softdelete()
outfit2.owner_character.msg.assert_called_with("Nothing remains of the outfit formerly known as "
"'Unfriendly Shadows'.")
self.assertFalse(self.char2.dompc.fashion_outfits.all().exists())
@patch('world.dominion.models.get_week')
@patch('world.stats_and_skills.do_dice_check')
def test_model_cmd(self, mock_dice_check, mock_get_week):
mock_get_week.return_value = 1
fake_dt = self.fake_datetime
self.obj1.location, self.top1.location = self.char1, self.char1
self.mask1.db.quality_level = 11
ap_cost = self.top1.fashion_ap_cost
self.setup_cmd(fashion_commands.CmdFashionModel, self.char1)
self.call_cmd("catsuit", "Please specify <item>=<organization>")
self.call_cmd("catsuit=Shadow Striders",
"Could not find 'catsuit'.|Could not find public org 'Shadow Striders'.")
self.call_cmd("Obj=Orgtest", "Obj is not an item you can model for fashion.")
self.call_cmd("Top1=Orgtest", "Top1 was wrought by no mortal hand, and from it no mortal fame can be "
"earned.")
self.top1.db.recipe = 1
self.top1.db.crafted_by = self.char1
self.call_cmd("Top1=Orgtest", "Top1 was wrought by no mortal hand, and from it no mortal fame can be "
"earned.")
self.top1.db.crafted_by = self.char2
self.call_cmd("Top1=Orgtest", "Please wear Top1 before trying to model it as fashion.")
self.top1.wear(self.char1)
self.roster_entry.action_points = 0
self.call_cmd("Top1=Orgtest", "It costs %s AP to model Top1; you do not have enough energy." % ap_cost)
self.roster_entry.action_points = 100
mock_dice_check.return_value = 100
self.org.assets.inform_owner = Mock()
self.account2.assets.inform_owner = Mock() # the designer's assetowner
with patch('django.utils.timezone.now', Mock(return_value=fake_dt)):
self.call_cmd("Top1=Orgtest", "[Fashion] When Testaccount models 'Top1' on behalf of Orgtest, it gains "
"modest attention from admiring onlookers.|For modeling Top1 you earn "
"1000 fame. Your prestige is now 1015.")
self.assertEqual(self.roster_entry.action_points, 100 - ap_cost)
self.org.assets.inform_owner.assert_called_with("{315500{n fame awarded from Testaccount modeling Top1.",
append=True, category='fashion')
self.account2.assets.inform_owner.assert_called_with("{315250{n fame awarded from Testaccount modeling "
"Top1.", append=True, category='fashion')
self.assertEqual(self.top1.modeled_by, "Modeled by {315Testaccount{n for {125Orgtest{n, generating "
"{355modest{n buzz on 1978/08/27.")
# test "model/outfit":
self.top1.remove(self.char1)
self.top1.location = self.char2
self.roster_entry2.action_points = 0
self.caller = self.char2
to_be_worn = [self.top1, self.catsuit1, self.purse1, self.sword1, self.hairpins1, self.mask1]
self.knife1.wield(self.char2)
for item in to_be_worn:
item.wear(self.char2)
outfit1 = self.create_ze_outfit("Friendly Shadows")
self.mask1.remove(self.char2)
self.assertFalse(outfit1.is_equipped)
self.call_cmd("/outfit Friendly Shadows=Orgtest", "Outfit must be equipped before trying to model it.")
self.mask1.wear(self.char2)
self.assertTrue(outfit1.is_equipped)
self.call_cmd("/outfit Friendly Shadows=Orgtest", "Pieces of this outfit cannot be modeled:\n"
"- Top1 has already been used to model fashion.\n"
"Repeat command to model the 6 remaining item(s).")
self.call_cmd("/outfit Friendly Shadows=Orgtest", "It costs %d AP to model Friendly Shadows; you do not "
"have enough energy." % (ap_cost * 6))
self.roster_entry2.action_points = 200
outfit1.owner.player.ndb.outfit_model_prompt = str(outfit1)
with patch('django.utils.timezone.now', Mock(return_value=fake_dt)):
self.call_cmd("/outfit Friendly Shadows=Orgtest", "[Fashion] With talented modeling, Testaccount2 "
"displays 'Friendly Shadows' around Arx, garnering "
"flattering conversation and murmurs throughout the "
"city about the fine choices made by Orgtest for "
"sponsoring someone with such exceptional taste.|"
"For modeling Friendly Shadows you earn 72016 "
"fame. Your prestige is now 91159.")
self.assertEqual(self.roster_entry2.action_points, 200 - (ap_cost * 6))
self.assertTrue(outfit1.modeled)
self.assertTrue(self.hairpins1.modeled_by)
self.assertEqual(self.hairpins1.fashion_snapshots.first().outfit, outfit1)
# this tests if hairpins carries the "buzz message" from the entire outfit:
self.assertEqual(self.hairpins1.modeled_by, "Modeled by {315Testaccount2{n for {125Orgtest{n, generating "
"{542exceptional{n buzz on 1978/08/27!")
self.assertEqual(outfit1.model_info, "Modeled by {315Testaccount2{n for {125Orgtest{n, generating "
"{542exceptional{n buzz on 1978/08/27!")
self.call_cmd("/outfit Friendly Shadows=Orgtest", "Friendly Shadows has already been modeled.")
for item in to_be_worn:
item.remove(self.char2)
outfit2 = self.create_ze_outfit("Friendliest") # only knife1 remains wielded
self.call_cmd("/outfit Friendliest=Orgtest", "Pieces of this outfit cannot be modeled:\n"
"- Lickyknife1 has already been used to model fashion.\n"
"No valid items remain! Try modeling a different outfit.")
self.assertFalse(outfit2.modeled)
# test leaderboards:
self.call_cmd("", "Fashion Model Fame Items Avg Item Fame \n"
"TestAccount2 72016 6 12002 "
"TestAccount 1000 1 1000")
self.call_cmd("/designer", "Designer Fame Items Avg Item Fame \n"
"TestAccount2 18253 7 2607")
self.call_cmd("/designer Testaccount2", "Testaccount2 Model Fame Items Avg Item Fame \n"
"TestAccount2 18003 6 3000 "
"TestAccount 250 1 250")
self.call_cmd("/orgs", "Organization Fame Items Avg Item Fame \n"
"Orgtest 36508 7 5215")
self.call_cmd("/org Orgtest", "Orgtest Model Fame Items Avg Item Fame \n"
"TestAccount2 36008 6 6001 "
"TestAccount 500 1 500")
def test_refund_cmd(self):
from world.fashion.models import FashionSnapshot
self.setup_cmd(fashion_commands.CmdAdminFashion, self.char1)
snapshot = FashionSnapshot.objects.create(fashion_model=self.dompc2, designer=self.dompc2, fame=50000,
org=self.org, fashion_item=self.top1)
snapshot.apply_fame()
self.assertEqual(self.dompc2.assets.fame, 62500)
self.call_cmd("/delete 1", 'Snapshot #1 fame/ap has been reversed. Deleting it.')
self.assertEqual(self.dompc2.assets.fame, 0)
| 68.549451 | 116 | 0.591456 |
4f4b03b00235c92be3168128aa9908837705e19e | 246 | py | Python | uqcsbot/scripts/echo.py | BlueDragon23/uqcsbot | 3bfa23bfc331257ae3e12dd15e9c3dfb0ac56150 | ["MIT"] | 38 | 2017-04-13T01:15:05.000Z | 2021-07-25T08:22:20.000Z | uqcsbot/scripts/echo.py | BlueDragon23/uqcsbot | 3bfa23bfc331257ae3e12dd15e9c3dfb0ac56150 | ["MIT"] | 302 | 2017-04-13T01:20:26.000Z | 2021-04-06T07:08:29.000Z | uqcsbot/scripts/echo.py | BlueDragon23/uqcsbot | 3bfa23bfc331257ae3e12dd15e9c3dfb0ac56150 | ["MIT"] | 54 | 2017-05-04T23:18:29.000Z | 2021-04-07T17:57:16.000Z |
from uqcsbot import bot, Command
@bot.on_command("echo")
def handle_echo(command: Command):
"""
    `!echo [TEXT]` - Echoes back the given text.
"""
bot.post_message(command.channel_id, command.arg if command.has_arg() else 'ECHO!')
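# Illustrative behaviour (comment added for clarity, not in the original handler):
# "!echo hello world" makes the bot post "hello world" back to the channel, while
# a bare "!echo" posts "ECHO!".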
| 24.6 | 87 | 0.678862 |
4f4b769b8e269d551181c0b9aa28e82feb33ec2f | 5,360 | py | Python | network_simulator/test/testUserNumber.py | brokenax3/network-simulator-py | 8ce238219010046aaf1063d546b4b7232a38d2fa | ["MIT"] | null | null | null | network_simulator/test/testUserNumber.py | brokenax3/network-simulator-py | 8ce238219010046aaf1063d546b4b7232a38d2fa | ["MIT"] | null | null | null | network_simulator/test/testUserNumber.py | brokenax3/network-simulator-py | 8ce238219010046aaf1063d546b4b7232a38d2fa | ["MIT"] | null | null | null |
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import Category20 as palette
import itertools
import tqdm
import numpy as np
from network_simulator.components import Location
from network_simulator.components import User
from network_simulator.components import simulator
from network_simulator.helpers import writeSimCache, readSimCache
from network_simulator.helpers import genUserMovementLoc
def main():
return simulator(g_init_vars, g_aplist, g_usrlist)
def UserNumberCompare(init_vars, aplist, usrlist):
global g_init_vars, g_aplist, g_usrlist
g_init_vars = init_vars
g_aplist = aplist
# g_usrlist = usrlist
plot_from_saved = 1
total_runs = range(20)
number_user = np.arange(10, 201, 10)
_output = {}
_sim_dict_axes = {
"axes1" : {
"param" : "No Policy - Epsilon Greedy",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 5,
"SMART_PARAM" : [0.01, 12]
},
"axes2" : {
"param" : "Cheapest Users - Epsilon Greedy",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 5,
"SMART_PARAM" : [0.01, 12]
},
"axes3" : {
"param" : "No Policy - UCB1",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 6,
"SMART_PARAM" : [0.001, 12]
},
"axes4" : {
"param" : "Cheapest Users - UCB1",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 6,
"SMART_PARAM" : [0.001, 12]
},
"axes5" : {
"param" : "No Transmission Policy - Shared Evenly",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 1,
},
"axes6" : {
"param" : "Cheapest Users - Shared Evenly",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 1,
},
"axes7" : {
"param" : "No Transmission Policy - AP Energy Arrival",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 2,
},
"axes8" : {
"param" : "Cheapest Users - AP Energy Arrival",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 2,
},
"axes9" : {
"param" : "No Transmission Policy - AP Energy Use",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 3,
},
"axes10" : {
"param" : "Cheapest Users - AP Energy Use",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 3,
},
"axes11" : {
"param" : "No Transmission Policy - AP Energy Efficiency",
"ENERGY_POLICY" : 0,
"SHARE_ENERGY" : 4,
},
"axes12" : {
"param" : "Cheapest Users - AP Energy Efficiency",
"ENERGY_POLICY" : 2,
"SHARE_ENERGY" : 4,
}
}
if plot_from_saved == 0:
bar = tqdm.tqdm(desc="Total Number of Users", total=len(_sim_dict_axes.keys()) * len(number_user))
for axes in _sim_dict_axes.values():
print("Algorithms " + axes["param"])
for key in ["ENERGY_POLICY", "SHARE_ENERGY"]:
init_vars[key] = axes[key]
if init_vars["SHARE_ENERGY"] == 6 or init_vars["SHARE_ENERGY"] == 5:
init_vars["SMART_PARAM"] == axes["SMART_PARAM"]
_avg_serviced_users = []
for usernum in number_user:
init_vars["USR_TOTAL"] = usernum
USR_TOTAL = init_vars["USR_TOTAL"]
g_init_vars = init_vars
_serviced_users = []
for run in total_runs:
init_vars["usr_mov_loc"] = genUserMovementLoc(USR_TOTAL, init_vars["TIME_MAX"], init_vars["DIST_MOVEUSER_MAX"], init_vars["GRID_SIZE"], 0, [0, 0])
g_usrlist = [User(index, Location(init_vars["usr_mov_loc"][index][0][0], init_vars["usr_mov_loc"][index][0][1])) for index in range(USR_TOTAL)]
_serviced_users.append(main())
_avg_serviced_users.append(sum(_serviced_users) / len(total_runs))
bar.update(1)
_output[axes["param"]] = { "result" : _avg_serviced_users }
bar.close()
writeSimCache("numberofUsers", _output)
else:
_output = readSimCache("numberofUsers")
output_file("interactive/numberofUsers.html")
TOOLTIPS = [
("(x, y)", "($x, $y)"),
("desc", "$name")
]
# Plot colours
colors = itertools.cycle(palette[20])
p = figure(width=1200, height=800, x_axis_label='Total Number of Users', y_axis_label='Total Number of Serviced Users', tooltips=TOOLTIPS, output_backend='svg')
for key, value in _output.items():
print(key + " : " + str(sum(value["result"])/len(value["result"])))
p.line(number_user, value["result"], legend_label=key, name=key, color=next(colors), line_width=3)
# p.legend.location = (20, 450)
p.xaxis.axis_label_text_font_size='20px'
p.xaxis.major_label_text_font_size='20px'
p.yaxis.axis_label_text_font_size='20px'
p.yaxis.major_label_text_font_size='20px'
p.legend.label_text_font_size='18px'
p.legend[0].orientation = "vertical"
legend_ref = p.legend[0]
p.add_layout(legend_ref, "right")
show(p)
p.toolbar.logo = None
p.toolbar_location = None
return p
| 31.715976 | 166 | 0.556903 |
4f4c189f90e64e08673a05ecca8a85e020e60b71 | 6,446 | py | Python | test/test_program_test.py | osarood/Testify | 9005a8866cbf099c26e6fbd74c3e2640a581a55b | ["Apache-2.0"] | 1 | 2020-08-11T09:52:12.000Z | 2020-08-11T09:52:12.000Z | test/test_program_test.py | osarood/Testify | 9005a8866cbf099c26e6fbd74c3e2640a581a55b | ["Apache-2.0"] | null | null | null | test/test_program_test.py | osarood/Testify | 9005a8866cbf099c26e6fbd74c3e2640a581a55b | ["Apache-2.0"] | null | null | null |
import os
import subprocess
import mock
from testify import setup_teardown, TestCase, test_program
from testify.assertions import assert_equal, assert_raises, assert_in
from optparse import OptionParser
class OptionParserErrorException(Exception):
pass
class ParseTestRunnerCommandLineArgsTest(TestCase):
@setup_teardown
def patch_OptionParser_error(self):
def new_error(*args, **kwargs):
raise OptionParserErrorException(*args, **kwargs)
with mock.patch.object(OptionParser, 'error', side_effect=new_error):
yield
def test__parse_test_runner_command_line_module_method_overrides_empty_input(self):
"""Make sure _parse_test_runner_command_line_module_method_overrides returns something sensible if you pass it an empty list of arguments."""
assert_equal(test_program._parse_test_runner_command_line_module_method_overrides([]), (None, {}))
def test_parse_test_runner_command_line_args_rerun_test_file(self):
"""Make sure that when --rerun-test-file is passed, parse_test_runner_command_line_args doesn't complain about a missing test path."""
test_program.parse_test_runner_command_line_args([], ['--rerun-test-file', '-'])
def test_parse_test_runner_command_line_args_connect(self):
"""Make sure that when --connect is passed, parse_test_runner_command_line_args doesn't complain about a missing test path."""
test_program.parse_test_runner_command_line_args([], ['--connect', 'localhost:65537'])
def test_parse_test_runner_command_line_args_replay_json_inline(self):
"""Make sure that when --replay-json-inline is passed, parse_test_runner_command_line_args doesn't complain about a missing test path."""
test_program.parse_test_runner_command_line_args([], ['--replay-json-inline', '{something that obviously isnt json}'])
def test_parse_test_runner_command_line_args_replay_json(self):
"""Make sure that when --replay-json-inline is passed, parse_test_runner_command_line_args doesn't complain about a missing test path."""
test_program.parse_test_runner_command_line_args([], ['--replay-json', 'somejsonfile.txt'])
def test_parse_test_runner_command_line_args_no_test_path(self):
"""Make sure that if no options and no arguments are passed, parse_test_runner_command_line_args DOES complain about a missing test path."""
with assert_raises(OptionParserErrorException):
test_program.parse_test_runner_command_line_args([], [])
def test_call(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, command)
return stdout.strip()
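# Minimal usage sketch for the helper above (assumes a POSIX `echo` on PATH):
#   test_call(['echo', 'hi']) returns the stripped stdout of the process and
#   raises subprocess.CalledProcessError on a nonzero exit status.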
class TestifyRunAcceptanceTestCase(TestCase):
expected_list = (
'testing_suite.example_test ExampleTestCase.test_one\n'
'testing_suite.example_test ExampleTestCase.test_two\n'
'testing_suite.example_test SecondTestCase.test_one'
)
expected_tests = 'PASSED. 3 tests'
def test_run_testify_from_bin_list_tests(self):
output = test_call(['bin/testify', '--list-tests', 'testing_suite'])
assert_equal(output, self.expected_list)
def test_run_testify_as_module_list_tests(self):
output = test_call([
'python', '-m', 'testify.test_program',
'--list-tests', 'testing_suite'])
assert_equal(output, self.expected_list)
def test_run_testify_from_bin(self):
output = test_call(['bin/testify', 'testing_suite', '-v'])
assert_in(self.expected_tests, output)
def test_run_testify_test_module(self):
output = test_call(['python', '-m', 'testing_suite.example_test', '-v'])
assert_in(self.expected_tests, output)
def test_run_testify_test_file(self):
output = test_call(['python', 'testing_suite/example_test.py', '-v'])
assert_in(self.expected_tests, output)
def test_run_testify_test_file_class(self):
output = test_call([
'python', 'testing_suite/example_test.py', '-v',
'ExampleTestCase'])
assert_in('PASSED. 2 tests', output)
def test_run_testify_test_file_class_and_method(self):
output = test_call([
'python', 'testing_suite/example_test.py', '-v',
'ExampleTestCase.test_one'])
assert_in('PASSED. 1 test', output)
def test_run_testify_with_failure(self):
assert_raises(
subprocess.CalledProcessError,
test_call,
['python', 'testing_suite/example_test.py', 'DoesNotExist'])
class TestClientServerReturnCode(TestCase):
def test_client_returns_zero_on_success(self):
server_process = subprocess.Popen(
[
'python', '-m', 'testify.test_program',
'testing_suite.example_test',
'--serve', '9001',
],
stdout=open(os.devnull, 'w'),
stderr=open(os.devnull, 'w'),
)
# test_call has the side-effect of asserting the return code is 0
ret = test_call([
'python', '-m', 'testify.test_program',
'--connect', 'localhost:9001',
])
assert_in('PASSED', ret)
assert_equal(server_process.wait(), 0)
def test_client_returns_nonzero_on_failure(self):
server_process = subprocess.Popen(
[
'python', '-m', 'testify.test_program',
'test.failing_test',
'--serve', '9001',
],
stdout=open(os.devnull, 'w'),
stderr=open(os.devnull, 'w'),
)
# Need two clients in order to finish running tests
client_1 = subprocess.Popen(
[
'python', '-m', 'testify.test_program',
'--connect', 'localhost:9001',
],
stdout=open(os.devnull, 'w'),
stderr=open(os.devnull, 'w'),
)
client_2 = subprocess.Popen(
[
'python', '-m', 'testify.test_program',
'--connect', 'localhost:9001',
],
stdout=open(os.devnull, 'w'),
stderr=open(os.devnull, 'w'),
)
assert_equal(client_1.wait(), 1)
assert_equal(client_2.wait(), 1)
assert_equal(server_process.wait(), 1)
| 41.057325 | 149 | 0.656997 |
4f4a8ab3e5659f4211afdeb605d1cb2dce9cc4e7 | 743 | py | Python | CHAPTER 14 (graph algorithm)/nested_edge_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | ["MIT"] | null | null | null | CHAPTER 14 (graph algorithm)/nested_edge_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | ["MIT"] | null | null | null | CHAPTER 14 (graph algorithm)/nested_edge_class.py | ahammadshawki8/Data-Structures-Algorithms-in-Python- | fc18b54128cd5bc7639a14999d8f990190b524eb | ["MIT"] | null | null | null |
class Edge:
"""Lightweight edge structure for a graph."""
__slots__ = "_origin","_destination","_element"
def __init__(self, u, v, x):
"""Do not call constructor directly. Use Graph's insert_edge(u,v,x)."""
self._origin = u
self._destination = v
self._element = x
def endpoints(self):
"""Return (u,v) tuple for vertics u and v."""
return (self._origin,self._destination)
def opposite(self,v):
"""Return a vertex that is opposite of v on this edge."""
return self._destination if v is self._origin else self._origin
def element(self):
"""Return element associated with this edge."""
return self._element
    def __hash__(self): # will allow edge to be used as a map/set key
return hash((self._origin,self._destination))
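# Hedged usage sketch (added for illustration; in the book's Graph class the
# endpoints would be Vertex instances -- plain strings stand in for them here):
if __name__ == "__main__":
    u, v = "LAX", "SFO"            # any objects can serve as endpoints
    e = Edge(u, v, 337)            # edge from u to v carrying element 337
    assert e.endpoints() == (u, v)
    assert e.opposite(u) is v      # cross the edge starting from u
    assert e.element() == 337
    routes = {e: "daily"}          # hashable, so usable as a map/set key
    print(routes[e])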
| 29.72 | 73 | 0.701211 |
4f4d214f20c9e5a584a29f76674e2cbfdb2c54e7 | 1,267 | py | Python | backend/jupyterlab_commenting_service_server/service.py | kgryte/jupyterlab-commenting | cbf3b1eacb42fde54ef0b095f218de7172dcddca | ["BSD-3-Clause"] | 99 | 2019-01-11T02:49:52.000Z | 2022-03-19T02:42:44.000Z | backend/jupyterlab_commenting_service_server/service.py | kgryte/jupyterlab-commenting | cbf3b1eacb42fde54ef0b095f218de7172dcddca | ["BSD-3-Clause"] | 43 | 2019-01-04T01:56:07.000Z | 2021-03-01T11:03:08.000Z | backend/jupyterlab_commenting_service_server/service.py | kgryte/jupyterlab-commenting | cbf3b1eacb42fde54ef0b095f218de7172dcddca | ["BSD-3-Clause"] | 25 | 2019-01-09T22:02:43.000Z | 2021-09-25T04:26:27.000Z |
# @license BSD-3-Clause
#
# Copyright (c) 2019 Project Jupyter Contributors.
# Distributed under the terms of the 3-Clause BSD License.
"""Jupyterlab Commenting Service Server"""
import os
def start():
"""
    Start the JupyterLab Commenting Service server.
Returns:
dict -- A dictionary with the command to start the Commenting Service Server
"""
path = os.path.dirname(os.path.abspath(__file__))
database = os.path.join(path, 'comments.db')
try:
open(database, "w+")
except FileNotFoundError as f:
print('The file %s could not be found or opened' % (f.filename))
return {
'command': [
'datasette',
'serve',
database,
'-p',
'{port}',
'--cors'
],
'timeout': 60,
'port': 0 # 40000
}
def fastapi():
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
return {
'command': [
'uvicorn',
'main:app',
'--host',
'0.0.0.0',
'--port',
'{port}',
'--proxy-headers',
'--reload'
],
'timeout': 60,
'port': 0, # 30000
'absolute_url': False
}
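# Note added for context (an assumption, not part of the original file): dicts of
# this shape -- {'command': [...], 'port': ..., 'timeout': ...} -- match the server
# process specification consumed by jupyter-server-proxy, which substitutes the
# '{port}' placeholder and launches the command. Registration would typically go
# through a 'jupyter_serverproxy_servers' entry point in setup.py, e.g.
#   'commenting = jupyterlab_commenting_service_server.service:start'
# (the entry-point name shown here is hypothetical).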
| 21.844828 | 84 | 0.50513 |
4f4bd465305098d4e546ba9a79f507fd64928cb2 | 7,834 | py | Python | tests/test_tagset.py | ericyd/inclusion-py | c891db8590dcf194b6e34c7055f9b19bb5a385ca | ["MIT"] | null | null | null | tests/test_tagset.py | ericyd/inclusion-py | c891db8590dcf194b6e34c7055f9b19bb5a385ca | ["MIT"] | null | null | null | tests/test_tagset.py | ericyd/inclusion-py | c891db8590dcf194b6e34c7055f9b19bb5a385ca | ["MIT"] | null | null | null |
import unittest
from datatag import TagSet
from datatag.exceptions import TagNotUnique, ArgumentNotCallable, DataNotIterable, UndefinedTag, NonBooleanQueryValue
from datatag.tag_definition import TagDefinition
class TestDefineTag(unittest.TestCase):
def setUp(self):
self.ts = TagSet()
def test_duplicate_name(self):
self.ts.define_tag('test_tag', lambda x: True)
self.assertRaises(TagNotUnique, self.ts.define_tag, 'test_tag', lambda x: True)
def test_function_not_callable(self):
self.assertRaises(ArgumentNotCallable, self.ts.define_tag, 'test_tag', True)
def test_adds_tag_definition(self):
self.ts.define_tag('test_tag', lambda x: True)
self.assertIn('test_tag', self.ts.tag_definitions)
def test_adds_correct_type(self):
self.ts.define_tag('test_tag', lambda x: True)
self.assertIsInstance(self.ts.tag_definitions['test_tag'], TagDefinition)
def test_increments_integer(self):
self.ts.define_tag('tag1', lambda x: True)
self.ts.define_tag('tag2', lambda x: True)
self.ts.define_tag('tag3', lambda x: True)
self.assertEqual(self.ts.tag_definitions['tag1'].flag, 1 << 0)
self.assertEqual(self.ts.tag_definitions['tag2'].flag, 1 << 1)
self.assertEqual(self.ts.tag_definitions['tag3'].flag, 1 << 2)
class TestIsIncluded(unittest.TestCase):
def setUp(self):
self.ts = TagSet()
# set up bitflags for fake "tags" that data might have
self.blue = 1 << 0
self.red = 1 << 1
self.green = 1 << 2
self.orange = 1 << 4
def test_positive_included_negative_excluded(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.blue
negative = self.green
# apparently this is the syntax for calling private methods...
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, True)
def test_multiple_positive_included_multiple_negative_excluded(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.blue + self.red
negative = self.green + self.orange
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, True)
def test_positive_included_negative_included(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.blue
negative = self.red
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, False)
def test_positive_included_multiple_negative_included(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.blue
negative = self.red + self.green
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, False)
def test_positive_excluded_negative_included(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.green
negative = self.red
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, False)
def test_positive_excluded_negative_excluded(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.green
negative = self.orange
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, False)
def test_multiple_positive_excluded_multiple_negative_excluded(self):
# item is blue AND red, not green or orange
test_data_mask = self.blue + self.red
positive = self.green + self.blue
negative = self.orange + self.red
result = self.ts._TagSet__is_included(test_data_mask, positive, negative)
self.assertEqual(result, False)
class TestAnalyze(unittest.TestCase):
def setUp(self):
self.ts = TagSet()
def test_data_not_iterable(self):
data = 23
self.assertRaises(DataNotIterable, self.ts.analyze, data)
def test_purge_clears_dataset(self):
data = [1, 2, 3]
self.ts.analyze(data)
new_data = [4, 5, 6]
self.ts.analyze(new_data, purge = True)
self.assertEqual([datum.value for datum in self.ts.dataset], new_data)
def test_analyze_all_data(self):
data = [2, 4, 5, 7, 8]
self.ts.define_tag('even', lambda x: x % 2 == 0)
self.ts.analyze(data)
# bitmask for TaggedDatum should be 1 (1<<0) for data that _is_ even, and 0 for data that is _not_ even
self.assertEqual([d.value for d in self.ts.dataset if d.bitmask == 1], [2, 4, 8])
self.assertEqual([d.value for d in self.ts.dataset if d.bitmask == 0], [5, 7])
self.assertEqual(len(self.ts.dataset), len(data))
class TestQuery(unittest.TestCase):
def setUp(self):
self.ts = TagSet()
self.ts.define_tag('blue', lambda x: 'blue' in x)
self.ts.define_tag('red', lambda x: 'red' in x)
self.ts.define_tag('green', lambda x: 'green' in x)
self.ts.define_tag('orange', lambda x: 'orange' in x)
data = ['blue', 'red', 'green', 'orange',
'blue_red', 'blue_green', 'blue_orange',
'red_green', 'red_orange',
'green_orange',
'blue2', 'blue_green_2',
'blue_red_green_orange']
self.ts.analyze(data)
def test_query_for_undefined_tag(self):
self.assertRaises(UndefinedTag, self.ts.query, {'undefined_tag': True})
def test_query_for_non_boolean_value(self):
self.ts.define_tag('tag1', lambda x: True)
self.assertRaises(NonBooleanQueryValue, self.ts.query, {'tag1': "True"})
def test_finds_data_single_positive_param(self):
params = {'blue': True}
actual = self.ts.query(params)
expected = ['blue', 'blue_red', 'blue_green', 'blue_orange', 'blue2', 'blue_green_2', 'blue_red_green_orange']
self.assertEqual(actual, expected)
def test_finds_data_single_negative_param(self):
params = {'blue': False}
actual = self.ts.query(params)
expected = ['red', 'green', 'orange', 'red_green', 'red_orange', 'green_orange']
self.assertEqual(actual, expected)
def test_finds_data_multiple_positive_param(self):
params = {'blue': True, 'green': True}
actual = self.ts.query(params)
expected = ['blue_green', 'blue_green_2', 'blue_red_green_orange']
self.assertEqual(actual, expected)
def test_finds_data_multiple_negative_param(self):
params = {'blue': False, 'green': False}
actual = self.ts.query(params)
expected = ['red', 'orange', 'red_orange']
self.assertEqual(actual, expected)
def test_finds_data_single_positive_single_negative_param(self):
params = {'red': True, 'green': False}
actual = self.ts.query(params)
expected = ['red', 'blue_red', 'red_orange']
self.assertEqual(actual, expected)
def test_finds_data_multiple_positive_multiple_negative_param(self):
params = {'red': True, 'blue': True, 'green': False, 'orange': False}
actual = self.ts.query(params)
expected = ['blue_red']
self.assertEqual(actual, expected)
def test_finds_all_data(self):
params = {}
actual = self.ts.query(params)
expected = [d.value for d in self.ts.dataset]
self.assertEqual(actual, expected)
if __name__ == '__main__':
    unittest.main()
| 41.015707 | 118 | 0.656114 |
4f4eb25eae7e35aeaee4f6f9eb65b79078a7c8fc | 57,046 | py | Python | IOHMM/linear_models.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | ["MIT"] | 1 | 2021-09-02T14:16:14.000Z | 2021-09-02T14:16:14.000Z | IOHMM/linear_models.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | ["MIT"] | null | null | null | IOHMM/linear_models.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | ["MIT"] | 1 | 2021-09-20T02:59:47.000Z | 2021-09-20T02:59:47.000Z |
'''
This is a unified interface/wrapper of general/generalized linear models from
sklearn/statsmodels packages.
Problems with sklearn:
1. No Generalized linear models available.
2. Does not estimate standard error of coefficients.
    3. Logistic regression does not handle the single-class case.
    4. For two-class logistic regression, the 'ovr' result is not the same as the 'multinomial' result.
Problems with statsmodels:
1. No working version of multivariate OLS with sample weights.
2. MNLogit does not support sample weights.
Problems with both:
    1. No interface to calculate loglike_per_sample,
        which is needed to calculate the emission probability in IOHMM.
In this implementation,
In this implementations,
we will mainly use statsmodels for
1. Generalized linear models with simple response
we will mainly use sklearn for
1. Univariate/Multivariate Ordinary least square (OLS) models,
2. Multinomial Logistic Regression with discrete output/probability outputs
Note:
1. If using customized arguments for constructor, you may encounter compalints
from the statsmodels/sklearn on imcompatible arguments.
This maybe especially true for the compatibility between solver and regularization method.
2. For the GLM, statsmodels is not great when fitting with regularizations
(espicially l1, and elstic_net). In this case the coefficients might be np.nan.
Try not using regularizations if you select GLM until statsmodels is stable on this.
'''
# //TODO in future add arguments compatibility check
from __future__ import division
from future import standard_library
from builtins import range
from builtins import object
import pickle as pickle
import logging
import numbers
import os
import numpy as np
from scipy.stats import multivariate_normal
from sklearn import linear_model
from sklearn.linear_model.base import _rescale_data
from sklearn.preprocessing import label_binarize
import statsmodels.api as sm
from statsmodels.genmod.families import Poisson, Binomial
from statsmodels.tools import add_constant
standard_library.install_aliases()
EPS = np.finfo(float).eps
class BaseModel(object):
"""
A generic supervised model for data with input and output.
BaseModel does nothing, but lays out the methods expected of any subclass.
"""
def __init__(self,
solver,
fit_intercept=True,
est_stderr=False,
tol=1e-4,
max_iter=100,
reg_method=None,
alpha=0,
l1_ratio=0,
coef=None,
stderr=None):
"""
Constructor
Parameters
----------
solver: specific solver for each linear model
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: method to regularize the model, one of (None, l1, l2, elastic_net).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
-------
"""
self.solver = solver
self.fit_intercept = fit_intercept
self.est_stderr = est_stderr
self.tol = tol
self.max_iter = max_iter
self.reg_method = reg_method
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef = coef
self.stderr = stderr
def fit(self, X, Y, sample_weight=None):
"""
Fit the weighted model
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : observed response matrix of shape
(n_samples, ) or (n_samples, k) based on specific model
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
"""
raise NotImplementedError
def _raise_error_if_model_not_trained(self):
"""
Raise error if the model is not trained (thus has coef)
----------
"""
if self.coef is None:
raise ValueError('Model is not trained.')
def _raise_error_if_sample_weight_sum_zero(self, sample_weight):
"""
Raise error if the sum of sample_weight is 0
----------
sample_weight: array of (n_samples, )
"""
if np.sum(sample_weight) < EPS:
raise ValueError('Sum of sample weight is 0.')
def _transform_X(self, X):
"""
Transform the design matrix X
----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
X : design matrix of shape (n_samples, n_features + 1) if fit intercept
"""
if self.fit_intercept:
X = add_constant(X, has_constant='add')
return X
def _transform_sample_weight(self, X, sample_weight=None):
"""
Transform the sample weight from anyform to array
----------
X : design matrix of shape (n_samples, n_features), 2d
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
Returns
-------
sample_weight: array of (n_samples, )
"""
if sample_weight is None:
sample_weight = np.ones(X.shape[0])
elif isinstance(sample_weight, numbers.Number):
sample_weight = np.ones(X.shape[0]) * sample_weight
assert X.shape[0] == sample_weight.shape[0]
return sample_weight
def _transform_X_sample_weight(self, X, sample_weight=None):
"""
Transform the design matrix X and sample_weight to the form they can be used to fit
----------
X : design matrix of shape (n_samples, n_features), 2d
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
Returns
-------
X : design matrix of shape (n_samples, n_features + 1) if fit intercept
sample_weight: array of (n_samples, )
"""
X = self._transform_X(X)
sample_weight = self._transform_sample_weight(X, sample_weight=sample_weight)
return X, sample_weight
def predict(self, X):
"""
Predict the Y value based on the model
----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
predicted value: of shape (n_samples, ) or (n_samples, k) based on specific model
"""
raise NotImplementedError
def loglike_per_sample(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
observing each of Y_i value given each X_i value
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : observed response matrix of shape
(n_samples, ) or (n_samples, k) based on specific model
Returns
-------
log_p: array of shape (n_samples, )
"""
raise NotImplementedError
def loglike(self, X, Y, sample_weight=None):
"""
Given a set of X and Y, calculate the log probability of
observing Y, considering the sample weight.
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : observed response matrix of shape
(n_samples, ) or (n_samples, k) based on specific model
Returns
-------
log_likelihood: float
"""
self._raise_error_if_model_not_trained()
sample_weight = self._transform_sample_weight(X, sample_weight=sample_weight)
return np.sum(sample_weight * self.loglike_per_sample(X, Y))
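    # The quantity computed above is the weighted log likelihood
    #     loglike(X, Y, w) = sum_i w_i * log p(Y_i | X_i),
    # where loglike_per_sample supplies log p(Y_i | X_i) for each sample.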
def to_json(self, path):
"""
Generate json object of the model
Parameters
----------
path : the path to save the model
Returns
-------
json_dict: a dictionary containing the attributes of the model
"""
json_dict = {
'data_type': self.__class__.__name__,
'properties': {
'solver': self.solver,
'fit_intercept': self.fit_intercept,
'est_stderr': self.est_stderr,
'tol': self.tol,
'max_iter': self.max_iter,
'reg_method': self.reg_method,
'alpha': self.alpha,
'l1_ratio': self.l1_ratio,
'coef': {
'data_type': 'numpy.ndarray',
'path': os.path.join(path, 'coef.npy')
},
'stderr': {
'data_type': 'numpy.ndarray',
'path': os.path.join(path, 'stderr.npy')
}
}
}
if not os.path.exists(os.path.dirname(json_dict['properties']['coef']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['coef']['path']))
np.save(json_dict['properties']['coef']['path'], self.coef)
if not os.path.exists(os.path.dirname(json_dict['properties']['stderr']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['stderr']['path']))
np.save(json_dict['properties']['stderr']['path'], self.stderr)
return json_dict
@classmethod
def _from_json(cls, json_dict, solver, fit_intercept, est_stderr,
tol, max_iter, reg_method, alpha, l1_ratio, coef, stderr):
"""
Helper function to construct the linear model used by from_json.
This function is designed to be override by subclasses.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for each linear model
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: method to regularize the model, one of (None, l1, l2, elastic_net).
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
linear model object: a linear model object specified by the json_dict and other arguments
"""
return cls(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
tol=tol, max_iter=max_iter,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr)
@classmethod
def from_json(cls, json_dict):
"""
Construct a linear model from a saved dictionary.
This function is NOT designed to be override by subclasses.
Parameters
----------
json_dict: a json dictionary containing the attributes of the linear model.
Returns
-------
linear model: a linear model object specified by the json_dict
"""
return cls._from_json(
json_dict,
solver=json_dict['properties']['solver'],
fit_intercept=json_dict['properties']['fit_intercept'],
est_stderr=json_dict['properties']['est_stderr'],
tol=json_dict['properties']['tol'],
max_iter=json_dict['properties']['max_iter'],
reg_method=json_dict['properties']['reg_method'],
alpha=json_dict['properties']['alpha'],
l1_ratio=json_dict['properties']['l1_ratio'],
coef=np.load(json_dict['properties']['coef']['path']),
stderr=np.load(json_dict['properties']['stderr']['path']))
class GLM(BaseModel):
"""
A wrapper for Generalized linear models.
    fit_regularized only supports Poisson and Binomial due to statsmodels,
    and it is not stable. Try not to use regularizations in GLM.
"""
def __init__(self,
family,
solver='IRLS',
fit_intercept=True,
est_stderr=False,
tol=1e-4,
max_iter=100,
reg_method=None,
alpha=0,
l1_ratio=0,
coef=None,
stderr=None,
dispersion=None):
"""
Constructor
Parameters
----------
solver: solver for GLM, default 'IRLS', otherwise will use gradient.
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: TRY NOT TO USE REGULARIZATIONS FOR GLM.
            method to regularize the model, one of (None, elastic_net).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
family: GLM family in the family_wrapper
dispersion: dispersion/scale of the GLM
-------
"""
super(GLM, self).__init__(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
tol=tol, max_iter=max_iter,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr)
self.family = family
self.dispersion = dispersion
if self.coef is not None:
dummy_X = dummy_Y = dummy_weight = np.zeros(1)
self._model = sm.GLM(dummy_Y, dummy_X, family=self.family.family,
freq_weights=dummy_weight)
def fit(self, X, Y, sample_weight=None):
"""
Fit the weighted model
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, ) or (n_samples, k) depending on family
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
"""
def _estimate_dispersion():
"""
Estimate dispersion/scale based on the fitted model
Returns
-------
dispersion: float
"""
if isinstance(self.family.family, (Binomial, Poisson)):
return 1.
return self._model.scale
def _estimate_stderr():
"""
Estimate standard deviation of the coefficients.
Returns
-------
standard deviation of the coefficients: array with the same shape as coef
Notes
-------
I think the stderr of statsmodels is wrong.
It uses the WLS stderr as the std err of GLM, which does not make sense,
            because the variance in WLS is inversely proportional to the weights.
            Anyway I will leave it here; the stderr is not that important.
"""
if self.reg_method is None or self.alpha < EPS:
return fit_results.bse * np.sqrt(self.dispersion / self._model.scale)
return None
X, sample_weight = self._transform_X_sample_weight(X, sample_weight=sample_weight)
self._raise_error_if_sample_weight_sum_zero(sample_weight)
Y = self._transform_Y(Y)
self._model = sm.GLM(Y, X, family=self.family.family, freq_weights=sample_weight)
# dof in weighted regression does not make sense, hard code it to the total weights
self._model.df_resid = np.sum(sample_weight)
if self.reg_method is None or self.alpha < EPS:
fit_results = self._model.fit(
maxiter=self.max_iter, tol=self.tol, method=self.solver)
else:
fit_results = self._model.fit_regularized(
method=self.reg_method, alpha=self.alpha,
L1_wt=self.l1_ratio, maxiter=self.max_iter)
self.coef = fit_results.params
self.dispersion = _estimate_dispersion()
if self.est_stderr:
self.stderr = _estimate_stderr()
def _transform_Y(self, Y):
"""
Transform the response Y
----------
Y : response matrix of shape (n_samples, ) or (n_samples, k) depending on family
Returns
-------
Y : response matrix of shape (n_samples, ) or (n_samples, k) depending on family
"""
if Y.ndim == 2 and Y.shape[1] == 1:
Y = Y.reshape(-1,)
return Y
def predict(self, X):
"""
Predict the Y value based on the model
----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
predicted value, of shape (n_samples, ), 1d
"""
self._raise_error_if_model_not_trained()
X = self._transform_X(X)
return self._model.predict(self.coef, exog=X)
def loglike_per_sample(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
observing each of Y value given each X value
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, ) or (n_samples, k) depending on family
Returns
-------
log_p: array of shape (n_samples, )
"""
self._raise_error_if_model_not_trained()
assert X.shape[0] == Y.shape[0]
Y = self._transform_Y(Y)
mu = self.predict(X)
return self.family.loglike_per_sample(Y, mu, scale=self.dispersion)
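    # Hedged usage sketch (illustrative comment, not part of the original class;
    # `some_family_wrapper` is a placeholder for any wrapper from this package's
    # families module exposing `.family` and `.loglike_per_sample`):
    #
    #     glm = GLM(family=some_family_wrapper)   # e.g. a Poisson family wrapper
    #     glm.fit(X, Y, sample_weight=weights)    # weighted IRLS fit
    #     mu = glm.predict(X)                     # conditional means
    #     log_p = glm.loglike_per_sample(X, Y)    # per-sample log probabilities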
def to_json(self, path):
"""
Generate json object of the model
Parameters
----------
path : the path to save the model
Returns
-------
json_dict: a dictionary containing the attributes of the GLM
"""
json_dict = super(GLM, self).to_json(path=path)
json_dict['properties'].update(
{
'family': {
'data_type': self.family.__class__.__name__,
'path': os.path.join(path, 'family.p')
},
'dispersion': {
'data_type': 'numpy.ndarray',
'path': os.path.join(path, 'dispersion.npy')
}
})
if not os.path.exists(os.path.dirname(json_dict['properties']['family']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['family']['path']))
pickle.dump(self.family, open(json_dict['properties']['family']['path'], 'wb'))
if not os.path.exists(os.path.dirname(json_dict['properties']['dispersion']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['dispersion']['path']))
np.save(json_dict['properties']['dispersion']['path'], self.dispersion)
return json_dict
@classmethod
def _from_json(cls, json_dict, solver, fit_intercept, est_stderr,
tol, max_iter, reg_method, alpha, l1_ratio, coef, stderr):
"""
Helper function to construct the GLM used by from_json.
This function overrides the parent class.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for GLM
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: method to regularize the model, one of (None, elastic_net).
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
GLM object: a GLM object specified by the json_dict and other arguments
"""
with open(json_dict['properties']['family']['path'], 'rb') as f:
return cls(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr, tol=tol, max_iter=max_iter,
family=pickle.load(f),
dispersion=np.load(json_dict['properties']['dispersion']['path']))
class OLS(BaseModel):
"""
A wrapper for Univariate and Multivariate Ordinary Least Squares (OLS).
"""
def __init__(self, solver='svd', fit_intercept=True, est_stderr=False,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None, n_targets=None):
"""
Constructor
Parameters
----------
solver: specific solver for OLS, default 'svd', possible solvers are:
{'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}.
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: method to regularize the model, one of (None, l1, l2, elastic_net).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
n_targets: the number of dependent variables
dispersion: dispersion/scale mareix of the OLS
-------
"""
super(OLS, self).__init__(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
tol=tol, max_iter=max_iter,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr)
self.dispersion = dispersion
self.n_targets = n_targets
self._pick_model()
if self.coef is not None:
self._model.coef_ = coef
self._model.intercept_ = 0
def _pick_model(self):
"""
Helper function to select a proper sklearn linear regression model
based on the regulariztaion specified by the user.
"""
if self.reg_method is None or self.alpha < EPS:
self._model = linear_model.LinearRegression(
fit_intercept=False)
if self.reg_method == 'l1':
self._model = linear_model.Lasso(
fit_intercept=False, alpha=self.alpha,
tol=self.tol, max_iter=self.max_iter)
if self.reg_method == 'l2':
self._model = linear_model.Ridge(
fit_intercept=False, alpha=self.alpha, tol=self.tol,
max_iter=self.max_iter, solver=self.solver)
if self.reg_method == 'elastic_net':
self._model = linear_model.ElasticNet(
fit_intercept=False, alpha=self.alpha,
l1_ratio=self.l1_ratio, tol=self.tol,
max_iter=self.max_iter)
def fit(self, X, Y, sample_weight=None):
"""
Fit the weighted model
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, n_targets), 2d
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
"""
def _estimate_dispersion():
"""
Estimate dispersion matrix based on the fitted model
Returns
-------
dispersion matrix: array of shape (n_targets, n_targets), 2d
"""
mu, wendog = _rescale_data(self.predict(X), Y, sample_weight)
wresid = mu - wendog
return np.dot(wresid.T, wresid) / np.sum(sample_weight)
def _estimate_stderr():
"""
Estimate standard deviation of the coefficients.
Returns
-------
standard deviation of the coefficients: array with the same shape as coef
Notes
-------
It is not the same stderr as Weighted Least Squares (WLS).
WLS assumes sample weight is inversely proportional to the covariance.
Useful links:
http://www.public.iastate.edu/~maitra/stat501/lectures/MultivariateRegression.pdf
https://stats.stackexchange.com/questions/52704/covariance-of-linear-
regression-coefficients-in-weighted-least-squares-method
http://pj.freefaculty.org/guides/stat/Regression/GLS/GLS-1-guide.pdf
https://stats.stackexchange.com/questions/27033/in-r-given-an-output-from-
optim-with-a-hessian-matrix-how-to-calculate-paramet
http://msekce.karlin.mff.cuni.cz/~vorisek/Seminar/0910l/jonas.pdf
"""
if self.reg_method is None or self.alpha < EPS:
wexog, wendog = _rescale_data(X_train, Y, sample_weight)
stderr = np.zeros((self.n_targets, X_train.shape[1]))
try:
XWX_inverse_XW_sqrt = np.linalg.inv(np.dot(wexog.T, wexog)).dot(wexog.T)
except np.linalg.linalg.LinAlgError:
logging.warning('Covariance matrix is singular, cannot estimate stderr.')
return None
sqrt_diag_XWX_inverse_XW_sqrt_W_XWX_inverse_XW_sqrt = np.sqrt(np.diag(
XWX_inverse_XW_sqrt.dot(np.diag(sample_weight)).dot(XWX_inverse_XW_sqrt.T)))
for target in range(self.n_targets):
stderr[target, :] = (np.sqrt(self.dispersion[target, target]) *
sqrt_diag_XWX_inverse_XW_sqrt_W_XWX_inverse_XW_sqrt)
return stderr.reshape(self.coef.shape)
return None
X_train, sample_weight = self._transform_X_sample_weight(X, sample_weight=sample_weight)
self._raise_error_if_sample_weight_sum_zero(sample_weight)
Y = self._transform_Y(Y)
self.n_targets = Y.shape[1]
self._model.fit(X_train, Y, sample_weight)
self.coef = self._model.coef_
self.dispersion = _estimate_dispersion()
if self.est_stderr:
self.stderr = _estimate_stderr()
def _transform_Y(self, Y):
"""
Transform the response Y
----------
Y : response matrix of shape (n_samples, ) or (n_samples, n_targets) depending on family
Returns
-------
Y : response matrix of shape (n_samples, n_targets)
"""
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
return Y
def predict(self, X):
"""
Predict the Y value based on the model
----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
predicted value, of shape (n_samples, n_targets), 2d
"""
self._raise_error_if_model_not_trained()
X = self._transform_X(X)
return self._model.predict(X).reshape(-1, self.n_targets)
    def get_dispersion(self, Y_len):
        """
        Return the dispersion matrix restricted to non-degenerate targets,
        dropping rows/columns whose variance on the diagonal is effectively zero.
        Parameters
        ----------
        Y_len : the number of response targets
        Returns
        -------
        dispersion: the reduced dispersion matrix, 2d
        """
        zero_inds = np.where(np.diag(self.dispersion) < EPS)[0]
non_zero_inds = np.setdiff1d(
np.arange(Y_len), zero_inds, assume_unique=True)
dispersion = self.dispersion[np.ix_(non_zero_inds, non_zero_inds)]
return dispersion
def loglike_per_sample_predict(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
observing each of Y value given each X value
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : observed response matrix of shape (n_samples, n_targets), 2d
        Returns
        -------
        log_p: array of shape (n_samples, )
        dispersion: the dispersion matrix reduced to non-degenerate targets
"""
self._raise_error_if_model_not_trained()
assert X.shape[0] == Y.shape[0]
mu = self.predict(X)
# https://stackoverflow.com/questions/13312498/how-to-find-degenerate-
# rows-columns-in-a-covariance-matrix
Y = self._transform_Y(Y)
zero_inds = np.where(np.diag(self.dispersion) < EPS)[0]
log_p = np.zeros(Y.shape[0])
log_p[~np.isclose(
np.linalg.norm(
Y[:, zero_inds] - mu[:, zero_inds], axis=1), 0)] = - np.Infinity
non_zero_inds = np.setdiff1d(
np.arange(Y.shape[1]), zero_inds, assume_unique=True)
dispersion = self.dispersion[np.ix_(non_zero_inds, non_zero_inds)]
if dispersion.shape[0] == 0:
# print ('2',np.exp(log_p))
return log_p, dispersion
if np.linalg.cond(dispersion) < 1 / EPS:
# This is a harsh test, if the det is ensured to be > 0
# all diagonal of dispersion will be > 0
# for the zero parts:
rv = multivariate_normal(cov=dispersion)
log_p += rv.logpdf(Y[:, non_zero_inds] - mu[:, non_zero_inds])
# print ('1',np.exp(log_p))
return log_p, dispersion
else:
raise ValueError(
"""
                Dispersion matrix is singular, cannot calculate loglike_per_sample.
                Most likely due to perfect correlations among dependent variables.
Try another model specification.
"""
)
def loglike_per_sample(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
observing each of Y value given each X value
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : observed response matrix of shape (n_samples, n_targets), 2d
Returns
-------
log_p: array of shape (n_samples, )
"""
self._raise_error_if_model_not_trained()
assert X.shape[0] == Y.shape[0]
mu = self.predict(X)
# https://stackoverflow.com/questions/13312498/how-to-find-degenerate-
# rows-columns-in-a-covariance-matrix
Y = self._transform_Y(Y)
zero_inds = np.where(np.diag(self.dispersion) < EPS)[0]
log_p = np.zeros(Y.shape[0])
log_p[~np.isclose(
np.linalg.norm(
Y[:, zero_inds] - mu[:, zero_inds], axis=1), 0)] = - np.Infinity
non_zero_inds = np.setdiff1d(
np.arange(Y.shape[1]), zero_inds, assume_unique=True)
dispersion = self.dispersion[np.ix_(non_zero_inds, non_zero_inds)]
if dispersion.shape[0] == 0:
# print ('2',np.exp(log_p))
return log_p
if np.linalg.cond(dispersion) < 1 / EPS:
# This is a harsh test, if the det is ensured to be > 0
# all diagonal of dispersion will be > 0
# for the zero parts:
rv = multivariate_normal(cov=dispersion)
log_p += rv.logpdf(Y[:, non_zero_inds] - mu[:, non_zero_inds])
#print ('1',np.exp(log_p))
return log_p
else:
raise ValueError(
"""
                Dispersion matrix is singular, cannot calculate loglike_per_sample.
                Most likely due to perfect correlations among dependent variables.
Try another model specification.
"""
)
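    # Note restating the logic above (no new behaviour): targets whose dispersion
    # diagonal is ~0 are treated as deterministic -- any deviation of Y from mu on
    # those columns scores -inf -- while the remaining targets are scored under a
    # multivariate normal with the reduced dispersion matrix.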
def to_json(self, path):
"""
Generate json object of the model
Parameters
----------
path : the path to save the model
Returns
-------
json_dict: a dictionary containing the attributes of the OLS
"""
json_dict = super(OLS, self).to_json(path=path)
json_dict['properties'].update(
{
'dispersion': {
'data_type': 'numpy.ndarray',
'path': os.path.join(path, 'dispersion.npy')
},
'n_targets': self.n_targets
})
if not os.path.exists(os.path.dirname(json_dict['properties']['dispersion']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['dispersion']['path']))
np.save(json_dict['properties']['dispersion']['path'], self.dispersion)
return json_dict
@classmethod
def _from_json(cls, json_dict, solver, fit_intercept, est_stderr,
tol, max_iter, reg_method, alpha, l1_ratio, coef, stderr):
"""
Helper function to construct the OLS used by from_json.
This function overrides the parent class.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for OLS
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
        reg_method: method to regularize the model, one of (None, l1, l2, elastic_net).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: if elastic_net, the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
OLS object: an OLS object specified by the json_dict and other arguments
"""
return cls(solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr,
tol=tol, max_iter=max_iter,
dispersion=np.load(json_dict['properties']['dispersion']['path']),
n_targets=json_dict['properties']['n_targets'])
class BaseMNL(BaseModel):
"""
A Base Multinomial Logistic regression model.
    BaseMNL does nothing itself; it is to be extended by
    (1) MNL with discrete output (DiscreteMNL) and
(2) MNL with probability output (CrossEntropyMNL).
"""
def __init__(self, solver='lbfgs', fit_intercept=True, est_stderr=False,
reg_method='l2', alpha=0, l1_ratio=0,
tol=1e-4, max_iter=100,
coef=None, stderr=None,
classes=None, n_classes=None):
"""
Constructor
Parameters
----------
solver: specific solver for each linear model, default 'lbfgs',
possible solvers are {'newton-cg', 'lbfgs', 'liblinear', 'sag'}.
Need to be consistent with the regularization method.
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating calculate std.err of coefficients (usually expensive) or not
        tol: tolerance of fitting error
        max_iter: maximum iteration of fitting
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
classes: an array of class labels
n_classes: the number of classes to be classified
-------
"""
super(BaseMNL, self).__init__(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
tol=tol, max_iter=max_iter,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr)
self.classes = classes
self.n_classes = n_classes
if self.coef is not None:
if self.n_classes >= 2:
self._pick_model()
self._model.coef_ = coef
self._model.classes_ = classes
self._model.intercept_ = 0
def _pick_model(self):
"""
Helper function to select a proper sklearn logistic regression model
based on the regulariztaion specified by the user.
"""
C = np.float64(1) / self.alpha
if self.n_classes == 2:
# perform logistic regression
self._model = linear_model.LogisticRegression(
fit_intercept=False, penalty=self.reg_method, C=C,
solver=self.solver, tol=self.tol, max_iter=self.max_iter)
else:
# perform multinomial logistic regression
self._model = linear_model.LogisticRegression(
fit_intercept=False, penalty=self.reg_method, C=C,
solver=self.solver, tol=self.tol, max_iter=self.max_iter,
multi_class='multinomial')
def fit(self, X, Y, sample_weight=None):
"""
Fit the weighted model
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, ) for DiscreteMNL and
(n_samples, n_classes) for CrossEntropyMNL
sample_weight: sample weight vector of shape (n_samples, ), or float, or None
"""
def _estimate_stderr():
"""
Estimate standard deviation of the coefficients.
Returns
-------
            None for now: with arbitrary sample weights there is no proper
            likelihood, hence no Hessian of the log likelihood from which
            to derive the standard errors.
Notes
-------
http://mplab.ucsd.edu/tutorials/MultivariateLogisticRegression.pdf
https://github.com/cran/mlogit/blob/master/R/mlogit.methods.R
https://arxiv.org/pdf/1404.3177.pdf
https://stats.stackexchange.com/questions/283780/calculate-standard-
error-of-weighted-logistic-regression-coefficients
Two codes to calculate hessian:
1. with sample weights:
https://github.com/scikit-learn/scikit-learn/
blob/ab93d657eb4268ac20c4db01c48065b5a1bfe80d/sklearn/linear_model/logistic.py
2. without sample weights
http://www.statsmodels.org/dev/_modules/statsmodels/
discrete/discrete_model.html#MNLogit
"""
return None
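            # Sketch of the unweighted binary case (for reference only, not
            # implemented here): the asymptotic covariance of the
            # coefficients is inv(X' W X) with W = diag(p_i * (1 - p_i)),
            # so stderr = sqrt(diag(inv(X' W X))); arbitrary sample weights
            # break this derivation, hence the None above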
X, sample_weight = self._transform_X_sample_weight(X, sample_weight=sample_weight)
self._raise_error_if_sample_weight_sum_zero(sample_weight)
X, Y, sample_weight = self._label_encoder(
X, Y, sample_weight)
assert Y.ndim == 1
classes = np.unique(Y)
self.n_classes = len(classes)
if self.n_classes == 1:
            # only one class present, so there is nothing to fit;
            # self.coef is an all-zeros array of shape (n_features, 1)
self.coef = np.zeros((X.shape[1], 1))
self.classes = classes
else:
self._pick_model()
self._model.fit(X, Y, sample_weight=sample_weight)
            # sklearn stores coef_ as (1, n_features) for binary problems and
            # (n_classes, n_features) otherwise; keep that convention here
self.coef = self._model.coef_
self.classes = self._model.classes_
if self.est_stderr:
self.stderr = _estimate_stderr()
@staticmethod
def _label_encoder(X, Y, sample_weight):
"""
Convert input to proper format to be used by sklearn logistic regression.
Mainly transforms Y to a 1d vector containing the class label for each sample.
        This function is designed to be overridden by subclasses.
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, ) for DiscreteMNL and
(n_samples, n_classes) for CrossEntropyMNL
sample_weight: sample weight vector of shape (n_samples, )
Returns
-------
X_transformed : design matrix of shape (n, n_features), 2d
Y_transformed : response matrix of shape (n, )
sample_weight_transformed: sample weight vector of shape (n, )
where n:
is n_samples in the discrete case and
is n_samples * n_classes in the cross entropy case
"""
raise NotImplementedError
def _label_decoder(self, Y):
"""
Convert the response vector to probability matrix.
        This function is designed to be overridden by subclasses.
Parameters
----------
Y : response matrix of shape (n_samples, ) for DiscreteMNL and
(n_samples, n_classes) for CrossEntropyMNL
Returns
-------
Y_transformed : of shape (n_samples, n_classes).
"""
raise NotImplementedError
def predict_log_proba(self, X):
"""
        Predict the log probability of each class
        Parameters
        ----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
log probability matrix : of shape (n_samples, n_classes), 2d
"""
self._raise_error_if_model_not_trained()
X = self._transform_X(X)
if self.n_classes == 1:
return np.zeros((X.shape[0], 1))
return self._model.predict_log_proba(X)
def predict(self, X):
"""
        Predict the most likely class label for each sample
        Parameters
        ----------
X : design matrix of shape (n_samples, n_features), 2d
Returns
-------
labels : of shape (n_samples, ), 1d
"""
self._raise_error_if_model_not_trained()
X = self._transform_X(X)
if self.n_classes == 1:
            return self.classes[np.zeros(X.shape[0], dtype=int)]
return self._model.predict(X)
def loglike_per_sample(self, X, Y):
"""
Given a set of X and Y, calculate the log probability of
        observing each Y value given the corresponding X value
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, ) for DiscreteMNL and
(n_samples, n_classes) for CrossEntropyMNL
Returns
-------
log_p: array of shape (n_samples, )
"""
self._raise_error_if_model_not_trained()
assert X.shape[0] == Y.shape[0]
Y = self._label_decoder(Y)
assert X.shape[0] == Y.shape[0]
assert Y.shape[1] == self.n_classes
log_prob = self.predict_log_proba(X)
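        # clip -inf log probabilities to a large negative number so that the
        # multiplication by a zero entry of Y below yields 0 instead of NaN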
log_prob[log_prob == -np.inf] = -999999
log_p = np.sum(log_prob * Y, axis=1)
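        # rows of Y that are (numerically) all zero carry no valid label, so
        # their log likelihood is -inf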
        log_p[np.sum(Y, axis=1) < EPS] = -np.inf
return log_p
@classmethod
def _from_json_MNL(cls, json_dict, solver, fit_intercept, est_stderr,
tol, max_iter, reg_method, alpha, l1_ratio, coef, stderr):
"""
Helper function within the BaseMNL class to construct the specific MNL used by _from_json.
        This function is designed to be overridden by the concrete MNL subclasses.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for each MNL
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
Discrete/CrossEntropyMNL object: a MNL object specified by the json_dict and other arguments
"""
return cls(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr, tol=tol, max_iter=max_iter)
@classmethod
def _from_json(cls, json_dict, solver, fit_intercept, est_stderr,
tol, max_iter, reg_method, alpha, l1_ratio, coef, stderr):
"""
Helper function to construct the linear model used by from_json.
This function overrides the parent class.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for each MNL
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
Discrete/CrossEntropyMNL object: a MNL object specified by the json_dict and other arguments
"""
return cls._from_json_MNL(
json_dict,
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr, tol=tol, max_iter=max_iter)
class DiscreteMNL(BaseMNL):
"""
    An MNL for the case where responses are discrete labels.
"""
def __init__(self, solver='lbfgs', fit_intercept=True, est_stderr=False,
reg_method='l2', alpha=0, l1_ratio=0,
tol=1e-4, max_iter=100,
coef=None, stderr=None,
classes=None):
"""
Constructor
Parameters
----------
solver: specific solver for each linear model, default 'lbfgs',
possible solvers are {'newton-cg', 'lbfgs', 'liblinear', 'sag'}.
Need to be consistent with the regularization method.
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
classes: class labels if loading from trained model
-------
"""
n_classes = None if classes is None else classes.shape[0]
super(DiscreteMNL, self).__init__(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
tol=tol, max_iter=max_iter,
coef=coef, stderr=stderr,
classes=classes, n_classes=n_classes)
@staticmethod
def _label_encoder(X, Y, sample_weight):
"""
Convert input to proper format to be used by sklearn logistic regression.
        This is essentially a no-op for the discrete case.
        This function overrides the parent class.
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, )
sample_weight: sample weight vector of shape (n_samples, )
Returns
-------
X_transformed : design matrix of shape (n_samples, n_features), 2d
Y_transformed : response matrix of shape (n_samples, )
sample_weight_transformed: sample weight vector of shape (n_samples, )
"""
if Y.ndim == 2 and Y.shape[1] == 1:
Y = Y.reshape(-1,)
return X, Y, sample_weight
def _label_decoder(self, Y):
"""
Convert the response vector to probability matrix.
        This function overrides the parent class.
Parameters
----------
Y : response matrix of shape (n_samples, )
Returns
-------
Y_transformed : of shape (n_samples, n_classes).
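        Examples
        --------
        A minimal sketch (illustrative values): with classes ['a', 'b', 'c'],
        Y = ['b', 'a'] decodes to [[0., 1., 0.], [1., 0., 0.]].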
"""
        # labels outside self.classes decode to all-zero rows, which yield a
        # log likelihood of -inf in loglike_per_sample
if Y.ndim == 2 and Y.shape[1] == 1:
Y = Y.reshape(-1,)
assert Y.ndim == 1
if self.n_classes == 1:
return (Y == self.classes).reshape(-1, 1).astype(float)
if self.n_classes == 2:
            # sklearn's label_binarize collapses the binary case to a single
            # column, so build the two-column indicator matrix manually
label = np.zeros((Y.shape[0], self.n_classes))
for clas_i, clas in enumerate(self.classes):
label[:, clas_i] = (Y == clas).astype(float)
return label
        return label_binarize(Y, classes=self.classes)
def to_json(self, path):
"""
Generate json object of the model
Parameters
----------
path : the path to save the model
Returns
-------
json_dict: a dictionary containing the attributes of the DiscreteMNL
"""
json_dict = super(DiscreteMNL, self).to_json(path=path)
json_dict['properties'].update(
{
'classes': {
'data_type': 'numpy.ndarray',
'path': os.path.join(path, 'classes.npy')
}
})
if not os.path.exists(os.path.dirname(json_dict['properties']['classes']['path'])):
os.makedirs(os.path.dirname(json_dict['properties']['classes']['path']))
np.save(json_dict['properties']['classes']['path'], self.classes)
return json_dict
@classmethod
def _from_json_MNL(cls, json_dict, solver, fit_intercept, est_stderr,
reg_method, alpha, l1_ratio, coef, stderr,
tol, max_iter):
"""
        Helper function to construct the DiscreteMNL used by _from_json.
        This function overrides the parent class.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for each linear model
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
        reg_method: method to regularize the model, one of (None, l1, l2, elastic_net).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
DiscreteMNL object: a DiscreteMNL object specified by the json_dict and other arguments
"""
return cls(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr,
tol=tol, max_iter=max_iter,
classes=np.load(json_dict['properties']['classes']['path']))
class CrossEntropyMNL(BaseMNL):
"""
    An MNL for the case where responses are probabilities that sum to one.
"""
def __init__(self, solver='lbfgs', fit_intercept=True, est_stderr=False,
reg_method='l2', alpha=0, l1_ratio=0,
tol=1e-4, max_iter=100,
coef=None, stderr=None,
n_classes=None):
"""
Constructor
Parameters
----------
solver: specific solver for each linear model, default 'lbfgs',
possible solvers are {'newton-cg', 'lbfgs', 'liblinear', 'sag'}.
Need to be consistent with the regularization method.
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients if loading from trained model
stderr: the std.err of coefficients if loading from trained model
n_classes: number of classes to be classified
-------
"""
classes = None if n_classes is None else np.arange(n_classes)
super(CrossEntropyMNL, self).__init__(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
tol=tol, max_iter=max_iter,
coef=coef, stderr=stderr,
classes=classes, n_classes=n_classes)
@staticmethod
def _label_encoder(X, Y, sample_weight):
"""
Convert input to proper format to be used by sklearn logistic regression.
Mainly transforms Y to a 1d vector containing the class label for each sample.
        This function overrides the parent class.
Parameters
----------
X : design matrix of shape (n_samples, n_features), 2d
Y : response matrix of shape (n_samples, n_classes)
sample_weight: sample weight vector of shape (n_samples, )
Returns
-------
X_repeated : design matrix of shape (n_samples * n_classes, n_features), 2d
Y_repeated : response matrix of shape (n_samples * n_classes, )
sample_weight_repeated: sample weight vector of shape (n_samples * n_classes, )
Notes
----------
idea from https://stats.stackexchange.com/questions/90622/
regression-model-where-output-is-a-probability
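        Examples
        --------
        A minimal sketch (illustrative values, unit sample weights): two
        samples with Y = [[0.2, 0.8], [0.5, 0.5]] become X repeated row-wise
        per class, Y_repeated = [0, 1, 0, 1] and
        sample_weight_repeated = [0.2, 0.8, 0.5, 0.5], i.e. one weighted row
        per (sample, class) pair.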
"""
n_samples, n_classes = X.shape[0], Y.shape[1]
X_repeated = np.repeat(X, n_classes, axis=0)
Y_repeated = np.tile(np.arange(n_classes), n_samples)
sample_weight_repeated = Y.reshape(-1, ) * np.repeat(sample_weight, n_classes)
return X_repeated, Y_repeated, sample_weight_repeated
def _label_decoder(self, Y):
"""
Convert the response vector to probability matrix.
        Since the responses are already probabilities, this function only
        validates the shape of Y.
        This function overrides the parent class.
Parameters
----------
Y : response matrix of shape (n_samples, n_classes)
Returns
-------
Y_transformed : of shape (n_samples, n_classes).
"""
assert Y.ndim == 2
assert Y.shape[1] == self.n_classes
return Y
def to_json(self, path):
"""
Generate json object of the model
Parameters
----------
path : the path to save the model
Returns
-------
json_dict: a dictionary containing the attributes of the CrossEntropyMNL
"""
json_dict = super(CrossEntropyMNL, self).to_json(path=path)
json_dict['properties'].update(
{
'n_classes': self.n_classes
})
return json_dict
@classmethod
def _from_json_MNL(cls, json_dict, solver, fit_intercept, est_stderr,
reg_method, alpha, l1_ratio, coef, stderr,
tol, max_iter):
"""
        Helper function to construct the CrossEntropyMNL used by _from_json.
        This function overrides the parent class.
Parameters
----------
json_dict : the dictionary that specifies the model
solver: specific solver for each linear model
fit_intercept: boolean indicating fit intercept or not
        est_stderr: boolean indicating whether to calculate the std.err of coefficients (usually expensive)
        tol: tolerance of the fitting error
        max_iter: maximum number of fitting iterations
reg_method: method to regularize the model, one of (l1, l2).
Need to be compatible with the solver.
alpha: regularization strength
l1_ratio: the l1 alpha ratio
coef: the coefficients
stderr: the std.err of coefficients
Returns
-------
CrossEntropyMNL object:
a CrossEntropyMNL object specified by the json_dict and other arguments
"""
return cls(
solver=solver, fit_intercept=fit_intercept, est_stderr=est_stderr,
reg_method=reg_method, alpha=alpha, l1_ratio=l1_ratio,
coef=coef, stderr=stderr,
tol=tol, max_iter=max_iter,
n_classes=json_dict['properties']['n_classes'])
| 40.834646 | 100 | 0.601444 |
4f4e9b4ac10a6f1a772ea5762913a17d91017f6b | 413 | py | Python | chatrooms/apps/chats/pagination.py | EliasCampos/chatrooms_py | 9e1abaf5887293e1df6f0b91eb0f9587e8d405fc | [
"MIT"
] | null | null | null | chatrooms/apps/chats/pagination.py | EliasCampos/chatrooms_py | 9e1abaf5887293e1df6f0b91eb0f9587e8d405fc | [
"MIT"
] | null | null | null | chatrooms/apps/chats/pagination.py | EliasCampos/chatrooms_py | 9e1abaf5887293e1df6f0b91eb0f9587e8d405fc | [
"MIT"
] | null | null | null | from typing import List
from chatrooms.apps.common.pagination import PageNumberPagination
from chatrooms.apps.chats.schemas import ChatDetail, ChatOwn, ChatMessageDetail
class ChatPagination(PageNumberPagination):
results: List[ChatDetail]
class ChatOwnPagination(PageNumberPagination):
results: List[ChatOwn]
class ChatMessagePagination(PageNumberPagination):
results: List[ChatMessageDetail]
| 24.294118 | 79 | 0.828087 |
4f4a818a9f5f9959ad78acf5db0f5b53259306ab | 1,052 | py | Python | Geometry/HGCalCommonData/test/python/dumpHGCalGeometryDD4Hep_cfg.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | Geometry/HGCalCommonData/test/python/dumpHGCalGeometryDD4Hep_cfg.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | Geometry/HGCalCommonData/test/python/dumpHGCalGeometryDD4Hep_cfg.py | AlexDroll/cmssw | ef485116d14d07f9c9e591c01b4597c1c9a967cb | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("DDHGCalTBModuleXTest")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.MessageLogger.cerr.FwkReport.reportEvery = 5
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HGCalGeom')
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
confGeomXMLFiles = cms.FileInPath('Geometry/HGCalCommonData/data/dd4hep/testHGCalV10.xml'),
appendToDataLabel = cms.string('DDHGCal')
)
process.testDump = cms.EDAnalyzer("DDTestDumpFile",
outputFileName = cms.untracked.string('HGCalDD4Hep.root'),
DDDetector = cms.ESInputTag('','DDHGCal')
)
process.p = cms.Path(process.testDump)
| 40.461538 | 135 | 0.612167 |
4f4ec1f3ecca56f14724e079707b455f3d7340bf | 764 | py | Python | plugins/ok.py | ItzSjDude/PikaBotPlugins | 6b468a43caefbea5b8f3cd2c1ca83790380e1f5b | [
"Apache-2.0"
] | 1 | 2020-10-29T15:44:57.000Z | 2020-10-29T15:44:57.000Z | plugins/ok.py | ItzSjDude/PikaBotPlugins | 6b468a43caefbea5b8f3cd2c1ca83790380e1f5b | [
"Apache-2.0"
] | null | null | null | plugins/ok.py | ItzSjDude/PikaBotPlugins | 6b468a43caefbea5b8f3cd2c1ca83790380e1f5b | [
"Apache-2.0"
] | 4 | 2020-10-28T09:02:35.000Z | 2020-12-27T22:45:57.000Z | """Emoji
Available Commands:
.ok"""
import asyncio
from pikabot.utils import ItzSjDude
@ItzSjDude(outgoing=True, pattern="ok")
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 36)
await event.edit("ok")
animation_chars = [
"OK",
"BOSS",
"OK MAN",
"OK BITCH",
"OK FUKCER",
"OK SEXY BABE",
"OK GAY",
"OK SIR",
"GO AND SAY OK",
"OK LOL",
"YAA OK",
"FCUK",
"OK",
"Boss",
"Yeahhhhhh",
"O",
"K",
"Ok Boss! 😇",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])
| 16.977778 | 49 | 0.502618 |
4f4e5d2f6099d8086f63842cb745aa6b06478611 | 1,655 | py | Python | tflite/ReverseSequenceOptions.py | szha/tensorflow-onnx | 25f8b0c78074784fcc33c1d068911f8363581e6c | [
"MIT"
] | 193 | 2017-12-20T16:46:20.000Z | 2022-03-29T07:40:54.000Z | tflite/ReverseSequenceOptions.py | szha/tensorflow-onnx | 25f8b0c78074784fcc33c1d068911f8363581e6c | [
"MIT"
] | 141 | 2017-12-21T08:00:20.000Z | 2021-06-15T14:53:03.000Z | tflite/ReverseSequenceOptions.py | szha/tensorflow-onnx | 25f8b0c78074784fcc33c1d068911f8363581e6c | [
"MIT"
] | 55 | 2017-12-22T18:40:13.000Z | 2022-01-17T05:43:51.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ReverseSequenceOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsReverseSequenceOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ReverseSequenceOptions()
x.Init(buf, n + offset)
return x
@classmethod
def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# ReverseSequenceOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ReverseSequenceOptions
def SeqDim(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# ReverseSequenceOptions
def BatchDim(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
def ReverseSequenceOptionsEnd(builder): return builder.EndObject()
| 36.777778 | 114 | 0.726284 |
4f4cc3af6f1d5c626b3e2ea7939ecad0ee2d41f1 | 11,968 | py | Python | tensorflow/contrib/eager/python/examples/revnet/revnet_test.py | kyechou/tensorflow | 7a7ff1d40ff8a6bcac2d9c655b8cfb8500ee1d14 | [
"Apache-2.0"
] | 4 | 2021-06-15T17:26:07.000Z | 2021-11-17T10:58:08.000Z | tensorflow/contrib/eager/python/examples/revnet/revnet_test.py | swjwenjie/tensorflow | 690d9e3f465a55cbad43051574dde04189768a0c | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/eager/python/examples/revnet/revnet_test.py | swjwenjie/tensorflow | 690d9e3f465a55cbad43051574dde04189768a0c | [
"Apache-2.0"
] | 1 | 2019-12-20T01:12:47.000Z | 2019-12-20T01:12:47.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for basic building blocks used in eager mode RevNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks_test
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
from tensorflow.python.client import device_lib
tfe = tf.contrib.eager
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
logits, saved_hidden = model(inputs)
grads, loss = model.compute_gradients(
saved_hidden=saved_hidden, labels=labels)
optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
return logits, loss
class RevNetTest(tf.test.TestCase):
def setUp(self):
super(RevNetTest, self).setUp()
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
# Reconstruction could cause numerical error, use double precision for tests
config.dtype = tf.float64
config.fused = False # Fused batch norm does not support tf.float64
# Reduce the batch size for tests because the OSS version runs
# in constrained GPU environment with 1-2GB of memory.
config.batch_size = 2
shape = (config.batch_size,) + config.input_shape
self.model = revnet.RevNet(config=config)
self.x = tf.random_normal(shape=shape, dtype=tf.float64)
self.t = tf.random_uniform(
shape=[config.batch_size],
minval=0,
maxval=config.n_classes,
dtype=tf.int64)
self.config = config
def tearDown(self):
del self.model
del self.x
del self.t
del self.config
super(RevNetTest, self).tearDown()
def test_call(self):
"""Test `call` function."""
y, _ = self.model(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
def _check_grad_angle_combined(self, grads, grads_true):
"""Verify that the reconstructed gradients has correct direction.
Due to numerical imprecision, the magnitude may be slightly different.
Yet according to the paper, the angle should be roughly the same.
Args:
grads: list of gradients from reconstruction
grads_true: list of true gradients
"""
def _combine(gs):
return [tf.reshape(g, [-1]) for g in gs]
g1_all = tf.concat(_combine(grads), axis=0)
g2_all = tf.concat(_combine(grads_true), axis=0)
self.assertEqual(len(g1_all.shape), 1)
self.assertEqual(len(g2_all.shape), 1)
degree = blocks_test.compute_degree(g1_all, g2_all)
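    # compute_degree (from blocks_test) is taken to return the angle, in
    # degrees, between the two flattened gradient vectors; allow at most one
    # degree of deviation between reconstructed and true gradients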
self.assertLessEqual(degree, 1e0)
def test_compute_gradients(self):
"""Test `compute_gradients` function."""
_, saved_hidden = self.model(self.x) # Initialize model
grads, loss = self.model.compute_gradients(
saved_hidden=saved_hidden, labels=self.t)
vars_ = self.model.trainable_variables
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
for grad, var in zip(grads, vars_):
self.assertEqual(grad.shape, var.shape)
# Compare against the true gradient computed by the tape
with tf.GradientTape() as tape:
logits, _ = self.model(self.x)
loss_true = self.model.compute_loss(logits=logits, labels=self.t)
grads_true = tape.gradient(loss_true, vars_)
self.assertAllClose(loss, loss_true)
self.assertAllClose(grads, grads_true, rtol=1e-4, atol=1e-4)
self._check_grad_angle_combined(grads, grads_true)
def test_call_defun(self):
"""Test `call` function with defun."""
y, _ = tfe.defun(self.model.call)(self.x, training=False)
self.assertEqual(y.shape, [self.config.batch_size, self.config.n_classes])
def test_compute_gradients_defun(self):
"""Test `compute_gradients` function with defun."""
compute_gradients = tfe.defun(self.model.compute_gradients)
_, saved_hidden = self.model(self.x)
grads, _ = compute_gradients(saved_hidden=saved_hidden, labels=self.t)
vars_ = self.model.trainable_variables
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
for grad, var in zip(grads, vars_):
if grad is not None:
self.assertEqual(grad.shape, var.shape)
def test_training_graph(self):
"""Test model training in graph mode."""
with tf.Graph().as_default():
config = config_.get_hparams_cifar_38()
config.add_hparam("n_classes", 10)
config.add_hparam("dataset", "cifar-10")
x = tf.random_normal(
shape=(self.config.batch_size,) + self.config.input_shape)
t = tf.random_uniform(
shape=(self.config.batch_size,),
minval=0,
maxval=self.config.n_classes,
dtype=tf.int32)
global_step = tf.Variable(0., trainable=False)
model = revnet.RevNet(config=config)
_, saved_hidden = model(x)
grads, _ = model.compute_gradients(saved_hidden=saved_hidden, labels=t)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
train_op = optimizer.apply_gradients(
zip(grads, model.trainable_variables), global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(1):
sess.run(train_op)
# Benchmark related
def device_and_data_format():
return ("/gpu:0",
"channels_first") if tf.test.is_gpu_available() else ("/cpu:0",
"channels_last")
def random_batch(batch_size, config):
shape = (batch_size,) + config.input_shape
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=config.n_classes, dtype=tf.int32)
return images, labels
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class RevNetBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for RevNet."""
def _train_batch_sizes(self):
"""Shamelessly copied from `resnet50_test.py`.
Note: This is targeted towards ImageNet. CIFAR-10 should allow more
aggressive batch sizes.
Returns:
A tuple of possible batch sizes
"""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == "GPU":
if "K20" in device.physical_device_desc:
return (16,)
if "P100" in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
return (32,)
return (16, 32)
def _force_device_sync(self):
"""Shamelessly copied from `resnet50_test.py`."""
tf.constant(1.).cpu()
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = "%s_%s_batch_%d_%s" % (label, dev, batch_size, data_format)
extras = {"examples_per_sec": batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _benchmark_eager_apply(self,
label,
device_and_format,
defun=False,
execution_mode=None):
config = config_.get_hparams_imagenet_56()
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = revnet.RevNet(config=config)
if defun:
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 10
with tf.device(device):
images, _ = random_batch(batch_size, config)
for _ in range(num_burn):
model(images, training=False)
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in range(num_iters):
model(images, training=False)
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply(
"eager_apply_sync", device_and_data_format(), defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
"eager_apply_async",
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_call_defun(self):
self._benchmark_eager_apply(
"eager_apply_with_defun", device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
config = config_.get_hparams_imagenet_56()
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, config)
model = revnet.RevNet(config=config)
optimizer = tf.train.GradientDescentOptimizer(0.1)
if defun:
model.call = tfe.defun(model.call)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in range(num_burn):
(images, labels) = iterator.next()
train_one_iter(model, images, labels, optimizer)
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in range(num_iters):
(images, labels) = iterator.next()
train_one_iter(model, images, labels, optimizer)
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train(
"eager_train_sync", MockIterator, device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
"eager_train_async",
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_defun(self):
self._benchmark_eager_train(
"eager_train", MockIterator, device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device("/device:CPU:0"):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
"eager_train_dataset_with_defun",
make_iterator,
device_and_data_format(),
defun=True)
if __name__ == "__main__":
tf.enable_eager_execution()
tf.test.main()
| 35.096774 | 80 | 0.670538 |
4f4e7ab2d2522ddf52aebd6d4613c28b1ea42cb7 | 276 | py | Python | python/threadtest.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | python/threadtest.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | python/threadtest.py | KasperD/script-fun | 919c8cfca65ce2177213145e6c553c1f4fa33b63 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import time
from threading import Thread
def myFunc(i):
print "sleeping 5 sec from thread %d" % i
time.sleep(5)
print "finished sleeping from thread %d" % i
for i in range(10):
t = Thread(target=myFunc, args=(i,))
    t.start()
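# a minimal extension (sketch): collect the Thread objects in a list and call
# join() on each one if the script should block until every worker finishes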
| 19.714286 | 49 | 0.623188 |
4f4e8ca4bad711b5bd38a6b91ba75ceb857a6473 | 46,972 | py | Python | putil/plot/panel.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | 6 | 2015-12-15T04:09:08.000Z | 2020-02-21T01:40:57.000Z | putil/plot/panel.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | null | null | null | putil/plot/panel.py | pmacosta/putil | 416cea52df8221981727e25d133e9b4e3f464798 | [
"MIT"
] | 2 | 2016-01-21T23:29:17.000Z | 2020-02-21T01:41:05.000Z | # panel.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0302,R0912,R0913,R0914,R0915,W0105,W0212
# PyPI imports
import numpy
import matplotlib.pyplot as plt
# Putil imports
import putil.exh
import putil.pcontracts
from .series import Series
from .functions import _F, _intelligent_ticks, _uniquify_tick_labels
from .constants import AXIS_LABEL_FONT_SIZE, AXIS_TICKS_FONT_SIZE, LEGEND_SCALE
###
# Exception tracing initialization code
###
"""
[[[cog
import os, sys
if sys.hexversion < 0x03000000:
import __builtin__
else:
import builtins as __builtin__
sys.path.append(os.environ['TRACER_DIR'])
import trace_ex_plot_panel
exobj_plot = trace_ex_plot_panel.trace_module(no_print=True)
]]]
[[[end]]]
"""
###
# Functions
###
def _legend_position_validation(obj):
""" Validate if a string is a valid legend position """
options = [
'BEST', 'UPPER RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT',
'RIGHT', 'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER',
'UPPER CENTER', 'CENTER'
]
if (obj is not None) and (not isinstance(obj, str)):
return True
if ((obj is None) or
(obj and any([item.lower() == obj.lower() for item in options]))):
return False
return True
###
# Class
###
class Panel(object):
r"""
Defines a panel within a figure
:param series: One or more data series
:type series: :py:class:`putil.plot.Series` *or list of*
:py:class:`putil.plot.Series` *or None*
:param primary_axis_label: Primary dependent axis label
:type primary_axis_label: string
:param primary_axis_units: Primary dependent axis units
:type primary_axis_units: string
:param primary_axis_ticks: Primary dependent axis tick marks. If not None
overrides automatically generated tick
marks if the axis type is linear. If None
automatically generated tick marks are used for
the primary axis
:type primary_axis_ticks: list, Numpy vector or None
:param secondary_axis_label: Secondary dependent axis label
:type secondary_axis_label: string
:param secondary_axis_units: Secondary dependent axis units
:type secondary_axis_units: string
:param secondary_axis_ticks: Secondary dependent axis tick marks. If not
None overrides automatically generated tick
marks if the axis type is linear. If None
automatically generated tick marks are used
for the secondary axis
:type secondary_axis_ticks: list, Numpy vector or None
:param log_dep_axis: Flag that indicates whether the dependent (primary and
/or secondary) axis is linear (False) or logarithmic
(True)
:type log_dep_axis: boolean
:param legend_props: Legend properties. See
:py:attr:`putil.plot.Panel.legend_props`. If None the
legend is placed in the best position in one column
:type legend_props: dictionary or None
:param display_indep_axis: Flag that indicates whether the independent axis
is displayed (True) or not (False)
:type display_indep_axis: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.__init__
:raises:
* RuntimeError (Argument \`display_indep_axis\` is not valid)
* RuntimeError (Argument \`legend_props\` is not valid)
* RuntimeError (Argument \`log_dep_axis\` is not valid)
* RuntimeError (Argument \`primary_axis_label\` is not valid)
* RuntimeError (Argument \`primary_axis_ticks\` is not valid)
* RuntimeError (Argument \`primary_axis_units\` is not valid)
* RuntimeError (Argument \`secondary_axis_label\` is not valid)
* RuntimeError (Argument \`secondary_axis_ticks\` is not valid)
* RuntimeError (Argument \`secondary_axis_units\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Legend property \`cols\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* TypeError (Legend property \`pos\` is not one of ['BEST', 'UPPER
RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', 'CENTER
LEFT', 'CENTER RIGHT', 'LOWER CENTER', 'UPPER CENTER', 'CENTER']
(case insensitive))
* ValueError (Illegal legend property \`*[prop_name]*\`)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
# pylint: disable=R0902,R0903,W0102
def __init__(self, series=None, primary_axis_label='',
primary_axis_units='', primary_axis_ticks=None,
secondary_axis_label='', secondary_axis_units='',
secondary_axis_ticks=None, log_dep_axis=False,
legend_props=None, display_indep_axis=False):
# Private attributes
self._series = None
self._primary_axis_label = None
self._secondary_axis_label = None
self._primary_axis_units = None
self._secondary_axis_units = None
self._primary_axis_ticks = None
self._secondary_axis_ticks = None
self._log_dep_axis = None
self._recalculate_series = False
self._legend_props = {'pos':'BEST', 'cols':1}
self._display_indep_axis = None
# Private attributes
self._legend_pos_list = [
'best', 'upper right', 'upper left', 'lower left', 'lower right',
'right', 'center left', 'center right', 'lower center',
'upper center', 'center'
]
self._panel_has_primary_axis = False
self._panel_has_secondary_axis = False
self._primary_dep_var_min = None
self._primary_dep_var_max = None
self._primary_dep_var_div = None
self._primary_dep_var_unit_scale = None
self._primary_dep_var_locs = None
self._primary_dep_var_labels = None
self._secondary_dep_var_min = None
self._secondary_dep_var_max = None
self._secondary_dep_var_div = None
self._secondary_dep_var_unit_scale = None
self._secondary_dep_var_locs = None
self._secondary_dep_var_labels = None
self._legend_props_list = ['pos', 'cols']
self._legend_props_pos_list = [
'BEST', 'UPPER RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT',
'RIGHT', 'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER',
'UPPER CENTER', 'CENTER'
]
# Exceptions definition
invalid_prim_ex = putil.exh.addai('primary_axis_ticks')
invalid_sec_ex = putil.exh.addai('secondary_axis_ticks')
invalid_prim_ex(
(primary_axis_ticks is not None) and (
(not isinstance(primary_axis_ticks, list)) and
(not isinstance(primary_axis_ticks, numpy.ndarray))
)
)
invalid_sec_ex(
(secondary_axis_ticks is not None) and (
(not isinstance(secondary_axis_ticks, list)) and
(not isinstance(secondary_axis_ticks, numpy.ndarray)))
)
# Assignment of arguments to attributes
# Order here is important to avoid unnecessary re-calculating of
# panel axes if log_dep_axis is True
self._set_log_dep_axis(log_dep_axis)
self._primary_axis_ticks = (
primary_axis_ticks
if not self.log_dep_axis else
None
)
self._secondary_axis_ticks = (
secondary_axis_ticks
if not self.log_dep_axis else
None
)
self._set_series(series)
self._set_primary_axis_label(primary_axis_label)
self._set_primary_axis_units(primary_axis_units)
self._set_secondary_axis_label(secondary_axis_label)
self._set_secondary_axis_units(secondary_axis_units)
self._set_legend_props(legend_props)
self._set_display_indep_axis(display_indep_axis)
def __bool__(self): # pragma: no cover
"""
Returns :code:`True` if the panel has at least a series associated
with it, :code:`False` otherwise
.. note:: This method applies to Python 3.x
"""
return self._series is not None
def __iter__(self):
"""
Returns an iterator over the series object(s) in the panel. For
example:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('plot_example_6.py', cog.out)
.. =]=
.. code-block:: python
# plot_example_6.py
from __future__ import print_function
import numpy, putil.plot
def panel_iterator_example(no_print):
source1 = putil.plot.BasicSource(
indep_var=numpy.array([1, 2, 3, 4]),
dep_var=numpy.array([1, -10, 10, 5])
)
source2 = putil.plot.BasicSource(
indep_var=numpy.array([100, 200, 300, 400]),
dep_var=numpy.array([50, 75, 100, 125])
)
series1 = putil.plot.Series(
data_source=source1,
label='Goals'
)
series2 = putil.plot.Series(
data_source=source2,
label='Saves',
color='b',
marker=None,
interp='STRAIGHT',
line_style='--'
)
panel = putil.plot.Panel(
series=[series1, series2],
primary_axis_label='Time',
primary_axis_units='sec',
display_indep_axis=True
)
if not no_print:
for num, series in enumerate(panel):
print('Series {0}:'.format(num+1))
print(series)
print('')
else:
return panel
.. =[=end=]=
.. code-block:: python
>>> import docs.support.plot_example_6 as mod
>>> mod.panel_iterator_example(False)
Series 1:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
<BLANKLINE>
Series 2:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
<BLANKLINE>
"""
return iter(self._series)
def __nonzero__(self): # pragma: no cover
"""
Returns :code:`True` if the panel has at least a series associated
with it, :code:`False` otherwise
.. note:: This method applies to Python 2.x
"""
return self._series is not None
def _get_series(self):
return self._series
def _set_series(self, series):
# pylint: disable=C0103
self._series = (
(series if isinstance(series, list) else [series])
if series is not None else
series
)
self._recalculate_series = False
if self.series is not None:
self._validate_series()
self._panel_has_primary_axis = any(
[not series_obj.secondary_axis for series_obj in self.series]
)
self._panel_has_secondary_axis = any(
[series_obj.secondary_axis for series_obj in self.series]
)
comp_prim_dep_var = (
(not self.log_dep_axis) and self._panel_has_primary_axis
)
comp_sec_dep_var = (
(not self.log_dep_axis) and self._panel_has_secondary_axis
)
panel_has_primary_interp_series = any(
[
(not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None)
for series_obj in self.series
]
)
panel_has_secondary_interp_series = any(
[
series_obj.secondary_axis and
(series_obj.interp_dep_var is not None)
for series_obj in self.series
]
)
# Compute panel scaling factor
primary_min = None
prim_interp_min = None
secondary_min = None
sec_interp_min = None
primary_max = None
prim_interp_max = None
secondary_max = None
sec_interp_max = None
panel_min = None
panel_max = None
# Find union of all data points and panel minimum and maximum.
# If panel has logarithmic dependent axis, limits are common and
            # the union of the limits of both axes
# Primary axis
glob_prim_dep_var = (
numpy.unique(
numpy.concatenate(
[
series_obj.dep_var
for series_obj in self.series
if not series_obj.secondary_axis
]
)
)
if comp_prim_dep_var else
None
)
prim_interp_min = (
min(
[
min(series_obj.dep_var)
for series_obj in self.series
if ((not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None))
]
)
if panel_has_primary_interp_series else
None
)
prim_interp_max = (
max(
[
max(series_obj.dep_var)
for series_obj in self.series
if ((not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None))
]
)
if panel_has_primary_interp_series else
None
)
primary_min = (
min(min(glob_prim_dep_var), prim_interp_min)
if comp_prim_dep_var and (prim_interp_min is not None) else
(min(glob_prim_dep_var) if comp_prim_dep_var else None)
)
primary_max = (
max(max(glob_prim_dep_var), prim_interp_max)
if comp_prim_dep_var and (prim_interp_min is not None) else
(max(glob_prim_dep_var) if comp_prim_dep_var else None)
)
# Secondary axis
glob_sec_dep_var = (
numpy.unique(
numpy.concatenate(
[
series_obj.dep_var
for series_obj in self.series
if series_obj.secondary_axis
]
)
)
if comp_sec_dep_var else
None
)
sec_interp_min = (
min(
[
min(series_obj.dep_var)
for series_obj in self.series
if (series_obj.secondary_axis and
(series_obj.interp_dep_var is not None))
]
).tolist()
if panel_has_secondary_interp_series else
None
)
sec_interp_max = (
max(
[
max(series_obj.dep_var)
for series_obj in self.series
if (series_obj.secondary_axis and
(series_obj.interp_dep_var is not None))
]
).tolist()
if panel_has_secondary_interp_series else
None
)
secondary_min = (
min(min(glob_sec_dep_var), sec_interp_min)
if comp_sec_dep_var and (sec_interp_min is not None) else
(min(glob_sec_dep_var) if comp_sec_dep_var else None)
)
secondary_max = (
max(max(glob_sec_dep_var), sec_interp_max)
if comp_sec_dep_var and (sec_interp_max is not None) else
(max(glob_sec_dep_var) if comp_sec_dep_var else None)
)
# Global (for logarithmic dependent axis)
glob_panel_dep_var = (
None
if not self.log_dep_axis else
numpy.unique(
numpy.concatenate(
[series_obj.dep_var for series_obj in self.series]
)
)
)
            # Combine the panel-wide limits with the limits of any
            # interpolated series; doing the primary and secondary updates
            # as two independent assignments would discard the primary
            # contribution
            if self.log_dep_axis:
                panel_min = min(glob_panel_dep_var)
                panel_max = max(glob_panel_dep_var)
                if panel_has_primary_interp_series:
                    panel_min = min(panel_min, prim_interp_min)
                    panel_max = max(panel_max, prim_interp_max)
                if panel_has_secondary_interp_series:
                    panel_min = min(panel_min, sec_interp_min)
                    panel_max = max(panel_max, sec_interp_max)
# Get axis tick marks locations
if comp_prim_dep_var:
(
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_div,
self._primary_dep_var_unit_scale
) = _intelligent_ticks(
glob_prim_dep_var,
primary_min,
primary_max,
tight=False,
log_axis=self.log_dep_axis,
tick_list=self._primary_axis_ticks,
)
if comp_sec_dep_var:
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_div,
self._secondary_dep_var_unit_scale
) = _intelligent_ticks(
glob_sec_dep_var,
secondary_min,
secondary_max,
tight=False,
log_axis=self.log_dep_axis,
tick_list=self._secondary_axis_ticks,
)
if self.log_dep_axis and self._panel_has_primary_axis:
(
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_div,
self._primary_dep_var_unit_scale
) = _intelligent_ticks(
glob_panel_dep_var,
panel_min,
panel_max,
tight=False,
log_axis=self.log_dep_axis
)
if self.log_dep_axis and self._panel_has_secondary_axis:
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_div,
self._secondary_dep_var_unit_scale
) = _intelligent_ticks(
glob_panel_dep_var,
panel_min,
panel_max,
tight=False,
log_axis=self.log_dep_axis
)
# Equalize number of ticks on primary and secondary axis so that
# ticks are in the same percentage place within the dependent
# variable plotting interval (for non-logarithmic panels)
# If there is any tick override (primary and/or secondary) this
# is not done, the user assumes responsibility for aesthetics of
# final result
if ((not self.log_dep_axis) and
self._panel_has_primary_axis and
self._panel_has_secondary_axis and
(self._primary_axis_ticks is None) and
(self._secondary_axis_ticks is None)):
max_ticks = max(
len(self._primary_dep_var_locs),
len(self._secondary_dep_var_locs)
)-1
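            # Illustrative sketch: with primary ticks [0, 5, 10] and
            # secondary ticks [0, 2, 4, 6], max_ticks is 3, so the primary
            # locations are re-gridded to [0, 10/3, 20/3, 10] and both axes
            # end up with 4 tick marks at the same relative positions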
primary_delta = (
(
self._primary_dep_var_locs[-1]-
self._primary_dep_var_locs[0]
)
/
float(max_ticks)
)
secondary_delta = (
(
self._secondary_dep_var_locs[-1]-
self._secondary_dep_var_locs[0]
)
/
float(max_ticks)
)
self._primary_dep_var_locs = [
self._primary_dep_var_locs[0]+(num*primary_delta)
for num in range(max_ticks+1)
]
self._secondary_dep_var_locs = [
self._secondary_dep_var_locs[0]+(num*secondary_delta)
for num in range(max_ticks+1)
]
(
self._primary_dep_var_locs,
self._primary_dep_var_labels
) = _uniquify_tick_labels(
self._primary_dep_var_locs,
self._primary_dep_var_locs[0],
self._primary_dep_var_locs[-1]
)
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels
) = _uniquify_tick_labels(
self._secondary_dep_var_locs,
self._secondary_dep_var_locs[0],
self._secondary_dep_var_locs[-1]
)
self._primary_axis_ticks = self._primary_dep_var_locs
self._secondary_axis_ticks = self._secondary_dep_var_locs
# Scale panel
self._scale_dep_var(
self._primary_dep_var_div,
self._secondary_dep_var_div
)
def _get_primary_axis_scale(self):
return self._primary_dep_var_div
def _get_primary_axis_ticks(self):
return self._primary_axis_ticks
def _get_secondary_axis_scale(self):
return self._secondary_dep_var_div
def _get_secondary_axis_ticks(self):
return self._secondary_axis_ticks
def _get_primary_axis_label(self):
return self._primary_axis_label
@putil.pcontracts.contract(primary_axis_label='None|str')
def _set_primary_axis_label(self, primary_axis_label):
self._primary_axis_label = primary_axis_label
def _get_primary_axis_units(self):
return self._primary_axis_units
@putil.pcontracts.contract(primary_axis_units='None|str')
def _set_primary_axis_units(self, primary_axis_units):
self._primary_axis_units = primary_axis_units
def _get_secondary_axis_label(self):
return self._secondary_axis_label
@putil.pcontracts.contract(secondary_axis_label='None|str')
def _set_secondary_axis_label(self, secondary_axis_label):
self._secondary_axis_label = secondary_axis_label
def _get_secondary_axis_units(self):
return self._secondary_axis_units
@putil.pcontracts.contract(secondary_axis_units='None|str')
def _set_secondary_axis_units(self, secondary_axis_units):
self._secondary_axis_units = secondary_axis_units
def _get_log_dep_axis(self):
return self._log_dep_axis
@putil.pcontracts.contract(log_dep_axis='None|bool')
def _set_log_dep_axis(self, log_dep_axis):
self._recalculate_series = self.log_dep_axis != log_dep_axis
self._log_dep_axis = log_dep_axis
if self._recalculate_series:
self._set_series(self._series)
def _get_display_indep_axis(self):
return self._display_indep_axis
@putil.pcontracts.contract(display_indep_axis='None|bool')
def _set_display_indep_axis(self, display_indep_axis):
self._display_indep_axis = display_indep_axis
def _get_legend_props(self):
return self._legend_props
@putil.pcontracts.contract(legend_props='None|dict')
def _set_legend_props(self, legend_props):
invalid_ex = putil.exh.addex(
ValueError, 'Illegal legend property `*[prop_name]*`'
)
illegal_ex = putil.exh.addex(
TypeError,
"Legend property `pos` is not one of ['BEST', 'UPPER RIGHT', "
"'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', "
"'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER', "
"'UPPER CENTER', 'CENTER'] (case insensitive)"
)
cols_ex = putil.exh.addex(
RuntimeError, 'Legend property `cols` is not valid'
)
self._legend_props = (
legend_props
if legend_props is not None else
{'pos':'BEST', 'cols':1}
)
self._legend_props.setdefault('pos', 'BEST')
self._legend_props.setdefault('cols', 1)
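        # e.g. legend_props={'pos': 'UPPER LEFT', 'cols': 2} requests a
        # two-column legend anchored at the upper left corner of the panel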
for key, value in self.legend_props.items():
invalid_ex(
key not in self._legend_props_list, _F('prop_name', key)
)
illegal_ex(
(key == 'pos') and
_legend_position_validation(self.legend_props['pos'])
)
cols_ex(
((key == 'cols') and (not isinstance(value, int))) or
((key == 'cols') and
(isinstance(value, int) is True) and (value < 0))
)
self._legend_props['pos'] = self._legend_props['pos'].upper()
def __str__(self):
"""
Prints panel information. For example:
.. code-block:: python
>>> from __future__ import print_function
>>> import docs.support.plot_example_6 as mod
>>> print(mod.panel_iterator_example(True))
Series 0:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
Series 1:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
Primary axis label: Time
Primary axis units: sec
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: True
Legend properties:
cols: 1
pos: BEST
"""
ret = ''
if (self.series is None) or (len(self.series) == 0):
ret += 'Series: None\n'
else:
for num, element in enumerate(self.series):
ret += 'Series {0}:\n'.format(num)
temp = str(element).split('\n')
temp = [3*' '+line for line in temp]
ret += '\n'.join(temp)
ret += '\n'
ret += 'Primary axis label: {0}\n'.format(
self.primary_axis_label
if self.primary_axis_label not in ['', None] else
'not specified'
)
ret += 'Primary axis units: {0}\n'.format(
self.primary_axis_units
if self.primary_axis_units not in ['', None] else
'not specified'
)
ret += 'Secondary axis label: {0}\n'.format(
self.secondary_axis_label
if self.secondary_axis_label not in ['', None] else
'not specified'
)
ret += 'Secondary axis units: {0}\n'.format(
self.secondary_axis_units
if self.secondary_axis_units not in ['', None] else
'not specified'
)
ret += 'Logarithmic dependent axis: {0}\n'.format(self.log_dep_axis)
ret += (
'Display independent '
'axis: {0}\n'.format(self.display_indep_axis)
)
ret += 'Legend properties:\n'
iobj = enumerate(sorted(list(self.legend_props.items())))
for num, (key, value) in iobj:
ret += ' {0}: {1}{2}'.format(
key, value, '\n' if num+1 < len(self.legend_props) else ''
)
return ret
def _validate_series(self):
"""
Verifies that elements of series list are of the right type and
fully specified
"""
invalid_ex = putil.exh.addai('series')
incomplete_ex = putil.exh.addex(
RuntimeError, 'Series item *[number]* is not fully specified'
)
log_ex = putil.exh.addex(
ValueError,
'Series item *[number]* cannot be plotted in a logarithmic '
'axis because it contains negative data points'
)
for num, obj in enumerate(self.series):
invalid_ex(not isinstance(obj, Series))
incomplete_ex(not obj._complete, _F('number', num))
log_ex(
bool((min(obj.dep_var) <= 0) and self.log_dep_axis),
_F('number', num)
)
def _get_complete(self):
"""
Returns True if panel is fully specified, otherwise returns False
"""
return (self.series is not None) and (len(self.series) > 0)
def _scale_indep_var(self, scaling_factor):
""" Scale independent variable of panel series """
for series_obj in self.series:
series_obj._scale_indep_var(scaling_factor)
def _scale_dep_var(self, primary_scaling_factor, secondary_scaling_factor):
""" Scale dependent variable of panel series """
for series_obj in self.series:
if not series_obj.secondary_axis:
series_obj._scale_dep_var(primary_scaling_factor)
else:
series_obj._scale_dep_var(secondary_scaling_factor)
def _setup_axis(self, axis_type, axis_obj, dep_min, dep_max, tick_locs,
tick_labels, axis_label, axis_units, axis_scale):
""" Configure dependent axis """
# pylint: disable=R0201
# Set function pointers
xflist = [
axis_obj.xaxis.grid, axis_obj.set_xlim, axis_obj.xaxis.set_ticks,
axis_obj.xaxis.set_ticklabels, axis_obj.xaxis.set_label_text
]
yflist = [
axis_obj.yaxis.grid, axis_obj.set_ylim, axis_obj.yaxis.set_ticks,
axis_obj.yaxis.set_ticklabels, axis_obj.yaxis.set_label_text
]
(fgrid, flim, fticks, fticklabels, fset_label_text) = (
xflist
if axis_type.upper() == 'INDEP' else
yflist
)
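        # with this dispatch the same code below configures either the
        # independent (x) axis or a dependent (y) axis of the panel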
# Process
fgrid(True, 'both')
flim((dep_min, dep_max), emit=True, auto=False)
fticks(tick_locs)
axis_obj.tick_params(
axis='x' if axis_type.upper() == 'INDEP' else 'y',
which='major',
labelsize=AXIS_TICKS_FONT_SIZE
)
fticklabels(tick_labels)
if (axis_label not in [None, '']) or (axis_units not in [None, '']):
axis_label = '' if axis_label is None else axis_label.strip()
unit_scale = '' if axis_scale is None else axis_scale.strip()
fset_label_text(
axis_label +
(
''
if (unit_scale == '') and (axis_units == '') else
(
' [{unit_scale}{units}]'.format(
unit_scale=unit_scale,
units='-' if axis_units == '' else axis_units
)
)
),
fontdict={'fontsize':AXIS_LABEL_FONT_SIZE}
)
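    # Hedged note (added comment, not in the original source): the xflist/yflist
    # function-pointer lists above let a single code path configure either axis.
    # For example, a call such as
    #
    #     self._setup_axis(
    #         'INDEP', axis_obj, 0, 10, [0, 5, 10], ['0', '5', '10'],
    #         'Time', 'sec', ''
    #     )
    #
    # grids, bounds, ticks, and labels the x axis in one pass.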
def _draw_panel(self, axarr_prim, indep_axis_dict, print_indep_axis):
""" Draw panel series """
# pylint: disable=W0612
axarr_sec = (
axarr_prim.twinx()
if self._panel_has_secondary_axis else
None
)
# Place data series in their appropriate axis (primary or secondary)
for series_obj in self.series:
series_obj._draw_series(
axarr_prim if not series_obj.secondary_axis else axarr_sec,
indep_axis_dict['log_indep'],
self.log_dep_axis
)
# Set up tick labels and axis labels
if self._panel_has_primary_axis:
self._setup_axis(
'DEP',
axarr_prim,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self.primary_axis_label,
self.primary_axis_units,
self._primary_dep_var_unit_scale
)
if self._panel_has_secondary_axis:
self._setup_axis(
'DEP',
axarr_sec,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self.secondary_axis_label,
self.secondary_axis_units,
self._secondary_dep_var_unit_scale
)
if ((not self._panel_has_primary_axis) and
self._panel_has_secondary_axis):
axarr_prim.yaxis.set_visible(False)
# Print legend
if (len(self.series) > 1) and (len(self.legend_props) > 0):
_, primary_labels = (
axarr_prim.get_legend_handles_labels()
if self._panel_has_primary_axis else
(None, [])
)
_, secondary_labels = (
axarr_sec.get_legend_handles_labels()
if self._panel_has_secondary_axis else
(None, [])
)
lprim = len(primary_labels)
lsec = len(secondary_labels)
labels = (
(
[r'$\Leftarrow$'+label for label in primary_labels]+
[label+r'$\Rightarrow$' for label in secondary_labels]
)
if (lprim > 0) and (lsec > 0) else
primary_labels+secondary_labels
)
if any([bool(label) for label in labels]):
leg_artist = [
series_obj._legend_artist(LEGEND_SCALE)
for series_obj in self.series
if series_obj._check_series_is_plottable()
]
legend_axis = (
axarr_prim
if self._panel_has_primary_axis else
axarr_sec
)
loc_key = self._legend_pos_list.index(
self.legend_props['pos'].lower()
if 'pos' in self.legend_props else 'lower left'
)
legend_axis.legend(
leg_artist,
labels,
ncol=self.legend_props['cols']
if 'cols' in self.legend_props else
len(labels),
loc=self._legend_pos_list[loc_key],
numpoints=1,
fontsize=AXIS_LABEL_FONT_SIZE/LEGEND_SCALE
)
# Fix Matplotlib issue where when there is primary and
# secondary axis the legend box of one axis is transparent for
# the axis/series of the other
# From: http://stackoverflow.com/questions/17158469/
# legend-transparency-when-using-secondary-axis
if (self._panel_has_primary_axis and
self._panel_has_secondary_axis):
axarr_prim.set_zorder(1)
axarr_prim.set_frame_on(False)
axarr_sec.set_frame_on(True)
# Print independent axis tick marks and label
(indep_var_min, indep_var_max, indep_var_locs) = (
indep_axis_dict['indep_var_min'],
indep_axis_dict['indep_var_max'],
indep_axis_dict['indep_var_locs']
)
indep_var_labels = (
indep_axis_dict['indep_var_labels']
if ('indep_var_labels' in indep_axis_dict) and
(indep_axis_dict['indep_var_labels'] is not None) else
None
)
indep_axis_label = (
''
if indep_axis_dict['indep_axis_label'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_label'].strip()
)
indep_axis_units = (
''
if indep_axis_dict['indep_axis_units'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_units'].strip()
)
indep_axis_unit_scale = (
''
if indep_axis_dict['indep_axis_unit_scale'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_unit_scale'].strip()
)
self._setup_axis(
'INDEP',
axarr_prim,
indep_var_min,
indep_var_max,
indep_var_locs,
indep_var_labels,
indep_axis_label,
indep_axis_units,
indep_axis_unit_scale
)
plt.setp(axarr_prim.get_xticklabels(), visible=print_indep_axis)
return {
'primary':(
None
if not self._panel_has_primary_axis else
axarr_prim
),
'secondary':(
None
if not self._panel_has_secondary_axis else
axarr_sec
)
}
_complete = property(_get_complete)
display_indep_axis = property(
_get_display_indep_axis,
_set_display_indep_axis,
doc='Show independent axis flag'
)
r"""
Gets or sets the independent axis display flag; indicates whether the
independent axis is displayed (True) or not (False)
:type: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.display_indep_axis
:raises: (when assigned) RuntimeError (Argument \`display_indep_axis\`
is not valid)
.. [[[end]]]
"""
legend_props = property(
_get_legend_props, _set_legend_props, doc='Panel legend box properties'
)
r"""
Gets or sets the panel legend box properties; this is a dictionary that
has properties (dictionary key) and their associated values (dictionary
values). Currently supported properties are:
* **pos** (*string*) -- legend box position, one of :code:`'BEST'`,
:code:`'UPPER RIGHT'`, :code:`'UPPER LEFT'`, :code:`'LOWER LEFT'`,
:code:`'LOWER RIGHT'`, :code:`'RIGHT'`, :code:`'CENTER LEFT'`,
:code:`'CENTER RIGHT'`, :code:`'LOWER CENTER'`, :code:`'UPPER CENTER'`
or :code:`'CENTER'` (case insensitive)
* **cols** (integer) -- number of columns of the legend box
If :code:`None` the default used is :code:`{'pos':'BEST', 'cols':1}`
.. note:: No legend is shown if a panel has only one series in it or if no
series has a label
:type: dictionary
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.legend_props
:raises: (when assigned)
* RuntimeError (Argument \`legend_props\` is not valid)
* RuntimeError (Legend property \`cols\` is not valid)
* TypeError (Legend property \`pos\` is not one of ['BEST', 'UPPER
RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', 'CENTER
LEFT', 'CENTER RIGHT', 'LOWER CENTER', 'UPPER CENTER', 'CENTER']
(case insensitive))
* ValueError (Illegal legend property \`*[prop_name]*\`)
.. [[[end]]]
"""
log_dep_axis = property(
_get_log_dep_axis,
_set_log_dep_axis,
doc='Panel logarithmic dependent axis flag'
)
r"""
Gets or sets the panel logarithmic dependent (primary and/or secondary)
axis flag; indicates whether the dependent (primary and/or secondary) axis
is linear (False) or logarithmic (True)
:type: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.log_dep_axis
:raises: (when assigned)
* RuntimeError (Argument \`log_dep_axis\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
primary_axis_label = property(
_get_primary_axis_label,
_set_primary_axis_label,
doc='Panel primary axis label'
)
r"""
Gets or sets the panel primary dependent axis label
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.primary_axis_label
:raises: (when assigned) RuntimeError (Argument \`primary_axis_label\`
is not valid)
.. [[[end]]]
"""
primary_axis_scale = property(
_get_primary_axis_scale, doc='Primary axis scale'
)
"""
Gets the scale of the panel primary axis, :code:`None` if axis has no
series associated with it
:type: float or None
"""
primary_axis_ticks = property(
_get_primary_axis_ticks, doc='Primary axis tick locations'
)
"""
Gets the primary axis (scaled) tick locations, :code:`None` if axis has no
series associated with it
:type: list or None
"""
primary_axis_units = property(
_get_primary_axis_units,
_set_primary_axis_units,
doc='Panel primary axis units'
)
r"""
Gets or sets the panel primary dependent axis units
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.primary_axis_units
:raises: (when assigned) RuntimeError (Argument \`primary_axis_units\`
is not valid)
.. [[[end]]]
"""
secondary_axis_label = property(
_get_secondary_axis_label,
_set_secondary_axis_label,
doc='Panel secondary axis label'
)
r"""
Gets or sets the panel secondary dependent axis label
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.secondary_axis_label
:raises: (when assigned) RuntimeError (Argument
\`secondary_axis_label\` is not valid)
.. [[[end]]]
"""
secondary_axis_scale = property(
_get_secondary_axis_scale,
doc='Secondary axis scale'
)
"""
Gets the scale of the panel secondary axis, :code:`None` if axis has no
series associated with it
:type: float or None
"""
secondary_axis_ticks = property(
        _get_secondary_axis_ticks, doc='Secondary axis tick locations'
)
"""
Gets the secondary axis (scaled) tick locations, :code:`None` if axis has
no series associated with it
:type: list or None
"""
secondary_axis_units = property(
_get_secondary_axis_units,
_set_secondary_axis_units,
doc='Panel secondary axis units'
)
r"""
Gets or sets the panel secondary dependent axis units
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.secondary_axis_units
:raises: (when assigned) RuntimeError (Argument
\`secondary_axis_units\` is not valid)
.. [[[end]]]
"""
series = property(_get_series, _set_series, doc='Panel series')
r"""
Gets or sets the panel series, :code:`None` if there are no series
associated with the panel
:type: :py:class:`putil.plot.Series`, list of
:py:class:`putil.plot.Series` or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.series
:raises: (when assigned)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
| 36.412403 | 79 | 0.556949 |
4f4dccf42fc01d77e8cf31fdd6bd01a78c51b39c | 1,100 | py | Python | Python3/482.py | vindhya2g/LeetCode | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | Python3/482.py | vindhya2g/LeetCode | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | Python3/482.py | vindhya2g/LeetCode | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def licenseKeyFormatting(self, S: str, K: int) -> str:
S=''.join(S.split('-')).upper()
result=[]
s=len(S)%K
n=len(S)//K
if s>0:
result.append(S[:s])
for i in range(1,n+1):
result.append(S[s+(i-1)*K:s+i*K])
return '-'.join(result)
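# Hedged check (added comment, not part of either submission): for the
# canonical LeetCode 482 examples both solutions in this file should agree:
#
#     Solution().licenseKeyFormatting("5F3Z-2e-9-w", 4)  # -> "5F3Z-2E9W"
#     Solution().licenseKeyFormatting("2-5g-3-J", 2)     # -> "2-5G-3J"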
__________________________________________________________________________________________________
sample 13392 kb submission
class Solution:
def licenseKeyFormatting(self, S: str, K: int) -> str:
ret = ''
count = 0
for i in range(len(S) - 1, -1, -1):
char = S[i].upper()
if char == '-':
continue
if count > 0 and count % K == 0:
ret = '-' + ret
count = 0
ret = char + ret
count += 1
return ret
__________________________________________________________________________________________________
| 34.375 | 98 | 0.589091 |
4f4f873733988b1e0ac7edf8d79360e465be56e0 | 1,713 | py | Python | towers/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | towers/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | towers/__init__.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | from .alchemist import Alchemist
from .banana_farm import BananaFarm
from .bomb_shooter import BombShooter
from .boomerang_monkey import BoomerangMonkey
from .dart_monkey import DartMonkey
from .dartling_gunner import DartlingGunner
from .druid import Druid
from .engineer_monkey import EngineerMonkey
from .glue_gunner import GlueGunner
from .heli_pilot import HeliPilot
from .ice_monkey import IceMonkey
from .monkey_ace import MonkeyAce
from .monkey_buccaneer import MonkeyBuccaneer
from .monkey_sub import MonkeySub
from .monkey_village import MonkeyVillage
from .mortar_monkey import MortarMonkey
from .ninja_monkey import NinjaMonkey
from .sniper_monkey import SniperMonkey
from .spike_factory import SpikeFactory
from .super_monkey import SuperMonkey
from .tack_shooter import TackShooter
from .wizard_monkey import WizardMonkey
ALL = [Alchemist, BananaFarm, BombShooter, BoomerangMonkey, DartMonkey, DartlingGunner, Druid, EngineerMonkey,
GlueGunner, HeliPilot, IceMonkey, MonkeyAce, MonkeyBuccaneer, MonkeySub, MonkeyVillage, MortarMonkey,
NinjaMonkey, SniperMonkey, SpikeFactory, SuperMonkey, TackShooter, WizardMonkey]
AQUATIC = []
for tower in ALL:
if tower.aquatic:
AQUATIC.append(tower)
SMALL = []
MEDIUM = []
LARGE = []
XL = []
RECTANGLE = []
SIZES = [SMALL, MEDIUM, LARGE, XL, RECTANGLE]
for tower in ALL:
if tower.width == 65 and tower.height == 57:
SMALL.append(tower)
elif tower.width == 75 and tower.height == 65:
MEDIUM.append(tower)
elif tower.width == 87 and tower.height == 75:
LARGE.append(tower)
elif tower.width == 119 and tower.height == 103:
XL.append(tower)
else:
RECTANGLE.append(tower)
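# Hedged usage sketch (added comment; assumes callers query these bucket lists
# directly): picking every water-capable tower with a small footprint might
# look like
#
#     small_aquatic = [tower for tower in AQUATIC if tower in SMALL]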
| 30.589286 | 110 | 0.76474 |
4f4f85c3de86b6e5b761a60240cb46f002265bc9 | 1,678 | py | Python | confessions/confessions.py | hazre/AnimeTwist-Cogs | 3335dd17f1e115f84eb4e722ea2a9c625bb25129 | [
"MIT"
] | null | null | null | confessions/confessions.py | hazre/AnimeTwist-Cogs | 3335dd17f1e115f84eb4e722ea2a9c625bb25129 | [
"MIT"
] | null | null | null | confessions/confessions.py | hazre/AnimeTwist-Cogs | 3335dd17f1e115f84eb4e722ea2a9c625bb25129 | [
"MIT"
] | null | null | null | import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import pagify, box
from redbot.core import checks
import names
BaseCog = getattr(commands, "Cog", object)
class Confessions(BaseCog):
"""My custom cog"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def confess(self, ctx, *, text):
"""Sends a anonymous message to the #confession channel for people that want to vent and confess anonymously"""
embed = discord.Embed(
colour=discord.Colour(0x4a4a4a),
description= text)
embed.set_author(name= names.get_full_name() + " said:")
embed.set_footer(text= ctx.message.channel.id)
if isinstance(ctx.message.channel, discord.abc.PrivateChannel):
channel = self.bot.get_channel(467491789025050625)
message = await channel.send(embed=embed)
else:
authorchannel = ctx.message.author
await authorchannel.send("Please send your confession message via DM")
await ctx.message.delete()
@commands.command(pass_context=True)
@checks.admin_or_permissions(ban_members=True)
async def confessban(self, ctx, channel_id):
channel = self.bot.get_channel(int(channel_id))
print(channel)
if channel is None:
response = self.MESSAGE_NOT_FOUND
else:
server = self.bot.get_guild(220693787213561857)
member = server.get_member(channel.recipient.id)
await member.ban()
authorchannel = ctx.message.author
await authorchannel.send("User has been banned.") | 38.136364 | 119 | 0.662098 |
4f4e7ef1c7294c6c0fcf1a04159596be9c92981c | 7,584 | py | Python | files/Utilities/AutoDMG.app/Contents/Resources/main.py | samdoran/ansible-role-macos-server | a401b8d10914fd6208e681b971204184e6fab7e2 | [
"MIT"
] | null | null | null | files/Utilities/AutoDMG.app/Contents/Resources/main.py | samdoran/ansible-role-macos-server | a401b8d10914fd6208e681b971204184e6fab7e2 | [
"MIT"
] | null | null | null | files/Utilities/AutoDMG.app/Contents/Resources/main.py | samdoran/ansible-role-macos-server | a401b8d10914fd6208e681b971204184e6fab7e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# main.py
# AutoDMG
#
# Created by Per Olofsson on 2013-09-19.
# Copyright 2013-2014 Per Olofsson, University of Gothenburg. All rights reserved.
#
import os
import sys
import argparse
import traceback
import objc
import Foundation
objc.setVerbose(True)
from IEDLog import LogDebug, LogInfo, LogNotice, LogWarning, LogError, LogMessage
import IEDLog
from IEDUtil import *
import platform
def get_date_string():
formatter = NSDateFormatter.alloc().init()
formatter.setDateFormat_(u"yyyy-MM-dd")
return formatter.stringFromDate_(NSDate.date())
def get_log_dir():
logDir = os.path.expanduser(u"~/Library/Logs/AutoDMG")
if not os.path.exists(logDir):
try:
os.makedirs(logDir)
except OSError as e:
LogWarning(u"Couldn't create %s" % (logDir))
return logDir
def gui_unexpected_error_alert():
exceptionInfo = traceback.format_exc()
NSLog(u"AutoDMG died with an uncaught exception, %@", exceptionInfo)
from AppKit import NSAlertSecondButtonReturn
alert = NSAlert.alloc().init()
alert.setMessageText_(u"AutoDMG died with an uncaught exception")
alert.setInformativeText_(exceptionInfo)
alert.addButtonWithTitle_(u"Quit")
alert.addButtonWithTitle_(u"Save Log…")
while alert.runModal() == NSAlertSecondButtonReturn:
IEDLog.IEDLog.saveLog_(IEDLog.IEDLog, None)
sys.exit(os.EX_SOFTWARE)
def gui_main():
IEDLog.IEDLogToController = True
IEDLog.IEDLogToSyslog = True
IEDLog.IEDLogToStdOut = True
IEDLog.IEDLogStdOutLogLevel = IEDLog.IEDLogLevelDebug
logFile = os.path.join(get_log_dir(), u"AutoDMG-%s.log" % get_date_string())
try:
IEDLog.IEDLogFileHandle = open(logFile, u"a", buffering=1)
IEDLog.IEDLogToFile = True
except IOError as e:
IEDLog.IEDLogToFile = False
LogWarning(u"Couldn't open %s for writing" % (logFile))
import AppKit
from PyObjCTools import AppHelper
# import modules containing classes required to start application and load MainMenu.nib
import IEDAppDelegate
import IEDController
import IEDSourceSelector
import IEDAddPkgController
import IEDAppVersionController
# pass control to AppKit
AppHelper.runEventLoop(unexpectedErrorAlert=gui_unexpected_error_alert)
return os.EX_OK
def cli_main(argv):
IEDLog.IEDLogToController = False
IEDLog.IEDLogToSyslog = True
IEDLog.IEDLogToStdOut = True
IEDLog.IEDLogToFile = False
from IEDCLIController import IEDCLIController
clicontroller = IEDCLIController.alloc().init()
try:
# Initialize user defaults before application starts.
defaults = NSUserDefaults.standardUserDefaults()
defaultsPath = NSBundle.mainBundle().pathForResource_ofType_(u"Defaults", u"plist")
defaultsDict = NSDictionary.dictionaryWithContentsOfFile_(defaultsPath)
defaults.registerDefaults_(defaultsDict)
p = argparse.ArgumentParser()
p.add_argument(u"-v", u"--verbose", action=u"store_true", help=u"Verbose output")
p.add_argument(u"-L", u"--log-level",
type=int, choices=range(0, 8), default=6,
metavar=u"LEVEL", help=u"Log level (0-7), default 6")
p.add_argument(u"-l", u"--logfile", help=u"Log to file")
p.add_argument(u"-r", u"--root", action=u"store_true", help=u"Allow running as root")
sp = p.add_subparsers(title=u"subcommands", dest=u"subcommand")
# Populate subparser for each verb.
for verb in clicontroller.listVerbs():
verb_method = getattr(clicontroller, u"cmd%s_" % verb.capitalize())
addargs_method = getattr(clicontroller, u"addargs%s_" % verb.capitalize())
parser = sp.add_parser(verb, help=verb_method.__doc__)
addargs_method(parser)
parser.set_defaults(func=verb_method)
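        # Illustration (added comment): with this dynamic wiring, a verb named
        # "build" exposed by IEDCLIController resolves to cmdBuild_() for
        # execution and addargsBuild_() for its argparse options, so new verbs
        # need no per-verb code here.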
args = p.parse_args(argv)
if args.verbose:
IEDLog.IEDLogStdOutLogLevel = IEDLog.IEDLogLevelInfo
else:
IEDLog.IEDLogStdOutLogLevel = IEDLog.IEDLogLevelNotice
IEDLog.IEDLogFileLogLevel = args.log_level
if args.logfile == u"-":
# Redirect log to stdout instead.
IEDLog.IEDLogFileHandle = sys.stdout
IEDLog.IEDLogToFile = True
IEDLog.IEDLogToStdOut = False
else:
try:
if args.logfile:
logFile = args.logfile
else:
logFile = os.path.join(get_log_dir(), u"AutoDMG-%s.log" % get_date_string())
IEDLog.IEDLogFileHandle = open(logFile, u"a", buffering=1)
except OSError as e:
print >>sys.stderr, (u"Couldn't open %s for writing" % logFile).encode(u"utf-8")
return os.EX_CANTCREAT
IEDLog.IEDLogToFile = True
# Check if we're running with root.
if os.getuid() == 0:
if args.root:
fm = NSFileManager.defaultManager()
url, error = fm.URLForDirectory_inDomain_appropriateForURL_create_error_(NSApplicationSupportDirectory,
NSUserDomainMask,
None,
False,
None)
LogWarning(u"Running as root, using %@", os.path.join(url.path(), u"AutoDMG"))
else:
LogError(u"Running as root isn't recommended (use -r to override)")
return os.EX_USAGE
# Log version info on startup.
version, build = IEDUtil.getAppVersion()
LogInfo(u"AutoDMG v%@ build %@", version, build)
name, version, build = IEDUtil.readSystemVersion_(u"/")
LogInfo(u"%@ %@ %@", name, version, build)
LogInfo(u"%@ %@ (%@)", platform.python_implementation(),
platform.python_version(),
platform.python_compiler())
LogInfo(u"PyObjC %@", objc.__version__)
return args.func(args)
finally:
clicontroller.cleanup()
def main():
# Global exception handler to make sure we always log tracebacks.
try:
# Decode arguments as utf-8 and filter out arguments from Finder and
# Xcode.
decoded_argv = list()
i = 1
while i < len(sys.argv):
arg = sys.argv[i].decode(u"utf-8")
if arg.startswith(u"-psn"):
pass
elif arg == u"-NSDocumentRevisionsDebugMode":
i += 1
elif arg.startswith(u"-NS"):
pass
else:
decoded_argv.append(arg)
i += 1
# If no arguments are supplied, assume the GUI should be started.
if len(decoded_argv) == 0:
return gui_main()
# Otherwise parse the command line arguments.
else:
return cli_main(decoded_argv)
except SystemExit as e:
return e.code
except Exception:
NSLog(u"AutoDMG died with an uncaught exception, %@", traceback.format_exc())
return os.EX_SOFTWARE
if __name__ == '__main__':
sys.exit(main())
| 36.637681 | 119 | 0.593882 |
4f503b83f4fed725c75cc786a7e342c759d21033 | 1,058 | py | Python | utils.py | ParadoxZW/CIFAR100-PRACTICE | 175d9a72fc8e7d79ec3ef8670028d1efe830e5b9 | [
"MIT"
] | 1 | 2020-04-15T11:01:28.000Z | 2020-04-15T11:01:28.000Z | utils.py | ParadoxZW/CIFAR10-PRACTICE | 175d9a72fc8e7d79ec3ef8670028d1efe830e5b9 | [
"MIT"
] | null | null | null | utils.py | ParadoxZW/CIFAR10-PRACTICE | 175d9a72fc8e7d79ec3ef8670028d1efe830e5b9 | [
"MIT"
] | null | null | null | '''Some helper functions for PyTorch, including:
    - init_params: net parameter initialization.
    - time_stamp: generate timestamp.
    - accuracy: compute classification accuracy.
'''
import time
import torch
import torch.nn as nn
import torch.nn.init as init
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# init.constant_(m.weight, 1)
# init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# init.normal_(m.weight, std=1e-5)
# if m.bias is not None:
# init.constant_(m.bias, 0)
def time_stamp():
    '''generate timestamp'''
return time.strftime("%m_%d_%H%M", time.localtime())
def accuracy(predict, target):
'''compute accuracy'''
pred_y = torch.max(predict, 1)[1].data.squeeze()
acc = (pred_y == target).sum().item() / float(target.size(0))
return acc
| 28.594595 | 65 | 0.604915 |
4f4a93615262811e7b554eca86dcd96b7ae3654e | 4,687 | py | Python | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/containerinstance/azure-mgmt-containerinstance/azure/mgmt/containerinstance/aio/operations/_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerinstance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.OperationListResult"]:
"""List the operations for Azure Container Instance service.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerinstance.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerInstance/operations'} # type: ignore
| 44.638095 | 133 | 0.662684 |
4f500613812351a6e4b04a8b97be3026237bdca8 | 463 | py | Python | scripts/python/ep_run_cdf_to_csv_dicast.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | 2 | 2020-06-03T15:59:50.000Z | 2020-12-21T11:11:57.000Z | scripts/python/ep_run_cdf_to_csv_dicast.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | null | null | null | scripts/python/ep_run_cdf_to_csv_dicast.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | 2 | 2019-10-02T06:47:23.000Z | 2020-02-02T18:32:23.000Z | #!/usr/bin/env python
import sys
import os
import vii_paths
import time
proc_script = "run_cdf_to_csv_dicast.py"
log_base = "%s/%s" % (vii_paths.LOG_DIR, "run_cdf_to_csv_dicast")
config_file = "%s/%s" % (vii_paths.CONFIG_DIR, "cdf_to_csv_dicast.cfg")
output_dir = "%s/%s" % (vii_paths.PROCESSED_DIR, "cdf_to_csv_dicast")
command = "%s -l %s %s %s" % (proc_script, log_base, config_file, output_dir)
ret = os.system(command)
if (ret != 0):
sys.exit(1)
| 22.047619 | 77 | 0.701944 |
4f4a3d7c1eeadd2469b0c54488e0ee1d05930dc9 | 443 | py | Python | lego/apps/users/migrations/0010_auto_20171021_2118.py | ion05/lego | 873f6898e35ed2d9a33bdfb8339eaf6c9a61470c | [
"MIT"
] | 45 | 2017-10-24T12:09:06.000Z | 2021-11-03T21:21:03.000Z | lego/apps/users/migrations/0010_auto_20171021_2118.py | ion05/lego | 873f6898e35ed2d9a33bdfb8339eaf6c9a61470c | [
"MIT"
] | 980 | 2017-10-24T12:29:07.000Z | 2022-03-31T04:04:31.000Z | lego/apps/users/migrations/0010_auto_20171021_2118.py | henrikhorluck/lego | 38c3fbe7f4e6793e69a88941e7650947c035d57e | [
"MIT"
] | 23 | 2018-04-11T16:34:22.000Z | 2021-11-23T12:28:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-21 21:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("users", "0009_auto_20171021_1521")]
operations = [
migrations.RemoveField(model_name="abakusgroup", name="internal_email"),
migrations.RemoveField(model_name="abakusgroup", name="internal_email_enabled"),
]
| 27.6875 | 88 | 0.722348 |
4f4d68892d330e6b6039f24e0f717c631088b42b | 899 | py | Python | SoftLayer/CLI/image/edit.py | briancline/softlayer-python | 679fb62ba2db095f0177f9d9488ff4a80c3b7387 | [
"MIT"
] | null | null | null | SoftLayer/CLI/image/edit.py | briancline/softlayer-python | 679fb62ba2db095f0177f9d9488ff4a80c3b7387 | [
"MIT"
] | null | null | null | SoftLayer/CLI/image/edit.py | briancline/softlayer-python | 679fb62ba2db095f0177f9d9488ff4a80c3b7387 | [
"MIT"
] | null | null | null | """Edit details of an image."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import helpers
import click
@click.command()
@click.argument('identifier')
@click.option('--name', help="Name of the image")
@click.option('--note', help="Additional note for the image")
@click.option('--tag', help="Tags for the image")
@environment.pass_env
def cli(env, identifier, name, note, tag):
"""Edit details of an image."""
image_mgr = SoftLayer.ImageManager(env.client)
data = {}
if name:
data['name'] = name
if note:
data['note'] = note
if tag:
data['tag'] = tag
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
if not image_mgr.edit(image_id, **data):
raise exceptions.CLIAbort("Failed to Edit Image")
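# Hedged CLI sketch (added comment; the identifier and values are made up):
# routed through the SoftLayer CLI, this command would be invoked as something
# like
#
#     slcli image edit 12345 --name "base-image" --note "golden build"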
| 28.09375 | 77 | 0.680756 |
4f4f5e9f93e52de10f50f435fc6c15c29d83c38a | 1,492 | py | Python | src/common/dtypes.py | mondrasovic/homography_ranking | c36a7b621d386f4a31d2926d4f5a9707e13d7847 | [
"MIT"
] | 1 | 2022-03-16T10:56:41.000Z | 2022-03-16T10:56:41.000Z | src/common/dtypes.py | mondrasovic/homography_ranking | c36a7b621d386f4a31d2926d4f5a9707e13d7847 | [
"MIT"
] | null | null | null | src/common/dtypes.py | mondrasovic/homography_ranking | c36a7b621d386f4a31d2926d4f5a9707e13d7847 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Milan Ondrasovic <milan.ondrasovic@gmail.com>
#
# MIT License
#
# Copyright (c) 2021 Milan Ondrašovič
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Tuple, Union
import numpy as np
# General image shape: height x width --> rows x cols.
ShapeT = Union[np.ndarray, Tuple[int, int]]
# A range (an interval) from a to b, such that a < b.
RangeT = Tuple[float, float]
| 39.263158 | 81 | 0.736595 |
4f4fa87207aaed61660858f7bc35f510a2ea7a43 | 22,028 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_inbound_nat_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_inbound_nat_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_08_01/aio/operations/_inbound_nat_rules_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_08_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
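    # Hedged usage sketch (added comment, not generated code; client and group
    # names are assumed): with a NetworkManagementClient exposing this group,
    # a coroutine would page through the rules like
    #
    #     async for rule in client.inbound_nat_rules.list("my-rg", "my-lb"):
    #         print(rule.name)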
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_08_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2017_08_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_08_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
| 50.63908 | 232 | 0.676639 |
4f5124b1ec16b38460f5d227340db39cb5fb3894 | 15,013 | py | Python | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py | alexryndin/ambari | 478eeb02ebecef1f7f0506885a041d2070d2bccb | [
"Apache-2.0"
] | null | null | null | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import urllib2
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
from resource_management.core.resources.system import Directory, File, Execute
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.functions.version import compare_versions
from resource_management.core import shell
from resource_management.core.shell import as_user, as_sudo
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.logger import Logger
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
from resource_management.libraries.script.script import Script
from zkfc_slave import ZkfcSlave
def safe_zkfc_op(action, env):
"""
Idempotent operation on the zkfc process to either start or stop it.
:param action: start or stop
:param env: environment
"""
Logger.info("Performing action {0} on zkfc.".format(action))
zkfc = None
if action == "start":
try:
zkfc = ZkfcSlave()
zkfc.status(env)
except ComponentIsNotRunning:
if zkfc:
zkfc.start(env)
if action == "stop":
try:
zkfc = ZkfcSlave()
zkfc.status(env)
except ComponentIsNotRunning:
pass
else:
if zkfc:
zkfc.stop(env)
def initiate_safe_zkfc_failover():
"""
If this is the active namenode, initiate a safe failover and wait for it to become the standby.
If an error occurs, force a failover to happen by killing zkfc on this host. In this case, during the Restart,
will also have to start ZKFC manually.
"""
import params
# Must kinit before running the HDFS command
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
user = params.hdfs_user)
active_namenode_id = None
standby_namenode_id = None
active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
if active_namenodes:
active_namenode_id = active_namenodes[0][0]
if standby_namenodes:
standby_namenode_id = standby_namenodes[0][0]
if active_namenode_id:
Logger.info(format("Active NameNode id: {active_namenode_id}"))
if standby_namenode_id:
Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
if unknown_namenodes:
for unknown_namenode in unknown_namenodes:
Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))
if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
# Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))
failover_command = format("hdfs haadmin -failover {namenode_id} {other_namenode_id}")
check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
Logger.info(msg)
code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
Logger.info(format("Rolling Upgrade - failover command returned {code}"))
wait_for_standby = False
if code == 0:
wait_for_standby = True
else:
# Try to kill ZKFC manually
was_zkfc_killed = kill_zkfc(params.hdfs_user)
code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
if code == 255 and out:
Logger.info("Rolling Upgrade - NameNode is already down.")
else:
if was_zkfc_killed:
# Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
wait_for_standby = True
if wait_for_standby:
Logger.info("Waiting for this NameNode to become the standby one.")
Execute(check_standby_cmd,
user=params.hdfs_user,
tries=50,
try_sleep=6,
logoutput=True)
else:
msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
Logger.info(msg)
def kill_zkfc(zkfc_user):
"""
There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
Option 2. Silent failover (not supported as of IOP 4.0.0.0)
:param zkfc_user: User that started the ZKFC process.
:return: Return True if ZKFC was killed, otherwise, false.
"""
import params
if params.dfs_ha_enabled:
if params.zkfc_pid_file:
check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
code, out = shell.call(check_process)
if code == 0:
Logger.debug("ZKFC is running and will be killed.")
kill_command = format("kill -15 `cat {zkfc_pid_file}`")
Execute(kill_command,
user=zkfc_user
)
File(params.zkfc_pid_file,
action = "delete",
)
return True
return False
def get_service_pid_file(name, user):
"""
Get the pid file path that was used to start the service by the user.
:param name: Service name
:param user: User that started the service.
:return: PID file path
"""
import params
pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
return pid_file
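# Illustrative result (a sketch; the prefix value is an assumption and comes
# from params at runtime): with hadoop_pid_dir_prefix = /var/run/hadoop,
#   get_service_pid_file("namenode", "hdfs")
# resolves to /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid.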
def service(action=None, name=None, user=None, options="", create_pid_dir=False,
create_log_dir=False):
"""
:param action: Either "start" or "stop"
:param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
:param user: User to run the command as
:param options: Additional options to pass to command as a string
:param create_pid_dir: Create PID directory
  :param create_log_dir: Create log file directory
"""
import params
options = options if options else ""
pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
hadoop_env_exports = {
'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
}
log_dir = format("{hdfs_log_dir_prefix}/{user}")
# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
if name == "nfs3" :
pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
custom_export = {
'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
}
hadoop_env_exports.update(custom_export)
process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
  # On STOP, directories shouldn't be created, since the old directories
  # (created during the previous start) are still in use while stopping.
if action != "stop":
if name == "nfs3":
Directory(params.hadoop_pid_dir_prefix,
mode=0755,
owner=params.root_user,
group=params.root_group
)
else:
Directory(params.hadoop_pid_dir_prefix,
mode=0755,
owner=params.hdfs_user,
group=params.user_group
)
if create_pid_dir:
Directory(pid_dir,
owner=user,
create_parents = True)
if create_log_dir:
if name == "nfs3":
Directory(log_dir,
mode=0775,
owner=params.root_user,
group=params.user_group)
else:
Directory(log_dir,
owner=user,
create_parents = True)
if params.security_enabled and name == "datanode":
## The directory where pid files are stored in the secure data environment.
hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
if params.secure_dn_ports_are_in_use:
user = "root"
pid_file = format(
"{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
if action == 'stop' and os.path.isfile(hadoop_secure_dn_pid_file):
      # Special handling: when a non-root secure DataNode is reconfigured and
      # restarted, the previous instance was started as root, so root is also
      # needed to stop it; otherwise the running instance cannot be stopped.
user = "root"
try:
check_process_status(hadoop_secure_dn_pid_file)
custom_export = {
'HADOOP_SECURE_DN_USER': params.hdfs_user
}
hadoop_env_exports.update(custom_export)
except ComponentIsNotRunning:
pass
hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
if user == "root":
cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
if options:
cmd += [options, ]
daemon_cmd = as_sudo(cmd)
else:
cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
if options:
cmd += " " + options
daemon_cmd = as_user(cmd, user)
if action == "start":
# remove pid file from dead process
File(pid_file, action="delete", not_if=process_id_exists_command)
Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
elif action == "stop":
Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
File(pid_file, action="delete")
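# Illustrative call (a sketch of how component scripts typically use this):
#   service(action="start", name="namenode", user=params.hdfs_user,
#           create_pid_dir=True, create_log_dir=True)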
def get_value_from_jmx(qry, property):
try:
response = urllib2.urlopen(qry)
data = response.read()
if data:
data_dict = json.loads(data)
return data_dict["beans"][0][property]
except:
return None
def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
"""
:param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.
If not preceded, will use the encrypted param to determine.
:param modeler_type: Modeler type to query using startswith function
:param metric: Metric to return
:return: Return an object representation of the metric, or None if it does not exist
"""
if not nn_address or not modeler_type or not metric:
return None
nn_address = nn_address.strip()
if not nn_address.startswith("http"):
nn_address = ("https://" if encrypted else "http://") + nn_address
if not nn_address.endswith("/"):
nn_address = nn_address + "/"
nn_address = nn_address + "jmx"
Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))
if security_enabled:
import params
data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
"jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
else:
data = urllib2.urlopen(nn_address).read()
my_data = None
if data:
data_dict = json.loads(data)
if data_dict:
for el in data_dict['beans']:
if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
if metric in el:
my_data = el[metric]
if my_data:
my_data = json.loads(str(my_data))
break
return my_data
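# Hypothetical usage sketch (the bean/metric names below are illustrative,
# not taken from this module):
#   total = get_jmx_data("nn-host:50070", "FSNamesystem", "CapacityTotal")
# queries http://nn-host:50070/jmx and returns the CapacityTotal attribute of
# the first bean whose modelerType starts with "FSNamesystem", or None.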
def get_port(address):
"""
Extracts port from the address like 0.0.0.0:1019
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None and len(m.groups()) >= 2:
return int(m.group(2))
else:
return None
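# Doctest-style examples of the regex above (kept as comments so module
# behavior is unchanged; host names are made up):
#   get_port("0.0.0.0:1019")                        -> 1019
#   get_port("https://namenode.example.com:50470")  -> 50470
#   get_port("no colon, no port")                   -> None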
def is_secure_port(port):
"""
Returns True if port is root-owned at *nix systems
"""
if port is not None:
return port < 1024
else:
return False
def is_previous_fs_image():
"""
Return true if there's a previous folder in the HDFS namenode directories.
"""
import params
if params.dfs_name_dir:
nn_name_dirs = params.dfs_name_dir.split(',')
for nn_dir in nn_name_dirs:
prev_dir = os.path.join(nn_dir, "previous")
if os.path.isdir(prev_dir):
return True
return False
def get_hdfs_binary(distro_component_name):
"""
Get the hdfs binary to use depending on the stack and version.
:param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
:return: The hdfs binary to use
"""
import params
hdfs_binary = "hdfs"
return hdfs_binary
def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
"""
Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
:param hdfs_binary: path to hdfs binary to use
:param use_specific_namenode: flag if set and Namenode HA is enabled, then the dfsadmin command will use
current namenode's address
:return: the constructed dfsadmin base command
"""
import params
dfsadmin_base_command = ""
if params.dfs_ha_enabled and use_specific_namenode:
dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
else:
dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
return dfsadmin_base_command
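# Sketch of the two command shapes this produces (addresses are made up):
#   HA + use_specific_namenode: "hdfs dfsadmin -fs hdfs://nn1.example.com:8020"
#   otherwise:                  "hdfs dfsadmin -fs hdfs://mycluster"
# Callers append subcommands, e.g. dfsadmin_base_command + " -safemode get".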
| 36.796569 | 140 | 0.69986 |
4f4fd40113cc74c9a85f564ea5814c3fbb2bdc00 | 15,841 | py | Python | mars/services/storage/core.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | 1 | 2021-11-30T12:07:21.000Z | 2021-11-30T12:07:21.000Z | mars/services/storage/core.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | null | null | null | mars/services/storage/core.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
from ... import oscar as mo
from ...lib.aio import AioFileObject
from ...oscar import ActorRef
from ...oscar.backends.allocate_strategy import IdleLabel, NoIdleSlot
from ...storage import StorageLevel, get_storage_backend
from ...storage.base import ObjectInfo, StorageBackend
from ...storage.core import StorageFileObject
from ...utils import calc_data_size, dataslots
from ..cluster import ClusterAPI
from ..meta import MetaAPI
from .errors import DataNotExist
logger = logging.getLogger(__name__)
def _build_data_info(storage_info: ObjectInfo, level, size):
# todo handle multiple
band = 'numa-0' if storage_info.device is None \
else f'gpu-{storage_info.device}'
if storage_info.size is None:
store_size = size
else:
store_size = storage_info.size
return DataInfo(storage_info.object_id, level, size, store_size, band)
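# Band naming sketch: device=None maps to band 'numa-0' (main memory/CPU)
# and device=0 maps to 'gpu-0'; store_size falls back to the caller-supplied
# size when the backend reports no size of its own.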
class _WrappedStorageFileObject(AioFileObject):
"""
Wrap to hold ref after write close
"""
def __init__(self,
file: StorageFileObject,
level: StorageLevel,
size: int,
session_id: str,
data_key: str,
storage_manager: Union[ActorRef, "StorageManagerActor"],
storage_handler: StorageBackend
):
super().__init__(file)
self._size = size
self._level = level
self._session_id = session_id
self._data_key = data_key
self._storage_manager = storage_manager
self._storage_handler = storage_handler
def __getattr__(self, item):
return getattr(self._file, item)
async def close(self):
self._file.close()
if 'w' in self._file._mode:
object_info = await self._storage_handler.object_info(self._file._object_id)
data_info = _build_data_info(object_info, self._level, self._size)
await self._storage_manager.put_data_info(
self._session_id, self._data_key, data_info, object_info)
class StorageQuota:
def __init__(self, total_size: Optional[int]):
self._total_size = total_size
self._used_size = 0
@property
def total_size(self):
return self._total_size
@property
def used_size(self):
return self._used_size
def update(self, size: int):
if self._total_size is not None:
self._total_size += size
def request(self, size: int) -> bool:
if self._total_size is None:
self._used_size += size
return True
elif self._used_size + size >= self._total_size:
return False
else:
self._used_size += size
return True
def release(self, size: int):
self._used_size -= size
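# Minimal usage sketch for StorageQuota (illustrative only):
#   quota = StorageQuota(total_size=100)
#   quota.request(60)   # True: 60 of 100 now used
#   quota.request(50)   # False: 60 + 50 >= 100, nothing is reserved
#   quota.release(60)   # back to 0 used
# A quota created with total_size=None accepts every request.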
@dataslots
@dataclass
class DataInfo:
object_id: object
level: StorageLevel
memory_size: int
store_size: int
band: str = None
@dataslots
@dataclass
class InternalDataInfo:
data_info: DataInfo
object_info: ObjectInfo
class DataManager:
def __init__(self):
# mapping key is (session_id, data_key)
# mapping value is list of InternalDataInfo
self._data_key_to_info: Dict[tuple, List[InternalDataInfo]] = defaultdict(list)
def put(self,
session_id: str,
data_key: str,
data_info: DataInfo,
object_info: ObjectInfo):
info = InternalDataInfo(data_info, object_info)
self._data_key_to_info[(session_id, data_key)].append(info)
def get_infos(self,
session_id: str,
data_key: str) -> List[DataInfo]:
if (session_id, data_key) not in self._data_key_to_info: # pragma: no cover
raise DataNotExist(f'Data key {session_id, data_key} not exists.')
return [info.data_info for info in
self._data_key_to_info.get((session_id, data_key))]
def get_info(self,
session_id: str,
data_key: str) -> DataInfo:
        # if the data is stored in multiple levels,
# return the lowest level info
if (session_id, data_key) not in self._data_key_to_info: # pragma: no cover
raise DataNotExist(f'Data key {session_id, data_key} not exists.')
infos = sorted(self._data_key_to_info.get((session_id, data_key)),
key=lambda x: x.data_info.level)
return infos[0].data_info
def delete(self,
session_id: str,
data_key: str,
level: StorageLevel):
if (session_id, data_key) in self._data_key_to_info:
infos = self._data_key_to_info[(session_id, data_key)]
rest = [info for info in infos if info.data_info.level != level]
if len(rest) == 0:
del self._data_key_to_info[(session_id, data_key)]
else: # pragma: no cover
self._data_key_to_info[(session_id, data_key)] = rest
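# Behavior sketch for DataManager: if the same (session_id, data_key) was
# put() once per storage level, get_infos() returns all DataInfo entries,
# while get_info() returns the entry whose StorageLevel sorts lowest
# (assuming the enum orders faster levels before slower ones).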
class StorageHandlerActor(mo.Actor):
def __init__(self,
storage_init_params: Dict,
storage_manager_ref: mo.ActorRef):
self._storage_init_params = storage_init_params
self._storage_manager_ref = storage_manager_ref
async def __post_create__(self):
self._clients = clients = dict()
for backend, init_params in self._storage_init_params.items():
storage_cls = get_storage_backend(backend)
client = storage_cls(**init_params)
for level in StorageLevel.__members__.values():
if client.level & level:
clients[level] = client
async def get(self,
session_id: str,
data_key: str,
conditions: List = None):
data_info = await self._storage_manager_ref.get_data_info(
session_id, data_key)
if conditions is None:
data = await self._clients[data_info.level].get(
data_info.object_id)
return data
else:
try:
return await self._clients[data_info.level].get(
data_info.object_id, conditions=conditions)
except NotImplementedError:
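                # Backend cannot slice natively: fall back to fetching the
                # whole object and slicing locally (pandas-like objects via
                # iloc, plain arrays via []).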
data = await self._clients[data_info.level].get(
data_info.object_id)
try:
sliced_value = data.iloc[tuple(conditions)]
except AttributeError:
sliced_value = data[tuple(conditions)]
return sliced_value
async def put(self,
session_id: str,
data_key: str,
obj: object,
level: StorageLevel) -> DataInfo:
size = calc_data_size(obj)
await self._storage_manager_ref.allocate_size(size, level=level)
object_info = await self._clients[level].put(obj)
if object_info.size is not None and size != object_info.size:
await self._storage_manager_ref.update_quota(
object_info.size - size, level=level)
data_info = _build_data_info(object_info, level, size)
await self._storage_manager_ref.put_data_info(
session_id, data_key, data_info, object_info)
return data_info
async def delete(self,
session_id: str,
data_key: str):
infos = await self._storage_manager_ref.get_data_infos(
session_id, data_key)
for info in infos or []:
level = info.level
await self._storage_manager_ref.delete_data_info(
session_id, data_key, level)
await self._clients[level].delete(info.object_id)
await self._storage_manager_ref.release_size(info.store_size, level)
async def open_reader(self,
session_id: str,
data_key: str) -> StorageFileObject:
data_info = await self._storage_manager_ref.get_data_info(
session_id, data_key)
reader = await self._clients[data_info.level].open_reader(
data_info.object_id)
return reader
async def open_writer(self,
session_id: str,
data_key: str,
size: int,
level: StorageLevel) -> _WrappedStorageFileObject:
await self._storage_manager_ref.allocate_size(size, level=level)
writer = await self._clients[level].open_writer(size)
return _WrappedStorageFileObject(writer, level, size, session_id, data_key,
self._storage_manager_ref, self._clients[level])
async def object_info(self,
session_id: str,
data_key: str,):
data_info = await self._storage_manager_ref.get_data_info(
session_id, data_key)
return await self._clients[data_info.level].object_info(
data_info.object_id)
async def list(self, level: StorageLevel) -> List:
return await self._clients[level].list()
async def prefetch(self,
session_id: str,
data_key: str):
if StorageLevel.REMOTE not in self._clients: # pragma: no cover
raise NotImplementedError
else:
data_info = yield self._storage_manager_ref.fetch_data_info(
session_id, data_key)
await self._clients[StorageLevel.REMOTE].prefetch(data_info.object_id)
class StorageManagerActor(mo.Actor):
def __init__(self,
storage_configs: Dict,
):
self._storage_configs = storage_configs
# params to init and teardown
self._init_params = dict()
self._teardown_params = dict()
# pinned_keys
self._pinned_keys = []
# stores the mapping from data key to storage info
self._data_manager = DataManager()
self._supervisor_address = None
async def __post_create__(self):
# setup storage backend
quotas = dict()
for backend, setup_params in self._storage_configs.items():
client = await self._setup_storage(backend, setup_params)
for level in StorageLevel.__members__.values():
if client.level & level:
quotas[level] = StorageQuota(client.size)
# create handler actors for every process
strategy = IdleLabel(None, 'StorageHandler')
while True:
try:
await mo.create_actor(StorageHandlerActor,
self._init_params,
self.ref(),
uid=StorageHandlerActor.default_uid(),
address=self.address,
allocate_strategy=strategy)
except NoIdleSlot:
break
self._quotas = quotas
self._storage_handler = await mo.actor_ref(address=self.address,
uid=StorageHandlerActor.default_uid())
async def __pre_destroy__(self):
for backend, teardown_params in self._teardown_params.items():
backend_cls = get_storage_backend(backend)
await backend_cls.teardown(**teardown_params)
async def _setup_storage(self,
storage_backend: str,
storage_config: Dict):
backend = get_storage_backend(storage_backend)
storage_config = storage_config or dict()
init_params, teardown_params = await backend.setup(**storage_config)
client = backend(**init_params)
self._init_params[storage_backend] = init_params
self._teardown_params[storage_backend] = teardown_params
return client
async def _get_meta_api(self, session_id: str):
if self._supervisor_address is None:
cluster_api = await ClusterAPI.create(self.address)
self._supervisor_address = (await cluster_api.get_supervisors())[0]
return await MetaAPI.create(session_id=session_id,
address=self._supervisor_address)
def get_client_params(self):
return self._init_params
def allocate_size(self,
size: int,
level: StorageLevel):
if self._quotas[level].request(size):
return
else: # pragma: no cover
raise NotImplementedError
async def prefetch(self,
session_id: str,
data_key: str,
level: StorageLevel):
try:
info = self._data_manager.get_info(session_id, data_key)
self.pin(info.object_id)
except DataNotExist: # pragma: no cover
# Not exist in local, fetch from remote worker
try:
yield self._storage_handler.prefetch(session_id, data_key)
except NotImplementedError: # pragma: no cover
raise
def update_quota(self,
size: int,
level: StorageLevel):
self._quotas[level].update(size)
def release_size(self,
size: int,
level: StorageLevel
):
self._quotas[level].release(size)
def get_data_infos(self,
session_id: str,
data_key: str) -> List[DataInfo]:
return self._data_manager.get_infos(session_id, data_key)
def get_data_info(self,
session_id: str,
data_key: str) -> DataInfo:
return self._data_manager.get_info(session_id, data_key)
def put_data_info(self,
session_id: str,
data_key: str,
data_info: DataInfo,
object_info: Union[ObjectInfo] = None):
self._data_manager.put(session_id, data_key, data_info, object_info)
async def fetch_data_info(self,
session_id: str,
data_key: str) -> DataInfo:
meta_api = await self._get_meta_api(session_id)
address = (await meta_api.get_chunk_meta(
data_key, fields=['bands']))['bands'][0][0]
remote_manager_ref = await mo.actor_ref(uid=StorageManagerActor.default_uid(),
address=address)
data_info = yield remote_manager_ref.get_data_info(session_id, data_key)
self.put_data_info(session_id, data_key, data_info, None)
def delete_data_info(self,
session_id: str,
data_key: str,
level: StorageLevel):
self._data_manager.delete(session_id, data_key, level)
def pin(self, object_id):
self._pinned_keys.append(object_id)
def unpin(self, object_id):
self._pinned_keys.remove(object_id)
| 37.806683 | 89 | 0.600152 |
4f4a54d0b22b0f26ad55027c1aba0fff19f297f1 | 1,836 | py | Python | pbc/gto/basis/parse_cp2k.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | pbc/gto/basis/parse_cp2k.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | pbc/gto/basis/parse_cp2k.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
#
# parse CP2K format
#
MAXL = 8
def parse(string):
'''Parse the basis text which is in CP2K format, return an internal
basis format which can be assigned to :attr:`Mole.basis`
    Lines starting with '#' are ignored.
'''
bastxt = []
for dat in string.splitlines():
x = dat.split('#')[0].strip()
if (x and not x.startswith('END') and not x.startswith('BASIS')):
bastxt.append(x)
return _parse(bastxt)
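# Illustrative input (a CP2K-style block shown as a comment; values are for
# shape only and are not asserted to match any shipped basis file):
#   H SZV-GTH
#   1
#   1 0 0 3 1
#       8.3744350009  -0.0283380461
#       1.8058681460  -0.1333810052
#       0.4852528328  -0.3995676063
# The set header gives n, lmin, lmax, nexps and one contraction count per l;
# _parse() returns [[l, (exp, coeff, ...), ...], ...] sorted by l.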
def load(basisfile, symb):
return _parse(search_seg(basisfile, symb))
def _parse(blines):
header_ln = blines.pop(0)
nsets = int(blines.pop(0))
basis = []
for n in range(nsets):
comp = [int(p) for p in blines.pop(0).split()]
n, lmin, lmax, nexps, ncontractions = comp[0], comp[1], comp[2], comp[3], comp[4:]
basis_n = [[l] for l in range(lmin,lmax+1)]
for nexp in range(nexps):
bfun = [float(x) for x in blines.pop(0).split()]
exp = bfun.pop(0)
for i,l in enumerate(range(lmin,lmax+1)):
cl = [exp]
for c in range(ncontractions[i]):
cl.append(bfun.pop(0))
basis_n[i].append(tuple(cl))
basis.extend(basis_n)
bsort = []
for l in range(MAXL):
bsort.extend([b for b in basis if b[0] == l])
return bsort
def search_seg(basisfile, symb):
fin = open(basisfile, 'r')
fdata = fin.read().split('#BASIS SET')
fin.close()
for dat in fdata[1:]:
if symb+' ' in dat:
# remove blank lines
return [x.strip() for x in dat.splitlines()[1:]
if x.strip() and 'END' not in x]
raise RuntimeError('Basis not found for %s in %s' % (symb, basisfile))
| 31.655172 | 90 | 0.564815 |
4f4b55d9750bbd41486543f262d8e3f737247c3e | 525 | py | Python | abc/abc122/c/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | abc/abc122/c/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | abc/abc122/c/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
N, Q = na()
S = ns()
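# Prefix sums over "AC" occurrences: total[i] counts pairs S[j:j+2] == "AC"
# with j < i. Illustration (not judge data): S = "ACAC" gives
# total[:4] = [0, 1, 1, 2], so a 1-indexed query (l, r) is answered by
# total[r-1] - total[l-1] after the index shifts below.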
total = [0] * (N + 1)
for i in range(N - 1):
if S[i] == 'A' and S[i + 1] == 'C':
total[i + 1] = total[i] + 1
else:
total[i + 1] = total[i]
for _ in range(Q):
l, r = na()
l -= 1
r -= 1
print(total[r] - total[l]) | 19.444444 | 53 | 0.544762 |
4f4df7f28d54dd5a4319d8747c7ae2d5ce3effeb | 30,989 | py | Python | oscar/lib/python2.7/site-packages/IPython/core/tests/test_magic.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/tests/test_magic.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/tests/test_magic.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
from __future__ import absolute_import
import io
import os
import sys
import warnings
from unittest import TestCase
try:
from importlib import invalidate_caches # Required from Python 3.3
except ImportError:
def invalidate_caches():
pass
import nose.tools as nt
from IPython import get_ipython
from IPython.core import magic
from IPython.core.error import UsageError
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic,
register_line_magic, register_cell_magic)
from IPython.core.magics import execution, script, code
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils import py3compat
from IPython.utils.io import capture_output
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils.process import find_cmd
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
_ip = get_ipython()
@magic.magics_class
class DummyMagics(magic.Magics): pass
def test_extract_code_ranges():
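    # The spec string uses 1-based, inclusive history ranges; the function
    # yields 0-based, half-open (start, stop) tuples, with None marking an
    # open end (e.g. "5-6" -> (4, 6), "17:" -> (16, None)).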
instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
expected = [(0, 1),
(2, 3),
(4, 6),
(6, 9),
(9, 14),
(16, None),
(None, 9),
(9, None),
(None, 13),
(None, None)]
actual = list(code.extract_code_ranges(instr))
nt.assert_equal(actual, expected)
def test_extract_symbols():
source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
expected = [([], ['a']),
(["def b():\n return 42\n"], []),
(["class A: pass\n"], []),
(["class A: pass\n", "def b():\n return 42\n"], []),
(["class A: pass\n"], ['a']),
([], ['z'])]
for symbols, exp in zip(symbols_args, expected):
nt.assert_equal(code.extract_symbols(source, symbols), exp)
def test_extract_symbols_raises_exception_with_non_python_code():
source = ("=begin A Ruby program :)=end\n"
"def hello\n"
"puts 'Hello world'\n"
"end")
with nt.assert_raises(SyntaxError):
code.extract_symbols(source, "hello")
def test_config():
""" test that config magic does not raise
can happen if Configurable init is moved too early into
    Magics.__init__ as then a Config object will be registered as a
magic.
"""
## should not raise.
_ip.magic('config')
def test_rehashx():
# clear up everything
_ip.alias_manager.clear_aliases()
del _ip.db['syscmdlist']
_ip.magic('rehashx')
# Practically ALL ipython development systems will have more than 10 aliases
nt.assert_true(len(_ip.alias_manager.aliases) > 10)
for name, cmd in _ip.alias_manager.aliases:
# we must strip dots from alias names
nt.assert_not_in('.', name)
# rehashx must fill up syscmdlist
scoms = _ip.db['syscmdlist']
nt.assert_true(len(scoms) > 10)
def test_magic_parse_options():
"""Test that we don't mangle paths when parsing magic options."""
ip = get_ipython()
path = 'c:\\x'
m = DummyMagics(ip)
opts = m.parse_options('-f %s' % path,'f:')[0]
# argv splitting is os-dependent
if os.name == 'posix':
expected = 'c:x'
else:
expected = path
nt.assert_equal(opts['f'], expected)
def test_magic_parse_long_options():
"""Magic.parse_options can handle --foo=bar long options"""
ip = get_ipython()
m = DummyMagics(ip)
opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
nt.assert_in('foo', opts)
nt.assert_in('bar', opts)
nt.assert_equal(opts['bar'], "bubble")
@dec.skip_without('sqlite3')
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
"""
@dec.skip_without('sqlite3')
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
"""
@dec.skip_without('sqlite3')
def doctest_hist_op():
"""Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
"""
def test_hist_pof():
ip = get_ipython()
ip.run_cell(u"1+2", store_history=True)
#raise Exception(ip.history_manager.session_number)
#raise Exception(list(ip.history_manager._get_range_session()))
with TemporaryDirectory() as td:
tf = os.path.join(td, 'hist.py')
ip.run_line_magic('history', '-pof %s' % tf)
assert os.path.isfile(tf)
@dec.skip_without('sqlite3')
def test_macro():
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
ip.magic("macro test 1-3")
nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
# List macros
nt.assert_in("test", ip.magic("macro"))
@dec.skip_without('sqlite3')
def test_macro_run():
"""Test that we can run a multi-line macro successfully."""
ip = get_ipython()
ip.history_manager.reset()
cmds = ["a=10", "a+=1", py3compat.doctest_refactor_print("print a"),
"%macro test 2-3"]
for cmd in cmds:
ip.run_cell(cmd, store_history=True)
nt.assert_equal(ip.user_ns["test"].value,
py3compat.doctest_refactor_print("a+=1\nprint a\n"))
with tt.AssertPrints("12"):
ip.run_cell("test")
with tt.AssertPrints("13"):
ip.run_cell("test")
def test_magic_magic():
"""Test %magic"""
ip = get_ipython()
with capture_output() as captured:
ip.magic("magic")
stdout = captured.stdout
nt.assert_in('%magic', stdout)
nt.assert_in('IPython', stdout)
nt.assert_in('Available', stdout)
@dec.skipif_not_numpy
def test_numpy_reset_array_undec():
"Test '%reset array' functionality"
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
nt.assert_in('a', _ip.user_ns)
_ip.magic('reset -f array')
nt.assert_not_in('a', _ip.user_ns)
def test_reset_out():
"Test '%reset out' magic"
_ip.run_cell("parrot = 'dead'", store_history=True)
# test '%reset -f out', make an Out prompt
_ip.run_cell("parrot", store_history=True)
nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
_ip.magic('reset -f out')
nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
nt.assert_equal(len(_ip.user_ns['Out']), 0)
def test_reset_in():
"Test '%reset in' magic"
# test '%reset -f in'
_ip.run_cell("parrot", store_history=True)
nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
_ip.magic('%reset -f in')
nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
def test_reset_dhist():
"Test '%reset dhist' magic"
_ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
_ip.magic('cd ' + os.path.dirname(nt.__file__))
_ip.magic('cd -')
nt.assert_true(len(_ip.user_ns['_dh']) > 0)
_ip.magic('reset -f dhist')
nt.assert_equal(len(_ip.user_ns['_dh']), 0)
_ip.run_cell("_dh = [d for d in tmp]") #restore
def test_reset_in_length():
"Test that '%reset in' preserves In[] length"
_ip.run_cell("print 'foo'")
_ip.run_cell("reset -f in")
nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
def test_tb_syntaxerror():
"""test %tb after a SyntaxError"""
ip = get_ipython()
ip.run_cell("for")
# trap and validate stdout
save_stdout = sys.stdout
try:
sys.stdout = StringIO()
ip.run_cell("%tb")
out = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
# trim output, and only check the last line
last_line = out.rstrip().splitlines()[-1].strip()
nt.assert_equal(last_line, "SyntaxError: invalid syntax")
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n"
" %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop('run', None)
with tt.AssertNotPrints("not found", channel='stderr'):
ip.run_cell("%%time\n"
"run = 0\n"
"run += 1")
@dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
def test_time_futures():
"Test %time with __future__ environments"
ip = get_ipython()
ip.autocall = 0
ip.run_cell("from __future__ import division")
with tt.AssertPrints('0.25'):
ip.run_line_magic('time', 'print(1/4)')
ip.compile.reset_compiler_flags()
with tt.AssertNotPrints('0.25'):
ip.run_line_magic('time', 'print(1/4)')
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.magic('doctest_mode')
_ip.magic('doctest_mode')
def test_parse_options():
"""Tests for basic options parsing in magics."""
# These are only the most minimal of tests, more should be added later. At
# the very least we check that basic text/unicode calls work OK.
m = DummyMagics(_ip)
nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(py3compat.getcwd())[1].replace('\\','/')
curpath = py3compat.getcwd
startdir = py3compat.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.magic('cd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('cd -')
nt.assert_equal(curpath(), startdir)
_ip.magic('pushd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('popd')
nt.assert_equal(curpath(), startdir)
finally:
os.chdir(startdir)
def test_xmode():
# Calling xmode three times should be a no-op
xmode = _ip.InteractiveTB.mode
for i in range(3):
_ip.magic("xmode")
nt.assert_equal(_ip.InteractiveTB.mode, xmode)
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
nt.assert_equal(monitor, [])
_ip.magic("reset -f")
nt.assert_equal(monitor, [1])
class TestXdel(tt.TempFileMixin):
def test_xdel(self):
"""Test that references from %run are cleared by xdel."""
src = ("class A(object):\n"
" monitor = []\n"
" def __del__(self):\n"
" self.monitor.append(1)\n"
"a = A()\n")
self.mktmp(src)
# %run creates some hidden references...
_ip.magic("run %s" % self.fname)
# ... as does the displayhook.
_ip.run_cell("a")
monitor = _ip.user_ns["A"].monitor
nt.assert_equal(monitor, [])
_ip.magic("xdel a")
# Check that a's __del__ method has been called.
nt.assert_equal(monitor, [1])
def doctest_who():
"""doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
"""
def test_whos():
"""Check that whos is protected against objects where repr() fails."""
class A(object):
def __repr__(self):
raise Exception()
_ip.user_ns['a'] = A()
_ip.magic("whos")
@py3compat.u_format
def doctest_precision():
"""doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: {u}'%.5f'
In [3]: f.float_format
Out[3]: {u}'%.5f'
In [4]: %precision %e
Out[4]: {u}'%e'
In [5]: f(3.1415927)
Out[5]: {u}'3.141593e+00'
"""
def test_psearch():
with tt.AssertPrints("dict.fromkeys"):
_ip.run_cell("dict.fr*?")
def test_timeit_shlex():
"""test shlex issues with timeit (#1109)"""
_ip.ex("def f(*a,**kw): pass")
_ip.magic('timeit -n1 "this is a bug".count(" ")')
_ip.magic('timeit -r1 -n1 f(" ", 1)')
_ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
_ip.magic('timeit -r1 -n1 ("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b ")')
def test_timeit_arguments():
"Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
_ip.magic("timeit ('#')")
def test_timeit_special_syntax():
"Test %%timeit with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
def test_timeit_return():
"""
    test whether timeit -o returns an object
"""
res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
assert(res is not None)
def test_timeit_quiet():
"""
test quiet option of timeit magic
"""
with tt.AssertNotPrints("loops"):
_ip.run_cell("%timeit -n1 -r1 -q 1")
def test_timeit_return_quiet():
with tt.AssertNotPrints("loops"):
res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
assert (res is not None)
@dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
def test_timeit_futures():
"Test %timeit with __future__ environments"
ip = get_ipython()
ip.run_cell("from __future__ import division")
with tt.AssertPrints('0.25'):
ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
ip.compile.reset_compiler_flags()
with tt.AssertNotPrints('0.25'):
ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
@dec.skipif(execution.profile is None)
def test_prun_special_syntax():
"Test %%prun with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('prun', '-q %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('prun', '-q', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
@dec.skipif(execution.profile is None)
def test_prun_quotes():
"Test that prun does not clobber string escapes (GH #1302)"
_ip.magic(r"prun -q x = '\t'")
nt.assert_equal(_ip.user_ns['x'], '\t')
def test_extension():
# Debugging information for failures of this test
print('sys.path:')
for p in sys.path:
print(' ', p)
print('CWD', os.getcwd())
nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
sys.path.insert(0, daft_path)
try:
_ip.user_ns.pop('arq', None)
invalidate_caches() # Clear import caches
_ip.magic("load_ext daft_extension")
nt.assert_equal(_ip.user_ns['arq'], 185)
_ip.magic("unload_ext daft_extension")
assert 'arq' not in _ip.user_ns
finally:
sys.path.remove(daft_path)
def test_notebook_export_json():
_ip = get_ipython()
_ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
for i, cmd in enumerate(cmds, start=1):
_ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.ipynb")
_ip.magic("notebook -e %s" % outfile)
class TestEnv(TestCase):
def test_env(self):
env = _ip.magic("env")
self.assertTrue(isinstance(env, dict))
def test_env_get_set_simple(self):
env = _ip.magic("env var val1")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val1')
self.assertEqual(_ip.magic("env var"), 'val1')
env = _ip.magic("env var=val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2')
def test_env_get_set_complex(self):
env = _ip.magic("env var 'val1 '' 'val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], "'val1 '' 'val2")
self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
env = _ip.magic('env var=val2 val3="val4')
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2 val3="val4')
def test_env_set_bad_input(self):
self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
def test_env_set_whitespace(self):
self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
class CellMagicTestCase(TestCase):
def check_ident(self, magic):
# Manually called, we get the result
out = _ip.run_cell_magic(magic, 'a', 'b')
nt.assert_equal(out, ('a','b'))
# Via run_cell, it goes into the user's namespace via displayhook
_ip.run_cell('%%' + magic +' c\nd')
nt.assert_equal(_ip.user_ns['_'], ('c','d'))
def test_cell_magic_func_deco(self):
"Cell magic using simple decorator"
@register_cell_magic
def cellm(line, cell):
return line, cell
self.check_ident('cellm')
def test_cell_magic_reg(self):
"Cell magic manually registered"
def cellm(line, cell):
return line, cell
_ip.register_magic_function(cellm, 'cell', 'cellm2')
self.check_ident('cellm2')
def test_cell_magic_class(self):
"Cell magics declared via a class"
@magics_class
class MyMagics(Magics):
@cell_magic
def cellm3(self, line, cell):
return line, cell
_ip.register_magics(MyMagics)
self.check_ident('cellm3')
def test_cell_magic_class2(self):
"Cell magics declared via a class, #2"
@magics_class
class MyMagics2(Magics):
@cell_magic('cellm4')
def cellm33(self, line, cell):
return line, cell
_ip.register_magics(MyMagics2)
self.check_ident('cellm4')
# Check that nothing is registered as 'cellm33'
c33 = _ip.find_cell_magic('cellm33')
nt.assert_equal(c33, None)
def test_file():
"""Basic %%file"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("file", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_var_expand():
"""%%file $filename"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.user_ns['filename'] = fname
ip.run_cell_magic("file", '$filename', u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_unicode():
"""%%file with unicode cell"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("file", fname, u'\n'.join([
u'liné1',
u'liné2',
]))
with io.open(fname, encoding='utf-8') as f:
s = f.read()
nt.assert_in(u'liné1\n', s)
nt.assert_in(u'liné2', s)
def test_file_amend():
"""%%file -a amends files"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file2')
ip.run_cell_magic("file", fname, u'\n'.join([
'line1',
'line2',
]))
ip.run_cell_magic("file", "-a %s" % fname, u'\n'.join([
'line3',
'line4',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line3\n', s)
def test_script_config():
ip = get_ipython()
ip.config.ScriptMagics.script_magics = ['whoda']
sm = script.ScriptMagics(shell=ip)
nt.assert_in('whoda', sm.magics['cell'])
@dec.skip_win32
def test_script_out():
ip = get_ipython()
ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
@dec.skip_win32
def test_script_err():
ip = get_ipython()
ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_bg_out():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
@dec.skip_win32
def test_script_bg_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
@dec.skip_win32
def test_script_bg_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
def test_script_defaults():
ip = get_ipython()
for cmd in ['sh', 'bash', 'perl', 'ruby']:
try:
find_cmd(cmd)
except Exception:
pass
else:
nt.assert_in(cmd, ip.magics_manager.magics['cell'])
@magics_class
class FooFoo(Magics):
"""class with both %foo and %%foo magics"""
@line_magic('foo')
def line_foo(self, line):
"I am line foo"
pass
@cell_magic("foo")
def cell_foo(self, line, cell):
"I am cell foo, not line foo"
pass
def test_line_cell_info():
"""%%foo and %foo magics are distinguishable to inspect"""
ip = get_ipython()
ip.magics_manager.register(FooFoo)
oinfo = ip.object_inspect('foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
oinfo = ip.object_inspect('%%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
oinfo = ip.object_inspect('%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
def test_multiple_magics():
ip = get_ipython()
foo1 = FooFoo(ip)
foo2 = FooFoo(ip)
mm = ip.magics_manager
mm.register(foo1)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
mm.register(foo2)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
def test_alias_magic():
"""Test %alias_magic."""
ip = get_ipython()
mm = ip.magics_manager
# Basic operation: both cell and line magics are created, if possible.
ip.run_line_magic('alias_magic', 'timeit_alias timeit')
nt.assert_in('timeit_alias', mm.magics['line'])
nt.assert_in('timeit_alias', mm.magics['cell'])
# --cell is specified, line magic not created.
ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
nt.assert_in('timeit_cell_alias', mm.magics['cell'])
# Test that line alias is created successfully.
ip.run_line_magic('alias_magic', '--line env_alias env')
nt.assert_equal(ip.run_line_magic('env', ''),
ip.run_line_magic('env_alias', ''))
def test_save():
"""Test %save."""
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "testsave.py")
ip.run_line_magic("save", "%s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 1)
nt.assert_in('coding: utf-8', content)
ip.run_line_magic("save", "-a %s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 2)
nt.assert_in('coding: utf-8', content)
def test_store():
"""Test %store."""
ip = get_ipython()
ip.run_line_magic('load_ext', 'storemagic')
# make sure the storage is empty
ip.run_line_magic('store', '-z')
ip.user_ns['var'] = 42
ip.run_line_magic('store', 'var')
ip.user_ns['var'] = 39
ip.run_line_magic('store', '-r')
nt.assert_equal(ip.user_ns['var'], 42)
ip.run_line_magic('store', '-d var')
ip.user_ns['var'] = 39
ip.run_line_magic('store' , '-r')
nt.assert_equal(ip.user_ns['var'], 39)
def _run_edit_test(arg_s, exp_filename=None,
exp_lineno=-1,
exp_contents=None,
exp_is_temp=None):
ip = get_ipython()
M = code.CodeMagics(ip)
last_call = ['','']
opts,args = M.parse_options(arg_s,'prxn:')
filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
if exp_filename is not None:
nt.assert_equal(exp_filename, filename)
if exp_contents is not None:
with io.open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
nt.assert_equal(exp_contents, contents)
if exp_lineno != -1:
nt.assert_equal(exp_lineno, lineno)
if exp_is_temp is not None:
nt.assert_equal(exp_is_temp, is_temp)
def test_edit_interactive():
"""%edit on interactively defined objects"""
ip = get_ipython()
n = ip.execution_count
ip.run_cell(u"def foo(): return 1", store_history=True)
try:
_run_edit_test("foo")
except code.InteractivelyDefined as e:
nt.assert_equal(e.index, n)
else:
raise AssertionError("Should have raised InteractivelyDefined")
def test_edit_cell():
"""%edit [cell id]"""
ip = get_ipython()
ip.run_cell(u"def foo(): return 1", store_history=True)
# test
_run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
def test_bookmark():
ip = get_ipython()
ip.run_line_magic('bookmark', 'bmname')
with tt.AssertPrints('bmname'):
ip.run_line_magic('bookmark', '-l')
ip.run_line_magic('bookmark', '-d bmname')
def test_ls_magic():
ip = get_ipython()
json_formatter = ip.display_formatter.formatters['application/json']
json_formatter.enabled = True
lsmagic = ip.magic('lsmagic')
with warnings.catch_warnings(record=True) as w:
j = json_formatter(lsmagic)
nt.assert_equal(sorted(j), ['cell', 'line'])
nt.assert_equal(w, []) # no warnings
def test_strip_initial_indent():
def sii(s):
lines = s.splitlines()
return '\n'.join(code.strip_initial_indent(lines))
nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
nt.assert_equal(sii("a\n b"), "a\n b")
| 30.621542 | 99 | 0.574075 |
4f50ffe7b136b5aaa1836fca29517c3d8cbb2b62 | 17,925 | py | Python | cp93pytools/process.py | caph1993/caph1993-pytools | e3efee627a266f3a1c7fdad51e095f56c60d5f12 | [
"MIT"
] | null | null | null | cp93pytools/process.py | caph1993/caph1993-pytools | e3efee627a266f3a1c7fdad51e095f56c60d5f12 | [
"MIT"
] | null | null | null | cp93pytools/process.py | caph1993/caph1993-pytools | e3efee627a266f3a1c7fdad51e095f56c60d5f12 | [
"MIT"
] | null | null | null | from typing import Any, Optional, TypedDict, Union
from ._dict import Dict
from subprocess import (Popen, PIPE, DEVNULL, TimeoutExpired,
CalledProcessError)
from tempfile import NamedTemporaryFile
from threading import Thread, Timer
from queue import Queue, Empty
import sys, time, io, asyncio, threading
from .interrupt import terminate_thread
ON_POSIX = 'posix' in sys.builtin_module_names
def _silent_interrupt(func):
def wrapper(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except KeyboardInterrupt:
pass
return
wrapper.__name__ = func.__name__
return wrapper
class MyProcess():
'''
Tool for launching a process with support for live stdout/stderr
handling, as well as timeout, custom input and stdout/stderr
    capturing/non-capturing.
Uses threads and queues.
Arguments:
args: arguments passed to Popen
shell: passed to Popen
input: string that simulates stdin
timeout: None or float timeout in seconds
check: bool. If true and an error ocurred, throws an exception
capture_stdout: bool. If true, output is assigned to 'self.stdout'
live_stdout: None, io.BufferedWriter instance, or any object
with methods write and flush. If given, write(line) and flush()
are called for each line of stdout as soon as it is received
(as soon as the program flushes), with a maximum delay of
'micro_delay'.
timeout_stdout: None (infinity) or float. Time in seconds to
wait for the live_stdout handler after the process terminates.
Recall that the live_stdout handler may still have some
remaining lines to handle once the process finishes.
The additional time will not affect 'self.elapsed'.
max_stdout: None, int or string. If more than max_stdout bytes
are received, the process is killed. Notice that if capturing
and live_stdout are disabled, no bytes will be received at all.
Accepts string of the form '#', '#k', '#M', '#G', '#T'.
capture_stderr: (see capture_stdout)
        live_stderr: (see live_stdout)
        timeout_stderr: (see timeout_stdout)
max_stderr: (see max_stdout)
encoding: string used for encoding/decoding stdin/stdout/stderr
micro_delay: float seconds (see stdout_handler)
        block: bool. Blocks the current thread until the process finishes.
            If false, you must call wait()
    Returns self and sets:
self.stdout: string, stdout of the process (if captured)
self.stderr: string, stderr of the process (if captured)
self.elapsed: float, approximate elapsed time of the process
in seconds.
self.timeout: float, copy of the timeout argument
self.error: None or string, either 'TimeoutExpired',
'ExcessOfOutput', 'KeyboardInterrupt',
'NonZeroExitCode #', or an unexpected exception as string.
self.returncode: int, exit code of the process
'''
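    # Hedged usage sketch (not part of the original class):
    #   p = MyProcess(['python3', '-c', 'print("hi")'])
    #   p.run(timeout=5.0)
    #   # now p.stdout == 'hi\n', p.error is None, p.elapsed is a float
    # run() blocks until completion; run_detached() returns immediately and
    # the result fields are populated once the internal waiter finishes.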
def __init__(self, args, shell=False, env=None, cwd=None):
kwargs = locals()
kwargs.pop('self')
self.kwargs = Dict(kwargs)
def run(
self,
input: str = None,
timeout: float = None,
check: bool = False,
capture_stdout=True,
live_stdout=None,
timeout_stdout=None,
max_stdout=None,
capture_stderr=False,
live_stderr=None,
timeout_stderr=None,
max_stderr=None,
encoding='utf-8',
micro_delay=1e-3,
):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
interrupt = Thread(
target=self._kill,
args=['KeyboardInterrupt'],
)
try:
#self._sync.wait() was here before but
#the loop is needed for quick handling of
#terminate_thread(thread, KeyboardInterrupt)
while not self._sync.is_set():
time.sleep(1e-4)
except KeyboardInterrupt:
interrupt.start()
except Exception as e:
self._error = str(e)
while 1:
try:
self._sync.wait()
if check and self.error:
raise CalledProcessError(
returncode=self.returncode,
cmd=self.kwargs.args,
)
break
except KeyboardInterrupt:
pass
return self
async def async_run(self, input=None, timeout=None, check=False,
capture_stdout=True, live_stdout=None,
timeout_stdout=None, max_stdout=None,
capture_stderr=False, live_stderr=None,
timeout_stderr=None, max_stderr=None, encoding='utf-8',
micro_delay=1e-3):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
assert self._async
await self._async.wait()
if check and self.error:
raise CalledProcessError(
returncode=self.returncode,
cmd=self.kwargs.args,
)
return self
def run_detached(
self,
input: str = None,
timeout: float = None,
check: bool = False,
capture_stdout=True,
live_stdout=None,
timeout_stdout=None,
max_stdout=None,
capture_stderr=False,
live_stderr=None,
timeout_stderr=None,
max_stderr=None,
encoding='utf-8',
micro_delay=1e-3,
):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
return self
def _start(self):
self._done = False
self._stop = False
self._error = None
self._timeout = self._parse_time(self.kwargs.timeout, None)
self._micro_delay = self.kwargs.micro_delay
self._encoding = self.kwargs.encoding
self._max_stdout = self._parse_eng(self.kwargs.max_stdout)
self._max_stderr = self._parse_eng(self.kwargs.max_stderr)
self._timeout_stdout = self._parse_time(self.kwargs.timeout_stdout,
float('inf'))
self._timeout_stderr = self._parse_time(self.kwargs.timeout_stderr,
float('inf'))
self._threads = {}
self._check = self.kwargs.check
self._sync = threading.Event()
try:
self._async = asyncio.Event()
except RuntimeError:
self._async = None
self._threads['waiter'] = Thread(target=self._waiter)
if self._timeout:
self._threads['timer'] = Timer(
self._timeout,
self._kill,
args=['TimeoutExpired'],
)
config = {
'out': {
'capture': self.kwargs.capture_stdout,
'live': self.kwargs.live_stdout,
'wait': self._timeout_stdout,
'max_size': self._max_stdout,
},
'err': {
'capture': self.kwargs.capture_stderr,
'live': self.kwargs.live_stderr,
'wait': self._timeout_stderr,
'max_size': self._max_stderr,
},
}
for key, val in config.items():
piped = val['capture'] or val['live']
val['pipe'] = PIPE if piped else DEVNULL
self._start_time = time.time()
try:
self._process = Popen(
self.kwargs.args,
shell=self.kwargs.shell,
stdin=PIPE,
stdout=config['out']['pipe'],
stderr=config['err']['pipe'],
#bufsize=1,
close_fds=ON_POSIX,
env=self.kwargs.env,
cwd=self.kwargs.cwd,
)
except FileNotFoundError as e:
self._stop = True
self._done = True
self._sync.set()
if self._async:
self._async.set()
self.error = str(e)
if self.kwargs.check:
raise
return
assert self._process.stdin
if self.kwargs.input != None:
_input = self.kwargs.input.encode(self.kwargs.encoding)
self._process.stdin.write(_input)
self._process.stdin.close()
config['out']['source'] = self._process.stdout
config['err']['source'] = self._process.stderr
self._buffer_stdout = io.StringIO(
) if config['out']['capture'] else None
config['out']['buffer'] = self._buffer_stdout
self._buffer_stderr = io.StringIO(
) if config['err']['capture'] else None
config['err']['buffer'] = self._buffer_stderr
for key, val in config.items():
queues = []
h = {}
if val['capture']:
h['capture'] = dict(ostream=val['buffer'], flush=False,
wait=float('inf'))
if val['live']:
h['handler'] = dict(ostream=val['live'], flush=True,
wait=val['wait'])
for name, kwargs in h.items():
queues.append(Queue())
self._threads[f'std{key}_{name}'] = Thread(
target=self._live_handler,
args=[queues[-1]],
kwargs=kwargs,
)
if queues:
self._threads[f'{key}-main'] = Thread(
target=self._non_blocking_reader,
kwargs=dict(istream=val['source'], queues=queues,
max_size=val['max_size']))
for key, t in self._threads.items():
t.start()
return
def _waiter(self):
try:
while not self._stop:
if self._process.poll() != None:
self._stop = True
else:
time.sleep(self._micro_delay)
if self._process.stdout:
self._process.stdout.close()
if self._process.stderr:
self._process.stderr.close()
def get_value(buffer):
if buffer == None:
return None
value = buffer.getvalue()
buffer.close()
return value
self._end = time.time()
if 'timer' in self._threads:
self._threads['timer'].cancel()
self.stdout = get_value(self._buffer_stdout)
self.stderr = get_value(self._buffer_stderr)
self.elapsed = self._end - self._start_time
self.timeout = self._timeout
self.returncode = self._process.wait()
if self._error:
self.error = self._error
elif self.returncode != 0:
self.error = f'NonZeroExitCode {self.returncode}'
else:
self.error = None
for key, t in self._threads.items():
if key != 'waiter':
t.join()
finally:
self._done = True
self._sync.set()
if self._async:
self._async.set()
return
def kill(self):
self._kill('KilledByUser')
while not self._done:
time.sleep(1e-3)
return
def _kill(self, error):
if self.is_active():
self._error = error
if 'timer' in self._threads:
self._threads['timer'].cancel()
self._stop = True
self._process.kill()
for k, t in self._threads.items():
if k != 'waiter' and k != 'timer':
terminate_thread(t, KeyboardInterrupt)
for k, t in self._threads.items():
if k != 'waiter' and k != 'timer':
t.join()
@_silent_interrupt
def _non_blocking_reader(self, istream, queues, max_size):
#https://stackoverflow.com/a/4896288/3671939
for line in iter(istream.readline, b''):
max_size -= len(line)
if max_size < 0:
self._stop = True
self._error = 'ExcessOfOutput'
if self._stop:
break
line = line.decode(self._encoding)
for q in queues:
q.put(line)
return istream.close()
@_silent_interrupt
def _live_handler(self, queue, ostream, flush, wait):
waiting = False
waiting_start = None
while not self._stop or waiting:
try:
elem = queue.get(timeout=self._micro_delay)
except Empty:
waiting = False
else:
ostream.write(elem)
if flush:
ostream.flush()
if self._stop:
if waiting_start == None:
waiting_start = time.time()
waiting = True
if time.time() - waiting_start > wait:
waiting = False
return
def _parse_eng(self, x):
units = {'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12}
if x == None:
return float('inf')
elif isinstance(x, int):
return x
elif isinstance(x, float):
return round(x)
elif x.isdigit():
return int(x)
else:
return int(x[:-1]) * int(units[x[-1]])
def _parse_time(self, x, ifNone):
return ifNone if x == None else x
def is_active(self):
return self._done == False
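# Hedged usage sketch (an addition, not part of the original module): a
# typical blocking call, mirroring the documented run() signature above.
# 'sleep' is a POSIX stand-in command.
def _example_myprocess():
    p = MyProcess(['sleep', '0.1'])
    p.run(timeout=5, capture_stdout=True)
    return p.returncode, p.elapsed, p.error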
class Tee(io.BufferedWriter):
"""
Simple BufferedWriter that broadcasts
data to multiple BufferedWriters
"""
def __init__(self, *outputs):
self.outputs = outputs
def write(self, s):
for out in self.outputs:
out.write(s)
def flush(self):
for out in self.outputs:
out.flush()
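# Hedged sketch (an addition): Tee fans one write out to several sinks; any
# objects exposing write()/flush() will do, e.g. stdout plus a StringIO.
def _example_tee():
    buffer = io.StringIO()
    tee = Tee(sys.stdout, buffer)
    tee.write('hello\n')
    tee.flush()
    return buffer.getvalue()  # 'hello\n', also echoed to stdout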
class CustomOStream(io.BufferedWriter):
def __init__(self, write_function, flush_function=None):
self.write = write_function
self.flush = flush_function or (lambda: 0) # type:ignore
self.tmp = NamedTemporaryFile('r', suffix='.out')
self.fileno = self.tmp.fileno # Provide a dummy fileno
    def __del__(self):
        self.tmp.close()
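# Hedged sketch (an addition): CustomOStream routes writes through a plain
# callback, e.g. to collect emitted lines in a list.
def _example_custom_ostream():
    lines = []
    stream = CustomOStream(lines.append)
    stream.write('captured\n')
    return lines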
class TemporaryStdout(io.RawIOBase):
    '''
    Replace stdout temporarily with this stream.
    Subclasses should implement write(); this base class only swaps
    sys.stdout in and out.
    '''
    def __enter__(self):
        self.prev = sys.stdout
        sys.stdout = self
        return self
    def __exit__(self, *args):
        sys.stdout = self.prev
def test():
# Testing mode
cmd1 = ' && '.join(f'sleep 0.25 && echo "{i} "' for i in range(4))
cmd2 = "python3 -c 'import time; [print(i, flush=True) or time.sleep(0.25) for i in range(4)] ; print(input().upper());'"
cmd3 = "python3 -c 'import time; [print(i, flush=True) or time.sleep(0.25) for i in range(4)] ; print(input().upper()); exit(1)'"
cmd4 = "python3 -c 'for i in range(10**6): print(str(0)*i, flush=True)'"
class TmpWriter:
        def write(self, s):
            print(s, end='', flush=True)
            time.sleep(0.6)
        def flush(self):
            pass
tests = [
{
'title': 'No live printing, no error and capture stdout',
'cmd': cmd1,
'kwargs': dict(
shell=True,
timeout=None,
capture_stdout=True,
)
},
{
'title':
'Print 1..4 (live), no error and capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=1.1,
live_stdout=sys.stdout,
capture_stdout=True,
)
},
{
'title':
'Print 1..4 (live), no error and do not capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=1.1,
live_stdout=sys.stdout,
capture_stdout=False,
)
},
{
'title':
'Print 1..? (live), Timeout error, capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=0.6,
live_stdout=sys.stdout,
capture_stdout=True,
)
},
{
'title':
'Live printing, Timeout error, no capture, wait for handler',
'cmd':
cmd2,
'kwargs':
dict(
shell=True,
timeout=0.6,
live_stdout=TmpWriter(),
#wait_live_stdout=False,
capture_stdout=False,
)
},
{
'title': 'Live printing, Excess of Output',
'cmd': cmd4,
'kwargs': dict(
shell=True,
live_stdout=sys.stdout,
max_stdout='1k',
)
},
]
for i, test in enumerate(tests):
print('-' * 10, f'TEST {i+1}', '-' * 10)
print(test['title'])
p = MyProcess(
test['cmd'],
shell=test['kwargs'].pop('shell', False),
)
p.run(**test['kwargs'])
print('Elapsed:', p.elapsed)
print('Error:', p.error)
print('Stdout:', p.stdout)
exit(0) # Required for some reason
| 32.950368 | 133 | 0.51643 |
4f511f53bad6d3c7d9aac61bb757133b62f80e8e | 6,125 | py | Python | PaddleCV/image_classification/models/resnext101_wsl.py | yangapku/models | b50bc7b77288bcdaed676e70353310786c658d6e | [
"Apache-2.0"
] | 1 | 2020-03-25T11:32:22.000Z | 2020-03-25T11:32:22.000Z | PaddleCV/image_classification/models/resnext101_wsl.py | yangapku/models | b50bc7b77288bcdaed676e70353310786c658d6e | [
"Apache-2.0"
] | null | null | null | PaddleCV/image_classification/models/resnext101_wsl.py | yangapku/models | b50bc7b77288bcdaed676e70353310786c658d6e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import math
from paddle.fluid.param_attr import ParamAttr
__all__ = ["ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", "ResNeXt101_32x32d_wsl", "ResNeXt101_32x48d_wsl", "Fix_ResNeXt101_32x48d_wsl"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNeXt101_wsl():
def __init__(self, layers=101, cardinality=32, width=48):
self.params = train_parameters
self.layers = layers
self.cardinality = cardinality
self.width = width
def net(self, input, class_dim=1000):
layers = self.layers
cardinality = self.cardinality
width = self.width
depth = [3, 4, 23, 3]
base_width = cardinality * width
num_filters = [base_width * i for i in [1, 2, 4, 8]]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
name="conv1") #debug
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv_name = 'layer' + str(block+1) + "." + str(i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
name=conv_name)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),name='fc.weight'),
bias_attr=fluid.param_attr.ParamAttr(name='fc.bias'))
return out
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None):
if "downsample" in name:
conv_name = name + '.0'
else:
conv_name = name
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=conv_name + ".weight"),
bias_attr=False)
if "downsample" in name:
bn_name = name[:9] + 'downsample' + '.1'
else:
if "conv1" == name:
bn_name = 'bn' + name[-1]
else:
bn_name = (name[:10] if name[7:9].isdigit() else name[:9]) + 'bn' + name[-1]
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + '.weight'),
bias_attr=ParamAttr(bn_name + '.bias'),
moving_mean_name=bn_name + '.running_mean',
moving_variance_name=bn_name + '.running_var', )
def shortcut(self, input, ch_out, stride, name):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality, name):
cardinality = self.cardinality
width = self.width
conv0 = self.conv_bn_layer(
input=input,
num_filters=num_filters,
filter_size=1,
act='relu',
name=name + ".conv1")
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu',
name=name + ".conv2")
conv2 = self.conv_bn_layer(
input=conv1,
num_filters=num_filters//(width//8),
filter_size=1,
act=None,
name=name + ".conv3")
short = self.shortcut(
input, num_filters//(width//8), stride, name=name + ".downsample")
return fluid.layers.elementwise_add(
x=short, y=conv2, act='relu')
def ResNeXt101_32x8d_wsl():
model = ResNeXt101_wsl(cardinality=32, width=8)
return model
def ResNeXt101_32x16d_wsl():
model = ResNeXt101_wsl(cardinality=32, width=16)
return model
def ResNeXt101_32x32d_wsl():
model = ResNeXt101_wsl(cardinality=32, width=32)
return model
def ResNeXt101_32x48d_wsl():
model = ResNeXt101_wsl(cardinality=32, width=48)
return model
def Fix_ResNeXt101_32x48d_wsl():
model = ResNeXt101_wsl(cardinality=32, width=48)
return model
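# Hedged usage sketch (an addition; assumes the fluid 1.x static-graph API
# used throughout this module):
def _example_build(class_dim=1000):
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
    model = ResNeXt101_32x8d_wsl()
    return model.net(image, class_dim=class_dim)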
| 33.108108 | 138 | 0.565061 |
4f50c065f04f548e03900444a9ba5c555fb2ad70 | 16,784 | py | Python | openpnm/algorithms/ReactiveTransport.py | Eravalord/OpenPNM | 505bc37318a3ba40d8343f89cd347d0073dd9d14 | [
"MIT"
] | 1 | 2020-11-20T06:14:34.000Z | 2020-11-20T06:14:34.000Z | openpnm/algorithms/ReactiveTransport.py | Eravalord/OpenPNM | 505bc37318a3ba40d8343f89cd347d0073dd9d14 | [
"MIT"
] | null | null | null | openpnm/algorithms/ReactiveTransport.py | Eravalord/OpenPNM | 505bc37318a3ba40d8343f89cd347d0073dd9d14 | [
"MIT"
] | null | null | null | import numpy as np
from openpnm.algorithms import GenericTransport
# Uncomment this line when we stop supporting Python 3.6
# from dataclasses import dataclass, field
# from typing import List
from openpnm.utils import logging, Docorator, GenericSettings
docstr = Docorator()
logger = logging.getLogger(__name__)
# class RelaxationSettings(GenericSettings):
# r"""
# This class is a demonstration of how we can add nested settings classes
# to other settings classes to make categories for some settings. This is
# being appended to the ReactiveTransportSettings class under the
# 'relaxation' attribute, and it works as planned by allowing the nested
# dot access to its parameters. More work would be required to get it
# functional such as dealing with deeply nested dicts and so on, but it
# works in principal.
# """
# source = 1.0
# quantity = 1.0
@docstr.get_sectionsf('ReactiveTransportSettings',
sections=['Parameters', 'Other Parameters'])
@docstr.dedent
# Uncomment this line when we stop supporting Python 3.6
# @dataclass
class ReactiveTransportSettings(GenericSettings):
r"""
Parameters
----------
%(GenericTransportSettings.parameters)s
quantity : str
The name of the physical quantity to be calculated
conductance : str
The name of the pore-scale transport conductance values. These are
typically calculated by a model attached to a *Physics* object
associated with the given *Phase*.
Other Parameters
----------------
sources : list
List of source terms that have been added
variable_props : list
List of props that are variable throughout the algorithm
relaxation_source : float (default = 1.0)
A relaxation factor to control under-relaxation of the source term.
Factor approaching 0 leads to improved stability but slower simulation.
Factor approaching 1 gives fast simulation but may be unstable.
relaxation_quantity : float (default = 1.0)
A relaxation factor to control under-relaxation for the quantity
solving for. Factor approaching 0 leads to improved stability but
slower simulation. Factor approaching 1 gives fast simulation but
may be unstable.
nlin_max_iter : int
Maximum number of iterations allowed for the nonlinear solver to
converge. This parameter is different that ``GenericTransport``'s
``solver_max_iter``.
----
**The following parameters pertain to the ``GenericTransport`` class**
%(GenericTransportSettings.other_parameters)s
"""
nlin_max_iter = 5000
# relaxation = RelaxationSettings()
relaxation_source = 1.0
relaxation_quantity = 1.0
# Swap the following 2 lines when we stop supporting Python 3.6
# sources: List = field(default_factory=lambda: [])
sources = []
# Swap the following 2 lines when we stop supporting Python 3.6
variable_props = []
# variable_props: List = field(default_factory=lambda: [])
@docstr.get_sectionsf('ReactiveTransport', sections=['Parameters'])
@docstr.dedent
class ReactiveTransport(GenericTransport):
r"""
A subclass for steady-state simulations with (optionally) source terms
Parameters
----------
%(GenericTransport.parameters)s
Notes
-----
This subclass performs steady simulations of transport phenomena with
reactions when source terms are added.
"""
def __init__(self, settings={}, phase=None, **kwargs):
super().__init__(**kwargs)
self.settings._update_settings_and_docs(ReactiveTransportSettings)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
@docstr.get_sectionsf('ReactiveTransport.setup',
sections=['Parameters', 'Notes'])
@docstr.dedent
def setup(self, phase=None, quantity='', conductance='',
nlin_max_iter=None, relaxation_source=None,
relaxation_quantity=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings
Parameters
----------
%(GenericTransportSettings.parameters)s
%(ReactiveTransportSettings.parameters)s
Notes
-----
Under-relaxation is a technique used for improving stability of a
computation, particularly in the presence of highly non-linear terms.
Under-relaxation used here limits the change in a variable from one
iteration to the next. An optimum choice of the relaxation factor is
one that is small enough to ensure stable simulation and large enough
to speed up the computation.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if nlin_max_iter:
self.settings['nlin_max_iter'] = nlin_max_iter
if relaxation_source:
self.settings['relaxation_source'] = relaxation_source
if relaxation_quantity:
self.settings['relaxation_quantity'] = relaxation_quantity
super().setup(**kwargs)
def run(self, x0=None):
r"""
Builds the A and b matrices, and calls the solver specified in the
``settings`` attribute.
Parameters
----------
x0 : ND-array
Initial guess of unknown variable
"""
quantity = self.settings['quantity']
logger.info('Running ReactiveTransport')
x0 = np.zeros(self.Np, dtype=float) if x0 is None else x0
self["pore.initial_guess"] = x0
x = self._run_reactive(x0)
self[quantity] = x
@docstr.dedent
def reset(self, source_terms=False, variable_props=False, **kwargs):
r"""
%(GenericTransport.reset.full_desc)s
Parameters
----------
%(GenericTransport.reset.parameters)s
source_terms : boolean
If ``True`` removes source terms. The default is ``False``.
variable_props : boolean
If ``True`` removes variable properties. The default is ``False``.
"""
super().reset(**kwargs)
if source_terms:
# Remove item from label dictionary
for item in self.settings['sources']:
self.pop(item)
# Reset the settings dict
self.settings['sources'] = []
if variable_props:
self.settings['variable_props'] = []
def set_source(self, propname, pores):
r"""
Applies a given source term to the specified pores
Parameters
----------
propname : string
The property name of the source term model to be applied
pores : array_like
The pore indices where the source term should be applied
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised.
"""
locs = self.tomask(pores=pores)
# Check if any BC is already set in the same locations
locs_BC = np.isfinite(self['pore.bc_value']) + np.isfinite(self['pore.bc_rate'])
if (locs & locs_BC).any():
raise Exception('Boundary conditions already present in given '
+ 'pores, cannot also assign source terms')
# Set source term
self[propname] = locs
# Check if propname already in source term list
if propname not in self.settings['sources']:
self.settings['sources'].append(propname)
def _set_variable_props(self, propnames):
r"""
Inform the algorithm which properties are variable, so those on which
they depend will be updated on each solver iteration.
Parameters
----------
propnames : string or list of strings
The propnames of the properties that are variable throughout
the algorithm.
"""
if isinstance(propnames, str): # Convert string to list if necessary
propnames = [propnames]
d = self.settings["variable_props"]
self.settings["variable_props"] = list(set(d) | set(propnames))
def _update_iterative_props(self):
"""r
Update physics using the current value of ``quantity``
Notes
-----
The algorithm directly writes the value of 'quantity' into the phase.
This method was implemented relaxing one of the OpenPNM rules of
algorithms not being able to write into phases.
"""
phase = self.project.phases()[self.settings['phase']]
physics = self.project.find_physics(phase=phase)
geometries = self.project.geometries().values()
# Put quantity on phase so physics finds it when regenerating
phase.update(self.results())
# Regenerate iterative props with new guess
iterative_props = self._get_iterative_props()
phase.regenerate_models(propnames=iterative_props)
for geometry in geometries:
geometry.regenerate_models(iterative_props)
for phys in physics:
phys.regenerate_models(iterative_props)
def _apply_sources(self):
"""r
Update ``A`` and ``b`` applying source terms to specified pores
Notes
-----
- Applying source terms to ``A`` and ``b`` is performed after (optionally)
under-relaxing the source term to improve numerical stability. Physics
are also updated before applying source terms to ensure that source
terms values are associated with the current value of 'quantity'.
- For source term under-relaxation, old values of S1 and S2 need to be
stored somewhere, we chose to store them on the algorithm object. This is
because storing them on phase/physics creates unintended problems, ex.
storing them on physics -> IO complains added depth to the NestedDict, and
storing them on the phase object results in NaNs in case source term is
only added to a subset of nodes, which breaks our _check_for_nans algorithm.
Warnings
--------
In the case of a transient simulation, the updates in ``A`` and ``b``
also depend on the time scheme. So, ``_correct_apply_sources()`` needs to
be run afterwards to correct the already applied relaxed source terms.
"""
phase = self.project.phases()[self.settings['phase']]
w = self.settings['relaxation_source']
for item in self.settings['sources']:
element, prop = item.split(".")
_item = ".".join([element, "_" + prop])
first_iter = False if _item + ".S1.old" in self.keys() else True
Ps = self.pores(item)
# Fetch S1/S2 and their old values (don't exist on 1st iter)
S1 = phase[item + ".S1"][Ps]
S2 = phase[item + ".S2"][Ps]
X1 = self[_item + ".S1.old"][Ps] if not first_iter else S1
X2 = self[_item + ".S2.old"][Ps] if not first_iter else S2
# Source term relaxation
S1 = phase[item + '.S1'][Ps] = w * S1 + (1.0 - w) * X1
S2 = phase[item + '.S2'][Ps] = w * S2 + (1.0 - w) * X2
# Modify A and b based on "relaxed" S1/S2
datadiag = self._A.diagonal().copy()
datadiag[Ps] = datadiag[Ps] - S1
self._A.setdiag(datadiag)
self._b[Ps] = self._b[Ps] + S2
# Replace old values of S1/S2 by their current values
self[_item + ".S1.old"] = phase[item + ".S1"]
self[_item + ".S2.old"] = phase[item + ".S2"]
def _run_reactive(self, x0):
r"""
        Repeatedly updates ``A``, ``b``, and the solution guess according to the
        applied source terms, then calls ``_solve`` to solve the resulting
        system of linear equations.
Stops when the residual falls below ``solver_tol * norm(b)`` or when
the maximum number of iterations is reached.
Parameters
----------
x0 : ND-array
Initial guess of unknown variable
Returns
-------
x : ND-array
Solution array.
Notes
-----
        The algorithm must complete at least one iteration; otherwise
        _check_for_nans() never gets called when something is wrong with the
        data, and the user would not be notified about the root cause of the
        algorithm divergence.
"""
w = self.settings['relaxation_quantity']
quantity = self.settings['quantity']
max_it = self.settings['nlin_max_iter']
# Write initial guess to algorithm obj (for _update_iterative_props to work)
self[quantity] = x = x0
# Update A and b based on self[quantity]
self._update_A_and_b()
# Just in case you got a lucky guess, i.e. x0!
if self._is_converged():
logger.info(f'Solution converged: {self._get_residual():.4e}')
return x
for itr in range(max_it):
# Solve, use relaxation, and update solution on algorithm obj
self[quantity] = x = self._solve(x0=x) * w + x * (1 - w)
self._update_A_and_b()
# Check solution convergence
if self._is_converged():
logger.info(f'Solution converged: {self._get_residual():.4e}')
return x
logger.info(f'Tolerance not met: {self._get_residual():.4e}')
if not self._is_converged():
raise Exception(f"Not converged after {max_it} iterations.")
def _update_A_and_b(self):
r"""
Updates A and b based on the most recent solution stored on algorithm object.
"""
# Update iterative properties on phase, geometries, and physics
self._update_iterative_props()
# Build A and b, apply BCs/source terms
self._build_A()
self._build_b()
self._apply_BCs()
self._apply_sources()
def _get_iterative_props(self):
r"""
Find and return properties that need to be iterated while running the
algorithm
Notes
-----
This method was moved from ReactiveTransport class to GenericTransport
because source terms are not necessarily the only properties that need
iteration during an algorithm (ex. concentration-dependent conductance)
"""
import networkx as nx
phase = self.project.phases(self.settings['phase'])
physics = self.project.find_physics(phase=phase)
geometries = self.project.geometries().values()
# Combine dependency graphs of phase and all physics/geometries
dg = phase.models.dependency_graph(deep=True)
for g in geometries:
dg = nx.compose(dg, g.models.dependency_graph(deep=True))
for p in physics:
dg = nx.compose(dg, p.models.dependency_graph(deep=True))
base_props = [self.settings["quantity"]] + self.settings["variable_props"]
if base_props is None:
return []
# Find all props downstream that rely on "quantity" and variable_props
dg = nx.DiGraph(nx.edge_dfs(dg, source=base_props))
if len(dg.nodes) == 0:
return []
iterative_props = list(nx.dag.lexicographical_topological_sort(dg))
# "quantity" shouldn't be in the returned list but "variable_props" should
iterative_props.remove(self.settings["quantity"])
return iterative_props
@docstr.dedent
def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
r"""
Apply boundary conditions to specified pores if no source terms are
already assigned to these pores. Otherwise, raise an error.
Parameters
----------
%(GenericTransport._set_BC.parameters)s
Notes
-----
%(GenericTransport._set_BC.notes)s
"""
# First check that given pores do not have source terms already set
for item in self.settings['sources']:
if np.any(self[item][pores]):
raise Exception('Source term already present in given '
+ 'pores, cannot also assign boundary '
+ 'conditions')
# Then call parent class function if above check passes
super()._set_BC(pores=pores, bctype=bctype, bcvalues=bcvalues, mode=mode)
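# Hedged usage sketch (an addition, illustrative only; 'pn', 'phase' and the
# propnames below are assumptions, not defined in this module):
#   alg = ReactiveTransport(network=pn, phase=phase)
#   alg.setup(quantity='pore.concentration',
#             conductance='throat.diffusive_conductance')
#   alg.set_source(propname='pore.reaction', pores=pn.pores('internal'))
#   alg.run()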
| 39.584906 | 88 | 0.627026 |
4f4d1c23e14583210dc0a1435a65aa1ecb43e0bb | 5,648 | py | Python | ibis/pandas/execution/tests/test_temporal.py | vnlitvinov/ibis | fd6ecfcfbad43157fbdc934077b656774fc9a2e5 | [
"Apache-2.0"
] | 1 | 2021-06-15T07:28:59.000Z | 2021-06-15T07:28:59.000Z | ibis/pandas/execution/tests/test_temporal.py | vnlitvinov/ibis | fd6ecfcfbad43157fbdc934077b656774fc9a2e5 | [
"Apache-2.0"
] | 1 | 2020-10-02T23:51:48.000Z | 2020-10-03T00:54:29.000Z | ibis/pandas/execution/tests/test_temporal.py | vnlitvinov/ibis | fd6ecfcfbad43157fbdc934077b656774fc9a2e5 | [
"Apache-2.0"
] | 1 | 2020-04-12T19:51:50.000Z | 2020-04-12T19:51:50.000Z | import datetime
from operator import methodcaller
import numpy as np
import pandas as pd
import pandas.util.testing as tm # noqa: E402
import pytest
from pkg_resources import parse_version
from pytest import param
import ibis
from ibis import literal as L # noqa: E402
from ibis.expr import datatypes as dt
pytestmark = pytest.mark.pandas
@pytest.mark.parametrize(
('case_func', 'expected_func'),
[
(lambda v: v.strftime('%Y%m%d'), lambda vt: vt.strftime('%Y%m%d')),
(lambda v: v.year(), lambda vt: vt.year),
(lambda v: v.month(), lambda vt: vt.month),
(lambda v: v.day(), lambda vt: vt.day),
(lambda v: v.hour(), lambda vt: vt.hour),
(lambda v: v.minute(), lambda vt: vt.minute),
(lambda v: v.second(), lambda vt: vt.second),
(lambda v: v.millisecond(), lambda vt: int(vt.microsecond / 1e3)),
]
+ [
(methodcaller('strftime', pattern), methodcaller('strftime', pattern))
for pattern in [
'%Y%m%d %H',
'DD BAR %w FOO "DD"',
'DD BAR %w FOO "D',
'DD BAR "%w" FOO "D',
'DD BAR "%d" FOO "D',
'DD BAR "%c" FOO "D',
'DD BAR "%x" FOO "D',
'DD BAR "%X" FOO "D',
]
],
)
def test_timestamp_functions(case_func, expected_func):
v = L('2015-09-01 14:48:05.359').cast('timestamp')
vt = datetime.datetime(
year=2015,
month=9,
day=1,
hour=14,
minute=48,
second=5,
microsecond=359000,
)
result = case_func(v)
expected = expected_func(vt)
assert ibis.pandas.execute(result) == expected
@pytest.mark.parametrize(
'column',
['datetime_strings_naive', 'datetime_strings_ny', 'datetime_strings_utc'],
)
def test_cast_datetime_strings_to_date(t, df, column):
expr = t[column].cast('date')
result = expr.execute()
expected = (
pd.to_datetime(df[column], infer_datetime_format=True)
.dt.normalize()
.dt.tz_localize(None)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'column',
['datetime_strings_naive', 'datetime_strings_ny', 'datetime_strings_utc'],
)
def test_cast_datetime_strings_to_timestamp(t, df, column):
expr = t[column].cast('timestamp')
result = expr.execute()
expected = pd.to_datetime(df[column], infer_datetime_format=True)
if getattr(expected.dtype, 'tz', None) is not None:
expected = expected.dt.tz_convert(None)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'column',
['plain_datetimes_naive', 'plain_datetimes_ny', 'plain_datetimes_utc'],
)
def test_cast_integer_to_temporal_type(t, df, column):
column_type = t[column].type()
expr = t.plain_int64.cast(column_type)
result = expr.execute()
expected = pd.Series(
pd.to_datetime(df.plain_int64.values, unit='ns').values,
index=df.index,
name='plain_int64',
).dt.tz_localize(column_type.timezone)
tm.assert_series_equal(result, expected)
def test_cast_integer_to_date(t, df):
expr = t.plain_int64.cast('date')
result = expr.execute()
expected = pd.Series(
pd.to_datetime(df.plain_int64.values, unit='D').values,
index=df.index,
name='plain_int64',
)
tm.assert_series_equal(result, expected)
def test_times_ops(t, df):
result = t.plain_datetimes_naive.time().between('10:00', '10:00').execute()
expected = pd.Series(np.zeros(len(df), dtype=bool))
tm.assert_series_equal(result, expected)
result = t.plain_datetimes_naive.time().between('01:00', '02:00').execute()
expected = pd.Series(np.ones(len(df), dtype=bool))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('tz', 'rconstruct'),
[('US/Eastern', np.zeros), ('UTC', np.ones), (None, np.ones)],
)
@pytest.mark.parametrize(
'column', ['plain_datetimes_utc', 'plain_datetimes_naive']
)
def test_times_ops_with_tz(t, df, tz, rconstruct, column):
expected = pd.Series(rconstruct(len(df), dtype=bool))
time = t[column].time()
expr = time.between('01:00', '02:00', timezone=tz)
result = expr.execute()
tm.assert_series_equal(result, expected)
# Test that casting behavior is the same as using the timezone kwarg
ts = t[column].cast(dt.Timestamp(timezone=tz))
expr = ts.time().between('01:00', '02:00')
result = expr.execute()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('op', 'expected'),
[
param(lambda x, y: x + y, lambda x, y: x.values * 2, id='add'),
param(lambda x, y: x - y, lambda x, y: x.values - y.values, id='sub'),
param(lambda x, y: x * 2, lambda x, y: x.values * 2, id='mul'),
param(
lambda x, y: x // 2,
lambda x, y: x.values // 2,
id='floordiv',
marks=pytest.mark.xfail(
parse_version(pd.__version__) < parse_version('0.23.0'),
raises=TypeError,
reason=(
'pandas versions less than 0.23.0 do not support floor '
'division involving timedelta columns'
),
),
),
],
)
def test_interval_arithmetic(op, expected):
data = pd.timedelta_range('0 days', '10 days', freq='D')
con = ibis.pandas.connect(
{'df1': pd.DataFrame({'td': data}), 'df2': pd.DataFrame({'td': data})}
)
t1 = con.table('df1')
expr = op(t1.td, t1.td)
result = expr.execute()
expected = pd.Series(expected(data, data), name='td')
tm.assert_series_equal(result, expected)
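# Hedged sketch (an addition) of the pattern exercised above: connect a pandas
# backend, build an expression, then materialize it with .execute().
#   con = ibis.pandas.connect({'t': pd.DataFrame({'ts': pd.date_range('2020', periods=3)})})
#   con.table('t').ts.year().execute()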
| 31.909605 | 79 | 0.614554 |
4f4aa2f7cbaa00768a4e98251e58616d6ee72d6a | 757 | py | Python | jira/komand_jira/actions/find_users/action.py | TonyHamil/insightconnect-plugins | 63d985c798623a54b4be135d81197acea11f99ad | [
"MIT"
] | null | null | null | jira/komand_jira/actions/find_users/action.py | TonyHamil/insightconnect-plugins | 63d985c798623a54b4be135d81197acea11f99ad | [
"MIT"
] | null | null | null | jira/komand_jira/actions/find_users/action.py | TonyHamil/insightconnect-plugins | 63d985c798623a54b4be135d81197acea11f99ad | [
"MIT"
] | null | null | null | import komand
from .schema import FindUsersInput, FindUsersOutput
# Custom imports below
from ...util import *
class FindUsers(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='find_users',
description='Search for a set of users',
input=FindUsersInput(),
output=FindUsersOutput())
def run(self, params={}):
"""Search for users"""
        max_results = params.get('max')
        query = params.get('query')
        users = self.connection.client.search_users(user=query, maxResults=max_results)
results = list(map(lambda user: normalize_user(user, logger=self.logger), users))
results = komand.helper.clean(results)
return {'users': results}
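# Hedged usage sketch (an addition; requires a configured Komand connection,
# and the values are illustrative):
#   action = FindUsers()
#   action.run({'query': 'jdoe', 'max': 10})  # -> {'users': [...]}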
| 29.115385 | 89 | 0.634082 |
4f4af673a0348f4e948c4a87596355d888adc340 | 1,790 | py | Python | test/oscillator.py | cooperuser/rl_ctrnn | 5f05585dc6f017de60eec9929b8523a8d4da80f7 | [
"MIT"
] | 1 | 2021-10-05T13:30:00.000Z | 2021-10-05T13:30:00.000Z | test/oscillator.py | cooperuser/rl_ctrnn | 5f05585dc6f017de60eec9929b8523a8d4da80f7 | [
"MIT"
] | null | null | null | test/oscillator.py | cooperuser/rl_ctrnn | 5f05585dc6f017de60eec9929b8523a8d4da80f7 | [
"MIT"
] | null | null | null | from numpy import random
import wandb
from rl_ctrnn.ranges import CtrnnRanges
import unittest
from rl_ctrnn.ctrnn import Ctrnn
from evaluator.oscillator import Oscillator
def rand(n: float = 1):
return n * (random.random() * 2 - 1)
class Tests(unittest.TestCase):
def test_known(self):
for i in range(1):
wandb.init(entity="ampersand", project="beep")
ctrnn = Ctrnn(2)
ctrnn.set_bias(0, -2.75 + rand(0.05))
ctrnn.set_bias(1, -1.75 + rand(0.05))
ctrnn.set_weight(0, 0, 4.5 + rand(0.25))
ctrnn.set_weight(1, 0, 1.0 + rand(0.25))
ctrnn.set_weight(0, 1, -1.0 + rand(0.25))
ctrnn.set_weight(1, 1, 4.5 + rand(0.25))
# ctrnn.randomize(CtrnnRanges())
            evaluator = Oscillator(ctrnn)
            for j in range(3000):
                evaluator._step()
                if j >= 2500:
                    outputs = evaluator.ctrnn.get_output(evaluator.voltages)
                    step = j - 2500
                    wandb.log({
                        "a": outputs[0],
                        "b": outputs[1],
                        "fitness": evaluator.fitness / (evaluator.ctrnn.size + step),
                        "avg_a": evaluator.report.averages[0] / (step + 1),
                        "avg_b": evaluator.report.averages[1] / (step + 1),
                    })
            report = evaluator.generate_report()
            wandb.finish()
# for n in range(ctrnn.size):
# self.assertAlmostEqual(report.ranges[n].min, 0.1865, 4)
# self.assertAlmostEqual(report.ranges[n].max, 0.8135, 4)
# self.assertAlmostEqual(report.averages[n] / 10, 0.05, 2)
# self.assertGreaterEqual(report.fitness, 0.043)
# self.assertTrue(report.beers_metric)
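# Standard unittest entry point (an addition; the original relied on an
# external runner such as `python -m unittest`).
if __name__ == "__main__":
    unittest.main()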
| 39.777778 | 75 | 0.52067 |
4f4d19f5c3b72e00bae70a5d8ae10e058e142aeb | 63,560 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/operations/_resources_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/operations/_resources_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/operations/_resources_operations.py | JayDoubleu/azure-sdk-for-python | f3760fc8d7ea1b46b0def0628579d36abe75976f | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourcesOperations(object):
"""ResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _move_resources_initial(
self,
source_resource_group_name, # type: str
parameters, # type: "_models.ResourcesMoveInfo"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._move_resources_initial.metadata['url'] # type: ignore
path_format_arguments = {
'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourcesMoveInfo')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'} # type: ignore
def begin_move_resources(
self,
source_resource_group_name, # type: str
parameters, # type: "_models.ResourcesMoveInfo"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.ResourcesMoveInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_move_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'} # type: ignore
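    # Hedged usage sketch (an addition, not generated code): callers normally
    # block on the returned poller, e.g.
    #   poller = client.resources.begin_move_resources(src_rg, parameters)
    #   poller.result()  # waits for the move to complete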
def list(
self,
filter=None, # type: Optional[str]
expand=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceListResult"]
"""Get all the resources in a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resource groups.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2016_09_01.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resources'} # type: ignore
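    # Hedged usage sketch (an addition): the returned ItemPaged is a lazy
    # iterator, e.g.
    #   for res in client.resources.list(filter="resourceType eq 'Microsoft.Web/sites'"):
    #       print(res.id)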
def check_existence(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks whether a resource exists.
:param resource_group_name: The name of the resource group containing the resource to check.
The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider of the resource to check.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to check whether it exists.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.check_existence.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a resource.
:param resource_group_name: The name of the resource group that contains the resource to
delete. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to delete.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
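    # Hedged usage sketch (an addition):
    #   poller = client.resources.begin_delete(rg, ns, parent_path, rtype, name)
    #   poller.result()  # blocks until the resource is deleted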
def _create_or_update_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GenericResource"]
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create.
:type resource_type: str
:param resource_name: The name of the resource to create.
:type resource_name: str
:param parameters: Parameters for creating or updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
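    # A minimal usage sketch, assuming `client` is an already-configured
    # ResourceManagementClient; the names and location are placeholders.
    #
    #     from azure.mgmt.resource.resources.v2016_09_01.models import GenericResource
    #     poller = client.resources.begin_create_or_update(
    #         resource_group_name="my-rg",
    #         resource_provider_namespace="Microsoft.Storage",
    #         parent_resource_path="",
    #         resource_type="storageAccounts",
    #         resource_name="mystorageacct",
    #         parameters=GenericResource(location="westus"),
    #     )
    #     resource = poller.result()  # the deserialized GenericResource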
def _update_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GenericResource"]
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update.
:type resource_type: str
:param resource_name: The name of the resource to update.
:type resource_name: str
:param parameters: Parameters for updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GenericResource"
"""Gets a resource.
:param resource_group_name: The name of the resource group containing the resource to get. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def check_existence_by_id(
self,
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks by ID whether a resource exists.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-
name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self.check_existence_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
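        # The error map above admits only 204 (resource exists) and 404 (it does
        # not), so the 2xx test below reduces to "exists or not".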
return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
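    # A minimal usage sketch, assuming `client` is an already-configured
    # ResourceManagementClient; the fully qualified ID is a placeholder.
    #
    #     exists = client.resources.check_existence_by_id(
    #         "/subscriptions/<guid>/resourceGroups/my-rg/providers"
    #         "/Microsoft.Storage/storageAccounts/mystorageacct"
    #     )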
def _delete_by_id_initial(
self,
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_by_id_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
def begin_delete_by_id(
self,
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-
name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_by_id_initial(
resource_id=resource_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
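    # A minimal usage sketch, assuming `client` is an already-configured
    # ResourceManagementClient; the fully qualified ID is a placeholder.
    #
    #     resource_id = (
    #         "/subscriptions/<guid>/resourceGroups/my-rg/providers"
    #         "/Microsoft.Web/sites/my-site"
    #     )
    #     client.resources.begin_delete_by_id(resource_id).result()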
def _create_or_update_by_id_initial(
self,
resource_id, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_by_id_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
def begin_create_or_update_by_id(
self,
resource_id, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GenericResource"]
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-
name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param parameters: Create or update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_by_id_initial(
resource_id=resource_id,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
def _update_by_id_initial(
self,
resource_id, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_by_id_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
def begin_update_by_id(
self,
resource_id, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GenericResource"]
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-
name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param parameters: Update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_by_id_initial(
resource_id=resource_id,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
def get_by_id(
self,
resource_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GenericResource"
"""Gets a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format, /subscriptions/{guid}/resourceGroups/{resource-group-
name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2016_09_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
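    # A minimal usage sketch, assuming `client` is an already-configured
    # ResourceManagementClient; the fully qualified ID is a placeholder.
    #
    #     res = client.resources.get_by_id(
    #         "/subscriptions/<guid>/resourceGroups/my-rg/providers"
    #         "/Microsoft.Web/sites/my-site"
    #     )
    #     print(res.name, res.location)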
| 49.772905 | 223 | 0.661045 |
4f51bc4558f1e8f01d9030a6624f24e749eb82bb | 533 | py | Python | client/verta/verta/_swagger/_public/modeldb/model/ModeldbDeleteJobResponse.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 835 | 2017-02-08T20:14:24.000Z | 2020-03-12T17:37:49.000Z | client/verta/verta/_swagger/_public/modeldb/model/ModeldbDeleteJobResponse.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 651 | 2019-04-18T12:55:07.000Z | 2022-03-31T23:45:09.000Z | client/verta/verta/_swagger/_public/modeldb/model/ModeldbDeleteJobResponse.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 170 | 2017-02-13T14:49:22.000Z | 2020-02-19T17:59:12.000Z | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbDeleteJobResponse(BaseType):
def __init__(self, status=None):
required = {
"status": False,
}
self.status = status
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('status', None)
if tmp is not None:
d['status'] = tmp
return ModeldbDeleteJobResponse(**d)
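# A minimal usage sketch; the payload below is a placeholder that matches the
# single optional field declared above.
#
#     resp = ModeldbDeleteJobResponse.from_json({"status": True})
#     print(resp.status)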
| 22.208333 | 62 | 0.645403 |
4f524c8b021b298f5cca35b42697a7e7dcafa25e | 20,308 | py | Python | espnet/bin/asr_train.py | creatorscan/espnet-asrtts | e516601bd550aeb5d75ee819749c743fc4777eee | [
"Apache-2.0"
] | 5 | 2021-04-17T13:12:20.000Z | 2022-02-22T09:36:45.000Z | espnet/bin/asr_train.py | creatorscan/espnet-asrtts | e516601bd550aeb5d75ee819749c743fc4777eee | [
"Apache-2.0"
] | null | null | null | espnet/bin/asr_train.py | creatorscan/espnet-asrtts | e516601bd550aeb5d75ee819749c743fc4777eee | [
"Apache-2.0"
] | 5 | 2020-02-24T08:13:54.000Z | 2022-02-22T09:03:09.000Z | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import configargparse
import logging
import os
import random
import subprocess
import sys
import numpy as np
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
if parser is None:
parser = configargparse.ArgumentParser(
description="Train an automatic speech recognition (ASR) model on one CPU, one or multiple GPUs",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
# general configuration
parser.add('--config', is_config_file=True, help='config file path')
parser.add('--config2', is_config_file=True,
help='second config file path that overwrites the settings in `--config`.')
parser.add('--config3', is_config_file=True,
help='third config file path that overwrites the settings in `--config` and `--config2`.')
parser.add_argument('--ngpu', default=None, type=int,
help='Number of GPUs. If not given, use all visible devices')
parser.add_argument('--train-dtype', default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help='Data type for training (only pytorch backend). '
'O0,O1,.. flags require apex. See https://nvidia.github.io/apex/amp.html#opt-levels')
parser.add_argument('--backend', default='chainer', type=str,
choices=['chainer', 'pytorch'],
help='Backend library')
parser.add_argument('--sampling', default='None', type=str,
choices=['chainer', 'pytorch'],
                        help='Backend library used for sampling')
parser.add_argument('--outdir', type=str, required=required,
help='Output directory')
parser.add_argument('--debugmode', default=1, type=int,
help='Debugmode')
parser.add_argument('--dict', required=required,
help='Dictionary')
parser.add_argument('--seed', default=1, type=int,
help='Random seed')
parser.add_argument('--ctxt', default=-10, type=int,
help='Context for decoder')
parser.add_argument('--duration', default=100, type=int,
help='Duration of iteration between increment')
parser.add_argument('--debugdir', type=str,
help='Output directory for debugging')
parser.add_argument('--resume', '-r', default='', nargs='?',
help='Resume the training from snapshot')
parser.add_argument('--minibatches', '-N', type=int, default='-1',
help='Process only N minibatches (for debug)')
parser.add_argument('--verbose', '-V', default=0, type=int,
help='Verbose option')
parser.add_argument('--tensorboard-dir', default=None, type=str, nargs='?', help="Tensorboard log dir path")
parser.add_argument('--report-interval-iters', default=100, type=int,
help="Report interval iterations")
# task related
parser.add_argument('--train-json', type=str, default=None,
help='Filename of train label data (json)')
parser.add_argument('--valid-json', type=str, default=None,
help='Filename of validation label data (json)')
# network architecture
parser.add_argument('--model-module', type=str, default=None,
help='model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)')
# loss related
parser.add_argument('--ctc_type', default='warpctc', type=str,
choices=['builtin', 'warpctc'],
help='Type of CTC implementation to calculate loss.')
parser.add_argument('--mtlalpha', default=0.5, type=float,
help='Multitask learning coefficient, alpha: alpha*ctc_loss + (1-alpha)*att_loss ')
parser.add_argument('--lsm-type', const='', default='', type=str, nargs='?', choices=['', 'unigram'],
help='Apply label smoothing with a specified distribution type')
parser.add_argument('--lsm-weight', default=0.0, type=float,
help='Label smoothing weight')
# recognition options to compute CER/WER
parser.add_argument('--report-cer', default=False, action='store_true',
help='Compute CER on development set')
parser.add_argument('--report-wer', default=False, action='store_true',
help='Compute WER on development set')
parser.add_argument('--nbest', type=int, default=1,
help='Output N-best hypotheses')
parser.add_argument('--beam-size', type=int, default=4,
help='Beam size')
parser.add_argument('--penalty', default=0.0, type=float,
                        help='Insertion penalty')
parser.add_argument('--maxlenratio', default=0.0, type=float,
help="""Input length ratio to obtain max output length.
                        If maxlenratio=0.0 (default), it uses an end-detect function
to automatically find maximum hypothesis lengths""")
parser.add_argument('--minlenratio', default=0.0, type=float,
help='Input length ratio to obtain min output length')
parser.add_argument('--ctc-weight', default=0.3, type=float,
help='CTC weight in joint decoding')
parser.add_argument('--rnnlm', type=str, default=None,
help='RNNLM model file to read')
parser.add_argument('--rnnlm-conf', type=str, default=None,
help='RNNLM model config file to read')
parser.add_argument('--lm-weight', default=0.1, type=float,
help='RNNLM weight.')
parser.add_argument('--sym-space', default='<space>', type=str,
help='Space symbol')
parser.add_argument('--sym-blank', default='<blank>', type=str,
help='Blank symbol')
# minibatch related
parser.add_argument('--sortagrad', default=0, type=int, nargs='?',
help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs")
parser.add_argument('--batch-count', default='auto', choices=BATCH_COUNT_CHOICES,
help='How to count batch_size. The default (auto) will find how to count by args.')
parser.add_argument('--batch-size', '--batch-seqs', '-b', default=0, type=int,
help='Maximum seqs in a minibatch (0 to disable)')
parser.add_argument('--batch-bins', default=0, type=int,
help='Maximum bins in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-in', default=0, type=int,
help='Maximum input frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-out', default=0, type=int,
help='Maximum output frames in a minibatch (0 to disable)')
parser.add_argument('--batch-frames-inout', default=0, type=int,
help='Maximum input+output frames in a minibatch (0 to disable)')
parser.add_argument('--maxlen-in', '--batch-seq-maxlen-in', default=800, type=int, metavar='ML',
help='When --batch-count=seq, batch size is reduced if the input sequence length > ML.')
parser.add_argument('--maxlen-out', '--batch-seq-maxlen-out', default=150, type=int, metavar='ML',
help='When --batch-count=seq, batch size is reduced if the output sequence length > ML')
parser.add_argument('--n-iter-processes', default=0, type=int,
help='Number of processes of iterator')
parser.add_argument('--preprocess-conf', type=str, default=None, nargs='?',
help='The configuration file for the pre-processing')
# optimization related
parser.add_argument('--opt', default='adadelta', type=str,
choices=['adadelta', 'adam', 'noam'],
help='Optimizer')
parser.add_argument('--accum-grad', default=1, type=int,
                        help='Number of gradient accumulation steps')
parser.add_argument('--eps', default=1e-8, type=float,
help='Epsilon constant for optimizer')
parser.add_argument('--eps-decay', default=0.01, type=float,
help='Decaying ratio of epsilon')
parser.add_argument('--weight-decay', default=0.0, type=float,
help='Weight decay ratio')
parser.add_argument('--criterion', default='acc', type=str,
choices=['loss', 'acc'],
help='Criterion to perform epsilon decay')
parser.add_argument('--threshold', default=1e-4, type=float,
help='Threshold to stop iteration')
parser.add_argument('--epochs', '-e', default=30, type=int,
help='Maximum number of epochs')
parser.add_argument('--early-stop-criterion', default='validation/main/acc', type=str, nargs='?',
help="Value to monitor to trigger an early stopping of the training")
parser.add_argument('--patience', default=3, type=int, nargs='?',
help="Number of epochs to wait without improvement before stopping the training")
parser.add_argument('--grad-clip', default=5, type=float,
help='Gradient norm threshold to clip')
parser.add_argument('--num-save-attention', default=3, type=int,
help='Number of samples of attention to be saved')
parser.add_argument('--grad-noise', type=strtobool, default=False,
help='The flag to switch to use noise injection to gradients during training')
# asr_mix related
parser.add_argument('--num-spkrs', default=1, type=int,
choices=[1, 2],
help='Maximum number of speakers in the speech for multi-speaker speech recognition task.')
# speech translation related
parser.add_argument('--context-residual', default=False, type=strtobool, nargs='?',
help='The flag to switch to use context vector residual in the decoder network')
parser.add_argument('--replace-sos', default=False, nargs='?',
help='Replace <sos> in the decoder with a target language ID \
(the first token in the target sequence)')
# finetuning related
parser.add_argument('--asr-init', default=None, type=str,
                        help='Pre-trained ASR model for initialization')
parser.add_argument('--enc-init', default=None, type=str,
help='Pre-trained ASR model to initialize encoder.')
parser.add_argument('--enc-init-mods', default='enc.enc.',
type=lambda s: [str(mod) for mod in s.split(',') if s != ''],
help='List of encoder modules to initialize, separated by a comma.')
parser.add_argument('--dec-init', default=None, type=str,
help='Pre-trained ASR, MT or LM model to initialize decoder.')
parser.add_argument('--dec-init-mods', default='att., dec.',
type=lambda s: [str(mod) for mod in s.split(',') if s != ''],
help='List of decoder modules to initialize, separated by a comma.')
# front end related
parser.add_argument('--use-frontend', type=strtobool, default=False,
help='The flag to switch to use frontend system.')
# WPE related
parser.add_argument('--use-wpe', type=strtobool, default=False,
help='Apply Weighted Prediction Error')
parser.add_argument('--wtype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture '
'of the mask estimator for WPE. '
'')
parser.add_argument('--wlayers', type=int, default=2,
help='')
parser.add_argument('--wunits', type=int, default=300,
help='')
parser.add_argument('--wprojs', type=int, default=300,
help='')
parser.add_argument('--wdropout-rate', type=float, default=0.0,
help='')
parser.add_argument('--wpe-taps', type=int, default=5,
help='')
parser.add_argument('--wpe-delay', type=int, default=3,
help='')
parser.add_argument('--use-dnn-mask-for-wpe', type=strtobool,
default=False,
help='Use DNN to estimate the power spectrogram. '
'This option is experimental.')
# Beamformer related
parser.add_argument('--use-beamformer', type=strtobool,
default=True, help='')
parser.add_argument('--btype', default='blstmp', type=str,
choices=['lstm', 'blstm', 'lstmp', 'blstmp', 'vgglstmp', 'vggblstmp', 'vgglstm', 'vggblstm',
'gru', 'bgru', 'grup', 'bgrup', 'vgggrup', 'vggbgrup', 'vgggru', 'vggbgru'],
help='Type of encoder network architecture '
'of the mask estimator for Beamformer.')
parser.add_argument('--blayers', type=int, default=2,
help='')
parser.add_argument('--bunits', type=int, default=300,
help='')
parser.add_argument('--bprojs', type=int, default=300,
help='')
parser.add_argument('--badim', type=int, default=320,
help='')
parser.add_argument('--bnmask', type=int, default=2,
help='Number of beamforming masks, '
'default is 2 for [speech, noise].')
parser.add_argument('--ref-channel', type=int, default=-1,
help='The reference channel used for beamformer. '
'By default, the channel is estimated by DNN.')
parser.add_argument('--bdropout-rate', type=float, default=0.0,
help='')
# Feature transform: Normalization
parser.add_argument('--stats-file', type=str, default=None,
help='The stats file for the feature normalization')
parser.add_argument('--apply-uttmvn', type=strtobool, default=True,
help='Apply utterance level mean '
'variance normalization.')
parser.add_argument('--uttmvn-norm-means', type=strtobool,
default=True, help='')
parser.add_argument('--uttmvn-norm-vars', type=strtobool, default=False,
help='')
# Feature transform: Fbank
parser.add_argument('--fbank-fs', type=int, default=16000,
help='The sample frequency used for '
'the mel-fbank creation.')
parser.add_argument('--n-mels', type=int, default=80,
help='The number of mel-frequency bins.')
parser.add_argument('--fbank-fmin', type=float, default=0.,
help='')
parser.add_argument('--fbank-fmax', type=float, default=None,
help='')
return parser
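# A minimal sketch of driving the parser programmatically; the paths are
# placeholders, and only the options declared with required=True above
# (--outdir and --dict) must be supplied.
#
#     parser = get_parser()
#     args = parser.parse_args(["--outdir", "exp/demo", "--dict", "data/lang/dict.txt"])
#     assert args.backend in ("chainer", "pytorch")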
def main(cmd_args):
parser = get_parser()
args, _ = parser.parse_known_args(cmd_args)
if args.backend == "chainer" and args.train_dtype != "float32":
raise NotImplementedError(
f"chainer backend does not support --train-dtype {args.train_dtype}."
"Use --dtype float32.")
if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
raise ValueError(f"--train-dtype {args.train_dtype} does not support the CPU backend.")
from espnet.utils.dynamic_import import dynamic_import
if args.model_module is None:
model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if 'chainer_backend' in args.model_module:
args.backend = 'chainer'
if 'pytorch_backend' in args.model_module:
args.backend = 'pytorch'
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
logging.basicConfig(
level=logging.WARN, format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
logging.warning('Skip DEBUG/INFO messages')
# If --ngpu is not given,
# 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
# 2. if nvidia-smi exists, use all devices
# 3. else ngpu=0
if args.ngpu is None:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is not None:
ngpu = len(cvd.split(','))
else:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
try:
p = subprocess.run(['nvidia-smi', '-L'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
ngpu = len(p.stderr.decode().split('\n')) - 1
else:
if "fit.vutbr.cz" in subprocess.check_output(["hostname", "-f"]).decode():
command = 'nvidia-smi --query-gpu=memory.free,memory.total \
--format=csv |tail -n+2| awk \'BEGIN{FS=" "}{if ($1 / $3 > 0.99) print NR-1}\''
try:
cvd = str(subprocess.check_output(command, shell=True).decode().rsplit('\n')[0:args.ngpu])
cvd = cvd.replace("]", "")
cvd = cvd.replace("[", "")
cvd = cvd.replace("'", "")
logging.info("Selected GPU is: " + str(cvd))
os.environ['CUDA_VISIBLE_DEVICES'] = cvd
except subprocess.CalledProcessError:
logging.info("No GPU seems to be available")
#ngpu = args.ngpu
#logging.info(f"ngpu: {ngpu}")
# display PYTHONPATH
logging.info('python path = ' + os.environ.get('PYTHONPATH', '(None)'))
# set random seed
logging.info('random seed = %d' % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# load dictionary for debug log
if args.dict is not None:
with open(args.dict, 'rb') as f:
dictionary = f.readlines()
char_list = [entry.decode('utf-8').split(' ')[0]
for entry in dictionary]
char_list.insert(0, '<blank>')
char_list.append('<eos>')
args.char_list = char_list
else:
args.char_list = None
# train
logging.info('backend = ' + args.backend)
if args.num_spkrs == 1:
if args.backend == "chainer":
from espnet.asr.chainer_backend.asr import train
train(args)
elif args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError("Only chainer and pytorch are supported.")
else:
# FIXME(kamo): Support --model-module
if args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError("Only pytorch is supported.")
if __name__ == '__main__':
main(sys.argv[1:])
| 53.583113 | 116 | 0.578245 |
4f4fdaaa7c7a53d607515c5f23e18a5a8d1d53eb | 1,821 | py | Python | server/src/finote_api/admin.py | kentaiwami/KI_App0 | 4cc9dbc5cc34f7ca52b5d0487624f687696b3fb4 | [
"MIT"
] | null | null | null | server/src/finote_api/admin.py | kentaiwami/KI_App0 | 4cc9dbc5cc34f7ca52b5d0487624f687696b3fb4 | [
"MIT"
] | 24 | 2016-06-21T09:05:26.000Z | 2022-02-10T09:27:38.000Z | server/src/finote_api/admin.py | kentaiwami/FiNote | 4cc9dbc5cc34f7ca52b5d0487624f687696b3fb4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
class GenreAdmin(admin.ModelAdmin):
list_display = ('genre_id',)
class MovieUserInline(admin.TabularInline):
model = Movie_User
extra = 1
class MovieOnomatopoeiaInline(admin.TabularInline):
model = Movie_Onomatopoeia
extra = 1
class MovieAdmin(admin.ModelAdmin):
list_display = ('pk', 'title', 'created_at', 'updated_at', 'tmdb_id', 'genres', 'overview')
search_fields = ('title',)
inlines = (MovieUserInline, MovieOnomatopoeiaInline)
@staticmethod
def genres(obj):
return '\n'.join([g.name for g in obj.genre.all()])
class OnomatopoeiaAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ('name',)
inlines = (MovieOnomatopoeiaInline,)
class UserAdmin(admin.ModelAdmin):
list_display = ('pk', 'username', 'email', 'birthyear', 'password')
search_fields = ('username',)
inlines = (MovieUserInline,)
class MovieUserAdmin(admin.ModelAdmin):
list_display = ('pk', 'movie', 'user', 'dvd', 'fav', 'created_at', 'updated_at')
class MovieUserOnomatopoeiaAdmin(admin.ModelAdmin):
list_display = ('pk', 'user', 'movie', 'onomatopoeia', 'created_at')
@staticmethod
def user(obj):
return obj.movie_user.user
@staticmethod
def movie(obj):
return obj.movie_user.movie
class MovieOnomatopoeiaAdmin(admin.ModelAdmin):
list_display = ('pk', 'movie', 'onomatopoeia', 'count')
admin.site.register(Movie, MovieAdmin)
admin.site.register(AuthUser, UserAdmin)
admin.site.register(Genre, GenreAdmin)
admin.site.register(Onomatopoeia, OnomatopoeiaAdmin)
admin.site.register(Movie_User, MovieUserAdmin)
admin.site.register(Movie_User_Onomatopoeia, MovieUserOnomatopoeiaAdmin)
admin.site.register(Movie_Onomatopoeia, MovieOnomatopoeiaAdmin)
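# A hedged equivalent, not used in this module: the same hookup can be written
# with Django's decorator form, e.g.
#
#     @admin.register(Genre)
#     class GenreAdmin(admin.ModelAdmin):
#         list_display = ('genre_id',)
#
# which is interchangeable with the explicit admin.site.register calls above.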
| 27.179104 | 95 | 0.717188 |
4f529e4fb9836cfe54eaf4a9b57ae2cb5043c7cf | 2,069 | py | Python | codes/projects/test_discrete_parameter/utils_project/plot_bivariate_gaussian.py | hwangoh/uq-vae | 382548e6f6dd7f9d72feff0e0752beec871db348 | [
"MIT"
] | 2 | 2021-07-28T16:47:18.000Z | 2021-08-03T00:53:58.000Z | codes/projects/test_discrete_parameter/utils_project/plot_bivariate_gaussian.py | HwanGoh/uq-vae | 24a3d26987e2ec807d57601b14c68b22f3652a18 | [
"MIT"
] | null | null | null | codes/projects/test_discrete_parameter/utils_project/plot_bivariate_gaussian.py | HwanGoh/uq-vae | 24a3d26987e2ec807d57601b14c68b22f3652a18 | [
"MIT"
] | 2 | 2021-09-29T08:31:46.000Z | 2021-11-07T10:26:45.000Z | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff() # Turn interactive plotting off
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
def plot_bivariate_gaussian(filepath, mean, cov,
fig_size, lim_min, lim_max, colorbar_limits,
title, xlabel, ylabel):
#=== Our 2-dimensional distribution will be over variables X and Y ===#
x_axis = np.linspace(lim_min, lim_max, 100)
y_axis = np.linspace(lim_min, lim_max, 100)
X, Y = np.meshgrid(x_axis, y_axis)
#=== Pack X and Y into a single 3-dimensional array ===#
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X
pos[:, :, 1] = Y
#=== The distribution on the variables X, Y packed into pos ===#
Z = multivariate_gaussian(pos, mean.flatten(), cov)
#=== Plot ===#
fig_contour, ax = plt.subplots(1,1)
v = np.linspace(colorbar_limits[0], colorbar_limits[1], 40, endpoint=True)
cp = ax.contourf(X, Y, Z, v)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(cp, cax = cax)
#=== Save figure ===#
plt.savefig(filepath, dpi=100, bbox_inches = 'tight', pad_inches = 0)
plt.close(fig_contour)
def multivariate_gaussian(pos, mean, cov):
"""Return the multivariate Gaussian distribution on array pos.
pos is an array constructed by packing the meshed arrays of variables
x_1, x_2, x_3, ..., x_k into its _last_ dimension.
"""
n = mean.shape[0]
cov_det = np.linalg.det(cov)
cov_inv = np.linalg.inv(cov)
N = np.sqrt((2*np.pi)**n * cov_det)
    # This einsum call calculates (x - mu)^T Sigma^{-1} (x - mu) in a
    # vectorized way across all the input variables.
fac = np.einsum('...k,kl,...l->...', pos-mean, cov_inv, pos-mean)
return np.exp(-fac / 2) / N
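# A minimal, self-contained demo with assumed placeholder values; writing
# "demo_gaussian.png" is its only side effect, and it runs only when the
# module is executed directly.
if __name__ == "__main__":
    demo_mean = np.array([[0.0], [0.0]])
    demo_cov = np.array([[1.0, 0.3], [0.3, 1.0]])
    plot_bivariate_gaussian("demo_gaussian.png", demo_mean, demo_cov,
                            fig_size=(5, 5), lim_min=-3.0, lim_max=3.0,
                            colorbar_limits=(0.0, 0.2),
                            title="Bivariate Gaussian", xlabel="x", ylabel="y")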
| 35.672414 | 78 | 0.649106 |
4f520305fad753c5af1e9e6038c3469122a64819 | 24 | py | Python | btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | btd6_memory_info/generated/Assets/Scripts/Unity/UI_New/Main/DailyRewards/daily_rewards.py | 56kyle/bloons_auto | 419d55b51d1cddc49099593970adf1c67985b389 | [
"MIT"
] | null | null | null | class DailyRewards: pass | 24 | 24 | 0.875 |
4f4f87883f682b1763453c16d87870c275fd1cf1 | 7,778 | py | Python | algorithms/scaling/cross_validation/cross_validate.py | ndevenish/dials-fork | e3970ffcd2870ccd0e340f5e94999efcc978fb2b | [
"BSD-3-Clause"
] | 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | algorithms/scaling/cross_validation/cross_validate.py | ndevenish/dials-fork | e3970ffcd2870ccd0e340f5e94999efcc978fb2b | [
"BSD-3-Clause"
] | null | null | null | algorithms/scaling/cross_validation/cross_validate.py | ndevenish/dials-fork | e3970ffcd2870ccd0e340f5e94999efcc978fb2b | [
"BSD-3-Clause"
] | null | null | null | """
This module defines a general cross validation function that can be used
with any valid CrossValidator. To use in a command line program, the
phil_scope should be included. Provided here is a description of the options,
using the example of running cross_validation in dials.scale
General description:
cross_validate runs the script defined by the CrossValidator, running each
option in turn, using a free set to score the model - the results are printed
in a table and the model with the lowest free set rmsd is indicated. For each
option, the analysis will be repeated nfolds times, with a different free set
chosen each time, and the final rmsds averaged. For full k-fold cross-validation,
nfolds should be set to 100/free_set_percentage, which would be nfolds=10 for
the default free_set_percentage=10.0.
Two different modes are currently supported, controlled by cross_validation_mode=;
1) cross_validation_mode=single
dials.scale is run nfolds times for the user specified dials.scale options
2) cross_validation_mode=multi
optimise a dials.scale parameter, specified by parameter= .The full phil path
to the parameter must be specified, e.g. physical.decay_correction. The phil
structure can be seen by running dials.scale -ce2
parameter_values must also be specified as a string of space separated values,
unless the dials.scale parameter type is bool.
Therefore one must choose:
cross_validation_mode= (single or multi)
parameter= (a supported command line option of the script run by
CrossValidator, optional if cross_validation_mode=single)
parameter_values= (values to test, only optional if parameter= selects a
boolean command-line parameter)
For example
cross_validation_mode=multi parameter=physical.absorption_correction
cross_validation_mode=multi parameter=physical.decay_interval parameter_values="5.0 10.0 15.0"
cross_validation_mode=multi parameter=model parameter_values="array physical"
"""
from __future__ import absolute_import, division, print_function
import itertools
import logging
import time
import six
from libtbx import phil
logger = logging.getLogger("dials")
phil_scope = phil.parse(
"""
cross_validation {
cross_validation_mode = multi single
.type = choice
        .help = "Choose the cross validation running mode, for a full description"
                "see the module docstring. Choice is used for testing a parameter"
                "that can only have discrete values (a choice or bool phil parameter)."
"Variable is used for testing a parameter that can have a float or"
"int value (that is also not a 'choice' type). Single just performs"
"cross validation on one parameter configuration."
.expert_level = 2
parameter = None
.type = str
        .help = "Optimise a command-line parameter. The full phil path must be"
"specified e.g. physical.absorption_correction. The option"
"parameter_values must also be specified, unless the parameter is"
"a True/False option."
.expert_level = 2
parameter_values = None
.type = strings
.help = "Parameter values to compare, entered as a string of space"
"separated values."
.expert_level = 2
nfolds = 1
.type = int(value_min=1)
        .help = "Number of cross-validation folds to perform. If nfolds > 1, the"
                "minimisation for each option is repeated nfolds times, with an"
                "incremental offset for the free set. The max number of folds"
                "allowed is 1/free_set_percentage; if set greater than this then"
                "the repetition will finish after 1/free_set_percentage folds."
.expert_level = 2
}
"""
)
def cross_validate(params, cross_validator):
"""Run cross validation script."""
start_time = time.time()
free_set_percentage = cross_validator.get_free_set_percentage(params)
options_dict = {}
if params.cross_validation.cross_validation_mode == "single":
# just run the setup nfolds times
cross_validator.create_results_dict(n_options=1)
for n in range(params.cross_validation.nfolds):
if n < 100.0 / free_set_percentage:
params = cross_validator.set_free_set_offset(params, n)
cross_validator.run_script(params, config_no=0)
elif params.cross_validation.cross_validation_mode == "multi":
# run each option nfolds times
if params.cross_validation.parameter is None:
raise ValueError(
"parameter= must be set to specify what command line option should be optimised"
)
choice = params.cross_validation.parameter
# #TODO extract allowed values to allow checking of user input
# inspect the phil scope to see what the parameter type is e.g bool, int
typ = cross_validator.get_parameter_type(choice)
if typ == "bool" and not params.cross_validation.parameter_values:
# values not specified, implied that should test both True and False
options_dict[choice] = [True, False]
else:
if not params.cross_validation.parameter_values:
raise ValueError(
"parameter_values= must be set to specify what options should be tested"
)
options_dict[choice] = []
if typ == "bool":
if (
"true" in params.cross_validation.parameter_values
or "True" in params.cross_validation.parameter_values
):
options_dict[choice].append(True)
if (
"false" in params.cross_validation.parameter_values
or "False" in params.cross_validation.parameter_values
):
options_dict[choice].append(False)
elif typ == "choice":
for option in params.cross_validation.parameter_values:
options_dict[choice].append(option)
elif typ == "int":
for value in params.cross_validation.parameter_values:
options_dict[choice].append(int(value))
elif typ == "float":
for value in params.cross_validation.parameter_values:
options_dict[choice].append(float(value))
else:
raise ValueError("Error in interpreting parameter and parameter_values")
# this code below should work for more than one parameter to be optimised,
# but one cannot specify this yet from the command line
keys, values = zip(*options_dict.items())
cross_validator.create_results_dict(len(values[0]))
cross_validator.set_results_dict_configuration(keys, values)
for i, v in enumerate(itertools.product(*values)):
e = dict(zip(keys, v))
for k, val in six.iteritems(e):
params = cross_validator.set_parameter(params, k, val)
for n in range(params.cross_validation.nfolds):
if n < 100.0 / free_set_percentage:
params = cross_validator.set_free_set_offset(params, n)
cross_validator.run_script(params, config_no=i)
else:
raise ValueError("Error in interpreting mode and options.")
st = cross_validator.interpret_results()
logger.info("Summary of the cross validation analysis: \n %s", st.format())
finish_time = time.time()
logger.info(
"\nCross-validation finished.\nTotal time taken: {:.4f}s ".format(
finish_time - start_time
)
)
logger.info("\n" + "=" * 80 + "\n")
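# Hedged usage sketch (editor's addition): the validator class below is
# hypothetical; a concrete CrossValidator implementation wrapping the target
# script, plus that script's own phil parameters, are expected in real use.
#
#   params = phil_scope.extract()
#   params.cross_validation.cross_validation_mode = "multi"
#   params.cross_validation.parameter = "physical.decay_correction"
#   params.cross_validation.nfolds = 10
#   cross_validate(params, MyCrossValidator())  # MyCrossValidator: hypothetical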
| 43.943503 | 96 | 0.66611 |
4f508a54ae1955eb316830dff6958dd2b325e64f | 7,240 | py | Python | scorpio_bringup/launch/ver_1.py | applejenny66/goldpaint | cbb368d2d21cc1e26177314eb1e6fceb7c6e6c8d | [
"MIT"
] | null | null | null | scorpio_bringup/launch/ver_1.py | applejenny66/goldpaint | cbb368d2d21cc1e26177314eb1e6fceb7c6e6c8d | [
"MIT"
] | 1 | 2020-08-10T05:33:02.000Z | 2020-08-10T05:33:02.000Z | scorpio_bringup/launch/ver_1.py | applejenny66/goldpaint | cbb368d2d21cc1e26177314eb1e6fceb7c6e6c8d | [
"MIT"
] | 1 | 2021-05-20T09:08:05.000Z | 2021-05-20T09:08:05.000Z | #!/usr/bin/env python
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
from control_msgs.msg import GripperCommandAction, GripperCommandGoal
from control_msgs.msg import GripperCommand
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Pose, Quaternion
from moveit_msgs.msg import MoveGroupAction
group = None
gripperClient = None
gripperSrv = None
offset_z = 0.05
sim = True
def add_obj_A():
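    # Registers static collision geometry (a table-sized box and a wall) with
    # the MoveIt planning scene so that motion plans avoid these volumes.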
scene = moveit_commander.PlanningSceneInterface()
box_name = " "
box_pose = geometry_msgs.msg.PoseStamped()
box_pose.header.frame_id = "base_link"
box_pose.pose.position.x = 0.0
box_pose.pose.position.y = 0.815
box_pose.pose.position.z = 0.2
box_pose.pose.orientation.w = 1.0
box_name = "areaA_box"
scene.add_box(box_name, box_pose, size=(1.5, 0.47, 0.87))
box_pose.header.frame_id = "base_link"
box_pose.pose.position.x=-0.8
box_pose.pose.position.y=0.0
box_pose.pose.position.z=0.25
box_pose.pose.orientation.w = 1.0
box_name = "wall"
scene.add_box(box_name, box_pose, size=(0.3, 2.0, 1.0))
return box_name
def go_idel():
global group
joint_goal = group.get_current_joint_values()
joint_goal[0] = 0
joint_goal[1] = 0.78
joint_goal[2] = -1.54
joint_goal[3] = 0
joint_goal[4] = -0.13
joint_goal[5] = 0
plan_res = group.go(joint_goal)
group.stop()
return plan_res
def pickCB():
global group, offset_z, sim
ps = [Pose(), Pose(), Pose(), Pose()]
pose_goal = Pose()
InitPose = Pose()
InitPose.position.x = -0.560; InitPose.position.y = 0.123; InitPose.position.z = 1.041
InitPose.orientation.x = -0.891; InitPose.orientation.y = 0.0; InitPose.orientation.z = 0.453; InitPose.orientation.w = 0.0
ps = [Pose(), Pose(), Pose(), Pose()]
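    # NOTE: the assignments below repeatedly overwrite the same ps[] entries;
    # only the values written last before group.set_pose_target(ps[0]) is
    # called actually take effect.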
ps[0].position.x = -0.727; ps[0].position.y = 0.299; ps[0].position.z = 0.802
ps[0].orientation.x = -0.999; ps[0].orientation.y = -0.042; ps[0].orientation.z = 0.004; ps[0].orientation.w = 0.013
ps[0].position.x = -0.119; ps[0].position.y = 0.650; ps[0].position.z = 0.947
ps[0].orientation.x = -0.700; ps[0].orientation.y = 0.714; ps[0].orientation.z = -0.011; ps[0].orientation.w = 0.027
ps[1].position.x = 0.161; ps[1].position.y = -0.046; ps[1].position.z = 0.501
ps[1].orientation.x = 0.320; ps[1].orientation.y = 0.947; ps[1].orientation.z = -0.041; ps[1].orientation.w = 0.004
ps[1].position.x = 0.089; ps[1].position.y = -0.037; ps[1].position.z = 0.305
ps[1].orientation.x = -0.374; ps[1].orientation.y = -0.928; ps[1].orientation.z = -0.003; ps[1].orientation.w = 0.007
ps[2].position.x = 0.016; ps[2].position.y = -0.175; ps[2].position.z = 0.295
ps[2].orientation.x = -0.374; ps[2].orientation.y = -0.928; ps[2].orientation.z = -0.003; ps[2].orientation.w = 0.007
ps[2].position.x = 0.021; ps[2].position.y = -0.110; ps[2].position.z = 0.269
ps[2].orientation.x = 0.042; ps[2].orientation.y = -0.999; ps[2].orientation.z = -0.011; ps[2].orientation.w = 0.007
ps[0].position.x = -0.727; ps[0].position.y = 0.299; ps[0].position.z = 0.802
ps[0].orientation.x = -0.999; ps[0].orientation.y = -0.042; ps[0].orientation.z = 0.004; ps[0].orientation.w = 0.013
ps[1].position.x = 0.090; ps[1].position.y = -0.112; ps[1].position.z = 0.235
ps[1].orientation.x = 0.042; ps[1].orientation.y = -0.999; ps[1].orientation.z = -0.011; ps[1].orientation.w = 0.007
ps[0].position.x = -0.119; ps[0].position.y = 0.650; ps[0].position.z = 0.947
ps[0].orientation.x = -0.700; ps[0].orientation.y = 0.714; ps[0].orientation.z = -0.011; ps[0].orientation.w = 0.027
ps[0].position.x = -0.181; ps[0].position.y = 0.680; ps[0].position.z = 0.847
ps[0].orientation.x = -0.715; ps[0].orientation.y = 0.699; ps[0].orientation.z = -0.005; ps[0].orientation.w = 0.033
ps[0].position.x = -0.838; ps[0].position.y = 0.280; ps[0].position.z = 0.505
ps[0].orientation.x = -0.999; ps[0].orientation.y = -0.042; ps[0].orientation.z = 0.004; ps[0].orientation.w = 0.013
ps[0].position.x = 0.061; ps[0].position.y = -0.110; ps[0].position.z = 0.269
ps[0].orientation.x = 0.042; ps[0].orientation.y = -0.999; ps[0].orientation.z = -0.011; ps[0].orientation.w = 0.007
ps[1].position.x = 0.086; ps[1].position.y = -0.105; ps[1].position.z = 0.340
ps[1].orientation.x = -0.374; ps[1].orientation.y = -0.928; ps[1].orientation.z = -0.003; ps[1].orientation.w = 0.007
ps[1].position.x = -0.857; ps[1].position.y = 0.260; ps[1].position.z = 0.540
ps[1].orientation.x = -0.374; ps[1].orientation.y = -0.928; ps[1].orientation.z = -0.003; ps[1].orientation.w = 0.007
#ps[1].position.x = 0.080; ps[1].position.y = 0.424; ps[1].position.z = 1.108
#ps[1].orientation.x = -0.707; ps[1].orientation.y = 0.706; ps[1].orientation.z = 0.024; ps[1].orientation.w = 0.023
#ps[1].position.x = 0.544; ps[1].position.y = -0.298; ps[1].position.z = 0.663
#ps[1].orientation.x = -0.372; ps[1].orientation.y = -0.928; ps[1].orientation.z = -0.006; ps[1].orientation.w = 0.009
ps[1].position.x = 0.095; ps[1].position.y = -0.656; ps[1].position.z = 1.053
ps[1].orientation.x = 0.707; ps[1].orientation.y = 0.707; ps[1].orientation.z = -0.027; ps[1].orientation.w = 0.027
ps[1].position.x = -0.252; ps[1].position.y = 0.053; ps[1].position.z = 0.530
ps[1].orientation.x = 0.905; ps[1].orientation.y = -0.425; ps[1].orientation.z = -0.035; ps[1].orientation.w = 0.003
ps[1].position.x = -0.288; ps[1].position.y = -0.239; ps[1].position.z = 0.655
ps[1].orientation.x = 1.000; ps[1].orientation.y = 0.002; ps[1].orientation.z = -0.015; ps[1].orientation.w = 0.004
ps[0].position.x = 0.095; ps[0].position.y = -0.706; ps[0].position.z = 0.753
ps[0].orientation.x = 0.707; ps[0].orientation.y = 0.707; ps[0].orientation.z = -0.027; ps[0].orientation.w = 0.027
#ps[1].position.x = 0.245; ps[1].position.y = -0.706; ps[1].position.z = 0.853
#ps[1].orientation.x = 0.707; ps[1].orientation.y = 0.707; ps[1].orientation.z = -0.027; ps[1].orientation.w = 0.027
ps[0].position.x = 0.178; ps[0].position.y = -0.641; ps[0].position.z = 0.488
ps[0].orientation.x = 0.757; ps[0].orientation.y = 0.653; ps[0].orientation.z = -0.010; ps[0].orientation.w = 0.017
#ps[4].position.x = -0.288; ps[4].position.y = -0.189; ps[4].position.z = 0.475
#ps[4].orientation.x = 1.000; ps[4].orientation.y = 0.002; ps[4].orientation.z = -0.015; ps[4].orientation.w = 0.004
group.set_pose_target(ps[0])
rospy.loginfo('GO')
group.go()
rospy.loginfo('Finish')
return
if __name__ == '__main__':
rospy.init_node('pick_and_place')
#sim = rospy.get_param('~sim', True)
#offset_z = rospy.get_param('~above_target_dist', 0.05)
moveGroupClient = actionlib.SimpleActionClient('move_group', MoveGroupAction)
#moveGroupClient.wait_for_server(rospy.Duration())
#group = moveit_commander.MoveGroupCommander(
# 'arm', '/robot_description', '')
group = moveit_commander.MoveGroupCommander('arm')
group.set_goal_orientation_tolerance(0.01)
group.set_goal_position_tolerance(0.01)
rospy.loginfo('Ready to plan.')
#add_obj_A()
raw_input()
pickCB()
#go_idel()
rospy.spin()
| 51.714286 | 125 | 0.65884 |
4f528d35fea0e94ae6ee525d6f3496b1fb0a4d6e | 431 | py | Python | tests/predict/classification/py/run_server.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | tests/predict/classification/py/run_server.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | tests/predict/classification/py/run_server.py | janove51/ml-app | 0d66aa4c25648f2059eb645b7f8081f028fac703 | [
"MIT"
] | null | null | null | import requests
import json
import codecs
import pandas as pd
import sys, os
sys.path.append(os.path.abspath('../../../../ml-app'))
from interface.prediction_api import server
from sklearn.datasets import make_classification
## start the server and pass test model
model_file = '/Users/user/testing/ml-app/models/classifier/RF/test_model_1.pickle'
server.run(model_file, endpoint = '/ml-app/v1.0/predict/', probability = False)
| 28.733333 | 82 | 0.770302 |
4f4efc0baccade19baf1dbc438dedc89d25c7ef5 | 5,238 | py | Python | origami-ms/mainWindow.py | lukasz-migas/ORIGAMI-MS | 8709c11aba1dc981d76a5256039486f0709d283f | [
"Apache-2.0"
] | null | null | null | origami-ms/mainWindow.py | lukasz-migas/ORIGAMI-MS | 8709c11aba1dc981d76a5256039486f0709d283f | [
"Apache-2.0"
] | null | null | null | origami-ms/mainWindow.py | lukasz-migas/ORIGAMI-MS | 8709c11aba1dc981d76a5256039486f0709d283f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Copyright (C) 2017 Lukasz G. Migas <lukasz.migas@manchester.ac.uk>
# This program is free software. Feel free to redistribute it and/or
# modify it under the condition you cite and credit the authors whenever
# appropriate.
# The program is distributed in the hope that it will be useful but is
# provided WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE
# -------------------------------------------------------------------------
# Load libraries
import wx.aui
from IDs import ID_helpCite
from IDs import ID_helpDocumentation
from IDs import ID_helpGitHub
from IDs import ID_helpNewVersion
from IDs import ID_on_export_config
from IDs import ID_on_import_config
from IDs import ID_on_set_masslynx_path
from IDs import ID_on_set_wrens_path
from IDs import ID_SHOW_ABOUT
from origamiStyles import makeMenuItem
from panelAbout import panelAbout
from panelControls import panelControls
from panelPlot import panelPlot
class MyFrame(wx.Frame):
def __init__(self, parent, config, icons, title="ORIGAMI-MS"):
wx.Frame.__init__(self, None, title=title, style=wx.DEFAULT_FRAME_STYLE & ~(wx.RESIZE_BORDER | wx.MAXIMIZE_BOX))
self.SetSize(600, 700)
self.Centre()
self.presenter = parent
self.config = config
self.icons = icons
icon = wx.Icon()
icon.CopyFromBitmap(self.icons.iconsLib["origami_logo_16"])
self.SetIcon(icon)
# Setup Notebook manager
self._mgr = wx.aui.AuiManager(self)
self._mgr.SetDockSizeConstraint(1, 1)
self.panelControls = panelControls(self, self.presenter, self.config) # Settings
self.panelPlots = panelPlot(self, self.config) # Settings
self._mgr.AddPane(
self.panelControls,
wx.aui.AuiPaneInfo()
.Top()
.CloseButton(False)
.GripperTop()
.MinSize((400, 200))
.Gripper(False)
.BottomDockable(False)
.TopDockable(False)
.CaptionVisible(False)
.Resizable(False),
)
self._mgr.AddPane(
self.panelPlots,
wx.aui.AuiPaneInfo()
.Bottom()
.CloseButton(False)
.GripperTop(False)
.MinSize((500, 400))
.Gripper(False)
.BottomDockable(False)
.TopDockable(False)
.CaptionVisible(False)
.Resizable(False),
)
# Load other parts
self._mgr.Update()
self.make_statusbar()
self.make_menubar()
self.Bind(wx.EVT_CLOSE, self.presenter.quit)
    def _setup_after_startup(self):
        """Bind functions after initialization of the `data_handling` module."""
self.Bind(wx.EVT_MENU, self.presenter.data_handling.on_update_wrens_path, id=ID_on_set_wrens_path)
def make_menubar(self):
# FILE MENU
self.mainMenu = wx.MenuBar()
menuFile = wx.Menu()
menuFile.Append(ID_on_set_masslynx_path, "Set MassLynx file path\tCtrl+O")
menuFile.AppendSeparator()
menuFile.Append(ID_on_set_wrens_path, "Set WREnS runner (ScriptRunnerLight.exe) path")
menuFile.AppendSeparator()
menuFile.Append(ID_on_import_config, "Import configuration file\tCtrl+C")
menuFile.Append(ID_on_export_config, "Export configuration file\tCtrl+Shift+C")
self.mainMenu.Append(menuFile, "&File")
# HELP MENU
menuHelp = wx.Menu()
menuHelp.Append(ID_helpNewVersion, "Check for updates (online)")
menuHelp.Append(ID_helpDocumentation, "Open documentation site (online)")
menuHelp.Append(ID_helpGitHub, "Go to GitHub site (online)")
menuHelp.Append(ID_helpCite, "Go to ORIGAMI publication site (online)")
menuHelp.AppendSeparator()
menuHelp.Append(makeMenuItem(parent=menuHelp, id=ID_SHOW_ABOUT, text="About ORIGAMI\tCtrl+Shift+A"))
self.mainMenu.Append(menuHelp, "&Help")
self.SetMenuBar(self.mainMenu)
self.Bind(wx.EVT_MENU, self.presenter.on_get_masslynx_path, id=ID_on_set_masslynx_path)
self.Bind(wx.EVT_MENU, self.presenter.on_import_config, id=ID_on_import_config)
self.Bind(wx.EVT_MENU, self.presenter.on_export_config, id=ID_on_export_config)
self.Bind(wx.EVT_MENU, self.on_open_about, id=ID_SHOW_ABOUT)
self.Bind(wx.EVT_MENU, self.presenter.on_open_link, id=ID_helpCite)
self.Bind(wx.EVT_MENU, self.presenter.on_open_link, id=ID_helpNewVersion)
self.Bind(wx.EVT_MENU, self.presenter.on_open_link, id=ID_helpDocumentation)
self.Bind(wx.EVT_MENU, self.presenter.on_open_link, id=ID_helpGitHub)
def on_open_about(self, evt):
"""Show About mMass panel."""
about = panelAbout(self, self.presenter, "About ORIGAMI", self.config, self.icons)
about.Centre()
about.Show()
about.SetFocus()
def make_statusbar(self):
self.mainStatusbar = self.CreateStatusBar(3, wx.STB_SIZEGRIP, wx.ID_ANY)
self.mainStatusbar.SetStatusWidths([120, 50, -1])
| 38.8 | 120 | 0.654066 |
4f517e04c1fde462c356fa8955e0742ceb5cbadd | 15,659 | py | Python | seymour/accounts/forms.py | lerouxb/seymour | 124a93da67b6d6757b738469673232739ff0a4dc | [
"MIT"
] | null | null | null | seymour/accounts/forms.py | lerouxb/seymour | 124a93da67b6d6757b738469673232739ff0a4dc | [
"MIT"
] | null | null | null | seymour/accounts/forms.py | lerouxb/seymour | 124a93da67b6d6757b738469673232739ff0a4dc | [
"MIT"
] | null | null | null | from django.conf import settings
from django import forms
from seymour.accounts.models import Account
__all__ = (
'LoginForm', 'SignupForm',
'ResetPasswordForm', 'ConfirmResetPasswordForm',
'EditOpenIDProfileForm', 'EditEmailProfileForm'
)
class LoginForm(forms.Form):
method = forms.ChoiceField(label='Login using', required=True, choices=(('email', 'Email & Password'), ('openid', 'OpenID')), widget=forms.RadioSelect())
openid = forms.CharField(label='Your OpenID', max_length=255, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
email = forms.EmailField(label='Email address', max_length=75, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
password = forms.CharField(label='Password', max_length=30, required=False, widget=forms.PasswordInput(attrs={'class': 'text'}))
def clean(self):
if self.cleaned_data['method'] == 'email':
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
# must I use a forms.ValidationError()? must this be a sequence?
# the documentation isn't clear..
if not email:
self.errors['email'] = "This field is required."
if not password:
self.errors['password'] = "This field is required."
if email and password:
try:
account = Account.objects.get(email=email)
except Account.DoesNotExist:
self.data['password'] = ''
raise forms.ValidationError("Please enter a correct email address and password. Note that both fields are case-sensitive.")
if not account.check_password(password):
self.data['password'] = ''
raise forms.ValidationError("Please enter a correct email address and password. Note that both fields are case-sensitive.")
if not account.is_active:
raise forms.ValidationError("This account is inactive.")
self._account = account
return self.cleaned_data
else:
# TODO: do some basic checks to make sure this is actually a URL..
return self.cleaned_data
def get_openid(self):
# for now handle it all inside the view
return self.cleaned_data['openid']
def get_account(self):
if hasattr(self, '_account'):
return self._account
else:
return None
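# Hedged usage sketch (editor's addition; field values are illustrative):
#   form = LoginForm({'method': 'email', 'email': 'user@example.com',
#                     'password': 'secret'})
#   if form.is_valid():
#       account = form.get_account()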
email_help = """Please use a valid email address as you will use this to log in.
We will never sell your email address to anyone."""
password_help = """Password should be at least 6 characters."""
confirmation_help = """We sent you this in an email. If you clicked the link we
sent, it should automatically be filled in."""
class SignupForm(forms.Form):
method = forms.ChoiceField(label='Signup using', required=True, choices=(('email', 'Email & Password'), ('openid', 'OpenID')), widget=forms.RadioSelect())
openid = forms.CharField(label='Your OpenID', max_length=255, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
firstname = forms.CharField(label='First name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
lastname = forms.CharField(label='Last name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
email = forms.EmailField(label='Email address', help_text=email_help, max_length=75, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
password = forms.CharField(label='Password', help_text=password_help, initial='', max_length=30, required=False, widget=forms.PasswordInput(attrs={'class': 'text'}))
confirm_password = forms.CharField(label='Re-type password', initial='', max_length=30, required=False, widget=forms.PasswordInput(attrs={'class': 'text'}))
captcha = forms.CharField(label='Please type the word you see in the image', initial='', max_length=128, required=True, widget=forms.TextInput(attrs={'class': 'vTextField'}))
def __init__(self, *args, **kwargs):
self.word = kwargs['word']
del kwargs['word']
super(SignupForm, self).__init__(*args, **kwargs)
def clean_email(self):
value = self.cleaned_data['email']
try:
account = Account.objects.get(email=value)
except Account.DoesNotExist:
pass
else:
raise forms.ValidationError("The email address is already used.")
return value
def clean_captcha(self):
value = self.cleaned_data['captcha']
if value != self.word:
self.data['captcha'] = ''
raise forms.ValidationError("Please fill in this code.")
return value
def clean(self):
if self.cleaned_data['method'] == 'email':
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
confirm_password = self.cleaned_data.get('confirm_password')
# must I use a forms.ValidationError()? must this be a sequence?
# the documentation isn't clear..
if not email:
self.errors['email'] = "This field is required."
if not password:
self.errors['password'] = "This field is required."
if not confirm_password:
self.errors['confirm_password'] = "This field is required."
if password or confirm_password:
if password != confirm_password:
self.data['password'] = ''
self.data['confirm_password'] = ''
                    raise forms.ValidationError("The passwords don't match.")
else:
if len(password) < 6:
self._errors['password'] = "Passwords must be at least 6 characters long."
self.data['password'] = ''
self.data['confirm_password'] = ''
return self.cleaned_data
else:
# TODO: do some basic checks to make sure this is actually a URL..
return self.cleaned_data
def save(self):
data = self.cleaned_data
account = Account(
firstname=data['firstname'],
lastname=data['lastname'],
email=data['email'],
is_active=True,
)
account.set_password(data['password'])
account.save()
# email the password to the user
from django.core.mail import send_mail
from django.template import Template, Context, loader
context = Context({
'account': account,
'password': data['password'],
'sitename': settings.SITENAME,
'seymour_domain': settings.SEYMOUR_DOMAIN
})
subject = u"Welcome to %s" % (settings.SITENAME)
t = loader.get_template('emails/account_added.email')
email_body = t.render(context)
if settings.DEBUG:
print "Subject: " + subject.encode('utf8')
print "-"*80
print email_body.encode('utf8')
else:
send_mail(subject, email_body, settings.EMAIL_FROM, [account.email], fail_silently=True)
return account
def get_openid(self):
# for now handle it all inside the view
return self.cleaned_data['openid']
class ResetPasswordForm(forms.Form):
email = forms.EmailField(label='Email address', max_length=75, required=True, widget=forms.TextInput(attrs={'class': 'vTextField'}))
def clean_email(self):
email = self.cleaned_data.get('email')
if email:
try:
account = Account.objects.get(email=email)
except Account.DoesNotExist:
raise forms.ValidationError("This email address is not on our system.")
return email
def save(self):
import sha
import random
confirmation_code = sha.new(str(random.random())).hexdigest()[:5].upper()
email = self.cleaned_data['email']
account = Account.objects.get(email=email)
account.confirmation_code = confirmation_code
account.save()
# send email
from django.core.mail import send_mail
from django.template import Template, Context, loader
context = Context({
'account': account,
'sitename': settings.SITENAME,
'seymour_domain': settings.SEYMOUR_DOMAIN
})
subject = u"[%s] Reset Password Confirmation" % (settings.SITENAME)
t = loader.get_template('emails/reset_password_confirm.email')
email_body = t.render(context)
if settings.DEBUG:
print "Subject: " + subject.encode('utf8')
print "-"*80
print email_body.encode('utf8')
else:
send_mail(subject, email_body, settings.EMAIL_FROM, [account.email], fail_silently=True)
class ConfirmResetPasswordForm(forms.Form):
email = forms.EmailField(label='Email address', max_length=75, required=True, widget=forms.TextInput(attrs={'class': 'vTextField'}))
confirmation_code = forms.CharField(label='Confirmation code', max_length=75, help_text=confirmation_help, required=True, widget=forms.TextInput(attrs={'class': 'vTextField'}))
def clean_email(self):
email = self.cleaned_data.get('email')
if email:
try:
account = Account.objects.get(email=email)
except Account.DoesNotExist:
raise forms.ValidationError("This email address is not on our system.")
return email
def clean(self):
email = self.cleaned_data.get('email')
confirmation_code = self.cleaned_data.get('confirmation_code')
if email:
try:
account = Account.objects.get(email=email)
                if account.confirmation_code != confirmation_code:
                    self._errors['confirmation_code'] = "Invalid confirmation code. Please try again. If you keep having problems, please contact support."
            except Account.DoesNotExist:
                self._errors['email'] = "This email address is not on our system."
        # Always return cleaned_data so the errors set above still surface,
        # instead of cleaned_data silently becoming None on the failure paths.
        return self.cleaned_data
def save(self):
import sha
import random
email = self.cleaned_data['email']
account = Account.objects.get(email=email)
new_password = sha.new(str(random.random())).hexdigest()[:5].upper()
account.confirmation_code = None
account.set_password(new_password)
account.save()
# send email
from django.core.mail import send_mail
from django.template import Template, Context, loader
context = Context({
'account': account,
'password': new_password,
'sitename': settings.SITENAME,
'seymour_domain': settings.SEYMOUR_DOMAIN
})
subject = u"[%s] Password Changed" % (settings.SITENAME)
t = loader.get_template('emails/changed_password.email')
email_body = t.render(context)
if settings.DEBUG:
print "Subject: " + subject.encode('utf8')
print "-"*80
print email_body.encode('utf8')
else:
send_mail(subject, email_body, settings.EMAIL_FROM, [account.email], fail_silently=True)
class EditOpenIDProfileForm(forms.Form):
firstname = forms.CharField(label='First name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
lastname = forms.CharField(label='Last name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
# TODO: allow changing OpenID (will probably require re-authentication,
# because otherwise you can't log back in..)
def __init__(self, *args, **kwargs):
self.update = kwargs['update']
del kwargs['update']
super(EditOpenIDProfileForm, self).__init__(*args, **kwargs)
def save(self):
data = self.cleaned_data
account = self.update
account.firstname = data['firstname']
account.lastname = data['lastname']
account.save()
class EditEmailProfileForm(forms.Form):
firstname = forms.CharField(label='First name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
lastname = forms.CharField(label='Last name', max_length=100, required=False, widget=forms.TextInput(attrs={'class': 'text'}))
email = forms.EmailField(label='Email address', max_length=75, required=True, widget=forms.TextInput(attrs={'class': 'text'}))
password = forms.CharField(label='Password', initial='', max_length=30, required=False, widget=forms.PasswordInput(attrs={'class': 'text'}))
confirm_password = forms.CharField(label='Re-type password', initial='', max_length=30, required=False, widget=forms.PasswordInput(attrs={'class': 'text'}))
def __init__(self, *args, **kwargs):
self.update = kwargs['update']
del kwargs['update']
super(EditEmailProfileForm, self).__init__(*args, **kwargs)
def clean_email(self):
value = self.cleaned_data['email']
if value == self.update.email:
return value
try:
account = Account.objects.get(email=value)
except Account.DoesNotExist:
pass
else:
raise forms.ValidationError("The email address is already used.")
return value
def clean(self):
email = self.cleaned_data.get('email')
password = self.cleaned_data.get('password')
confirm_password = self.cleaned_data.get('confirm_password')
if password or confirm_password:
if password != confirm_password:
self.data['password'] = ''
self.data['confirm_password'] = ''
                raise forms.ValidationError("The passwords don't match.")
else:
if len(password) < 6:
self._errors['password'] = "Passwords must be at least 6 characters long."
self.data['password'] = ''
self.data['confirm_password'] = ''
return self.cleaned_data
def save(self):
data = self.cleaned_data
account = self.update
account.email = data['email']
account.firstname = data['firstname']
account.lastname = data['lastname']
if data['password']:
account.set_password(data['password'])
account.save()
| 42.321622 | 181 | 0.577687 |
4f4a55391c8b98ccff4f8fa197a7774edbdb672f | 819 | py | Python | scripts/feature_gen.py | ratulesrar3/sotu-approval-analysis | 4e4575e81795d09ce951ae289eb30158392ef37d | [
"MIT"
] | null | null | null | scripts/feature_gen.py | ratulesrar3/sotu-approval-analysis | 4e4575e81795d09ce951ae289eb30158392ef37d | [
"MIT"
] | null | null | null | scripts/feature_gen.py | ratulesrar3/sotu-approval-analysis | 4e4575e81795d09ce951ae289eb30158392ef37d | [
"MIT"
] | null | null | null | ### helper functions to create features for the classification problem
from nltk.tokenize import word_tokenize, sent_tokenize
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as datetime
def binary_approval(rating):
if rating > 50:
return 1
else:
return 0
def speech_num_words(speech):
return len(word_tokenize(speech))
def speech_num_sentences(speech):
return len(sent_tokenize(speech))
def approval_features(all_df, speeches):
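    # Adds per-date summary statistics (mean/median/min/max/std of the Approve
    # and Disapprove ratings) as new columns on the speeches frame in place.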
for col in ['Approve', 'Disapprove']:
for agg_type in ['mean', 'median', 'min', 'max', 'std']:
new_col = '{}_{}'.format(col.lower(), agg_type)
speeches[new_col] = pd.Series(list(round(all_df.groupby(['date'])[col].agg(agg_type), 3)), index=speeches.index) | 29.25 | 124 | 0.697192 |
4f5331d3c367c60ed6ec78c671db79d182b01e02 | 466 | py | Python | rbc/tests/test_utils.py | krunalkharat/rbc | f3a4665bde03d2cef6521dcea030f22236435f79 | [
"BSD-3-Clause"
] | null | null | null | rbc/tests/test_utils.py | krunalkharat/rbc | f3a4665bde03d2cef6521dcea030f22236435f79 | [
"BSD-3-Clause"
] | null | null | null | rbc/tests/test_utils.py | krunalkharat/rbc | f3a4665bde03d2cef6521dcea030f22236435f79 | [
"BSD-3-Clause"
] | null | null | null | from rbc.utils import is_localhost, get_local_ip, triple_matches
def test_is_localhost():
assert is_localhost(get_local_ip())
def test_triple_matches():
assert triple_matches('cuda', 'nvptx64-nvidia-cuda')
assert triple_matches('nvptx64-nvidia-cuda', 'cuda')
assert triple_matches('cuda32', 'nvptx-nvidia-cuda')
assert triple_matches('nvptx-nvidia-cuda', 'cuda32')
assert triple_matches('x86_64-pc-linux-gnu', 'x86_64-unknown-linux-gnu')
| 33.285714 | 76 | 0.748927 |
4f52501b0d901a4a091b0594ba4c58c6b6648438 | 6,167 | py | Python | alipay/aop/api/domain/MybankPaymentTradeNormalpayOrderPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MybankPaymentTradeNormalpayOrderPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MybankPaymentTradeNormalpayOrderPayModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeNormalpayOrderPayModel(object):
def __init__(self):
self._amount = None
self._currency_value = None
self._ext_info = None
self._operate_scene_type = None
self._order_no = None
self._order_type = None
self._payee_fund_detail = None
self._payer_fund_detail = None
self._remark = None
self._request_no = None
self._request_time = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def currency_value(self):
return self._currency_value
@currency_value.setter
def currency_value(self, value):
self._currency_value = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def operate_scene_type(self):
return self._operate_scene_type
@operate_scene_type.setter
def operate_scene_type(self, value):
self._operate_scene_type = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
self._order_type = value
@property
def payee_fund_detail(self):
return self._payee_fund_detail
@payee_fund_detail.setter
def payee_fund_detail(self, value):
self._payee_fund_detail = value
@property
def payer_fund_detail(self):
return self._payer_fund_detail
@payer_fund_detail.setter
def payer_fund_detail(self, value):
self._payer_fund_detail = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
@property
def request_time(self):
return self._request_time
@request_time.setter
def request_time(self, value):
self._request_time = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.currency_value:
if hasattr(self.currency_value, 'to_alipay_dict'):
params['currency_value'] = self.currency_value.to_alipay_dict()
else:
params['currency_value'] = self.currency_value
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.operate_scene_type:
if hasattr(self.operate_scene_type, 'to_alipay_dict'):
params['operate_scene_type'] = self.operate_scene_type.to_alipay_dict()
else:
params['operate_scene_type'] = self.operate_scene_type
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.order_type:
if hasattr(self.order_type, 'to_alipay_dict'):
params['order_type'] = self.order_type.to_alipay_dict()
else:
params['order_type'] = self.order_type
if self.payee_fund_detail:
if hasattr(self.payee_fund_detail, 'to_alipay_dict'):
params['payee_fund_detail'] = self.payee_fund_detail.to_alipay_dict()
else:
params['payee_fund_detail'] = self.payee_fund_detail
if self.payer_fund_detail:
if hasattr(self.payer_fund_detail, 'to_alipay_dict'):
params['payer_fund_detail'] = self.payer_fund_detail.to_alipay_dict()
else:
params['payer_fund_detail'] = self.payer_fund_detail
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
if self.request_time:
if hasattr(self.request_time, 'to_alipay_dict'):
params['request_time'] = self.request_time.to_alipay_dict()
else:
params['request_time'] = self.request_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankPaymentTradeNormalpayOrderPayModel()
if 'amount' in d:
o.amount = d['amount']
if 'currency_value' in d:
o.currency_value = d['currency_value']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'operate_scene_type' in d:
o.operate_scene_type = d['operate_scene_type']
if 'order_no' in d:
o.order_no = d['order_no']
if 'order_type' in d:
o.order_type = d['order_type']
if 'payee_fund_detail' in d:
o.payee_fund_detail = d['payee_fund_detail']
if 'payer_fund_detail' in d:
o.payer_fund_detail = d['payer_fund_detail']
if 'remark' in d:
o.remark = d['remark']
if 'request_no' in d:
o.request_no = d['request_no']
if 'request_time' in d:
o.request_time = d['request_time']
return o
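# Hedged round-trip sketch (editor's addition; field values are illustrative):
#   model = MybankPaymentTradeNormalpayOrderPayModel()
#   model.order_no = '20190101000001'
#   params = model.to_alipay_dict()
#   clone = MybankPaymentTradeNormalpayOrderPayModel.from_alipay_dict(params)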
| 32.287958 | 87 | 0.604021 |
4f4c95f74358defd6290b5df4ac0fc0d0ae3c8b4 | 867 | py | Python | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | 1 | 2020-04-25T15:20:39.000Z | 2020-04-25T15:20:39.000Z | from ....report.structure.variables.render_boolean import render_boolean
from ....report.structure.variables.render_categorical import (
render_categorical,
)
from ....report.structure.variables.render_common import render_common
from ....report.structure.variables.render_complex import render_complex
from ....report.structure.variables.render_count import render_count
from ....report.structure.variables.render_date import render_date
from ....report.structure.variables.render_path import render_path
from ....report.structure.variables.render_path_image import (
render_path_image,
)
from ....report.structure.variables.render_real import render_real
from ....report.structure.variables.render_url import render_url
from ....report.structure.variables.render_generic import render_generic
from ....report.structure.variables.render_int import render_int | 54.1875 | 72 | 0.828143 |
4f4b8aeea732f909db8d476ccedc5225748394c6 | 3,105 | py | Python | indico/modules/events/models/roles.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | 1 | 2021-12-27T17:51:27.000Z | 2021-12-27T17:51:27.000Z | indico/modules/events/models/roles.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | null | null | null | indico/modules/events/models/roles.py | javfg/indico | 2634756ba1e9caf6dd8fc9afc3f47291fda5816d | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.util.locators import locator_property
from indico.util.string import format_repr
class EventRole(db.Model):
__tablename__ = 'roles'
__table_args__ = (db.CheckConstraint('code = upper(code)', 'uppercase_code'),
db.Index(None, 'event_id', 'code', unique=True),
{'schema': 'events'})
principal_order = 2
principal_type = PrincipalType.event_role
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
nullable=False,
index=True
)
name = db.Column(
db.String,
nullable=False
)
code = db.Column(
db.String,
nullable=False
)
color = db.Column(
db.String,
nullable=False
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'roles',
cascade='all, delete-orphan',
lazy=True
)
)
members = db.relationship(
'User',
secondary='events.role_members',
lazy=True,
collection_class=set,
backref=db.backref('event_roles', lazy=True, collection_class=set),
)
# relationship backrefs:
# - in_attachment_acls (AttachmentPrincipal.event_role)
# - in_attachment_folder_acls (AttachmentFolderPrincipal.event_role)
# - in_contribution_acls (ContributionPrincipal.event_role)
# - in_event_acls (EventPrincipal.event_role)
# - in_event_settings_acls (EventSettingPrincipal.event_role)
# - in_session_acls (SessionPrincipal.event_role)
# - in_track_acls (TrackPrincipal.event_role)
def __contains__(self, user):
return user is not None and self in user.event_roles
def __repr__(self):
return format_repr(self, 'id', 'code', _text=self.name)
@locator_property
def locator(self):
return dict(self.event.locator, role_id=self.id)
@property
def identifier(self):
return f'EventRole:{self.id}'
@property
def obj(self):
return self.event
@property
def css(self):
return 'color: #{0} !important; border-color: #{0} !important'.format(self.color)
@property
def style(self):
return {'color': '#' + self.color, 'borderColor': '#' + self.color}
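# Association table backing the many-to-many EventRole.members relationship
# declared above (secondary='events.role_members').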
role_members_table = db.Table(
'role_members',
db.metadata,
db.Column(
'role_id',
db.Integer,
db.ForeignKey('events.roles.id'),
primary_key=True,
nullable=False,
index=True
),
db.Column(
'user_id',
db.Integer,
db.ForeignKey('users.users.id'),
primary_key=True,
nullable=False,
index=True
),
schema='events'
)
| 25.875 | 89 | 0.616747 |
4f506dd62871d6bfa71697dcb44e4d51541a56f9 | 17,574 | py | Python | tests/terraform/checks/resource/azure/test_NSGRuleUDPAccessRestricted.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | tests/terraform/checks/resource/azure/test_NSGRuleUDPAccessRestricted.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | tests/terraform/checks/resource/azure/test_NSGRuleUDPAccessRestricted.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | import unittest
import hcl2
from checkov.terraform.checks.resource.azure.NSGRuleUDPAccessRestricted import check
from checkov.common.models.enums import CheckResult
class TestNSGRuleUDPAccessRestricted(unittest.TestCase):
def test_failure1(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure2(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "any"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure3(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "<nw>/0"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure4(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "/0"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure5(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "internet"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success1(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Deny"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success2(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Outbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success3(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "acceptanceTestSecurityGroup1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Production"
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_failure_rule_1(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_rule_2(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "any"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_rule_3(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "<nw>/0"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_rule_4(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "/0"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_rule_5(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "internet"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success_rule_1(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Deny"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_rule_2(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Outbound"
access = "Allow"
protocol = "Udp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_rule_3(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_rule" "example" {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_rule']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_unsupported_syntax(self):
hcl_res = hcl2.loads("""
resource "azurerm_network_security_group" "example" {
name = "${var.autoscaler_prefix}autoscaler-nsg"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
security_rule = [for idx, rule in var.autoscaler_ssh_permit: {
name = "allow-${rule.name}"
priority = 100 + idx
direction = "Inbound"
access = "Allow"
protocol = "TCP"
source_address_prefix = rule.ip
source_port_range = "*"
destination_address_prefix = "*"
destination_port_range = "22"
description = ""
destination_address_prefixes = null
destination_application_security_group_ids = null
destination_port_ranges = null
source_address_prefixes = null
source_application_security_group_ids = null
source_port_ranges = null
}]
tags = var.autoscaler_tags_nsg
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_network_security_group']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 42.346988 | 91 | 0.491351 |
4f532ecc30c9ae2df2afc94717b747874b64a9fa | 2,764 | py | Python | webapp/flaskapp/views.py | bradysalz/SavingChristmas | 29bd36b33e414ffae88001b878eec041363cf766 | [
"MIT"
] | null | null | null | webapp/flaskapp/views.py | bradysalz/SavingChristmas | 29bd36b33e414ffae88001b878eec041363cf766 | [
"MIT"
] | 1 | 2017-01-16T21:52:41.000Z | 2017-01-16T21:52:41.000Z | webapp/flaskapp/views.py | bradysalz/SavingChristmas | 29bd36b33e414ffae88001b878eec041363cf766 | [
"MIT"
] | null | null | null | import requests
from flaskapp import app
from flaskapp.models import db, User, add_user_to_db
from flask import Flask, render_template, request, redirect, url_for
from .config import SECRET_KEY
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///lights.db'
app.secret_key = SECRET_KEY
# First time init, run python in webapp/
# >>> import flaskapp
# >>> flaskapp.models.db.create_all(app=flaskapp.app)
db.init_app(app)
@app.route('/')
def home():
"""
List all users or add new user
"""
users = User.query.all()
return render_template('home.html', users=users)
@app.route('/getmycolor', methods=['POST'])
def get_my_color():
"""
Called by the Particle device when it wakes up.
"""
print("WOW")
print(request.data)
print(request.form)
return "hello"
@app.route('/newuser', methods=['GET', 'POST'])
def add_user():
"""
Create new user
"""
if request.method == 'GET':
users = User.query.all()
return render_template('add.html', users=users)
if request.method == 'POST':
print(request.form['name'])
add_user_to_db(db, request.form)
return redirect(url_for('home'), code=302)
@app.route('/user/<user_url>', methods=['GET', 'POST'])
def user_page(user_url):
"""
User home page, can edit their settings from here
Currently no authentication
"""
current_user = User.query.filter(User.url == user_url).first()
users = User.query.all()
users.remove(current_user)
if users is None:
users = []
# TODO: @KAREN
# Debug why this doesn't work
# test.py checks all of this and passes
# throws up an error about how it can't find the
# follower.id column in followers table
is_following = [current_user.is_following(user) is not None for user in users]
followers = zip(users, is_following)
if request.method == 'GET':
return render_template('userpage.html', current_user=current_user,
followers=followers, update=False)
if request.method == 'POST':
# TODO
# take POST data and update database
sel_users = request.form.getlist('check')
chosen_color = request.values['color']
return render_template('userpage.html', current_user=current_user,
followers=followers, update=True)
@app.route('/touch/<user>')
def send_colors(user):
"""
iterate over families
find matching key
send to all users in family
"""
return render_template('base.html')
def update_chosen_colors(user, color):
"""
change User's LED to Color
"""
return render_template('base.html')
| 26.834951 | 79 | 0.642909 |
4f4edca9375fa7b12958902446d74c84464b8163 | 287,441 | py | Python | bom_manager/bom.py | waynegramlich/bom_manager | f59576aaa22b2925f3c57c913aab219b9fdc3a6b | ["MIT"] | 1 | 2018-03-15T23:30:01.000Z | 2018-03-15T23:30:01.000Z | bom_manager/bom.py | waynegramlich/bom_manager | f59576aaa22b2925f3c57c913aab219b9fdc3a6b | ["MIT"] | 1 | 2017-02-19T02:18:07.000Z | 2017-02-19T03:17:02.000Z | bom_manager/bom.py | waynegramlich/bom_manager | f59576aaa22b2925f3c57c913aab219b9fdc3a6b | ["MIT"] | null | null | null |
# BOM Manager
#
# BOM Manager is a program for managing one or more Bill of Materials.
#
# ## License
#
# MIT License
#
# Copyright (c) 2019 Wayne C. Gramlich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# <------------------------------------------- 100 characters -----------------------------------> #
#
# ## Coding standards:
#
# * General:
# * Python 3.6 or greater is used.
# * The PEP 8 coding standards are generally adhered to.
# * All code and documentation lines must be 100 characters or less. No exceptions.
# The comment labeled "100 characters" above is 100 characters long for editor width resizing
# purposes.
# * Indentation levels are multiples of 4 spaces.
# * Use `flake8 --max-line-length=100 PROGRAM.py` to check for issues.
# * Class/Function standards:
# * Classes:
# * Classes are named in CamelCase as per Python recommendation.
# * Classes are listed alphabetically with sub-classes are listed alphabetically
# immediately after their base class definition.
# * Methods:
# * All methods are in lower case. If multiple words, the words are separated by underscores.
# The words are ordered as Adjectives, Noun, Adverbs, Verb. Thus, *xml_file_load* instead of
# *load_xml_file*.
# * All methods are listed alphabetically, where an underscore is treated as a space.
# * All methods check their argument types (i.e. no duck typing!!!)
# * Inside a method, *self* is almost always replaced with more descriptive variable name.
# * To aid debugging, many functions have an optional *tracing* argument of the form
# `tracing=""`. If the @trace(LEVEL) decorator preceeds the function/method, the current
# indentation string is assigned to *tracing*.
# * Functions:
# * The top-level main() function occurs first.
# * Top-level functions use the same coding standards as methods (see above.)
# * Variables:
# * Variables are lower case with underscores between words.
# * No single letter variables except for standard mathematical concepts such as X, Y, Z.
# Use `index` instead of `i`.
# * Comments:
# * All code comments are written in [Markdown](https://en.wikipedia.org/wiki/Markdown).
# * Code is organized into blocks are preceeded by comment that explains the code block.
# * For classes, a comment of the form # CLASS_NAME: is before each class definition as an
# aid for editor searching.
# * For methods, a comment of the form `# CLASS_NAME.METHOD_NAME():` is before each method
# definition as an aid for editor searching.
# * Print statements that were used for debugging are left commented out rather than deleted.
# * Misc:
# * The relatively new formatted string style `f"..."` is heavily used.
# * Generally, single character strings are in single quotes (`'`) and multi characters in double
# quotes (`"`). Empty strings are represented as `""`. Strings with multiple double quotes
# can be enclosed in single quotes (e.g. ` f'<Tag foo="{foo}" bar="{bar}"/>' `.)
#
# ## Install Notes:
#
# ## Tasks:
#
# The following tasks are outstanding:
#
# * Decode Digi-Key parametric search URL's.
# * Refactor the code to separate the GUI from the bom engine.
# * Start providing ordering operations.
# * Reorder tables/parameters/enumerations/searches.
# * Footprint hooks
# * Better parametric search
#
# ## Software Overview:
#
# This program is called *bom_manager* (i.e. Bill of Materials Manager.)
# The program fundamentally deals with the task of binding parts in a
# schematic with actual parts that can be ordered from one or more
# vendors (i.e. distributors, sellers, etc.) It also deals with binding
# the schematic parts with footprints that can be used with the final PCB
# (i.e. Printed Circuit Board.)
#
# In short we have:
#
# Schematic Part => Manufacturer Part
#
# and
#
# Manufacturer Part => (Footprint, Vendor Part)
#
# and
#
# Vendor Part => Pricing
#
# The footprints are passed into the printed circuit design workflow
# and the vendor parts are collected together into one or more orders
# that are sent off to various vendors (i.e. distributors) to be fulfilled.
# The decision of which vendor to order a part from depends upon parts
# availability and pricing. The availability and pricing information
# is obtained via a process of visiting various web sites and "screen
# scraping" the needed information from the associated web pages.
#
# ## Schematic Parts
#
# KiCad really only deals with schematic parts in the schematic drawing
# program *and* footprints in the PCB layout program. While in theory
# the binding between a schematic part and PCB footprint is one-to-one
# in practice KiCad keeps the two pretty decoupled. There is a "program"
# in KiCad called "CvPcb" that is responsible for binding schematic parts
# to PCB footprints. In general, it is very easy to make mistakes with
# CvPcb and generate incorrect PCB's where the actual parts do not fit
# properly onto the PCB's. It would be very nice to make this process
# less error prone.
#
# Numerous people have come up with a strategy of explicitly binding
# the schematic part to (footprint, manufacturer part, vendor_part)
# into the schematic part library. The KiCad fields was explicitly
# added to support these kinds of experiments. The problem with
# KiCad fields is that they are currently quite brittle. If you make
# a mistake in the schematic part library (which happens all the time),
# it is necessary to first correct the erroneous fields in the schematic
# part library *and* manually find and update each associated schematic
# part in the schematic. This strategy is currently extremely error
# prone.
#
# `bom_manager` uses a different strategy. In KiCad, each schematic
# part in the schematic has a reference name (e.g. R123, U7, etc.)
# and name (e.g. 74HC08, etc.) The name can be pretty much anything
# that the end user decides provides enough information to identify
# the desired part. What `bom_manager` does is structure this name
# as follows:
#
# name;footprint:comment
#
# where:
#
# * *name* specifies the part name,
#
# * *footprint* specifies the footprint to associate with the name, and
#
# * *comment* is an optional comment field that is ignored by `bom_manager`
#
# For example the Atmel ATmega328 comes in several different IC packages --
# DIP28, QFP32, and QFN32. The `bom_manager` program wants
# to see these symbols show up in the schematic as `ATMEGA328;DIP28`,
# `ATMEGA328;QFP32`, and `ATMEGA328;QFN32`. There is a textual database
# that maps these `bom_manager` names into the actual manufacturer part
# numbers of `ATmega328P-PU`, `ATmega328P-AU`, and `ATmega328-MU`.
# In addition, this database provides a binding to the correct KiCad
# footprint. If there is an error in the database, the database can
# be corrected, and the next time `bom_manager` is run, both the
# vendor orders and the footprints will be automatically propagated.
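#
# As a quick illustration, splitting a `bom_manager` name into its three
# fields can be sketched in a few lines of Python (this helper is purely
# illustrative and is not part of the actual `bom_manager` API):
#
#     def symbol_name_split(symbol_name):
#         """ Split "Name;Footprint:Comment" into a (name, footprint, comment) tuple. """
#         name_footprint, _, comment = symbol_name.partition(':')
#         name, _, footprint = name_footprint.partition(';')
#         return name, footprint, comment
#
#     # symbol_name_split("ATMEGA328;QFP32:DNI") => ("ATMEGA328", "QFP32", "DNI")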
#
# As another example, most people use fairly common resistor values
# in their electrical designs -- 10K, 22K, 33K, 47K, etc. These are
# considered to be generic parts (sometimes called common parts) and
# designer is happy as long as the resistors have 5% tolerance and
# can dissipate up to 1/8 Watt. Having said that, there are numerous
# footprints to choose from. Using inches, the common sizes
# are .008"x.005", .006"x.003", .004"x.002", and .002"x.001".
# These are abbreviated as 0805, 0603, 0402, and 0201. `bom_manager`
# uses the metric equivalent values of 2013, 1608, 1005, and 0503.
# Thus, a 5% 1/8W 10K resistor in a .006"x.003" package would be
# listed as "10K;1608". Again, the database is responsible for specifying
# a list of acceptable manufacturer parts that have the same .16mm x.08mm
# footprint. `bom_manager` is responsible for selecting the specific
# manufacturer part based on availability and price.
#
# Once the designer (i.e. you) have used schematic part names that
# adhere to the `bom_manager` format, the footprint and vendor
# selection is totally automated.
#
# ## Screen Scraping
#
# In the context of `bom_manager` "screen scraping" is the process
# of fetching a web page and obtaining information from the web page
# to be feed into the ordering and footprint process. In Python,
# screen scraping is typically done using the `BeautifulSoup` library
# which can parse and search HTML. In general, every distributor
# provides a web interface for searching the parts that they can
# supply. In addition, there are some electronic part aggregation
# sites (e.g. Octopart, FindChips, SnapEDA, etc.) that serve up
# useful information from their web servers.
#
# In general, the distribution and aggregation outfits are not
# thrilled with screen scrapers. Here are some reasons why:
#
# * The web vendors are constantly tweaking their HTML. This causes
# screen scrapers to break on a regular basis. They feel no
# particular obligation to support screen scrapers.
#
# * Distributors do not want it to be easy for their competitors
# to match prices.
#
# * Aggregators have a business model where they want to sell
# premium access to their databases. Screen scraping makes
# easier for other aggregators to set up shop.
#
# There are a number of ethical issues here. It costs real money
# to hire people to set up a good distributor web server.
# In general, distributors recoup the web server development
# expense when people purchase the parts from distributor.
# Again, it costs real money to hire people and acquire data feeds
# to set up aggregation web servers. Charging users for access
# is a reasonable business model for aggregations sites.
#
# The position of the `bom_manager` is that free market economics
# is the way to go. If the distributors/aggregators provide a
# viable alternative to screen scrapers at a price that is acceptable
# to the designers, the alternatives will be used; if not, screen
# scrapers will continue to be developed and supported by the free
# software community.
# Import some libraries (alphabetical order):
from argparse import ArgumentParser
# from bs4 import BeautifulSoup # HTML/XML data structure searching
# import bs4
# import copy # Used for the old pickle code...
import csv
# from currency_converter import CurrencyConverter # Currency converter
# import fnmatch # File Name Matching
# import glob # Unix/Linux style command line file name pattern matching
# import io # I/O stuff
import lxml.etree as etree # type: ignore
# import pickle # Python data structure pickle/unpickle
import pkg_resources # Used to find plug-ins.
# import pkgutil
from bom_manager.tracing import trace, trace_level_get, trace_level_set, tracing_get
import os
import re # Regular expressions
# import requests # HTML Requests
# import sexpdata # (LISP) S-EXpression Data
# from sexpdata import Symbol # (LISP) S-EXpression Symbol
# import subprocess
import sys
import time # Time package
from typing import Any, Callable, Dict, IO, List, Optional, Tuple, Union
Number = Union[int, float]
PreCompiled = Any
Quad = Tuple[int, float, int, str]
Quint = Tuple[float, int, int, int, int, int]
# *Quint* is misnamed, it currently has 6 fields:
# * Total Cost (float):
# * Order Quantity (int):
# * Actual Part Index (int):
# * Vendor Part Index (int):
# * Price Break Index (int):
# * Price Breaks Size (int):
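# For example, a quint recording "25 units for $12.50 total, using actual part
# index 3, vendor part index 7, price break 2 out of 5 breaks" would look like
# this (the values are illustrative only):
#
#     quint: Quint = (12.50, 25, 3, 7, 2, 5)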
# import xmlschema
# Data Structure and Algorithm Overview:
#
# There are a plethora of interlocking data structures. The top level
# concepts are listed below:
#
# *Order*: An order corresponds to parts order. The parts order may
# be split between multiple *Vendor*'s.
#
# *Vendor*: A vendor corresponds to a distributor such as DigiKey, Mouser,
# Newark, etc.
#
# *Project*: A project corresponds to a single PCB (e.g. a .kicad_pcb file.)
# A single order may order specify multiple PCB's and different quantities
# for each PCB.
#
# *Manufacturer*: A manufacturer is the company that owns the factory that
# creates an electronic part (e.g. MicroChip, Atmel, Texas Instruments, etc.)
# (Note: The *Manufacturer* class has not been defined in this code yet.)
#
# *Database*: The *Database* is responsible for maintaining the bindings
# between various symbols, manufacturer parts, vendor part numbers, etc.
#
# Footprint: A footprint is a description of the footprint to use
# with the part. There is a concept of a short footprint which
# can be used to disambiguate between different packages of the
# same basic part (e.g. QFP32 vs. DIP28) and a fully specified
# KiCad footprint.
#
# Part: The concept of a part is a bit more fluid. The manufacturer
# has its parts, the vendor (i.e. distributor) has its parts, and
# the schematic has it parts.
#
# There is a fairly complex set of data structures that link the above
# data structures together. They are listed below:
#
# *PosePart*: A *PosePart* is essentially one-to-one with a Schematic
# symbol in KiCad. In particular, it specifies both the annotation
# reference (e.g. SW12, U7, R213, etc.) and a *Schematic_Symbol_Name*
# (e.g. ATMEGA328-PU;QFP32, 74HC08;SOIC14, etc.)
#
# *Schematic_Symbol_Name*: A *Schematic_Symbol_Name* is a string that
# has the following structure "Name;Footprint:Comment", where "Name"
# is a logical part name (e.g. ATmega328, 74HC08, 10K, .1uF, etc.),
# "footprint" is a short footprint name (QFP32, SOIC14, 1608, etc.),
# and ":Comment" is a optional comment like ":DNI" (Do Not Install, ...)
# (Note: The *Schematic_Symbol_Name* has not yet been defined as a
# Python class.)
#
# *ProjectPart*: A schematic part is one-to-one with a
# *Schematic_Symbol_Name* (excluding the comment field.) A *ProjectPart*
# essentially provides a mapping from a *Schematic_Symbol_Name" to a list of
# acceptable manufacturer parts, which in turn provides a mapping to
# acceptable vendor parts. There are three sub-classes of *ProjectPart* --
# *ChoicePart*, *AliasPart*, and *FractionalPart*. As the algorithm
# proceeds, all *AliasPart*'s and *FractionalPart*'s are converted into
# associated *ChoicePart*'s. Thus, *ChoicePart* is the most important
# sub-class of *ProjectPart*.
#
# *ChoicePart*: A *ChoicePart* is sub-classed from *ProjectPart*
# and lists one or more acceptable *ActualPart*'s. (An actual part
# is one-to-one with a manufacturer part -- see below.) A *ChoicePart*
# also specifies a full KiCad Footprint.
#
# *AliasPart*: An *AliasPart* is also sub-classed from *ProjectPart*
# and specifies one or more *ProjectParts* to substitute.
#
# *FractionalPart*: A *FractionalPart* is also sub-classed from
# *ProjectPart* and corresponds to a 1xN or 2xN break away header.
# It is a common special case that specifies a smaller number of pins
# than the full length header.
#
# *ActualPart*: An *ActualPart* should probably have been defined
# as a *Manufacturer_Part*. An *ActualPart* consists of a *Manufacturer*
# (e.g "Atmel"), and Manufacturer part name (e.g. "ATMEGA328-PU".)
#
# *VendorPart*: A *VendorPart* should have probably been defined
# as a *Distributor_Part*. A *Vendor* part consists of a *Vendor*
# (e.g. "Mouser") and a *VendorPart_Name* (e.g. "123-ATMEGA328-PU").
#
# Notice that there are 6 different part classes: *ProjectPart*,
# *ChoicePart*, *AliasPart*, *FractionalPart*, *ActualPart* and
# *VendorPart*. Having this many different part classes is needed
# to precisely keep track of everything.
#
# There are a few more classes to worry about:
#
# *Order*: An *Order* specifies a list of *Project*'s and a quantity
# for each *Project*. Also, an order can specify a list of vendors
# to exclude from the order.
#
# *Project*: A *Project* is one-to-one with KiCad PCB. It is basicaly
# consists of a list of *PosePart*'s.
#
# *PosePart*: A *PosePart* is basically a *Schematic_Symbol_Name*
# along with a project annotation reference (e.g. R123, U7, etc.)
#
# **:
#
#
# There are three sub-classes of *ProjectPart*:
#
# * *ChoicePart*: A list of possible *ActualPart*'s to choose from.
#
# * *AliasPart*: An alias specifies one or more schematic parts to
# redirect to.
#
# * *FractionalPart*: A fractional part is an alias to another schematic
# part that specifies a fraction of the part. A fractional part is
# usually a 1x40 or 2x40 break-away male header. They are so common
# they must be supported.
#
# Now the algorithm iterates through each *ProjectPart* to convert
# each *FractionalPart* and *AliasPart* into *ChoicePart*.
# Errors are flagged.
#
# The *Database* maintains a list of *Vendor*'s and *VendorParts*.
#
# For each *ChoicePart*, the *ActualPart* list is iterated over.
# (Warning: the code has evolved somewhat here...)
# Octopart is queried to build up a *VendorPart* list. The
# queries are cached to avoid making excessive queries to Octopart.
# Only *ActualPart*'s that are not in the cache get sent off to
# Octopart to fill the cache. A log of Octopart queries is kept to
# get an idea of how often the database is queried. It may be the
# case that there is a flag that disables queries until the user
# explicitly asks for it.
#
# Now that there is a list of *VendorPart*'s for each *ActualPart*,
# the lowest cost *VendorPart* is selected based on the number of
# *ActualPart*'s needed. The code identifies the cheapest *VendorPart*
# and it may adjust the quantity ordered up to get the benefit of a
# price break. This is where vendor exclusion occurs. Errors are
# generated if there are no *VendorPart* left due to exclusion or
# unavailable stock.
#
# Now various reports are generated based on sorting by vendor,
# sorting by cost, etc. The final BOM's for each project are generated
# as a .csv file.
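#
# A minimal sketch of driving this pipeline is shown below; it mirrors what
# *main*() below does, with a made-up BOM file name and board count:
#
#     order = Order(order_root, cads, pandas)
#     order.project_create("main_board", "C", "rev_c/main_board.net", 2)
#     order.process(collections)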
# main():
@trace(1)
def main() -> int:
# Run the *Encode* class unit tests:
Encode.test()
collections_directories: List[str]
searches_root: str
order: Order
collections_directories, searches_root, order = command_line_arguments_process()
gui: Gui = Gui()
partial_load: bool = True
collections: Collections = Collections("Collections", collections_directories,
searches_root, partial_load, gui)
order.process(collections)
return 0
# command_line_arguments_process():
@trace(1)
def command_line_arguments_process() -> Tuple[List[str], str, "Order"]:
# Set up command line *parser* and parse it into *parsed_arguments* dict:
tracing: str = tracing_get()
parser: ArgumentParser = ArgumentParser(description="Bill of Materials (BOM) Manager.")
parser.add_argument("-b", "--bom", action="append", default=[],
help="Bom file (.csv, .net). Preceed with 'NUMBER:' to increase count. ")
parser.add_argument("-s", "--search", default="searches",
help="BOM Manager Searches Directory.")
parser.add_argument("-o", "--order", default=os.path.join(os.getcwd(), "order"),
help="Order Information Directory")
parser.add_argument("-v", "--verbose", action="count",
help="Set tracing level (defaults to 0 which is off).")
# Now parse the command line arguments:
parsed_arguments: Dict[str, Any] = vars(parser.parse_args())
trace_level: int = 0 if parsed_arguments["verbose"] is None else parsed_arguments["verbose"]
trace_level_set(trace_level)
# Fill in the *pandas* list with *Panda* objects for doing pricing and availabity checking:
pandas: List[Panda] = list()
entry_point_key: str = "bom_manager_panda_get"
index: int
entry_point: pkg_resources.EntryPoint
for index, entry_point in enumerate(pkg_resources.iter_entry_points(entry_point_key)):
entry_point_name: str = entry_point.name
if tracing:
print(f"{tracing}Panda_Entry_Point[{index}]: '{entry_point_name}'")
assert entry_point_name == "panda_get"
panda_get: Callable = entry_point.load()
assert callable(panda_get)
panda: Panda = panda_get()
pandas.append(panda)
# Fill in the *cads* list with *Cad* objects for reading in BOM files:
cads: List[Cad] = list()
entry_point_key = "bom_manager_cad_get"
for index, entry_point in enumerate(pkg_resources.iter_entry_points(entry_point_key)):
entry_point_name = entry_point.name
if tracing:
print(f"{tracing}Cad_Entry_Point[{index}]: '{entry_point_name}'")
assert entry_point_name == "cad_get"
cad_get: Callable = entry_point.load()
assert callable(cad_get)
cad: Cad = cad_get()
cads.append(cad)
# Now create the *order* object. It is created here because we need *order*
# for dealing with *bom_file_names* immediately below:
order_root: str = parsed_arguments["order"]
order: Order = Order(order_root, cads, pandas)
if tracing:
print(f"{tracing}order_created")
# Deal with *bom_file_names* from *parsed_arguments*:
bom_file_names: List[str] = parsed_arguments["bom"]
bom_file_name: str
for bom_file_name in bom_file_names:
if bom_file_name.endswith(".net") or bom_file_name.endswith(".csv"):
# We have a `.net` file name:
colon_index: int = bom_file_name.find(':')
# print(f"colon_index={colon_index}")
count: int = 1
if colon_index >= 0:
count = int(bom_file_name[:colon_index])
# Skip past the ':' itself so it is not left in the file name:
bom_file_name = bom_file_name[colon_index + 1:]
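# Example (illustrative): "-b 3:boards/main.net" yields *count* == 3 and
# *bom_file_name* == "boards/main.net".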
# print(f"count={count}")
assert os.path.isfile(bom_file_name), f"'{bom_file_name}' does not exist."
path: str
base_name: str
path, base_name = os.path.split(bom_file_name)
name: str = base_name[:-4]
revision_letter: str = 'A'
if len(path) >= 2:
revision_letter = path[-1].upper()
if tracing:
print(f"path={path}")
print(f"base_name='{base_name}'")
print(f"name='{name}'")
print(f"revision_letter='{revision_letter}'")
# Create an order project:
order.project_create(name, revision_letter, bom_file_name, count)
else:
print(f"Ignoring file '{bom_file_name}' does not with '.net' or '.csv' suffix.")
if tracing:
print(f"{tracing}nets processed")
collection_directories: List[str] = list()
searches_root: str = os.path.abspath(parsed_arguments["search"])
return collection_directories, searches_root, order
# # "se" stands for "S Expression":
# def se_find(se, base_name, key_name):
# """ {}: Find *key_name* in *se* and return its value. """
#
# # *se* is a list of the form:
# #
# # [base_name, [key1, value1], [key2, value2], ..., [keyN, valueN]]
# #
# # This routine searches through the *[keyI, valueI]* pairs
# # and returnts the *valueI* that corresponds to *key_name*.
#
# # Check argument types:
# # assert isinstance(se, list)
# # assert isinstance(base_name, str)
# # assert isinstance(key_name, str)
#
# # Do some sanity checking:
# # size = len(se)
# # assert size > 0
# # assert se[0] == Symbol(base_name)
#
# result = None
# # key_symbol = Symbol(key_name)
# # for index in range(1, size):
# # sub_se = se[index]
# # if len(sub_se) > 0 and sub_se[0] == key_symbol:
# # result = sub_se
# # break
# return result
# def text2safe_attribute(text):
# # Sweep across *text* one *character* at a time performing any necessary conversions:
# new_characters = list()
# for character in text:
# new_character = character
# if character == '&':
# new_character = "&amp;"
# elif character == '<':
# new_character = "&lt;"
# elif character == '>':
# new_character = "&gt;"
# elif character == "'":
# new_character = "&apos;"
# elif character == '"':
# new_character = "&quot;"
# new_characters.append(new_character)
# safe_attribute = "".join(new_characters)
# return safe_attribute
# text_filter():
def text_filter(text: str, function: Callable) -> str:
return "".join([character for character in text if function(character)])
# ActualPart:
class ActualPart:
# An *ActualPart* represents a single manufacturer part.
# A list of vendor parts specifies where the part can be ordered from.
ACTUAL_PART_EXCHANGE_RATES: Dict[str, float] = dict()
# ActualPart.__init__():
def __init__(self, manufacturer_name: str, manufacturer_part_name: str) -> None:
""" *ActualPart*: Initialize *self* to contain *manufacturer* and
*manufacturer_part_name*. """
# Create the *key* for *actual_part* (i.e. *self*):
# actual_part: ActualPart = self
key: Tuple[str, str] = (manufacturer_name, manufacturer_part_name)
# Load up *actual_part* (i.e. *self*):
# actual_part: Actual_Part = self
self.manufacturer_name: str = manufacturer_name
self.manufacturer_part_name: str = manufacturer_part_name
self.key: Tuple[str, str] = key
# Fields used by algorithm:
self.quantity_needed: int = 0
self.vendor_parts: List[VendorPart] = []
self.selected_vendor_part: Optional[VendorPart] = None
# ActualPart.__eq__():
def __eq__(self, actual_part2: object) -> bool:
equal: bool = False
if isinstance(actual_part2, ActualPart):
actual_part1: ActualPart = self
equal = actual_part1.key == actual_part2.key
if equal:
# Extract *vendor_parts* making sure that they are sorted:
vendor_parts1: List[VendorPart] = actual_part1.sorted_vendor_parts_get()
vendor_parts2: List[VendorPart] = actual_part2.sorted_vendor_parts_get()
equal &= len(vendor_parts1) == len(vendor_parts2)
if equal:
index: int
vendor_part1: VendorPart
for index, vendor_part1 in enumerate(vendor_parts1):
vendor_part2: VendorPart = vendor_parts2[index]
if vendor_part1 != vendor_part2:
equal = False
break
return equal
# ActualPart.__str__():
def __str__(self) -> str:
actual_part: ActualPart = self
manufacturer_part_name: str = "??"
if hasattr(actual_part, "manufacturer_part_name"):
manufacturer_part_name = actual_part.manufacturer_part_name
return (f"ActualPart('{manufacturer_part_name}')")
# ActualPart.sorted_vendor_parts_get():
def sorted_vendor_parts_get(self) -> "List[VendorPart]":
actual_part: ActualPart = self
vendor_parts: List[VendorPart] = actual_part.vendor_parts
sort_function: Callable = lambda vendor_part: vendor_part.vendor_key
vendor_parts.sort(key=sort_function)
return vendor_parts
# ActualPart.vendor_names_load():
def vendor_names_load(self, vendor_names_table: Dict[str, None],
excluded_vendor_names: Dict[str, None]) -> None:
""" *ActualPart*:*: Add each possible to vendor name for the
*ActualPart* object (i.e. *self*) to *vendor_names_table*:
"""
# Add the possible vendor names for *vendor_part* to
# *vendor_names_table*:
vendor_part: VendorPart
for vendor_part in self.vendor_parts:
vendor_name: str = vendor_part.vendor_name
if vendor_name not in excluded_vendor_names:
vendor_names_table[vendor_name] = None
# ActualPart.vendor_part_append():
def vendor_part_append(self, vendor_part: "VendorPart") -> None:
""" *ActualPart: Append *vendor_part* to the vendor parts of *self*. """
# Append *vendor_part* to the *actual_part* (i.e. *self*):
actual_part: ActualPart = self
actual_part.vendor_parts.append(vendor_part)
# ActualPart.vendor_parts_restore():
def vendor_parts_restore(self, order: "Order") -> bool:
# FIXME: What does this routine actually do?:
assert False
result: bool = False
return result
# ActualPart.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *actual_part* (i.e. *self*):
actual_part: ActualPart = self
manufacturer_name: str = actual_part.manufacturer_name
manufacturer_part_name: str = actual_part.manufacturer_part_name
vendor_parts: List[VendorPart] = actual_part.vendor_parts
# Output the `<ActualPart ...>` tag first:
xml_lines.append(f'{indent}<ActualPart '
f'manufacturer_name="{Encode.to_attribute(manufacturer_name)}" '
f'manufacturer_part_name="{Encode.to_attribute(manufacturer_part_name)}">')
# Output the nested `<VendorPart ...>` tags:
next_indent: str = indent + " "
vendor_part: VendorPart
for vendor_part in vendor_parts:
vendor_part.xml_lines_append(xml_lines, next_indent)
# Close out with the `</ActualPart>` tag:
xml_lines.append(f"{indent}</ActualPart>")
# ActualPart.xml_parse():
@staticmethod
def xml_parse(actual_part_tree: etree._Element) -> "ActualPart":
# Grab the attribute information out of *actual_part_tree*:
assert actual_part_tree.tag == "ActualPart"
attributes_table: Dict[str, str] = actual_part_tree.attrib
manufacturer_name: str = attributes_table["manufacturer_name"]
manufacturer_part_name: str = attributes_table["manufacturer_part_name"]
vendor_part_trees: List[etree._Element] = list(actual_part_tree)
# Create *actual_part* with empty *vendor_parts*:
actual_part: ActualPart = ActualPart(manufacturer_name, manufacturer_part_name)
vendor_parts: List[VendorPart] = actual_part.vendor_parts
# Process all of the `<VendorPart ...>` tags:
for vendor_part_tree in vendor_part_trees:
vendor_part: VendorPart = VendorPart.xml_parse(vendor_part_tree, actual_part)
vendor_parts.append(vendor_part)
return actual_part
# Cad:
class Cad:
# Cad Stands for Computer Aided Design:
# Cad.__init__():
def __init__(self, name: str) -> None:
pass # This is just a place holder class that is sub-classed against.
# Cad.file_read():
def file_read(self, file_name: str, project: "Project") -> bool:
cad: Cad = self
class_name: str = cad.__class__.__name__
assert False, f"{class_name}.file_read() has not been implemented."
return False
# Comment:
class Comment:
# Comment.__init__():
def __init__(self, language: str, lines: List[str]) -> None:
# Load up *comment* (i.e. *self*):
# comment: Comment = self
self.language: str = language
self.lines: List[str] = lines
# Comment.__eq__():
def __eq__(self, comment2: object) -> bool:
# `mypy` recommends that the *__eq__* method work with any *object*. So we start
# with *equal* set to *False* and only set it to *True* on success:
equal: bool = False
if isinstance(comment2, Comment):
comment1: Comment = self
language_equal: bool = comment1.language == comment2.language
lines_equal: bool = comment1.lines == comment2.lines
equal = language_equal and lines_equal
return equal
# Comment.__str__():
def __str__(self) -> str:
# Return a simple string since there is no need to expose the contents of the comment
# (i.e. *self*):
comment: Comment = self
language: str = "??"
if hasattr(comment, "language"):
language = comment.language
class_name: str = comment.__class__.__name__
return f"{class_name}('{language}')"
# Comment.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *comment* (i.e. *self*):
comment: Comment = self
class_name: str = comment.__class__.__name__
language: str = comment.language
lines: List[str] = comment.lines
# Output the initial element XML tag (i.e. *class_name*):
xml_lines.append(f'{indent}<{class_name} language="{language}">')
# Output the comment *lines*:
line: str
for line in lines:
xml_lines.append(f"{indent} {line}")
# Output the closing element XML tag:
xml_lines.append(f'{indent}</{class_name}>')
# Comment.xml_parse_helper():
@staticmethod
def xml_parse_helper(comment_tree: etree._Element) -> Tuple[Dict[str, str], str, List[str]]:
# Grab some values from *comment_tree*:
attributes_table: Dict[str, str] = comment_tree.attrib
assert "language" in attributes_table
language: str = attributes_table["language"]
# Grab the *text* from *comment_tree*, split it into *lines*, clean up each line:
text: str = comment_tree.text.strip()
lines: List[str] = text.split('\n')
for index, line in enumerate(lines):
lines[index] = line.strip().replace("&lt;", "<").replace("&gt;", ">")
# Return everything:
return attributes_table, language, lines
# EnumerationComment:
class EnumerationComment(Comment):
# EnumerationComment.__init__():
def __init__(self, language: str, lines: List[str]) -> None:
# An *EnumerationComment* is just pure sub-class of *Comment*:
super().__init__(language, lines)
# EnumerationComment.xml_parse():
@staticmethod
def xml_parse(comment_tree: etree._Element) -> "EnumerationComment":
# Grab some values from *comment_tree*):
attributes_table: Dict[str, str]
language: str
lines: List[str]
attributes_table, language, lines = Comment.xml_parse_helper(comment_tree)
# Construct and return the final *enumeration_comment*:
enumeration_comment: EnumerationComment = EnumerationComment(language, lines)
return enumeration_comment
# ParameterComment:
class ParameterComment(Comment):
# ParameterComment.__init__():
def __init__(self, language: str, lines: List[str],
long_heading: str, short_heading: str) -> None:
# Initialize the parent *Comment* class with *language* and *lines*:
super().__init__(language, lines)
# Initialize the remaining fields of *parameter_comment*:
# parameter_comment: ParameterComment = self
self.long_heading: str = long_heading
self.short_heading: str = short_heading
# ParameterComment.__eq__():
def __eq__(self, parameter_comment2: object) -> bool:
# `mypy` recommends that the *__eq__* method work with any *object*. So we start
# with *equal* set to *False* and only set it to *True* on success:
equal: bool = False
if isinstance(parameter_comment2, ParameterComment):
parameter_comment1: ParameterComment = self
language_equal: bool = parameter_comment1.language == parameter_comment2.language
lines_equal: bool = parameter_comment1.lines == parameter_comment2.lines
long_equal: bool = parameter_comment1.long_heading == parameter_comment2.long_heading
short_equal: bool = parameter_comment1.short_heading == parameter_comment2.short_heading
equal = language_equal and lines_equal and long_equal and short_equal
return equal
# ParameterComment.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *parameter_comment* (i.e. *self*):
parameter_comment: ParameterComment = self
language: str = parameter_comment.language
lines: List[str] = parameter_comment.lines
long_heading: str = parameter_comment.long_heading
short_heading: str = parameter_comment.short_heading
# Append an XML `<ParameterComment ...>` element to *xml_lines*.
short_heading_text = f" shortHeading={short_heading}" if short_heading else ""
xml_line: str = (f'{indent}<ParameterComment'
f' language="{language}"'
f' longHeading="{long_heading}"'
f'{short_heading_text}>')
# Append *lines* to *xml_lines* indented by *indent* and tack on the closing
# `</ParameterComment>` XML element:
xml_lines.append(xml_line)
line: str
for line in lines:
xml_lines.append(f'{indent} {line}')
xml_lines.append(f'{indent}</ParameterComment>')
# ParameterComment.xml_parse():
@staticmethod
def xml_parse(comment_tree: etree._Element) -> "ParameterComment":
# Grab some values from *comment_tree*:
attributes_table: Dict[str, str]
language: str
lines: List[str]
attributes_table, language, lines = Comment.xml_parse_helper(comment_tree)
# Grab some values from *attributes_table:
assert "longHeading" in attributes_table
long_heading: str = attributes_table["longHeading"]
short_heading: str = (attributes_table["shortHeading"]
if "shortHeading" in attributes_table else "")
# Construct the final *parameter_comment* and return it:
parameter_comment: ParameterComment = ParameterComment(language, lines,
long_heading, short_heading)
return parameter_comment
# SearchComment:
class SearchComment(Comment):
# SearchComment.__init()
def __init__(self, language: str, lines: List[str]) -> None:
# A *SearchComment* is a pure sub-class of the parent *Comment* class:
super().__init__(language, lines)
# SearchComment.xml_parse()
@staticmethod
def xml_parse(comment_tree: etree._Element) -> "SearchComment":
# Grab some values out of *comment_tree*:
attributes_table: Dict[str, str]
language: str
lines: List[str]
attributes_table, language, lines = Comment.xml_parse_helper(comment_tree)
# Construct and return the final *search_comment*:
search_comment: SearchComment = SearchComment(language, lines)
return search_comment
# TableComment:
class TableComment(Comment):
# TableComment.__init__():
def __init__(self, language: str, lines: List[str]) -> None:
# *TableComment* is just a pure sub-class of the parent *Comment* class:
super().__init__(language, lines)
# TableComment.xml_parse():
@staticmethod
def xml_parse(comment_tree: etree._Element) -> "TableComment":
# Grab some values out of *comment_tree*:
attributes_table: Dict[str, str]
language: str
lines: List[str]
attributes_table, language, lines = Comment.xml_parse_helper(comment_tree)
# Construct and return the final *table_comment*:
table_comment: TableComment = TableComment(language, lines)
return table_comment
# Encode:
class Encode:
# Encode.from_attribute():
@staticmethod
def from_attribute(attribute: str) -> str:
characters: List[str] = list()
attribute_size: int = len(attribute)
index: int = 0
while index < attribute_size:
# Grab the *character* and compute the *next_index:
character: str = attribute[index]
next_index: int = index + 1
# Determine if we have an HTML entity:
if character == '&':
# We do have an HTML entity; find the closing ';':
# rest = attribute[index:]
# print(f"rest='{rest}'")
entity: str = ""
for entity_index in range(index, attribute_size):
entity_character = attribute[entity_index]
# print(f"Attribute[{entity_index}]='{entity_character}'")
if entity_character == ';':
next_index = entity_index + 1
entity = attribute[index:next_index]
break
else:
assert False, "No closing ';' for entity"
# print(f"entity='{entity}'")
# Parse the expected entities:
assert len(entity) >= 2, f"Empty HTML entity '{entity}'"
if entity[1] == '#':
# Numeric entity of the form `&#d...d;`, try to parse the decimal digits:
try:
character = chr(int(entity[2:-1]))
except ValueError:
assert False, f"Entity '{entity}' is broken."
elif entity == "&":
character = '&'
elif entity == "<":
character = '<'
elif entity == ">":
character = '>'
elif entity == "'":
character = "'"
elif entity == """:
character = '"'
else:
assert False, f"Unrecognized HTML entity '{entity}'"
else:
# *character* is not the start of an HTML entity. Leave it alone:
pass
# Tack *character* onto *characters* and advance to *next_index*:
characters.append(character)
index = next_index
# Concatenate *characters* into final *text* and return it:
text: str = "".join(characters)
return text
# Encode.from_file_name():
@staticmethod
def from_file_name(file_name: str) -> str:
# Construct a list of *characters* one at a time to join together into final *text*:
characters: List[str] = list()
index: int = 0
file_name_size: int = len(file_name)
while index < file_name_size:
# Dispatch on *character* and compute *next_index*:
character: str = file_name[index]
next_index: int = index + 1
# Dispatch on *character*:
if character == '_':
# Underscores are always translated to spaces:
character = ' '
elif character == '%':
# We should have either "%XX" or "%%XXXX, where "X" is a hexadecimal digit.
# First, ensure that there is a *next_character* following the initial '%':
if next_index < file_name_size:
next_character = file_name[next_index]
# Dispatch on *next_character* to figure out whether we have a 2 or 4
# digit number:
if next_character == '%':
# We have "%%XXXX"" to parse:
hex_index: int = index + 2
next_index = index + 6
else:
# We have "%XX" to parse into a single *character*:
hex_index = index + 1
next_index = index + 3
# Extract the *hex_text* from *file_name* to parse:
assert next_index <= file_name_size, "'%' at end of string is wrong"
hex_text: str = file_name[hex_index:next_index]
# Now attempt to parse *hex_text* into *character*:
try:
character = chr(int(hex_text, 16))
# print(f"'{hex_text}'=>'{character}'")
except ValueError:
assert False, f"'{hex_text}' is invalid from '{file_name}'"
else:
# No character after '%':
assert False, "'%' at end of string"
else:
# Everything else just taken as is:
pass
# Tack *character* (which now may be multiple characters) onto *characters*
# and advance *index* to *next_index*:
characters.append(character)
assert next_index > index
index = next_index
# Join *characters* back into a single *text* string:
text: str = "".join(characters)
return text
# Encode.to_attribute():
@staticmethod
def to_attribute(text: str) -> str:
assert isinstance(text, str)
characters: List[str] = list()
ord_space: int = ord(' ')
ord_tilde: int = ord('~')
character: str
for character in text:
ord_character: int = ord(character)
if ord_space <= ord_character <= ord_tilde:
# *character* is ASCII printable; now convert some of them to HTML entity:
if character == '&':
character = "&amp;"
elif character == '<':
character = "&lt;"
elif character == '>':
character = "&gt;"
elif character == "'":
character = "&apos;"
elif character == '"':
character = "&quot;"
else:
# Non-ASII printable, so use decimal version of HTML entity syntax:
character = f"&#{ord_character};"
characters.append(character)
# Convert *characters* to an *attribute* string and return it:
attribute: str = "".join(characters)
return attribute
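# Illustrative examples of *Encode.to_attribute*() (examples only, not part
# of the original code):
#
#     Encode.to_attribute('5% & up') # => '5% &amp; up'
#     Encode.to_attribute('<10K>') # => '&lt;10K&gt;'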
# Encode.to_csv():
@staticmethod
def to_csv(text: str) -> str:
updated_text: str = text.replace('"', '""')
return f'"{updated_text}"'
# Encode.to_file_name():
@staticmethod
def to_file_name(text: str) -> str:
characters: List[str] = list()
ord_space: int = ord(' ')
ord_tilde: int = ord('~')
ord_del: int = ord('\xff')
character: str
for character in text:
# Dispatch on the integer *ord_character*:
ord_character: int = ord(character)
if ord_character == ord_space:
# Convert *character* space (' ') to an underscore ('_'):
character = '_'
elif ord_space < ord_character <= ord_tilde:
# *character* is in normal visible printing ASCII range, but not a space:
# Since the Unix/Linux shell treats many of the non-alphanumeric ones
# specially, most of them are convert to '%XX' format. The ones that are
# not converted are '+', ',', '.', and ':'. Note that '_' must be converted
# because spaces have been converted to underscores:
if character in "!\"#$%&'()*/;<=>?[\\]^_`{|}~":
character = "%{0:02x}".format(ord_character)
elif ord_character < ord_space or ord_character == ord_del:
# *character* is one of the ASCII control characters to convert into '%XX':
character = "%{0:02x}".format(ord_character)
else:
# *character* is a larger unicode character to convert into '%%XXXX':
character = "%%{0:04x}".format(ord_character)
# Collect the new *character* (which might be several characters) onto *characters*:
characters.append(character)
# Concatenate *characters* into *file_name* and return it:
file_name: str = "".join(characters)
return file_name
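# Illustrative examples of *Encode.to_file_name*() (examples only, not part
# of the original code):
#
#     Encode.to_file_name("R 1") # => "R_1"
#     Encode.to_file_name("10K;1608") # => "10K%3b1608"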
# Encode.to_url():
@staticmethod
def to_url(text: str) -> str:
# Convert *text* into the %XX encoding system used by URL's as per RFC 3986:
character: str
return "".join([character if character.isalnum() or character in "-.!"
else "%0:02x".format(ord(character)) for character in text])
# Encode.test():
@staticmethod
def test() -> None:
printable_ascii: str = "".join([chr(index) for index in range(ord(' '), ord('~')+1)])
Encode.test_both(printable_ascii)
control_ascii: str = "".join([chr(index) for index in range(ord(' '))]) + "\xff"
Encode.test_both(control_ascii)
unicode_characters: str = "\u03a9Ω\u03bcμ"
Encode.test_both(unicode_characters)
# Encode.test_attribute():
@staticmethod
def test_attribute(before_attribute: str) -> None:
assert isinstance(before_attribute, str)
# print(f"before_attribute='{before_attribute}'")
attribute_text: str = Encode.to_attribute(before_attribute)
# print(f"attribute_text='{attribute_text}'")
after_attribute: str = Encode.from_attribute(attribute_text)
# print(f"after_attribute='{after_attribute}'")
Encode.test_compare(before_attribute, after_attribute)
# Encode.test_both():
@staticmethod
def test_both(text: str) -> None:
assert isinstance(text, str)
Encode.test_attribute(text)
Encode.test_file_name(text)
# Encode.test_compare():
@staticmethod
def test_compare(text1: str, text2: str) -> None:
if text1 != text2:
text1_size: int = len(text1)
text2_size: int = len(text2)
text_size: int = min(text1_size, text2_size)
index: int
character: str
for index in range(text_size):
character1: str = text1[index]
character2: str = text2[index]
assert character1 == character2, (f"Mismatch at index={index}"
f" '{character1}' != '{character2}'"
f" text1='{text1}' text2='{text2}'")
assert text1_size == text2_size
# Encode.test_file_name():
@staticmethod
def test_file_name(before_text: str) -> None:
assert isinstance(before_text, str)
file_name_text: str = Encode.to_file_name(before_text)
after_text: str = Encode.from_file_name(file_name_text)
Encode.test_compare(before_text, after_text)
# Enumeration:
class Enumeration:
# Enumeration.__init__():
def __init__(self, name: str, comments: List[EnumerationComment]) -> None:
# Load values into *enumeration* (i.e. *self*):
# enumeration: Enumeration = self
self.name: str = name
self.comments: List[EnumerationComment] = comments
# Enumeration.__eq__():
def __eq__(self, enumeration2: object) -> bool:
equal: bool = False
if isinstance(enumeration2, Enumeration):
enumeration1: Enumeration = self
name_equal: bool = enumeration1.name == enumeration2.name
comments_equal: bool = enumeration1.comments == enumeration2.comments
equal = name_equal and comments_equal
return equal
# Enumeration.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Append an `<Enumeration>` element to *xml_lines*:
enumeration: Enumeration = self
name: str = enumeration.name
name_attribute: str = Encode.to_attribute(name)
xml_lines.append(f'{indent}<Enumeration name="{name_attribute}">')
comment: EnumerationComment
for comment in enumeration.comments:
comment.xml_lines_append(xml_lines, indent + " ")
xml_lines.append(f'{indent}</Enumeration>')
# Enumeration.xml_parse():
@staticmethod
def xml_parse(enumeration_tree: etree._Element) -> "Enumeration":
assert enumeration_tree.tag == "Enumeration"
attributes_table: Dict[str, str] = enumeration_tree.attrib
assert "name" in attributes_table
name: str = attributes_table["name"]
comments_tree: List[etree._Element] = list(enumeration_tree)
comments: List[EnumerationComment] = list()
comment_tree: etree._Element
for comment_tree in comments_tree:
comment: EnumerationComment = EnumerationComment.xml_parse(comment_tree)
comments.append(comment)
assert comments
enumeration: Enumeration = Enumeration(name, comments)
return enumeration
# Filter:
class Filter:
# Filter.__init__():
def __init__(self, table: "Table", parameter: "Parameter", use: bool, select: str) -> None:
# Load up *filter* (i.e. *self*):
# filter: Filter = self
self.table: Table = table
self.parameter: Parameter = parameter
self.select: str = select
self.use: bool = use
# Filter.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *filter* (i.e. *self*):
filter: "Filter" = self
parameter: Parameter = filter.parameter
use: bool = filter.use
select: str = filter.select
parameter_name: str = parameter.name
# Output the initial `<Filter ...>` XML element:
xml_lines.append(f'{indent}<Filter '
f'name="{parameter_name}" '
f'use="{use}" '
f'select="{select}">')
# Append any *enumerations* from *parameter*:
enumerations: List[Enumeration] = parameter.enumerations
if enumerations:
xml_lines.append(f'{indent} <FilterEnumerations>')
enumeration: Enumeration
for enumeration in enumerations:
enumeration_name: str = enumeration.name
match: bool = False
xml_lines.append(f'{indent} <FilterEnumeration '
f'name="{enumeration_name}" '
f'match="{match}"/>')
xml_lines.append(f'{indent} </FilterEnumerations>')
# Wrap up `<Filter...>` element:
xml_lines.append(f'{indent}</Filter>')
# Filter.xml_parse()
@staticmethod
def xml_parse(filter_tree: etree._Element, table: "Table") -> "Filter":
# Grab the attributes from *filter_tree*:
attributes_table: Dict[str, str] = filter_tree.attrib
assert "name" in attributes_table
parameter_name: str = attributes_table["name"]
assert "match" in attributes_table
match_text: str = attributes_table["match"].lower()
assert match_text in ("true", "false")
use: bool = match_text == "true"
parameters: List[Parameter] = table.parameters
parameter: Parameter
for parameter in parameters:
if parameter.name == parameter_name:
break
else:
assert False, f"No parameter name '{parameter_name}' not found"
# Create the resulting *filter* and return it:
select: str = ""
assert False, "What is select?"
filter: Filter = Filter(table, parameter, use, select)
return filter
# Footprint:
class Footprint:
""" *Footprint*: Represents a PCB footprint. """
# Footprint.__init__():
def __init__(self, name: str, rotation: float) -> None:
""" *Footprint*: Initialize a new *FootPrint* object.
The arguments are:
* *name* (str): The unique footprint name.
* *rotation* (degrees): The amount to rotate the footprint to match the feeder tape with
holes on top.
"""
# Stuff values into *footprint* (i.e. *self*):
# footprint: Footprint = self
self.name: str = name
self.rotation: float = rotation
# Inventory:
class Inventory:
# Inventory.__init__():
def __init__(self, project_part: "ProjectPart", amount: int) -> None:
""" *Inventory*: Initialize *self* to contain *scheamtic_part* and
*amount*. """
# Load up *inventory* (i.e. *self*):
# inventory: Inventory = self
self.project_part: ProjectPart = project_part
self.amount: int = amount
# The *Node* class and its associated sub-classes *Collections*, *Collection*, *Directory*,
# *Table*, *Search* (and eventually *Filter*) are designed to be displayed in the GUI
# (Graphical User Interface) using a *QTreeView* widget. The graphical display is not
# required, so non-GUI code can use this *bom_manager* module as well.
# When the GUI is displayed, it uses a *QTreeView* widget in conjunction with the *TreeModel*
# class (which is a sub-class of *QAbstractItemModel*.)
#
# The tree model looks like:
#
# Collections
# Collection1
# Directory1
# SubDirectory1
# ...
# Sub...SubDirectory1
# Table1
# Search1
# Search2
# ..
# SearchN
# Table2
# ...
# TableN
# ...
# Sub...SubDirectoryN
# ...
# SubDirectoryN
# ...
# DirectoryN
# ...
# CollectionN
#
# To summarize:
#
# * There is a top-most *Collections* object that is the root of the tree.
# * There are zero, one or more *Collection* objects under the *Collections* object.
# * There are zero, one or more *Directory* objects under each *Collection* object.
# * Each *Directory* object can have zero, one or more sub-*Directory* objects and/or
# zero, one or more *Table* objects.
# * Each *Table* object can have zero, one or more *Search* objects.
#
# {Talk about file system structure here:}
# Gui:
class Gui:
""" Represents some callback interfaces to the GUI if it exists. """
# Gui.__init__():
def __init__(self) -> None:
# Construct a bunch of regular expressions:
si_units_re_text: str = Units.si_units_re_text_get()
float_re_text: str = "-?([0-9]+\\.[0-9]*|\\.[0-9]+)"
white_space_text: str = "[ \t]*"
integer_re_text: str = "-?[0-9]+"
integer_re: PreCompiled = re.compile(integer_re_text + "$")
float_re: PreCompiled = re.compile(float_re_text + "$")
url_re: PreCompiled = re.compile("(https?://)|(//).*$")
empty_re: PreCompiled = re.compile("-?$")
funits_re: PreCompiled = re.compile(float_re_text +
white_space_text + si_units_re_text + "$")
iunits_re: PreCompiled = re.compile(integer_re_text + white_space_text +
si_units_re_text + "$")
range_re: PreCompiled = re.compile("[^~]+~[^~]+$")
list_re: PreCompiled = re.compile("([^,]+,)+[^,]+$")
re_table: Dict[str, PreCompiled] = {
"Empty": empty_re,
"Float": float_re,
"FUnits": funits_re,
"Integer": integer_re,
"IUnits": iunits_re,
"List": list_re,
"Range": range_re,
"URL": url_re,
}
# gui: Gui = self
self.re_table: Dict[str, PreCompiled] = re_table
# Gui.__str():
def __str__(self) -> str:
return "GUI()"
# Gui.begin_model_reset():
def begin_model_reset(self) -> None:
pass
# Gui.begin_rows_insert():
def begin_rows_insert(self, node: "Node",
start_row_index: int, end_row_index: int) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.begin_rows_remove():
def begin_rows_remove(self, node: "Node",
start_row_index: int, end_row_index: int) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.collection_clicked():
def collection_clicked(self, collection: "Collection") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.collection_clicked() has not been implemented yet"
# Gui.collection_panel_update():
@trace(1)
def collection_panel_update(self, collection: "Collection") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.collection_panel_update() has not been implmented yet"
# Gui.collections_clicked():
def collections_clicked(self, collections: "Collections") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.collections_clicked() has not been implemented yet"
# Gui.data_changed():
def data_changed(self, node: "Node", begin_row_index: int, end_row_index: int) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.directory_clicked():
def directory_clicked(self, directory: "Directory") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.directory_clicked() has not been implemented yet"
# Gui.directory_panel_update():
def directory_panel_update(self, directory: "Directory") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.directory_panel_update() has not been implemented yet"
# Gui.end_model_reset():
def end_model_reset(self) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.end_rows_insert():
def end_rows_insert(self, node: "Node", start_row_index: int, end_row_index: int) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.end_rows_remove():
def end_rows_remove(self, node: "Node", start_row_index: int, end_row_index: int) -> None:
pass # Do nothing for the non-GUI version of the code:
# Gui.search_clicked():
def search_clicked(self, search: "Search") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.search_clicked() has not been implemented yet"
# Gui.search_panel_update()
def search_panel_update(self, search: "Search") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.search_panel_update() has not been implmented yet."
# Gui.table_clicked():
def table_clicked(self, table: "Table") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.table_clicked() has not been implemented yet"
# Gui.table_panel_update()
def table_panel_update(self, table: "Table") -> None:
gui: Gui = self
class_name: str = gui.__class__.__name__
assert False, f"{class_name}.table_panel_update() has not been implmented yet."
# Node:
class Node:
""" Represents a single *Node* suitable for use in a *QTreeView* tree. """
# Node.__init__():
def __init__(self, name: str, parent: "Node", collection: "Collection",
gui: Optional[Gui] = None) -> None:
# Do some additional checking for *node* (i.e. *self*):
node: Node = self
# We have to special case the computation of *relative_path* base on *node* type:
relative_path: str = "??"
if isinstance(node, Collections):
# A *Collections* object has no meaningful *relative_path*:
relative_path = ""
elif isinstance(node, Collection):
            # A *Collection* object starts with its file encoded *name* as the root directory:
relative_path = Encode.to_file_name(name)
else:
# All other *node*'s construct their *relative_path* from the *parent* and *name*:
relative_path = os.path.join(parent.relative_path, Encode.to_file_name(name))
# Make sure we have a valid *gui* object:
if gui is None:
assert collection is not None
gui = collection.gui
assert isinstance(gui, Gui)
# Load up *node* (i.e. *self*):
node = self
        self._children: List[Node] = list()    # *Node* sub-classes should use *children_get*()
self.is_sorted: bool = False # Set to *True* when children sorted
self.gui: Gui = gui # The *gui* object to use for GUI updates
self.collection: Collection = collection # Parent *Collection* for *node*
        self.name: str = name                   # Human readable name of *node*
self.parent: Node = parent # Parent *Node* (*self* for *Collections*)
self.relative_path: str = relative_path # Relative path from root to *node* name wo/suffix
# To construct a path to the file/directory associated with a *node*:
# 1. Start with either *node.collection.collection_root* or
# *node.collection.searches_root*,
# 2. Append *relative_path*,
# 3. If appropriate, append `.xml`.
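        # Illustration (assuming the suffix conventions used by *Table.file_load*()
        # and *Search.file_load*() below):
        #     table file:  os.path.join(collection.collection_root, relative_path) + ".xml"
        #     search file: os.path.join(collection.searches_root, relative_path) + ".xml"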
# Force *node* to be a child of *parent*:
if parent is not node:
parent.child_append(node)
# Node.can_fetch_more():
def can_fetch_more(self) -> bool:
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.can_fetch_more() needs to be implemented"
return True
# Node.child_append():
@trace(1)
def child_append(self, child: "Node") -> None:
node: Node = self
children: List[Node] = node._children
children_size: int = len(children)
node.child_insert(child, children_size)
node.is_sorted = False
# Node.child_count():
def child_count(self) -> int:
# Return the number of children associated with *node* (i.e. *self*):
node: Node = self
count: int = len(node._children)
return count
# Node.child_delete():
def child_delete(self, index: int) -> None:
# Let *gui* know that the *index*'th row is about to be removed:
node: Node = self
gui = node.gui
gui.begin_rows_remove(node, index, index)
        # gui.begin_model_reset()
# Grab some values out of *node* (i.e. *self*):
node = self
if not node.is_sorted:
node.sort()
# Verify that *index* is in bounds:
children = node._children
children_size = len(children)
assert 0 <= index < children_size, f"Index out of bounds {index} >= {children_size}"
# Perform the actual deletion:
del children[index]
# Let *gui* know that the row has been deleted:
gui.end_rows_remove(node, index, index)
# gui.end_model_reset()
# Node.child_fetch():
def child_fetch(self, index: int) -> "Node":
node: Node = self
if not node.is_sorted:
node.sort()
children: List[Node] = node._children
children_size: int = len(children)
assert 0 <= index < len(children), f"Index out of bounds {index} >= {children_size}"
child: Node = children[index]
return child
# Node.child_insert():
def child_insert(self, child: "Node", index: int) -> None:
# Verify that *index* is valid for inserting into *node* (i.e. *self*):
node: Node = self
children = node._children
children_size = len(children)
assert 0 <= index <= children_size, f"Bad index={index} size={children_size}"
        # Let *gui* know that we are about to insert *child* into *children*
        # (i.e. at *index*):
gui: Gui = node.gui
gui.begin_rows_insert(node, index, index)
# gui.begin_model_reset()
# Now stuff *child* into *children* at *index*:
children.insert(index, child)
child.parent = node
# Wrap up *gui* row insert:
gui.end_rows_insert(node, index, index)
# gui.end_model_reset()
# Node.child_remove()
def child_remove(self, child: "Node") -> None:
# Find the *index* of *child* in *node* (i.e. *self*) and delete it:
node: Node = self
children: List[Node] = node._children
index: int = children.index(child)
assert index >= 0
node.child_delete(index)
# Node.children_get():
def children_get(self) -> "List[Node]":
# Return the children of *node* (i.e. *self*):
node: Node = self
if not node.is_sorted:
node.sort()
children: "List[Node]" = node._children
return children
# Node.clicked():
def clicked(self, gui: Gui) -> None:
        # Fail with a more useful error message than "no such method":
node: Node = self
assert False, f"Node.clicked() needs to be overridden for type ('{type(node)}')"
# Node.csvs_download():
def csvs_download(self, csvs_directory: str, downloads_count: int) -> int:
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.csvs_download() has not been implmented yet!"
return 0
# Node.csv_read_and_process():
def csv_read_and_process(self, csv_directory: str, bind: bool, gui: Gui) -> None:
        # Fail with a more useful error message than "no such method":
node: Node = self
assert False, f"Node sub-class '{type(node)}' does not implement csv_read_and_process"
# Node.directories_get():
def directories_get(self) -> "List[Directory]":
node: Node = self
assert False, f"Node.directories_get() for node of type '{type(node)}'"
return list()
# Node.directory_create():
def directory_create(self, root_path: str) -> None:
node: "Node" = self
parent: Node = node.parent
parent_relative_path: str = parent.relative_path
directory_path: str = os.path.join(root_path, parent_relative_path)
if not os.path.isdir(directory_path):
os.makedirs(directory_path)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}Created directory '{directory_path}'")
# Node.fetch_more():
def fetch_more(self) -> None:
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.fetch_more() has not been implmented yet."
# Node.has_child():
def has_child(self, sub_node: "Node") -> bool:
# Start with *found* set to *False* and only set to *True* if *sub_node* is found
# in the *children* of *node* (i.e. *self*):
node: "Node" = self
found: bool = False
children: List[Node] = node._children
child: "Node"
for child in children:
if sub_node is child:
found = True
break
return found
# Node.has_children():
def has_children(self) -> bool:
# Return *True* if *node* (i.e. *self*) has one or more children:
node: "Node" = self
children: "List[Node]" = node._children
has_children: bool = len(children) > 0
return has_children
# Node.gui_get():
def gui_get(self) -> Gui:
# Return *gui* for *node* (i.e. *self*):
node: Node = self
gui: Gui = node.gui
return gui
# Node.key_function_get():
def key_function_get(self) -> "Callable[[Node], Any]":
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.key_function_get() is not implemented yet"
# Node.name_get():
def name_get(self) -> str:
# Grab *title* from *node* (i.e. *self*):
node: "Node" = self
name: str = node.name
return name
# Node.panel_update():
@trace(1)
def panel_update(self, gui: Gui) -> None:
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.panel_update() is not implmented yet."
# Node.remove():
def remove(self, remove_node: "Node") -> None:
node: "Node" = self
children: "List[Node]" = node._children
index: int
child: Node
for index, child in enumerate(children):
if child is remove_node:
del children[index]
break
else:
assert False, f"Node '{remove_node.name}' not in '{node.name}' remove failed"
# Node.row():
def row(self) -> int:
# Return the index of *node* (i.e. *self*) from its parent children list:
node: Node = self
parent: Node = node.parent
parent_children: List[Node] = parent._children
result: int = parent_children.index(node)
return result
# Node.sort():
@trace(1)
def sort(self) -> None:
node: Node = self
if not node.is_sorted:
            # Grab the *key_function* for *node* and sort *children* using it:
children: List[Node] = node._children
if len(children) >= 1:
child0: Node = children[0]
key_function: "Callable[[Node], Any]" = child0.key_function_get()
children.sort(key=key_function)
# Generate the *data_changed* signal:
collection: Collection = node.collection
gui: Gui = collection.gui
children_size: int = len(children)
gui.data_changed(node, 0, children_size - 1)
            # Remember that *node* *is_sorted*:
node.is_sorted = True
# Node.sort_helper():
@trace(1)
def sort_helper(self, key_get: "Callable[[Node], Any]") -> None:
# Sort the *children* of *node* (i.e. *self*) using *key_function*:
node: Node = self
children: List[Node] = node._children
children_size: int = len(children)
children.sort(key=key_get)
# Let the *gui* know that all of the rows may have changed their values
gui: Gui = node.gui
gui.data_changed(node, 0, children_size - 1)
# Node.tables_get():
def tables_get(self) -> "List[Table]":
        # This routine should never actually be called:
node: Node = self
assert False, f"Node.tables_get() called with a node of type '{type(node)}'"
# Node.type_letter_get():
def type_letter_get(self) -> str:
node: Node = self
class_name: str = node.__class__.__name__
assert False, f"{class_name}.type_lettet_get() has not been implemented yet."
return "N"
# Directory:
class Directory(Node):
# Directory.__init__():
def __init__(self, name, parent) -> None:
# Perform some additional checking on *parent*:
assert isinstance(parent, Directory) or isinstance(parent, Collection)
        # Initialize the *Node* super class for directory (i.e. *self*):
parent_collection: Collection = parent.collection
super().__init__(name, parent, parent_collection)
# The parent *Node* class initialized *directory* (i.e. *self*) to have a *relative_path*:
directory: Directory = self
relative_path: str = directory.relative_path
tracing: str = tracing_get()
if tracing:
print(f"{tracing}relative_path='{relative_path}'")
# Directory.__str__():
def __str__(self):
directory: Directory = self
return f"Directory('{directory.name}')"
# Directory.append():
def append(self, node: Node) -> None:
assert isinstance(node, Directory) or isinstance(node, Table)
directory: Directory = self
directory.child_append(node)
# Directory.can_fetch_more():
def can_fetch_more(self) -> bool:
        # The call to *Directory.partial_load*() pre-loaded all of the sub-directories and
        # tables for *directory* (i.e. *self*).  That means there is nothing more to fetch.
return False
# Directory.clicked():
def clicked(self, gui: Gui) -> None:
# Send the clicked event back to the *gui* along with *directory* (i.e. *self*):
directory: Directory = self
gui.directory_clicked(directory)
# Directory.directories_get():
def directories_get(self) -> "List[Directory]":
directory: Directory = self
directories: List[Directory] = [directory]
node: Node
for node in directory.children_get():
directories.extend(node.directories_get())
return directories
# Directory.name_get():
def name_get(self) -> str:
directory: Directory = self
name: str = directory.name
return name
# Directory.key():
@staticmethod
def key(directory: Node) -> Any:
name: str = directory.name
return (name, )
# Directory.key_function_get():
def key_function_get(self) -> Callable[[Node], Any]:
return Directory.key
# Directory.partial_load():
def partial_load(self) -> None:
# Compute the *full_path* for the *collection* sub-*directory*:
directory: Directory = self
relative_path: str = directory.relative_path
assert isinstance(directory.collection, Collection)
collection: Collection = directory.collection
collection_root: str = collection.collection_root
full_path: str = os.path.join(collection_root, relative_path)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}collection_root='{collection_root}'")
print(f"{tracing}relative_path='{relative_path}'")
print(f"{tracing}full_path='{full_path}'")
assert os.path.isdir(full_path), f"Directory '{full_path}' does not exist.!"
# Visit all of the files and directories in *directory_path*:
index: int
file_or_directory_name: str
for index, file_or_directory_name in enumerate(sorted(list(os.listdir(full_path)))):
if tracing:
print(f"{tracing}File_Name[{index}]:'{file_or_directory_name}'")
# Skip over any files/directories that start with '.':
if not file_or_directory_name.startswith('.'):
# Recursively do a partial load for *full_path*:
sub_relative_path: str = os.path.join(relative_path, file_or_directory_name)
sub_full_path: str = os.path.join(full_path, file_or_directory_name)
if tracing:
print(f"{tracing}sub_relative_path='{sub_relative_path}'")
print(f"{tracing}sub_full_path='{sub_full_path}'")
if os.path.isdir(sub_full_path):
# *full_path* is a directory:
name: str = Encode.from_file_name(file_or_directory_name)
sub_directory: Directory = Directory(name, directory)
assert directory.has_child(sub_directory)
sub_directory.partial_load()
elif sub_full_path.endswith(".xml"):
# Full path is a *Table* `.xml` file:
name = Encode.from_file_name(file_or_directory_name[:-4])
url: str = "bogus URL"
table: Table = Table(name, directory, url)
assert directory.has_child(table)
sub_relative_path = os.path.join(relative_path, name)
table.partial_load()
else:
assert False, f"'{full_path}' is neither an .xml nor a directory"
# Directory.panel_update():
@trace(1)
def panel_update(self, gui: Gui) -> None:
directory: Directory = self
gui.directory_panel_update(directory)
    # Directory.tables_get():
def tables_get(self) -> "List[Table]":
directory: Directory = self
tables: List[Table] = list()
node: Node
for node in directory.children_get():
node_tables: List[Table] = node.tables_get()
tables.extend(node_tables)
return tables
# Directory.type_letter_get():
def type_letter_get(self) -> str:
return 'D'
# Collection:
class Collection(Node):
# Collection.__init__():
@trace(1)
def __init__(self, name: str, parent: Node,
collection_root: str, searches_root: str, gui: Gui) -> None:
        # Initialize the *Node* super class of *collection* (i.e. *self*).
collection: Collection = self
super().__init__(name, parent, collection, gui=gui)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}collection.relative_path='{collection.relative_path}'")
# Do some additional checking on *collections* (i.e. *parent*):
assert isinstance(parent, Collections)
collections: Collections = parent
assert collections.has_child(collection)
# Stuff some additional values into *collection*:
self.collection_root: str = collection_root
self.plugin: Optional[Callable] = None
self.searches_root: str = searches_root
self.urls_table: Dict[str, Search] = dict()
self.searches_table: Dict[str, Search] = dict()
self.gui: Gui = collections.gui
# Ensure that *type_letter_get()* returns 'C' for Collection:
assert collection.type_letter_get() == 'C'
# Collection.__str__():
def __str__(self) -> str:
collection: Collection = self
name: str = "??"
if hasattr(collection, "name"):
name = collection.name
return f"Collection('{name}')"
# Collection.actual_parts_lookup():
@trace(1)
def actual_parts_lookup(self, choice_part: "ChoicePart") -> List[ActualPart]:
# Grab some values from *collection* (i.e. *self*) and *choice_part*:
collection: Collection = self
searches_table: Dict[str, Search] = collection.searches_table
searches_root: str = collection.searches_root
choice_part_name: str = choice_part.name
# Get some time values:
stale_time: int = 2 * 24 * 60 * 60 # 2 days in seconds
now: int = int(time.time())
# FIXME: This code should be in Search.actual_parts_lookup()!!!
tracing: str = tracing_get()
actual_parts: List[ActualPart] = []
# Build up *actual_parts* from *collection* (i.e. *self*):
if choice_part_name in searches_table:
# We have a *search* that matches *search_name*:
search = searches_table[choice_part_name]
# Force *search* to read in all of its information from its associated `.xml` file:
search.file_load()
# Grab some values from *search*:
assert isinstance(search.collection, Collection)
search_name: str = search.name
search_url: str = search.url
relative_path: str = search.relative_path
if tracing:
print(f"{tracing}search_name='{search_name}'")
print(f"{tracing}search_url='{search_url}'")
print(f"{tracing}relative_path='relative_path'")
assert search_name == choice_part_name
# Compute the *csv_file_name* of where the `.csv` file associated with *search_url*
# is (or will be) stored:
csv_file_name: str = os.path.join(searches_root, relative_path + ".csv")
if tracing:
print(f"{tracing}csv_file_name='{csv_file_name}'")
            # Compute the *csv_modification_time* (0 if the `.csv` file does not exist):
csv_modification_time: int = (int(os.path.getmtime(csv_file_name))
if os.path.isfile(csv_file_name) else 0)
if csv_modification_time + stale_time < now:
assert isinstance(collection, Collection)
collection.csv_fetch(search_url, csv_file_name)
# Read in the *csv_file_name*:
assert os.path.isfile(csv_file_name)
data_rows: List[List[str]] = []
column_names: List[str] = []
csv_file: IO[str]
with open(csv_file_name) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
row_index: int
row: List[str]
for row_index, row in enumerate(csv_reader):
# print(f"[{index}]: {row}")
if row_index == 0:
column_names = row
else:
data_rows.append(row)
if tracing:
print(f"len(data_rows)={len(data_rows)} ; excludes header")
manufacturer_part_number_index: int = column_names.index("Manufacturer Part Number")
assert manufacturer_part_number_index >= 0
manufacturer_index: int = column_names.index("Manufacturer")
assert manufacturer_index >= 0
duplicate_removal_table: Dict[Tuple[str, str], Tuple[str, str]] = dict()
manufacturer: str
manufacturer_part_number: str
pair: Tuple[str, str]
for index, data_row in enumerate(data_rows):
manufacturer = data_row[manufacturer_index]
manufacturer_part_number = data_row[manufacturer_part_number_index]
pair = (manufacturer, manufacturer_part_number)
duplicate_removal_table[pair] = pair
# print(f"Row[{index}]: '{manufacturer} : '{manufacturer_part_number}'")
pairs: List[Tuple[str, str]] = list(duplicate_removal_table.keys())
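            # Example: duplicated rows such as ("TI", "X1"), ("TI", "X1"), ("ST", "Y2")
            # collapse to just ("TI", "X1") and ("ST", "Y2"), since equal tuples share
            # a single dictionary key.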
for index, pair in enumerate(pairs):
manufacturer, part_number = pair
if tracing:
print(f"{tracing}Unique_Actual_Part[{index}]: '{manufacturer}': "
f"'{part_number}'")
actual_part: ActualPart = ActualPart(manufacturer, part_number)
actual_parts.append(actual_part)
return actual_parts
# Collection.can_fetch_more():
def can_fetch_more(self) -> bool:
        # All of the directories for *collection* (i.e. *self*) have been previously found
        # using *Collection.partial_load*().  So, there are no more *Directory*'s
        # to be loaded.
return False
# Collection.clicked():
def clicked(self, gui: Gui) -> None:
collection: Collection = self
gui.collection_clicked(collection)
# Collection.directories_get():
def directories_get(self) -> List[Directory]:
collection: Collection = self
directories: List[Directory] = list()
node: Node
for node in collection.children_get():
directories.extend(node.directories_get())
return directories
# Collection.key():
@staticmethod
def key(collection: "Collection") -> Any:
name: str = collection.name
return (name, )
# Collection.key_function_get():
def key_function_get(self) -> Callable[[Node], Any]:
return Collection.key
# Collection.partial_load():
@trace(1)
def partial_load(self) -> None:
# Visit all of the directories and files in *collection_root*:
collection: Collection = self
collection_root: str = collection.collection_root
relative_path: str = collection.relative_path
assert relative_path == Encode.to_file_name(collection.name)
directory_path: str = os.path.join(collection_root, relative_path)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}collection_root='{collection_root}'")
print(f"{tracing}relative_path='{relative_path}'")
print(f"{tracing}directory_path='{directory_path}'")
assert os.path.isdir(directory_path), f"'{directory_path}' is not a directory"
index: int
base_name: str
for index, base_name in enumerate(list(sorted(os.listdir(directory_path)))):
if tracing:
print(f"{tracing}File_Name[{index}]:'{base_name}'")
# Compute a *full_path* from *collection_root* and *base_name*:
full_path: str = os.path.join(directory_path, base_name)
if tracing:
print(f"{tracing}full_path='{full_path}'")
if not base_name.startswith('.'):
if base_name.endswith(".xml"):
assert False, "Top level tables not implemented yet"
elif os.path.isdir(full_path):
name: str = Encode.from_file_name(base_name)
directory: Directory = Directory(name, collection)
assert collection.has_child(directory)
directory.partial_load()
else:
assert False, f"'{base_name}' is neither an .xml file nor a directory"
# Collection.searches_find():
def searches_find(self, search_name: str) -> "Optional[Search]":
# Grab some values from *collection* (i.e. *self*):
collection: Collection = self
searches_table: Dict[str, Search] = collection.searches_table
# Find a *search* that matches *search_name*:
search: Optional[Search] = None
if search_name in searches_table:
search = searches_table[search_name]
return search
# Collection.searches_insert():
def searches_insert(self, search: "Search") -> None:
search_name: str = search.name
if search_name[0] != '@':
collection: Collection = self
searches_table: Dict[str, Search] = collection.searches_table
assert search_name not in searches_table, f"Search '{search_name}' already in table"
searches_table[search_name] = search
# Collection.searches_remove():
def searches_remove(self, search: "Search") -> None:
collection: Collection = self
searches_table: Dict[str, Search] = collection.searches_table
search_name: str = search.name
search_url: str = search.url
assert search_name[0] != '@', f"Trying to remove template '{search_name}' from table"
assert search_name in searches_table, "Search '{search_name} not found"
del searches_table[search_name]
collection.url_remove(search_url)
# Collection.tables_get():
def tables_get(self) -> "List[Table]":
collection: Collection = self
tables: List[Table] = list()
node: Node
for node in collection.children_get():
tables.extend(node.tables_get())
return tables
    # Collection.type_letter_get():
def type_letter_get(self) -> str:
# print("Collection.type_letter_get(): name='{0}'".format(self.name))
return 'C'
# Collection.url_find():
def url_find(self, url: str) -> "Optional[Search]":
# Grab some values from *collection* (i.e. *self*):
collection: Collection = self
urls_table: Dict[str, Search] = collection.urls_table
# Find a *search* that matches *search_name*:
search: Optional[Search] = None
if url in urls_table:
search = urls_table[url]
return search
# Collection.url_insert():
def url_insert(self, search: "Search") -> None:
collection: Collection = self
urls_table: Dict[str, Search] = collection.urls_table
url: str = search.url
assert url not in urls_table, f"URL is already in table '{url}'"
urls_table[url] = search
# Collection.url_remove():
def url_remove(self, url: str) -> None:
collection: Collection = self
urls_table: Dict[str, Search] = collection.urls_table
assert url in urls_table, f"URL not in table '{url}'"
del urls_table[url]
# Collections:
class Collections(Node):
# Collections.__init__():
@trace(1)
def __init__(self, name: str, searches_root: str, partial_load: bool, gui: Gui) -> None:
# This code is pretty fragile. In order for the *Node* object to have a
# *parent* attribute that is of type *Node* rather than *Optional[Node]*,
        # we make the *Collections* object's parent be itself.  Thus,
#
# collections = collections.parent
#
# The *Node* initializer (i.e. *Node.__init__()* needs a *Collection* object. So, we
# have the *Collections* object needing a *Collection* and vice versa. We break
# this self dependency by creating a *bogus_collection* first. This works because
# the *Collection* initializer (i.e. *Collection.__init__()* uses itself as its
# *Collections* object.
#
# In addition, there is some code in *Node.__init__()* that special cases
# the creation of *Collections* and *Collection* objects:
# We start by preinitializing some fields of *collections* (i.e. *self*) before
# calling *Collection.__init__()* initializer (which needs these fields):
bogus_children: List[Node] = list()
self._children: List[Node] = bogus_children
self.gui: Gui = gui
# Create a *bogus_collection* which we need to feed to the *Node.__init__*():
collections: Collections = self
bogus_collection: Collection = Collection("Bogus Collection", collections, "", "", gui)
        # Initialize the *Node* super class of *collections* (i.e. *self*):
super().__init__(name, collections, bogus_collection, gui=gui)
# Now we can define the additional fields that we need
self.searches_root: str = searches_root
self.bogus_collection: Collection = bogus_collection
# Construct the collections list:
tracing: str = tracing_get()
entry_point_key: str = "bom_manager_collection_get"
index: int
entry_point: pkg_resources.EntryPoint
for index, entry_point in enumerate(pkg_resources.iter_entry_points(entry_point_key)):
entry_point_name: str = entry_point.name
if tracing:
print(f"{tracing}Collection_Entry_Point[{index}]: '{entry_point_name}'")
assert entry_point_name == "collection_get"
collection_get: Callable[[Collections, str, Gui], Collection] = entry_point.load()
assert callable(collection_get)
collection: Collection = collection_get(collections, searches_root, gui)
assert isinstance(collection, Collection)
if partial_load:
collection.partial_load()
# collections.child_append(collection)
# Do some *tracing*:
if tracing:
relative_path = collections.relative_path
print(f"{tracing}searchs_root='{searches_root}'")
print(f"{tracing}relative_path='{relative_path}'")
        # Ensure that *type_letter_get()* returns 'R' for the collections Root:
assert collections.type_letter_get() == 'R'
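    # A collection plug-in package advertises itself with an entry point.  The
    # following `setup.py` fragment is illustrative only -- the package and module
    # names are hypothetical, but the group and name strings are the ones asserted
    # above:
    #
    #     entry_points={
    #         "bom_manager_collection_get": [
    #             "collection_get = my_collection_plugin:collection_get",
    #         ],
    #     }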
# Collections.__str__():
def __str__(self) -> str:
# collections = self
return f"Collections('Collections')"
# Collections.actual_parts_lookup():
@trace(1)
def actual_parts_lookup(self, choice_part: "ChoicePart") -> List[ActualPart]:
# Visit each *collection* in *collections* (i.e. *self*) and find any
# *ActualPart*'s that match *search_name*:
collections: Collections = self
actual_parts: List[ActualPart] = []
tracing: str = tracing_get()
index: int
collection: Node
for index, collection in enumerate(collections.children_get()):
assert isinstance(collection, Collection)
if tracing:
print(f"{tracing}Collection[{index}]:{collection.name}")
collection_actual_parts: List[ActualPart] = collection.actual_parts_lookup(choice_part)
actual_parts.extend(collection_actual_parts)
        # FIXME: Cull out duplicate actual parts (i.e. for the same manufacturer):
pass
return actual_parts
# Collections.can_fetch_more():
def can_fetch_more(self) -> bool:
        # The children of *collections* (i.e. *self*) have already been preloaded by
        # *Collections.partial_load*().  There is nothing more to fetch:
return False
# Collections.check():
def check(self, search_name: str, project_name: str, reference: str) -> None:
        # Find all *matching_searches* that match *search_name* in *collections* (i.e. *self*):
collections: Collections = self
matching_searches: List[Search] = list()
collection: Node
for collection in collections.children_get():
assert isinstance(collection, Collection)
searches_table: Dict[str, Search] = collection.searches_table
if search_name in searches_table:
matching_search: Search = searches_table[search_name]
matching_searches.append(matching_search)
# Output error if nothing is found:
if not matching_searches:
print(f"{project_name}: {reference} '{search_name}' not found")
# Collections.clicked():
def clicked(self, gui: Gui) -> None:
collections: Collections = self
gui.collections_clicked(collections)
# Collections.key():
@staticmethod
def key(collections: "Collections") -> Any:
name: str = collections.name
return (name, )
# Collections.key_function_get():
def key_function_get(self) -> Callable[[Node], Any]:
return Collections.key
# Collections.partial_load():
def partial_load(self) -> None:
# Extract some values from *collections*:
collections: Collections = self
gui: Gui = collections.gui
searches_root: str = collections.searches_root
tracing: str = tracing_get()
if tracing:
print(f"{tracing}searches_root='{searches_root}'")
        # Find all of the *collections* by searching through installed Python packages
        # for matching plugins:
entry_point_key: str = "bom_manager_collection_get"
index: int
entry_point: pkg_resources.EntryPoint
for index, entry_point in enumerate(pkg_resources.iter_entry_points(entry_point_key)):
entry_point_name: str = entry_point.name
if tracing:
print(f"{tracing}Entry_Point[{index}]:'{entry_point_name}'")
assert entry_point_name == "collection_get", (f"'{entry_point_name}' is "
"not 'collection_get''")
collection_get: Callable[[Collections, str, Gui], Collection] = entry_point.load()
# Create *collection*:
collection: Collection = collection_get(collections, searches_root, gui)
assert isinstance(collection, Collection)
assert collections.has_child(collection)
            # Recursively perform *partial_load*'s down from *collection*:
collection.partial_load()
# Collections.searches_find():
@trace(1)
def searches_find(self, search_name: str) -> "List[Search]":
        # Visit each *collection* in *collections* (i.e. *self*) to see if it has *search_name*:
collections: Collections = self
searches: List[Search] = []
collection: Node
for collection in collections.children_get():
assert isinstance(collection, Collection)
            search: Optional[Search] = collection.searches_find(search_name)
if search is not None:
# We have a matching *search*:
assert search_name == search.name, f"'{search_name}'!='{search.name}'"
searches.append(search)
return searches
    # Collections.type_letter_get():
def type_letter_get(self) -> str:
# print("Collections.type_letter_get(): name='{0}'".format(self.name))
return 'R'
# Search:
class Search(Node):
    # FIXME: This table belongs in *Units*:
ISO_MULTIPLIER_TABLE: Dict[str, float] = {
"E": 1.0e18,
"P": 1.0e15,
"T": 1.0e12,
"G": 1.0e9,
"M": 1.0e6,
"k": 1.0e3,
"m": 1.0e-3,
"μ": 1.0e-6,
"u": 1.0e-6,
"n": 1.0e-9,
"p": 1.0e-12,
"f": 1.0e-15,
"a": 1.0e-18,
}
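    # Example: the 'k' entry above turns the trailing multiplier in "4.7k..." into
    # 1.0e3, so *Search.key*() sorts such a search by the numeric value 4700.0.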
# Search.__init__():
@trace(1)
def __init__(self, name: str, parent: "Table", search_parent: "Optional[Search]",
url: str) -> None:
# Grab some values from *search* (i.e. *self*):
search: Search = self
assert name.find("%3b") < 0
# Initialize the super class for *search* (i.e. *self*):
super().__init__(name, parent, parent.collection)
# The *parent* is known to be a *table* and must contain *search*:
table: Table = parent
assert table.has_child(search)
        # Mark that the *table* is no longer sorted, since the *Node.__init__()* just
        # appended *search* to its *children* list:
        table.searches_sorted = False
# Stuff values into *search*:
self.comments: List[SearchComment] = list()
self.loaded: bool = False
self._relative_path: str = ""
self.filters: List[Filter] = list()
self.search_parent: Optional[Search] = search_parent
self.search_parent_title: str = ""
self.search_parent_name: str = "" # Used by *Search.tree_load*()
self.url: str = url
# Collect global information about the search *name* and *url*:
collection: Optional[Collection] = parent.collection
assert isinstance(collection, Collection)
collection.searches_insert(search)
# Make sure that the URL is not a duplicate:
if url != "":
prior_search: Optional[Search] = collection.url_find(url)
if isinstance(prior_search, Search):
# We have a duplicate *url*:
print(f"URL '{url}' for search '{name}' is the same as search "
f"'{prior_search.name}' in collection '{collection.name}.")
else:
# *url* is unique, so stuff it into the URL table of *collection*:
collection.url_insert(search)
    # Search.__str__():
def __str__(self) -> str:
search: Search = self
name: str = "??"
if hasattr(search, "name"):
name = search.name
return f"Search('{name}')"
# Search.can_fetch_more():
def can_fetch_more(self) -> bool:
        # Currently, *Search* objects never have any children.  Hence, there is nothing to fetch:
return False
# Search.children_count():
def children_count(self) -> Tuple[int, int]:
search: Search = self
table: Node = search.parent
assert isinstance(table, Table)
children: List[Node] = table.children_get()
child: Node
immediate_children: int = 0
all_children: int = 0
for child in children:
assert isinstance(child, Search)
distance: int = child.distance(search)
if distance == 1:
immediate_children += 1
all_children += 1
elif distance >= 2:
all_children += 1
return (immediate_children, all_children)
# Search.clicked()
def clicked(self, gui: Gui) -> None:
# Send the clicked event back to *gui* along with *search* (i.e. *self*):
search: Search = self
gui.search_clicked(search)
# Search.comments_append():
def comments_append(self, comments: List[SearchComment]) -> None:
        # Tack *comments* onto the comments list in *search* (i.e. *self*):
search: Search = self
search_comments: List[SearchComment] = search.comments
search_comments.extend(comments)
# Search.distance():
def distance(self, target_search: "Search") -> int:
search: Search = self
distance: int = 0
while search is not target_search:
search_parent: Optional[Node] = search.search_parent
if search_parent is None:
distance = -1
break
assert isinstance(search_parent, Search)
distance += 1
search = search_parent
return distance
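    # Example (illustrative): for the template chain "@ALL" <- "@Resistors" <- "4.7kOhm",
    # the "4.7kOhm" search is at distance 2 from "@ALL", whereas two searches with no
    # shared parent chain yield a distance of -1.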
# Search.file_load():
def file_load(self) -> None:
        # Grab some information from the parent *table* of *search*:
search: Search = self
table: Node = search.parent
assert isinstance(table, Table)
table_name: str = table.name
searches: List[Node] = table.children_get()
searches_size: int = len(searches)
# Only load *search* (i.e. *self*) if it is not already *loaded*:
loaded: bool = search.loaded
tracing: str = tracing_get()
trace_level: int = trace_level_get()
if tracing and trace_level >= 2:
print(f"{tracing}loaded={loaded} table='{table_name}' searches_size={searches_size}")
if not loaded:
# Grab some values from *search*:
collection: Optional[Collection] = search.collection
assert isinstance(collection, Collection)
searches_root: str = collection.searches_root
relative_path: str = search.relative_path
search_full_file_name: str = os.path.join(searches_root, relative_path + ".xml")
# if tracing:
# print(f"{tracing}search_full_file_name={search_full_file_name}")
search_file: IO[str]
with open(search_full_file_name, "r") as search_file:
# Read in *search_xml_text* from *search_file*:
search_xml_text: str = search_file.read()
# Parse the XML in *search_xml_text* into *search_tree*:
search_tree: etree._Element = etree.fromstring(search_xml_text)
# Now process the contents of *search_tree* and stuff the result:
search.tree_load(search_tree)
            # Mark that *table* is no longer sorted since we may have updated the
            # *search_parent* and *search_parent_title* fields:
table.searches_sorted = False
# Mark *search* as *loaded*:
search.loaded = True
# Search.file_delete
@trace(1)
def file_delete(self) -> None:
search: Search = self
collection: Optional[Collection] = search.collection
assert isinstance(collection, Collection)
searches_root: str = collection.searches_root
relative_path: str = search.relative_path
search_full_file_name: str = os.path.join(searches_root, relative_path + ".xml")
tracing: str = tracing_get()
if tracing:
print(f"{tracing}search_full_file_name='{search_full_file_name}'")
if os.path.isfile(search_full_file_name):
os.remove(search_full_file_name)
assert not os.path.isfile(search_full_file_name)
# Search.filters_refresh():
def filters_refresh(self) -> None:
# Before we do anything we have to make sure that *search* has an associated *table*.
        # Frankly, it should be impossible not to have an associated table, but we must
# be careful:
search: Search = self
table: Node = search.parent
assert isinstance(table, Table)
if True:
# Now we have to make sure that there is a *filter* for each *parameter* in
# *parameters*. We want to preserve the order of *filters*, so this is pretty
# tedious:
# Step 1: Start by deleting any *filter* from *filters* that does not have a
            # matching *parameter* in *parameters*.  This algorithm is O(n^2), so it could
            # be improved:
filters: List[Filter] = search.filters
parameters: List[Parameter] = table.parameters
new_filters: List[Filter] = list()
filter: Filter
for filter in filters:
parameter: Parameter
for parameter in parameters:
if filter.parameter is parameter:
new_filters.append(filter)
break
# Carefully replace the entire contents of *filters* with the contents of *new_filters*:
filters[:] = new_filters[:]
# Step 2: Sweep through *parameters* and create a new *filter* for each *parameter*
# that does not already have a matching *filter* in *filters*. Again, O(n^2):
parameter_index: int
            for parameter_index, parameter in enumerate(parameters):
for filter in filters:
if filter.parameter is parameter:
break
else:
filter = Filter(table, parameter, use=False, select="")
filters.append(filter)
# Search.is_deletable():
def is_deletable(self) -> bool:
# Grab *search_name* from *search* (i.e. *self*):
search: Search = self
# Search through *sibling_searches* of *table* to ensure that *search* is not
# a parent of any *sibling_search* object:
table: Node = search.parent
assert isinstance(table, Table)
sibling_searches: List[Node] = table.children_get()
        # Make sure that there are no *sibling_search*'s that depend upon *search*:
deletable: bool = True
sibling_search: Node
for sibling_search in sibling_searches:
if sibling_search.parent is search:
deletable = False
break
return deletable
# Search.key():
@staticmethod
def key(search: "Search") -> Any:
""" Return a sorting key for the *Search* object (i.e. *self*):
The sorting key is a three tuple consisting of (*Depth*, *UnitsNumber*, *Text*), where:
* *Depth*: This is the number of templates between "@ALL" and the search.
* *UnitsNumber*: This is the number that matches a number followed by ISO units
(e.g. "1KOhm", ".01uF", etc.)
* *Text*: This is the remaining text after *UnitsNumber* (if it is present.)
"""
        #
        # In the Tree view, we want searches to order templates (which by convention
        # start with an '@' character) before the other searches.  In addition, we would
        # like searches based on a number followed by an ISO type (e.g. "4.7KOhm",
        # ".1pF", etc.) to be sorted in numerical order from smallest to largest (e.g.
        # ".01pF", ".1pF", "10nF", ".1uF", "10uF", etc.)  Furthermore, the template searches
        # are organized as a hierarchical set of templates and we want the ones closest
        # to the top to sort first.
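        #
        # Worked example (illustrative; *depth* depends on the template chain): a search
        # named "4.7kOhm" one template below "@ALL" yields the key (1, 4700.0, "kOhm"),
        # while "@ALL" itself yields (0, 0.0, "@ALL").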
        # Verify that the parent of *search* is a *Table*:
        table: Node = search.parent
        assert isinstance(table, Table)
# Figure out template *depth*:
depth: int = 0
nested_search: Search = search
while nested_search.search_parent is not None:
depth += 1
nested_search = nested_search.search_parent
        # Sweep through the *search_name* looking for a number, optionally followed by an
        # ISO unit multiplier:
number_end_index: int = -1
search_name: str = search.name
character_index: int
character: str
for character_index, character in enumerate(search_name):
if character in ".0123456789":
# We a *character* that "could" be part of a number:
number_end_index = character_index + 1
else:
break
# Extract *number* from *search_name* if possible:
number: float = 0.0
if number_end_index >= 0:
try:
number = float(search_name[0:number_end_index])
except ValueError:
pass
# Figure out the ISO *multiplier* and adjust *number* appropriately:
multiplier: float = 1.0
if number_end_index >= 0 and number_end_index < len(search_name):
multiplier_character: str = search_name[number_end_index]
iso_multiplier_table: Dict[str, float] = Search.ISO_MULTIPLIER_TABLE
            if multiplier_character in iso_multiplier_table:
multiplier = iso_multiplier_table[multiplier_character]
number *= multiplier
# Return a tuple used for sorting:
rest: str = search_name if number_end_index < 0 else search_name[number_end_index:]
key: Tuple[int, float, str] = (depth, number, rest)
return key
# Search.key_function_get():
def key_function_get(self) -> Callable[[Node], Any]:
return Search.key
# Search.panel_update():
@trace(1)
def panel_update(self, gui: Gui) -> None:
search: Search = self
gui.search_panel_update(search)
# Search.search_parent_set():
def search_parent_set(self, search_parent: "Search") -> None:
# Stuff *search_parent* into *search* (i.e. *self*):
search: Search = self
search.search_parent = search_parent
# Search.search_parent_title_set():
def search_parent_title_set(self, search_parent_title: str) -> None:
# Stuff *search_parent_title* into *search* (i.e. *self*):
search: Search = self
search.search_parent_title = search_parent_title
# Search.name_get():
def name_get(self) -> str:
# Grab some values from *search* (i.e. *self*):
search: Search = self
search_name: str = search.name
table: Node = search.parent
assert isinstance(table, Table)
# Make sure that all *searches* associated with *table* are loaded from their
# associated `.xml` files:
if not table.searches_loaded:
table.searches_load()
# Construct the *name*:
search_parent: Optional[Search] = search.search_parent
name: str = (search_name if search_parent is None
else f"{search_name} ({search_parent.name})")
return name
# Search.tree_load():
def tree_load(self, search_tree: etree._Element) -> None:
# The basic format of the *search_tree* is:
#
# <Search name="..." parent="..." table="..." url="...">
        #        <SearchComments>
        #           <SearchComment language="EN">
        #           </SearchComment>
        #           ...
        #        </SearchComments>
# <Filters>
# ...
# </Filters>
# </Search>
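        # A concrete (illustrative) instance, matching what *Search.xml_lines_append*()
        # writes out:
        #
        #     <Search name="4.7kOhm" search_parent="@ALL" table="Resistors" url="...">
        #       <SearchComments>
        #         ...
        #       </SearchComments>
        #     </Search>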
# Extract the attributes from *attributes_table* of the `<Search ...>` tag:
attributes_table: Dict[str, str] = search_tree.attrib
assert "name" in attributes_table
name: str = Encode.from_attribute(attributes_table["name"])
search_parent_name: str = (Encode.from_attribute(attributes_table["search_parent"])
if "search_parent" in attributes_table else "")
assert "url" in attributes_table, "attributes_table={0}".format(attributes_table)
url: str = attributes_table["url"]
# Extract the `<SearchComments>...</SearchComments>` XML:
search_tree_elements: List[etree._Element] = list(search_tree)
assert search_tree_elements, "No <SearchComments> found."
comments_tree: etree._Element = search_tree_elements[0]
assert comments_tree.tag == "SearchComments", (f"<{comments_tree.tag}> found "
f"instead of <SearchComments>")
assert not comments_tree.attrib, "<SearchComments> should not have any attributes"
comments: List[SearchComment] = list()
comment_tree: etree._Element
for comment_tree in comments_tree:
comment: SearchComment = SearchComment.xml_parse(comment_tree)
comments.append(comment)
# Load values from *search_tree* into *search* (i.e. *self*):
search: Search = self
search.name = name
search.comments[:] = comments[:]
# search.filters[:] = filters[:]
search.search_parent = None # This is filled in later on
search.search_parent_name = search_parent_name
search.url = url
if search.url != "":
collection: Collection = search.collection
prior_search: Optional[Search] = collection.url_find(url)
if isinstance(prior_search, Search):
# We have a duplicate *url*:
print(f"URL '{url}' for search '{name}' is the same as search "
f"'{prior_search.name}' in collection '{collection.name}.")
else:
collection.url_insert(search)
# Search.type_letter_get():
def type_letter_get(self) -> str:
return 'S'
# Search.url_set():
def url_set(self, url: str) -> None:
# Stuff *url* into *search* (i.e. *self*):
search: Search = self
search.url = url
# Search.xml_file_save():
def xml_file_save(self) -> None:
        # Compute *xml_file_name* and *xml_directory* starting from *search* (i.e. *self*):
search: Search = self
collection: Optional[Collection] = search.collection
assert isinstance(collection, Collection)
searches_root: str = collection.searches_root
relative_path: str = search.relative_path
xml_file_name: str = os.path.join(searches_root, relative_path + ".xml")
xml_directory: str = os.path.split(xml_file_name)[0]
tracing: str = tracing_get()
if tracing:
print(f"{tracing}searches_root='{searches_root}'")
print(f"{tracing}relative_path='{relative_path}'")
print(f"{tracing}xml_file_name='{xml_file_name}'")
print(f"{tracing}xml_directory='{xml_directory}'")
# Create *xml_text* from *search*:
xml_lines: List[str] = list()
xml_lines.append('<?xml version="1.0"?>')
search.xml_lines_append(xml_lines, "")
xml_lines.append("")
xml_text: str = "\n".join(xml_lines)
# Ensure that *xml_directory* exists:
if not os.path.isdir(xml_directory):
os.makedirs(xml_directory)
# Write *xml_text* out to *xml_file_name*:
xml_file: IO[str]
with open(xml_file_name, "w") as xml_file:
xml_file.write(xml_text)
# Mark *search* as *loaded* since we just wrote out the contents:
search.loaded = True
# Search.xml_lines_append()
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *search* (i.e. *self*):
search: Search = self
table: Node = search.parent
assert isinstance(table, Table)
search_parent: Optional[Search] = search.search_parent
search_name: str = search.name
# Figure out *search_parent_title* which is empty only for the `@ALL` *Search* object:
search_parent_text: str = ("" if search_parent is None else
f'search_parent="{Encode.to_attribute(search_parent.name)}" ')
# Start the `<Search...>` element:
xml_lines.append(f'{indent}<Search '
f'name="{Encode.to_attribute(search_name)}" '
f'{search_parent_text}'
f'table="{Encode.to_attribute(table.name)}" '
f'url="{Encode.to_attribute(search.url)}">')
# Append the `<SearchComments>` element:
xml_lines.append(f'{indent} <SearchComments>')
search_comments: List[SearchComment] = search.comments
search_comment_indent = indent + " "
for search_comment in search_comments:
search_comment.xml_lines_append(xml_lines, search_comment_indent)
xml_lines.append(f'{indent} </SearchComments>')
# Wrap up the `<Search>` element:
xml_lines.append(f'{indent}</Search>')
# Table:
class Table(Node):
# Table.__init__():
def __init__(self, name: str, parent: Node, url: str) -> None:
# Initialize the parent class:
super().__init__(name, parent, parent.collection)
# Load additional values into *table* (i.e. *self*):
# table: Table = self
self.comments: List[TableComment] = list()
self.import_column_triples: List[List[Tuple[int, str, str]]] = list()
self.import_headers: List[str] = list() # Read from .csv file
self.import_rows: List[str] = list() # Read from .csv file
self.searches_loaded: bool = False
self.searches_sorted: bool = False
self.loaded: bool = False
self.parameters: List[Parameter] = list()
self._relative_path: str = ""
self.searches_table: Dict[str, Search] = dict()
self.url: str = ""
# Table.__str__():
def __str__(self) -> str:
table: Table = self
name: str = "??"
if hasattr(table, "name"):
name = table.name
return f"Table('{name}')"
# Table.can_fetch_more():
def can_fetch_more(self) -> bool:
        # Conceptually, every table has a default `@ALL` search.  We return *True* if
        # the `@ALL` search has not actually been created yet for *table* (i.e. *self*):
table: Table = self
searches: List[Node] = table.children_get()
can_fetch_more: bool = (len(searches) == 0)
return can_fetch_more
# Table.clicked():
def clicked(self, gui: Gui) -> None:
# Forward clicked event back to *gui* along with *table* (i.e. *self*):
table: Table = self
gui.table_clicked(table)
# Table.column_tables_extract():
@trace(1)
def column_tables_extract(self, rows: List[List[str]]) -> List[Dict[str, int]]:
        # Create and return a *column_tables* which has one dictionary for each column in *rows*.
        # Each *column_table* dictionary contains an occurrence count for each different
        # value in the column.
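        # Example: rows [["a", "1"], ["b", "1"]] produce
        # [{"a": 1, "b": 1}, {"1": 2}].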
# Figure out how many *columns* there are for each row. Each row is assumed
# to have the same number of *columns*:
assert rows, "No data to extract"
row0: List[str] = rows[0]
columns: int = len(row0)
# Create *column_tables* and fill in one *column_table* per *column*:
column_tables: List[Dict[str, int]] = list()
for column in range(columns):
column_table: Dict[str, int] = dict()
column_tables.append(column_table)
# Sweep across each *row* in *rows* and fill in *column_table*:
for row in rows:
assert len(row) == columns
value: str = row[column]
if value in column_table:
# We have seen *value* before in this *column*, so increment its count:
column_table[value] += 1
else:
                    # This is the first time we have seen *value* in this *column*, so insert it
                    # into *column_table* with a count of one:
column_table[value] = 1
# Return *column_tables*:
return column_tables
# Table.csv_file_read():
@trace(1)
def csv_file_read(self) -> Tuple[List[str], List[List[str]]]:
# Grab some values from *table* (i.e. *self*):
table: Table = self
csv_full_name: str = table.csv_full_name_get()
# Open *csv_full_name* and read in the *headers* and *rows*:
assert os.path.isfile(csv_full_name), "File '{csv_full_file_name}' does not exist."
headers: List[str]
rows: List[List[str]] = list()
csv_file: IO[str]
with open(csv_full_name, newline="") as csv_file:
row_index: int
row: List[str]
for row_index, row in enumerate(csv.reader(csv_file, delimiter=',', quotechar='"')):
if row_index == 0:
# The first *row* is actually the *headers*:
headers = row
else:
# All others are data *rows*:
rows.append(row)
# Return the resulting *headers* and *rows*:
return headers, rows
# Table.csv_full_name_get():
def csv_full_name_get(self) -> str:
table: Table = self
class_name: str = table.__class__.__name__
assert False, f"{class_name}.csv_full_name_get() needs to be implemented."
return ""
# Table.csv_read_and_process():
@trace(1)
def csv_read_and_process(self, csv_directory: str, bind: bool, gui: Gui) -> None:
        # This delightful piece of code reads in a `.csv` file and attempts to categorize
        # each column of the table with a "type".  The types are stored in *re_table*
        # (from *gui*) as a dictionary of named precompiled regular expressions.
        # If there is no good match for the table column contents, it is given a type
        # of "String".  This code is actually pretty involved and convoluted.
# Read the example `.csv` file associated with *table* (i.e. *self*) into *headers* and
# *rows*:
table: Table = self
headers: List[str]
rows: List[List[str]]
headers, rows = table.csv_file_read()
        # Extract *column_tables* which is a list of dictionaries where each dictionary
        # has an occurrence count for each unique value in a column:
        column_tables: List[Dict[str, int]] = table.column_tables_extract(rows)
        # Extract *types_tables* which is a list of dictionaries, where each dictionary
        # has an occurrence count for each unique type name in the column:
        types_tables: List[Dict[str, int]] = table.type_tables_extract(column_tables, gui)
# If requested, bind the *types_tables* to *parameters*:
if bind:
table.parameters_bind(headers, types_tables)
# We are done and can write out *table* now:
table.xml_file_save()
# Table.directories_get():
def directories_get(self) -> "List[Directory]":
# A *table* has no sub-directories, so the empty list is returned:
return []
# Table.fetch_more():
def fetch_more(self) -> None:
# Create *all_search* if it does not already exist (i.e. *searches_size* is 0):
table: Table = self
searches: List[Node] = table.children_get()
searches_size = len(searches)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}1:searches_size={searches_size}")
if searches_size == 0:
# Note that the call to the *Search*() has the side-effect of appending
# *all_search* to the children of *table*:
# base_name = Encode.to_file_name(name)
all_search: Search = Search("@ALL", table, None, table.url)
assert table.has_child(all_search)
assert len(searches) == 1
all_search.xml_file_save()
# Make sure that *table* is fully loaded so we can grab the *url*:
table.file_load()
searches_size = len(searches)
if tracing:
print(f"{tracing}2:searches_size={searches_size}")
assert searches_size == 1
url: str = table.url
# Fill in the rest of *all_search* from *table*:
comment: SearchComment = SearchComment(language="EN", lines=list())
all_search.comments.append(comment)
all_search.url = url
# Force *all_search* out to the file system:
all_search.xml_file_save()
if tracing:
searches_size = len(searches)
print(f"{tracing}3:searches_size={searches_size}")
# Table.file_load():
def file_load(self) -> None:
# Only load *table* (i.e. *self*) if it is not already *loaded*:
table: Table = self
loaded: bool = table.loaded
if not table.loaded:
searches_size: int = table.child_count()
tracing: str = tracing_get()
if tracing:
print(f"{tracing}loaded={loaded} searches_size={searches_size}")
# Get *table_file_name* for *table*:
relative_path: str = table.relative_path
collection: Optional[Collection] = table.collection
assert isinstance(collection, Collection)
collection_root: str = collection.collection_root
table_file_name: str = os.path.join(collection_root, relative_path + ".xml")
assert os.path.exists(table_file_name), f"'{table_file_name}' does not exist"
# Read *table_tree* in from *full_file_name*:
table_file: IO[str]
with open(table_file_name) as table_file:
# Read in *table_xml_text* from *table_file*:
table_xml_text: str = table_file.read()
# Parse the XML in *table_xml_text* into *table_tree*:
table_tree: etree._Element = etree.fromstring(table_xml_text)
# FIXME: Catch XML parsing errors here!!!
# Now process the contents of *table_tree* and stuff the results into *table*:
table.tree_load(table_tree)
# Mark *table* as *loaded*:
table.loaded = True
# Table.has_children():
def has_children(self) -> bool:
# This is a bit obscure. A *Table* object conceptually always has an "@ALL" search.
# *True* is returned even if the *table* (i.e. *self*) does not actually have
# any children. When *Table.fetch_more*() is called the "@ALL" search will auto-magically
# be created under the covers.
return True
# Table.header_labels_get():
def header_labels_get(self) -> List[str]:
table: Table = self
parameters: List[Parameter] = table.parameters
parameters_size: int = len(parameters)
assert parameters_size >= 1, "Table is empty"
header_labels: List[str] = list()
for parameter in parameters:
parameter_comments: List[ParameterComment] = parameter.comments
header_label: str = "?"
if len(parameter_comments) >= 1:
parameter_comment: ParameterComment = parameter_comments[0]
short_heading: str = parameter_comment.short_heading
long_heading: str = parameter_comment.long_heading
header_label = short_heading if short_heading is not None else long_heading
header_labels.append(header_label)
return header_labels
# Table.key():
@staticmethod
def key(table: Node) -> Any:
name: str = table.name
return (name, )
# Table.key_function_get():
def key_function_get(self) -> Callable[[Node], Any]:
return Table.key
# Table.name_get():
def name_get(self) -> str:
# Force *table* (i.e. *self*) *load* if it has not already been loaded:
table: Table = self
name: str = table.name
table.file_load()
# Augment *name* with the *searches_size*:
searches_size: int = table.child_count()
if searches_size >= 2:
name += f" ({searches_size})"
return name
# Table.panel_update():
@trace(1)
def panel_update(self, gui: Gui) -> None:
table: Table = self
gui.table_panel_update(table)
# Table.parameters_bind():
@trace(1)
def parameters_bind(self, headers: List[str], type_tables: List[Dict[str, int]]) -> None:
        # Grab *parameters* from *table* and make sure that there is a 1-to-1 correspondence
        # between *parameters* and *type_tables*:
table: Table = self
parameters: List[Parameter] = table.parameters
        # Sweep through *parameters* finding the *type_name* with the best match:
index: int
header: str
csv: str = ""
default: str = ""
optional: bool = False
for index, header in enumerate(headers):
# Convert *type_table* into *type_counts*:
type_table: Dict[str, int] = type_tables[index]
type_counts: List[Tuple[str, int]] = list(type_table.items())
# Sort *type_counts* based on count:
type_counts.sort(key=lambda name_count: (name_count[1], name_count[0]))
# Grab the *name_count_last* which will have the highest count, and stuff
# the associated *type_name* into *parameter*:
name_count_last: Tuple[str, int] = type_counts[-1]
type_name: str = name_count_last[0]
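            # Example: type_table {"Integer": 7, "String": 1} sorts to
            # [("String", 1), ("Integer", 7)], so type_name == "Integer".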
parameter: Parameter
if len(parameters) <= index:
comments: List[ParameterComment] = [ParameterComment("EN", [], header, "")]
enumerations: List[Enumeration] = list()
parameter = Parameter(header, type_name, csv, index, default, optional,
comments, enumerations)
parameters.append(parameter)
            else:
                parameter = parameters[index]
                parameter.type = type_name
# Table.partial_load():
def partial_load(self) -> None:
# Grab some values from *table* (i.e. *self*):
table: Table = self
relative_path: str = table.relative_path
collection: Optional[Collection] = table.collection
assert isinstance(collection, Collection)
tracing: str = tracing_get()
# Compute *searches_path* which is the directory that contains the *Search* `.xml` files:
collection_root: str = collection.collection_root
searches_root: str = collection.searches_root
searches_directory: str = os.path.join(searches_root, relative_path)
if tracing:
print(f"{tracing}collection_root='{collection_root}'")
print(f"{tracing}searches_root='{searches_root}'")
print(f"{tracing}relative_path='{relative_path}'")
print(f"{tracing}searches_directory='{searches_directory}'")
# Scan through *searches_path* looking for `.xml` files:
if os.path.isdir(searches_directory):
# *searches_path* is a directory so we scan it:
index: int
search_file_name: str
for index, search_file_name in enumerate(sorted(list(os.listdir(searches_directory)))):
                # Perform requested *tracing*:
if tracing:
print(f"{tracing}Search[{index}]:'{search_file_name}'")
# Skip over any files that do not end with `.xml` suffix:
if search_file_name.endswith(".xml"):
# Extract *name* and *title* from *file_name* (ignoring the `.xml` suffix):
file_base: str = search_file_name[:-4]
search_name: str = Encode.from_file_name(file_base)
# Create *search* and then save it out to the file system:
search: Search = Search(search_name, table, None, "")
assert table.has_child(search)
search.loaded = False
# Table.search_directory_get():
# def search_directory_get(self) -> str:
# # Compute *search_directory*:
# table: Table = self
# searches_root: str = table.searches_root_get()
# relative_path: str = table.relative_path
# table_name: str = table.name
# table_base_name: str = Encode.to_file_name(table_name)
# search_directory: str = os.path.join(searches_root, relative_path, table_base_name)
# if tracing:
# print(f"{tracing}searches_root='{searches_root}'")
# print(f"{tracing}relative_path='{relative_path}'")
# # print(f"{tracing}table__directory='{table_directory}'")
# print(f"{tracing}search_directory='{search_directory}'")
# # Make sure *search_directory* exists:
# if not os.path.isdir(search_directory):
# os.makedirs(search_directory)
# if tracing:
# print(f"{tracing}Created directory '{search_directory}'")
# return search_directory
# Table.searches_load():
@trace(1)
def searches_load(self) -> None:
# Grab some values from *table* (i.e. *self*):
tracing: str = tracing_get()
table: Table = self
if not table.searches_loaded:
table_searches: Dict[str, Search] = dict()
searches_loaded_count: int = 0
searches: List[Node] = table.children_get()
search: Node
for search in searches:
# Make sure *search* is *loaded*. We test *loaded* up here to prevent
# a lot of unnecessary calls to *file_load*:
assert isinstance(search, Search)
if not search.loaded:
search.file_load()
assert search.loaded
searches_loaded_count += 1
                # Build up *table_searches* with all of the *searches* to be used for
                # the upcoming parent search fix-up step:
table_searches[search.name] = search
# Fix up the search parent links:
if searches_loaded_count >= 1:
for search in searches:
search_parent_name: str = search.search_parent_name
if tracing:
print(f"{tracing}Search '{search.name}' parent name is "
f"'{search_parent_name}'")
if search_parent_name != "":
assert search_parent_name in table_searches, (f"'{search_parent_name} '"
"not in "
f"{table_searches.keys()}")
search_parent: Search = table_searches[search_parent_name]
search.search_parent = search_parent
if tracing:
print(f"{tracing}Setting search '{search.name}' "
f"search parent to '{search_parent.name}'")
else:
if tracing:
print(f"{tracing}Search '{search.name}' has no search parent.")
# Finally, mark *searches_loaded*:
table.searches_loaded = True
table.is_sorted = False
# Now force *table* to be sorted:
# searches = table.children_get()
if tracing:
print(f"{tracing}After searches sort:")
for index, search in enumerate(searches):
key: Any = Search.key(search)
print(f"{tracing}[{index}]:'{search.name}': {key}")
# Table.searches_table_set():
# def searches_table_set(self, searches_table):
# # Verify argument types:
# assert isinstance(searches_table, dict)
#
# # Stuff *searches_table* into *table* (i.e. *self*):
# table = self
# table.searches_stable = searches_table
# Table.tables_get():
def tables_get(self) -> "List[Table]":
table: Table = self
return [table]
# Table.tree_load():
def tree_load(self, table_tree: etree._Element) -> None:
# The format of a *Table* `.xml` file is basically:
#
# <Table name="..." url="...">
# <TableComments>
# ...
# </TableComments>
# <Parameters>
# ...
# </Parameters>
# </Table>
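        #
        # Example (illustrative only; the name, URL, and parameter values below are
        # made up, and only attributes actually parsed by this code are shown):
        #
        #    <Table name="Resistors" url="https://example.com/resistors">
        #      <TableComments>
        #        ...
        #      </TableComments>
        #      <Parameters>
        #        <Parameter name="Resistance" type="float" csv="resistance" csv_index="0">
        #          <ParameterComments>
        #            ...
        #          </ParameterComments>
        #        </Parameter>
        #      </Parameters>
        #    </Table>
        #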
# Extract the attributes from *attributes_table*:
assert table_tree.tag == "Table"
attributes_table: Dict[str, str] = table_tree.attrib
assert "name" in attributes_table
name: str = Encode.from_attribute(attributes_table["name"])
url: str = Encode.from_attribute(attributes_table["url"])
# Extract the *comments* from *comments_tree_element*:
table_tree_elements: List[etree._Element] = list(table_tree)
comments_tree: etree._Element = table_tree_elements[0]
assert comments_tree.tag == "TableComments"
comments: List[TableComment] = list()
comment_tree: etree._Element
for comment_tree in comments_tree:
comment: TableComment = TableComment.xml_parse(comment_tree)
comments.append(comment)
# Extract the *parameters* from *parameters_tree_element*:
parameters: List[Parameter] = list()
        parameters_tree: etree._Element = table_tree_elements[1]
        assert parameters_tree.tag == "Parameters"
        parameter_tree: etree._Element
for parameter_tree in parameters_tree:
parameter: Parameter = Parameter.xml_parse(parameter_tree)
parameters.append(parameter)
# Ensure that there are no extra elements:
assert len(table_tree_elements) == 2
# Load the extracted information into *table* (i.e. *self*):
table: Table = self
table.comments[:] = comments[:]
table.name = name
table.parameters[:] = parameters[:]
table.url = url
# Table.type_tables_extract():
@trace(1)
def type_tables_extract(self, column_tables: List[Dict[str, int]],
gui: Gui) -> List[Dict[str, int]]:
        # The *re_table* from *gui* contains regular expressions for categorizing
# values. The key of *re_table* is the unique *type_name* associated with the regular
# expression that matches a given type. The regular expressions are *PreCompiled*
# to improve efficiency:
re_table: Dict[str, PreCompiled] = gui.re_table
        # Construct *type_tables*, which is a list of *type_table* dicts 1-to-1 with the columns
# in *column_tables*. Each *type_table* collects a count of the number of column entries
# that match a given *type_name*. If none of the *type_names* match a given *value*,
# the default *type_name* of "String" is used:
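        # Example (illustrative): for a *column_table* of {"10": 3, "4.7K": 1} and an
        # *re_table* containing an "Integer" regex that matches "10" but nothing that
        # matches "4.7K", the resulting *type_table* would be {"String": 1, "Integer": 3}.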
type_tables: List[Dict[str, int]] = list()
column_table: Dict[str, int]
for column_table in column_tables:
# Create *type_table*, create the "String" *type_name*, and tack it onto *type_tables*:
type_table: Dict[str, int] = dict()
type_table["String"] = 0
type_tables.append(type_table)
# Sweep through *column_table* characterizing which values match which *type_names*:
value: str
count: int
for value, count in column_table.items():
type_name: str
regex: PreCompiled
match: bool = False
# Now test *value* against *re* to see if we have a match:
for type_name, regex in re_table.items():
if regex.match(value) is not None:
# We have a match, so make sure *type_name* is in *type_table*
# update the count appropriately:
if type_name in type_table:
type_table[type_name] += count
else:
type_table[type_name] = count
match = True
# If we did not *match*, mark the *value* as a "String" type:
if not match:
type_table["String"] += count
return type_tables
# Table.type_letter_get():
def type_letter_get(self) -> str:
return 'T'
# Table.xml_file_save():
def xml_file_save(self) -> None:
# Compute *xml_file_name* and *xml_directory* from *table* (i.e. *self*):
table: Table = self
relative_path: str = table.relative_path
collection: Optional[Collection] = table.collection
assert isinstance(collection, Collection)
collection_root: str = collection.collection_root
xml_file_name: str = os.path.join(collection_root, relative_path + ".xml")
xml_directory: str = os.path.split(xml_file_name)[0]
tracing: str = tracing_get()
if tracing:
print("{tracing}relative_path='{relative_path}'")
print("{tracing}collection_root='{collection_root}'")
print("{tracing}xml_file_name='{xml_file_name}'")
print("{tracing}xml_directory='{xml_directory}'")
        # Ensure that *xml_directory* exists:
        os.makedirs(xml_directory, exist_ok=True)
# Construct the final *xml_lines*:
xml_lines: List[str] = list()
xml_lines.append('<?xml version="1.0"?>')
table.xml_lines_append(xml_lines, "")
xml_lines.append("")
xml_text: str = '\n'.join(xml_lines)
# Now write *xml_text* out to *xml_file_name*:
xml_file: IO[str]
with open(xml_file_name, "w") as xml_file:
xml_file.write(xml_text)
# Table.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Start appending the `<Table...>` element:
table: Table = self
xml_lines.append(f'{indent}<Table '
f'name="{Encode.to_attribute(table.name)}" '
f'url="{Encode.to_attribute(table.url)}">')
# Append the `<TableComments>` element:
xml_lines.append(f'{indent} <TableComments>')
table_comments: List[TableComment] = table.comments
table_comment: TableComment
next_indent: str = indent + " "
for table_comment in table_comments:
table_comment.xml_lines_append(xml_lines, next_indent)
xml_lines.append(f'{indent} </TableComments>')
# Append the `<Parameters>` element:
xml_lines.append(f'{indent} <Parameters>')
parameters: List[Parameter] = table.parameters
parameter: Parameter
for parameter in parameters:
parameter.xml_lines_append(xml_lines, next_indent)
xml_lines.append(f'{indent} </Parameters>')
# Close out the `<Table>` element:
xml_lines.append(f'{indent}</Table>')
# Order:
class Order:
    # An Order consists of a list of projects to order parts for.
# In addition, the list of vendors to exclude from the ordering
# process is provided as well. Vendors are excluded because the
# shipping costs exceed the part cost savings. Finally, sometimes
# you want to order extra stuff, so there is a mechanism for that
# as well. Sometimes, you have previous inventory, so that is
# listed as well.
# Order.__init__():
def __init__(self, order_root: str, cads: List[Cad], pandas: "List[Panda]") -> None:
""" *Order*: Initialize *self* for an order. """
# Ensure that *order_root* exists:
if not os.path.isdir(order_root):
try:
os.makedirs(order_root)
print(f"Created order directory '{order_root}'.")
except PermissionError:
cwd = os.getcwd()
print(f"Unable to create order directory '{order_root}', using '{cwd}' instead!")
order_root = cwd
order_root = os.path.abspath(order_root)
assert os.path.isdir(order_root), f"'{order_root} is not a directory!'"
# Create *vendor_searches_root*:
vendor_searches_root: str = os.path.join(order_root, "vendor_searches")
if not os.path.isdir(vendor_searches_root):
os.mkdir(vendor_searches_root)
assert os.path.isdir(vendor_searches_root)
# Priorities 0-9 are for vendors with significant minimum
# order amounts or trans-oceanic shipping costs:
vendor_priorities: Dict[str, int] = {}
vendor_priorities["Verical"] = 0
vendor_priorities["Chip1Stop"] = 1
vendor_priorities["Farnell element14"] = 2
vendor_priorities["element14 Asia-Pacific"] = 2
vendor_priorities["Heilind Electronics - Asia"] = 2
        # Priorities 100-999 are auto-assigned for vendors that are
        # not explicitly prioritized below.
# Priorities 1000+ are for explicitly preferred vendors:
# vendor_priorities["Arrow"] = 1000
# vendor_priorities["Avnet Express"] = 1001
# vendor_priorities["Newark"] = 1002
vendor_priorities["Mouser"] = 1003
vendor_priorities["Digi-Key"] = 1004
vendor_minimums: Dict[str, float] = {}
# Stuff values into *order* (i.e. *self*):
# order: Order = self
self.cads: List[Cad] = cads
self.excluded_vendor_names: Dict[str, None] = {} # Excluded vendors
self.final_choice_parts: List[ChoicePart] = []
self.inventories: List[Inventory] = [] # List[Inventory]: Existing inventoried parts
self.order_root: str = order_root
self.pandas: List[Panda] = pandas
self.projects: List[Project] = [] # List[Project]
self.projects_table: Dict[str, Project] = {} # Dict[Net_File_Name, Project]
self.selected_vendor_names: List[str] = []
self.stale: int = 2 * 7 * 24 * 60 * 60 # 2 weeks
# self.requests: List[Request] = [] # List[Request]: Additional requested parts
self.vendor_minimums: Dict[str, float] = vendor_minimums
self.vendor_priorities: Dict[str, int] = vendor_priorities
self.vendor_priority: int = 10
self.vendor_searches_root: str = vendor_searches_root
# Order.__str__():
def __str__(self) -> str:
result = "Order()"
return result
# Order.project_create():
def project_create(self, name: str, revision: str, net_file_name: str, count: int,
positions_file_name: str = "") -> "Project":
""" *Order*: Create a *Project* containing *name*, *revision*,
*net_file_name* and *count*. """
# Grab some values from *order* (i.e. *self*):
order: Order = self
projects: List[Project] = order.projects
projects_table: Dict[str, Project] = order.projects_table
# Ignore duplicate *net_file_names*:
if net_file_name in projects_table:
print(f"Duplicate .net file '{net_file_name}' specified.")
else:
# Create the new *project* and stuff into the appropriate data structures:
project: Project = Project(name, revision, net_file_name, count,
order, positions_file_name)
projects_table[net_file_name] = project
projects.append(project)
return project
# Order.bom_write():
@trace(1)
def bom_write(self, bom_file_name: str, key_function: "Callable[[ChoicePart], Any]") -> None:
""" *Order*: Write out the BOM (Bill Of Materials) for the
*Order* object (i.e. *self*) to *bom_file_name* ("" for stdout)
using *key_function* to provide the sort key for each
*ChoicePart*.
"""
# Grab some values from *order* (i.e. *self*):
order: Order = self
excluded_vendor_names: Dict[str, None] = order.excluded_vendor_names
final_choice_parts: List[ChoicePart] = order.final_choice_parts
tracing: str = tracing_get()
if tracing:
print(f"{tracing}len(final_choice_parts)={len(final_choice_parts)}")
# Sort *final_choice_parts* using *key_function*.
final_choice_parts.sort(key=key_function)
        # Open *bom_file* (wrap *sys.stdout* in a null context so it is not closed):
        import contextlib
        bom_file: IO[str]
        with (contextlib.nullcontext(sys.stdout) if bom_file_name == ""
              else open(bom_file_name, "w")) as bom_file:
# Now generate a BOM summary:
total_cost: float = 0.0
choice_part: ChoicePart
for choice_part in final_choice_parts:
# Sort the *pose_parts* by *project* followed by reference:
pose_parts: List[PosePart] = choice_part.pose_parts
pose_parts.sort(key=lambda pose_part:
(pose_part.project.name, pose_part.reference.upper(),
int(text_filter(pose_part.reference, str.isdigit))))
# Write the first line out to *bom_file*:
part_name: str = choice_part.name
part_footprint: str = choice_part.footprint
part_description: str = choice_part.description
part_count: int = choice_part.count_get()
part_references_text: str = choice_part.references_text_get()
bom_file.write(f" {part_name}:{part_footprint};{part_description}"
f" {part_count}:{part_references_text}\n")
# Select the vendor_part and associated quantity/cost
choice_part.select(excluded_vendor_names, True)
# selected_actual_part = choice_part.selected_actual_part
selected_vendor_part: Optional[VendorPart] = choice_part.selected_vendor_part
assert isinstance(selected_vendor_part, VendorPart)
selected_order_quantity: int = choice_part.selected_order_quantity
selected_total_cost: float = choice_part.selected_total_cost
selected_price_break_index: int = choice_part.selected_price_break_index
# It should be impossible not to have a *VendorPart*:
if isinstance(selected_vendor_part, VendorPart):
# Grab the *vendor_name*:
assert isinstance(selected_vendor_part, VendorPart)
# vendor_name = selected_vendor_part.vendor_name
# Show the *price breaks* on each side of the
# *selected_price_breaks_index*:
price_breaks: List[PriceBreak] = selected_vendor_part.price_breaks
# print("len(price_breaks)={0} selected_price_break_index={1}".
# format(len(price_breaks), selected_price_break_index))
selected_price_break: PriceBreak = price_breaks[selected_price_break_index]
minimum_index: int = max(selected_price_break_index - 1, 0)
maximum_index: int = min(selected_price_break_index + 2, len(price_breaks))
price_breaks = price_breaks[minimum_index: maximum_index]
# Compute the *price_breaks_text*:
price_breaks_text: str = ""
price_break: PriceBreak
                    for price_break in price_breaks:
price_breaks_text += "{0}/${1:.3f} ".format(
price_break.quantity, price_break.price)
# Print out the line:
selected_actual_key: Tuple[str, str] = selected_vendor_part.actual_part_key
selected_manufacturer_name: str = selected_actual_key[0]
selected_manufacturer_part_name: str = selected_actual_key[1]
vendor_name: str = selected_vendor_part.vendor_name
vendor_part_name: str = selected_vendor_part.vendor_part_name
bom_file.write(f" {vendor_name}:"
f"{vendor_part_name} "
f"[{selected_manufacturer_name}: "
f"{selected_manufacturer_part_name}] "
f"{price_breaks_text}\n")
# Print out the result:
bom_file.write(" {0}@({1}/${2:.3f})={3:.2f}\n".format(
selected_order_quantity,
selected_price_break.quantity, selected_price_break.price,
selected_total_cost))
total_cost += selected_total_cost
else:
# It should be impossible to get here:
print(f"{tracing}type(selected_vendor_part)={type(selected_vendor_part)}")
# Wrap up the *bom_file*:
bom_file.write(f"{tracing}Total: ${0:.2f}\n".format(total_cost))
# Order.check():
@trace(1)
def check(self, collections: Collections) -> None:
# Check each of the *projects* in *order* (i.e. *self*):
order: Order = self
projects: List[Project] = order.projects
project: Project
for project in projects:
project.check(collections)
    # Order.csv_write():
@trace(1)
def csv_write(self) -> None:
""" *Order*: Write out the *Order* object (i.e. *self) BOM (Bill Of Materials)
            for each vendor as a .csv (Comma Separated Values) file.
"""
# Grab some values from *order* (i.e. *self*):
order: Order = self
excluded_vendor_names: Dict[str, None] = order.excluded_vendor_names
final_choice_parts: List[ChoicePart] = order.final_choice_parts
# Sort *final_choice_parts*:
final_choice_parts.sort(key=lambda choice_part:
(choice_part.selected_vendor_name,
choice_part.selected_total_cost,
choice_part.name))
vendor_boms: Dict[str, List[str]] = {}
choice_part: ChoicePart
for choice_part in final_choice_parts:
# Sort the *pose_parts* by *project* followed by reference:
pose_parts: List[PosePart] = choice_part.pose_parts
pose_parts.sort(key=lambda pose_part:
(pose_part.project.name, pose_part.reference.upper(),
int(text_filter(pose_part.reference, str.isdigit))))
# Select the vendor_part and associated quantity/cost
choice_part.select(excluded_vendor_names, True)
selected_actual_part: Optional[ActualPart] = choice_part.selected_actual_part
selected_vendor_part: Optional[VendorPart] = choice_part.selected_vendor_part
selected_order_quantity: int = choice_part.selected_order_quantity
if selected_vendor_part is not None and selected_actual_part is not None:
# Grab the *vendor_name* and *vendor_part_name*:
vendor_name: str = selected_vendor_part.vendor_name
# vendor_part_name = selected_vendor_part.vendor_name
# Make sure we have a *vendor_bom* line list:
if vendor_name not in vendor_boms:
vendor_boms[vendor_name] = []
lines: List[str] = vendor_boms[vendor_name]
# Create *line* and append it to *vendor_bom*:
line: str = (f'"{selected_order_quantity}",'
f'"{selected_vendor_part.vendor_part_name},"'
f'"{selected_actual_part.manufacturer_name},"'
f'"{selected_actual_part.manufacturer_part_name},"'
f'"{choice_part.name}"')
lines.append(line)
# Wrap up the *bom_file*:
order_root: str = order.order_root
for vendor_name in vendor_boms.keys():
# Create the *vendor_text*:
vendor_lines: List[str] = vendor_boms[vendor_name]
vendor_text: str = '\n'.join(vendor_lines) + '\n'
# Write *vendor_text* out to *vendor_full_file*:
vendor_base_name: str = Encode.to_file_name(vendor_name) + ".csv"
vendor_full_name: str = os.path.join(order_root, vendor_base_name)
vendor_file: IO[str]
with open(vendor_full_name, "w") as vendor_file:
# Write out each line in *lines*:
print(f"Writing '{vendor_full_name}'")
vendor_file.write(vendor_text)
# Order.exclude_vendors_to_reduce_shipping_costs():
def exclude_vendors_to_reduce_shipping_costs(self, choice_parts: "List[ChoicePart]",
excluded_vendor_names: Dict[str, None],
reduced_vendor_messages: List[str]) -> None:
""" *Order*: Sweep through *choice_parts* and figure out which vendors
to add to *excluded_vendor_names* to reduce shipping costs.
"""
# First figure out the total *missing_parts*. We will stop if
# excluding a vendor increases above the *missing_parts* number:
order: Order = self
quad: Quad = order.quad_compute(choice_parts, excluded_vendor_names, "")
missing_parts: int = quad[0]
# Sweep through and figure out what vendors to order from:
tracing: str = tracing_get()
done: bool = False
while not done:
# Get the base cost for the current *excluded_vendor_names*:
base_quad: Quad = \
order.quad_compute(choice_parts, excluded_vendor_names, "")
# print(">>>>base_quad={0}".format(base_quad))
# If the *base_missing_parts* increases, we need to stop because
# excluding additional vendors will cause the order to become
# incomplete:
base_missing_parts: int = base_quad[0]
if base_missing_parts > missing_parts:
break
# Grab *base_cost*:
base_cost: float = base_quad[1]
# Figure out what vendors are still available for *choice_parts*:
base_vendor_names: List[str] = order.vendor_names_get(choice_parts,
excluded_vendor_names)
# print("base: {0} {1}".format(base_cost, base_vendor_names))
# For small designs, sometimes the algorithm will attempt to
# throw everything out. The test below makes sure we always
# have one last remaining vendor:
if len(base_vendor_names) <= 1:
break
# Iterate through *vendor_names*, excluding one *vendor_name*
# at a time:
trial_quads: List[Quad] = []
for vendor_name in base_vendor_names:
# Create *trial_excluded_vendor_names* which is a copy
# of *excluded_vendor_names* plus *vendor_name*:
trial_excluded_vendor_names: Dict[str, None] = dict(excluded_vendor_names)
trial_excluded_vendor_names[vendor_name] = None
# Get the base cost for *trial_excluded_vendor_names*
# and tack it onto *trial_quads*:
trial_quad: Quad = order.quad_compute(choice_parts, trial_excluded_vendor_names,
vendor_name)
trial_quads.append(trial_quad)
# For debugging only:
# trial_cost = trial_quad[0]
# trial_vendor_name = trial_quad[1]
# print(" {0:.2f} with {1} excluded".
# format(trial_cost, trial_vendor_name))
# Sort the *trial_quads* to bring the most interesting one to the front:
trial_quads.sort(key=lambda quad: (quad[0], quad[1]))
# For debugging:
# for trial_quad in trial_quads:
# print(" {0}".format(trial_quad))
# Quickly ignore all vendors that have zero cost savings:
while len(trial_quads) >= 2:
# We want to ensure that *trial_quads* always has at least 2
# entries for so that the next step after this loop will be
# guaranteed to have at least one entry in *trial_quads*:
lowest_quad = trial_quads[0]
lowest_cost = lowest_quad[1]
lowest_vendor_name = lowest_quad[3]
savings = lowest_cost - base_cost
if savings == 0.0:
# This vendor offers no savings; get rid of the vendor:
# print("trail_quads[0]={0}".format(trial_quads))
reduced_vendor_messages.append("Excluding '{0}': saves nothing\n".format(
lowest_vendor_name, savings))
excluded_vendor_names[lowest_vendor_name] = None
del trial_quads[0]
else:
# We are done skipping over zero *savings*:
break
assert len(trial_quads) >= 1
# Grab some values from *lowest_quad*:
lowest_quad = trial_quads[0]
lowest_cost = lowest_quad[1]
lowest_vendor_name = lowest_quad[3]
# lowest_vendor_name = text_filter(lowest_vendor_name, str.isprintable)
savings = lowest_cost - base_cost
print(" Price is ${0:.2f} when '{1}' is excluded".
format(lowest_cost, lowest_vendor_name))
# We use $15.00 as an approximate minimum shipping cost.
# If the savings is less that the shipping cost, we exclude
# the vendor:
if savings < 15.0 and len(trial_quads) >= 2 and lowest_vendor_name != "Digi-Key":
# The shipping costs are too high and there at least one
# vendor left; exclude this vendor:
message: str = ("Excluding '{0}': only saves {1:.2f}".
format(lowest_vendor_name, savings))
reduced_vendor_messages.append(message + '\n')
if tracing:
print(message)
excluded_vendor_names[lowest_vendor_name] = None
else:
# We are done when *lowest_quad* is worth shipping:
# print("lowest_cost={0:.2f}".format(lowest_cost))
done = True
# Order.exclude_vendors_with_high_minimums():
def exclude_vendors_with_high_minimums(self, choice_parts: "List[ChoicePart]",
excluded_vendor_names: Dict[str, None],
reduced_vendor_messages: List[str]) -> None:
""" *Order*: Sweep through *choice* parts and figure out if the
vendors with large minimum orders can be dropped:
"""
# Grab table of *vendor_minimums* from *order*:
order: Order = self
vendor_minimums: Dict[str, float] = order.vendor_minimums
        # Now visit each vendor and decide if we should drop it because
        # its minimum order requirement costs too much to meet:
tracing: str = tracing_get()
vendor_name: str
for vendor_name in vendor_minimums.keys():
# Grab the *vendor_minimum_cost*:
vendor_minimum_cost: float = vendor_minimums[vendor_name]
            # Compute *vendor_total_cost* by visiting each *choice_part*
            # to figure out if it has been selected from *vendor_name*:
            vendor_total_cost: float = 0.0
for choice_part in choice_parts:
choice_part.select(excluded_vendor_names)
if choice_part.selected_vendor_name == vendor_name:
vendor_total_cost += choice_part.selected_total_cost
            # If the order amount for this vendor does not reach the minimum,
            # exclude *vendor_name*:
if vendor_total_cost < vendor_minimum_cost:
excluded_vendor_names[vendor_name] = None
reduced_vendor_messages.append(
f"{tracing}Excluding '{vendor_name}': needed order {vendor_total_cost}"
f" < minimum order {vendor_minimum_cost}\n")
# Order.final_choice_parts_compute():
@trace(1)
def final_choice_parts_compute(self, collections: Collections) -> "List[ChoicePart]":
""" *Order*: Return a list of final *ChoicePart* objects to order
            for the *Order* object (i.e. *self*). This routine also
has the side effect of looking up the vendor information for
each selected *ChoicePart* object.
"""
# Grab the some values from *order* (i.e. *self*):
order: Order = self
projects: List[Project] = order.projects
excluded_vendor_names: Dict[str, None] = order.excluded_vendor_names
        # Construct the *project_parts_table* (Dict[name, List[ProjectPart]]) so that
        # we have a mapping from each part name to its list of *ProjectPart*'s:
project_parts_table: Dict[str, List[ProjectPart]] = {}
project_index: int
project: Project
tracing: str = tracing_get()
for project_index, project in enumerate(projects):
if tracing:
print(f"{tracing}Project[{project_index}]:'{project.name}'")
# Make sure that each *project_part* in *project* is on a list somewhere
# in the *project_parts_table*:
project_parts: List[ProjectPart] = project.project_parts
project_part_index: int
project_part: ProjectPart
for project_part_index, project_part in enumerate(project_parts):
assert isinstance(project_part, ProjectPart), (f"type(project_part)="
f"{type(project_part)}")
if tracing:
print(f"{tracing}ProjectPart[{project_part_index}]:'{project_part.name}'")
project_part_name: str = project_part.name
if project_part_name not in project_parts_table:
project_parts_table[project_part_name] = [project_part]
else:
project_parts_table[project_part_name].append(project_part)
        # Now construct the *final_choice_parts* list, where each *choice_part* on
        # the list consists of a list of *project_parts* and *searches* whose
        # names all match *search_name*:
final_choice_parts: List[ChoicePart] = []
pairs: List[Tuple[str, List[ProjectPart]]] = list(project_parts_table.items())
pairs.sort(key=lambda pair: pair[0])
search_name: str
for search_name, project_parts in pairs:
if tracing:
print(f"{tracing}search_name='{search_name}'")
assert len(project_parts) >= 1
searches: List[Search] = collections.searches_find(search_name)
if searches:
assert len(project_parts) >= 1, "Empty project_parts?"
search: Search
for search in searches:
assert search.name == search_name
for project_part in project_parts:
assert project_part.name == search_name, (f"'{search_name}'!="
f"'{project_part.name}'")
choice_part: ChoicePart = ChoicePart(search_name, project_parts, searches)
final_choice_parts.append(choice_part)
else:
print(f"{tracing}Could not find a search that matches part '{search_name}'")
# Now load the associated *actual_parts* into each *choice_part* from *final_choice_parts*:
for choice_part in final_choice_parts:
# Refresh the vendor part cache for each *actual_part*:
new_actual_parts: List[ActualPart] = collections.actual_parts_lookup(choice_part)
            # Get reasonably up-to-date pricing and availability information about
            # each *ActualPart* in *new_actual_parts*. *order* is needed to locate
            # where the cached information is:
choice_part_name: str = choice_part.name
choice_part.vendor_parts_refresh(new_actual_parts, order, choice_part_name)
# Stuff *final_choice_parts* back into *order*:
final_choice_parts.sort(key=lambda final_choice_part: final_choice_part.name)
order.final_choice_parts = final_choice_parts
final_choice_part: ChoicePart
for final_choice_part in final_choice_parts:
final_choice_part.select(excluded_vendor_names, True)
if False:
# Old code:
# Grab some values from *project*:
pose_parts = project.all_pose_parts
project_parts = project.project_parts
project_parts_table = project.project_parts_table
# Sort *pose_parts* by letters first followed by integers:
pose_parts.sort(key=lambda pose_part: (
text_filter(pose_part.reference, str.isalpha).upper(),
int(text_filter(pose_part.reference, str.isdigit))))
choice_parts_table = None
assert False
for project_part_index, project_part in enumerate(project_parts):
# Grab some values from *project_part*:
project_part_name = project_part.name
collection = None
assert False
searches = collection.searches_find(project_part_name)
choice_part = ChoicePart()
choice_parts = project_part.choice_parts()
for choice_part_index, choice_part in enumerate(choice_parts):
# Do some consistency checking:
choice_part_name = choice_part.name
assert isinstance(choice_part, ChoicePart), ("Not a choice part "
f"'{choice_part_name}'")
if tracing:
print(f"{tracing} ChoicePart[{choice_part_index}]:'{choice_part_name}'")
# Make sure *choice_part* is in *choice_parts_table*
# exactly once:
if choice_part_name not in choice_parts_table:
choice_parts_table[choice_part_name] = choice_part
# print(("Order.final_choice_parts_compute():" +
# " Insert {0:s} into table under key {1} (size={2})").format(
# choice_part, choice_part_name, len(choice_parts_table)))
# else:
# print("Order.final_choice_parts_compute(): Key {0} in table".format(
# choice_part_name))
# Remember *pose_part* in *choice_part*:
# choice_part.pose_part_append(pose_part)
        # Sort *final_choice_parts* by name and stuff back into *order*:
# final_choice_parts = list(choice_parts_table.values())
# final_choice_parts.sort(key=lambda choice_part: choice_part.name)
# order.final_choice_parts = final_choice_parts
# Sweep through *final_choice_parts* and force the associated
# *PosePart*'s to be in a reasonable order:
for choice_part in final_choice_parts:
# Make sure that we only have *ChoicePart* objects:
assert isinstance(choice_part, ChoicePart)
choice_part.pose_parts_sort()
return final_choice_parts
# Order.footprints_check():
def footprints_check(self, final_choice_parts: "List[ChoicePart]") -> None:
""" *Order*: Verify that footprints exist. """
assert False, "Old Code"
# Visit each *project_part* in all of the *projects*:
kicad_footprints = {}
for project in self.projects:
for pose_part in project.all_pose_parts:
assert isinstance(pose_part, PosePart)
project_part = pose_part.project_part
assert isinstance(project_part, ProjectPart)
project_part.footprints_check(kicad_footprints)
# Sweep through aliases:
# while isinstance(project_part, AliasPart):
# alias_part = project_part
# project_parts = alias_part.project_parts
# # Conceptually, alias parts can reference one or more parts.
# # For now, assume it is 1-to-1:
# assert len(project_parts) == 1, \
# "Multiple Schematic Parts for {0}".format(alias_part.base_name)
# project_part = project_parts[0]
# assert isinstance(project_part, ProjectPart)
# assert not isinstance(project_part, AliasPart)
# Dispatch on type of *project_part*. This really should be done with
# with a method:
# if isinstance(project_part, FractionalPart):
# fractional_part = project_part
# # print("{0} is fractional {1}".
# # format(fractional_part.base_name, fractional_part.kicad_footprint))
# kicad_footprints[fractional_part.kicad_footprint] = \
# project_part.name
# elif isinstance(project_part, ChoicePart):
# choice_part = project_part
# # print("{0} is choice".format(choice_part.base_name))
# kicad_footprint = choice_part.kicad_footprint
# kicad_footprints[kicad_footprint] = project_part.name
# else:
# print("{0} is ??".format(project_part.base_name))
# assert False
# Now verify that each footprint exists:
sorted_kicad_footprints = sorted(kicad_footprints.keys())
for footprint_name in sorted_kicad_footprints:
footprint_path = "pretty/{0}.kicad_mod".format(footprint_name)
if not os.path.isfile(footprint_path):
print("Footprint '{0}' does not exist for '{1}'".
format(footprint_path, kicad_footprints[footprint_name]))
# Order.positions_process():
def positions_process(self) -> None:
""" *Order*: Process any Pick and Place `.csv` or `.pos` file.
"""
order: Order = self
projects = order.projects
project: Project
for project in projects:
project.positions_process()
# Order.process():
@trace(1)
def process(self, collections: Collections) -> None:
""" *Order*: Process the *Order* object (i.e. *self*.) """
# Grab some values from *order* (i.e. *self*):
order: Order = self
excluded_vendor_names: Dict[str, None] = order.excluded_vendor_names
# print("=>Order.process()")
# Collect the messages from each vendor reduction operation into *reduced_vendor_messages*:
reduced_vendor_messages: List[str] = []
        # We need to construct a list of *ChoicePart* objects. This
# will land in *final_choice_parts* below. Only *ChoicePart*
# objects can actually be ordered because they list one or
# more *ActualPart* objects to choose from. Both *AliasPart*
# objects and *FractionalPart* objects eventually get
# converted to *ChoicePart* objects. Once we have
        # *final_choice_parts*, it can be sorted in various ways
# (by vendor, by cost, by part_name, etc.)
final_choice_parts: List[ChoicePart] = order.final_choice_parts_compute(collections)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}A:len(final_choice_parts)={len(final_choice_parts)}")
# excluded_vendor_names = order.excluded_vendor_names
# selected_vendor_names = order.selected_vendor_names
# if selected_vendor_names is not None:
# all_vendor_names = order.vendor_names_get(final_choice_parts, excluded_vendor_names)
# for vendor_name in all_vendor_names:
# if vendor_name not in selected_vendor_names:
# excluded_vendor_names[vendor_name] = None
# else:
# # Now we winnow down the total number of vendors to order from
# # to 1) minimize the number of orders that can be messed up
# # (i.e. supply chain simplication) and to save shipping costs.
# # There are two steps -- throw out vendors with excessive minimum
# # order amounts followed by throwing out vendors where the savings
# # do not exceed additional shipping costs.
# #order.exclude_vendors_with_high_minimums(
# # final_choice_parts, excluded_vendor_names, reduced_vendor_messages)
# pass
if tracing:
print(f"{tracing}B:len(final_choice_parts)={len(final_choice_parts)}")
# order.exclude_vendors_with_high_minimums(final_choice_parts, excluded_vendor_names,
# reduced_vendor_messages)
order.exclude_vendors_to_reduce_shipping_costs(final_choice_parts, excluded_vendor_names,
reduced_vendor_messages)
if tracing:
print(f"{tracing}C:len(final_choice_parts)={len(final_choice_parts)}")
# Write out *reduced_vendor_messages* to a report file:
order_root: str = order.order_root
reduced_vendor_messages_file_name = os.path.join(order_root, "vendor_reduction_report.txt")
reduced_vendor_messages_file: IO[str]
with open(reduced_vendor_messages_file_name, "w") as reduced_vendor_messages_file:
reduced_vendor_message: str
for reduced_vendor_message in reduced_vendor_messages:
reduced_vendor_messages_file.write(reduced_vendor_message)
if tracing:
print(f"{tracing}D:len(final_choice_parts)={len(final_choice_parts)}")
# Let the user know how many vendors were eliminated:
reduced_vendor_messages_size: int = len(reduced_vendor_messages)
if reduced_vendor_messages_size >= 1:
print(f"{tracing}{reduced_vendor_messages_size} vendors eliminated. "
f"See '{reduced_vendor_messages_file_name}' file for why.")
if tracing:
print(f"{tracing}E:len(final_choice_parts)={len(final_choice_parts)}")
# Check for missing footprints:
# order.footprints_check(final_choice_parts)
# order.positions_process()
if tracing:
print(f"{tracing}F:len(final_choice_parts)={len(final_choice_parts)}")
# Print out the final selected vendor summary:
order.summary_print(final_choice_parts, excluded_vendor_names)
# Generate the bom file reports for *self.final_choice_parts*:
order.final_choice_parts = final_choice_parts
order.bom_write(os.path.join(order_root, "bom_by_price.txt"), lambda choice_part:
(choice_part.selected_total_cost,
choice_part.selected_vendor_name,
choice_part.name))
order.bom_write(os.path.join(order_root, "bom_by_vendor.txt"), lambda choice_part:
(choice_part.selected_vendor_name,
choice_part.selected_total_cost,
choice_part.name))
order.bom_write(os.path.join(order_root, "bom_by_name.txt"), lambda choice_part:
(choice_part.name,
choice_part.selected_vendor_name,
choice_part.selected_total_cost))
order.csv_write()
# Write a part summary file for each project:
project: Project
for project in order.projects:
project.assembly_summary_write(final_choice_parts, order)
# Now generate a BOM summary:
if False:
total_cost = 0.0
for choice_part in final_choice_parts:
# Open *csv_file* for summary spread sheet:
csv_file = open(os.path.join(order_root, "order.csv"), "w")
# Output a one line header
csv_file.write("Quantity,Vendor Part Name,Reference\n")
# Select the vendor_part and associated quantity/cost
choice_part.select(excluded_vendor_names, False)
# selected_actual_part = choice_part.selected_actual_part
selected_vendor_part = choice_part.selected_vendor_part
selected_order_quantity = choice_part.selected_order_quantity
selected_total_cost = choice_part.selected_total_cost
selected_price_break_index = choice_part.selected_price_break_index
# Per vendor order lists need some more thought:
if isinstance(selected_vendor_part, VendorPart):
vendor_name = selected_vendor_part.vendor_name
assert False
vendor_files = None
if vendor_name not in vendor_files:
# csv_vendor_name = vendor_name.replace(' ', '_').replace('&', '+')
csv_file = open("{0}.csv".format(vendor_name), "wa")
vendor_files[vendor_name] = csv_file
else:
csv_file = vendor_files[vendor_name]
# Print out the *price breaks* on each side of the
# *selected_price_breaks_index*:
price_breaks = selected_vendor_part.price_breaks
# print("len(price_breaks)={0} selected_price_break_index={1}".
# format(len(price_breaks), selected_price_break_index))
# selected_price_break = price_breaks[selected_price_break_index]
minimum_index = max(selected_price_break_index - 1, 0)
maximum_index = min(selected_price_break_index + 2, len(price_breaks))
price_breaks = price_breaks[minimum_index: maximum_index]
# Compute the *price_breaks_text*:
price_breaks_text = ""
for price_break in price_breaks[minimum_index: maximum_index]:
price_breaks_text += "{0}/${1:.3f} ".format(
price_break.quantity, price_break.price)
# Print out the line:
# print(" {0}:{1} {2}".format(
# selected_vendor_part.vendor_name,
# selected_vendor_part.vendor_part_name, price_breaks_text))
# Print out the result:
# print(" {0}@({1}/${2:.3f})={3:.2f}".format(
# selected_order_quantity,
# selected_price_break.quantity, selected_price_break.price,
# selected_total_cost))
total_cost += selected_total_cost
# Write out another line in the *csv_file*:
csv_file.write("{0},{1},{2}\n".format(
selected_order_quantity,
selected_vendor_part.vendor_part_name,
choice_part.name))
# Close all the vendor files:
for csv_file in vendor_files.values():
csv_file.close()
# Order.quad_compute():
def quad_compute(self, choice_parts: "List[ChoicePart]",
excluded_vendor_names: Dict[str, None],
excluded_vendor_name: str, trace: str = "") -> Quad:
""" *Order*: Return quad tuple of the form:
(*missing_parts*, *total_cost*,
*vendor_priority*, *excluded_vendor_name*) where:
          * *missing_parts* is the number of parts that cannot be fulfilled.
* *total_cost* is the sum the parts costs for all *ChoicePart*
objects in *choice_parts* that do not use any vendors in
*excluded_vendor_names*.
* *vendor_priority* is a sort order for *excluded_vendor_name*.
* *excluded_vendor_name* is the current vendor that is excluded.
          The returned key is structured to sort so that the most interesting
          vendor to exclude sorts to the first item.
"""
order: Order = self
missing_parts: int = 0
total_cost: float = 0.0
choice_part: ChoicePart
for choice_part in choice_parts:
# Perform the vendor selection excluding all vendors in
# *excluded_vendor_names*:
missing_parts += choice_part.select(excluded_vendor_names, False)
# Grab some values out of *choice_part*:
# selected_vendor_name = choice_part.selected_vendor_name
selected_total_cost = choice_part.selected_total_cost
# Keep a running total of everything:
total_cost += selected_total_cost
# Figure out *vendor_priority* for *excluded_vendor_name*:
vendor_priorities: Dict[str, int] = order.vendor_priorities
if excluded_vendor_name in vendor_priorities:
# Priority already assigned to *excluded_vendor_name*:
vendor_priority = vendor_priorities[excluded_vendor_name]
else:
# Assigned a new priority for *excluded_vendor_name*:
vendor_priority = order.vendor_priority
vendor_priorities[excluded_vendor_name] = vendor_priority
order.vendor_priority += 1
# Return the final *quad*:
quad: Quad = (missing_parts, total_cost, vendor_priority, excluded_vendor_name)
return quad
# Order.summary_print():
@trace(1)
def summary_print(self, choice_parts: "List[ChoicePart]",
excluded_vendor_names: Dict[str, None]) -> None:
""" *Order*: Print a summary of the selected vendors.
"""
# Let the user know what we winnowed the vendor list down to:
final_vendor_names: List[str] = self.vendor_names_get(choice_parts, excluded_vendor_names)
# Print the final *total_cost*:
total_cost: float = 0.0
choice_part: ChoicePart
for choice_part in choice_parts:
choice_part.select(excluded_vendor_names, False)
total_cost += choice_part.selected_total_cost
print("Total Cost: ${0:.2f}".format(total_cost))
# Print out the sub-totals for each vendor:
print("Final selected vendors:")
        vendor_name: str
for vendor_name in final_vendor_names:
vendor_cost: float = 0.0
for choice_part in choice_parts:
if choice_part.selected_vendor_name == vendor_name:
vendor_cost += choice_part.selected_total_cost
print(" {0}: ${1:.2f}".format(vendor_name, vendor_cost))
# Order.vendor_exclude():
def vendor_exclude(self, vendor_name: str) -> None:
""" *Order*: Exclude *vendor_name* from the *Order* object (i.e. *self*)
"""
# Mark *vendor_name* from being selectable:
self.excluded_vendor_names[vendor_name] = None
# Order.vendor_names_get():
def vendor_names_get(self, choice_parts: "List[ChoicePart]",
excluded_vendor_names: Dict[str, None]) -> List[str]:
""" *Order*: Return all possible vendor names for *choice_parts*:
"""
# Load up *vendor_names_table*:
vendor_names_table: Dict[str, None] = {}
choice_part: ChoicePart
for choice_part in choice_parts:
choice_part.vendor_names_load(vendor_names_table, excluded_vendor_names)
# Return the sorted list of vendor names:
return list(sorted(vendor_names_table.keys()))
# Order.vendors_select():
def vendors_select(self, selected_vendor_names: List[str]) -> None:
""" *Order*: Force the selected vendors for the *order* object (i.e. *self*)
            to *selected_vendor_names*.
"""
# Stuff *selected_vendor_names* into *order* (i.e. *self*):
order: Order = self
order.selected_vendor_names = selected_vendor_names
# Panda:
class Panda:
# Panda stands for Pricing AND Availability:
# Panda.__init__():
def __init__(self, name: str) -> None:
# Stuff values into *panda* (i.e. *self*):
# panda = self
self.name = name
# Panda.__str__():
def __str__(self) -> str:
panda: Panda = self
name: str = "??"
if hasattr(panda, "name"):
name = panda.name
return f"Panda({name})"
# Panda.vendor_parts_lookup():
def vendor_parts_lookup(self, actual_part, part_name) -> "List[VendorPart]":
panda: Panda = self
class_name: str = panda.__class__.__name__
assert False, f"{class_name}.vendor_parts_lookup() has not been implemented"
return list()
# Parameter():
class Parameter:
# Parameter.__init__():
def __init__(self, name: str, type: str, csv: str, csv_index: int, default: str, optional: bool,
comments: List[ParameterComment], enumerations: List[Enumeration]) -> None:
# Load values into *parameter* (i.e. *self*):
# parameter: Parameter = self
self.comments: List[ParameterComment] = comments
self.csv: str = csv
self.csv_index: int = csv_index
self.default: str = default
self.enumerations: List[Enumeration] = enumerations
self.name: str = name
self.optional: bool = optional
self.type: str = type
self.use: bool = False
    # Parameter.__eq__():
def __eq__(self, parameter2: object) -> bool:
# print("=>Parameter.__eq__()")
# Compare each field of *parameter1* (i.e. *self*) with the corresponding field
# of *parameter2*:
equal: bool = False
if isinstance(parameter2, Parameter):
parameter1: Parameter = self
name_equal: bool = (parameter1.name == parameter2.name)
default_equal: bool = (parameter1.default == parameter2.default)
type_equal: bool = (parameter1.type == parameter2.type)
optional_equal: bool = (parameter1.optional == parameter2.optional)
comments_equal: bool = (parameter1.comments == parameter2.comments)
enumerations_equal: bool = (parameter1.enumerations == parameter2.enumerations)
equal = (name_equal and default_equal and type_equal and
optional_equal and comments_equal and enumerations_equal)
# Debugging code:
# print("name_equal={0}".format(name_equal))
# print("default_equal={0}".format(default_equal))
# print("type_equal={0}".format(type_equal))
# print("optional_equal={0}".format(optional_equal))
# print("comments_equal={0}".format(comments_equal))
# print("enumerations_equal={0}".format(enumerations_equal))
# print("<=Parameter.__eq__()=>{0}".format(all_equal))
return equal
# Parameter.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *parameter* (i.e. *self*):
parameter: Parameter = self
csv: str = parameter.csv
csv_index: int = parameter.csv_index
default: str = parameter.default
name: str = parameter.name
optional: bool = parameter.optional
type: str = parameter.type
# Start with the `<Parameter ...> XML element:
optional_text: str = ' optional="true"' if optional else ''
        default_text: str = f' default="{default}"' if default else ''
xml_line: str = (f'{indent}<Parameter'
f' name="{name}"'
f' type="{type}"'
f' csv="{csv}"'
f' csv_index="{csv_index}"'
f'{default_text}'
f'{optional_text}'
'>')
xml_lines.append(xml_line)
        # Append all of the *comments*:
        comments: List[ParameterComment] = parameter.comments
        comment: ParameterComment
        comment_indent = indent + "    "
        xml_lines.append(f'{indent}  <ParameterComments>')
        for comment in comments:
            comment.xml_lines_append(xml_lines, comment_indent)
        xml_lines.append(f'{indent}  </ParameterComments>')
# Append all of the *enumerations*:
enumerations: List[Enumeration] = parameter.enumerations
if enumerations:
xml_lines.append(f'{indent} <Enumerations>')
enumeration_indent = indent + " "
enumeration: Enumeration
for enumeration in enumerations:
enumeration.xml_lines_append(xml_lines, enumeration_indent)
xml_lines.append(f'{indent} </Enumerations>')
# Close out with the `</Parameter>` XML element:
xml_lines.append(f'{indent}</Parameter>')
# Parameter.xml_parse():
@staticmethod
def xml_parse(parameter_tree: etree._Element) -> "Parameter":
assert parameter_tree.tag == "Parameter"
attributes_table: Dict[str, str] = parameter_tree.attrib
assert "name" in attributes_table
name: str = attributes_table["name"]
assert "type" in attributes_table
type: str = attributes_table["type"].lower()
optional: bool = False
if "optional" in attributes_table:
optional_text: str = attributes_table["optional"].lower()
assert optional_text in ("true", "false")
optional = (optional_text == "true")
csv: str = attributes_table["csv"] if "csv" in attributes_table else ""
csv_index: int = (int(attributes_table["csv_index"])
if "csv_index" in attributes_table else -1)
default: str = attributes_table["default"] if "default" in attributes_table else ""
parameter_tree_elements: List[etree._Element] = list(parameter_tree)
assert parameter_tree_elements
comments_tree: etree._Element = parameter_tree_elements[0]
assert comments_tree.tag == "ParameterComments"
assert not comments_tree.attrib
comments: List[ParameterComment] = list()
comment_tree: etree._Element
for comment_tree in comments_tree:
comment: ParameterComment = ParameterComment.xml_parse(comment_tree)
comments.append(comment)
enumerations: List[Enumeration] = list()
if type == "enumeration":
assert len(parameter_tree_elements) == 2
enumerations_tree: etree._Element = parameter_tree_elements[1]
assert len(enumerations_tree.attrib) == 0
assert enumerations_tree.tag == "Enumerations"
assert len(enumerations_tree) >= 1
for enumeration_tree in enumerations_tree:
enumeration: Enumeration = Enumeration.xml_parse(enumeration_tree)
enumerations.append(enumeration)
else:
assert len(parameter_tree_elements) == 1
# Finally, create *parameter* and return it:
parameter: Parameter = Parameter(name, type, csv, csv_index,
default, optional, comments, enumerations)
return parameter
# PosePart:
class PosePart:
# A PosePart basically specifies the binding of a ProjectPart
    # and its associated schematic reference. Reference strings must
# be unique for a given project.
# PosePart.__init__():
def __init__(self, project: "Project", project_part: "ProjectPart", reference: str,
comment: str) -> None:
""" Initialize *PosePart* object (i.e. *self*) to contain *project*,
*project_part*, *reference*, and *comment*.
"""
# Load up *pose_part* (i.e. *self*):
# pose_part: PosePart = self
self.project: Project = project
self.project_part: ProjectPart = project_part
self.reference: str = reference
self.comment: str = comment
self.install: bool = (comment != "DNI")
# PosePart.__str__():
def __str__(self) -> str:
reference: str = "??"
pose_part: PosePart = self
if hasattr(pose_part, "reference"):
reference = pose_part.reference
return f"PosePart('{reference}')"
# PosePart.check():
def check(self, collections: Collections) -> None:
# Grab some values from *pose_part* (i.e. *self*):
pose_part: PosePart = self
reference: str = pose_part.reference
project: Project = pose_part.project
project_name: str = project.name
# Check the underlying *project_part*:
project_part: ProjectPart = pose_part.project_part
search_name: str = project_part.name
collections.check(search_name, project_name, reference)
# PositionRow:
class PositionRow:
""" PositionRow: Represents one row of data for a *PositionsTable*: """
# PositionRow.__init__():
def __init__(self, reference: str, value: str, package: str, x: float, y: float,
rotation: float, feeder_name: str, pick_dx: float, pick_dy: float,
side: str, part_height: float) -> None:
""" *PositionRow*: ...
"""
# Load up *position_row* (i.e. *self*):
# position_row: PositionRow = self
self.package: str = package
self.part_height: float = part_height
self.feeder_name: str = feeder_name
self.rotation: float = rotation
self.reference: str = reference
self.side: str = side
self.value: str = value
self.x: float = x - pick_dx
self.y: float = y - pick_dy
self.pick_dx: float = pick_dx
        self.pick_dy: float = pick_dy
# PositionRow:__str__():
def __str__(self) -> str:
reference = "??"
position_row: PositionRow = self
if hasattr(position_row, "reference"):
reference = position_row.reference
return f"PositionRow('{reference}')"
# PositionsRow.as_strings():
def as_strings(self, mapping: List[int], feeders: Dict[str, None]) -> List[str]:
""" *PositionsRow*: Return a list of formatted strings.
The arguments are:
* *mapping*: The order to map the strings in.
"""
positions_row: PositionRow = self
value: str = positions_row.value
if value not in feeders:
print(f"There is no feeder for '{value}'")
row_strings = [""] * 7
row_strings[mapping[0]] = positions_row.reference
row_strings[mapping[1]] = positions_row.value
row_strings[mapping[2]] = positions_row.package
row_strings[mapping[3]] = "{0:.4f}".format(positions_row.x)
row_strings[mapping[4]] = "{0:.4f}".format(positions_row.y)
row_strings[mapping[5]] = "{0:.4f}".format(positions_row.rotation)
row_strings[mapping[6]] = positions_row.side
return row_strings
# PositionsRow.part_rotate():
def part_rotate(self, rotation_adjust: float) -> None:
""" *PostitionRow*: """
position_row: PositionRow = self
rotation: float = position_row.rotation
rotation -= rotation_adjust
while rotation < 0.0:
rotation += 360.0
while rotation > 360.0:
rotation -= 360.0
position_row.rotation = rotation
# PositionsRow.translate():
def translate(self, dx: float, dy: float) -> None:
"""
"""
position_row: PositionRow = self
position_row.x += dx
position_row.y += dy
# PositionsTable:
class PositionsTable:
def __init__(self, positions_file_name: str) -> None:
# positions_table: PositionsTable = self
self.positions_file_name: str = positions_file_name
# """ PositionsTable: Represents a part positining table for a Pick and Place machine. """
#
# # PositionsTable.__init__():
# def __init__(self, file_name: str, database):
# """ *PositionsTable*: Initialize the *PositionsTable* object read in from *file_name*:
#
# The arguments are:
# * *file_name*: The file name to read positions table from. *file_name* must
# have one of the following suffixes:
# * `.csv`: A comma separated value format.
# * `.pos`: A text file with the columns separated by one or more spaces.
    #             Usually the columns are aligned vertically when viewed using a fixed
# pitch font.
# """
#
# # Verify argument types:
# assert isinstance(file_name, str) and (
# file_name.endswith(".csv") or file_name.endswith(".pos"))
#
# #
# positions_table = self
# comments = list()
# heading_indices = dict()
# rows = list()
# row_table = dict()
# mapping = list()
# trailers = list()
# headings_line = None
# headings = list()
# headings_size = 0
# part_heights = dict()
#
# # Process `.pos` and `.csv` *file_name*'s differently:
# if file_name.endswith(".pos"):
# # `.pos` suffix:
#
# # Read in *file_name* and break it into a *lines* list with no carriage returns:
# with open(file_name, "r") as positions_file:
# content = positions_file.read().replace('\r', "")
# lines = content.split('\n')
#
# # Start parsing the file *lines*:
# for line_number, line in enumerate(lines):
# # Dispatch on the beginning of the *line*:
# if line.startswith("##"):
# # Lines starting with "##" or "###" are just *comments*:
# if headings_size <= 0:
# comments.append(line)
# else:
# trailers.append(line)
# # print("comment='{0}'".format(line))
# elif line.startswith("# "):
# # Lines that start with "# " are the column headings:
# assert headings_size <= 0
# headings_line = line
# headings = line[2:].split()
# headings_size = len(headings)
# assert headings_size > 0
# for heading_index, heading in enumerate(headings):
# heading_indices[heading] = heading_index
# # print("key='{0}' index={1}".format(key, heading_index))
#
# # Create the *mapping* used for formatting the output table:
# heading_keys = ("Ref", "Val", "Package", "PosX", "PosY", "Rot", "Side")
# for heading in heading_keys:
# heading_index = heading_indices[heading]
# mapping.append(heading_index)
# # print("mapping={0}".format(mapping))
# else:
# # Assume that everything else is a row of data:
# columns = line.split()
# columns_size = len(columns)
# if columns_size == headings_size:
# # print("row={0}".format(row))
# reference = columns[heading_indices["Ref"]]
# value = columns[heading_indices["Val"]]
# value = value.replace('\\', "")
# part = database.lookup(value)
# if isinstance(part, ChoicePart):
# choice_part = part
# feeder_name = choice_part.feeder_name
# part_height = choice_part.part_height
# pick_dx = choice_part.pick_dx
# pick_dy = choice_part.pick_dy
# if isinstance(feeder_name, str) and isinstance(part_height, float):
# # print("'{0}'=>'{1}''".format(value, feeder_name))
# value = feeder_name
# part_heights[value] = part_height
# # print("part_heights['{0}'] = {1}".format(value, part_height))
# elif isinstance(part, AliasPart):
# alias_part = part
# feeder_name = alias_part.feeder_name
# part_height = alias_part.part_height
# pick_dx = alias_part.pick_dx
# pick_dy = alias_part.pick_dy
# if isinstance(feeder_name, str) and isinstance(part_height, float):
# # print("'{0}'=>'{1}''".format(value, feeder_name))
# value = feeder_name
# part_heights[value] = part_height
# # print("part_heights['{0}'] = {1}".format(value, part_height))
# package = columns[heading_indices["Package"]]
# x = float(columns[heading_indices["PosX"]])
# y = float(columns[heading_indices["PosY"]])
# rotation = float(columns[heading_indices["Rot"]])
# side = columns[heading_indices["Side"]]
# if isinstance(part_height, float):
# row = PositionRow(reference, value, package, x, y, rotation,
# feeder_name, pick_dx, pick_dy, side, part_height)
# rows.append(row)
# row_table[reference] = row
# else:
# print("Part '{0}' does not have a part_height".format(value))
# elif columns_size != 0:
# assert False, "Row/Header mismatch {0} {0}".format(row) # , headers)
# elif file_name.endswith(".csv"):
# assert ".csv reader not implemented yet."
# else:
# assert "Bad file suffix for file: '{0}'".format(file_name)
#
# feeders = {
# "1uF": "E1",
# "2N7002": "E2",
# "27K": "E4",
# "0": "E5",
# "33": "E7",
# "4.7uF_?": "E11",
# "1.0M": "E14",
# "49.9K": "E16",
# "200K": "E19",
# "BAT54": "E21",
# "0.1uF": "W4",
# "49.9": "W6",
# "20k": "W7",
# "330": "W10",
# "10K": "W11",
# "100": "W15",
# "100K": "W16",
# "5.1K": "W17",
# "GRN_LED": "W14",
# "470": "W20",
# "10uF": "W21",
# "120": "W22",
# "4.7k": "W23",
# "220": "W24",
#
# "33nF": "E100",
# "470nF": "E101",
# "10nF": "E102",
# "NFET_10A": "E103",
# "330nF": "E104",
# "18V_ZENER": "E105",
# "SI8055": "E106",
# "MC33883": "E107",
# "MCP2562": "E108",
# "18V_REG": "E109",
# "74HC08": "E110",
# "OPTOISO2": "E111",
# "5V_REG/LDO": "E112",
# "3.3V_LDO": "E113",
# "FID": "E114",
# "10": "E115",
# }
#
# # Write out `feeders.txt`:
# footprints = database.footprints
# quintuples = list()
# for key in feeders.keys():
# feeder_name = feeders[key]
# part_height = "{0:.2f}".format(part_heights[key]) if key in part_heights else "----"
# rotation = int(footprints[key].rotation) if key in footprints else "----"
# quintuple = (feeder_name[0], int(feeder_name[1:]), key, part_height, rotation)
# quintuples.append(quintuple)
# quintuples.sort()
# order_root = None
# feeders_file_name = os.path.join(order_root, "feeders.txt")
# with open(feeders_file_name, "w") as feeders_file:
# feeders_file.write("Feeder\tHeight\tRotate\tValue\n")
# feeders_file.write(('=' * 50) + '\n')
# for quintuple in quintuples:
# side = quintuple[0]
# number = quintuple[1]
# value = quintuple[2]
# part_height = quintuple[3]
# rotation = quintuple[4]
# feeders_file.write(f"{side}{number}:\t{part_height}\t{rotation}\t{value}\n")
#
# # Fill in the value of *positions_table* (i.e. *self*):
# positions_table.comments = comments
# positions_table.headings_line = headings_line
# positions_table.headings = headings
# positions_table.headings_indices = heading_indices
# positions_table.feeders = feeders
# positions_table.file_name = file_name
# positions_table.mapping = mapping
# positions_table.rows = rows
# positions_table.row_table = row_table
# positions_table.trailers = trailers
#
# # PositionsTable.footprints_rotate():
# def footprints_rotate(self, database):
# """ *Positions_Table: ..."""
#
# order_root = None
# positions_table = self
# file_name = positions_table.file_name
# footprints = database.footprints
# rows = positions_table.rows
# for row_index, row in enumerate(rows):
# feeder_name = row.feeder_name
# debugging = False
# # debugging = package == "DPAK"
# # print("Row[{0}]: '{1}'".format(row_index, package))
# if feeder_name in footprints:
# # We have a match:
# footprint = footprints[feeder_name]
# rotation = footprint.rotation
# if debugging:
# print("rotation={0}".format(rotation))
# if isinstance(rotation, float):
# row.part_rotate(rotation)
# else:
# print("Footprint '{0}' does not have a feeder rotation.".format(feeder_name))
# else:
# # No match:
# print("Could not find footprint '{0}' from file '{1}'".
# format(feeder_name, file_name))
# positions_table.write(os.path.join(order_root, file_name))
#
# # PositionsTable.reorigin():
# def reorigin(self, reference):
# """
# """
#
# positions = self
# row_table = positions.row_table
# if reference in row_table:
# row = row_table[reference]
# dx = -row.x
# dy = -row.y
# positions.translate(dx, dy)
#
# # PositionsTable.translate():
# def translate(self, dx, dy):
# """
# """
#
# positions = self
# rows = positions.rows
# for row in rows:
# row.translate(dx, dy)
#
# # PositionsTable.write():
# def write(self, file_name):
# """ *PositionsTable*: Write out the *PostionsTable* object to *file_name*.
#
# The arguments are:
# * *file_name*: specifies the file to write out to. It must have a suffix of:
# * `.csv`: Writes the file out in Comma Separated Values format.
# * `.pos`: Writes the file out as a text file with data separated by spaces.
# """
#
# # Verify argument types:
# assert isinstance(file_name, str)
# assert len(file_name) >= 4 and file_name[-4:] in (".csv", ".pos")
#
# # Unpack the *positions_table* (i.e. *self*):
# positions_table = self
# comments = positions_table.comments
# headings_line = positions_table.headings_line
# headings = positions_table.headings
# rows = positions_table.rows
# mapping = positions_table.mapping
# trailers = positions_table.trailers
# feeders = positions_table.feeders
#
# In order to exactly match KiCAD output, the output formatting is adjusted based
# on the column heading. *extras_table* specifies the extra spaces to add to the column.
# *aligns_table* specifies whether the data is left justified (i.e. "") or right
# # justified (i.e. ">"):
# extras_table = {"Ref": 5, "Val": 0, "Package": 1,
# "PosX": 0, "PosY": 0, "Rot": 0, "Side": 0}
# aligns_table = {"Ref": "", "Val": "", "Package": "",
# "PosX": ">", "PosY": ">", "Rot": ">", "Side": ""}
#
# # Build up the final output as a list of *final_lines*:
# final_lines = list()
#
# # Just copy the *comments* and *headings_line* into *final_lines*:
# final_lines.extend(comments)
# final_lines.append(headings_line)
#
# # Dispatch on *file_name* suffix:
# if file_name.endswith(".pos"):
# # We have a `.pos` file:
#
# # Populate *string_rows* with strings containing the data:
# string_rows = list()
# for row in rows:
# string_row = row.as_strings(mapping, feeders)
# string_rows.append(string_row)
#
# # Figure out the maximum *sizes* for each column:
# sizes = [0] * len(headings)
# for string_row in string_rows:
# for column_index, column in enumerate(string_row):
# sizes[column_index] = max(sizes[column_index], len(column))
#
# # Convert *aligns_table* into a properly ordered list of *aligns*:
# aligns = list()
# for header_index, header in enumerate(headings):
# sizes[header_index] += extras_table[header]
# aligns.append(aligns_table[header])
#
# Create a *format_string* for outputting each row:
# format_columns = list()
# for size_index, size in enumerate(sizes):
# format_columns.append("{{{0}:{1}{2}}}"
# .format(size_index, aligns[size_index], size))
# # format_columns.append("{" + str(size_index) +
# # ":" + aligns[size_index] + str(size) + "}")
# format_string = " ".join(format_columns)
# # print("format_string='{0}'".format(format_string))
#
# # Now format each *string_row* and append the result to *final_lines*:
# for string_row in string_rows:
# final_line = format_string.format(*string_row)
# final_lines.append(final_line)
#
# # Tack *trailers* and an empty line onto *final_lines*:
# final_lines.extend(trailers)
# final_lines.append("")
# elif file_name.endswith(".csv"):
# # File is a `.csv` file:
# assert False, ".csv file support not implemented yet."
# else:
# assert False, ("File name ('{0}') does not have a suffixe of .csv or .pos".
# format(file_name))
#
# # Write *final_lines* to *file_name*:
# with open(file_name, "w") as output_file:
# output_file.write("\r\n".join(final_lines))
# PositionsTable.__str__():
def __str__(self) -> str:
return "PositionsTable()"
# PriceBreak:
class PriceBreak:
# A price break is a quantity threshold at which the unit pricing changes:
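# For example (hypothetical pricing), PriceBreak(1, 0.15), PriceBreak(10, 0.12),
# and PriceBreak(100, 0.08) describe a part that costs $0.15 each in singles,
# $0.12 each at 10 or more, and $0.08 each at 100 or more.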
# PriceBreak.__init__():
def __init__(self, quantity: int, price: float) -> None:
""" *PriceBreak*: Initialize *self* to contain *quantity*
and *price*. """
# Load up *price_break* (i.e. *self*):
# price_break: PriceBreak = self
self.quantity: int = quantity
self.price: float = price
self.order_quantity: int = 0
self.order_price: float = 0.00
# PriceBreak.__eq__():
def __eq__(self, price_break2: object) -> bool:
equal: bool = False
if isinstance(price_break2, PriceBreak):
price_break1: PriceBreak = self
equal = (price_break1.quantity == price_break2.quantity and
price_break1.price == price_break2.price)
return equal
# PriceBreak.__format__():
def __format__(self, format: str) -> str:
""" *PriceBreak*: Return the *PriceBreak* object as a human redable string.
"""
# Grab some values from *price_break* (i.e. *self*):
price_break: PriceBreak = self
quantity = price_break.quantity
price = price_break.price
result = "{0}/{1}".format(quantity, price)
# print("Result='{0}'".format(result))
return result
# PriceBreak.__lt__():
def __lt__(self, price_break2: "PriceBreak") -> bool:
price_break1: PriceBreak = self
return price_break1.price < price_break2.price
# PriceBreak.__str__():
def __str__(self) -> str:
return "PriceBreak()"
# PriceBreak.compute():
def compute(self, needed: int) -> None:
""" *PriceBreak*: """
price_break: PriceBreak = self
price_break.order_quantity = order_quantity = max(needed, price_break.quantity)
price_break.order_price = order_quantity * price_break.price
# PriceBreak.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *price_break* (i.e. *self*):
price_break: PriceBreak = self
quantity: int = price_break.quantity
price: float = price_break.price
# Output `<PriceBreak ...>` tag:
xml_lines.append('{0}<PriceBreak quantity="{1}" price="{2:.6f}"/>'.
format(indent, quantity, price))
# PriceBreak.xml_parse():
@staticmethod
def xml_parse(price_break_tree: etree._Element) -> "PriceBreak":
# Grab some the attribute values from *price_break_tree*:
attributes_table: Dict[str, str] = price_break_tree.attrib
quantity: int = int(attributes_table["quantity"])
price: float = float(attributes_table["price"])
# Create and return the new *PriceBreak* object:
price_break: PriceBreak = PriceBreak(quantity, price)
return price_break
# Project:
class Project:
# Project.__init__():
def __init__(self, name: str, revision: str, cad_file_name: str, count: int, order: Order,
positions_file_name: str = "") -> None:
""" Initialize a new *Project* object (i.e. *self*) containing *name*, *revision*,
*cad_file_name*, *count*, *order*, and optionally *positions_file_name*.
"""
# Load up *project* (i.e. *self*):
project: Project = self
self.name: str = name # Project name
self.revision: str = revision # Revision designator
self.cad_file_name: str = cad_file_name # Cad/bom file name
self.count: int = count # Number of this project to order
self.positions_file_name: str = positions_file_name # Positions Pick and Place file name
self.order: Order = order # Parent *Order*
self.pose_parts_table: Dict[str, PosePart] = {} # PosePart name to *PosePart* table
self.project_parts: List[ProjectPart] = [] # List of *ProjectPart*'s
self.project_parts_table: Dict[str, ProjectPart] = {} # ProjectPart name to *ProjectPart*
self.all_pose_parts: List[PosePart] = [] # All *PosePart*'s
self.installed_pose_parts: List[PosePart] = [] # *PosePart*'s to be installed
self.uninstalled_pose_parts: List[PosePart] = [] # *PosePart*'s not installed
# Read all of the *cads* associated with *order*:
cads: List[Cad] = order.cads
success: bool = False
cad: Cad
for cad in cads:
success = cad.file_read(cad_file_name, project)
if success:
break
assert success, f"Could not successfully read and process file '{cad_file_name}'!"
# Project.__format__():
def __format__(self, format: str) -> str:
# Grab some values from *project* (i.e. *self*):
project: Project = self
name: str = project.name
revision: str = project.revision
# Return *result*:
result: str = f"{name}.{revision}"
return result
# Project.__str__():
def __str__(self) -> str:
name: str = "??"
project: Project = self
if hasattr(project, "name"):
name = project.name
return f"Project('{name}')"
# Project.assembly_summary_write():
@trace(1)
def assembly_summary_write(self, final_choice_parts: "List[ChoicePart]",
order: Order) -> None:
""" Write out an assembly summary .csv file for the *Project* object (i.e. *self*)
using *final_choice_parts*.
"""
# Open *project_file* (i.e. *self*):
project: Project = self
order_root: str = order.order_root
project_file_name: str = os.path.join(order_root, f"{project.name}.csv")
project_file: IO[str]
with open(project_file_name, "w") as project_file:
# Write out the column headings:
project_file.write(
'"Quan.","Reference","Schematic Name","Description","Fractional",' +
'"Manufacturer","Manufacture PN","Vendor","Vendor PN"\n\n')
# Output the installed parts:
has_fractional_parts1: bool = project.assembly_summary_write_helper(True,
final_choice_parts,
project_file)
# Output the uninstalled parts:
project_file.write("\nDo Not Install\n")
# Output the installed parts:
has_fractional_parts2: bool = project.assembly_summary_write_helper(False,
final_choice_parts,
project_file)
# Explain what a fractional part is:
if has_fractional_parts1 or has_fractional_parts2:
project_file.write(
'"","\nFractional parts are snipped off of 1xN or 2xN break-way headers"\n')
# Write out a progress message:
print(f"Wrote out assembly file '{project_file_name}'")
# Project.assembly_summary_write_helper():
def assembly_summary_write_helper(self, install: bool, final_choice_parts: "List[ChoicePart]",
csv_file: IO[str]) -> bool:
""" Write out an assembly summary .csv file for *Project* object (i.e. *self*)
out to *project_file*. *install* is set *True* to list the installable parts from
*final_choice_parts* and *False* for an uninstalled parts listing.
This routine returns *True* if there are any fractional parts output to *csv_file*.
"""
# Each *final_choice_part* that is part of the project (i.e. *self*) will wind up
# in a list in *pose_parts_table*. The key is the *project_part_key*:
project: Project = self
pose_parts_table: Dict[str, List[Tuple[PosePart, ChoicePart]]] = {}
final_choice_part: ChoicePart
for final_choice_part in final_choice_parts:
# Now figure out if final choice part is part of *pose_parts*:
pose_parts: List[PosePart] = final_choice_part.pose_parts
pose_part: PosePart
for pose_part in pose_parts:
# We only care about *final_choice_part* if it is used on *project* and
# it matches the *install* selector:
if pose_part.project is project and pose_part.install == install:
# We are on the project; create *project_part_key*:
project_part: ProjectPart = pose_part.project_part
project_part_key: str = (f"{project_part.base_name};"
f"{project_part.short_footprint}")
# Create/append a list to *pose_parts_table*, keyed on *project_part_key*:
if project_part_key not in pose_parts_table:
pose_parts_table[project_part_key] = []
key: str = project_part_key
pairs_list: List[Tuple[PosePart, ChoicePart]] = pose_parts_table[key]
# Append a pair of *pose_part* and *final_choice_part* onto *pairs_list*:
project_final_pair = (pose_part, final_choice_part)
pairs_list.append(project_final_pair)
# Now organize everything around the *reference_list*:
reference_pose_parts: Dict[str, Tuple[PosePart, ChoicePart]] = {}
for pairs_list in pose_parts_table.values():
# We want to sort based on *reference_value* which is converted into *reference_text*:
reference_list: List[str] = \
[project_final_pair[0].reference.upper() for project_final_pair in pairs_list]
reference_text: str = ", ".join(reference_list)
# print("reference_text='{0}'".format(reference_text))
pair: Tuple[PosePart, ChoicePart] = pairs_list[0]
reference_pose_parts[reference_text] = pair
# Sort the *reference_parts_keys*:
reference_pose_parts_keys: List[str] = list(reference_pose_parts.keys())
reference_pose_parts_keys.sort()
# Now dig down until we have all the information we need to output the next
# `.csv` file line:
has_fractional_parts: bool = False
for reference_pose_parts_key in reference_pose_parts_keys:
# Extract the *pose_part* and *final_choice_part*:
key = reference_pose_parts_key
project_final_pair = reference_pose_parts[key]
pose_part = project_final_pair[0]
final_choice_part = project_final_pair[1]
# Now get the corresponding *project_part*:
project_part = pose_part.project_part
project_part_key = f"{project_part.base_name};{project_part.short_footprint}"
# Now get the *actual_part*:
actual_part: Optional[ActualPart] = final_choice_part.selected_actual_part
if isinstance(actual_part, ActualPart):
# Now get the VendorPart:
manufacturer_name: str = actual_part.manufacturer_name
manufacturer_part_name: str = actual_part.manufacturer_part_name
vendor_part: Optional[VendorPart] = final_choice_part.selected_vendor_part
assert isinstance(vendor_part, VendorPart)
# Output the line for the .csv file:
vendor_name: str = vendor_part.vendor_name
vendor_part_name: str = vendor_part.vendor_part_name
quantity: int = final_choice_part.count_get()
fractional: str = "No"
if len(final_choice_part.fractional_parts) > 0:
fractional = "Yes"
has_fractional_parts = True
csv_file.write(f'{Encode.to_csv(str(quantity))},'
f'{Encode.to_csv(reference_pose_parts_key)},'
f'{Encode.to_csv(project_part_key)},'
f'{Encode.to_csv(final_choice_part.description)},'
f'{Encode.to_csv(fractional)},'
f'{Encode.to_csv(manufacturer_name)},'
f'{Encode.to_csv(manufacturer_part_name)},'
f'{Encode.to_csv(vendor_name)},'
f'{Encode.to_csv(vendor_part_name)}\n')
else:
print(f"Problems with actual_part '{actual_part}'")
return has_fractional_parts
# Project.check():
def check(self, collections: Collections) -> None:
# Grab some values from *project* (i.e. *self*):
project: Project = self
all_pose_parts: List[PosePart] = project.all_pose_parts
# Check *all_pose_parts*:
pose_part: PosePart
for pose_part in all_pose_parts:
pose_part.check(collections)
# Project.project_part_append():
def pose_part_append(self, pose_part: PosePart) -> None:
""" Append *pose_part* onto the *Project* object (i.e. *self*).
"""
# Tack *pose_part* onto the appropriate lists inside of *project* (i.e. *self*):
project: Project = self
project.all_pose_parts.append(pose_part)
if pose_part.install:
project.installed_pose_parts.append(pose_part)
else:
project.uninstalled_pose_parts.append(pose_part)
# Project.pose_part_find():
def pose_part_find(self, name: str, reference: str) -> PosePart:
# Grab some values from *project_part* (i.e. *self*):
project: Project = self
all_pose_parts: List[PosePart] = project.all_pose_parts
pose_parts_table: Dict[str, PosePart] = project.pose_parts_table
# Find the *project_part* named *name* associated with *project*:
project_part: ProjectPart = project.project_part_find(name)
# Make sure that *reference* is not duplicated:
pose_part: PosePart
if reference in pose_parts_table:
pose_part = pose_parts_table[reference]
print(f"Project {project} has a duplicate '{reference}'?")
else:
pose_part = PosePart(project, project_part, reference, "")
pose_parts_table[reference] = pose_part
all_pose_parts.append(pose_part)
return pose_part
# Project.positions_process():
def positions_process(self) -> None:
""" Reorigin the the contents of the positions table.
"""
pass
# project: Project = self
# positions_file_name: str = project.positions_file_name
# positions_table: PositionsTable = PositionsTable(positions_file_name)
# positions_table.reorigin("FD1")
# positions_table.footprints_rotate()
# Project.project_part_find():
def project_part_find(self, project_part_name: str) -> "ProjectPart":
# Grab some values from *project* (i.e. *self*):
project: Project = self
project_parts: List[ProjectPart] = project.project_parts
project_parts_table: Dict[str, ProjectPart] = project.project_parts_table
# Determine if we have a pre-existing *project_part* named *name*:
project_part: ProjectPart
if project_part_name in project_parts_table:
# Reuse the pre-existing *project_part* named *name*:
project_part = project_parts_table[project_part_name]
else:
# Create a new *project_part* named *name* and stuff it into the *project* data structures:
project_part = ProjectPart(project_part_name, [project])
project_parts.append(project_part)
project_parts_table[project_part_name] = project_part
return project_part
# ProjectPart:
class ProjectPart:
# A *ProjectPart* represents a part with a footprint. The schematic
# part name must adhere to the format of "name;footprint:comment", where
# ":comment" is optional. The footprint name can be short (e.g. 1608,
# QFP100, SOIC20, SOT3), since it only has to disambiguate the various
# footprints associated with "name". A *ProjectPart* is always
# sub-classed by one of *ChoicePart*, *AliasPart*, or *FractionalPart*.
# ProjectPart.__init__():
def __init__(self, name: str, projects: List[Project]) -> None:
""" *ProjectPart*: Initialize *self* to contain
*name* and *projects*. """
# Extract *base_name*, *short_footprint* and *comment* from *name* where
# *name* is formatted as '*base_name*;*short_footprint_name*:*comment*':
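# For example (hypothetical name), "10K;1608:pull-up" yields base_name="10K",
# short_footprint="1608", and comment="pull-up":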
short_footprint: str = ""
comment: str = ""
base_name: str = ""
pieces: List[str] = [name]
if ':' in pieces[0]:
pieces = pieces[0].split(':')
pieces_size: int = len(pieces)
assert pieces_size <= 2, f"Too many colons (':') in '{name}'"
comment = pieces[1] if len(pieces) == 2 else ""
if ';' in pieces[0]:
pieces = pieces[0].split(';')
pieces_size = len(pieces)
assert pieces_size <= 2, f"Too many semi-colons (';') in '{name}'"
short_footprint = pieces[1] if pieces_size == 2 else ""
base_name = pieces[0]
# Stuff values into *project_part* (i.e. *self*):
# project_part = self
self.name: str = name
self.base_name: str = base_name
self.comment: str = comment
self.pose_parts: List[PosePart] = []
self.pose_parts_table: Dict[str, PosePart] = {}
self.projects: List[Project] = projects
self.short_footprint: str = short_footprint
# ProjectPart.__format__():
def __format__(self, format: str) -> str:
""" *ProjectPart*: Format the *ProjectPart* object (i.e. *self*) using *format***. """
# Grab some values from *project_part* (i.e. *self*):
project_part: ProjectPart = self
name: str = project_part.name
# Return *result*' based on *format*:
result: str
if format == "s":
projects: List[Project] = project_part.projects
project_names: List[str] = [project.name for project in projects]
project_names_text: str = ":".join(project_names)
result = f"{project_names_text}:{name}"
else:
result = f"{name}"
return result
# ProjectPart.__str__():
def __str__(self) -> str:
name: str = "??"
project_part: ProjectPart = self
if hasattr(project_part, "name"):
name = project_part.name
return f"ProjectPart('{name}')"
# ProjectPart.footprints_check():
def footprints_check(self, footprints: Dict[str, str]) -> None:
""" *ProjectPart*: Verify that all the footprints exist for the *ProjectPart* object
(i.e. *self*.)
"""
assert False, "No footprints_check method for this Schematic Part"
# AliasPart():
class AliasPart(ProjectPart):
# An *AliasPart* specifies one or more *ProjectParts* to use.
# AliasPart.__init__():
def __init__(self, name: str, project_parts: List[ProjectPart], footprint: str = "",
feeder_name="", part_height=0.0, pick_dx=0.0, pick_dy=0.0) -> None:
""" *AliasPart*: Initialize *self* to contain *name*,
*project_parts*, *footprint*, *feeder_name*, *part_height*, *pick_dx*, and *pick_dy*. """
projects_table: Dict[str, Project] = {}
project_part: ProjectPart
for project_part in project_parts:
projects: List[Project] = project_part.projects
project: Project
for project in projects:
project_name_revision: str = f"{project.name}.{project.revision}"
if project_name_revision not in projects_table:
projects_table[project_name_revision] = project
union_projects: List[Project] = list(projects_table.values())
# Load up *alias_part* (i.e *self*):
super().__init__(name, union_projects)
# alias_part: AliasPart = self
self.project_parts: List[ProjectPart] = project_parts
self.feeder_name: str = feeder_name
self.footprint: str = footprint
self.part_height: float = part_height
self.pick_dx: float = pick_dx
self.pick_dy: float = pick_dy
# AliasPart.__str__():
def __str__(self) -> str:
name: str = "??"
alias_part: AliasPart = self
if hasattr(alias_part, "name"):
name = alias_part.name
return f"AliasPart('{name}')"
# AliasPart.choice_parts():
def choice_parts(self) -> "List[ChoicePart]":
""" *AliasPart*: Return a list of *ChoicePart*'s corresponding to *self*
"""
# choice_parts: List[ChoicePart] = []
# project_part: ProjectPart
# project_parts: List[ProjectPart]
# for project_part in project_parts:
# choice_parts.extend(project_part.choice_parts)
# return choice_parts
assert False, "Fix this code"
return list()
# AliasPart.footprints_check():
def footprints_check(self, footprints: Dict[str, str]) -> None:
""" *AliasPart*: Verify that all the footprints exist for the *AliasPart* object
(i.e. *self*.)
"""
# Grab *project_parts* from *alias_part* (i.e. *self*):
alias_part: AliasPart = self
project_parts: List[ProjectPart] = alias_part.project_parts
project_part: ProjectPart
for project_part in project_parts:
project_part.footprints_check(footprints)
# ChoicePart:
class ChoicePart(ProjectPart):
# A *ChoicePart* specifies a list of *ActualPart*'s to choose from.
# ChoicePart.__init__():
def __init__(self, name: str, project_parts: List[ProjectPart], searches: List[Search]) -> None:
""" *ChoicePart*: Initiailize *self* to contain *name*
*kicad_footprint* and *actual_parts*. """
# A *chice_part* (i.e. *self*) can have multiple *Project*'s associated with it.
# Thus, we need to compute the *union_projects* of all *Project*'s associated
# with *project parts*:
projects_table: Dict[str, Project] = {}
for project_part in project_parts:
projects: List[Project] = project_part.projects
project: Project
for project in projects:
cad_file_name: str = project.cad_file_name
projects_table[cad_file_name] = project
union_projects: List[Project] = list(projects_table.values())
# Load up *choice_part* (i.e. *self*):
super().__init__(name, union_projects)
self.actual_parts: List[ActualPart] = []
self.searches: List[Search] = searches
# Fields used by algorithm:
self.description: str = "DESCRIPTION"
self.footprint: str = "FOOTPRINT"
self.fractional_parts: List[FractionalPart] = []
self.selected_total_cost: float = 0.00
self.selected_order_quantity: int = -1
self.selected_actual_part: Optional[ActualPart] = None
self.selected_vendor_part: Optional[VendorPart] = None
self.selected_vendor_name: str = ""
self.selected_price_break_index: int = -1
self.selected_price_break: Optional[PriceBreak] = None
# assert isinstance(kicad_footprint, str)
# assert isinstance(location, str)
# assert isinstance(description, str)
# assert isinstance(rotation, float) or rotation is None
# assert isinstance(pick_dx, float)
# assert isinstance(pick_dy, float)
# assert isinstance(feeder_name, str) or feeder_name is None
# assert isinstance(part_height, float) or part_height is None
# choice_part.feeder_name = feeder_name
# choice_part.location = location
# choice_part.part_height = part_height
# choice_part.rotation = rotation
# choice_part.pick_dx = pick_dx
# choice_part.pick_dy = pick_dy
# ChoicePart.__format__():
def __format__(self, format: str) -> str:
""" *ChoicePart*: Return the *ChoicePart object (i.e. *self* as a string formatted by
*format*.
"""
choice_part: ChoicePart = self
return choice_part.__str__()
# ChoicePart.__str__():
def __str__(self) -> str:
choice_part: ChoicePart = self
name: str = "??"
if hasattr(choice_part, "name"):
name = choice_part.name
return f"ChoicePart('{name}')"
# # ChoicePart.actual_part():
# def actual_part(self, manufacturer_name, manufacturer_part_name, vendor_triples=[]):
# """ *ChoicePart*: Create an *ActualPart* that contains *manufacturer_name* and
# *manufacturer_part_name* and append it to the *ChoicePart* object (i.e. *self*.)
# For parts whose prices are not available via screen scraping, it is possible to
# specify vendor/pricing information as a list of vendor triples. The vendor triples
# are a *tuple* of(*vendor_name*, *vendor_part_name*, *price_pairs_text*),
# where *vendor_name* is a distributor (i.e. "Newark", or "Pololu"), *vendor_part_name*
# is the vendor's order number of the part, and *price_pairs_text* is a string of
# the form "quant1/price1 quant2/price2 ... quantN/priceN". *quantI* is a quantity
# as an integer and *priceI* is a price in dollars.
# """
# # Verify argument types:
# assert isinstance(manufacturer_name, str)
# assert isinstance(manufacturer_part_name, str)
# assert isinstance(vendor_triples, list)
# for vendor_triple in vendor_triples:
# assert len(vendor_triple) == 3
# assert isinstance(vendor_triple[0], str)
# assert isinstance(vendor_triple[1], str)
# assert isinstance(vendor_triple[2], str)
# actual_part = ActualPart(manufacturer_name, manufacturer_part_name)
# self.actual_parts.append(actual_part)
# if True:
# for vendor_triple in vendor_triples:
# vendor_name = vendor_triple[0]
# vendor_part_name = vendor_triple[1]
# price_pairs_text = vendor_triple[2]
# price_breaks = []
# for price_pair_text in price_pairs_text.split():
# Make sure we only have a quantity and a price:
# price_pair = price_pair_text.split('/')
# assert len(price_pair) == 2
# # Extract the *quantity* from *price_pair*:
# quantity = 1
# try:
# quantity = int(price_pair[0])
# except ValueError:
# assert False, \
# "Quantity '{0}' is not an integer". \
# format(price_pair[0])
# # Extract the *price* from *price_pair*:
# price = 100.00
# try:
# price = float(price_pair[1])
# except ValueError:
# assert False, \
# "Price '{0}' is not a float".format(price_pair[1])
# # Construct the *price_break* and append to *price_breaks*:
# price_break = PriceBreak(quantity, price)
# price_breaks.append(price_break)
# # Create the *vendor_part* and append it to *actual_part*:
# assert len(price_breaks) > 0
# vendor_part = VendorPart(actual_part,
# vendor_name, vendor_part_name, 1000000, price_breaks)
# actual_part.vendor_part_append(vendor_part)
# # if tracing:
# # print("vendor_part_append called")
# # print("ChoicePart.actual_part(): Explicit vendor_part specified={0}".
# # format(vendor_part))
# return self
# ChoicePart.pose_part_append():
def pose_part_append(self, pose_part: PosePart) -> None:
""" *ChoicePart*: Store *pose_part* into the *ChoicePart* object
(i.e. *self*.)
"""
choice_part: ChoicePart = self
choice_part.pose_parts.append(pose_part)
# ChoicePart.pose_parts_sort():
def pose_parts_sort(self) -> None:
""" *ChoicePart*: Sort the *pose_parts* of the *ChoicePart* object
(i.e. *self*.)
"""
# Sort the *pose_parts* using a key of
# (project_name, reference, reference_number). A reference of
# "SW123" gets conferted to (..., "SW123", 123):
choice_part: ChoicePart = self
pose_parts: List[PosePart] = choice_part.pose_parts
pose_parts.sort(key=lambda pose_part:
(pose_part.project.name,
text_filter(pose_part.reference, str.isalpha).upper(),
int(text_filter(pose_part.reference, str.isdigit))))
# print(" {0}:{1};{2} {3}:{4}".\
# format(choice_part.name,
# choice_part.kicad_footprint, choice_part.description,
# choice_part.count_get(), choice_part.references_text_get()))
# ChoicePart.count_get():
def count_get(self) -> int:
""" *ChoicePart*: Return the number of needed instances of *self*. """
choice_part: ChoicePart = self
count: int = 0
fractional_part: FractionalPart
fractional_parts: List[FractionalPart] = choice_part.fractional_parts
pose_parts: List[PosePart] = choice_part.pose_parts
pose_part: PosePart
if len(fractional_parts) == 0:
for pose_part in pose_parts:
count += pose_part.project.count
else:
# for fractional_part in fractional_parts:
# print("{0}".format(fractional_part.name))
# This code is not quite right:
first_fractional_part: FractionalPart = fractional_parts[0]
denominator: int = first_fractional_part.denominator
for fractional_part in fractional_parts[1:]:
assert denominator == fractional_part.denominator, \
"'{0}' has a denominator of {1} and '{2}' has one of {3}". \
format(first_fractional_part.name,
first_fractional_part.denominator,
fractional_part.name,
fractional_part.denominator)
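# A worked example (hypothetical parts): snipping 1x3 headers from 1x40
# break-away stock gives numerator=3 and denominator=40; thirteen snips fit
# in one stock part (39 of 40 pins), so 14 snips yield a *count* of 2: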
# Compute the *count*:
numerator: int = 0
for pose_part in pose_parts:
project_part: ProjectPart = pose_part.project_part
# print("'{0}'".format(project_part.name))
if isinstance(project_part, AliasPart):
alias_part = project_part
for project_part in alias_part.project_parts:
if isinstance(project_part, FractionalPart):
fractional_part = project_part
elif isinstance(project_part, FractionalPart):
fractional_part = project_part
else:
assert False, "Missing code"
fractional_numerator: int = fractional_part.numerator
for index in range(pose_part.project.count):
if numerator + fractional_numerator > denominator:
count += 1
numerator = 0
numerator += fractional_numerator
if numerator > 0:
numerator = 0
count += 1
return count
# ChoicePart.choice_parts():
def choice_parts(self) -> "List[ChoicePart]":
""" *ChoicePart*: Return a list of *ChoicePart* corresponding
to *self* """
choice_part: ChoicePart = self
return [choice_part]
# ChoicePart.footprints_check():
def footprints_check(self, footprints: Dict[str, str]) -> None:
""" *ChoicePart*: Verify that all the footprints exist for the *ChoicePart* object
(i.e. *self*.)
"""
# Use *choice_part* instead of *self*:
choice_part: ChoicePart = self
footprint: str = choice_part.footprint
if footprint != "-":
footprints[footprint] = choice_part.name
# rotation = choice_part.rotation
# ChoicePart.references_text_get():
def references_text_get(self) -> str:
""" *ChoicePart*: Return a string of references for *self*. """
choice_part: ChoicePart = self
references_text: str = ""
previous_project: Optional[Project] = None
is_first: bool = True
pose_part: PosePart
for pose_part in choice_part.pose_parts:
project: Project = pose_part.project
if project != previous_project:
if not is_first:
references_text += "]"
references_text += f"[{project.name}"
previous_project = project
is_first = False
# Now tack the reference to the end:
references_text += f" {pose_part.reference}"
references_text += "]"
return references_text
# ChoicePart.select():
@trace(2)
def select(self, excluded_vendor_names: Dict[str, None], announce: bool = False) -> int:
""" *ChoicePart*: Select and return the best priced *ActualPart*
for the *ChoicePart* (i.e. *self*) excluding any vendors
in the *excluded_vendor_names* dictionary.
"""
trace_level: int = trace_level_get()
tracing: str = tracing_get()
# This lovely piece of code basically brute forces the decision
# process of figuring out which *vendor_part* to select and the
# number of parts to order. We iterate over each *actual_part*,
# *vendor_part* and *price_break* and compute the *total_cost*
# and *order_quantity* for that combination. We store this into
# a 6-tuple called *quint* and build up the list of *quints*.
# When we are done, we sort *quints* and select the first one
# off the head of the list.
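# For example (hypothetical quints), (9.75, 50, 1, 0, 1, 2) sorts ahead of
# (12.50, 25, 0, 1, 2, 3), so the lowest total cost wins, with ties broken
# by order quantity and then the remaining indices: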
# Grab some values from *choice_part* (i.e. *self*):
choice_part: "ChoicePart" = self
required_quantity: int = choice_part.count_get()
actual_parts: List[ActualPart] = choice_part.actual_parts
quints: List[Quint] = []
actual_part_index: int
actual_part: ActualPart
for actual_part_index, actual_part in enumerate(actual_parts):
if tracing and trace_level >= 1:
manufacturer_name: str = actual_part.manufacturer_name
manufacturer_part_name: str = actual_part.manufacturer_part_name
print(f"{tracing} Manufacturer: '{manufacturer_name}' '{manufacturer_part_name}'")
vendor_parts: List[VendorPart] = actual_part.vendor_parts
vendor_part_index: int
vendor_part: VendorPart
for vendor_part_index, vendor_part in enumerate(vendor_parts):
# if tracing and trace_level >= 2
# print(f"Vendor: {vendor_part.vendor_name}: "
# f"'{vendor_part.vendor_part_name}':"
# f":{vendor_part.quantity_available}")
if tracing and trace_level >= 2:
vendor_name: str = vendor_part.vendor_name
vendor_part_name: str = vendor_part.vendor_part_name
quantity_available: int = vendor_part.quantity_available
print(f"{tracing} Vendor: {quantity_available} x "
f"'{vendor_name}': '{vendor_part_name}'")
price_breaks: List[PriceBreak] = vendor_part.price_breaks
price_break_index: int
price_break: PriceBreak
for price_break_index, price_break in enumerate(price_breaks):
# if tracing:
# print(" B")
# We now have an *actual_part*, *vendor_part* and
# *price_break* triple. Compute *order_quantity*
# and *total_cost*:
price: float = price_break.price
quantity: int = price_break.quantity
order_quantity: int = max(required_quantity, quantity)
total_cost: float = order_quantity * price
# if tracing:
# print(" price={0:.2f} quant={1} order_quantity={2} total_cost={3:.2f}".
# format(price, quantity, order_quantity, total_cost))
# Assemble the *quint* and append to *quints* if there
# are enough parts available:
is_excluded: bool = vendor_part.vendor_name in excluded_vendor_names
# if trace_level:
# print(f"quantity_available={vendor_part.quantity_available}, "
# f"is_excluded={is_excluded}")
if tracing and trace_level >= 3:
quantity_available = vendor_part.quantity_available
print(f"{tracing} Quantity Available:{quantity_available} "
f"Is Excluded:{is_excluded}")
if not is_excluded and vendor_part.quantity_available >= order_quantity:
assert price_break_index < len(price_breaks)
quint: Quint = (total_cost, order_quantity,
actual_part_index, vendor_part_index,
price_break_index, len(price_breaks))
quints.append(quint)
if tracing:
print(f"{tracing} quint={quint}")
if len(quints) == 0:
choice_part_name = self.name
if announce:
print(f"{tracing}No vendor parts found for Part '{choice_part_name}'")
else:
# Now sort in ascending order:
quints.sort()
quint = quints[0]
# Extract values from *quint*:
selected_total_cost = quint[0]
selected_order_quantity = quint[1]
selected_actual_part = actual_parts[quint[2]]
selected_vendor_part = selected_actual_part.vendor_parts[quint[3]]
selected_vendor_name = selected_vendor_part.vendor_name
selected_price_break_index = quint[4]
# Now stuff extracted values from *quint* into *self*:
self.selected_total_cost = selected_total_cost
self.selected_order_quantity = selected_order_quantity
self.selected_actual_part = selected_actual_part
self.selected_vendor_part = selected_vendor_part
self.selected_vendor_name = selected_vendor_name
self.selected_price_break_index = selected_price_break_index
assert selected_price_break_index < len(selected_vendor_part.price_breaks)
# print("selected_vendor_name='{0}'".format(selected_vendor_name))
# actual_parts = self.actual_parts
# for actual_part in actual_parts:
# print(" {0}:{1}".format(actual_part.manufacturer_name,
# actual_part.manufacturer_part_name))
# actual_parts = self.actual_parts
# selected_actual_part = actual_parts[0]
# assert isinstance(selected_actual_part, ActualPart)
# self.selected_actual_part = selected_actual_part
# vendor_parts = selected_actual_part.vendor_parts
# if len(vendor_parts) == 0:
# key = selected_actual_part.key
# print("No vendor part for Actual Part '{0} {1}'". \
# format(key[0], key[1]))
# else:
# selected_actual_part.selected_vendor_part = vendor_parts[0]
# assert isinstance(selected_actual_part, ActualPart)
missing_part: int = 0
if len(quints) == 0:
missing_part = 1
return missing_part
# ChoicePart.vendor_names_load():
def vendor_names_load(self, vendor_names_table: Dict[str, None],
excluded_vendor_names: Dict[str, None]) -> None:
""" *ChoicePart*: Add each possible vendor name possible for the
*ChoicePart* object (i.e. *self*) to *vendor_names_table*
provided it is not in *excluded_vendor_names*:
"""
# Visit each *actual_part* and add the vendor names to
# *vendor_names_table*:
choice_part: ChoicePart = self
actual_parts: List[ActualPart] = choice_part.actual_parts
actual_part: ActualPart
for actual_part in actual_parts:
actual_part.vendor_names_load(vendor_names_table, excluded_vendor_names)
# ChoicePart.vendor_parts_refresh():
@trace(1)
def vendor_parts_refresh(self, proposed_actual_parts: List[ActualPart],
order: Order, part_name: str) -> None:
# Grab some values from *choice_part* (i.e. *self*) and *order*:
choice_part: ChoicePart = self
choice_part_name: str = choice_part.name
pandas: List[Panda] = order.pandas
stale: int = order.stale
vendor_searches_root: str = order.vendor_searches_root
trace_level: int = trace_level_get()
# Construct the file path for the `.xml` file associated *choice_part*:
xml_base_name: str = Encode.to_file_name(choice_part_name + ".xml")
xml_full_name: str = os.path.join(vendor_searches_root, xml_base_name)
tracing: str = tracing_get()
if tracing:
print(f"{tracing}choice_part_name='{choice_part_name}'")
print(f"{tracing}vendor_searches_root='{vendor_searches_root}'")
print(f"{tracing}xml_base_name='{xml_base_name}'")
print(f"{tracing}xml_full_name='{xml_full_name}'")
# Open *xml_full_name*, read it in, and fill in *previous_actual_parts_table* with
# the resulting *previous_actual_part* from the `.xml` file. Mark *xml_save_required*
# as *True* if *xml_full_name* does not exist:
xml_save_required: bool = False
previous_actual_parts: List[ActualPart] = list()
previous_actual_parts_table: Dict[Tuple[str, str], ActualPart] = dict()
if os.path.isfile(xml_full_name):
# Read in and parse the *xml_full_name* file:
xml_read_file: IO[str]
with open(xml_full_name) as xml_read_file:
choice_part_xml_text: str = xml_read_file.read()
choice_part_tree: etree._Element = etree.fromstring(choice_part_xml_text)
# Note that *previous_choice_part* is kind of busted since
# its internal *project_parts* and *searches* lists are empty.
# This is OK, since we only need the *previous_actual_parts* list
# which is populated with valid *ActualPart*'s:
previous_choice_part: ChoicePart = ChoicePart.xml_parse(choice_part_tree)
if tracing:
print(f"{tracing}Read in '{xml_full_name}'")
# Sweep through *previous_actual_parts* and enter them into
# *previous_actual_parts_table*:
previous_actual_parts = previous_choice_part.actual_parts
for previous_actual_part in previous_actual_parts:
previous_actual_parts_table[previous_actual_part.key] = previous_actual_part
else:
# *xml_full_name* does not exist, so we must write out a new one later on:
xml_save_required = True
# For debugging show both the sorted *proposed_actual_parts* and *previous_actual_parts*
# side-by-side:
if tracing and trace_level >= 2:
# First sort *proposed_actual_parts* and *previous_actual_parts*:
proposed_actual_parts.sort(key=lambda proposed_actual_part: proposed_actual_part.key)
previous_actual_parts.sort(key=lambda previous_actual_part: previous_actual_part.key)
# Compute the *maximum_actual_parts_size*:
proposed_actual_parts_size: int = len(proposed_actual_parts)
previous_actual_parts_size: int = len(previous_actual_parts)
maximum_actual_parts_size: int = max(proposed_actual_parts_size,
previous_actual_parts_size)
print(f"{tracing}proposed_actual_parts_size={proposed_actual_parts_size}")
print(f"{tracing}previous_actual_parts_size={previous_actual_parts_size}")
print(f"{tracing}maximum_actual_parts_size={maximum_actual_parts_size}")
# Now sweep across both *proposed_actual_parts* and *previous_actual_parts*
# printing out the key values side by side:
print(f"{tracing}Actual_Parts[xx]: (proposed) (previous)")
for index in range(maximum_actual_parts_size):
proposed_text: str = ("--------" if index >= proposed_actual_parts_size
else str(proposed_actual_parts[index].key))
previous_text: str = ("--------" if index >= previous_actual_parts_size
else str(previous_actual_parts[index].key))
print(f"{tracing}Actual_Parts[{index}]:'{previous_text}'\t{proposed_text}")
# We need to figure out when actual parts from the `.xml` are old (i.e. *stale*)
# and refresh them.
now: int = int(time.time())
if tracing:
print(f"{tracing}now={now} stale={stale} now-stale={now-stale}")
# Now sweep through *proposed_actual_parts* and refresh any that are either missing or out
# of date and construct the *final_actual_parts*:
final_actual_parts: List[ActualPart] = list()
proposed_actual_part: ActualPart
for index, proposed_actual_part in enumerate(proposed_actual_parts):
# Grab the *proposed_actual_part_key*:
proposed_actual_part_key: Tuple[str, str] = proposed_actual_part.key
if tracing:
print(f"{tracing}Proposed_Actual_Part[{index}]:'{proposed_actual_part.key}'")
# Start by assuming that *lookup_is_required* is *True* and set it to *False*
# if we can avoid the lookup:
lookup_is_required: bool = True
if proposed_actual_part_key in previous_actual_parts_table:
if tracing and trace_level >= 2:
print(f"{tracing}'{proposed_actual_part_key} is in previous_actual_parts_table")
# We have a *previous_actual_part* that matches *proposed_actual_part*.
# Now we see if can simply copy *previous_vendor_parts* over or
# whether we must trigger a vendor parts lookup:
key: Tuple[str, str] = proposed_actual_part_key
previous_actual_part = previous_actual_parts_table[key]
previous_vendor_parts: List[VendorPart] = previous_actual_part.vendor_parts
if tracing and trace_level >= 2:
print(f"{tracing}previous_actual_part.name="
f"'{previous_actual_part.manufacturer_part_name}'")
print(f"{tracing}len(previous_vendor_parts)={len(previous_vendor_parts)}")
# Compute the *minimum_timestamp* across all *previous_vendor_parts*:
minimum_timestamp: int = now
for previous_vendor_part in previous_vendor_parts:
minimum_timestamp = min(minimum_timestamp, previous_vendor_part.timestamp)
if tracing and trace_level >= 2:
print(f"{tracing}minimum_timestamp={minimum_timestamp}")
# If *minimum_timestamp* is still fresh (i.e. not yet stale), reuse the
# previous vendor parts; otherwise a fresh vendor lookup is required:
if minimum_timestamp + stale > now:
if tracing and trace_level >= 2:
print(f"{tracing}Not stale")
proposed_actual_part.vendor_parts = previous_vendor_parts
lookup_is_required = False
else:
if tracing and trace_level >= 2:
print(f"{tracing}'{proposed_actual_part_key} is not"
f" in previous_actual_parts_table")
if tracing:
print(f"{tracing}lookup_is_required={lookup_is_required}")
# If *lookup_is_required*, visit each *Panda* object in *pandas* and look up
# *VendorPart*'s. Assemble them all in the *new_vendor_parts* list:
if lookup_is_required:
new_vendor_parts: List[VendorPart] = list()
panda: Panda
for panda in pandas:
actual_part: ActualPart = proposed_actual_part
panda_vendor_parts: List[VendorPart] = panda.vendor_parts_lookup(actual_part,
part_name)
if tracing:
print(f"{tracing}len(panda_vendor_parts)={len(panda_vendor_parts)}")
new_vendor_parts.extend(panda_vendor_parts)
if tracing:
panda_vendor_parts_size: int = len(panda_vendor_parts)
new_vendor_parts_size: int = len(new_vendor_parts)
print(f"{tracing}panda_vendor_parts_size={panda_vendor_parts_size}")
print(f"{tracing}new_vendor_parts_size={new_vendor_parts_size}")
if len(new_vendor_parts) > 0:
final_actual_parts.append(proposed_actual_part)
xml_save_required = True
else:
final_actual_parts.append(proposed_actual_part)
# Figure out if we need to write out *final_actual_parts* by figuring out
# whether or not they match *previous_actual_parts*:
TableType = Dict[Tuple[str, str], ActualPart]
previous_actual_parts_table = {previous_actual_part.key: previous_actual_part
for previous_actual_part in previous_actual_parts}
final_actual_parts_table: TableType = {final_actual_part.key: final_actual_part
for final_actual_part in final_actual_parts}
xml_save_required &= len(previous_actual_parts_table) != len(final_actual_parts_table)
if not xml_save_required:
for final_actual_part_key, final_actual_part in final_actual_parts_table.items():
if final_actual_part_key not in previous_actual_parts_table:
xml_save_required = True
break
previous_actual_part = previous_actual_parts_table[final_actual_part_key]
if previous_actual_part != final_actual_part:
xml_save_required = True
break
# Do a little more *tracing*:
if tracing:
final_actual_parts_size = len(final_actual_parts)
previous_actual_parts_size = len(previous_actual_parts)
proposed_actual_parts_size = len(proposed_actual_parts)
print(f"{tracing}final_actual_parts_size={final_actual_parts_size}")
print(f"{tracing}previous_actual_parts_size={previous_actual_parts_size}")
print(f"{tracing}proposed_actual_parts_size={proposed_actual_parts_size}")
# Update *choice_part* with the new *final_actual_parts*:
choice_part.actual_parts = final_actual_parts
# Save *choice_part* out to *xml_file* if *xml_save_required* has been set:
if xml_save_required:
if tracing:
print(f"{tracing}Writing out '{xml_full_name}'")
xml_lines = []
xml_lines.append('<?xml version="1.0"?>')
choice_part.xml_lines_append(xml_lines, "")
xml_lines.append("")
xml_text = "\n".join(xml_lines)
with open(xml_full_name, "w") as xml_write_file:
xml_write_file.write(xml_text)
# ChoicePart.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *choice_part* (i.e. *self*):
choice_part: ChoicePart = self
actual_parts: List[ActualPart] = choice_part.actual_parts
name: str = choice_part.name
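# The emitted XML has the shape (hypothetical content):
#     <ChoicePart name="10K;1608">
#       <ActualPart ...>...</ActualPart>
#     </ChoicePart>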
# Output the `<ChoicePart ... >` tag:
xml_lines.append(f'{indent}<ChoicePart name="{Encode.to_attribute(name)}">')
# Output the `<ActualPart ... >` tags:
next_indent: str = indent + " "
actual_part: ActualPart
for actual_part in actual_parts:
actual_part.xml_lines_append(xml_lines, next_indent)
# Output the closing `</ChoicePart>` tag:
xml_lines.append(f'{indent}</ChoicePart>')
# ChoicePart.xml_parse():
@staticmethod
def xml_parse(choice_part_tree: etree._Element) -> "ChoicePart":
# Create *choice_part* (most of the values are no longer used...):
assert choice_part_tree.tag == "ChoicePart"
attributes_table: Dict[str, str] = choice_part_tree.attrib
name: str = attributes_table["name"]
choice_part: ChoicePart = ChoicePart(name, [], [])
# Read in the *actual_parts* from *choice_part_tree* and return the resulting *choice_part*:
actual_parts: List[ActualPart] = choice_part.actual_parts
actual_part_tree: etree._Element
for actual_part_tree in list(choice_part_tree):
actual_part: ActualPart = ActualPart.xml_parse(actual_part_tree)
actual_parts.append(actual_part)
return choice_part
# FractionalPart:
class FractionalPart(ProjectPart):
# A *FractionalPart* specifies a part that is constructed by
# using a portion of another *ProjectPart*.
# FractionalPart.__init__():
def __init__(self, name: str, projects: List[Project], footprint: str, choice_part: ChoicePart,
numerator: int, denominator: int, description: str) -> None:
""" *FractionalPart*: Initialize *self* to contain
*name*, *projects*, *footprint*, *choice_part*,
*numerator*, *denominator*, and *description*. """
# Load up *self*:
super().__init__(name, projects)
# fractional_part: FractionalPart = self
self.choice_part: ChoicePart = choice_part
self.footprint: str = footprint
self.numerator: int = numerator
self.denominator: int = denominator
self.description: str = description
# FractionalPart.__str__()
def __str__(self) -> str:
name: str = "??"
fractional_part: FractionalPart = self
if hasattr(fractional_part, "name"):
name = fractional_part.name
return f"FractionalPart('{name}')"
# FractionalPart.choice_parts():
def choice_parts(self) -> List[ChoicePart]:
""" *FractionalPart*: Return the *ChoicePart* objects associated
with *self*.
"""
choice_part = self.choice_part
choice_part.fractional_parts.append(self)
return [choice_part]
# FractionalPart.footprints_check():
def footprints_check(self, footprints: Dict[str, str]) -> None:
""" *FractionalPart*: Verify that all the footprints exist for the *FractionalPart* object
(i.e. *self*.)
"""
# Use *fractional_part* instead of *self*:
fractional_part: FractionalPart = self
# Record *footprint* into *footprints*:
footprint: str = fractional_part.footprint
if footprint != "-":
footprints[footprint] = fractional_part.name
# Units:
class Units:
# Units.__init__():
def __init__(self) -> None:
pass
# Units.__str__():
def __str__(self) -> str:
return "Units()"
# Units.si_units_re_text_get():
@staticmethod
def si_units_re_text_get() -> str:
base_units: List[str] = ["s(ecs?)?", "seconds?", "m(eters?)?", "g(rams?)?", "[Aa](mps?)?",
"[Kk](elvin)?", "mol(es?)?", "cd", "candelas?"]
derived_units: List[str] = ["rad", "sr", "[Hh]z", "[Hh]ertz", "[Nn](ewtons?)?",
"Pa(scals?)?", "J(oules?)?", "W(atts?)?", "°C", "V(olts?)?",
"F(arads?)?", "Ω", "O(hms?)?", "S", "Wb", "T(eslas?)?", "H",
"degC", "lm", "lx", "Bq", "Gy", "Sv", "kat"]
all_units: List[str] = base_units + derived_units
all_units_re_text: str = "(" + "|".join(all_units) + ")"
prefixes: List[Tuple[str, float]] = [
("Y", 1e24),
("Z", 1e21),
("E", 1e18),
("P", 1e15),
("T", 1e12),
("G", 1e9),
("M", 1e6),
("k", 1e3),
("h", 1e2),
("da", 1e1),
("c", 1e-2),
("u", 1e-6),
("n", 1e-9),
("p", 1e-12),
("f", 1e-15),
("a", 1e-18),
("z", 1e-21),
("y", 1e-24)
]
single_letter_prefixes: List[str] = [prefix[0] for prefix in prefixes
if len(prefix[0]) == 1]
single_letter_re_text: str = "[" + "".join(single_letter_prefixes) + "]"
multi_letter_prefixes: List[str] = [prefix[0] for prefix in prefixes if len(prefix[0]) >= 2]
letter_prefixes: List[str] = [single_letter_re_text] + multi_letter_prefixes
prefix_re_text: str = "(" + "|".join(letter_prefixes) + ")"
# print("prefix_re_text='{0}'".format(prefix_re_text))
si_units_re_text: str = prefix_re_text + "?" + all_units_re_text
# print("si_units_re_text='{0}'".format(si_units_re_text))
return si_units_re_text
# VendorPart:
class VendorPart:
# A vendor part represents a part that can be ordered from a vendor.
# VendorPart.__init__():
def __init__(self, actual_part: ActualPart, vendor_name: str, vendor_part_name: str,
quantity_available: int, price_breaks: List[PriceBreak],
timestamp: int = 0) -> None:
""" *VendorPart*: Initialize *self* to contain *actual_part"""
# Clean up *vendor_name*:
# original_vendor_name = vendor_name
vendor_name = vendor_name.replace('\n', "")
if vendor_name.endswith(" •"):
vendor_name = vendor_name[:-2]
if vendor_name.endswith(" ECIA (NEDA) Member"):
vendor_name = vendor_name[:-19]
if vendor_name.endswith(" CEDA member"):
vendor_name = vendor_name[:-12]
vendor_name = vendor_name.strip(" \t")
# print("vendor_name='{0}'\t\toriginal_vendor_name='{1}'".format(
# vendor_name, original_vendor_name))
# Sort *price_breaks* by (quantity, price):
price_breaks.sort(key=lambda price_break: (price_break.quantity, price_break.price))
# Load up *self*:
# vendor_part: VendorPart = self
self.actual_part_key: Tuple[str, str] = actual_part.key
self.quantity_available: int = quantity_available
self.price_breaks: List[PriceBreak] = price_breaks
self.timestamp: int = timestamp
self.vendor_key: Tuple[str, str] = (vendor_name, vendor_part_name)
self.vendor_name: str = vendor_name
self.vendor_part_name: str = vendor_part_name
# Append *self* to the vendor parts of *actual_part*:
actual_part.vendor_part_append(self)
# VendorPart.__eq__():
def __eq__(self, vendor_part2: object) -> bool:
equal: bool = False
if isinstance(vendor_part2, VendorPart):
# Compare *vendor_part1* to *vendor_part2*:
vendor_part1: VendorPart = self
actual_part_key_equal: bool = (vendor_part1.actual_part_key ==
vendor_part2.actual_part_key)
quantity_available_equal: bool = (vendor_part1.quantity_available ==
vendor_part2.quantity_available)
timestamp_equal: bool = vendor_part1.timestamp == vendor_part2.timestamp
vendor_key_equal: bool = vendor_part1.vendor_key == vendor_part2.vendor_key
# Compute whether *price_breaks1* is equal to *price_breaks2*:
price_breaks_equal: bool = vendor_part1.price_breaks == vendor_part2.price_breaks
equal = (actual_part_key_equal and quantity_available_equal and
timestamp_equal and vendor_key_equal and price_breaks_equal)
return equal
# VendorPart.__format__():
def __format__(self, format: str) -> str:
""" *VendorPart*: Print out the information of the *VendorPart* (i.e. *self*):
"""
vendor_part = self
vendor_name = vendor_part.vendor_name
vendor_part_name = vendor_part.vendor_part_name
# price_breaks = vendor_part.price_breaks
return "'{0}':'{1}'".format(vendor_name, vendor_part_name)
# VendorPart.__str__():
def __str__(self) -> str:
vendor_part: VendorPart = self
return f"VendorPart('{vendor_part.vendor_name}':'{vendor_part.vendor_part_name}')"
# VendorPart.price_breaks_text_get():
def price_breaks_text_get(self) -> str:
""" *VendorPart*: Return the prices breaks for the *VendorPart*
object (i.e. *self*) as a text string:
"""
price_breaks_texts: List[str] = list()
for price_break in self.price_breaks:
price_breaks_texts.append("{0}/${1:.3f}".
format(price_break.quantity, price_break.price))
price_breaks_text: str = " ".join(price_breaks_texts)
return price_breaks_text
# VendorPart.xml_lines_append():
def xml_lines_append(self, xml_lines: List[str], indent: str) -> None:
# Grab some values from *vendor_part* (i.e. *self*):
vendor_part: VendorPart = self
quantity_available: int = vendor_part.quantity_available
price_breaks: List[PriceBreak] = vendor_part.price_breaks
vendor_name: str = vendor_part.vendor_name
vendor_part_name: str = vendor_part.vendor_part_name
timestamp: int = vendor_part.timestamp
# Output the `<VendorPart ...>` tag first:
xml_lines.append(f'{indent}<VendorPart '
f'quantity_available="{quantity_available}\" '
f'timestamp="{timestamp}" '
f'vendor_name="{Encode.to_attribute(vendor_name)}\" '
f'vendor_part_name="{Encode.to_attribute(vendor_part_name)}">')
# Output the nested `<PriceBreak ...>` tags:
next_indent: str = indent + " "
price_break: PriceBreak
for price_break in price_breaks:
price_break.xml_lines_append(xml_lines, next_indent)
# Close out with the `</VendorPart>` tag:
xml_lines.append(f"{indent}</VendorPart>")
# VendorPart.xml_parse():
@staticmethod
@trace(2)
def xml_parse(vendor_part_tree: etree._Element, actual_part: ActualPart) -> "VendorPart":
# Pull out the attribute values:
attributes_table: Dict[str, str] = vendor_part_tree.attrib
timestamp: int = int(float(attributes_table["timestamp"]))
vendor_name: str = attributes_table["vendor_name"]
vendor_part_name: str = attributes_table["vendor_part_name"]
quantity_available: int = int(attributes_table["quantity_available"])
price_breaks: List[PriceBreak] = []
price_break_trees: List[etree._Element] = list(vendor_part_tree)
price_break_tree: etree._Element
for price_break_tree in price_break_trees:
price_break: PriceBreak = PriceBreak.xml_parse(price_break_tree)
price_breaks.append(price_break)
vendor_part: VendorPart = VendorPart(actual_part, vendor_name, vendor_part_name,
quantity_available, price_breaks, timestamp)
return vendor_part
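# Round-trip sketch for the XML serialization above (hypothetical objects;
# ``ActualPart`` and ``PriceBreak`` are assumed to be defined earlier in this
# module):
#
#   xml_lines: List[str] = []
#   vendor_part.xml_lines_append(xml_lines, indent="")
#   tree = etree.fromstring("\n".join(xml_lines))
#   same_vendor_part = VendorPart.xml_parse(tree, actual_part)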
if __name__ == "__main__":
main()
#
# Notes on using tab widgets:
# * Tabs are actually named in the parent tab widget (1 level up.)
# * To add a tab, hover the mouse over an existing tab, right click mouse, and select
# Insert page.
# PySide2 TableView Video: https://www.youtube.com/watch?v=4PkPezdpO90
# Associated repo: https://github.com/vfxpipeline/filebrowser
# [Python Virtual Environments](https://realpython.com/python-virtual-environments-a-primer/)
#
# * Use [Virtual Environment Wrapper](https://virtualenvwrapper.readthedocs.io/en/latest/)
# to make life easier.
#
# * Add to `~/.bashrc`:
#
#      # Setup for Python virtual environments:
# export WORKON_HOME=$HOME/.virtualenvs # Standard place to store virtual env.'s
# export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3 # Make sure you point to correct Python
# export VIRTUALENVWRAPPER_WORKON_CD=1 # Forces `workon` to cd to project dir.
# source /usr/local/bin/virtualenvwrapper.sh # Actually `which virtualenvwrapper.sh`
#
# * Run the following commands in your shell:
#
# sudo -H pip3 install virtualenv # Should already be installed
# sudo -H pip3 install virtualenvwrapper # This makes life easier.
# source ~/.bashrc
#
# * The following shell commands now exist:
# * mkvirtualenv -a *project_directory* *env_name*: Create new virtual environment named
# *env_name* with *project_directory* as the home directory to go to when initially
#     activated. (Requires `export VIRTUALENVWRAPPER_WORKON_CD=1` to be set in `~/.bashrc`.)
# * workon: List all available virtual environments.
# * workon *env_name*: Switch over to virtual environment *env_name*.
# * lssitepackages: List the packages installed in the current virtual environment.
# * Read the documentation for more commands.
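#   * Example session (illustrative project path and environment name):
#
#         mkvirtualenv -a ~/projects/my_project my_env
#         workon my_env
#         lssitepackages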
#
# * There is a section about "Using Different Versions of Python" that looks interesting.
#
# Python Packaging Tutorial (2.x):
# https://python-packaging.readthedocs.io/en/latest/
# [Python Packaging](https://packaging.python.org/tutorials/packaging-projects/)
# *
# Another URL (talks about PyPI accounts -- dated 2009, Python 2.6):
# https://pythonhosted.org/an_example_pypi_project/setuptools.html
# [Configuring `~/.pypirc`](https://truveris.github.io/articles/configuring-pypirc/)
# [Python Plugins](https://packaging.python.org/guides/creating-and-discovering-plugins/)
# [Python Plugins Tutorial](https://amir.rachum.com/blog/2017/07/28/python-entry-points/)
| 44.303483 | 100 | 0.603251 |
4f527facd937d103d758b8f480ff18f3d57f4beb | 137 | py | Python | tests/conftest.py | jmuddappa/DeepClassificationBot | 70aaa6787cf02e8a6b49a913af6496bc0f288b35 | [
"MIT"
] | 40 | 2020-09-15T16:39:35.000Z | 2022-03-20T12:49:45.000Z | tests/conftest.py | jmuddappa/DeepClassificationBot | 70aaa6787cf02e8a6b49a913af6496bc0f288b35 | [
"MIT"
] | 82 | 2020-06-17T08:22:51.000Z | 2021-10-02T15:22:20.000Z | tests/conftest.py | jmuddappa/DeepClassificationBot | 70aaa6787cf02e8a6b49a913af6496bc0f288b35 | [
"MIT"
] | 5 | 2021-09-29T22:15:04.000Z | 2022-03-31T20:32:57.000Z | import pytest
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
monkeypatch.delattr("requests.sessions.Session.request")
| 19.571429 | 60 | 0.79562 |
4f4ff9231554cbc25ebf152eaa61c41f361d9496 | 5,616 | py | Python | cupy/sorting/sort.py | andy6975/cupy | 34b388e4a4fe7c59092b4d4c9c96b2f307e49e46 | [
"MIT"
] | 2 | 2020-02-28T09:27:58.000Z | 2020-10-12T07:10:24.000Z | cupy/sorting/sort.py | andy6975/cupy | 34b388e4a4fe7c59092b4d4c9c96b2f307e49e46 | [
"MIT"
] | 1 | 2019-08-05T09:36:13.000Z | 2019-08-06T12:03:01.000Z | cupy/sorting/sort.py | andy6975/cupy | 34b388e4a4fe7c59092b4d4c9c96b2f307e49e46 | [
"MIT"
] | 1 | 2020-11-24T03:44:35.000Z | 2020-11-24T03:44:35.000Z | import cupy
import numpy
if cupy.cuda.thrust_enabled:
from cupy.cuda import thrust
def sort(a, axis=-1):
"""Returns a sorted copy of an array with a stable sorting algorithm.
Args:
a (cupy.ndarray): Array to be sorted.
axis (int or None): Axis along which to sort. Default is -1, which
means sort along the last axis. If None is supplied, the array is
flattened before sorting.
Returns:
cupy.ndarray: Array of the same type and shape as ``a``.
.. note::
        For implementation reasons, ``cupy.sort`` currently does not support
        the ``kind`` and ``order`` parameters that ``numpy.sort`` supports.
.. seealso:: :func:`numpy.sort`
"""
if axis is None:
ret = a.flatten()
axis = -1
else:
ret = a.copy()
ret.sort(axis=axis)
return ret
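# A minimal usage sketch (illustrative values):
#
#   >>> a = cupy.array([[3, 1], [2, 4]])
#   >>> cupy.sort(a, axis=1)        # sort each row
#   array([[1, 3],
#          [2, 4]])
#   >>> cupy.sort(a, axis=None)     # flatten, then sort
#   array([1, 2, 3, 4])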
def lexsort(keys):
"""Perform an indirect sort using an array of keys.
Args:
keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped
arrays. The ``k`` different "rows" to be sorted. The last row is
the primary sort key.
Returns:
cupy.ndarray: Array of indices that sort the keys.
.. note::
        For implementation reasons, ``cupy.lexsort`` currently supports only
        keys whose rank is one or two, and does not support the ``axis``
        parameter that ``numpy.lexsort`` supports.
.. seealso:: :func:`numpy.lexsort`
"""
# TODO(takagi): Support axis argument.
if not cupy.cuda.thrust_enabled:
raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '
'install CUDA Toolkit with Thrust then reinstall '
'CuPy after uninstalling it.')
    if keys.ndim == 0:
# as numpy.lexsort() raises
raise TypeError('need sequence of keys with len > 0 in lexsort')
if keys.ndim == 1:
return 0
# TODO(takagi): Support ranks of three or more.
if keys.ndim > 2:
        raise NotImplementedError('Keys with rank three or more are not '
                                  'supported in lexsort')
idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)
k = keys._shape[0]
n = keys._shape[1]
thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)
return idx_array
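# Usage sketch (illustrative): the last row is the primary sort key, so below
# the primary key is [0, 3, 0] and the tie between indices 0 and 2 is broken
# by the secondary key [1, 2, 2]:
#
#   >>> keys = cupy.array([[1, 2, 2], [0, 3, 0]])
#   >>> cupy.lexsort(keys)
#   array([0, 2, 1])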
def argsort(a, axis=-1):
"""Returns the indices that would sort an array with a stable sorting.
Args:
a (cupy.ndarray): Array to sort.
axis (int or None): Axis along which to sort. Default is -1, which
means sort along the last axis. If None is supplied, the array is
flattened before sorting.
Returns:
cupy.ndarray: Array of indices that sort ``a``.
.. note::
        For implementation reasons, ``cupy.argsort`` does not support the
        ``kind`` and ``order`` parameters.
.. seealso:: :func:`numpy.argsort`
"""
return a.argsort(axis=axis)
def msort(a):
"""Returns a copy of an array sorted along the first axis.
Args:
a (cupy.ndarray): Array to be sorted.
Returns:
cupy.ndarray: Array of the same type and shape as ``a``.
    .. note::
``cupy.msort(a)``, the CuPy counterpart of ``numpy.msort(a)``, is
equivalent to ``cupy.sort(a, axis=0)``.
.. seealso:: :func:`numpy.msort`
"""
# TODO(takagi): Support float16 and bool.
return sort(a, axis=0)
# TODO(okuta): Implement sort_complex
def partition(a, kth, axis=-1):
"""Returns a partitioned copy of an array.
Creates a copy of the array whose elements are rearranged such that the
value of the element in k-th position would occur in that position in a
sorted array. All of the elements before the new k-th element are less
than or equal to the elements after the new k-th element.
Args:
a (cupy.ndarray): Array to be sorted.
kth (int or sequence of ints): Element index to partition by. If
supplied with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis (int or None): Axis along which to sort. Default is -1, which
means sort along the last axis. If None is supplied, the array is
flattened before sorting.
Returns:
cupy.ndarray: Array of the same type and shape as ``a``.
.. seealso:: :func:`numpy.partition`
"""
if axis is None:
ret = a.flatten()
axis = -1
else:
ret = a.copy()
ret.partition(kth, axis=axis)
return ret
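# Usage sketch (illustrative): after partitioning, only the element at index
# ``kth`` is guaranteed to sit in its sorted position:
#
#   >>> a = cupy.array([7, 4, 2, 9, 1])
#   >>> int(cupy.partition(a, 2)[2])
#   4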
def argpartition(a, kth, axis=-1):
"""Returns the indices that would partially sort an array.
Args:
a (cupy.ndarray): Array to be sorted.
kth (int or sequence of ints): Element index to partition by. If
supplied with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis (int or None): Axis along which to sort. Default is -1, which
means sort along the last axis. If None is supplied, the array is
flattened before sorting.
Returns:
cupy.ndarray: Array of the same type and shape as ``a``.
.. note::
        For implementation reasons, `cupy.argpartition` fully sorts the
        given array, as `cupy.argsort` does. It also does not support the
        ``kind`` and ``order`` parameters that ``numpy.argpartition`` supports.
.. seealso:: :func:`numpy.argpartition`
"""
return a.argpartition(kth, axis=axis)
| 30.193548 | 79 | 0.615028 |
4f52d027679398864183f7311fcdffa8dd37c1ff | 2,135 | py | Python | classes/functions_involving_formula_class.py | watchduck/concertina_hypercubes | 4d51d4ebcb2ea13a237356bb238b066f6f3d9feb | [
"MIT"
] | 1 | 2018-10-28T08:58:54.000Z | 2018-10-28T08:58:54.000Z | classes/functions_involving_formula_class.py | watchduck/concertina_hypercubes | 4d51d4ebcb2ea13a237356bb238b066f6f3d9feb | [
"MIT"
] | null | null | null | classes/functions_involving_formula_class.py | watchduck/concertina_hypercubes | 4d51d4ebcb2ea13a237356bb238b066f6f3d9feb | [
"MIT"
] | 1 | 2021-06-19T21:35:11.000Z | 2021-06-19T21:35:11.000Z | from .formula import Formula
def old_to_new_formulas(n, old_formulas):
new_lower_formulas = set()
for old_vert, old_horz in old_formulas:
for i in range(n):
old_formula = Formula(old_vert, old_horz)
new_formula = old_formula.insert(i)
new_vert = new_formula.vert
new_horz = new_formula.horz
new_lower_formulas.add((
tuple(new_vert), tuple(new_horz)
))
new_lower_formulas = list(new_lower_formulas)
new_upper_formulas = []
for lower_vert, lower_horz in new_lower_formulas:
lower_formula = Formula(lower_vert, lower_horz)
upper_formula = lower_formula.complement()
upper_vert = upper_formula.vert
upper_horz = upper_formula.horz
new_upper_formulas.append((
tuple(upper_vert), tuple(upper_horz)
))
return new_lower_formulas + new_upper_formulas
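# Usage sketch (hypothetical (vert, horz) tuples; the real ones come from the
# Formula class in .formula):
#
#   new_formulas = old_to_new_formulas(3, [((0, 1), (1, 0))])
#
# Each old pair contributes up to n new lower formulas (one per insertion
# index, deduplicated through a set) plus their complements as upper formulas.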
def old_to_new_points_and_formulas(n, old_vertices):
top_point_coordinates = n * [(n+1)] # e.g. [4, 4, 4] for dimension 3
new_lower_vertices = set()
for old_point, old_vert, old_horz in old_vertices:
for i in range(n):
new_point = list(old_point)
new_point.insert(i, 0)
old_formula = Formula(old_vert, old_horz)
new_formula = old_formula.insert(i)
new_vert = new_formula.vert
new_horz = new_formula.horz
new_lower_vertices.add((
tuple(new_point), tuple(new_vert), tuple(new_horz)
))
new_lower_vertices = list(new_lower_vertices)
new_upper_vertices = []
for lower_point, lower_vert, lower_horz in new_lower_vertices:
upper_point = [t - p for t, p in zip(top_point_coordinates, lower_point)]
lower_formula = Formula(lower_vert, lower_horz)
upper_formula = lower_formula.complement()
upper_vert = upper_formula.vert
upper_horz = upper_formula.horz
new_upper_vertices.append((
tuple(upper_point), tuple(upper_vert), tuple(upper_horz)
))
return new_lower_vertices + new_upper_vertices | 32.348485 | 81 | 0.650585 |
4f52dc547217b9ce3c1d38e8608a23de171a3cd3 | 29,905 | py | Python | tests/functional/test_download.py | ichard26/pip | a4cd22b277b9392e41ab0191fc416731f06a170d | [
"MIT"
] | null | null | null | tests/functional/test_download.py | ichard26/pip | a4cd22b277b9392e41ab0191fc416731f06a170d | [
"MIT"
] | null | null | null | tests/functional/test_download.py | ichard26/pip | a4cd22b277b9392e41ab0191fc416731f06a170d | [
"MIT"
] | null | null | null | import os.path
import shutil
import textwrap
from hashlib import sha256
from pathlib import Path
import pytest
from pip._internal.cli.status_codes import ERROR
from pip._internal.utils.urls import path_to_url
from tests.lib import create_really_basic_wheel
from tests.lib.server import file_response
def fake_wheel(data, wheel_path):
wheel_name = os.path.basename(wheel_path)
name, version, rest = wheel_name.split("-", 2)
wheel_data = create_really_basic_wheel(name, version)
data.packages.joinpath(wheel_path).write_bytes(wheel_data)
@pytest.mark.network
def test_download_if_requested(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip("download", "-d", "pip_downloads", "INITools==0.1")
result.did_create(Path("scratch") / "pip_downloads" / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
@pytest.mark.network
def test_basic_download_setuptools(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip("download", "setuptools")
setuptools_prefix = str(Path("scratch") / "setuptools")
assert any(path.startswith(setuptools_prefix) for path in result.files_created)
def test_download_wheel(script, data):
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
"download", "--no-index", "-f", data.packages, "-d", ".", "meta"
)
result.did_create(Path("scratch") / "meta-1.0-py2.py3-none-any.whl")
result.did_not_create(script.site_packages / "piptestpackage")
@pytest.mark.network
def test_single_download_from_requirements_file(script):
"""
It should support download (in the scratch path) from PyPI from a
requirements file
"""
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
result.did_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
"""
It should download dependencies (in the scratch path)
"""
result = script.pip("download", "Paste[openid]==1.7.5.1", "-d", ".")
result.did_create(Path("scratch") / "Paste-1.7.5.1.tar.gz")
openid_tarball_prefix = str(Path("scratch") / "python-openid-")
assert any(path.startswith(openid_tarball_prefix) for path in result.files_created)
result.did_not_create(script.site_packages / "openid")
def test_download_wheel_archive(script, data):
"""
It should download a wheel archive path
"""
wheel_filename = "colander-0.9.9-py2.py3-none-any.whl"
wheel_path = "/".join((data.find_links, wheel_filename))
result = script.pip("download", wheel_path, "-d", ".", "--no-deps")
result.did_create(Path("scratch") / wheel_filename)
def test_download_should_download_wheel_deps(script, data):
"""
It should download dependencies for wheels(in the scratch path)
"""
wheel_filename = "colander-0.9.9-py2.py3-none-any.whl"
dep_filename = "translationstring-1.1.tar.gz"
wheel_path = "/".join((data.find_links, wheel_filename))
result = script.pip(
"download", wheel_path, "-d", ".", "--find-links", data.find_links, "--no-index"
)
result.did_create(Path("scratch") / wheel_filename)
result.did_create(Path("scratch") / dep_filename)
@pytest.mark.network
def test_download_should_skip_existing_files(script):
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
result.did_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
# adding second package to test-req.txt
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
INITools==0.1
python-openid==2.2.5
"""
)
)
# only the second package should be downloaded
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"-d",
".",
)
openid_tarball_prefix = str(Path("scratch") / "python-openid-")
assert any(path.startswith(openid_tarball_prefix) for path in result.files_created)
result.did_not_create(Path("scratch") / "INITools-0.1.tar.gz")
result.did_not_create(script.site_packages / "initools")
result.did_not_create(script.site_packages / "openid")
@pytest.mark.network
def test_download_vcs_link(script):
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
"download", "-d", ".", "git+git://github.com/pypa/pip-test-package.git"
)
result.did_create(Path("scratch") / "pip-test-package-0.1.1.zip")
result.did_not_create(script.site_packages / "piptestpackage")
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--no-deps",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
expect_error=True,
)
assert "--only-binary=:all:" in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--no-binary=fake",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
expect_error=True,
)
assert "--only-binary=:all:" in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_9_x86_64",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl")
fake_wheel(data, "fake-2.0-py2.py3-none-linux_x86_64.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_10_x86_64",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl")
# OSX platform wheels are not backward-compatible.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"macosx_10_8_x86_64",
"fake",
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake==1",
expect_error=True,
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake==2",
)
result.did_create(Path("scratch") / "fake-2.0-py2.py3-none-linux_x86_64.whl")
# Test with multiple supported platforms specified.
data.reset()
fake_wheel(data, "fake-3.0-py2.py3-none-linux_x86_64.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"manylinux1_x86_64",
"--platform",
"linux_x86_64",
"--platform",
"any",
"fake==3",
)
result.did_create(Path("scratch") / "fake-3.0-py2.py3-none-linux_x86_64.whl")
class TestDownloadPlatformManylinuxes:
"""
"pip download --platform" downloads a .whl archive supported for
manylinux platforms.
"""
@pytest.mark.parametrize(
"platform",
[
"linux_x86_64",
"manylinux1_x86_64",
"manylinux2010_x86_64",
"manylinux2014_x86_64",
],
)
def test_download_universal(self, platform, script, data):
"""
Universal wheels are returned even for specific platforms.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
@pytest.mark.parametrize(
"wheel_abi,platform",
[
("manylinux1_x86_64", "manylinux1_x86_64"),
("manylinux1_x86_64", "manylinux2010_x86_64"),
("manylinux2010_x86_64", "manylinux2010_x86_64"),
("manylinux1_x86_64", "manylinux2014_x86_64"),
("manylinux2010_x86_64", "manylinux2014_x86_64"),
("manylinux2014_x86_64", "manylinux2014_x86_64"),
],
)
def test_download_compatible_manylinuxes(
self,
wheel_abi,
platform,
script,
data,
):
"""
Earlier manylinuxes are compatible with later manylinuxes.
"""
wheel = f"fake-1.0-py2.py3-none-{wheel_abi}.whl"
fake_wheel(data, wheel)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
platform,
"fake",
)
result.did_create(Path("scratch") / wheel)
def test_explicit_platform_only(self, data, script):
"""
When specifying the platform, manylinux1 needs to be the
explicit platform--it won't ever be added to the compatible
tags.
"""
fake_wheel(data, "fake-1.0-py2.py3-none-linux_x86_64.whl")
script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--platform",
"linux_x86_64",
"fake",
)
def test_download__python_version(script, data):
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"27",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"33",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-py2-none-any.whl")
fake_wheel(data, "fake-2.0-py3-none-any.whl")
# No py3 provided for version 1.
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake==1.0",
expect_error=True,
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"26",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-2.0-py3-none-any.whl")
def make_wheel_with_python_requires(script, package_name, python_requires):
"""
Create a wheel using the given python_requires.
:return: the path to the wheel file.
"""
package_dir = script.scratch_path / package_name
package_dir.mkdir()
text = textwrap.dedent(
"""\
from setuptools import setup
setup(name='{}',
python_requires='{}',
version='1.0')
"""
).format(package_name, python_requires)
package_dir.joinpath("setup.py").write_text(text)
script.run(
"python",
"setup.py",
"bdist_wheel",
"--universal",
cwd=package_dir,
)
file_name = f"{package_name}-1.0-py2.py3-none-any.whl"
return package_dir / "dist" / file_name
def test_download__python_version_used_for_python_requires(
script,
data,
with_wheel,
):
"""
Test that --python-version is used for the Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==3.2",
)
wheel_dir = os.path.dirname(wheel_path)
def make_args(python_version):
return [
"download",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
python_version,
"mypackage==1.0",
]
args = make_args("33")
result = script.pip(*args, expect_error=True)
expected_err = (
"ERROR: Package 'mypackage' requires a different Python: "
"3.3.0 not in '==3.2'"
)
assert expected_err in result.stderr, f"stderr: {result.stderr}"
# Now try with a --python-version that satisfies the Requires-Python.
args = make_args("32")
script.pip(*args) # no exception
def test_download_ignore_requires_python_dont_fail_with_wrong_python(
script,
with_wheel,
):
"""
Test that --ignore-requires-python ignores Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script,
"mypackage",
python_requires="==999",
)
wheel_dir = os.path.dirname(wheel_path)
result = script.pip(
"download",
"--ignore-requires-python",
"--no-index",
"--find-links",
wheel_dir,
"--only-binary=:all:",
"--dest",
".",
"mypackage==1.0",
)
result.did_create(Path("scratch") / "mypackage-1.0-py2.py3-none-any.whl")
def test_download_specify_abi(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"fake_abi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--abi",
"none",
"fake",
)
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--abi",
"cp27m",
"fake",
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-fakeabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"none",
"fake",
expect_error=True,
)
data.reset()
fake_wheel(data, "fake-1.0-fk2-otherabi-fake_platform.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--python-version",
"2",
"--implementation",
"fk",
"--platform",
"fake_platform",
"--abi",
"fakeabi",
"--abi",
"otherabi",
"--abi",
"none",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk2-otherabi-fake_platform.whl")
def test_download_specify_implementation(script, data):
"""
    Test using "pip download --implementation" to download a .whl archive
    supported for a specific implementation
"""
fake_wheel(data, "fake-1.0-py2.py3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-py2.py3-none-any.whl")
data.reset()
fake_wheel(data, "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"3",
"fake",
)
result.did_create(Path("scratch") / "fake-1.0-fk3-none-any.whl")
result = script.pip(
"download",
"--no-index",
"--find-links",
data.find_links,
"--only-binary=:all:",
"--dest",
".",
"--implementation",
"fk",
"--python-version",
"2",
"fake",
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(script):
"""
Test download exit status code when no requirements specified
"""
result = script.pip("download", expect_error=True)
assert "You must give at least one requirement to download" in result.stderr
assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(script):
"""
Test download exit status code when blank requirements file specified
"""
script.scratch_path.joinpath("blank.txt").write_text("\n")
script.pip("download", "-r", "blank.txt")
def test_download_prefer_binary_when_tarball_higher_than_wheel(script, data):
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_tarball_higher_than_wheel_req_file(script, data):
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"-r",
script.scratch_path / "test-req.txt",
"--no-index",
"-f",
data.packages,
"-d",
".",
)
result.did_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
result.did_not_create(Path("scratch") / "source-1.0.tar.gz")
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(script, data):
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
source>0.9
"""
)
)
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_prefer_binary_when_wheel_doesnt_satisfy_req_req_file(script, data):
fake_wheel(data, "source-0.8-py2.py3-none-any.whl")
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source>0.9
"""
)
)
result = script.pip(
"download",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
result.did_not_create(Path("scratch") / "source-0.8-py2.py3-none-any.whl")
def test_download_prefer_binary_when_only_tarball_exists(script, data):
result = script.pip(
"download",
"--prefer-binary",
"--no-index",
"-f",
data.packages,
"-d",
".",
"source",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
def test_prefer_binary_when_only_tarball_exists_req_file(script, data):
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent(
"""
--prefer-binary
source
"""
)
)
result = script.pip(
"download",
"--no-index",
"-f",
data.packages,
"-d",
".",
"-r",
script.scratch_path / "test-req.txt",
)
result.did_create(Path("scratch") / "source-1.0.tar.gz")
@pytest.fixture(scope="session")
def shared_script(tmpdir_factory, script_factory):
tmpdir = Path(str(tmpdir_factory.mktemp("download_shared_script")))
script = script_factory(tmpdir.joinpath("workspace"))
return script
def test_download_file_url(shared_script, shared_data, tmpdir):
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
shared_script.pip(
"download",
"-d",
str(download_dir),
"--no-index",
path_to_url(str(simple_pkg)),
)
assert downloaded_path.exists()
assert simple_pkg.read_bytes() == downloaded_path.read_bytes()
def test_download_file_url_existing_ok_download(shared_script, shared_data, tmpdir):
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
downloaded_path_bytes = downloaded_path.read_bytes()
digest = sha256(downloaded_path_bytes).hexdigest()
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
url = "{}#sha256={}".format(path_to_url(simple_pkg), digest)
shared_script.pip("download", "-d", str(download_dir), url)
assert downloaded_path_bytes == downloaded_path.read_bytes()
def test_download_file_url_existing_bad_download(shared_script, shared_data, tmpdir):
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
digest = sha256(simple_pkg_bytes).hexdigest()
url = "{}#sha256={}".format(path_to_url(simple_pkg), digest)
shared_script.pip("download", "-d", str(download_dir), url)
assert simple_pkg_bytes == downloaded_path.read_bytes()
def test_download_http_url_bad_hash(shared_script, shared_data, tmpdir, mock_server):
"""
    If an already-downloaded file has a bad checksum, re-download.
"""
download_dir = tmpdir / "download"
download_dir.mkdir()
downloaded_path = download_dir / "simple-1.0.tar.gz"
fake_existing_package = shared_data.packages / "simple-2.0.tar.gz"
shutil.copy(str(fake_existing_package), str(downloaded_path))
simple_pkg = shared_data.packages / "simple-1.0.tar.gz"
simple_pkg_bytes = simple_pkg.read_bytes()
digest = sha256(simple_pkg_bytes).hexdigest()
mock_server.set_responses([file_response(simple_pkg)])
mock_server.start()
base_address = f"http://{mock_server.host}:{mock_server.port}"
url = f"{base_address}/simple-1.0.tar.gz#sha256={digest}"
shared_script.pip("download", "-d", str(download_dir), url)
assert simple_pkg_bytes == downloaded_path.read_bytes()
mock_server.stop()
requests = mock_server.get_requests()
assert len(requests) == 1
assert requests[0]["PATH_INFO"] == "/simple-1.0.tar.gz"
assert requests[0]["HTTP_ACCEPT_ENCODING"] == "identity"
def test_download_editable(script, data, tmpdir):
"""
Test 'pip download' of editables in requirement file.
"""
editable_path = str(data.src / "simplewheel-1.0").replace(os.path.sep, "/")
requirements_path = tmpdir / "requirements.txt"
requirements_path.write_text("-e " + editable_path + "\n")
download_dir = tmpdir / "download_dir"
script.pip(
"download", "--no-deps", "-r", str(requirements_path), "-d", str(download_dir)
)
downloads = os.listdir(download_dir)
assert len(downloads) == 1
assert downloads[0].endswith(".zip")
| 26.844704 | 88 | 0.56529 |
4f53e230d35be88e424150139cb82087bdb426d5 | 568 | py | Python | deepchem/utils/typing.py | akihironitta/deepchem | ffad1a75997bb79360751b2a5fcf3b3405f3c44e | [
"MIT"
] | null | null | null | deepchem/utils/typing.py | akihironitta/deepchem | ffad1a75997bb79360751b2a5fcf3b3405f3c44e | [
"MIT"
] | null | null | null | deepchem/utils/typing.py | akihironitta/deepchem | ffad1a75997bb79360751b2a5fcf3b3405f3c44e | [
"MIT"
] | null | null | null | """Type annotations that are widely used in DeepChem"""
from typing import Callable, List, Sequence, Tuple, TypeVar, Union
T = TypeVar("T")
# An activation function for a Keras layer: either a TensorFlow function or the name of a standard activation
KerasActivationFn = Union[Callable, str]
# A loss function for use with KerasModel: f(outputs, labels, weights)
KerasLossFn = Callable[[List, List, List], float]
# A single value of some type, or multiple values of that type
OneOrMany = Union[T, Sequence[T]]
# The shape of a NumPy array
Shape = Tuple[int, ...]
| 31.555556 | 109 | 0.742958 |
4f54a5f6c3f2d09e721c001e88296a09fd1d8f55 | 6,748 | py | Python | kivy/tools/benchmark.py | Abestanis/kivy | a3dc9211830eb932421cdd2223e7cc7da3891211 | [
"MIT"
] | 1 | 2020-02-24T19:03:54.000Z | 2020-02-24T19:03:54.000Z | kivy/tools/benchmark.py | waderly/kivy | 1bfa50bcbe83350f42f002bc217d381e4878666d | [
"MIT"
] | null | null | null | kivy/tools/benchmark.py | waderly/kivy | 1bfa50bcbe83350f42f002bc217d381e4878666d | [
"MIT"
] | null | null | null | '''
Benchmark
=========
'''
from __future__ import print_function
benchmark_version = '1'
import os
import sys
import json
import kivy
import gc
from time import clock, time, ctime
from random import randint
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.graphics import RenderContext
from kivy.input.motionevent import MotionEvent
from kivy.cache import Cache
from kivy.clock import Clock
clockfn = time
if sys.platform == 'win32':
clockfn = clock
class FakeMotionEvent(MotionEvent):
pass
class bench_widget_creation:
'''Widget: creation (10000 Widget)'''
def run(self):
o = []
for x in range(10000):
o.append(Widget())
class bench_widget_creation_with_root:
'''Widget: creation (10000 Widget + 1 root)'''
def run(self):
o = Widget()
for x in range(10000):
o.add_widget(Widget())
class bench_widget_draw:
'''Widget: empty drawing (10000 Widget + 1 root)'''
def __init__(self):
self.ctx = RenderContext()
self.root = root = Widget()
for x in range(10000):
root.add_widget(Widget())
self.ctx.add(self.root.canvas)
def run(self):
self.ctx.draw()
class bench_widget_dispatch:
'''Widget: event dispatch (1000 on_update in 10*1000 Widget)'''
def __init__(self):
root = Widget()
for x in range(10):
parent = Widget()
for y in range(1000):
parent.add_widget(Widget())
root.add_widget(parent)
self.root = root
def run(self):
touch = FakeMotionEvent('fake', 1, [])
self.root.dispatch('on_touch_down', touch)
self.root.dispatch('on_touch_move', touch)
self.root.dispatch('on_touch_up', touch)
class bench_label_creation:
'''Core: label creation (10000 * 10 a-z)'''
def __init__(self):
labels = []
for x in range(10000):
label = [chr(randint(ord('a'), ord('z'))) for x in range(10)]
labels.append(''.join(label))
self.labels = labels
def run(self):
o = []
for x in self.labels:
o.append(Label(text=x))
class bench_button_creation:
'''Core: button creation (10000 * 10 a-z)'''
def __init__(self):
labels = []
        for x in range(10000):
            button = [chr(randint(ord('a'), ord('z'))) for x in range(10)]
labels.append(''.join(button))
self.labels = labels
def run(self):
o = []
for x in self.labels:
o.append(Button(text=x))
class bench_label_creation_with_tick:
'''Core: label creation (10000 * 10 a-z), with Clock.tick'''
def __init__(self):
labels = []
for x in range(10000):
label = [chr(randint(ord('a'), ord('z'))) for x in range(10)]
labels.append(''.join(label))
self.labels = labels
def run(self):
o = []
for x in self.labels:
o.append(Label(text=x))
# tick for texture creation
Clock.tick()
class bench_button_creation_with_tick:
'''Core: button creation (10000 * 10 a-z), with Clock.tick'''
def __init__(self):
labels = []
        for x in range(10000):
            button = [chr(randint(ord('a'), ord('z'))) for x in range(10)]
labels.append(''.join(button))
self.labels = labels
def run(self):
o = []
for x in self.labels:
o.append(Button(text=x))
# tick for texture creation
Clock.tick()
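# Sketch of how an extra benchmark could be added: the runner below picks up
# any class whose name starts with ``bench_``, so this example deliberately
# uses a different prefix to stay inert (rename it to ``bench_...`` to run it):
class example_widget_removal:
    '''Widget: removal (inert example, see comment above)'''
    def __init__(self):
        self.root = Widget()
        self.children = [Widget() for x in range(1000)]
        for child in self.children:
            self.root.add_widget(child)
    def run(self):
        for child in self.children:
            self.root.remove_widget(child)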
if __name__ == '__main__':
report = []
report_newline = True
def log(s, newline=True):
global report_newline
if not report_newline:
report[-1] = '%s %s' % (report[-1], s)
else:
report.append(s)
if newline:
print(s)
report_newline = True
else:
print(s, end=' ')
report_newline = False
sys.stdout.flush()
clock_total = 0
benchs = list(globals().keys())
benchs.sort()
benchs = [globals()[x] for x in benchs if x.startswith('bench_')]
log('')
log('=' * 70)
log('Kivy Benchmark v%s' % benchmark_version)
log('=' * 70)
log('')
log('System informations')
log('-------------------')
log('OS platform : %s' % sys.platform)
log('Python EXE : %s' % sys.executable)
log('Python Version : %s' % sys.version)
log('Python API : %s' % sys.api_version)
log('Kivy Version : %s' % kivy.__version__)
log('Install path : %s' % os.path.dirname(kivy.__file__))
log('Install date : %s' % ctime(os.path.getctime(kivy.__file__)))
log('')
log('OpenGL informations')
log('-------------------')
from kivy.core.gl import glGetString, GL_VENDOR, GL_RENDERER, GL_VERSION
log('GL Vendor: %s' % glGetString(GL_VENDOR))
log('GL Renderer: %s' % glGetString(GL_RENDERER))
log('GL Version: %s' % glGetString(GL_VERSION))
log('')
log('Benchmark')
log('---------')
for x in benchs:
# clean cache to prevent weird case
for cat in Cache._categories:
Cache.remove(cat)
# force gc before next test
gc.collect()
log('%2d/%-2d %-60s' % (benchs.index(x) + 1,
len(benchs), x.__doc__), False)
try:
sys.stderr.write('.')
test = x()
except Exception as e:
log('failed %s' % str(e))
import traceback
traceback.print_exc()
continue
clock_start = clockfn()
try:
sys.stderr.write('.')
test.run()
clock_end = clockfn() - clock_start
log('%.6f' % clock_end)
except Exception as e:
log('failed %s' % str(e))
continue
clock_total += clock_end
log('')
log('Result: %.6f' % clock_total)
log('')
try:
reply = input(
'Do you want to send benchmark to gist.github.com (Y/n) : ')
except EOFError:
sys.exit(0)
if reply.lower().strip() in ('', 'y'):
print('Please wait while sending the benchmark...')
try:
import requests
except ImportError:
print("`requests` module not found, no benchmark posted.")
sys.exit(1)
payload = {
'public': True, 'files': {
'benchmark.txt': {
'content': '\n'.join(report)}}}
r = requests.post('https://api.github.com/gists', data=json.dumps(payload))
print()
print()
        print('REPORT posted at {0}'.format(r.json()['html_url']))
print()
print()
else:
print('No benchmark posted.')
| 24.717949 | 80 | 0.555868 |
4f52a423af4d9be1f6bea1fd72840deb206d48e4 | 497 | py | Python | integration/airflow/tests/mocks/git_mock.py | skcc00000app08542/OpenLineage | e0dd3715e61b3d89f60ece0d7385e82ccd141ba4 | [
"Apache-2.0"
] | 1 | 2021-12-03T17:00:00.000Z | 2021-12-03T17:00:00.000Z | integration/airflow/tests/mocks/git_mock.py | skcc00000app08542/OpenLineage | e0dd3715e61b3d89f60ece0d7385e82ccd141ba4 | [
"Apache-2.0"
] | 1 | 2022-02-17T09:43:38.000Z | 2022-02-17T09:43:52.000Z | integration/airflow/tests/mocks/git_mock.py | skcc00000app08542/OpenLineage | e0dd3715e61b3d89f60ece0d7385e82ccd141ba4 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0.
import logging
import subprocess
log = logging.getLogger(__name__)
def execute_git_mock(cwd, params):
# just mock the git revision
log.debug("execute_git_mock()")
if len(cwd) > 0 and params[0] == 'rev-list':
return 'abcd1234'
p = subprocess.Popen(['git'] + params,
cwd=cwd, stdout=subprocess.PIPE, stderr=None)
p.wait(timeout=0.5)
out, err = p.communicate()
return out.decode('utf8').strip()
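# Wiring sketch (hypothetical patch target; the real attribute path depends on
# where the code under test invokes git):
#
#   monkeypatch.setattr(some_module, "execute_git", execute_git_mock)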
| 24.85 | 70 | 0.635815 |
4f4fcd1fae86d3edeaaf584bd3d6f8844adce8f6 | 2,687 | py | Python | mfutil/cli.py | moas/mfutil | 6c6deb0f4b70a6e8b34d666a4ee16e431d88265f | [
"BSD-3-Clause"
] | null | null | null | mfutil/cli.py | moas/mfutil | 6c6deb0f4b70a6e8b34d666a4ee16e431d88265f | [
"BSD-3-Clause"
] | null | null | null | mfutil/cli.py | moas/mfutil | 6c6deb0f4b70a6e8b34d666a4ee16e431d88265f | [
"BSD-3-Clause"
] | null | null | null | """Utility functions to build CLI."""
from __future__ import print_function
import six
import sys
import ctypes
MFUTIL_INSTANCE = None
def _get_mfutil():
global MFUTIL_INSTANCE
if MFUTIL_INSTANCE is None:
i = ctypes.cdll.LoadLibrary("libmfutil.so")
i.mfutil_echo_ok.restype = None
i.mfutil_echo_ok.argtypes = [ctypes.c_char_p]
i.mfutil_is_interactive_execution.restype = ctypes.c_int
i.mfutil_is_interactive_execution.argtypes = []
i.mfutil_echo_nok.restype = None
i.mfutil_echo_nok.argtypes = [ctypes.c_char_p]
i.mfutil_echo_warning.restype = None
i.mfutil_echo_warning.argtypes = [ctypes.c_char_p]
i.mfutil_echo_bold.restype = None
i.mfutil_echo_bold.argtypes = [ctypes.c_char_p]
i.mfutil_echo_running.restype = None
i.mfutil_echo_running.argtypes = []
i.mfutil_echo_clean.restype = None
i.mfutil_echo_clean.argtypes = []
MFUTIL_INSTANCE = i
return MFUTIL_INSTANCE
def is_interactive():
"""Return True if we are in an interactive terminal."""
tmp = _get_mfutil().mfutil_is_interactive_execution()
return (tmp == 1)
def echo_ok(message=""):
"""Write [OK] with colors if supported a little optional message.
Args:
message (string): little optional message.
"""
_get_mfutil().mfutil_echo_ok(message.encode('utf8'))
def echo_nok(message=""):
"""Write [ERROR] with colors if supported a little optional message.
Args:
message (string): little optional message.
"""
_get_mfutil().mfutil_echo_nok(message.encode('utf8'))
def echo_warning(message=""):
"""Write [WARNING] with colors if supported a little optional message.
Args:
message (string): little optional message.
"""
_get_mfutil().mfutil_echo_warning(message.encode('utf8'))
def echo_bold(message):
"""Write a message in bold (if supported).
Args:
message (string): message to write in bold.
"""
_get_mfutil().mfutil_echo_bold(message.encode('utf8'))
def echo_running(message=None):
"""Write [RUNNING] with colors if supported.
You can pass an optional message which will be rendered before [RUNNING]
on the same line.
Args:
message (string): little optional message.
"""
if message is None:
_get_mfutil().mfutil_echo_running()
else:
if six.PY2:
print(message, end="")
sys.stdout.flush()
else:
print(message, end="", flush=True)
_get_mfutil().mfutil_echo_running()
def echo_clean():
"""Clean waiting status."""
_get_mfutil().mfutil_echo_clean()
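# A minimal demo sketch (assumes ``libmfutil.so`` is installed and loadable):
if __name__ == "__main__":
    echo_running("doing some work ")
    echo_ok("done")
    if not is_interactive():
        echo_warning("not running in an interactive terminal")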
| 26.087379 | 76 | 0.663937 |
4f50129406e313b4040fbeee72210a940b04b0b7 | 440 | py | Python | tests/unit/libraries/falcon_/conftest.py | PaulStaggs/signalfx-python-tracing | 0f704f86d25824cb9fac306f5b2132fe913aaf87 | [
"Apache-2.0"
] | 21 | 2019-01-05T00:56:39.000Z | 2022-02-27T23:51:49.000Z | tests/unit/libraries/falcon_/conftest.py | PaulStaggs/signalfx-python-tracing | 0f704f86d25824cb9fac306f5b2132fe913aaf87 | [
"Apache-2.0"
] | 15 | 2019-01-09T20:43:23.000Z | 2021-04-23T08:56:50.000Z | tests/unit/libraries/falcon_/conftest.py | PaulStaggs/signalfx-python-tracing | 0f704f86d25824cb9fac306f5b2132fe913aaf87 | [
"Apache-2.0"
] | 9 | 2019-02-18T07:32:49.000Z | 2021-03-15T17:52:55.000Z | # Copyright (C) 2018 SignalFx. All rights reserved.
import pytest
from signalfx_tracing.libraries.falcon_.instrument import config, uninstrument
class FalconTestSuite(object):
@pytest.fixture(autouse=True)
def restored_falcon_config(self):
orig = dict(config.__dict__)
yield
config.__dict__ = orig
@pytest.fixture(autouse=True)
def uninstrument_falcon(self):
yield
uninstrument()
| 24.444444 | 78 | 0.709091 |
4f54484650e55ed504e7bd5891f623bac1fcaf75 | 1,550 | py | Python | mnsim_noc/Buffer/output_buffer.py | godfather991/MNSIM_NoC | 402680ad72c46c2a0b040b5fa52232807d554aec | [
"MIT"
] | null | null | null | mnsim_noc/Buffer/output_buffer.py | godfather991/MNSIM_NoC | 402680ad72c46c2a0b040b5fa52232807d554aec | [
"MIT"
] | 3 | 2021-11-01T15:43:20.000Z | 2021-11-09T03:49:06.000Z | mnsim_noc/Buffer/output_buffer.py | ILTShade/MNSIM_NoC | 8fa4580cce0ef113b473dd22662748846ec6b45a | [
"MIT"
] | null | null | null | #-*-coding:utf-8-*-
"""
@FileName:
output_buffer.py
@Description:
output behavior buffer of the Tile
@Authors:
Hanbo Sun(sun-hb17@mails.tsinghua.edu.cn)
@CreateTime:
2022/05/07 10:51
"""
from mnsim_noc.Buffer.base_buffer import BaseBuffer, get_data_size
class OutputBuffer(BaseBuffer):
"""
output behavior buffer
"""
NAME = "behavior_buffer_output"
def __init__(self, buffer_size):
super(OutputBuffer, self).__init__(buffer_size)
self.end_flag = False
def check_remain_size(self):
"""
        check the remaining size, for computing
"""
if self.end_flag:
return float("inf")
return self.buffer_size - self.used_space
def check_enough_space(self, data_list):
"""
check if the buffer has enough space to add the data
"""
data_size = sum([get_data_size(data) for data in data_list])
return self.check_remain_size() >= data_size
def next_transfer_data(self):
"""
get the next transfer data
"""
if self.end_flag:
return None
if len(self.buffer_data) == 0:
return None
else:
return [self.buffer_data[0]]
def set_end(self):
"""
        set this buffer as the end buffer
"""
self.end_flag = True
def check_finish(self):
"""
check if the buffer is finished
"""
if not self.end_flag:
assert len(self.buffer_data) == 0, "the buffer is not empty"
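# Usage sketch (hypothetical size; ``used_space`` and ``buffer_data`` are
# inherited from BaseBuffer):
#
#   out_buf = OutputBuffer(buffer_size=1024)
#   out_buf.set_end()   # mark as the final buffer of the pipeline
#   assert out_buf.check_remain_size() == float("inf")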
| 25.409836 | 72 | 0.593548 |
4f50cee1367628fbfa1d99eccb100691e537e0d3 | 1,028 | py | Python | 01 Basics/006 - Escape Sequences/006 - Escape Sequences.py | TheUnicum/CorsoPython | 06cb2b58d46b26fc6d1cf4a585ff2666ab4b5c19 | [
"Apache-2.0"
] | null | null | null | 01 Basics/006 - Escape Sequences/006 - Escape Sequences.py | TheUnicum/CorsoPython | 06cb2b58d46b26fc6d1cf4a585ff2666ab4b5c19 | [
"Apache-2.0"
] | null | null | null | 01 Basics/006 - Escape Sequences/006 - Escape Sequences.py | TheUnicum/CorsoPython | 06cb2b58d46b26fc6d1cf4a585ff2666ab4b5c19 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Mattia Benedetti
# All rights reserved.
#
# Author: Mattia Benedetti
"""
- Escape Sequences -
https://docs.python.org/2/reference/lexical_analysis.html
\newline Ignored
\\ Backslash (\)
\' Single quote (')
\" Double quote (")
\a ASCII Bell (BEL)
\b ASCII Backspace (BS)
\f ASCII Formfeed (FF)
\n ASCII Linefeed (LF)
\N{name} Character named name in the Unicode database (Unicode only)
\r ASCII Carriage Return (CR)
\t ASCII Horizontal Tab (TAB)
\uxxxx Character with 16-bit hex value xxxx (Unicode only) (1)
\Uxxxxxxxx Character with 32-bit hex value xxxxxxxx (Unicode only) (2)
\v ASCII Vertical Tab (VT)
\ooo Character with octal value ooo (3,5)
\xhh Character with hex value hh (4,5)
"""
print 'Hello\nWorld'
print
print 'Hello\'World\''
print
print "----- Caratteri speciali ------"
print "ASCII Bell (BEL) : \a"
print "ASCII Backspace (BS) : \b"
print "ASCII Formfeed (FF) : \f"
print "ASCII Linefeed (LF) : \n"
| 24.47619 | 70 | 0.667315 |
4f54e3df09102cb15baafe1ac39fc21f4b9fe37e | 775 | py | Python | devito/yask/grid.py | thast/devito | 7e7ca7c06319babddaa2d7a6481c81caa47e8f8b | [
"MIT"
] | 11 | 2019-07-18T11:15:12.000Z | 2021-09-14T02:20:52.000Z | devito/yask/grid.py | thast/devito | 7e7ca7c06319babddaa2d7a6481c81caa47e8f8b | [
"MIT"
] | null | null | null | devito/yask/grid.py | thast/devito | 7e7ca7c06319babddaa2d7a6481c81caa47e8f8b | [
"MIT"
] | 3 | 2020-02-15T14:23:29.000Z | 2021-02-04T01:51:02.000Z | import devito.grid as grid
from devito.yask.function import Constant
from devito.yask.wrappers import contexts
__all__ = ['Grid']
class Grid(grid.Grid):
def __init__(self, *args, **kwargs):
super(Grid, self).__init__(*args, **kwargs)
# Initialize a new YaskContext for this Grid
contexts.putdefault(self)
@property
def _const(self):
return Constant
def _make_stepping_dim(self, time_dim, **kwargs):
# In the `yask` backend, the stepping dimension is an alias of the
# time dimension
return time_dim
def __setstate__(self, state):
super(Grid, self).__setstate__(state)
# A new context is created, as the unpickled Dimensions are new objects
contexts.putdefault(self)
| 25.833333 | 79 | 0.670968 |
4f54382f6306ef3a9576db44ac81c5a527215de4 | 1,838 | py | Python | venv/Lib/site-packages/pyrogram/raw/base/dc_option.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/pyrogram/raw/base/dc_option.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/pyrogram/raw/base/dc_option.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
DcOption = Union[raw.types.DcOption]
# noinspection PyRedeclaration
class DcOption: # type: ignore
"""This base type has 1 constructor available.
Constructors:
.. hlist::
:columns: 2
- :obj:`DcOption <pyrogram.raw.types.DcOption>`
"""
QUALNAME = "pyrogram.raw.base.DcOption"
def __init__(self):
raise TypeError("Base types can only be used for type checking purposes: "
"you tried to use a base type instance as argument, "
"but you need to instantiate one of its constructors instead. "
"More info: https://docs.pyrogram.org/telegram/base/dc-option")
| 36.76 | 87 | 0.634385 |
4f53b13e4722b7e72262dfa28f131e2c401f0653 | 4,079 | py | Python | napari/_qt/layer_controls/qt_layer_controls_base.py | pranathivemuri/napari | 7a7f824b686a276dc6cdc8013d8f437e3c3b03e1 | [
"BSD-3-Clause"
] | 1 | 2021-05-16T19:31:42.000Z | 2021-05-16T19:31:42.000Z | napari/_qt/layer_controls/qt_layer_controls_base.py | clbarnes/napari | 9b46dd5a92653a194b870e433debd672ff0457ad | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/layer_controls/qt_layer_controls_base.py | clbarnes/napari | 9b46dd5a92653a194b870e433debd672ff0457ad | [
"BSD-3-Clause"
] | null | null | null | from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QFrame, QGridLayout, QSlider
from ...layers.base._base_constants import BLENDING_TRANSLATIONS
from ...utils.events import disconnect_events
class QtLayerControls(QFrame):
"""Superclass for all the other LayerControl classes.
This class is never directly instantiated anywhere.
Parameters
----------
layer : napari.layers.Layer
An instance of a napari layer.
Attributes
----------
blendComboBox : qtpy.QtWidgets.QComboBox
        Dropdown widget to select the blending mode of the layer.
grid_layout : qtpy.QtWidgets.QGridLayout
Layout of Qt widget controls for the layer.
layer : napari.layers.Layer
An instance of a napari layer.
opacitySlider : qtpy.QtWidgets.QSlider
Slider controlling opacity of the layer.
"""
def __init__(self, layer):
super().__init__()
self.layer = layer
self.layer.events.blending.connect(self._on_blending_change)
self.layer.events.opacity.connect(self._on_opacity_change)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setObjectName('layer')
self.setMouseTracking(True)
self.grid_layout = QGridLayout(self)
self.grid_layout.setContentsMargins(0, 0, 0, 0)
self.grid_layout.setSpacing(2)
self.grid_layout.setColumnMinimumWidth(0, 86)
self.grid_layout.setColumnStretch(1, 1)
self.setLayout(self.grid_layout)
sld = QSlider(Qt.Horizontal, parent=self)
sld.setFocusPolicy(Qt.NoFocus)
sld.setMinimum(0)
sld.setMaximum(100)
sld.setSingleStep(1)
sld.valueChanged.connect(self.changeOpacity)
self.opacitySlider = sld
self._on_opacity_change()
blend_comboBox = QComboBox(self)
for index, (data, text) in enumerate(BLENDING_TRANSLATIONS.items()):
data = data.value
blend_comboBox.addItem(text, data)
if data == self.layer.blending:
blend_comboBox.setCurrentIndex(index)
blend_comboBox.activated[str].connect(self.changeBlending)
self.blendComboBox = blend_comboBox
def changeOpacity(self, value):
"""Change opacity value on the layer model.
Parameters
----------
value : float
Opacity value for shapes.
Input range 0 - 100 (transparent to fully opaque).
"""
with self.layer.events.blocker(self._on_opacity_change):
self.layer.opacity = value / 100
def changeBlending(self, text):
"""Change blending mode on the layer model.
Parameters
----------
text : str
Name of blending mode, eg: 'translucent', 'additive', 'opaque'.
"""
self.layer.blending = self.blendComboBox.currentData()
def _on_opacity_change(self, event=None):
"""Receive layer model opacity change event and update opacity slider.
Parameters
----------
event : napari.utils.event.Event, optional
The napari event that triggered this method, by default None.
"""
with self.layer.events.opacity.blocker():
self.opacitySlider.setValue(int(self.layer.opacity * 100))
def _on_blending_change(self, event=None):
"""Receive layer model blending mode change event and update slider.
Parameters
----------
event : napari.utils.event.Event, optional
The napari event that triggered this method, by default None.
"""
with self.layer.events.blending.blocker():
self.blendComboBox.setCurrentIndex(
self.blendComboBox.findData(self.layer.blending)
)
def close(self):
"""Disconnect events when widget is closing."""
disconnect_events(self.layer.events, self)
for child in self.children():
close_method = getattr(child, 'close', None)
if close_method is not None:
close_method()
super().close()
| 33.434426 | 78 | 0.63643 |
4f4a0bc4f0b98705ff36b45d56ab1e2f7e9efa84 | 20,803 | py | Python | parlai/agents/rag/modules.py | askender/ParlAI | 054a0fff8183e357727dc7a91682496734badb7f | [
"MIT"
] | 1 | 2022-03-27T17:16:19.000Z | 2022-03-27T17:16:19.000Z | parlai/agents/rag/modules.py | askender/ParlAI | 054a0fff8183e357727dc7a91682496734badb7f | [
"MIT"
] | null | null | null | parlai/agents/rag/modules.py | askender/ParlAI | 054a0fff8183e357727dc7a91682496734badb7f | [
"MIT"
] | 1 | 2022-03-30T14:05:29.000Z | 2022-03-30T14:05:29.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Modules for RAG.
"""
import torch
import torch.cuda
import torch.nn
import torch.nn.functional as F
from typing import Any, Tuple, Dict, Optional, List, Union, Type
from parlai.agents.hugging_face.t5 import (
ParlaiT5Encoder,
ParlaiT5Decoder,
build_t5,
set_device,
)
from parlai.agents.transformer.modules import (
TransformerEncoder,
TransformerDecoder,
get_n_positions_from_options,
create_embeddings,
)
from parlai.core.dict import DictionaryAgent
from parlai.core.opt import Opt
from parlai.core.torch_generator_agent import TorchGeneratorModel
from parlai.utils.torch import padded_tensor
from parlai.agents.rag.retrievers import retriever_factory, Document
class RagModel(TorchGeneratorModel):
"""
RagModel.
The RagModel operates in the following phases:
1) retrieve: given a tokenized query, return relevant documents
2) expand: given queries and documents, expand the inputs n_docs times,
concatenating each document with a relevant context
3) encode: given expanded input, encode into encoder representations
    4) decode: given encoder outputs, compute n_docs decoder representations for
each batch item.
5) marginalize: given the decoded representations, marginalize over the documents
appropriately.
The RagModel overloads the `encoder` and `decoder` attributes of your standard
`TorchGeneratorModel` to accomplish the five phases above.
"""
def __init__(self, opt, dictionary, retriever_shared=None):
from parlai.agents.rag.rag import RAG_MODELS
self.pad_idx = dictionary[dictionary.null_token]
self.start_idx = dictionary[dictionary.start_token]
self.end_idx = dictionary[dictionary.end_token]
super().__init__(self.pad_idx, self.start_idx, self.end_idx)
self.fp16 = (
not opt['no_cuda'] and torch.cuda.is_available() and opt.get('fp16', False)
)
self.dict = dictionary
self.embeddings = create_embeddings(
dictionary, opt['embedding_size'], self.pad_idx
)
# attrs
self.rag_model_type = opt['rag_model_type']
self._rag_model_interface = RAG_MODELS[self.rag_model_type](opt, self.pad_idx)
self.generation_model = opt['generation_model']
self.n_extra_positions = opt['n_extra_positions']
self.n_positions = get_n_positions_from_options(opt) + opt['n_extra_positions']
assert opt['n_extra_positions'] >= 0
self.expanded_input_truncate = min(
opt['text_truncate'] or opt['truncate'], get_n_positions_from_options(opt)
)
if self.n_extra_positions > 0:
# This attribute is overloaded.
# when n_extra_positions == 0, it is the truncation of the full expanded input
# when >0, it is the maximum length of the knowledge tokens.
self.expanded_input_truncate = self.n_extra_positions
self.min_doc_token_length = opt['min_doc_token_length']
# modules
self.retriever = retriever_factory(opt, dictionary, shared=retriever_shared)
self.seq2seq_encoder = self.build_encoder(
opt,
dictionary=dictionary,
embedding=self.embeddings,
padding_idx=self.pad_idx,
)
self.seq2seq_decoder = self.build_decoder(
opt, embedding=self.embeddings, padding_idx=self.pad_idx
)
@classmethod
def build_encoder(
cls,
opt: Opt,
*args,
dictionary: Optional[DictionaryAgent] = None,
embedding: Optional[torch.nn.Embedding] = None,
encoder_class: Optional[Type] = None,
**kwargs,
):
if encoder_class is None:
assert dictionary is not None
return RagEncoder(
opt=opt, dictionary=dictionary, embedding=embedding, **kwargs
)
else:
return encoder_class(opt, *args, **kwargs)
@classmethod
def build_decoder(
cls,
opt: Opt,
*args,
embedding: Optional[torch.nn.Embedding] = None,
n_positions: Optional[int] = None,
decoder_class: Optional[Type] = None,
**kwargs,
):
if decoder_class is None:
return RagDecoder(opt=opt, embedding=embedding, n_positions=n_positions)
else:
return decoder_class(opt, *args, **kwargs)
def tokenize_query(self, query: str) -> List[int]:
"""
Tokenize the query for the retriever.
"""
return self.retriever.tokenize_query(query)
def get_retriever_delimiter(self) -> str:
"""
Return the retriever's delimiter.
"""
return self.retriever.get_delimiter()
def encoder(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
query_vec: torch.LongTensor,
input_turns_cnt: torch.LongTensor,
positions: Optional[torch.LongTensor] = None,
segments: Optional[torch.LongTensor] = None,
) -> Tuple[
torch.Tensor,
torch.BoolTensor,
Optional[torch.LongTensor],
Optional[List[List[Document]]],
Optional[torch.Tensor],
]:
"""
Retrieve documents and expand input via concatenation.
Then, encode as usual in the seq2seq encoder.
:param input:
2D [bsz, seqlen] input to the encoder
:param input_lengths:
1D [bsz] lengths of each input item
:param query_vec:
2D [bsz*n_turns, seqlen] input for the retriever
:param input_turns_cnt:
1D [bsz] number of dialogue turns for each input example
:return (encoder_out, encoder_mask, input_turns_cnt, top_docs, top_doc_scores):
encoder_out: encoded representations of context/document pairs
encoder_mask: mask for enc_out
input_turns_cnt: pass along the input turns count for the decoder
top_docs: List of top Documents for each batch example
top_doc_scores: scores for each retrieved document.
"""
# Retrieve, get expanded input
if all([tensor is not None for tensor in [input_lengths, query_vec]]):
expanded_input, top_docs, top_doc_scores = self.retrieve_and_concat(
input, input_lengths, query_vec, input_turns_cnt
)
else:
expanded_input = input
top_docs = top_doc_scores = None
# Run through seq2seq encoder
tensor, mask = self.seq2seq_encoder(
expanded_input, positions, segments
) # type: ignore
return tensor, mask, input_turns_cnt, top_docs, top_doc_scores
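    # Added shape sketch (illustrative, not from the original file), following
    # the docstrings above with bsz=2, n_docs=5:
    #   input            [2, seqlen]             context tokens
    #   expanded_input   [10, seqlen + doc_len]  each context copied per doc
    #   enc_out/enc_mask [10, L, esz] / [10, L]
    #   top_doc_scores   [2, 5]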
def decoder(
self,
input: torch.LongTensor,
encoder_state: Tuple[Any, ...],
incr_state: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, Optional[Dict[str, Any]]]:
"""
Decode, RAG-Style.
Obtain decoder representations as usual, then marginalize appropriately.
:param input:
input for the decoder
:param encoder_state:
RAG encoder states
:param incr_state:
incremental decoder state
:return (output, new_incr_state):
return the output token distribution, as well as new incremental state.
"""
# 1. Get decoder outputs
enc_out, enc_mask, input_turns_cnt, docs, doc_scores = encoder_state
dec_out, new_incr_state = self.seq2seq_decoder(
input, (enc_out, enc_mask), incr_state
) # type: ignore
dec_out = self.decoder_output(dec_out)
if all([obj is not None for obj in [docs, doc_scores]]):
# 2. Get logprobs
n_docs = doc_scores.size(1)
out_probs = F.log_softmax(
dec_out, dim=-1, dtype=torch.float32 # type: ignore
).view(
input.shape[0] // n_docs, n_docs, -1, dec_out.size(-1)
) # [bsz * beam_size, n_docs, input_len, esz]
# 3. Marginalize
marginalized = self._rag_model_interface.marginalize(
out_probs, F.log_softmax(doc_scores, dim=1), input_turns_cnt
)
else:
# With RAG Sequence Generation, we do not marginalize over documents.
marginalized = dec_out
return marginalized, new_incr_state
def seq2seq_forward_pass(
self, xs: torch.LongTensor, ys: torch.LongTensor
) -> Tuple[torch.Tensor, torch.Tensor, Tuple[Any, ...]]:
"""
Simulate a standard seq2seq encoder/decoder forward pass.
Used in thorough decoding.
:param xs:
input tokens
:param ys:
teacher forced decoder outputs
:return (logits, preds, encoder_states):
logits: token output distribution
preds: max probability token at each output position
encoder_states: output states from the encoder
"""
encoder_states = self.seq2seq_encoder(xs) # type: ignore
bsz = ys.size(0)
seqlen = ys.size(1)
inputs = ys.narrow(1, 0, seqlen - 1)
dec_inputs = self._rag_model_interface.get_initial_forced_decoder_input(
bsz,
inputs,
n_docs=1,
start_idx=self.START_IDX,
end_idx=self.END_IDX,
input_turns_cnt=None,
)
latent, _ = self.seq2seq_decoder(
dec_inputs, encoder_states, None
) # type: ignore
logits = self.decoder_output(latent)
_, preds = logits.max(dim=-1)
return logits, preds, encoder_states
def decoder_output(self, latent: torch.Tensor) -> torch.Tensor:
"""
Output layer for the decoder; maps latent state to token distributions.
:param latent:
final representations from last decoder layer.
:return logits:
return output distribution over tokens.
"""
return F.linear(latent, self.embeddings.weight)
def retrieve_and_concat(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
query_vec: torch.LongTensor,
input_turns_cnt: torch.LongTensor,
) -> Tuple[torch.LongTensor, List[List[Document]], torch.Tensor]:
"""
Retrieve documents, concat with input.
:param input:
2D [bsz, seqlen] input to the encoder
:param input_lengths:
1D [bsz] lengths of each input item
:param query_vec:
2D [bsz*n_turns, seqlen] input for the retriever
:param input_turns_cnt:
1D [bsz] number of dialogue turns for each input example
:return (expanded_input, top_docs, top_doc_scores):
expanded_input: [bsz * n_docs, seqlen+doc_len] tensor of context/document inputs
top_docs: List of top documents for each input
top_doc_scores: document scores for each document
"""
# 1. Retrieve
top_docs, top_doc_scores = self.retriever.retrieve(query_vec)
# 2. Expand the input
if input_turns_cnt is not None:
input = input.repeat_interleave(input_turns_cnt, dim=0) # type: ignore
input_lengths = input_lengths.repeat_interleave(
input_turns_cnt, dim=0
) # type: ignore
expanded_input = self.concat_docs_and_input(
input, input_lengths, top_docs, top_doc_scores.size(1)
)
return expanded_input, top_docs, top_doc_scores
def concat_docs_and_input(
self,
input: torch.LongTensor,
input_lengths: torch.LongTensor,
top_docs: List[List[Document]],
max_num_docs: int,
right_padded: bool = True,
) -> torch.LongTensor:
"""
Add document tokens to input tokens.
:param input:
original input tokens
:param input_lengths:
original input lengths
:param top_docs:
list of n_docs top documents for each input sequence
:param max_num_docs:
maximum number of docs out of all examples
:param right_padded:
whether the input is right padded.
:return (tokens, lengths):
return expanded token vectors & corresponding lengths
"""
max_len = self.expanded_input_truncate
expanded_input = []
for i, docs in enumerate(top_docs):
for rank in range(len(docs)):
input_i = input[i, :]
doc = docs[rank]
doc_tokens = self.dict.txt2vec(doc.get_passage_str())
if self.generation_model == 'bart' and self.n_extra_positions <= 0:
# move SOS to start of passage since we append question to end
input_i = input_i[1:]
sample_doc_tokens = torch.LongTensor(
[self.start_idx] + doc_tokens
).to(input)
else:
sample_doc_tokens = torch.LongTensor(doc_tokens).to(input)
if self.n_extra_positions <= 0:
# Prepend document to text
input_i_len = input_lengths[i]
new_input_length = min(
self.expanded_input_truncate - self.min_doc_token_length,
input_i_len,
)
if right_padded:
input_i = input_i[input_i_len - new_input_length : input_i_len]
else:
input_i = input_i[input_i.size(0) - new_input_length :]
doc_max_len = max(max_len - len(input_i), 0)
sample_doc_tokens = sample_doc_tokens[:doc_max_len]
expanded_input.append(
torch.cat([sample_doc_tokens, input_i])[:max_len]
)
else:
# Append Document to text
sample_doc_tokens = sample_doc_tokens[:max_len]
input_i_new = input_i.new(
self.n_positions - self.n_extra_positions
).fill_(self.pad_idx)
input_i_new[input_i_new.size(0) - input_i.size(0) :] = input_i
expanded_input.append(torch.cat([input_i_new, sample_doc_tokens]))
# append extra null inputs if there are diff # of docs per input
expanded_input += [
input[i, :].new(input[i, :].size()).fill_(self.pad_idx)
] * (max_num_docs - len(docs))
expanded_input, _ = padded_tensor(
expanded_input,
fp16friendly=self.fp16 and right_padded,
max_len=max_len if self.n_extra_positions <= 0 else None,
pad_idx=self.pad_idx,
left_padded=not right_padded,
)
expanded_input = expanded_input.to(input.device)
return expanded_input # type: ignore
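    # Added note sketching the two layouts built above:
    #   n_extra_positions <= 0: row = [doc_tokens ; context], truncated to
    #       expanded_input_truncate with min_doc_token_length reserved for docs
    #   n_extra_positions  > 0: row = [left-padded context ; doc_tokens], with
    #       doc tokens capped at n_extra_positions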
def output(self, tensor: torch.Tensor) -> torch.Tensor:
"""
RAG "output" is already scaled in RagModel.decoder.
"""
return tensor
def reorder_encoder_states(
self,
encoder_states: Tuple[torch.Tensor, ...],
indices: Union[List[int], torch.LongTensor],
) -> Tuple[torch.Tensor, ...]:
"""
Reorder the encoder states.
Each RAG Model type prepares encoder states for generation differently.
"""
if not torch.is_tensor(indices):
indices = torch.LongTensor(indices).to(
encoder_states[0].device
) # type: ignore
return self._rag_model_interface.reorder_encoder_states(encoder_states, indices)
def reorder_decoder_incremental_state(
self,
incremental_state: Dict[str, Any],
inds: Union[List[int], torch.LongTensor],
) -> Optional[Dict[int, dict]]:
"""
TODO: Determine how to do this
"""
return self._rag_model_interface.reorder_decoder_incremental_state(
incremental_state, inds, self.seq2seq_decoder
)
def decode_forced(
self, encoder_states: Tuple[torch.Tensor, ...], ys: torch.LongTensor
) -> Tuple[torch.Tensor, torch.LongTensor]:
"""
Decode with a fixed, true sequence, computing loss.
Override TGM.decode_forced to both:
1) handle BART eos/bos issues, and
2) appropriately get forced decoder input.
:param encoder_states:
encoder output states
:param ys:
teacher forced label
:return logits, preds:
logits: output token distribution (as logits, not probs)
preds: tokens corresponding with max probs according to output distribution.
"""
bsz = ys.size(0)
seqlen = ys.size(1)
inputs = ys.narrow(1, 0, seqlen - 1)
if (ys[:, 0] == self.START_IDX).any() and self.generation_model != 'bart':
raise AssertionError(
"The Beginning of Sentence token is automatically added to the "
"label in decode_forced, but you included it in the label. This means "
"your model will have a double BOS token, which is probably not what "
"you intended."
)
doc_scores = encoder_states[-1]
inputs = self._rag_model_interface.get_initial_forced_decoder_input(
bsz,
inputs,
n_docs=doc_scores.size(1) if doc_scores is not None else None,
start_idx=self.START_IDX,
end_idx=self.END_IDX,
input_turns_cnt=encoder_states[2],
)
latent, _ = self.decoder(inputs, encoder_states)
logits = self.output(latent)
_, preds = logits.max(dim=-1)
return logits, preds # type: ignore
class RagEncoder(TransformerEncoder):
"""
Subclass TransformerEncoder to use additional positions if desired.
"""
def __init__(
self,
opt: Opt,
dictionary: DictionaryAgent,
embedding: Optional[torch.nn.Embedding] = None,
padding_idx: int = 0,
):
"""
RagEncoder initialization.
The Rag Seq2seq encoder is just a regular encoder
"""
n_init_positions = get_n_positions_from_options(opt) + opt['n_extra_positions']
super().__init__(
opt=opt,
vocabulary_size=len(dictionary),
embedding=embedding,
padding_idx=padding_idx,
reduction_type='none',
n_positions=n_init_positions,
)
class RagDecoder(TransformerDecoder):
"""
RagDecoder is a subclass of TransformerDecoder.
No further modifications necessary.
"""
pass
class T5RagModel(RagModel):
"""
T5 For RAG.
"""
def __init__(self, opt, dictionary, retriever_shared=None):
opt['t5'] = build_t5(opt)
if opt['t5_model_parallel']:
opt['t5'].parallelize()
else:
opt['t5'].deparallelize()
super().__init__(opt, dictionary, retriever_shared)
self.embedding_size = opt['t5'].model_dim
self.t5 = opt.pop('t5', None)
self.paralleled = not opt['t5_model_parallel']
@classmethod
def build_encoder(
cls,
opt: Opt,
*args,
dictionary: Optional[DictionaryAgent] = None,
embedding: Optional[torch.nn.Embedding] = None,
encoder_class: Optional[Type] = None,
**kwargs,
):
return RagModel.build_encoder(
opt,
encoder=opt['t5'].get_encoder(),
encoder_class=ParlaiT5Encoder,
**kwargs,
)
@classmethod
def build_decoder(
cls,
opt: Opt,
*args,
embedding: Optional[torch.nn.Embedding] = None,
n_positions: Optional[int] = None,
decoder_class: Optional[Type] = None,
**kwargs,
):
return RagModel.build_decoder(
opt,
decoder=opt['t5'].get_decoder(),
decoder_class=ParlaiT5Decoder,
**kwargs,
)
def reorder_decoder_incremental_state(
self, incremental_state: Dict[int, dict], inds: torch.Tensor
) -> Optional[Dict[int, dict]]:
return None
@set_device
def decoder_output(self, latent: torch.Tensor):
tensor = latent * (self.t5.model_dim ** -0.5)
logits = self.t5.lm_head(tensor)
return logits
| 35.621575 | 92 | 0.604384 |
4f528b0e40797effc44a2d7611330b26e1b0cac6 | 122 | py | Python | tests/ocd_backend/__init__.py | openstate/kamervragen | 0b9486dfe1521f3e88b144f9cc8ad2b32ff24834 | [
"CC-BY-4.0"
] | 2 | 2018-03-15T16:32:59.000Z | 2021-04-06T01:37:18.000Z | tests/ocd_backend/__init__.py | openstate/kamervragen | 0b9486dfe1521f3e88b144f9cc8ad2b32ff24834 | [
"CC-BY-4.0"
] | 7 | 2017-07-06T11:45:54.000Z | 2022-03-11T23:14:16.000Z | tests/ocd_backend/__init__.py | openstate/kamervragen | 0b9486dfe1521f3e88b144f9cc8ad2b32ff24834 | [
"CC-BY-4.0"
] | 2 | 2018-04-05T09:46:44.000Z | 2018-04-13T14:29:02.000Z | from .extractors import *
from .items import *
from .transformers import *
from .loaders import *
from .pipeline import *
| 20.333333 | 27 | 0.754098 |
4f545f4b8287dd9136045404a5322d9fc602e9b2 | 8,970 | py | Python | acl_version/src/reflection_based_transfer/nets.py | ahclab/reflection | d09fd500e9ff74211e1eb15711d479bb204c7a83 | [
"MIT"
] | 4 | 2020-07-14T10:36:30.000Z | 2021-04-22T04:20:42.000Z | acl_version/src/reflection_based_transfer/nets.py | ahclab/reflection | d09fd500e9ff74211e1eb15711d479bb204c7a83 | [
"MIT"
] | null | null | null | acl_version/src/reflection_based_transfer/nets.py | ahclab/reflection | d09fd500e9ff74211e1eb15711d479bb204c7a83 | [
"MIT"
] | 2 | 2021-02-25T09:19:13.000Z | 2021-08-18T09:16:29.000Z | import numpy as np
import chainer
from chainer import Chain, Variable
from chainer import functions as F
from chainer import links as L
def reflection_numpy(x, a, c):
'''
e.g.,
x = np.array([1,1])
a = np.array([1,1])
c = np.array([0,0])
return np.array([-1,-1])
'''
return x - 2 * (np.dot(x-c, a) / np.dot(a, a)) * a
def reflection(x, a, c):
assert x.shape==a.shape==c.shape, \
'x.shape={}, a.shape={}, c.shape={}'.format(x.shape, a.shape, c.shape)
# Reflection
dot_aa = F.batch_matmul(a, a, transa=True)
dot_aa = F.reshape(dot_aa, (a.shape[0],1))
dot_xca = F.batch_matmul(x-c, a, transa=True)
dot_xca = F.reshape(dot_xca, (c.shape[0],1))
return x - 2 * ( dot_xca / dot_aa) * a
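# Added usage sketch (not part of the original module): the batched chainer
# version above mirrors reflection_numpy; the values below are illustrative.
def _reflection_demo():
    # Reflect the point (1, 1) through the hyperplane with normal a = (1, 1)
    # passing through c = (0, 0); the expected result is (-1, -1).
    x = Variable(np.array([[1., 1.]], dtype=np.float32))
    a = Variable(np.array([[1., 1.]], dtype=np.float32))
    c = Variable(np.array([[0., 0.]], dtype=np.float32))
    y = reflection(x, a, c)
    # y.array is approximately [[-1., -1.]]
    return y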
def add_noise_to_word_vec(wv, sigma=.1):
if chainer.config.train:
xp = chainer.backend.get_array_module(wv.array)
return wv + sigma * xp.random.randn(*wv.shape)
else:
return wv
class Ref(chainer.Chain):
''' Reflection-based word attribute transfer with a single mirror'''
def __init__(self, dim_x, dim_h, n_attribute, sigma):
'''
Args
dim_x: dim of word2vec/GloVe
dim_h: dim of MLP hidden layers
n_attribute: num of target attribute
sigma: standard deviation of Gaussian distribution (Gaussian noise)
'''
super(Ref, self).__init__()
self.sigma = sigma
self.net_name = 'Ref'
with self.init_scope():
self.wa1 = L.Linear(dim_x, dim_x) # self.wa1 = L.Linear(dim_x, dim_h)
#self.wa2 = L.Linear(dim_h, dim_h)
#self.wa3 = L.Linear(dim_h, dim_x)
self.wc1 = L.Linear(dim_x, dim_x) # self.wc1 = L.Linear(dim_x, dim_h)
#self.wc2 = L.Linear(dim_h, dim_h)
#self.wc3 = L.Linear(dim_h, dim_x)
self.embed_z = L.EmbedID(n_attribute, dim_x)
def mlp_a(self, z):
a = self.wa1(z)
#a = self.wa3(F.relu(a))
#a = self.wa4(F.relu(a))
return a
def mlp_c(self, z):
c = self.wc1(z)
#c = self.wc3(F.relu(c))
#c = self.wc4(F.relu(c))
return c
def forward(self, x, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
Return:
y: output vector
'''
# Add noise
x = add_noise_to_word_vec(x, self.sigma)
# Embed z
z = self.embed_z(z)
# Estimate a
a = self.mlp_a(z) # Single mirror
# Estimate c
c = self.mlp_c(z) # Single mirror
# Transfer the word vector with reflection
y = reflection(x, a, c) # y = Ref_a,c(x)
return y
def loss(self, x, t, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
t: target word vector
Return:
mean squared error between y and t
'''
y = self.forward(x, z)
return F.mean_squared_error(y, t)
def test(self, x, z):
with chainer.using_config('train', False):
y = self.forward(x, z)
return y
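# Added training sketch (dimensions and hyperparameters are placeholders, not
# values from the original experiments):
#   model = Ref(dim_x=300, dim_h=512, n_attribute=2, sigma=0.1)
#   # x, t: [B, 300] float32 word vectors; z: [B] int32 attribute ids
#   loss = model.loss(x, t, z)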
class Ref_PM(chainer.Chain):
''' Reflection-based word attribute transfer with parameterized mirrors'''
def __init__(self, dim_x, dim_h, n_attribute, sigma):
'''
Args
dim_x: dim of word2vec/GloVe
dim_h: dim of MLP hidden layers
n_attribute: num of target attribute
sigma: standard deviation of Gaussian distribution (Gaussian noise)
'''
super(Ref_PM, self).__init__()
self.sigma = sigma
self.net_name = 'Ref+PM'
with self.init_scope():
self.wa1 = L.Linear(dim_x, dim_h)
self.wa2 = L.Linear(dim_x, dim_h)
self.wa3 = L.Linear(dim_h, dim_h)
self.wa4 = L.Linear(dim_h, dim_x)
self.wc1 = L.Linear(dim_x, dim_h)
self.wc2 = L.Linear(dim_x, dim_h)
self.wc3 = L.Linear(dim_h, dim_h)
self.wc4 = L.Linear(dim_h, dim_x)
self.embed_z = L.EmbedID(n_attribute, dim_x)
def mlp_a(self, z, x):
a = self.wa1(z) + self.wa2(x)
a = self.wa3(F.relu(a))
a = self.wa4(F.relu(a))
return a
def mlp_c(self, z, x):
c = self.wc1(z) + self.wc2(x)
c = self.wc3(F.relu(c))
c = self.wc4(F.relu(c))
return c
def forward(self, x, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
Return:
y: output vector
'''
# Add noise
x = add_noise_to_word_vec(x, self.sigma)
# Embed z
z = self.embed_z(z)
# Estimate a
a = self.mlp_a(z, x) # Parameterized mirror
# Estimate c
c = self.mlp_c(z, x) # Parameterized mirror
# Transfer the word vector with reflection
y = reflection(x, a, c) # y = Ref_a,c(x)
return y
def loss(self, x, t, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
t: target word vector
Return:
mean squared error between y and t
'''
y = self.forward(x, z)
return F.mean_squared_error(y, t)
def test(self, x, z):
with chainer.using_config('train', False):
y = self.forward(x, z)
return y
class MLP2(chainer.Chain):
def __init__(self, dim_x, dim_h, n_attribute, sigma, drop_ratio):
super(MLP2, self).__init__()
self.dim_x = dim_x
self.dim_h = dim_h
self.n_attribute = n_attribute
self.sigma = sigma
self.drop_ratio = drop_ratio
self.net_name = 'MLP2'
with self.init_scope():
self.wx = L.Linear(dim_x, dim_h) # to concat [x;z]
self.wz = L.Linear(dim_x, dim_h) # to concat [x;z]
self.w1 = L.Linear(dim_h, dim_h)
self.w2 = L.Linear(dim_h, dim_x)
self.emb_z = L.EmbedID(n_attribute, dim_x)
def mlp(self, x, z):
# Concat x and z
z = self.wz(z)
x = self.wx(x)
x = x + z
# MLP
y = F.dropout(F.relu(x), self.drop_ratio)
y = self.w1(y)
y = F.dropout(F.relu(y), self.drop_ratio)
y = self.w2(y)
return y
def forward(self, x, z):
# Add noise
x = add_noise_to_word_vec(x, self.sigma)
# Embed z
z = self.emb_z(z)
# Pred y by using MLP
y = self.mlp(x, z)
return y
def loss(self, x, t, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
t: target word vector
Return:
mean squared error between y and t
'''
y = self.forward(x, z)
return F.mean_squared_error(y, t)
def test(self, x, z):
with chainer.using_config('train', False):
y = self.forward(x, z)
return y
class MLP3(chainer.Chain):
def __init__(self, dim_x, dim_h, n_attribute, sigma, drop_ratio):
super(MLP3, self).__init__()
self.dim_x = dim_x
self.dim_h = dim_h
self.n_attribute = n_attribute
self.sigma = sigma
self.drop_ratio = drop_ratio
self.net_name = 'MLP3'
with self.init_scope():
self.wx = L.Linear(dim_x, dim_h)
self.wz = L.Linear(dim_x, dim_h)
self.w1 = L.Linear(dim_h, dim_h)
self.w2 = L.Linear(dim_h, dim_h)
self.w3 = L.Linear(dim_h, dim_x)
self.emb_z = L.EmbedID(n_attribute, dim_x)
def mlp(self, x, z):
# Concat x and z
z = self.wz(z)
x = self.wx(x)
x = x + z
# MLP
y = F.dropout(F.relu(x), self.drop_ratio)
y = self.w1(y)
y = F.dropout(F.relu(y), self.drop_ratio)
y = self.w2(y)
y = F.dropout(F.relu(y), self.drop_ratio)
y = self.w3(y)
return y
def forward(self, x, z):
# Add noise
x = add_noise_to_word_vec(x, self.sigma)
# Embed z
z = self.emb_z(z)
# Pred y by using MLP
y = self.mlp(x, z)
return y
def loss(self, x, t, z):
'''
Args
x: input word vector
z: attribute ID (e.g. 0)
t: target word vector
Return:
mean squared error between y and t
'''
y = self.forward(x, z)
return F.mean_squared_error(y, t)
def test(self, x, z):
with chainer.using_config('train', False):
y = self.forward(x, z)
return y | 27.6 | 81 | 0.504571 |
4f50e64f2180cf5176280cff5599ecfee27716f0 | 1,023 | py | Python | SympatheticScripts/SPMagTrapAbsImage.py | jstammers/EDMSuite | 2841c0edef32a496855ca41deaa0f710b017ae2f | [
"MIT"
] | null | null | null | SympatheticScripts/SPMagTrapAbsImage.py | jstammers/EDMSuite | 2841c0edef32a496855ca41deaa0f710b017ae2f | [
"MIT"
] | null | null | null | SympatheticScripts/SPMagTrapAbsImage.py | jstammers/EDMSuite | 2841c0edef32a496855ca41deaa0f710b017ae2f | [
"MIT"
] | null | null | null | # Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from System.Collections.Generic import Dictionary
from DAQ.Environment import *
from DAQ import *
from MOTMaster import*
def run_script():
return 0
def SwitchCoils(maxCurrent,minCurrent):
count = 0
endcount = 2
dic = Dictionary[String,Object]()
mm.SetScriptPath("C:\\Experiment Control\\EDMSuiteTrunk\\SympatheticMOTMasterScripts\\MagTrapAbsImage.cs")
while(count < endcount):
if count == 0:
dic["MOTCoilsCurrent"] = maxCurrent
mm.Run(dic)
count = count + 1
elif count == 1:
dic["MOTCoilsCurrent"] = minCurrent
mm.Run(dic)
count = count + 1
return 0
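# Added usage sketch (the current values are illustrative placeholders):
#   SwitchCoils(2.0, 0.5)         # one run at max, one at min coil current
#   RepeatScansSWC(2.0, 0.5, 10)  # alternate between the two currents 10 times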
def RepeatScansSWC(maxCurrent,minCurrent,numberofrepeats):
j = 0
while(j < numberofrepeats):
SwitchCoils(maxCurrent,minCurrent)
j = j+1
return 0 | 25.575 | 108 | 0.71652 |
4f53604ee0833985485bc751e6ab1c2c91e9b8de | 2,811 | py | Python | Core/Model/FirefoxModel/SQLite/permissions.py | UpBeatMan/Abschlussarbeit | e842df691bbfdf77cd1c41d9de00718166bb4e19 | [
"MIT"
] | null | null | null | Core/Model/FirefoxModel/SQLite/permissions.py | UpBeatMan/Abschlussarbeit | e842df691bbfdf77cd1c41d9de00718166bb4e19 | [
"MIT"
] | 28 | 2021-06-20T17:39:00.000Z | 2021-09-04T15:03:41.000Z | Core/Model/FirefoxModel/SQLite/permissions.py | UpBeatMan/Abschlussarbeit | e842df691bbfdf77cd1c41d9de00718166bb4e19 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, String, orm
from Model.util import log_message
from Model.FirefoxModel.SQLite.base import *
ID = "ID"
ORIGIN = "Herkunft"
TYPE = "Erlaubnistyp"
EXPIRYAT = "Ungueltig ab"
LASTMODIFIED = "Zuletzt geaendert"
class Permission(BaseSession, BaseSQLiteClass):
__tablename__ = "moz_perms"
id = Column("id", Integer, primary_key=True)
origin = Column("origin", String)
type = Column("type", String)
expiry_timestamp = Column("expireTime", Integer)
modify_timestamp = Column("modificationTime", Integer)
@orm.reconstructor
def init(self):
self.is_date_changed = False
self.attr_list = []
self.attr_list.append(BaseAttribute(ORIGIN, OTHER, self.origin))
self.attr_list.append(BaseAttribute(TYPE, OTHER, self.type))
self.attr_list.append(
BaseAttribute(EXPIRYAT, DT_MILLI_OR_ZERO, self.expiry_timestamp)
)
self.attr_list.append(
BaseAttribute(LASTMODIFIED, DT_MILLI, self.modify_timestamp)
)
def update(self, delta):
if not delta:
log_message("Kein Delta erhalten in Permission", "error")
return
for attr in self.attr_list:
if attr.name == EXPIRYAT:
if attr.type == DT_ZERO:
self.expiry_timestamp = 0
self.is_date_changed = True
else:
try:
attr.change_date(delta)
attr.date_to_timestamp()
self.expiry_timestamp = attr.timestamp
except:
log_message(
"Fehler bei Update in Permissions für " + attr.name, "error"
)
continue
self.is_date_changed = True
elif attr.name == LASTMODIFIED:
try:
attr.change_date(delta)
attr.date_to_timestamp()
self.modify_timestamp = attr.timestamp
except:
log_message(
"Fehler bei Update in Permissions für " + attr.name, "error"
)
continue
self.is_date_changed = True
class PermissionHandler(BaseSQliteHandler):
name = "Seitenerlaubnis"
attr_names = [ID, ORIGIN, TYPE, EXPIRYAT, LASTMODIFIED]
def __init__(
self,
profile_path: str,
cache_path: str,
file_name: str = "permissions.sqlite",
logging: bool = False,
):
super().__init__(profile_path, file_name, logging)
def get_all_id_ordered(self):
query = self.session.query(Permission).order_by(Permission.id)
return query.all()
| 33.464286 | 88 | 0.566346 |
4f54ca5962b8fd5f883d8b6909430f739d2ef9c4 | 4,980 | py | Python | src/main/python/utils/transform.py | meowpunch/bobsim-research | 4411ac6eaf5b760611f689b0a9e290546e2f5435 | [
"MIT"
] | 2 | 2020-03-01T17:42:44.000Z | 2020-03-09T06:13:34.000Z | src/main/python/utils/transform.py | meowpunch/bobsim-research | 4411ac6eaf5b760611f689b0a9e290546e2f5435 | [
"MIT"
] | 2 | 2020-04-01T16:48:06.000Z | 2020-04-04T11:04:10.000Z | src/main/python/utils/transform.py | meowpunch/bobsim-research | 4411ac6eaf5b760611f689b0a9e290546e2f5435 | [
"MIT"
] | null | null | null | import sys
import tempfile
from functools import reduce
import numpy as np
import pandas as pd
from joblib import load, dump
from sklearn.base import BaseEstimator
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, FunctionTransformer, StandardScaler
from utils.function import add
from utils.logging import init_logger
from utils.s3_manager.manage import S3Manager
# TODO: not working, make available
def save(self):
dump(self, 'custom_transformer.bin', compress=True)
def save_to_s3(transformer, bucket_name, key):
with tempfile.TemporaryFile() as fp:
dump(transformer, fp)
fp.seek(0)
S3Manager(bucket_name=bucket_name).save_object(body=fp.read(), key=key)
fp.close()
def load_from_s3(bucket_name, key):
with tempfile.TemporaryFile() as fp:
S3Manager(bucket_name=bucket_name).s3_bucket.download_fileobj(Fileobj=fp, Key=key)
fp.seek(0)
transformer = load(fp)
fp.close()
return transformer
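# Added round-trip sketch (bucket and key names are placeholders):
#   save_to_s3(transformer, bucket_name="my-bucket", key="transformer.bin")
#   transformer = load_from_s3(bucket_name="my-bucket", key="transformer.bin")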
class CustomTransformer(ColumnTransformer):
"""
overwrite sklearn ColumnTransformer
"""
def __init__(self, df=None, strategy: dict = None):
"""
TODO : overwrite to self.strategy
:param strategy: dictionary {
"method": [] list of columns
}
"""
if df is not None and strategy is not None:
self.logger = init_logger()
self.input_df = df
self.strategy = strategy
# constant
self.transform_method = {
"log": FunctionTransformer(np.log1p),
"standard": StandardScaler(),
"one_hot_encoding": OneHotEncoder(sparse=False),
"none": FunctionTransformer(lambda x: x)
}
# self.column_transformer = ColumnTransformer(transformers=self.make_transformers())
self.transformed = None
super().__init__(transformers=self.make_transformers())
else:
super().__init__()
def make_transformers(self):
# transformers for ColumnTransformer
method_list = self.strategy.keys()
def make_tuple(name):
try:
return tuple([name, self.transform_method[name], self.strategy[name]])
except KeyError:
self.logger.critical("'{}' is not supported method name".format(name), exc_info=True)
# TODO: handle exception
sys.exit()
return list(map(make_tuple, method_list))
@property
def transformed_df(self):
if self.transformed is None:
raise Exception("Not transformed")
return pd.DataFrame(self.transformed, columns=self.header)
@property
def header(self):
"""
It should be called after transformers are fitted
:return: list of column named
"""
if self.transformed is None:
raise Exception("Not transformed")
def get_columns(method_name):
# columns for method
            if method_name == "one_hot_encoding":
return self.fitted_transformer(method_name).get_feature_names().tolist()
else:
return list(map(
lambda column: '{method}_{column}'.format(method=method_name, column=column),
self.strategy[method_name]
))
# transformed columns
columns_list = list(map(get_columns, self.strategy.keys()))
return reduce(add, columns_list)
# override
def fit(self, X=None, y=None):
if X is None:
X = self.input_df
return super().fit(X)
# override
def fit_transform(self, X: pd.DataFrame = None, y=None):
if X is None:
X = self.input_df
# TODO: use only once?
if self.transformed is None:
self.transformed = super().fit_transform(X)
return self.transformed
return self.transformed
def fitted_transformer(self, method="one_hot_encoding"):
"""
get fitted transformer
:param method: str, e.g. "one_hot_encoding", "log", "standard"
:return: transformer for method
"""
filtered = filter(
# ColumnTransformer.transformers_: fitted transformers
lambda x: method in x, self.transformers_
).__next__()
return filter(lambda x: isinstance(x, BaseEstimator), filtered).__next__()
def main():
"""
test for Transformer
"""
# df, key = build_origin_price(date="201908", prefix="clean")
# print(df.info())
# t = CustomTransformer(
# strategy={
# "one_hot_encoding": ['품목명', '조사지역명'],
# "standard": ["당일조사가격"],
# # "hey": ['ㅎ']
# }, df=df
# )
# print(t.fit_transform())
# print(t.header)
# print(t.transformed_df)
pass
if __name__ == '__main__':
main()
| 29.294118 | 101 | 0.602209 |
4f54eefc84b73d8defc56b6aaea68ba0b4b47981 | 590 | py | Python | sender/schc_utils.py | schc-over-sigfox/schc-over-sigfox | d03e26cf5524ebd6bd64b4ca33a83149eec5e59f | [
"MIT"
] | null | null | null | sender/schc_utils.py | schc-over-sigfox/schc-over-sigfox | d03e26cf5524ebd6bd64b4ca33a83149eec5e59f | [
"MIT"
] | null | null | null | sender/schc_utils.py | schc-over-sigfox/schc-over-sigfox | d03e26cf5524ebd6bd64b4ca33a83149eec5e59f | [
"MIT"
] | 1 | 2022-03-19T12:36:30.000Z | 2022-03-19T12:36:30.000Z | def zfill(string, width):
    # Left-pad `string` with zeros up to `width` (str.zfill equivalent).
    if len(string) < width:
        return ("0" * (width - len(string))) + string
    else:
        return string
def insert_index(ls, pos, elmt):
    # Grow `ls` with empty lists until index `pos` exists, then insert `elmt`.
    while len(ls) < pos:
        ls.append([])
    ls.insert(pos, elmt)
def replace_bit(string, position, value):
    # Return `string` with the character at `position` replaced by `value`.
    return '%s%s%s' % (string[:position], value, string[position + 1:])
def find(string, character):
    # Return every index at which `character` occurs in `string`.
    return [i for i, ltr in enumerate(string) if ltr == character]
def bitstring_to_bytes(s):
    # Convert a bit string such as "00000001" into its bytes representation.
    return int(s, 2).to_bytes(len(s) // 8, 'big')
def is_monochar(s):
    # True if every character in `s` is identical.
    return len(set(s)) == 1
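# Added usage examples (the bit strings are illustrative):
#   zfill("101", 8)                 -> "00000101"
#   replace_bit("0000", 1, "1")     -> "0100"
#   find("0100", "1")               -> [1]
#   bitstring_to_bytes("00000001")  -> b"\x01"
#   is_monochar("111")              -> True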
| 21.071429 | 71 | 0.608475 |
4f556f0c8656a9b09a50bf498bdcb77afceec723 | 28 | py | Python | taxstats/tests/__init__.py | raheem03/taxstats | 23537030d7fb84b72ad1d514f7fd7f4ba6cd3ca3 | [
"MIT"
] | null | null | null | taxstats/tests/__init__.py | raheem03/taxstats | 23537030d7fb84b72ad1d514f7fd7f4ba6cd3ca3 | [
"MIT"
] | null | null | null | taxstats/tests/__init__.py | raheem03/taxstats | 23537030d7fb84b72ad1d514f7fd7f4ba6cd3ca3 | [
"MIT"
] | null | null | null | from taxstats.tests import * | 28 | 28 | 0.821429 |
4f55c8c1e9f8873de824db417c3f4370887141b2 | 1,381 | py | Python | catkin_ws_indigo/build/catkin_generated/generate_cached_setup.py | allenwxf/ROS-TUTORIAL | 9dfee042ea90cf3d6a6ae7f7200aa0c8f1a6b000 | [
"MIT"
] | null | null | null | catkin_ws_indigo/build/catkin_generated/generate_cached_setup.py | allenwxf/ROS-TUTORIAL | 9dfee042ea90cf3d6a6ae7f7200aa0c8f1a6b000 | [
"MIT"
] | null | null | null | catkin_ws_indigo/build/catkin_generated/generate_cached_setup.py | allenwxf/ROS-TUTORIAL | 9dfee042ea90cf3d6a6ae7f7200aa0c8f1a6b000 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/wxf/Workspace/ROS-TUTORIAL/catkin_ws_indigo/devel;/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/wxf/Workspace/ROS-TUTORIAL/catkin_ws_indigo/devel/env.sh')
output_filename = '/home/wxf/Workspace/ROS-TUTORIAL/catkin_ws_indigo/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 44.548387 | 108 | 0.744388 |
4f557813dc8fef713f719096e8a89ab0c9111248 | 3,212 | py | Python | docs/conf.py | ariebovenberg/quiz | 16c938ca644d6677482d673d4876d86e1a9f27ec | [
"Apache-2.0"
] | 71 | 2018-04-04T16:30:26.000Z | 2022-03-03T17:38:35.000Z | docs/conf.py | ariebovenberg/quiz | 16c938ca644d6677482d673d4876d86e1a9f27ec | [
"Apache-2.0"
] | 504 | 2018-08-19T19:07:19.000Z | 2022-01-19T12:34:47.000Z | docs/conf.py | ariebovenberg/quiz | 16c938ca644d6677482d673d4876d86e1a9f27ec | [
"Apache-2.0"
] | 6 | 2018-10-30T13:51:46.000Z | 2020-05-17T20:31:25.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
from collections import OrderedDict
# -- Project information -----------------------------------------------------
import importlib.metadata
metadata = importlib.metadata.metadata("quiz")
project = metadata["Name"]
author = metadata["Author"]
version = metadata["Version"]
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_static_path = ['_static']
highlight_language = 'python3'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'quiz-logo.png',
'logo_name': True,
'logo_text_align': 'center',
"description": metadata['Description'],
'description_font_style': 'italic',
"github_user": 'ariebovenberg',
"github_repo": 'quiz',
"github_banner": True,
'codecov_button': True,
"github_type": 'star',
'fixed_sidebar': True,
'code_font_size': '0.8em',
'extra_nav_links': OrderedDict([
('quiz @ PyPI', 'http://pypi.python.org/pypi/quiz'),
('quiz @ GitHub', 'http://github.com/ariebovenberg/quiz'),
('Issue Tracker', 'http://github.com/ariebovenberg/quiz/issues'),
]),
'note_bg': '#D5E8ED',
'note_border': '#D5E8ED',
'seealso_bg': '#D5E8ED',
'seealso_border': '#D5E8ED',
'warn_bg': '#EDC7BE',
'warn_border': '#EDC7BE',
'pre_bg': '#DCEDD5',
}
html_sidebars = {
'**': ['about.html', 'navigation.html', 'searchbox.html']
}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'snug': ('https://snug.readthedocs.org/en/latest/', None),
}
| 29.46789 | 78 | 0.657534 |
4f55081eb562c8671a9c7018e6c9eea031bf9abb | 2,880 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/input_photo_file_location.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/input_photo_file_location.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/input_photo_file_location.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class InputPhotoFileLocation(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.InputFileLocation`.
Details:
- Layer: ``126``
- ID: ``0x40181ffe``
Parameters:
id: ``int`` ``64-bit``
access_hash: ``int`` ``64-bit``
file_reference: ``bytes``
thumb_size: ``str``
"""
__slots__: List[str] = ["id", "access_hash", "file_reference", "thumb_size"]
ID = 0x40181ffe
QUALNAME = "types.InputPhotoFileLocation"
def __init__(self, *, id: int, access_hash: int, file_reference: bytes, thumb_size: str) -> None:
self.id = id # long
self.access_hash = access_hash # long
self.file_reference = file_reference # bytes
self.thumb_size = thumb_size # string
@staticmethod
def read(data: BytesIO, *args: Any) -> "InputPhotoFileLocation":
# No flags
id = Long.read(data)
access_hash = Long.read(data)
file_reference = Bytes.read(data)
thumb_size = String.read(data)
return InputPhotoFileLocation(id=id, access_hash=access_hash, file_reference=file_reference, thumb_size=thumb_size)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Long(self.id))
data.write(Long(self.access_hash))
data.write(Bytes(self.file_reference))
data.write(String(self.thumb_size))
return data.getvalue()
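# Added round-trip sketch (field values are placeholders): write() prepends the
# 4-byte constructor ID, which TLObject dispatch normally consumes before
# calling read(), hence the [4:] slice below:
#   loc = InputPhotoFileLocation(id=1, access_hash=2,
#                                file_reference=b"", thumb_size="x")
#   same = InputPhotoFileLocation.read(BytesIO(loc.write()[4:]))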
| 33.103448 | 123 | 0.621181 |
4f523978933f3a46604b10f60c0ed8fd56aace04 | 48,375 | py | Python | dataprofiler/profilers/numerical_column_stats.py | az85252/DataProfiler | 1303abe04b48fa87c67d8d9b3a13f8cb88e79afb | [
"Apache-2.0"
] | null | null | null | dataprofiler/profilers/numerical_column_stats.py | az85252/DataProfiler | 1303abe04b48fa87c67d8d9b3a13f8cb88e79afb | [
"Apache-2.0"
] | null | null | null | dataprofiler/profilers/numerical_column_stats.py | az85252/DataProfiler | 1303abe04b48fa87c67d8d9b3a13f8cb88e79afb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
coding=utf-8
Build model for a dataset by identifying type of column along with its
respective parameters.
"""
from __future__ import print_function
from __future__ import division
from future.utils import with_metaclass
import copy
import abc
import warnings
import sys
import numpy as np
from . import utils
from . import histogram_utils
from .base_column_profilers import BaseColumnProfiler
from .profiler_options import NumericalOptions
class abstractstaticmethod(staticmethod):
__slots__ = ()
def __init__(self, function):
super(abstractstaticmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
class NumericStatsMixin(with_metaclass(abc.ABCMeta, object)):
"""
    Abstract numerical column profile subclass of BaseColumnProfiler. Represents
    a numerical column in the dataset and is itself subclassed by the concrete
    numeric column profilers.
"""
type = None
def __init__(self, options=None):
"""
Initialization of column base properties and itself.
:param options: Options for the numerical stats.
:type options: NumericalOptions
"""
if options and not isinstance(options, NumericalOptions):
raise ValueError("NumericalStatsMixin parameter 'options' must be "
"of type NumericalOptions.")
self.min = None
self.max = None
self.sum = 0
self._biased_variance = np.nan
self._biased_skewness = np.nan
self._biased_kurtosis = np.nan
self.max_histogram_bin = 100000
self.min_histogram_bin = 1000
self.histogram_bin_method_names = [
'auto', 'fd', 'doane', 'scott', 'rice', 'sturges', 'sqrt'
]
self.histogram_selection = None
self.user_set_histogram_bin = None
self.bias_correction = True # By default, we correct for bias
self.num_zeros = 0
self.num_negatives = 0
if options:
self.bias_correction = options.bias_correction.is_enabled
bin_count_or_method = \
options.histogram_and_quantiles.bin_count_or_method
if isinstance(bin_count_or_method, str):
self.histogram_bin_method_names = [bin_count_or_method]
elif isinstance(bin_count_or_method, list):
self.histogram_bin_method_names = bin_count_or_method
elif isinstance(bin_count_or_method, int):
self.user_set_histogram_bin = bin_count_or_method
self.histogram_bin_method_names = ['custom']
self.histogram_methods = {}
self._stored_histogram = {
'total_loss': 0,
'current_loss': 0,
'suggested_bin_count': self.min_histogram_bin,
'histogram': {
'bin_counts': None,
'bin_edges': None
}
}
self._batch_history = []
for method in self.histogram_bin_method_names:
self.histogram_methods[method] = {
'total_loss': 0,
'current_loss': 0,
'suggested_bin_count': self.min_histogram_bin,
'histogram': {
'bin_counts': None,
'bin_edges': None
}
}
num_quantiles = 1000 # TODO: add to options
self.quantiles = {bin_num: None for bin_num in range(num_quantiles - 1)}
self.__calculations = {
"min": NumericStatsMixin._get_min,
"max": NumericStatsMixin._get_max,
"sum": NumericStatsMixin._get_sum,
"variance": NumericStatsMixin._get_variance,
"skewness": NumericStatsMixin._get_skewness,
"kurtosis": NumericStatsMixin._get_kurtosis,
"histogram_and_quantiles":
NumericStatsMixin._get_histogram_and_quantiles,
"num_zeros": NumericStatsMixin._get_num_zeros,
"num_negatives": NumericStatsMixin._get_num_negatives
}
self._filter_properties_w_options(self.__calculations, options)
def __getattribute__(self, name):
return super(NumericStatsMixin, self).__getattribute__(name)
def __getitem__(self, item):
return super(NumericStatsMixin, self).__getitem__(item)
@property
def _has_histogram(self):
return self._stored_histogram['histogram']['bin_counts'] is not None
@BaseColumnProfiler._timeit(name="histogram_and_quantiles")
def _add_helper_merge_profile_histograms(self, other1, other2):
"""
Adds histogram of two profiles together
:param other1: profile1 being added to self
:type other1: BaseColumnProfiler
:param other2: profile2 being added to self
:type other2: BaseColumnProfiler
:return: None
"""
# get available bin methods and set to current
bin_methods = [x for x in other1.histogram_bin_method_names
if x in other2.histogram_bin_method_names]
if not bin_methods:
raise ValueError('Profiles have no overlapping bin methods and '
'therefore cannot be added together.')
elif other1.user_set_histogram_bin and other2.user_set_histogram_bin:
if other1.user_set_histogram_bin != other2.user_set_histogram_bin:
warnings.warn('User set histogram bin counts did not match. '
'Choosing the larger bin count.')
self.user_set_histogram_bin = max(other1.user_set_histogram_bin,
other2.user_set_histogram_bin)
# initial creation of the profiler creates all methods, but
# only the methods which intersect should exist.
self.histogram_bin_method_names = bin_methods
self.histogram_methods = dict()
for method in self.histogram_bin_method_names:
self.histogram_methods[method] = {
'total_loss': 0,
'current_loss': 0,
'histogram': {
'bin_counts': None,
'bin_edges': None
}
}
combined_values = np.concatenate([other1._histogram_to_array(),
other2._histogram_to_array()])
bin_counts, bin_edges = self._get_histogram(combined_values)
self._stored_histogram['histogram']['bin_counts'] = bin_counts
self._stored_histogram['histogram']['bin_edges'] = bin_edges
histogram_loss = self._histogram_bin_error(combined_values)
        self._stored_histogram['current_loss'] = histogram_loss
        self._stored_histogram['total_loss'] = histogram_loss
self._get_quantiles()
def _add_helper(self, other1, other2):
"""
Helper function for merging profiles.
:param other1: profile1 being added to self
:param other2: profile2 being added to self
:return: None
"""
BaseColumnProfiler._merge_calculations(
self._NumericStatsMixin__calculations,
other1._NumericStatsMixin__calculations,
other2._NumericStatsMixin__calculations)
# Check and potentially override bias correction computation
self.bias_correction = True
if not other1.bias_correction or not other2.bias_correction:
self.bias_correction = False
# Merge variance, histogram, min, max, and sum
if "variance" in self.__calculations.keys():
self._biased_variance = self._merge_biased_variance(
other1.match_count, other1._biased_variance, other1.mean,
other2.match_count, other2._biased_variance, other2.mean)
if "histogram_and_quantiles" in self.__calculations.keys():
if other1._has_histogram and other2._has_histogram:
self._add_helper_merge_profile_histograms(other1, other2)
elif not other2._has_histogram:
self.histogram_methods = other1.histogram_methods
self.quantiles = other1.quantiles
else:
self.histogram_methods = other2.histogram_methods
self.quantiles = other2.quantiles
if "min" in self.__calculations.keys():
if other1.min is not None and other2.min is not None:
self.min = min(other1.min, other2.min)
elif other2.min is None:
self.min = other1.min
else:
self.min = other2.min
if "max" in self.__calculations.keys():
if other1.max is not None and other2.max is not None:
self.max = max(other1.max, other2.max)
elif other2.max is None:
self.max = other1.max
else:
self.max = other2.max
if "sum" in self.__calculations.keys():
self.sum = other1.sum + other2.sum
if "skewness" in self.__calculations.keys():
self._biased_skewness = self._merge_biased_skewness(
other1.match_count,
other1._biased_skewness,
other1._biased_variance, other1.mean,
other2.match_count,
other2._biased_skewness,
other2._biased_variance, other2.mean)
if "kurtosis" in self.__calculations.keys():
self._biased_kurtosis = self._merge_biased_kurtosis(
other1.match_count,
other1._biased_kurtosis,
other1._biased_skewness,
other1._biased_variance,
other1.mean, other2.match_count,
other2._biased_kurtosis,
other2._biased_skewness,
other2._biased_variance, other2.mean)
if "num_zeros" in self.__calculations.keys():
self.num_zeros = other1.num_zeros + other2.num_zeros
if "num_negatives" in self.__calculations.keys():
self.num_negatives = other1.num_negatives + other2.num_negatives
def diff(self, other_profile, options=None):
"""
Finds the differences for several numerical stats.
:param other_profile: profile to find the difference with
:type other_profile: NumericStatsMixin Profile
:return: the numerical stats differences
:rtype: dict
"""
cls = self.__class__
if not isinstance(other_profile, cls):
raise TypeError("Unsupported operand type(s) for diff: '{}' "
"and '{}'".format(cls.__name__,
other_profile.__class__.__name__))
differences = {
"min": utils.find_diff_of_numbers(self.min, other_profile.min),
"max": utils.find_diff_of_numbers(self.max, other_profile.max),
"sum": utils.find_diff_of_numbers(self.sum, other_profile.sum),
"mean": utils.find_diff_of_numbers(self.mean, other_profile.mean),
"variance": utils.find_diff_of_numbers(self.variance,
other_profile.variance),
"stddev": utils.find_diff_of_numbers(self.stddev,
other_profile.stddev),
}
return differences
@property
def mean(self):
if self.match_count == 0:
return 0
return float(self.sum) / self.match_count
@property
def variance(self):
return self._biased_variance if not self.bias_correction \
else self._correct_bias_variance(
self.match_count,
self._biased_variance)
@property
def stddev(self):
if self.match_count == 0:
return np.nan
return np.sqrt(self.variance)
@property
def skewness(self):
return self._biased_skewness if not self.bias_correction \
else self._correct_bias_skewness(
self.match_count,
self._biased_skewness)
@property
def kurtosis(self):
return self._biased_kurtosis if not self.bias_correction \
else self._correct_bias_kurtosis(
self.match_count,
self._biased_kurtosis)
def _update_variance(self, batch_mean, batch_var, batch_count):
"""
Calculate the combined biased variance of the current values and new dataset.
:param batch_mean: mean of new chunk
:param batch_var: biased variance of new chunk
:param batch_count: number of samples in new chunk
:return: combined biased variance
:rtype: float
"""
return self._merge_biased_variance(self.match_count, self._biased_variance, self.mean,
batch_count, batch_var, batch_mean)
@staticmethod
def _merge_biased_variance(match_count1, biased_variance1, mean1,
match_count2, biased_variance2, mean2):
"""
        Calculate the combined biased variance of two data chunks.
        :param match_count1: number of samples in chunk 1
        :param mean1: mean of chunk 1
        :param biased_variance1: variance of chunk 1 without bias correction
        :param match_count2: number of samples in chunk 2
        :param mean2: mean of chunk 2
        :param biased_variance2: variance of chunk 2 without bias correction
:return: combined variance
:rtype: float
"""
if match_count1 < 1:
return biased_variance2
elif match_count2 < 1:
return biased_variance1
elif np.isnan(biased_variance1) or np.isnan(biased_variance2):
return np.nan
curr_count = match_count1
delta = mean2 - mean1
m_curr = biased_variance1 * curr_count
m_batch = biased_variance2 * match_count2
M2 = m_curr + m_batch + delta ** 2 * curr_count * match_count2 / \
(curr_count + match_count2)
new_variance = M2 / (curr_count + match_count2)
return new_variance
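# A quick sanity sketch (hypothetical values; assumes numpy as np):
# merging the biased variances of two chunks equals the biased variance
# of their concatenation.
#   a, b = np.array([1., 2., 3.]), np.array([4., 5., 6., 7.])
#   merged = NumericStatsMixin._merge_biased_variance(
#       len(a), a.var(), a.mean(), len(b), b.var(), b.mean())
#   assert np.isclose(merged, np.concatenate([a, b]).var())  # both 4.0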
@staticmethod
def _correct_bias_variance(match_count, biased_variance):
if match_count is None or biased_variance is None or match_count < 2:
warnings.warn("Insufficient match count to correct bias in variance. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
variance = match_count / (match_count - 1) * biased_variance
return variance
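# This is Bessel's correction, equivalent to numpy's ddof=1 (sample)
# variance -- a sketch with hypothetical data:
#   x = np.array([1., 2., 4.])
#   corrected = NumericStatsMixin._correct_bias_variance(len(x), x.var())
#   assert np.isclose(corrected, x.var(ddof=1))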
@staticmethod
def _merge_biased_skewness(match_count1, biased_skewness1, biased_variance1, mean1,
match_count2, biased_skewness2, biased_variance2, mean2):
"""
Calculate the combined skewness of two data chunks
:param match_count1: # of samples in 1st chunk
:param biased_skewness1: skewness of 1st chunk without bias correction
:param biased_variance1: variance of 1st chunk without bias correction
:param mean1: mean of 1st chunk
:param match_count2: # of samples in 2nd chunk
:param biased_skewness2: skewness of 2nd chunk without bias correction
:param biased_variance2: variance of 2nd chunk without bias correction
:param mean2: mean of 2nd chunk
:return: combined skewness
:rtype: float
"""
if match_count1 < 1:
return biased_skewness2
elif match_count2 < 1:
return biased_skewness1
elif np.isnan(biased_skewness1) or np.isnan(biased_skewness2):
return np.nan
delta = mean2 - mean1
N = match_count1 + match_count2
M2_1 = match_count1 * biased_variance1
M2_2 = match_count2 * biased_variance2
M2 = M2_1 + M2_2 + delta**2 * match_count1 * match_count2 / N
if not M2:
return 0.0
M3_1 = biased_skewness1 * np.sqrt(M2_1**3) / np.sqrt(match_count1)
M3_2 = biased_skewness2 * np.sqrt(M2_2**3) / np.sqrt(match_count2)
first_term = M3_1 + M3_2
second_term = delta**3 * match_count1 * match_count2 \
* (match_count1 - match_count2) / N**2
third_term = 3 * delta * (match_count1 * M2_2
- match_count2 * M2_1) / N
M3 = first_term + second_term + third_term
biased_skewness = np.sqrt(N) * M3 / np.sqrt(M2**3)
return biased_skewness
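# Sanity sketch (hypothetical values; assumes scipy is available, which
# this module does not import): merging two chunks should match the
# biased skew of the concatenated data.
#   from scipy import stats
#   a, b = np.array([1., 2., 3., 4.]), np.array([5., 6., 10.])
#   merged = NumericStatsMixin._merge_biased_skewness(
#       len(a), stats.skew(a), a.var(), a.mean(),
#       len(b), stats.skew(b), b.var(), b.mean())
#   assert np.isclose(merged, stats.skew(np.concatenate([a, b])))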
@staticmethod
def _correct_bias_skewness(match_count, biased_skewness):
"""
Apply bias correction to skewness
:param match_count: number of samples
:param biased_skewness: skewness without bias correction
:return: unbiased estimator of skewness
:rtype: NaN if sample size is too small, float otherwise
"""
if np.isnan(biased_skewness) or match_count < 3:
warnings.warn("Insufficient match count to correct bias in skewness. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
skewness = np.sqrt(match_count * (match_count - 1)) \
* biased_skewness / (match_count - 2)
return skewness
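# This is the adjusted Fisher-Pearson estimator; assuming scipy is
# available, it agrees with stats.skew(x, bias=False) for n >= 3:
#   x = np.array([1., 2., 4., 8.])
#   G1 = NumericStatsMixin._correct_bias_skewness(len(x), stats.skew(x))
#   assert np.isclose(G1, stats.skew(x, bias=False))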
@staticmethod
def _merge_biased_kurtosis(match_count1, biased_kurtosis1, biased_skewness1,
biased_variance1, mean1, match_count2, biased_kurtosis2,
biased_skewness2, biased_variance2, mean2):
"""
Calculate the combined kurtosis of two sets of data
:param match_count1: # of samples in 1st chunk
:param biased_kurtosis1: kurtosis of 1st chunk without bias correction
:param biased_skewness1: skewness of 1st chunk without bias correction
:param biased_variance1: variance of 1st chunk without bias correction
:param mean1: mean of 1st chunk
:param match_count2: # of samples in 2nd chunk
:param biased_kurtosis2: kurtosis of 2nd chunk without bias correction
:param biased_skewness2: skewness of 2nd chunk without bias correction
:param biased_variance2: variance of 2nd chunk without bias correction
:param mean2: mean of 2nd chunk
:return: combined kurtosis
:rtype: float
"""
if match_count1 < 1:
return biased_kurtosis2
elif match_count2 < 1:
return biased_kurtosis1
elif np.isnan(biased_kurtosis1) or np.isnan(biased_kurtosis2):
return np.nan
delta = mean2 - mean1
N = match_count1 + match_count2
M2_1 = match_count1 * biased_variance1
M2_2 = match_count2 * biased_variance2
M2 = M2_1 + M2_2 + delta ** 2 * match_count1 * match_count2 / N
if not M2:
return 0
M3_1 = biased_skewness1 * np.sqrt(M2_1**3) / np.sqrt(match_count1)
M3_2 = biased_skewness2 * np.sqrt(M2_2**3) / np.sqrt(match_count2)
M4_1 = (biased_kurtosis1 + 3) * M2_1**2 / match_count1
M4_2 = (biased_kurtosis2 + 3) * M2_2**2 / match_count2
first_term = M4_1 + M4_2
second_term = delta**4 * (match_count1 * match_count2 *
(match_count1**2 - match_count1 * match_count2 +
match_count2**2)) / N**3
third_term = 6 * delta**2 * (match_count1**2 * M2_2 +
match_count2**2 * M2_1) / N**2
fourth_term = 4 * delta * (match_count1 * M3_2 - match_count2
* M3_1) / N
M4 = first_term + second_term + third_term + fourth_term
biased_kurtosis = N * M4 / M2**2 - 3
return biased_kurtosis
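# Sanity sketch (hypothetical values; scipy assumed available): the
# merged excess kurtosis of two chunks should match scipy's biased
# Fisher kurtosis of the concatenation.
#   a, b = np.array([1., 2., 3., 4.]), np.array([5., 6., 10.])
#   merged = NumericStatsMixin._merge_biased_kurtosis(
#       len(a), stats.kurtosis(a), stats.skew(a), a.var(), a.mean(),
#       len(b), stats.kurtosis(b), stats.skew(b), b.var(), b.mean())
#   assert np.isclose(merged, stats.kurtosis(np.concatenate([a, b])))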
@staticmethod
def _correct_bias_kurtosis(match_count, biased_kurtosis):
"""
Apply bias correction to kurtosis
:param match_count: number of samples
:param biased_kurtosis: kurtosis without bias correction
:return: unbiased estimator of kurtosis
:rtype: NaN if sample size is too small, float otherwise
"""
if np.isnan(biased_kurtosis) or match_count < 4:
warnings.warn("Insufficient match count to correct bias in kurtosis. Bias correction"
"can be manually disabled by setting bias_correction.is_enabled to"
"False in ProfilerOptions.", RuntimeWarning)
return np.nan
kurtosis = (match_count - 1) / ((match_count - 2) *
(match_count - 3)) * ((match_count + 1) *
(biased_kurtosis + 3) - 3 * (match_count - 1))
return kurtosis
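# Since (n+1)*(g2+3) - 3*(n-1) == (n+1)*g2 + 6, this matches the
# standard unbiased estimator, e.g. scipy's stats.kurtosis(x,
# bias=False) for n >= 4 (scipy assumed available):
#   x = np.array([1., 2., 4., 8., 16.])
#   G2 = NumericStatsMixin._correct_bias_kurtosis(len(x), stats.kurtosis(x))
#   assert np.isclose(G2, stats.kurtosis(x, bias=False))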
def _estimate_stats_from_histogram(self):
# estimate mean and variance from bin midpoints weighted by bin counts
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
mids = 0.5 * (bin_edges[1:] + bin_edges[:-1])
mean = np.average(mids, weights=bin_counts)
var = np.average((mids - mean) ** 2, weights=bin_counts)
return var
def _total_histogram_bin_variance(self, input_array):
# calculate total variance over all bins of a histogram
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
# account for np.digitize treating the last bin edge as exclusive
bin_edges = bin_edges.copy()
bin_edges[-1] += 1e-3
inds = np.digitize(input_array, bin_edges)
sum_var = 0
non_zero_bins = np.where(bin_counts)[0] + 1
for i in non_zero_bins:
elements_in_bin = input_array[inds == i]
bin_var = elements_in_bin.var()
sum_var += bin_var
return sum_var
def _histogram_bin_error(self, input_array):
"""
Calculate the error of each value from the bin of the histogram it
falls within.
:param input_array: input data used to calculate the histogram
:type input_array: Union[np.array, pd.Series]
:return: binning error
:rtype: float
"""
bin_edges = self._stored_histogram['histogram']['bin_edges']
# account for np.digitize treating the last bin edge as exclusive
bin_edges = bin_edges.copy()
temp_last_edge = bin_edges[-1]
bin_edges[-1] = np.inf
inds = np.digitize(input_array, bin_edges)
if temp_last_edge == np.inf:
inds = np.minimum(inds, len(bin_edges) - 1)
# reset the edge
bin_edges[-1] = temp_last_edge
sum_error = sum(
(input_array - (bin_edges[inds] + bin_edges[inds - 1])/2) ** 2
)
return sum_error
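# Worked example (hypothetical data): with edges [0, 1, 2], the values
# 0.2 and 1.7 fall in bins with midpoints 0.5 and 1.5, so the error is
# (0.2 - 0.5)**2 + (1.7 - 1.5)**2 == 0.13.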
@staticmethod
def _histogram_loss(diff_var, avg_diffvar, total_var,
avg_totalvar, run_time, avg_runtime):
norm_diff_var, norm_total_var, norm_runtime = 0, 0, 0
if avg_diffvar > 0:
norm_diff_var = float(diff_var - avg_diffvar) / avg_diffvar
if avg_totalvar > 0:
norm_total_var = float(total_var - avg_totalvar) / avg_totalvar
penalized_time = 1 # currently set as 1s
if (run_time - avg_runtime) >= penalized_time:
norm_runtime = float(run_time - avg_runtime) / avg_runtime
return norm_diff_var + norm_total_var + norm_runtime
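# Example: _histogram_loss(2, 1, 3, 3, 2.5, 1.0) == 2.5, i.e. (2-1)/1
# for the variance-difference term, 0 for the total-variance term, and
# (2.5-1.0)/1.0 for a run time at least 1s over the average.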
def _select_method_for_histogram(self, current_exact_var, current_est_var,
current_total_var, current_run_time):
current_diff_var = np.abs(current_exact_var - current_est_var)
current_avg_diff_var = current_diff_var.mean()
current_avg_total_var = current_total_var.mean()
current_avg_run_time = current_run_time.mean()
min_total_loss = np.inf
selected_method = ''
selected_suggested_bin_count = 0
for method_id, method in enumerate(self.histogram_bin_method_names):
self.histogram_methods[method]['current_loss'] = \
self._histogram_loss(current_diff_var[method_id],
current_avg_diff_var,
current_total_var[method_id],
current_avg_total_var,
current_run_time[method_id],
current_avg_run_time)
self.histogram_methods[method]['total_loss'] += \
self.histogram_methods[method]['current_loss']
if min_total_loss >= self.histogram_methods[method]['total_loss']:
# on a tied loss, keep the method with more bins (higher resolution)
if (self.histogram_methods[method]['suggested_bin_count']
<= selected_suggested_bin_count
and min_total_loss ==
self.histogram_methods[method]['total_loss']):
continue
min_total_loss = self.histogram_methods[method]['total_loss']
selected_method = method
selected_suggested_bin_count = \
self.histogram_methods[method]['suggested_bin_count']
return selected_method
def _histogram_to_array(self):
# Extend histogram to array format
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
is_bin_non_zero = bin_counts[:-1] > 0
bin_left_edge = bin_edges[:-2][is_bin_non_zero]
hist_to_array = [
[left_edge] * count for left_edge, count
in zip(bin_left_edge, bin_counts[:-1][is_bin_non_zero])
]
if not hist_to_array:
hist_to_array = [[]]
array_flatten = np.concatenate(
(hist_to_array + [[bin_edges[-2]] * int(bin_counts[-1] / 2)] +
[[bin_edges[-1]] * (bin_counts[-1] - int(bin_counts[-1] / 2))]))
# If we know they are integers, we can limit the data to be as such
# during conversion
if not self.__class__.__name__ == 'FloatColumn':
array_flatten = np.round(array_flatten)
return array_flatten
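# Worked example: counts [2, 0, 3] over edges [0, 1, 2, 3] expand to
# [0, 0, 2, 3, 3] -- each non-terminal bin repeats its left edge, and
# the final bin's count is split between its two edges.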
def _get_histogram(self, values):
"""
Calculates the stored histogram and the suggested bin counts for each
histogram method, using np.histogram
:param values: input data values
:type values: Union[np.array, pd.Series]
:return: bin edges and bin counts
"""
if len(np.unique(values)) == 1:
bin_counts = np.array([len(values)])
if isinstance(values, (np.ndarray, list)):
unique_value = values[0]
else:
unique_value = values.iloc[0]
bin_edges = np.array([unique_value, unique_value])
for bin_method in self.histogram_bin_method_names:
self.histogram_methods[bin_method]['histogram'][
'bin_counts'] = bin_counts
self.histogram_methods[bin_method]['histogram'][
'bin_edges'] = bin_edges
self.histogram_methods[bin_method]['suggested_bin_count'] = 1
else:
# start from the minimum bin count; if the user set a bin count, use it
n_equal_bins = suggested_bin_count = self.min_histogram_bin
if self.user_set_histogram_bin:
n_equal_bins = suggested_bin_count = self.user_set_histogram_bin
if not isinstance(values, np.ndarray):
values = np.array(values)
# loop through all methods to get their suggested bin count for
# reporting
for i, bin_method in enumerate(self.histogram_bin_method_names):
if self.user_set_histogram_bin is None:
_, suggested_bin_count = histogram_utils._get_bin_edges(
values, bin_method, None, None)
suggested_bin_count = min(suggested_bin_count,
self.max_histogram_bin)
n_equal_bins = max(n_equal_bins, suggested_bin_count)
self.histogram_methods[bin_method]['histogram'][
'bin_counts'] = None
self.histogram_methods[bin_method]['histogram'][
'bin_edges'] = None
self.histogram_methods[bin_method]['suggested_bin_count'] = \
suggested_bin_count
# calculate the stored histogram bins
bin_counts, bin_edges = np.histogram(values, bins=n_equal_bins)
return bin_counts, bin_edges
def _merge_histogram(self, values):
# values is the current array of values,
# that needs to be updated to the accumulated histogram
combined_values = np.concatenate([values, self._histogram_to_array()])
bin_counts, bin_edges = self._get_histogram(combined_values)
self._stored_histogram['histogram']['bin_counts'] = bin_counts
self._stored_histogram['histogram']['bin_edges'] = bin_edges
def _update_histogram(self, df_series):
"""
Update histogram for each method and the combined method. The algorithm
'Follow the best expert' is applied to select the combined method:
N. Cesa-Bianchi and G. Lugosi, Prediction, learning, and games.
Cambridge University Press, 2006.
R. D. Kleinberg, A. Niculescu-Mizil, and Y. Sharma, "Regret bounds
for sleeping experts and bandits," in Proceedings of the 21st Annual
Conference on Learning Theory - COLT 2008, Helsinki, Finland, 2008,
pp. 425–436.
The idea is to select the current best method based on accumulated
losses up to the current time: all methods are compared using the
accumulated losses, and the best method with minimal loss is picked
:param df_series: a given column
:type df_series: pandas.core.series.Series
:return:
"""
df_series = df_series.replace([np.inf, -np.inf], np.nan).dropna()
if df_series.empty:
return
if self._has_histogram:
self._merge_histogram(df_series.tolist())
else:
bin_counts, bin_edges = self._get_histogram(df_series)
self._stored_histogram['histogram']['bin_counts'] = bin_counts
self._stored_histogram['histogram']['bin_edges'] = bin_edges
# update loss for the stored bins
histogram_loss = self._histogram_bin_error(df_series)
self._stored_histogram['current_loss'] = histogram_loss
self._stored_histogram['total_loss'] += histogram_loss
def _histogram_for_profile(self, histogram_method):
"""
Converts the stored histogram into the presentable state based on the
suggested histogram bin count from numpy.histogram. The bin count used
is stored in 'suggested_bin_count' for each method.
:param histogram_method: method to use for determining the histogram
profile
:type histogram_method: str
:return: histogram bin edges and bin counts
:rtype: dict
"""
bin_counts, bin_edges = (
self._stored_histogram['histogram']['bin_counts'],
self._stored_histogram['histogram']['bin_edges'],
)
current_bin_counts, suggested_bin_count = (
self.histogram_methods[histogram_method]['histogram']['bin_counts'],
self.histogram_methods[histogram_method]['suggested_bin_count'],
)
# base case, no need to change if it is already correct
if not self._has_histogram or current_bin_counts is not None:
return (self.histogram_methods[histogram_method]['histogram'],
self.histogram_methods[histogram_method]['total_loss'])
elif len(bin_counts) == suggested_bin_count:
return (self._stored_histogram['histogram'],
self._stored_histogram['total_loss'])
# create proper binning
new_bin_counts = np.zeros((suggested_bin_count,))
new_bin_edges = np.linspace(
bin_edges[0], bin_edges[-1], suggested_bin_count + 1)
# allocate bin_counts
new_bin_id = 0
hist_loss = 0
for bin_id, bin_count in enumerate(bin_counts):
if not bin_count: # if nothing in bin, nothing to add
continue
bin_edge = bin_edges[bin_id: bin_id + 3]
# if we know not float, we can assume values in bins are integers.
is_float_profile = self.__class__.__name__ == 'FloatColumn'
if not is_float_profile:
bin_edge = np.round(bin_edge)
# loop until we have a new bin which contains the current bin.
while (bin_edge[0] >= new_bin_edges[new_bin_id + 1]
and new_bin_id < suggested_bin_count - 1):
new_bin_id += 1
new_bin_edge = new_bin_edges[new_bin_id: new_bin_id + 3]
# find where the current bin falls within the new bins
is_last_bin = new_bin_id == suggested_bin_count - 1
if bin_edge[1] < new_bin_edge[1] or is_last_bin:
# current bin is within the new bin
new_bin_counts[new_bin_id] += bin_count
hist_loss += ((
(new_bin_edge[1] + new_bin_edge[0])
- (bin_edge[1] + bin_edge[0])) / 2) ** 2 * bin_count
elif bin_edge[0] < new_bin_edge[1]:
# current bin straddles two of the new bins
# get the percentage of bin that falls to the left
percentage_in_left_bin = (
(new_bin_edge[1] - bin_edge[0])
/ (bin_edge[1] - bin_edge[0])
)
count_in_left_bin = round(bin_count * percentage_in_left_bin)
new_bin_counts[new_bin_id] += count_in_left_bin
hist_loss += ((
(new_bin_edge[1] + new_bin_edge[0])
- (bin_edge[1] + bin_edge[0])) / 2) ** 2 * count_in_left_bin
# allocate leftovers to the right bin
new_bin_counts[new_bin_id + 1] += bin_count - count_in_left_bin
hist_loss += ((
(new_bin_edge[2] - new_bin_edge[1])
- (bin_edge[1] - bin_edge[0])
) / 2)**2 * (bin_count - count_in_left_bin)
# increment bin id to the right bin
new_bin_id += 1
return ({'bin_edges': new_bin_edges, 'bin_counts': new_bin_counts},
hist_loss)
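# Worked example (float profile, hypothetical values): stored counts
# [1, 1, 1, 1] over edges [0, 1, 2, 3, 4] re-binned to a suggested
# count of 2 yield counts [2, 2] over edges [0, 2, 4], plus the
# accumulated squared midpoint shift as hist_loss.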
def _get_best_histogram_for_profile(self):
"""
Converts the stored histogram into the presentable state based on the
suggested histogram bin count from numpy.histogram. The bin count used
is stored in 'suggested_bin_count' for each method.
:return: histogram bin edges and bin counts
:rtype: dict
"""
if self.histogram_selection is None:
best_hist_loss = None
for method in self.histogram_methods:
histogram, hist_loss = self._histogram_for_profile(method)
self.histogram_methods[method]['histogram'] = histogram
self.histogram_methods[method]['current_loss'] = hist_loss
self.histogram_methods[method]['total_loss'] += hist_loss
if not best_hist_loss or hist_loss < best_hist_loss:
self.histogram_selection = method
best_hist_loss = hist_loss
return self.histogram_methods[self.histogram_selection]['histogram']
def _get_percentile(self, percentiles):
"""
Get the value below which the given percentage of values fall.
:param percentiles: List of percentage of values to fall before the
value
:type percentiles: list[float]
:return: List of corresponding values for which the percentage of values
in the distribution fall before each percentage
"""
bin_counts = self._stored_histogram['histogram']['bin_counts']
bin_edges = self._stored_histogram['histogram']['bin_edges']
zero_inds = bin_counts == 0
bin_counts = bin_counts.astype(float)
normalized_bin_counts = bin_counts / np.sum(bin_counts)
cumsum_bin_counts = np.cumsum(normalized_bin_counts)
median_value = None
median_bin_inds = cumsum_bin_counts == 0.5
if np.sum(median_bin_inds) > 1:
median_value = np.mean(bin_edges[np.append([False], median_bin_inds)])
# make the cumulative sum strictly increasing across empty bins so that
# np.interp effectively floors to the bin's left edge
cumsum_bin_counts[zero_inds] += 1e-15
# add initial zero bin
cumsum_bin_counts = np.append([0], cumsum_bin_counts)
quantiles = np.interp(percentiles / 100,
cumsum_bin_counts, bin_edges).tolist()
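# index 499 corresponds to the median when 999 quantiles are reported
# (the default grid built by _get_quantiles); this hardcoded index
# assumes that configuration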
if median_value:
quantiles[499] = median_value
return quantiles
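# Sketch of the interpolation (hypothetical histogram): with counts
# [2, 3, 5] over edges [0, 1, 2, 3], the normalized cumulative sum is
# [0, .2, .5, 1.], so np.interp(0.5, ...) returns 2.0 -- the edge below
# which half of the mass lies.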
def _get_quantiles(self):
"""
Retrieves the quantile set based on the specified number of quantiles
in self.quantiles.
:return: list of quantiles
"""
percentiles = np.linspace(0, 100, len(self.quantiles) + 2)[1:-1]
self.quantiles = self._get_percentile(
percentiles=percentiles)
def _update_helper(self, df_series_clean, profile):
"""
Method for updating the base numerical profile properties with a cleaned
dataset and the known null parameters of the dataset.
:param df_series_clean: df series with nulls removed
:type df_series_clean: pandas.core.series.Series
:param profile: numerical profile dictionary
:type profile: dict
:return: None
"""
if df_series_clean.empty:
return
prev_dependent_properties = {"mean": self.mean,
"biased_variance": self._biased_variance,
"biased_skewness": self._biased_skewness,
"biased_kurtosis": self._biased_kurtosis}
subset_properties = copy.deepcopy(profile)
df_series_clean = df_series_clean.astype(float)
super(NumericStatsMixin, self)._perform_property_calcs(self.__calculations,
df_series=df_series_clean,
prev_dependent_properties=prev_dependent_properties,
subset_properties=subset_properties)
if len(self._batch_history) == 5:
self._batch_history.pop(0)
self._batch_history.append(subset_properties)
@BaseColumnProfiler._timeit(name="min")
def _get_min(self, df_series, prev_dependent_properties,
subset_properties):
min_value = df_series.min()
self.min = min_value if not self.min else min(self.min, min_value)
subset_properties["min"] = min_value
@BaseColumnProfiler._timeit(name="max")
def _get_max(self, df_series, prev_dependent_properties,
subset_properties):
max_value = df_series.max()
self.max = max_value if not self.max else max(self.max, max_value)
subset_properties["max"] = max_value
@BaseColumnProfiler._timeit(name="sum")
def _get_sum(self, df_series, prev_dependent_properties,
subset_properties):
if np.isinf(self.sum) or (np.isnan(self.sum) and self.match_count > 0):
return
sum_value = df_series.sum()
if np.isinf(sum_value) or (len(df_series) > 0 and np.isnan(sum_value)):
warnings.warn("Infinite or invalid values found in data. "
"Future statistics (mean, variance, skewness, kurtosis) "
"will not be computed.", RuntimeWarning)
subset_properties["sum"] = sum_value
self.sum = self.sum + sum_value
@BaseColumnProfiler._timeit(name="variance")
def _get_variance(self, df_series, prev_dependent_properties,
subset_properties):
if np.isinf(self._biased_variance) or \
(np.isnan(self._biased_variance) and self.match_count > 0):
return
# Suppress any numpy warnings as we have a custom warning for invalid
# or infinite data already
with np.errstate(all='ignore'):
batch_biased_variance = np.var(df_series) # Obtains biased variance
subset_properties["biased_variance"] = batch_biased_variance
sum_value = subset_properties["sum"]
batch_count = subset_properties["match_count"]
batch_mean = 0. if not batch_count else \
float(sum_value) / batch_count
subset_properties["mean"] = batch_mean
self._biased_variance = self._merge_biased_variance(
self.match_count, self._biased_variance,
prev_dependent_properties["mean"],
batch_count,
batch_biased_variance,
batch_mean)
@BaseColumnProfiler._timeit(name = "skewness")
def _get_skewness(self, df_series, prev_dependent_properties,
subset_properties):
"""
Computes and updates the skewness of the current dataset given
new chunk
:param df_series: incoming data
:type df_series: pandas series
:param prev_dependent_properties: pre-update values needed
for computation
:type prev_dependent_properties: dict
:param subset_properties: incoming data statistics
:type subset_properties: dict
:return None
"""
# If skewness is still NaN but has a valid match count, this
# must mean that there were previous invalid values in
# the dataset.
if np.isinf(self._biased_skewness) or \
(np.isnan(self._biased_skewness) and self.match_count > 0):
return
batch_biased_skewness = utils.biased_skew(df_series)
subset_properties["biased_skewness"] = batch_biased_skewness
batch_count = subset_properties["match_count"]
batch_biased_var = subset_properties["biased_variance"]
batch_mean = subset_properties["mean"]
self._biased_skewness = self._merge_biased_skewness(
self.match_count, self._biased_skewness,
prev_dependent_properties["biased_variance"],
prev_dependent_properties["mean"],
batch_count, batch_biased_skewness,
batch_biased_var, batch_mean)
@BaseColumnProfiler._timeit(name = "kurtosis")
def _get_kurtosis(self, df_series, prev_dependent_properties,
subset_properties):
"""
Computes and updates the kurtosis of the current dataset given
new chunk
:param df_series: incoming data
:type df_series: pandas series
:param prev_dependent_properties: pre-update values needed
for computation
:type prev_dependent_properties: dict
:param subset_properties: incoming data statistics
:type subset_properties: dict
:return None
"""
# If kurtosis is still NaN but has a valid match count, this
# must mean that there were previous invalid values in
# the dataset.
if np.isinf(self._biased_kurtosis) or \
(np.isnan(self._biased_kurtosis) and self.match_count > 0):
return
batch_biased_kurtosis = utils.biased_kurt(df_series)
subset_properties["biased_kurtosis"] = batch_biased_kurtosis
batch_count = subset_properties["match_count"]
batch_biased_var = subset_properties["biased_variance"]
batch_biased_skewness = subset_properties["biased_skewness"]
batch_mean = subset_properties["mean"]
self._biased_kurtosis = self._merge_biased_kurtosis(
self.match_count, self._biased_kurtosis,
prev_dependent_properties["biased_skewness"],
prev_dependent_properties["biased_variance"],
prev_dependent_properties["mean"],
batch_count, batch_biased_kurtosis,
batch_biased_skewness,
batch_biased_var, batch_mean)
@BaseColumnProfiler._timeit(name="histogram_and_quantiles")
def _get_histogram_and_quantiles(self, df_series,
prev_dependent_properties,
subset_properties):
try:
self._update_histogram(df_series)
self.histogram_selection = None
if self._has_histogram:
self._get_quantiles()
except BaseException:
warnings.warn(
'Histogram error. Histogram and quantile results will not be '
'available')
@BaseColumnProfiler._timeit(name="num_zeros")
def _get_num_zeros(self, df_series, prev_dependent_properties,
subset_properties):
"""
Method for getting the count of zeros in the numerical column.
:param df_series: df series
:type df_series: pandas.core.series.Series
:param prev_dependent_properties: previous dependent properties
:type prev_dependent_properties: dict
:param subset_properties: subset of properties
:type subset_properties: dict
:return: None
"""
num_zeros_value = (df_series == 0).sum()
subset_properties["num_zeros"] = num_zeros_value
self.num_zeros = self.num_zeros + num_zeros_value
@BaseColumnProfiler._timeit(name="num_negatives")
def _get_num_negatives(self, df_series, prev_dependent_properties,
subset_properties):
"""
Method for getting the count of negative numbers
in the numerical column.
:param df_series: df series
:type df_series: pandas.core.series.Series
:param prev_dependent_properties: previous dependent properties
:type prev_dependent_properties: dict
:param subset_properties: subset of properties
:type subset_properties: dict
:return: None
"""
num_negatives_value = (df_series < 0).sum()
subset_properties["num_negatives"] = num_negatives_value
self.num_negatives = self.num_negatives + num_negatives_value
@abc.abstractmethod
def update(self, df_series):
"""
Abstract Method for updating the numerical profile properties with an
uncleaned dataset.
:param df_series: df series with nulls removed
:type df_series: pandas.core.series.Series
:return: None
"""
raise NotImplementedError()
@staticmethod
def is_float(x):
"""
For "0.80" this function returns True
For "1.00" this function returns True
For "1" this function returns True
:param x: string to test
:type x: str
:return: if is float or not
:rtype: bool
"""
try:
float(x)
except ValueError:
return False
else:
return True
@staticmethod
def is_int(x):
"""
For "0.80" This function returns False
For "1.00" This function returns True
For "1" this function returns True
:param x: string to test
:type x: str
:return: if is integer or not
:rtype: bool
"""
try:
a = float(x)
b = int(a)
except (ValueError, OverflowError, TypeError):
return False
else:
return a == b
@staticmethod
def np_type_to_type(val):
"""
Converts numpy variables to base python type variables
:param val: value to check & change
:type val: numpy type or base type
:return val: base python type
:rtype val: int or float
"""
if isinstance(val, np.integer):
return int(val)
# np.floating covers all numpy float scalar types; the former np.float
# alias was removed in NumPy 1.24
if isinstance(val, np.floating):
return float(val)
return val
| 41.559278 | 97 | 0.61691 |