Dataset schema (column name: type, observed length range):

repository_name: string, lengths 7 to 55
func_path_in_repository: string, lengths 4 to 223
func_name: string, lengths 1 to 134
whole_func_string: string, lengths 75 to 104k
language: string, 1 distinct value
func_code_string: string, lengths 75 to 104k
func_code_tokens: sequence, lengths 19 to 28.4k
func_documentation_string: string, lengths 1 to 46.9k
func_documentation_tokens: sequence, lengths 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, lengths 87 to 315
Metatab/metapack
metapack/cli/doc.py
yield_deps
def yield_deps(doc):
    """ TODO: This implementation is very specific to a particular environment and project,
    and should be generalized.
    :param doc:
    :return:
    """
    this_node = DependencyNode(doc, None)

    for r in doc.references():
        other_node = DependencyNode(r, this_node)

        yield (this_node, other_node)

        if isinstance(other_node, PackageDependencyNode):
            yield from yield_deps(other_node.doc)
python
TODO: This implementation is very specific to a particular environment and project, and should be generalized. :param doc: :return:
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/doc.py#L246-L262
Metatab/metapack
metapack/cli/doc.py
wrap_url
def wrap_url(s, l):
    """Wrap a URL string"""

    parts = s.split('/')

    if len(parts) == 1:
        return parts[0]
    else:
        i = 0
        lines = []
        for j in range(i, len(parts) + 1):
            tv = '/'.join(parts[i:j])
            nv = '/'.join(parts[i:j + 1])

            if len(nv) > l or nv == tv:
                i = j
                lines.append(tv)

        return '/\n'.join(lines)
python
Wrap a URL string
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/doc.py#L291-L308
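A quick sanity check of the wrapping logic; the URL and width are made up, and the expected output assumes the wrap_url definition above is in scope:

url = 'https://example.com/a/b/c'
print(wrap_url(url, 20))
# '/'-separated segments are greedily packed into lines of at most 20 characters:
#   https://example.com/
#   a/b/c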
Parsely/probably
probably/hll.py
HyperLogLog._get_rho
def _get_rho(self, w, arr):
    """ Return the least significant bit
        O(N) in the worst case
    """
    lsb = 0
    while not (w & arr[lsb]):
        lsb += 1
    return lsb + 1
python
Return the least significant bit O(N) in the worst case
train
https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hll.py#L38-L45
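The scan above depends on self.bitcount_arr, which is not shown in this snippet of hll.py. A standalone sketch of the same scan, assuming (as the loop implies, though the snippet does not show it) that the array holds single-bit masks:

bitcount_arr = [1 << i for i in range(64)]  # assumed layout of self.bitcount_arr

def get_rho(w, arr=bitcount_arr):
    # Rank (1-indexed) of the lowest set bit of w
    lsb = 0
    while not (w & arr[lsb]):
        lsb += 1
    return lsb + 1

print(get_rho(0b1000))  # -> 4: bit 3 is the lowest set bit, so its rank is 4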
Parsely/probably
probably/hll.py
HyperLogLog.add
def add(self, uuid):
    """ Adds a key to the HyperLogLog """
    if uuid:
        # Computing the hash
        try:
            x = hash64(uuid)
        except UnicodeEncodeError:
            x = hash64(uuid.encode('ascii', 'ignore'))

        # Finding the register to update by using the first b bits as an index
        j = x & ((1 << self.b) - 1)

        # Remove those b bits
        w = x >> self.b

        # Find the first 0 in the remaining bit pattern
        self.M[j] = max(self.M[j], self._get_rho(w, self.bitcount_arr))
python
Adds a key to the HyperLogLog
train
https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hll.py#L47-L60
Parsely/probably
probably/hll.py
HyperLogLog.estimate
def estimate(self):
    """ Returns the estimate of the cardinality """
    E = self.alpha * float(self.m ** 2) / np.power(2.0, -self.M).sum()
    if E <= 2.5 * self.m:
        # Small range correction
        V = self.m - np.count_nonzero(self.M)
        return int(self.m * np.log(self.m / float(V))) if V > 0 else int(E)
    # intermediate range correction -> No correction
    elif E <= float(long(1) << self.precision) / 30.0:
        return int(E)
    else:
        return int(-(long(1) << self.precision) * np.log(1.0 - E / (long(1) << self.precision)))
python
Returns the estimate of the cardinality
train
https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hll.py#L72-L83
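Together, _get_rho, add, and estimate implement the standard HyperLogLog pipeline (the long() calls mark the original as Python 2 code). Below is a minimal self-contained sketch of that pipeline, not probably's API: hashlib stands in for hash64, a plain bit scan for the bitcount array, and only the raw estimator is shown:

import hashlib
import numpy as np

b = 4                  # register-index bits
m = 1 << b             # number of registers
alpha = 0.673          # HLL bias constant for m = 16
M = np.zeros(m)

def add(key):
    x = int(hashlib.sha1(key.encode()).hexdigest(), 16) & ((1 << 64) - 1)
    j = x & (m - 1)    # first b bits select the register
    w = (x >> b) or 1  # remaining bits; guard against an all-zero remainder
    rho = 1            # rank of the lowest set bit
    while not w & 1:
        w >>= 1
        rho += 1
    M[j] = max(M[j], rho)

for i in range(1000):
    add('key-%d' % i)

# Raw estimator E = alpha * m^2 / sum(2^-M); expect roughly 1000, within the
# ~1.04/sqrt(m) (about 26% for m = 16) standard error.
print(int(alpha * m ** 2 / np.power(2.0, -M).sum()))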
Metatab/metapack
metapack/terms.py
Resource.base_url
def base_url(self):
    """Base URL for resolving resource URLs"""

    if self.doc.package_url:
        return self.doc.package_url

    return self.doc._ref
python
Base URL for resolving resource URLs
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L29-L35
Metatab/metapack
metapack/terms.py
Resource.env
def env(self):
    """The execution context for rowprocessors and row-generating notebooks
    and functions.
    """
    from copy import copy

    env = copy(self.doc.env)
    assert env is not None, 'Got a null execution context'

    env.update(self._envvar_env)
    env.update(self.all_props)

    return env
python
The execution context for rowprocessors and row-generating notebooks and functions.
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L54-L66
Metatab/metapack
metapack/terms.py
Resource._resolved_url
def _resolved_url(self):
    """Return a URL that properly combines the base_url and a possibly relative
    resource url"""

    if not self.url:
        return None

    u = parse_app_url(self.url)

    if u.scheme == 'index':
        u = u.resolve()

    if u.scheme != 'file':
        # Hopefully means the URL is http, https, ftp, etc.
        return u
    elif u.resource_format == 'ipynb':
        # This shouldn't be a special case, but ...
        t = self.doc.package_url.inner.join_dir(self.url)
        t = t.as_type(type(u))
        t.fragment = u.fragment
        return t
    elif u.proto == 'metatab':
        u = self.expanded_url
        return u.get_resource().get_target()
    elif u.proto == 'metapack':
        u = self.expanded_url
        if u.resource:
            return u.resource.resolved_url.get_resource().get_target()
        else:
            return u

    if u.scheme == 'file':
        return self.expanded_url
    elif False:
        assert isinstance(self.doc.package_url, MetapackPackageUrl), \
            (type(self.doc.package_url), self.doc.package_url)

        try:
            t = self.doc.package_url.resolve_url(self.url)

            # Why are we doing this?
            # Also a hack
            t.scheme_extension = parse_app_url(self.url).scheme_extension

            # Another Hack!
            try:
                if not any(t.fragment) and any(u.fragment):
                    t.fragment = u.fragment
            except TypeError:
                if not t.fragment and u.fragment:
                    t.fragment = u.fragment

            # Yet more hack!
            t = parse_app_url(str(t))

            return t

        except ResourceError as e:
            # This case happens when a filesystem package has a non-standard metadata name
            # Total hack
            raise
    else:
        raise ResourceError('Unknown case for url {} '.format(self.url))
python
Return a URL that properly combines the base_url and a possibly relative resource url
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L115-L190
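The dispatch above is driven by how the URL is classified. A hedged illustration, assuming parse_app_url is importable from rowgenerators as this module appears to do; the URL is a placeholder:

from rowgenerators import parse_app_url

u = parse_app_url('http://example.com/data.csv')  # illustrative URL
print(u.scheme)  # 'http' -> taken by the first branch above and returned unchanged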
Metatab/metapack
metapack/terms.py
Resource.schema_term
def schema_term(self):
    """Return the Table term for this resource, which is referenced either by
    the `table` property or the `schema` property"""

    if not self.name:
        raise MetapackError("Resource for url '{}' does not have a name".format(self.url))

    t = self.doc.find_first('Root.Table', value=self.get_value('name'))
    frm = 'name'

    if not t:
        t = self.doc.find_first('Root.Table', value=self.get_value('schema'))
        frm = 'schema'

    if not t:
        frm = None

    return t
python
Return the Table term for this resource, which is referenced either by the `table` property or the `schema` property
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L222-L239
Metatab/metapack
metapack/terms.py
Resource.headers
def headers(self):
    """Return the headers for the resource. Returns the AltName, if specified;
    if not, then the Name, and if that is empty, a name based on the column
    position. These headers are specifically applicable to the output table,
    and may not apply to the resource source. For those headers, use
    source_headers"""

    t = self.schema_term

    if t:
        return [self._name_for_col_term(c, i)
                for i, c in enumerate(t.children, 1) if c.term_is("Table.Column")]
    else:
        return None
python
Return the headers for the resource. Returns the AltName, if specified; if not, then the Name, and if that is empty, a name based on the column position. These headers are specifically applicable to the output table, and may not apply to the resource source. For those headers, use source_headers
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L242-L254
Metatab/metapack
metapack/terms.py
Resource.source_headers
def source_headers(self):
    """Returns the headers for the resource source. Specifically, does not
    include any header that is the EMPTY_SOURCE_HEADER value of _NONE_"""

    t = self.schema_term

    if t:
        return [self._name_for_col_term(c, i)
                for i, c in enumerate(t.children, 1)
                if c.term_is("Table.Column") and c.get_value('name') != EMPTY_SOURCE_HEADER]
    else:
        return None
python
Returns the headers for the resource source. Specifically, does not include any header that is the EMPTY_SOURCE_HEADER value of _NONE_
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L257-L269
Metatab/metapack
metapack/terms.py
Resource.columns
def columns(self):
    """Return column information from the schema or from an upstream package"""

    try:
        # For resources that are metapack packages.
        r = self.expanded_url.resource.columns()
        return list(r)
    except AttributeError as e:
        pass

    return self.schema_columns
python
Return column information from the schema or from an upstream package
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L273-L284
Metatab/metapack
metapack/terms.py
Resource.schema_columns
def schema_columns(self):
    """Return column information only from this schema"""

    t = self.schema_term

    columns = []

    if t:
        for i, c in enumerate(t.children):

            if c.term_is("Table.Column"):
                p = c.all_props
                p['pos'] = i
                p['name'] = c.value
                p['header'] = self._name_for_col_term(c, i)

                columns.append(p)

    return columns
python
Return column information only from this schema
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L287-L306
Metatab/metapack
metapack/terms.py
Resource.row_processor_table
def row_processor_table(self, ignore_none=False):
    """Create a row processor from the schema, to convert the text values from
    the CSV into real types"""
    from rowgenerators.rowpipe import Table

    type_map = {
        None: None,
        'string': 'str',
        'text': 'str',
        'number': 'float',
        'integer': 'int'
    }

    def map_type(v):
        return type_map.get(v, v)

    if self.schema_term:

        t = Table(self.get_value('name'))

        col_n = 0

        for c in self.schema_term.children:

            if ignore_none and c.name == EMPTY_SOURCE_HEADER:
                continue

            if c.term_is('Table.Column'):
                t.add_column(self._name_for_col_term(c, col_n),
                             datatype=map_type(c.get_value('datatype')),
                             valuetype=map_type(c.get_value('valuetype')),
                             transform=c.get_value('transform'),
                             width=c.get_value('width')
                             )
                col_n += 1

        return t

    else:
        return None
python
Create a row processor from the schema, to convert the text values from the CSV into real types
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L308-L347
Metatab/metapack
metapack/terms.py
Resource.raw_row_generator
def raw_row_generator(self):
    """Like rowgenerator, but does not try to create a row processor table"""
    from rowgenerators import get_generator

    self.doc.set_sys_path()  # Set sys path to package 'lib' dir in case of python function generator

    ru = self.resolved_url

    try:
        resource = ru.resource  # For Metapack urls

        return resource.row_generator
    except AttributeError:
        pass

    ut = ru.get_resource().get_target()

    # Encoding is supposed to be preserved in the URL but isn't
    source_url = parse_app_url(self.url)

    # source_url will be None for Sql terms.
    ut.encoding = self.get_value('encoding') or (source_url.encoding if source_url else None)

    g = get_generator(ut, resource=self, doc=self._doc, working_dir=self._doc.doc_dir,
                      env=self.env)

    assert g, ut

    return g
python
Like rowgenerator, but does not try to create a row processor table
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L350-L380
Metatab/metapack
metapack/terms.py
Resource._get_header
def _get_header(self):
    """Get the header from the defined header rows, for use on references or
    resources where the schema has not been run"""

    try:
        header_lines = [int(e) for e in str(self.get_value('headerlines', 0)).split(',')]
    except ValueError as e:
        header_lines = [0]

    # We're processing the raw datafile, with no schema.
    header_rows = islice(self.row_generator, min(header_lines), max(header_lines) + 1)

    from tableintuit import RowIntuiter
    headers = RowIntuiter.coalesce_headers(header_rows)

    return headers
python
Get the header from the defined header rows, for use on references or resources where the schema has not been run
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L426-L441
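The header-line selection in isolation, with a made-up two-row stacked header. Note the slice spans from the smallest to the largest listed line; RowIntuiter.coalesce_headers (not reproduced here) then merges the selected rows into one header set:

from itertools import islice

rows = iter([['Year', ''], ['', 'Population'], [2000, 282], [2001, 285]])
header_lines = [int(e) for e in '0,1'.split(',')]
print(list(islice(rows, min(header_lines), max(header_lines) + 1)))
# -> [['Year', ''], ['', 'Population']]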
Metatab/metapack
metapack/terms.py
Resource.iterdict
def iterdict(self):
    """Iterate over the resource in dict records"""
    from collections import OrderedDict

    headers = None

    for row in self:

        if headers is None:
            headers = row
            continue

        yield OrderedDict(zip(headers, row))
python
Iterate over the resource in dict records
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L493-L505
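Hypothetical usage, assuming metapack's open_package entry point and a package that actually has a resource named 'data' (both the URL and the resource name are illustrative):

from metapack import open_package

pkg = open_package('http://example.com/example-package.csv')  # placeholder URL
for rec in pkg.resource('data').iterdict():
    # The first row is consumed as headers, so each rec maps header -> value
    print(rec)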
Metatab/metapack
metapack/terms.py
Resource.iterrows
def iterrows(self):
    """Iterate over the resource as row proxy objects, which allow accessing
    columns as attributes"""

    row_proxy = None

    headers = None

    for row in self:

        if not headers:
            headers = row
            row_proxy = RowProxy(headers)
            continue

        yield row_proxy.set_row(row)
python
Iterate over the resource as row proxy objects, which allow accessing columns as attributes
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L508-L524
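One detail worth noting above: a single proxy object is created once and reused for every row, so values must be copied out if they need to outlive the loop. A minimal stand-in for the pattern (not rowgenerators' actual RowProxy):

class RowProxy:
    def __init__(self, headers):
        self._headers = headers
        self._row = None

    def set_row(self, row):
        self._row = row
        return self

    def __getattr__(self, name):
        # Only called for missing attributes; look the column up by header name
        return self._row[self._headers.index(name)]

p = RowProxy(['id', 'name'])
print(p.set_row([1, 'alice']).name)  # -> alice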
Metatab/metapack
metapack/terms.py
Resource.iterrowproxy
def iterrowproxy(self, cls=RowProxy):
    """Iterate over the resource as row proxy objects, which allow accessing
    columns as attributes. Like iterrows, but allows for setting a specific
    RowProxy class."""

    row_proxy = None

    headers = None

    for row in self:

        if not headers:
            headers = row
            row_proxy = cls(headers)
            continue

        yield row_proxy.set_row(row)
python
Iterate over the resource as row proxy objects, which allow accessing columns as attributes. Like iterrows, but allows for setting a specific RowProxy class.
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L526-L541
Metatab/metapack
metapack/terms.py
Resource.iterstruct
def iterstruct(self):
    """Yield data structures built from the JSON header specifications in a table"""
    from rowgenerators.rowpipe.json import add_to_struct

    json_headers = self.json_headers

    for row in islice(self, 1, None):  # islice skips header
        d = {}
        for pos, jh in json_headers:
            add_to_struct(d, jh, row[pos])
        yield d
python
Yield data structures built from the JSON header specifications in a table
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L548-L558
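An illustrative mimic of the role add_to_struct plays here: a dotted JSON header path routes each cell value into a nested structure. This imitates the idea, not the exact behavior, of rowgenerators.rowpipe.json.add_to_struct:

def add_to_struct(d, path, value):
    # Walk/create nested dicts for all but the last path segment
    keys = path.split('.')
    for k in keys[:-1]:
        d = d.setdefault(k, {})
    d[keys[-1]] = value

d = {}
add_to_struct(d, 'name', 'alice')
add_to_struct(d, 'address.city', 'San Diego')
print(d)  # {'name': 'alice', 'address': {'city': 'San Diego'}}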
Metatab/metapack
metapack/terms.py
Resource.iterjson
def iterjson(self, *args, **kwargs):
    """Yields the data structures from iterstruct as JSON strings"""
    from rowgenerators.rowpipe.json import VTEncoder
    import json

    if 'cls' not in kwargs:
        kwargs['cls'] = VTEncoder

    for s in self.iterstruct:
        yield (json.dumps(s, *args, **kwargs))
python
Yields the data structures from iterstruct as JSON strings
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L560-L569
Metatab/metapack
metapack/terms.py
Resource.iteryaml
def iteryaml(self, *args, **kwargs):
    """Yields the data structures from iterstruct as YAML strings"""
    from rowgenerators.rowpipe.json import VTEncoder
    import yaml

    if 'cls' not in kwargs:
        kwargs['cls'] = VTEncoder

    for s in self.iterstruct:
        yield (yaml.safe_dump(s))
python
Yields the data structures from iterstruct as YAML strings
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L571-L580
Metatab/metapack
metapack/terms.py
Resource.dataframe
def dataframe(self, dtype=False, parse_dates=True, *args, **kwargs):
    """Return a pandas dataframe from the resource"""

    import pandas as pd

    rg = self.row_generator

    t = self.resolved_url.get_resource().get_target()

    if t.target_format == 'csv':
        return self.read_csv(dtype, parse_dates, *args, **kwargs)

    # Maybe the generator has its own dataframe() method
    try:
        return rg.dataframe(*args, **kwargs)
    except AttributeError:
        pass

    # Just normal data, so use the iterator in this object.
    headers = next(islice(self, 0, 1))
    data = islice(self, 1, None)

    df = pd.DataFrame(list(data), columns=headers, *args, **kwargs)

    self.errors = df.metatab_errors = rg.errors if hasattr(rg, 'errors') and rg.errors else {}

    return df
python
Return a pandas dataframe from the resource
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L585-L612
Metatab/metapack
metapack/terms.py
Resource.geoframe
def geoframe(self, *args, **kwargs):
    """Return a Geo dataframe"""

    from geopandas import GeoDataFrame
    import geopandas as gpd
    from shapely.geometry.polygon import BaseGeometry
    from shapely.wkt import loads

    gdf = None

    try:
        gdf = self.resolved_url.geoframe(*args, **kwargs)
    except AttributeError:
        pass

    if gdf is None:
        try:
            gdf = self.resolved_url.geo_generator.geoframe(*args, **kwargs)
        except AttributeError:
            pass

    if gdf is None:
        try:
            gdf = self.row_generator.geoframe(*args, **kwargs)
        except AttributeError:
            pass

    if gdf is None:

        try:
            gdf = GeoDataFrame(self.dataframe(*args, **kwargs))

            first = next(gdf.iterrows())[1]['geometry']

            if isinstance(first, str):
                # We have a GeoDataframe, but the geometry column is still strings, so
                # it must be converted
                shapes = [loads(row['geometry']) for i, row in gdf.iterrows()]

            elif not isinstance(first, BaseGeometry):
                # If we are reading a metatab package, the geometry column's type should be
                # 'geometry' which will give the geometry values class type of
                # rowpipe.valuetype.geo.ShapeValue. However, there are other
                # types of objects that have a 'shape' property.

                shapes = [row['geometry'].shape for i, row in gdf.iterrows()]

            else:
                shapes = gdf['geometry']

            gdf['geometry'] = gpd.GeoSeries(shapes)
            gdf.set_geometry('geometry')

            # Wild guess. This case should be most often for Metatab processed geo files,
            # which are all 4326
            if gdf.crs is None:
                gdf.crs = {'init': 'epsg:4326'}

        except KeyError as e:
            raise ResourceError("Failed to create GeoDataFrame for resource '{}': No geometry column".format(self.name))

        except (KeyError, TypeError) as e:
            raise ResourceError("Failed to create GeoDataFrame for resource '{}': {}".format(self.name, str(e)))

    assert gdf.crs is not None

    return gdf
python
Return a Geo dataframe
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L614-L678
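The string branch above leans on shapely's WKT parser, which turns geometry text into geometry objects; the coordinates here are made up:

from shapely.wkt import loads

geom = loads('POINT (-117.16 32.72)')
print(geom.geom_type, geom.x, geom.y)  # Point -117.16 32.72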
Metatab/metapack
metapack/terms.py
Resource._update_pandas_kwargs
def _update_pandas_kwargs(self, dtype=False, parse_dates=True, kwargs={}):
    """ Construct args suitable for pandas read_csv
    :param dtype: If true, create a dtype type map. Otherwise, pass argument value to read_csv
    :param parse_dates: If true, create a list of date/time columns for the parse_dates argument of read_csv
    :param kwargs:
    :return:
    """
    from datetime import datetime, time, date

    type_map = {
        None: None,
        'string': str,
        'text': str,
        'number': float,
        'integer': int,
        'datetime': datetime,
        'time': time,
        'date': date
    }

    if dtype is True:
        kwargs['dtype'] = {c['name']: type_map.get(c['datatype'], c['datatype']) for c in self.columns()}
    elif dtype:
        kwargs['dtype'] = dtype

    if parse_dates is True:
        date_cols = [c['name'] for c in self.columns() if c['datatype'] in ('date', 'datetime', 'time')]
        kwargs['parse_dates'] = date_cols or True
    elif parse_dates:
        kwargs['parse_dates'] = parse_dates

    kwargs['low_memory'] = False

    return kwargs
python
Construct args suitable for pandas read_csv :param dtype: If true, create a dtype type map. Otherwise, pass argument value to read_csv :param parse_dates: If true, create a list of date/time columns for the parse_dates argument of read_csv :param kwargs: :return:
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L681-L716
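The same kwargs construction in isolation, with a hand-written column list standing in for self.columns(); the column names and types are invented:

from datetime import datetime, time, date

columns = [
    {'name': 'id', 'datatype': 'integer'},
    {'name': 'label', 'datatype': 'string'},
    {'name': 'created', 'datatype': 'datetime'},
]

type_map = {None: None, 'string': str, 'text': str, 'number': float,
            'integer': int, 'datetime': datetime, 'time': time, 'date': date}

kwargs = {
    'dtype': {c['name']: type_map.get(c['datatype'], c['datatype']) for c in columns},
    'parse_dates': [c['name'] for c in columns
                    if c['datatype'] in ('date', 'datetime', 'time')] or True,
    'low_memory': False,
}
print(kwargs)  # mirrors what read_csv() below builds and splats into pandas.read_csv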
Metatab/metapack
metapack/terms.py
Resource.read_csv
def read_csv(self, dtype=False, parse_dates=True, *args, **kwargs):
    """Fetch the target and pass through to pandas.read_csv

    Don't provide the first argument of read_csv(); it is supplied internally.
    """
    import pandas

    t = self.resolved_url.get_resource().get_target()

    kwargs = self._update_pandas_kwargs(dtype, parse_dates, kwargs)

    return pandas.read_csv(t.fspath, *args, **kwargs)
python
Fetch the target and pass through to pandas.read_csv. Don't provide the first argument of read_csv(); it is supplied internally.
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L718-L730
Metatab/metapack
metapack/terms.py
Resource.read_fwf
def read_fwf(self, *args, **kwargs):
    """Fetch the target and pass through to pandas.read_fwf.

    Don't provide the first argument of read_fwf(); it is supplied internally.
    """
    import pandas

    t = self.resolved_url.get_resource().get_target()

    return pandas.read_fwf(t.fspath, *args, **kwargs)
python
Fetch the target and pass through to pandas.read_fwf. Don't provide the first argument of read_fwf(); it is supplied internally.
[ "Fetch", "the", "target", "and", "pass", "through", "to", "pandas", ".", "read_fwf", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L732-L740
Metatab/metapack
metapack/terms.py
Resource.readlines
def readlines(self): """Load the target, open it, and return the result from readlines()""" t = self.resolved_url.get_resource().get_target() with open(t.fspath) as f: return f.readlines()
python
def readlines(self): """Load the target, open it, and return the result from readlines()""" t = self.resolved_url.get_resource().get_target() with open(t.fspath) as f: return f.readlines()
[ "def", "readlines", "(", "self", ")", ":", "t", "=", "self", ".", "resolved_url", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "with", "open", "(", "t", ".", "fspath", ")", "as", "f", ":", "return", "f", ".", "readlines", "(", ")" ]
Load the target, open it, and return the result from readlines()
[ "Load", "the", "target", "open", "it", "and", "return", "the", "result", "from", "readlines", "()" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L742-L747
Metatab/metapack
metapack/terms.py
Resource.petl
def petl(self, *args, **kwargs): """Return a PETL source object""" import petl t = self.resolved_url.get_resource().get_target() if t.target_format == 'txt': return petl.fromtext(str(t.fspath), *args, **kwargs) elif t.target_format == 'csv': return petl.fromcsv(str(t.fspath), *args, **kwargs) else: raise Exception("Can't handle")
python
def petl(self, *args, **kwargs): """Return a PETL source object""" import petl t = self.resolved_url.get_resource().get_target() if t.target_format == 'txt': return petl.fromtext(str(t.fspath), *args, **kwargs) elif t.target_format == 'csv': return petl.fromcsv(str(t.fspath), *args, **kwargs) else: raise Exception("Can't handle")
[ "def", "petl", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import", "petl", "t", "=", "self", ".", "resolved_url", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "if", "t", ".", "target_format", "==", "'txt'", ":", "return", "petl", ".", "fromtext", "(", "str", "(", "t", ".", "fspath", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "t", ".", "target_format", "==", "'csv'", ":", "return", "petl", ".", "fromcsv", "(", "str", "(", "t", ".", "fspath", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "Exception", "(", "\"Can't handle\"", ")" ]
Return a PETL source object
[ "Return", "a", "PETL", "source", "object" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L749-L760
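Assuming the same hypothetical package as in the earlier sketch, the PETL accessor would be used like this; `petl.look` is a standard PETL call for previewing rows.

```python
# Hypothetical usage; the URL and resource name are placeholders.
import petl
import metapack as mp

pkg = mp.open_package('http://example.com/example-package-1.zip')
table = pkg.resource('example-resource').petl()  # only txt and csv targets
print(petl.look(table))                          # preview the first few rows
```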
Metatab/metapack
metapack/terms.py
SqlQuery.context
def context(self): """Build the interpolation context from the schemas""" # Can't use self.columns b/c of recursion with resolved_url t = self.schema_term if not t: return {} sql_columns = [] all_columns = [] for i, c in enumerate(t.children): if c.term_is("Table.Column"): p = c.all_props if p.get('sqlselect'): # has a value for SqlSelect sql_columns.append(p.get('sqlselect')) all_columns.append(c.name) return { 'SQL_COLUMNS': ', '.join(sql_columns), 'ALL_COLUMNS': ', '.join(all_columns) }
python
def context(self): """Build the interpolation context from the schemas""" # Can't use self.columns b/c of recursion with resolved_url t = self.schema_term if not t: return {} sql_columns = [] all_columns = [] for i, c in enumerate(t.children): if c.term_is("Table.Column"): p = c.all_props if p.get('sqlselect'): # has a value for SqlSelect sql_columns.append(p.get('sqlselect')) all_columns.append(c.name) return { 'SQL_COLUMNS': ', '.join(sql_columns), 'ALL_COLUMNS': ', '.join(all_columns) }
[ "def", "context", "(", "self", ")", ":", "# Can't use self.columns b/c of recursion with resolved_url", "t", "=", "self", ".", "schema_term", "if", "not", "t", ":", "return", "{", "}", "sql_columns", "=", "[", "]", "all_columns", "=", "[", "]", "for", "i", ",", "c", "in", "enumerate", "(", "t", ".", "children", ")", ":", "if", "c", ".", "term_is", "(", "\"Table.Column\"", ")", ":", "p", "=", "c", ".", "all_props", "if", "p", ".", "get", "(", "'sqlselect'", ")", ":", "# has a value for SqlSqlect", "sql_columns", ".", "append", "(", "p", ".", "get", "(", "'sqlselect'", ")", ")", "all_columns", ".", "append", "(", "c", ".", "name", ")", "return", "{", "'SQL_COLUMNS'", ":", "', '", ".", "join", "(", "sql_columns", ")", ",", "'ALL_COLUMNS'", ":", "', '", ".", "join", "(", "all_columns", ")", "}" ]
Build the interpolation context from the schemas
[ "Build", "the", "interpolation", "context", "from", "the", "schemas" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/terms.py#L839-L864
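The dict returned by `context` is evidently meant for string interpolation into a SQL template; a self-contained illustration with made-up column names:

```python
# Made-up context values standing in for what SqlQuery.context would build.
context = {'SQL_COLUMNS': 'name, CAST(age AS INTEGER)',
           'ALL_COLUMNS': 'name, age'}

query_template = 'SELECT {SQL_COLUMNS} FROM people'
print(query_template.format(**context))
# SELECT name, CAST(age AS INTEGER) FROM people
```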
Metatab/metapack
metapack/jupyter/ipython.py
caller_locals
def caller_locals(): """Get the local variables in the caller's frame.""" import inspect frame = inspect.currentframe() try: return frame.f_back.f_back.f_locals finally: del frame
python
def caller_locals(): """Get the local variables in the caller's frame.""" import inspect frame = inspect.currentframe() try: return frame.f_back.f_back.f_locals finally: del frame
[ "def", "caller_locals", "(", ")", ":", "import", "inspect", "frame", "=", "inspect", ".", "currentframe", "(", ")", "try", ":", "return", "frame", ".", "f_back", ".", "f_back", ".", "f_locals", "finally", ":", "del", "frame" ]
Get the local variables in the caller's frame.
[ "Get", "the", "local", "variables", "in", "the", "caller", "s", "frame", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L15-L22
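The double `f_back` hop is easiest to see with a runnable demonstration: the first hop skips the frame of `caller_locals` itself, the second skips the helper that calls it.

```python
import inspect

def caller_locals():
    frame = inspect.currentframe()
    try:
        return frame.f_back.f_back.f_locals  # skip this frame and the helper's
    finally:
        del frame  # break the reference cycle, as in the record above

def helper():
    return caller_locals()

def user_code():
    secret = 42
    return helper()

print(user_code())  # {'secret': 42}
```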
Metatab/metapack
metapack/jupyter/ipython.py
open_source_package
def open_source_package(dr=None): """Like open_package(), but always open the source package""" if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: return op(join(e[0], list(intr)[0] )) if i > 2: break return None
python
def open_source_package(dr=None): """Like open_package(), but always open the source package""" if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: return op(join(e[0], list(intr)[0] )) if i > 2: break return None
[ "def", "open_source_package", "(", "dr", "=", "None", ")", ":", "if", "dr", "is", "None", ":", "dr", "=", "getcwd", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "walk_up", "(", "dr", ")", ")", ":", "intr", "=", "set", "(", "[", "DEFAULT_METATAB_FILE", ",", "LINES_METATAB_FILE", ",", "IPYNB_METATAB_FILE", "]", ")", "&", "set", "(", "e", "[", "2", "]", ")", "if", "intr", ":", "return", "op", "(", "join", "(", "e", "[", "0", "]", ",", "list", "(", "intr", ")", "[", "0", "]", ")", ")", "if", "i", ">", "2", ":", "break", "return", "None" ]
Like open_package(), but always open the source package
[ "Like", "open_package", "()", "but", "always", "open", "the", "source", "package" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L30-L46
Metatab/metapack
metapack/jupyter/ipython.py
open_package
def open_package(locals=None, dr=None): """Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory""" if locals is None: locals = caller_locals() try: # Running in a package build return op(locals['metatab_doc']) except KeyError: # Running interactively in Jupyter package_name = None build_package_dir = None source_package = None if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: source_package = join(e[0], list(intr)[0]) p = op(source_package) package_name = p.find_first_value("Root.Name") if not package_name: raise PackageError("Source package in {} does not have root.Name term".format(e[0])) if PACKAGE_PREFIX in e[1]: build_package_dir = join(e[0], PACKAGE_PREFIX) break if i > 2: break if build_package_dir and package_name and exists(join(build_package_dir, package_name)): # Open the previously built package built_package = join(build_package_dir, package_name) try: return op(built_package) except RowGeneratorError as e: pass # Probably could not open the metadata file. if source_package: # Open the source package return op(source_package) raise PackageError("Failed to find package, either in locals() or above dir '{}' ".format(dr))
python
def open_package(locals=None, dr=None): """Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory""" if locals is None: locals = caller_locals() try: # Running in a package build return op(locals['metatab_doc']) except KeyError: # Running interactively in Jupyter package_name = None build_package_dir = None source_package = None if dr is None: dr = getcwd() for i, e in enumerate(walk_up(dr)): intr = set([DEFAULT_METATAB_FILE, LINES_METATAB_FILE, IPYNB_METATAB_FILE]) & set(e[2]) if intr: source_package = join(e[0], list(intr)[0]) p = op(source_package) package_name = p.find_first_value("Root.Name") if not package_name: raise PackageError("Source package in {} does not have root.Name term".format(e[0])) if PACKAGE_PREFIX in e[1]: build_package_dir = join(e[0], PACKAGE_PREFIX) break if i > 2: break if build_package_dir and package_name and exists(join(build_package_dir, package_name)): # Open the previously built package built_package = join(build_package_dir, package_name) try: return op(built_package) except RowGeneratorError as e: pass # Probably could not open the metadata file. if source_package: # Open the source package return op(source_package) raise PackageError("Failed to find package, either in locals() or above dir '{}' ".format(dr))
[ "def", "open_package", "(", "locals", "=", "None", ",", "dr", "=", "None", ")", ":", "if", "locals", "is", "None", ":", "locals", "=", "caller_locals", "(", ")", "try", ":", "# Running in a package build", "return", "op", "(", "locals", "[", "'metatab_doc'", "]", ")", "except", "KeyError", ":", "# Running interactively in Jupyter", "package_name", "=", "None", "build_package_dir", "=", "None", "source_package", "=", "None", "if", "dr", "is", "None", ":", "dr", "=", "getcwd", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "walk_up", "(", "dr", ")", ")", ":", "intr", "=", "set", "(", "[", "DEFAULT_METATAB_FILE", ",", "LINES_METATAB_FILE", ",", "IPYNB_METATAB_FILE", "]", ")", "&", "set", "(", "e", "[", "2", "]", ")", "if", "intr", ":", "source_package", "=", "join", "(", "e", "[", "0", "]", ",", "list", "(", "intr", ")", "[", "0", "]", ")", "p", "=", "op", "(", "source_package", ")", "package_name", "=", "p", ".", "find_first_value", "(", "\"Root.Name\"", ")", "if", "not", "package_name", ":", "raise", "PackageError", "(", "\"Source package in {} does not have root.Name term\"", ".", "format", "(", "e", "[", "0", "]", ")", ")", "if", "PACKAGE_PREFIX", "in", "e", "[", "1", "]", ":", "build_package_dir", "=", "join", "(", "e", "[", "0", "]", ",", "PACKAGE_PREFIX", ")", "break", "if", "i", ">", "2", ":", "break", "if", "build_package_dir", "and", "package_name", "and", "exists", "(", "join", "(", "build_package_dir", ",", "package_name", ")", ")", ":", "# Open the previously built package", "built_package", "=", "join", "(", "build_package_dir", ",", "package_name", ")", "try", ":", "return", "op", "(", "built_package", ")", "except", "RowGeneratorError", "as", "e", ":", "pass", "# Probably could not open the metadata file.", "if", "source_package", ":", "# Open the source package", "return", "op", "(", "source_package", ")", "raise", "PackageError", "(", "\"Failed to find package, either in locals() or above dir '{}' \"", ".", "format", "(", "dr", ")", ")" ]
Try to open a package with the metatab_doc variable, which is set when a Notebook is run as a resource. If that does not exist, try the local _packages directory
[ "Try", "to", "open", "a", "package", "with", "the", "metatab_doc", "variable", "which", "is", "set", "when", "a", "Notebook", "is", "run", "as", "a", "resource", ".", "If", "that", "does", "not", "exist", "try", "the", "local", "_packages", "directory" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L48-L103
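A hedged sketch of interactive use: in a notebook cell the call falls back to searching upward from the working directory. The import path is inferred from this record's file location and is an assumption, not verified.

```python
# Assumed import path, based on metapack/jupyter/ipython.py above.
from metapack.jupyter.ipython import open_package

pkg = open_package()  # finds a built package under _packages, else the source
print(pkg.find_first_value('Root.Name'))
```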
Metatab/metapack
metapack/jupyter/ipython.py
rebuild_schema
def rebuild_schema(doc, r, df): """Rebuild the schema for a resource based on a dataframe""" import numpy as np # Re-get the resource in the doc, since it may be different. try: r = doc.resource(r.name) except AttributeError: # Maybe r is actually a resource name r = doc.resource(r) def alt_col_name(name, i): import re if not name: return 'col{}'.format(i) return re.sub('_+', '_', re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_') df_types = { np.dtype('O'): 'text', np.dtype('int64'): 'integer', np.dtype('float64'): 'number' } try: df_index_frame = df.index.to_frame() except AttributeError: df_index_frame = None def get_col_dtype(c): c = str(c) try: return df_types[df[c].dtype] except KeyError: # Maybe it is in the index? pass try: return df_types[df_index_frame[c].dtype] except TypeError: # Maybe not a multi-index pass if c == 'id' or c == df.index.name: return df_types[df.index.dtype] return 'unknown' columns = [] schema_term = r.schema_term[0] if schema_term: old_cols = {c['name'].value: c.properties for c in schema_term.children} for c in schema_term.children: schema_term.remove_child(c) schema_term.children = [] else: old_cols = {} schema_term = doc['Schema'].new_term('Table', r.schema_name) index_names = [n if n else "id" for n in df.index.names] for i, col in enumerate(index_names + list(df.columns)): acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else '' d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn} if col in old_cols.keys(): lookup_name = col elif acn in old_cols.keys(): lookup_name = acn else: lookup_name = None if lookup_name and lookup_name in old_cols: for k, v in schema_term.properties.items(): old_col = old_cols.get(lookup_name) for k, v in old_col.items(): if k != 'name' and v: d[k] = v columns.append(d) for c in columns: name = c['name'] del c['name'] datatype = c['datatype'] del c['datatype'] altname = c['altname'] del c['altname'] schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c)
python
def rebuild_schema(doc, r, df): """Rebuild the schema for a resource based on a dataframe""" import numpy as np # Re-get the resource in the doc, since it may be different. try: r = doc.resource(r.name) except AttributeError: # Maybe r is actually a resource name r = doc.resource(r) def alt_col_name(name, i): import re if not name: return 'col{}'.format(i) return re.sub('_+', '_', re.sub('[^\w_]', '_', str(name)).lower()).rstrip('_') df_types = { np.dtype('O'): 'text', np.dtype('int64'): 'integer', np.dtype('float64'): 'number' } try: df_index_frame = df.index.to_frame() except AttributeError: df_index_frame = None def get_col_dtype(c): c = str(c) try: return df_types[df[c].dtype] except KeyError: # Maybe it is in the index? pass try: return df_types[df_index_frame[c].dtype] except TypeError: # Maybe not a multi-index pass if c == 'id' or c == df.index.name: return df_types[df.index.dtype] return 'unknown' columns = [] schema_term = r.schema_term[0] if schema_term: old_cols = {c['name'].value: c.properties for c in schema_term.children} for c in schema_term.children: schema_term.remove_child(c) schema_term.children = [] else: old_cols = {} schema_term = doc['Schema'].new_term('Table', r.schema_name) index_names = [n if n else "id" for n in df.index.names] for i, col in enumerate(index_names + list(df.columns)): acn = alt_col_name(col, i) if alt_col_name(col, i) != str(col) else '' d = {'name': col, 'datatype': get_col_dtype(col), 'altname': acn} if col in old_cols.keys(): lookup_name = col elif acn in old_cols.keys(): lookup_name = acn else: lookup_name = None if lookup_name and lookup_name in old_cols: for k, v in schema_term.properties.items(): old_col = old_cols.get(lookup_name) for k, v in old_col.items(): if k != 'name' and v: d[k] = v columns.append(d) for c in columns: name = c['name'] del c['name'] datatype = c['datatype'] del c['datatype'] altname = c['altname'] del c['altname'] schema_term.new_child('Column', name, datatype=datatype, altname=altname, **c)
[ "def", "rebuild_schema", "(", "doc", ",", "r", ",", "df", ")", ":", "import", "numpy", "as", "np", "# Re-get the resource in the doc, since it may be different.", "try", ":", "r", "=", "doc", ".", "resource", "(", "r", ".", "name", ")", "except", "AttributeError", ":", "# Maybe r is actually a resource name", "r", "=", "doc", ".", "resource", "(", "r", ")", "def", "alt_col_name", "(", "name", ",", "i", ")", ":", "import", "re", "if", "not", "name", ":", "return", "'col{}'", ".", "format", "(", "i", ")", "return", "re", ".", "sub", "(", "'_+'", ",", "'_'", ",", "re", ".", "sub", "(", "'[^\\w_]'", ",", "'_'", ",", "str", "(", "name", ")", ")", ".", "lower", "(", ")", ")", ".", "rstrip", "(", "'_'", ")", "df_types", "=", "{", "np", ".", "dtype", "(", "'O'", ")", ":", "'text'", ",", "np", ".", "dtype", "(", "'int64'", ")", ":", "'integer'", ",", "np", ".", "dtype", "(", "'float64'", ")", ":", "'number'", "}", "try", ":", "df_index_frame", "=", "df", ".", "index", ".", "to_frame", "(", ")", "except", "AttributeError", ":", "df_index_frame", "=", "None", "def", "get_col_dtype", "(", "c", ")", ":", "c", "=", "str", "(", "c", ")", "try", ":", "return", "df_types", "[", "df", "[", "c", "]", ".", "dtype", "]", "except", "KeyError", ":", "# Maybe it is in the index?", "pass", "try", ":", "return", "df_types", "[", "df_index_frame", "[", "c", "]", ".", "dtype", "]", "except", "TypeError", ":", "# Maybe not a multi-index", "pass", "if", "c", "==", "'id'", "or", "c", "==", "df", ".", "index", ".", "name", ":", "return", "df_types", "[", "df", ".", "index", ".", "dtype", "]", "return", "'unknown'", "columns", "=", "[", "]", "schema_term", "=", "r", ".", "schema_term", "[", "0", "]", "if", "schema_term", ":", "old_cols", "=", "{", "c", "[", "'name'", "]", ".", "value", ":", "c", ".", "properties", "for", "c", "in", "schema_term", ".", "children", "}", "for", "c", "in", "schema_term", ".", "children", ":", "schema_term", ".", "remove_child", "(", "c", ")", "schema_term", ".", "children", "=", "[", "]", "else", ":", "old_cols", "=", "{", "}", "schema_term", "=", "doc", "[", "'Schema'", "]", ".", "new_term", "(", "'Table'", ",", "r", ".", "schema_name", ")", "index_names", "=", "[", "n", "if", "n", "else", "\"id\"", "for", "n", "in", "df", ".", "index", ".", "names", "]", "for", "i", ",", "col", "in", "enumerate", "(", "index_names", "+", "list", "(", "df", ".", "columns", ")", ")", ":", "acn", "=", "alt_col_name", "(", "col", ",", "i", ")", "if", "alt_col_name", "(", "col", ",", "i", ")", "!=", "str", "(", "col", ")", "else", "''", "d", "=", "{", "'name'", ":", "col", ",", "'datatype'", ":", "get_col_dtype", "(", "col", ")", ",", "'altname'", ":", "acn", "}", "if", "col", "in", "old_cols", ".", "keys", "(", ")", ":", "lookup_name", "=", "col", "elif", "acn", "in", "old_cols", ".", "keys", "(", ")", ":", "lookup_name", "=", "acn", "else", ":", "lookup_name", "=", "None", "if", "lookup_name", "and", "lookup_name", "in", "old_cols", ":", "for", "k", ",", "v", "in", "schema_term", ".", "properties", ".", "items", "(", ")", ":", "old_col", "=", "old_cols", ".", "get", "(", "lookup_name", ")", "for", "k", ",", "v", "in", "old_col", ".", "items", "(", ")", ":", "if", "k", "!=", "'name'", "and", "v", ":", "d", "[", "k", "]", "=", "v", "columns", ".", "append", "(", "d", ")", "for", "c", "in", "columns", ":", "name", "=", "c", "[", "'name'", "]", "del", "c", "[", "'name'", "]", "datatype", "=", "c", "[", "'datatype'", "]", "del", "c", "[", "'datatype'", "]", "altname", "=", "c", "[", "'altname'", "]", "del", "c", 
"[", "'altname'", "]", "schema_term", ".", "new_child", "(", "'Column'", ",", "name", ",", "datatype", "=", "datatype", ",", "altname", "=", "altname", ",", "*", "*", "c", ")" ]
Rebuild the schema for a resource based on a dataframe
[ "Rebuild", "the", "schema", "for", "a", "resource", "based", "on", "a", "dataframe" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L106-L206
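The heart of `rebuild_schema` is the dtype fold-down from pandas column types to Metatab datatype names; a self-contained sketch of just that mapping:

```python
# Standalone sketch of the dtype -> Metatab datatype fold-down.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': ['x', 'y']})

df_types = {np.dtype('O'): 'text',
            np.dtype('int64'): 'integer',
            np.dtype('float64'): 'number'}

print({col: df_types.get(df[col].dtype, 'unknown') for col in df.columns})
# {'a': 'integer', 'b': 'number', 'c': 'text'}
```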
Metatab/metapack
metapack/jupyter/ipython.py
rewrite_schema
def rewrite_schema(r, df, doc=None): """Rebuild the schema for a resource based on a dataframe and re-write the doc""" from metapack.cli.core import write_doc if doc is None: doc = open_source_package() rebuild_schema(doc, r, df) write_doc(doc, doc.ref)
python
def rewrite_schema(r, df, doc=None): """Rebuild the schema for a resource based on a dataframe and re-write the doc""" from metapack.cli.core import write_doc if doc is None: doc = open_source_package() rebuild_schema(doc, r, df) write_doc(doc, doc.ref)
[ "def", "rewrite_schema", "(", "r", ",", "df", ",", "doc", "=", "None", ")", ":", "from", "metapack", ".", "cli", ".", "core", "import", "write_doc", "if", "doc", "is", "None", ":", "doc", "=", "open_source_package", "(", ")", "rebuild_schema", "(", "doc", ",", "r", ",", "df", ")", "write_doc", "(", "doc", ",", "doc", ".", "ref", ")" ]
Rebuild the schema for a resource based on a dataframe and re-write the doc
[ "Rebuild", "the", "schema", "for", "a", "resource", "based", "on", "a", "dataframe", "and", "re", "-", "write", "the", "doc" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L209-L219
Metatab/metapack
metapack/jupyter/ipython.py
interactive_rewrite_schema
def interactive_rewrite_schema(r, df, doc=None): """Rebuild the schema for a resource based on a dataframe and re-write the doc, but only if running the notebook interactively, not while building""" if 'metatab_doc' in caller_locals(): return False if doc is None: doc = open_source_package() rewrite_schema(r, df, doc) return True
python
def interactive_rewrite_schema(r, df, doc=None): """Rebuild the schema for a resource based on a dataframe and re-write the doc, but only if running the notebook interactively, not while building""" if 'metatab_doc' in caller_locals(): return False if doc is None: doc = open_source_package() rewrite_schema(r, df, doc) return True
[ "def", "interactive_rewrite_schema", "(", "r", ",", "df", ",", "doc", "=", "None", ")", ":", "if", "'metatab_doc'", "in", "caller_locals", "(", ")", ":", "return", "False", "if", "doc", "is", "None", ":", "doc", "=", "open_source_package", "(", ")", "rewrite_schema", "(", "r", ",", "df", ",", "doc", ")", "return", "True" ]
Rebuild the schema for a resource based on a dataframe and re-write the doc, but only if running the notebook interactively, not while building
[ "Rebuild", "the", "schema", "for", "a", "resource", "based", "on", "a", "dataframe", "and", "re", "-", "write", "the", "doc", "but", "only", "if", "running", "the", "notebook", "interactively", "not", "while", "building" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L222-L234
Metatab/metapack
metapack/jupyter/ipython.py
get_dataframes
def get_dataframes(): """Yield tuples of dataframe variable name and the dataframe. Skips variables with names that start with an underscore""" for k, v in caller_locals().items(): if k.startswith('_'): continue if isinstance(v, pd.core.frame.DataFrame): yield k, v
python
def get_dataframes(): """Yield tuples of dataframe variable name and the dataframe. Skips variables with names that start with an underscore""" for k, v in caller_locals().items(): if k.startswith('_'): continue if isinstance(v, pd.core.frame.DataFrame): yield k, v
[ "def", "get_dataframes", "(", ")", ":", "for", "k", ",", "v", "in", "caller_locals", "(", ")", ".", "items", "(", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", ":", "continue", "if", "isinstance", "(", "v", ",", "pd", ".", "core", ".", "frame", ".", "DataFrame", ")", ":", "yield", "k", ",", "v" ]
Yield tuples of dataframe variable name and the dataframe. Skips variables with names that start with an underscore
[ "Yield", "tuples", "of", "dataframe", "variable", "name", "and", "the", "dataframe", ".", "Skips", "variables", "with", "names", "that", "start", "with", "an", "underscore" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L237-L246
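The same filtering, shown against a plain namespace dict so it runs outside a notebook:

```python
import pandas as pd

namespace = {'sales': pd.DataFrame({'x': [1]}),
             '_scratch': pd.DataFrame({'y': [2]}),  # skipped: underscore prefix
             'n': 3}                                # skipped: not a DataFrame

found = [k for k, v in namespace.items()
         if not k.startswith('_') and isinstance(v, pd.DataFrame)]
print(found)  # ['sales']
```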
Metatab/metapack
metapack/jupyter/ipython.py
get_notebook_path
def get_notebook_path(): """ Return the full path of the jupyter notebook. """ kernel_id = re.search('kernel-(.*).json', ipykernel.connect.get_connection_file()).group(1) servers = list_running_servers() for ss in servers: response = requests.get(urljoin(ss['url'], 'api/sessions'), params={'token': ss.get('token', '')}) for nn in json.loads(response.text): if nn['kernel']['id'] == kernel_id: relative_path = nn['notebook']['path'] return os.path.join(ss['notebook_dir'], relative_path)
python
def get_notebook_path(): """ Return the full path of the jupyter notebook. """ kernel_id = re.search('kernel-(.*).json', ipykernel.connect.get_connection_file()).group(1) servers = list_running_servers() for ss in servers: response = requests.get(urljoin(ss['url'], 'api/sessions'), params={'token': ss.get('token', '')}) for nn in json.loads(response.text): if nn['kernel']['id'] == kernel_id: relative_path = nn['notebook']['path'] return os.path.join(ss['notebook_dir'], relative_path)
[ "def", "get_notebook_path", "(", ")", ":", "kernel_id", "=", "re", ".", "search", "(", "'kernel-(.*).json'", ",", "ipykernel", ".", "connect", ".", "get_connection_file", "(", ")", ")", ".", "group", "(", "1", ")", "servers", "=", "list_running_servers", "(", ")", "for", "ss", "in", "servers", ":", "response", "=", "requests", ".", "get", "(", "urljoin", "(", "ss", "[", "'url'", "]", ",", "'api/sessions'", ")", ",", "params", "=", "{", "'token'", ":", "ss", ".", "get", "(", "'token'", ",", "''", ")", "}", ")", "for", "nn", "in", "json", ".", "loads", "(", "response", ".", "text", ")", ":", "if", "nn", "[", "'kernel'", "]", "[", "'id'", "]", "==", "kernel_id", ":", "relative_path", "=", "nn", "[", "'notebook'", "]", "[", "'path'", "]", "return", "os", ".", "path", ".", "join", "(", "ss", "[", "'notebook_dir'", "]", ",", "relative_path", ")" ]
Return the full path of the jupyter notebook.
[ "Return", "the", "full", "path", "of", "the", "jupyter", "notebook", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L257-L270
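The kernel-id extraction step in isolation; the connection-file name below is made up but follows Jupyter's `kernel-<uuid>.json` convention.

```python
import re

# Hypothetical connection file path, in Jupyter's usual naming scheme.
connection_file = '/run/user/1000/jupyter/kernel-4f0c2a31-9b7d-4d3a-a1f2-1c2d3e4f5a6b.json'
kernel_id = re.search('kernel-(.*).json', connection_file).group(1)
print(kernel_id)  # 4f0c2a31-9b7d-4d3a-a1f2-1c2d3e4f5a6b
```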
Metatab/metapack
metapack/jupyter/ipython.py
get_notebook_rel_path
def get_notebook_rel_path(pkg=None): """Get the path of a notebook, relative to the current soruce package""" pkg = pkg or open_source_package() pkg_path = str(pkg.package_url.fspath) nb_path = get_notebook_path() return nb_path.replace(pkg_path, '').strip('/')
python
def get_notebook_rel_path(pkg=None): """Get the path of a notebook, relative to the current source package""" pkg = pkg or open_source_package() pkg_path = str(pkg.package_url.fspath) nb_path = get_notebook_path() return nb_path.replace(pkg_path, '').strip('/')
[ "def", "get_notebook_rel_path", "(", "pkg", "=", "None", ")", ":", "pkg", "=", "pkg", "or", "open_source_package", "(", ")", "pkg_path", "=", "str", "(", "pkg", ".", "package_url", ".", "fspath", ")", "nb_path", "=", "get_notebook_path", "(", ")", "return", "nb_path", ".", "replace", "(", "pkg_path", ",", "''", ")", ".", "strip", "(", "'/'", ")" ]
Get the path of a notebook, relative to the current source package
[ "Get", "the", "path", "of", "a", "notebook", "relative", "to", "the", "current", "soruce", "package" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L273-L279
Metatab/metapack
metapack/jupyter/ipython.py
add_dataframe
def add_dataframe(df, name, pkg=None, description=''): """Add a dataframe to a source package. Pass in either the name of the dataframe, or the dataframe. If the dataframe is passed in, the name will be the dataframe's variable name. The function will re-write the source package with the new resource. """ from warnings import warn from metapack.cli.core import alt_col_name, type_map import numpy as np if name is None or df is None: warn("Did not find dataframe for reference '{}' ".format(name)) return pkg = pkg or open_source_package() resource_ref = 'file:' + get_notebook_rel_path(pkg) + '#' + name t = pkg.find_first('Root.Datafile', value=resource_ref) col_props = {} if t: print("Datafile exists for url '{}', deleting".format(resource_ref)) if t.schema_term: col_props = { c['name']:c for c in t.columns()} pkg.remove_term(t.schema_term) pkg.remove_term(t) t = pkg['Resources'].new_term('Root.Datafile', resource_ref, name=name, description=description) st = pkg['Schema'].new_term('Table', t.schema_name, description=description) for i, name in enumerate(df.columns): props = col_props.get(name,{}) try: native_type = type(np.asscalar(df[name].dtype.type(0))).__name__ except ValueError: native_type = df[name].dtype.name except AttributeError: native_type = type(df[name][0]).__name__ for pn in 'datatype name pos header'.split(): if pn in props: del props[pn] if 'altname' in props: altname = props['altname'] del props['altname'] else: raw_alt_name = alt_col_name(name, i) altname = raw_alt_name if raw_alt_name != name else '' col = df[name] if hasattr(col, 'description'): # custom property props['description'] = col.description t = st.new_child('Column', name, datatype=type_map.get(native_type, native_type), altname=altname, **props) pkg.write_csv()
python
def add_dataframe(df, name, pkg=None, description=''): """Add a dataframe to a source package. Pass in either the name of the dataframe, or the dataframe. If the dataframe is passed in, the name will be the dataframe's variable name. The function will re-write the source package with the new resource. """ from warnings import warn from metapack.cli.core import alt_col_name, type_map import numpy as np if name is None or df is None: warn("Did not find dataframe for reference '{}' ".format(name)) return pkg = pkg or open_source_package() resource_ref = 'file:' + get_notebook_rel_path(pkg) + '#' + name t = pkg.find_first('Root.Datafile', value=resource_ref) col_props = {} if t: print("Datafile exists for url '{}', deleting".format(resource_ref)) if t.schema_term: col_props = { c['name']:c for c in t.columns()} pkg.remove_term(t.schema_term) pkg.remove_term(t) t = pkg['Resources'].new_term('Root.Datafile', resource_ref, name=name, description=description) st = pkg['Schema'].new_term('Table', t.schema_name, description=description) for i, name in enumerate(df.columns): props = col_props.get(name,{}) try: native_type = type(np.asscalar(df[name].dtype.type(0))).__name__ except ValueError: native_type = df[name].dtype.name except AttributeError: native_type = type(df[name][0]).__name__ for pn in 'datatype name pos header'.split(): if pn in props: del props[pn] if 'altname' in props: altname = props['altname'] del props['altname'] else: raw_alt_name = alt_col_name(name, i) altname = raw_alt_name if raw_alt_name != name else '' col = df[name] if hasattr(col, 'description'): # custom property props['description'] = col.description t = st.new_child('Column', name, datatype=type_map.get(native_type, native_type), altname=altname, **props) pkg.write_csv()
[ "def", "add_dataframe", "(", "df", ",", "name", ",", "pkg", "=", "None", ",", "description", "=", "''", ")", ":", "from", "warnings", "import", "warn", "from", "metapack", ".", "cli", ".", "core", "import", "alt_col_name", ",", "type_map", "import", "numpy", "as", "np", "if", "name", "is", "None", "or", "df", "is", "None", ":", "warn", "(", "\"Did not find dataframe for reference '{}' \"", ".", "format", "(", "ref", ")", ")", "return", "pkg", "=", "pkg", "or", "open_source_package", "(", ")", "resource_ref", "=", "'file:'", "+", "get_notebook_rel_path", "(", "pkg", ")", "+", "'#'", "+", "name", "t", "=", "pkg", ".", "find_first", "(", "'Root.Datafile'", ",", "value", "=", "resource_ref", ")", "col_props", "=", "{", "}", "if", "t", ":", "print", "(", "\"Datafile exists for url '{}', deleting\"", ".", "format", "(", "resource_ref", ")", ")", "if", "t", ".", "schema_term", ":", "col_props", "=", "{", "c", "[", "'name'", "]", ":", "c", "for", "c", "in", "t", ".", "columns", "(", ")", "}", "pkg", ".", "remove_term", "(", "t", ".", "schema_term", ")", "pkg", ".", "remove_term", "(", "t", ")", "t", "=", "pkg", "[", "'Resources'", "]", ".", "new_term", "(", "'Root.Datafile'", ",", "resource_ref", ",", "name", "=", "name", ",", "description", "=", "description", ")", "st", "=", "pkg", "[", "'Schema'", "]", ".", "new_term", "(", "'Table'", ",", "t", ".", "schema_name", ",", "description", "=", "description", ")", "for", "i", ",", "name", "in", "enumerate", "(", "df", ".", "columns", ")", ":", "props", "=", "col_props", ".", "get", "(", "name", ",", "{", "}", ")", "try", ":", "native_type", "=", "type", "(", "np", ".", "asscalar", "(", "df", "[", "name", "]", ".", "dtype", ".", "type", "(", "0", ")", ")", ")", ".", "__name__", "except", "ValueError", ":", "native_type", "=", "df", "[", "name", "]", ".", "dtype", ".", "name", "except", "AttributeError", ":", "native_type", "=", "type", "(", "df", "[", "name", "]", "[", "0", "]", ")", ".", "__name__", "for", "pn", "in", "'datatype name pos header'", ".", "split", "(", ")", ":", "if", "pn", "in", "props", ":", "del", "props", "[", "pn", "]", "if", "'altname'", "in", "props", ":", "altname", "=", "props", "[", "'altname'", "]", "del", "props", "[", "'altname'", "]", "else", ":", "raw_alt_name", "=", "alt_col_name", "(", "name", ",", "i", ")", "altname", "=", "raw_alt_name", "if", "raw_alt_name", "!=", "name", "else", "''", "col", "=", "df", "[", "name", "]", "if", "hasattr", "(", "col", ",", "'description'", ")", ":", "# custom property", "props", "[", "'description'", "]", "=", "col", ".", "description", "t", "=", "st", ".", "new_child", "(", "'Column'", ",", "name", ",", "datatype", "=", "type_map", ".", "get", "(", "native_type", ",", "native_type", ")", ",", "altname", "=", "altname", ",", "*", "*", "props", ")", "pkg", ".", "write_csv", "(", ")" ]
Add a dataframe to a source package. Pass in either the name of the dataframe, or the dataframe. If the dataframe is passed in, the name will be the dataframe's variable name. The function will re-write the source package with the new resource.
[ "Add", "a", "dataframe", "to", "a", "source", "package", ".", "Pass", "in", "either", "the", "name", "of", "the", "dataframe", "or", "the", "dataframe", ".", "If", "the", "dataframeis", "passed", "it", "the", "name", "will", "be", "the", "dataframe", "s", "variable", "name", ".", "The", "function", "will", "re", "-", "write", "the", "source", "package", "with", "the", "new", "resource", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/ipython.py#L282-L349
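A hedged usage sketch; the DataFrame and resource name are hypothetical, the import path is assumed from this record's file location, and the call re-writes the source package's metadata as described above.

```python
# Hypothetical notebook usage; import path assumed from this record.
import pandas as pd
from metapack.jupyter.ipython import add_dataframe

df = pd.DataFrame({'x': [1, 2, 3]})
add_dataframe(df, 'example_table', description='An example table')
```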
project-rig/rig
rig/utils/docstrings.py
add_int_enums_to_docstring
def add_int_enums_to_docstring(enum): """Decorator for IntEnum which re-writes the documentation string so that Sphinx enumerates all the enumeration values. This is a work-around for Sphinx autodoc's inability to properly document IntEnums. This decorator adds enumeration names and values to the 'Attributes' section of the docstring of the decorated IntEnum class. Example:: >>> from enum import IntEnum >>> @add_int_enums_to_docstring ... class MyIntEnum(IntEnum): ... '''An example IntEnum.''' ... a = 0 ... b = 1 >>> print(MyIntEnum.__doc__) An example IntEnum. <BLANKLINE> Attributes ---------- a = 0 b = 1 <BLANKLINE> """ # The enum34 library (used for compatibility with Python < v3.4) rather # oddly set its docstring to None rather than some sensible but empty # default... if enum.__doc__ is None: # pragma: nocover enum.__doc__ = "" enum.__doc__ += ("\n\n" "Attributes\n" "----------\n") for val in list(enum): enum.__doc__ += "{} = {}\n".format(val.name, int(val)) return enum
python
def add_int_enums_to_docstring(enum): """Decorator for IntEnum which re-writes the documentation string so that Sphinx enumerates all the enumeration values. This is a work-around for Sphinx autodoc's inability to properly document IntEnums. This decorator adds enumeration names and values to the 'Attributes' section of the docstring of the decorated IntEnum class. Example:: >>> from enum import IntEnum >>> @add_int_enums_to_docstring ... class MyIntEnum(IntEnum): ... '''An example IntEnum.''' ... a = 0 ... b = 1 >>> print(MyIntEnum.__doc__) An example IntEnum. <BLANKLINE> Attributes ---------- a = 0 b = 1 <BLANKLINE> """ # The enum34 library (used for compatibility with Python < v3.4) rather # oddly set its docstring to None rather than some sensible but empty # default... if enum.__doc__ is None: # pragma: nocover enum.__doc__ = "" enum.__doc__ += ("\n\n" "Attributes\n" "----------\n") for val in list(enum): enum.__doc__ += "{} = {}\n".format(val.name, int(val)) return enum
[ "def", "add_int_enums_to_docstring", "(", "enum", ")", ":", "# The enum34 library (used for compatibility with Python < v3.4) rather", "# oddly set its docstring to None rather than some senible but empty", "# default...", "if", "enum", ".", "__doc__", "is", "None", ":", "# pragma: nocover", "enum", ".", "__doc__", "=", "\"\"", "enum", ".", "__doc__", "+=", "(", "\"\\n\\n\"", "\"Attributes\\n\"", "\"----------\\n\"", ")", "for", "val", "in", "list", "(", "enum", ")", ":", "enum", ".", "__doc__", "+=", "\"{} = {}\\n\"", ".", "format", "(", "val", ".", "name", ",", "int", "(", "val", ")", ")", "return", "enum" ]
Decorator for IntEnum which re-writes the documentation string so that Sphinx enumerates all the enumeration values. This is a work-around for Sphinx autodoc's inability to properly document IntEnums. This decorator adds enumeration names and values to the 'Attributes' section of the docstring of the decorated IntEnum class. Example:: >>> from enum import IntEnum >>> @add_int_enums_to_docstring ... class MyIntEnum(IntEnum): ... '''An example IntEnum.''' ... a = 0 ... b = 1 >>> print(MyIntEnum.__doc__) An example IntEnum. <BLANKLINE> Attributes ---------- a = 0 b = 1 <BLANKLINE>
[ "Decorator", "for", "IntEnum", "which", "re", "-", "writes", "the", "documentation", "string", "so", "that", "Sphinx", "enumerates", "all", "the", "enumeration", "values", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/docstrings.py#L12-L51
project-rig/rig
rig/utils/docstrings.py
add_signature_to_docstring
def add_signature_to_docstring(f, include_self=False, kw_only_args={}): """Decorator which adds the function signature of 'f' to the decorated function's docstring. Under Python 2, wrapping a function (even using functools.wraps) hides its signature from Sphinx's introspection tools so it is necessary to include the function signature in the docstring to enable Sphinx to render it correctly. Additionally, when building decorators which change a function's signature, it is non-trivial to modify the wrapper's function signature and so automatically generated documentation will not display the correct signature. This decorator can aid in the specific case where a wrapper adds keyword-only arguments to the set of arguments accepted by the underlying function. For example:: >>> def my_func(a, b=0, *args, **kwargs): ... '''An example function.''' ... pass >>> import functools >>> @add_signature_to_docstring(my_func, kw_only_args={"c": 1}) ... @functools.wraps(my_func) ... def my_func_wrapper(*args, **kwargs): ... c = kwargs.pop("c") ... # ...do something with c... ... return my_func(*args, **kwargs) >>> print(my_func_wrapper.__doc__) my_func(a, b=0, *args, c=1, **kwargs) An example function. .. warning:: This function only works with functions which do not have any named keyword-only arguments. For example this function cannot be handled:: def f(*args, kw_only_arg=123) This is due to a limitation in the underlying introspection library provided in Python 2. Parameters ---------- f : function The function whose signature will be used. Need not be the same as the decorated function. include_self : bool Should an initial 'self' argument be included in the signature? (These are assumed to be arguments called 'self' without a default value). kw_only_args : dict Optionally, add a set of keyword-only arguments to the function signature. This is useful if the wrapper function adds new keyword-only arguments. """ def decorate(f_wrapper): args, varargs, keywords, defaults = inspect.getargspec(f) # Simplifies later logic if defaults is None: defaults = [] # Make sure the keyword only arguments don't use the names of any other # arguments assert set(args).isdisjoint(set(kw_only_args)) assert varargs is None or varargs not in kw_only_args assert keywords is None or keywords not in kw_only_args # If required, remove the initial 'self' argument (e.g. for methods) if not include_self: if (len(args) >= 1 and args[0] == "self" and len(args) > len(defaults)): args.pop(0) # Assemble a string representation of the signature. This must be done # by hand (rather than using formatargspec) to allow the assembly of # signatures with keyword-only values. signature = "{}(".format(f_wrapper.__name__) for arg in args[:-len(defaults)] if defaults else args: signature += "{}, ".format(arg) for arg, default in zip(args[-len(defaults):], defaults): signature += "{}={}, ".format(arg, repr(default)) if kw_only_args or varargs is not None: # Must include a varargs name if keyword only arguments are # supplied. if varargs is None and kw_only_args: assert "_" not in args assert "_" not in kw_only_args assert "_" != keywords signature += "*_, " else: signature += "*{}, ".format(varargs) for keyword, default in iteritems(kw_only_args): signature += "{}={}, ".format(keyword, default) if keywords is not None: signature += "**{}, ".format(keywords) signature = "{})".format(signature.rstrip(", ")) # Only add the signature if one is not already present. if f_wrapper.__doc__ is None: f_wrapper.__doc__ = signature elif not f_wrapper.__doc__.lstrip().startswith( "{}(".format(f_wrapper.__name__)): f_wrapper.__doc__ = "{}\n{}".format(signature, f_wrapper.__doc__) # Return the original function (after modifying its __doc__) return f_wrapper return decorate
python
def add_signature_to_docstring(f, include_self=False, kw_only_args={}): """Decorator which adds the function signature of 'f' to the decorated function's docstring. Under Python 2, wrapping a function (even using functools.wraps) hides its signature from Sphinx's introspection tools so it is necessary to include the function signature in the docstring to enable Sphinx to render it correctly. Additionally, when building decorators which change a function's signature, it is non-trivial to modify the wrapper's function signature and so automatically generated documentation will not display the correct signature. This decorator can aid in the specific case where a wrapper adds keyword-only arguments to the set of arguments accepted by the underlying function. For example:: >>> def my_func(a, b=0, *args, **kwargs): ... '''An example function.''' ... pass >>> import functools >>> @add_signature_to_docstring(my_func, kw_only_args={"c": 1}) ... @functools.wraps(my_func) ... def my_func_wrapper(*args, **kwargs): ... c = kwargs.pop("c") ... # ...do something with c... ... return my_func(*args, **kwargs) >>> print(my_func_wrapper.__doc__) my_func(a, b=0, *args, c=1, **kwargs) An example function. .. warning:: This function only works with functions which do not have any named keyword-only arguments. For example this function cannot be handled:: def f(*args, kw_only_arg=123) This is due to a limitation in the underlying introspection library provided in Python 2. Parameters ---------- f : function The function whose signature will be used. Need not be the same as the decorated function. include_self : bool Should an initial 'self' argument be included in the signature? (These are assumed to be arguments called 'self' without a default value). kw_only_args : dict Optionally, add a set of keyword-only arguments to the function signature. This is useful if the wrapper function adds new keyword-only arguments. """ def decorate(f_wrapper): args, varargs, keywords, defaults = inspect.getargspec(f) # Simplifies later logic if defaults is None: defaults = [] # Make sure the keyword only arguments don't use the names of any other # arguments assert set(args).isdisjoint(set(kw_only_args)) assert varargs is None or varargs not in kw_only_args assert keywords is None or keywords not in kw_only_args # If required, remove the initial 'self' argument (e.g. for methods) if not include_self: if (len(args) >= 1 and args[0] == "self" and len(args) > len(defaults)): args.pop(0) # Assemble a string representation of the signature. This must be done # by hand (rather than using formatargspec) to allow the assembly of # signatures with keyword-only values. signature = "{}(".format(f_wrapper.__name__) for arg in args[:-len(defaults)] if defaults else args: signature += "{}, ".format(arg) for arg, default in zip(args[-len(defaults):], defaults): signature += "{}={}, ".format(arg, repr(default)) if kw_only_args or varargs is not None: # Must include a varargs name if keyword only arguments are # supplied. if varargs is None and kw_only_args: assert "_" not in args assert "_" not in kw_only_args assert "_" != keywords signature += "*_, " else: signature += "*{}, ".format(varargs) for keyword, default in iteritems(kw_only_args): signature += "{}={}, ".format(keyword, default) if keywords is not None: signature += "**{}, ".format(keywords) signature = "{})".format(signature.rstrip(", ")) # Only add the signature if one is not already present. if f_wrapper.__doc__ is None: f_wrapper.__doc__ = signature elif not f_wrapper.__doc__.lstrip().startswith( "{}(".format(f_wrapper.__name__)): f_wrapper.__doc__ = "{}\n{}".format(signature, f_wrapper.__doc__) # Return the original function (after modifying its __doc__) return f_wrapper return decorate
[ "def", "add_signature_to_docstring", "(", "f", ",", "include_self", "=", "False", ",", "kw_only_args", "=", "{", "}", ")", ":", "def", "decorate", "(", "f_wrapper", ")", ":", "args", ",", "varargs", ",", "keywords", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "f", ")", "# Simplifies later logic", "if", "defaults", "is", "None", ":", "defaults", "=", "[", "]", "# Make sure the keyword only arguments don't use the names of any other", "# arguments", "assert", "set", "(", "args", ")", ".", "isdisjoint", "(", "set", "(", "kw_only_args", ")", ")", "assert", "varargs", "is", "None", "or", "varargs", "not", "in", "kw_only_args", "assert", "keywords", "is", "None", "or", "keywords", "not", "in", "kw_only_args", "# If required, remove the initial 'self' argument (e.g. for methods)", "if", "not", "include_self", ":", "if", "(", "len", "(", "args", ")", ">=", "1", "and", "args", "[", "0", "]", "==", "\"self\"", "and", "len", "(", "args", ")", ">", "len", "(", "defaults", ")", ")", ":", "args", ".", "pop", "(", "0", ")", "# Assemble a string representation of the signature. This must be done", "# by hand (rather than using formatargspec) to allow the assembly of", "# signatures with keyword-only values.", "signature", "=", "\"{}(\"", ".", "format", "(", "f_wrapper", ".", "__name__", ")", "for", "arg", "in", "args", "[", ":", "-", "len", "(", "defaults", ")", "]", "if", "defaults", "else", "args", ":", "signature", "+=", "\"{}, \"", ".", "format", "(", "arg", ")", "for", "arg", ",", "default", "in", "zip", "(", "args", "[", "-", "len", "(", "defaults", ")", ":", "]", ",", "defaults", ")", ":", "signature", "+=", "\"{}={}, \"", ".", "format", "(", "arg", ",", "repr", "(", "default", ")", ")", "if", "kw_only_args", "or", "varargs", "is", "not", "None", ":", "# Must include a varargs name if keyword only arguments are", "# supplied.", "if", "varargs", "is", "None", "and", "kw_only_args", ":", "assert", "\"_\"", "not", "in", "args", "assert", "\"_\"", "not", "in", "kw_only_args", "assert", "\"_\"", "!=", "keywords", "signature", "+=", "\"*_, \"", "else", ":", "signature", "+=", "\"*{}, \"", ".", "format", "(", "varargs", ")", "for", "keyword", ",", "default", "in", "iteritems", "(", "kw_only_args", ")", ":", "signature", "+=", "\"{}={}, \"", ".", "format", "(", "keyword", ",", "default", ")", "if", "keywords", "is", "not", "None", ":", "signature", "+=", "\"**{}, \"", ".", "format", "(", "keywords", ")", "signature", "=", "\"{})\"", ".", "format", "(", "signature", ".", "rstrip", "(", "\", \"", ")", ")", "# Only add the signature if one is not already present.", "if", "f_wrapper", ".", "__doc__", "is", "None", ":", "f_wrapper", ".", "__doc__", "=", "signature", "elif", "not", "f_wrapper", ".", "__doc__", ".", "lstrip", "(", ")", ".", "startswith", "(", "\"{}(\"", ".", "format", "(", "f_wrapper", ".", "__name__", ")", ")", ":", "f_wrapper", ".", "__doc__", "=", "\"{}\\n{}\"", ".", "format", "(", "signature", ",", "f_wrapper", ".", "__doc__", ")", "# Return the original function (after modifying its __doc__)", "return", "f_wrapper", "return", "decorate" ]
Decorator which adds the function signature of 'f' to the decorated function's docstring. Under Python 2, wrapping a function (even using functools.wraps) hides its signature from Sphinx's introspection tools so it is necessary to include the function signature in the docstring to enable Sphinx to render it correctly. Additionally, when building decorators which change a function's signature, it is non-trivial to modify the wrapper's function signature and so automatically generated documentation will not display the correct signature. This decorator can aid in the specific case where a wrapper adds keyword-only arguments to the set of arguments accepted by the underlying function. For example:: >>> def my_func(a, b=0, *args, **kwargs): ... '''An example function.''' ... pass >>> import functools >>> @add_signature_to_docstring(my_func, kw_only_args={"c": 1}) ... @functools.wraps(my_func) ... def my_func_wrapper(*args, **kwargs): ... c = kwargs.pop("c") ... # ...do something with c... ... return my_func(*args, **kwargs) >>> print(my_func_wrapper.__doc__) my_func(a, b=0, *args, c=1, **kwargs) An example function. .. warning:: This function only works with functions which do not have any named keyword-only arguments. For example this function cannot be handled:: def f(*args, kw_only_arg=123) This is due to a limitation in the underlying introspection library provided in Python 2. Parameters ---------- f : function The function whose signature will be used. Need not be the same as the decorated function. include_self : bool Should an initial 'self' argument be included in the signature? (These are assumed to be arguments called 'self' without a default value). kw_only_args : dict Optionally, add a set of keyword-only arguments to the function signature. This is useful if the wrapper function adds new keyword-only arguments.
[ "Decorator", "which", "adds", "the", "function", "signature", "of", "f", "to", "the", "decorated", "function", "s", "docstring", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/docstrings.py#L54-L165
project-rig/rig
rig/machine_control/machine_controller.py
_if_not_closed
def _if_not_closed(f): """Run the method iff. the memory view hasn't been closed and the parent object has not been freed.""" @add_signature_to_docstring(f) @functools.wraps(f) def f_(self, *args, **kwargs): if self.closed or self._parent._freed: raise OSError return f(self, *args, **kwargs) return f_
python
def _if_not_closed(f): """Run the method iff. the memory view hasn't been closed and the parent object has not been freed.""" @add_signature_to_docstring(f) @functools.wraps(f) def f_(self, *args, **kwargs): if self.closed or self._parent._freed: raise OSError return f(self, *args, **kwargs) return f_
[ "def", "_if_not_closed", "(", "f", ")", ":", "@", "add_signature_to_docstring", "(", "f", ")", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "closed", "or", "self", ".", "_parent", ".", "_freed", ":", "raise", "OSError", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f_" ]
Run the method iff. the memory view hasn't been closed and the parent object has not been freed.
[ "Run", "the", "method", "iff", ".", "the", "memory", "view", "hasn", "t", "been", "closed", "and", "the", "parent", "object", "has", "not", "been", "freed", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2482-L2492
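The guard-decorator pattern in this record, reduced to a self-contained toy:

```python
import functools

def if_not_closed(f):
    @functools.wraps(f)
    def f_(self, *args, **kwargs):
        if self.closed:
            raise OSError('operation on a closed view')
        return f(self, *args, **kwargs)
    return f_

class View:
    closed = False

    @if_not_closed
    def read(self):
        return b'data'

v = View()
print(v.read())   # b'data'
v.closed = True   # after this, v.read() raises OSError
```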
project-rig/rig
rig/machine_control/machine_controller.py
_if_not_freed
def _if_not_freed(f): """Run the method iff. the memory view hasn't been closed.""" @add_signature_to_docstring(f) @functools.wraps(f) def f_(self, *args, **kwargs): if self._freed: raise OSError return f(self, *args, **kwargs) return f_
python
def _if_not_freed(f): """Run the method iff. the memory view hasn't been closed.""" @add_signature_to_docstring(f) @functools.wraps(f) def f_(self, *args, **kwargs): if self._freed: raise OSError return f(self, *args, **kwargs) return f_
[ "def", "_if_not_freed", "(", "f", ")", ":", "@", "add_signature_to_docstring", "(", "f", ")", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f_", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_freed", ":", "raise", "OSError", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f_" ]
Run the method iff. the memory view hasn't been closed.
[ "Run", "the", "method", "iff", ".", "the", "memory", "view", "hasn", "t", "been", "closed", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2738-L2747
project-rig/rig
rig/machine_control/machine_controller.py
unpack_routing_table_entry
def unpack_routing_table_entry(packed): """Unpack a routing table entry read from a SpiNNaker machine. Parameters ---------- packed : :py:class:`bytes` Bytes containing a packed routing table. Returns ------- (:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) or None Tuple containing the routing entry, the app_id associated with the entry and the core number associated with the entry; or None if the routing table entry is flagged as unused. """ # Unpack the routing table entry _, free, route, key, mask = struct.unpack(consts.RTE_PACK_STRING, packed) # If the top 8 bits of the route are set then this entry is not in use, so # return None. if route & 0xff000000 == 0xff000000: return None # Convert the routing table entry routes = {r for r in routing_table.Routes if (route >> r) & 0x1} rte = routing_table.RoutingTableEntry(routes, key, mask) # Convert the surrounding data app_id = free & 0xff core = (free >> 8) & 0x0f return (rte, app_id, core)
python
def unpack_routing_table_entry(packed): """Unpack a routing table entry read from a SpiNNaker machine. Parameters ---------- packed : :py:class:`bytes` Bytes containing a packed routing table. Returns ------- (:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) or None Tuple containing the routing entry, the app_id associated with the entry and the core number associated with the entry; or None if the routing table entry is flagged as unused. """ # Unpack the routing table entry _, free, route, key, mask = struct.unpack(consts.RTE_PACK_STRING, packed) # If the top 8 bits of the route are set then this entry is not in use, so # return None. if route & 0xff000000 == 0xff000000: return None # Convert the routing table entry routes = {r for r in routing_table.Routes if (route >> r) & 0x1} rte = routing_table.RoutingTableEntry(routes, key, mask) # Convert the surrounding data app_id = free & 0xff core = (free >> 8) & 0x0f return (rte, app_id, core)
[ "def", "unpack_routing_table_entry", "(", "packed", ")", ":", "# Unpack the routing table entry", "_", ",", "free", ",", "route", ",", "key", ",", "mask", "=", "struct", ".", "unpack", "(", "consts", ".", "RTE_PACK_STRING", ",", "packed", ")", "# If the top 8 bits of the route are set then this entry is not in use, so", "# return None.", "if", "route", "&", "0xff000000", "==", "0xff000000", ":", "return", "None", "# Convert the routing table entry", "routes", "=", "{", "r", "for", "r", "in", "routing_table", ".", "Routes", "if", "(", "route", ">>", "r", ")", "&", "0x1", "}", "rte", "=", "routing_table", ".", "RoutingTableEntry", "(", "routes", ",", "key", ",", "mask", ")", "# Convert the surrounding data", "app_id", "=", "free", "&", "0xff", "core", "=", "(", "free", ">>", "8", ")", "&", "0x0f", "return", "(", "rte", ",", "app_id", ",", "core", ")" ]
Unpack a routing table entry read from a SpiNNaker machine. Parameters ---------- packed : :py:class:`bytes` Bytes containing a packed routing table entry. Returns ------- (:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) or None Tuple containing the routing entry, the app_id associated with the entry and the core number associated with the entry; or None if the routing table entry is flagged as unused.
[ "Unpack", "a", "routing", "table", "entry", "read", "from", "a", "SpiNNaker", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2838-L2869
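To make the bit-level decoding above concrete, here is a self-contained sketch. The real pack string lives in rig's `consts.RTE_PACK_STRING`; the "<2H3I" layout used below (two half-words, then the route, key and mask words) is an assumption for illustration only::

    import struct

    RTE_PACK_STRING = "<2H3I"  # assumed layout: next, free, route, key, mask

    # Entry for app_id 66 on core 3, key 0xBEEF0000, routed down link/bit 3.
    packed = struct.pack(RTE_PACK_STRING, 0, (3 << 8) | 66, 1 << 3,
                         0xBEEF0000, 0xFFFF0000)
    _, free, route, key, mask = struct.unpack(RTE_PACK_STRING, packed)

    assert route & 0xff000000 != 0xff000000    # all-ones top byte marks "unused"
    print(free & 0xff)                         # app_id -> 66
    print((free >> 8) & 0x0f)                  # core   -> 3
    print([r for r in range(24) if (route >> r) & 0x1])  # route bits -> [3]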
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.scp_data_length
def scp_data_length(self): """The maximum SCP data field length supported by the machine (bytes). """ # If not known, query the machine if self._scp_data_length is None: data = self.get_software_version(255, 255, 0) self._scp_data_length = data.buffer_size return self._scp_data_length
python
def scp_data_length(self): """The maximum SCP data field length supported by the machine (bytes). """ # If not known, query the machine if self._scp_data_length is None: data = self.get_software_version(255, 255, 0) self._scp_data_length = data.buffer_size return self._scp_data_length
[ "def", "scp_data_length", "(", "self", ")", ":", "# If not known, query the machine", "if", "self", ".", "_scp_data_length", "is", "None", ":", "data", "=", "self", ".", "get_software_version", "(", "255", ",", "255", ",", "0", ")", "self", ".", "_scp_data_length", "=", "data", ".", "buffer_size", "return", "self", ".", "_scp_data_length" ]
The maximum SCP data field length supported by the machine (bytes).
[ "The", "maximum", "SCP", "data", "field", "length", "supported", "by", "the", "machine", "(", "bytes", ")", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L151-L159
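The property above asks the machine once and then serves the cached answer. The same lazy query-then-cache idiom in isolation, with a stub standing in for the real `get_software_version(255, 255, 0).buffer_size` call::

    class Example:
        def __init__(self):
            self._scp_data_length = None   # not yet known

        @property
        def scp_data_length(self):
            if self._scp_data_length is None:
                self._scp_data_length = 256   # stub for the machine query
            return self._scp_data_length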
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.root_chip
def root_chip(self): """The coordinates (x, y) of the chip used to boot the machine.""" # If not known, query the machine if self._root_chip is None: self._root_chip = self.get_software_version(255, 255, 0).position return self._root_chip
python
def root_chip(self): """The coordinates (x, y) of the chip used to boot the machine.""" # If not known, query the machine if self._root_chip is None: self._root_chip = self.get_software_version(255, 255, 0).position return self._root_chip
[ "def", "root_chip", "(", "self", ")", ":", "# If not known, query the machine", "if", "self", ".", "_root_chip", "is", "None", ":", "self", ".", "_root_chip", "=", "self", ".", "get_software_version", "(", "255", ",", "255", ",", "0", ")", ".", "position", "return", "self", ".", "_root_chip" ]
The coordinates (x, y) of the chip used to boot the machine.
[ "The", "coordinates", "(", "x", "y", ")", "of", "the", "chip", "used", "to", "boot", "the", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L173-L178
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.send_scp
def send_scp(self, *args, **kwargs): """Transmit an SCP Packet and return the response. This function is a thin wrapper around :py:meth:`rig.machine_control.scp_connection.SCPConnection.send_scp`. This function will attempt to use the SCP connection nearest the destination of the SCP command if multiple connections have been discovered using :py:meth:`.discover_connections`. Parameters ---------- x : int y : int p : int *args **kwargs """ # Retrieve contextual arguments from the keyword arguments. The # context system ensures that these values are present. x = kwargs.pop("x") y = kwargs.pop("y") p = kwargs.pop("p") return self._send_scp(x, y, p, *args, **kwargs)
python
def send_scp(self, *args, **kwargs): """Transmit an SCP Packet and return the response. This function is a thin wrapper around :py:meth:`rig.machine_control.scp_connection.SCPConnection.send_scp`. This function will attempt to use the SCP connection nearest the destination of the SCP command if multiple connections have been discovered using :py:meth:`.discover_connections`. Parameters ---------- x : int y : int p : int *args **kwargs """ # Retrieve contextual arguments from the keyword arguments. The # context system ensures that these values are present. x = kwargs.pop("x") y = kwargs.pop("y") p = kwargs.pop("p") return self._send_scp(x, y, p, *args, **kwargs)
[ "def", "send_scp", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Retrieve contextual arguments from the keyword arguments. The", "# context system ensures that these values are present.", "x", "=", "kwargs", ".", "pop", "(", "\"x\"", ")", "y", "=", "kwargs", ".", "pop", "(", "\"y\"", ")", "p", "=", "kwargs", ".", "pop", "(", "\"p\"", ")", "return", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "p", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Transmit an SCP Packet and return the response. This function is a thin wrapper around :py:meth:`rig.machine_control.scp_connection.SCPConnection.send_scp`. This function will attempt to use the SCP connection nearest the destination of the SCP command if multiple connections have been discovered using :py:meth:`.discover_connections`. Parameters ---------- x : int y : int p : int *args **kwargs
[ "Transmit", "an", "SCP", "Packet", "and", "return", "the", "response", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L182-L205
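The method above relies on rig's context system to supply `x`, `y` and `p`, so callers usually set the coordinates once and let every command in the block inherit them. A hedged usage sketch — the hostname is a placeholder and the exact context invocation is assumed from the `self(app_id=...)` usage shown later in this file::

    from rig.machine_control import MachineController
    from rig.machine_control.consts import SCPCommands

    mc = MachineController("spinn-board")   # placeholder hostname
    with mc(x=0, y=0, p=0):
        mc.send_scp(SCPCommands.sver)       # x, y and p come from the context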
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._get_connection
def _get_connection(self, x, y): """Get the appropriate connection for a chip.""" if (self._width is None or self._height is None or self._root_chip is None): return self.connections[None] else: # If possible, use the local Ethernet connected chip eth_chip = spinn5_local_eth_coord(x, y, self._width, self._height, *self._root_chip) conn = self.connections.get(eth_chip) if conn is not None: return conn else: # If no connection was available to the local board, choose # another arbitrarily. # XXX: This choice will cause lots of contention in systems # with many missing Ethernet connections. return self.connections[None]
python
def _get_connection(self, x, y): """Get the appropriate connection for a chip.""" if (self._width is None or self._height is None or self._root_chip is None): return self.connections[None] else: # If possible, use the local Ethernet connected chip eth_chip = spinn5_local_eth_coord(x, y, self._width, self._height, *self._root_chip) conn = self.connections.get(eth_chip) if conn is not None: return conn else: # If no connection was available to the local board, choose # another arbitrarily. # XXX: This choice will cause lots of contention in systems # with many missing Ethernet connections. return self.connections[None]
[ "def", "_get_connection", "(", "self", ",", "x", ",", "y", ")", ":", "if", "(", "self", ".", "_width", "is", "None", "or", "self", ".", "_height", "is", "None", "or", "self", ".", "_root_chip", "is", "None", ")", ":", "return", "self", ".", "connections", "[", "None", "]", "else", ":", "# If possible, use the local Ethernet connected chip", "eth_chip", "=", "spinn5_local_eth_coord", "(", "x", ",", "y", ",", "self", ".", "_width", ",", "self", ".", "_height", ",", "*", "self", ".", "_root_chip", ")", "conn", "=", "self", ".", "connections", ".", "get", "(", "eth_chip", ")", "if", "conn", "is", "not", "None", ":", "return", "conn", "else", ":", "# If no connection was available to the local board, choose", "# another arbitrarily.", "# XXX: This choice will cause lots of contention in systems", "# with many missing Ethernet connections.", "return", "self", ".", "connections", "[", "None", "]" ]
Get the appropriate connection for a chip.
[ "Get", "the", "appropriate", "connection", "for", "a", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L207-L224
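The selection logic above reduces to a dictionary lookup with a guaranteed fallback under the `None` key. In isolation, with placeholder connection objects::

    connections = {None: "default-connection", (0, 0): "board-(0,0)-connection"}

    def pick(eth_chip):
        conn = connections.get(eth_chip)    # prefer the local board's link
        return conn if conn is not None else connections[None]

    print(pick((0, 0)))   # -> board-(0,0)-connection
    print(pick((4, 8)))   # -> default-connection (no direct link to that board)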
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._send_scp
def _send_scp(self, x, y, p, *args, **kwargs): """Determine the best connection to use to send an SCP packet and use it to transmit. This internal version of the method is identical to send_scp except it has positional arguments for x, y and p. See the arguments for :py:meth:`~rig.machine_control.scp_connection.SCPConnection` for details. """ # Determine the size of packet we expect in return, this is usually the # size that we are informed we should expect by SCAMP/SARK or else is # the default. if self._scp_data_length is None: length = consts.SCP_SVER_RECEIVE_LENGTH_MAX else: length = self._scp_data_length connection = self._get_connection(x, y) return connection.send_scp(length, x, y, p, *args, **kwargs)
python
def _send_scp(self, x, y, p, *args, **kwargs): """Determine the best connection to use to send an SCP packet and use it to transmit. This internal version of the method is identical to send_scp except it has positional arguments for x, y and p. See the arguments for :py:meth:`~rig.machine_control.scp_connection.SCPConnection` for details. """ # Determine the size of packet we expect in return, this is usually the # size that we are informed we should expect by SCAMP/SARK or else is # the default. if self._scp_data_length is None: length = consts.SCP_SVER_RECEIVE_LENGTH_MAX else: length = self._scp_data_length connection = self._get_connection(x, y) return connection.send_scp(length, x, y, p, *args, **kwargs)
[ "def", "_send_scp", "(", "self", ",", "x", ",", "y", ",", "p", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Determine the size of packet we expect in return, this is usually the", "# size that we are informed we should expect by SCAMP/SARK or else is", "# the default.", "if", "self", ".", "_scp_data_length", "is", "None", ":", "length", "=", "consts", ".", "SCP_SVER_RECEIVE_LENGTH_MAX", "else", ":", "length", "=", "self", ".", "_scp_data_length", "connection", "=", "self", ".", "_get_connection", "(", "x", ",", "y", ")", "return", "connection", ".", "send_scp", "(", "length", ",", "x", ",", "y", ",", "p", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Determine the best connection to use to send an SCP packet and use it to transmit. This internal version of the method is identical to send_scp except it has positional arguments for x, y and p. See the arguments for :py:meth:`~rig.machine_control.scp_connection.SCPConnection` for details.
[ "Determine", "the", "best", "connection", "to", "use", "to", "send", "an", "SCP", "packet", "and", "use", "it", "to", "transmit", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L226-L246
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.boot
def boot(self, width=None, height=None, only_if_needed=True, check_booted=True, **boot_kwargs): """Boot a SpiNNaker machine. The system will be booted from the Ethernet connected chip whose hostname was given as the argument to the MachineController. With the default arguments this method will only boot systems which have not already been booted and will wait until the machine is completely booted (and raise a :py:exc:`.SpiNNakerBootError` on failure). This method uses :py:func:`rig.machine_control.boot.boot` to send boot commands to the machine and update the struct files contained within this object according to those used during boot. .. warning:: Booting the system over the open internet is likely to fail due to the port number being blocked by most ISPs and UDP not being reliable. A proxy such as `spinnaker_proxy <https://github.com/project-rig/spinnaker_proxy>`_ may be useful in this situation. Parameters ---------- width, height : *Deprecated* **Deprecated.** In older versions of SC&MP, it was necessary to indicate the size of the machine being booted. These parameters are now ignored and setting them will produce a deprecation warning. scamp_binary : filename or None Filename of the binary to boot the machine with or None to use the SC&MP binary bundled with Rig. sark_struct : filename or None The 'sark.struct' file which defines the data structures or None to use the one bundled with Rig. boot_delay : float Number of seconds to pause between sending boot data packets. post_boot_delay : float Number of seconds to wait after sending last piece of boot data to give SC&MP time to re-initialise the Ethernet interface. only_if_needed : bool If ``only_if_needed`` is True (the default), this method checks to see if the machine is already booted and only attempts to boot the machine if necessary. If ``only_if_needed`` is False, the boot commands will be sent to the target machine without checking if it is already booted or not. .. warning:: If the machine has already been booted, sending the boot commands again will not 'reboot' the machine with the newly supplied boot image, even if ``only_if_needed`` is False. check_booted : bool If ``check_booted`` is True this method waits for the machine to be fully booted before returning. If False, this check is skipped and the function returns as soon as the machine's Ethernet interface is likely to be up (but not necessarily before booting has completed). sv_overrides : {name: value, ...} Additional arguments used to override the default values in the 'sv' struct defined in the struct file. Returns ------- bool Returns True if the machine was sent boot commands, False if the machine was already booted. Raises ------ rig.machine_control.machine_controller.SpiNNakerBootError Raised when ``check_booted`` is True and the boot process was unable to boot the machine. Also raised when ``only_if_needed`` is True and the remote host is a BMP. Notes ----- The constants `rig.machine_control.boot.spinX_boot_options` can be used to specify boot parameters, for example:: controller.boot(**spin3_boot_options) This is necessary on boards such as SpiNN-3 boards if more than LED 0 is required by an application since, by default, only LED 0 is enabled. """ # Report deprecated width/height arguments if width is not None or height is not None: warnings.warn("Machine width and height are no longer needed when " "booting a machine.", DeprecationWarning) # Check to see if the machine is already booted first if only_if_needed: # We create a new MachineController which fails quickly if it # doesn't receive a reply (since typically the machine is already # booted). quick_fail_mc = MachineController(self.initial_host, n_tries=1) try: info = quick_fail_mc.get_software_version(255, 255, 0) if "SpiNNaker" not in info.version_string: raise SpiNNakerBootError( "Remote host is not a SpiNNaker machine and so cannot " "be booted. (Are you using a BMP IP/hostname?)") # Machine did not need booting return False except SCPError: # The machine is not responding to SCP so it needs booting. pass # Actually boot the machine boot_kwargs.setdefault("boot_port", self.boot_port) self.structs = boot.boot(self.initial_host, **boot_kwargs) assert len(self.structs) > 0 # Wait for the machine to completely boot if check_booted: try: p2p_address = (255, 255) while p2p_address == (255, 255): time.sleep(0.1) p2p_address = self.get_software_version( 255, 255, 0).position except SCPError: # Machine did not respond raise SpiNNakerBootError( "The remote machine could not be booted.") # The machine was sent boot commands return True
python
def boot(self, width=None, height=None, only_if_needed=True, check_booted=True, **boot_kwargs): """Boot a SpiNNaker machine. The system will be booted from the Ethernet connected chip whose hostname was given as the argument to the MachineController. With the default arguments this method will only boot systems which have not already been booted and will wait until the machine is completely booted (and raise a :py:exc:`.SpiNNakerBootError` on failure). This method uses :py:func:`rig.machine_control.boot.boot` to send boot commands to the machine and update the struct files contained within this object according to those used during boot. .. warning:: Booting the system over the open internet is likely to fail due to the port number being blocked by most ISPs and UDP not being reliable. A proxy such as `spinnaker_proxy <https://github.com/project-rig/spinnaker_proxy>`_ may be useful in this situation. Parameters ---------- width, height : *Deprecated* **Deprecated.** In older versions of SC&MP, it was necessary to indicate the size of the machine being booted. These parameters are now ignored and setting them will produce a deprecation warning. scamp_binary : filename or None Filename of the binary to boot the machine with or None to use the SC&MP binary bundled with Rig. sark_struct : filename or None The 'sark.struct' file which defines the data structures or None to use the one bundled with Rig. boot_delay : float Number of seconds to pause between sending boot data packets. post_boot_delay : float Number of seconds to wait after sending last piece of boot data to give SC&MP time to re-initialise the Ethernet interface. only_if_needed : bool If ``only_if_needed`` is True (the default), this method checks to see if the machine is already booted and only attempts to boot the machine if necessary. If ``only_if_needed`` is False, the boot commands will be sent to the target machine without checking if it is already booted or not. .. warning:: If the machine has already been booted, sending the boot commands again will not 'reboot' the machine with the newly supplied boot image, even if ``only_if_needed`` is False. check_booted : bool If ``check_booted`` is True this method waits for the machine to be fully booted before returning. If False, this check is skipped and the function returns as soon as the machine's Ethernet interface is likely to be up (but not necessarily before booting has completed). sv_overrides : {name: value, ...} Additional arguments used to override the default values in the 'sv' struct defined in the struct file. Returns ------- bool Returns True if the machine was sent boot commands, False if the machine was already booted. Raises ------ rig.machine_control.machine_controller.SpiNNakerBootError Raised when ``check_booted`` is True and the boot process was unable to boot the machine. Also raised when ``only_if_needed`` is True and the remote host is a BMP. Notes ----- The constants `rig.machine_control.boot.spinX_boot_options` can be used to specify boot parameters, for example:: controller.boot(**spin3_boot_options) This is necessary on boards such as SpiNN-3 boards if more than LED 0 is required by an application since, by default, only LED 0 is enabled. """ # Report deprecated width/height arguments if width is not None or height is not None: warnings.warn("Machine width and height are no longer needed when " "booting a machine.", DeprecationWarning) # Check to see if the machine is already booted first if only_if_needed: # We create a new MachineController which fails quickly if it # doesn't receive a reply (since typically the machine is already # booted). quick_fail_mc = MachineController(self.initial_host, n_tries=1) try: info = quick_fail_mc.get_software_version(255, 255, 0) if "SpiNNaker" not in info.version_string: raise SpiNNakerBootError( "Remote host is not a SpiNNaker machine and so cannot " "be booted. (Are you using a BMP IP/hostname?)") # Machine did not need booting return False except SCPError: # The machine is not responding to SCP so it needs booting. pass # Actually boot the machine boot_kwargs.setdefault("boot_port", self.boot_port) self.structs = boot.boot(self.initial_host, **boot_kwargs) assert len(self.structs) > 0 # Wait for the machine to completely boot if check_booted: try: p2p_address = (255, 255) while p2p_address == (255, 255): time.sleep(0.1) p2p_address = self.get_software_version( 255, 255, 0).position except SCPError: # Machine did not respond raise SpiNNakerBootError( "The remote machine could not be booted.") # The machine was sent boot commands return True
[ "def", "boot", "(", "self", ",", "width", "=", "None", ",", "height", "=", "None", ",", "only_if_needed", "=", "True", ",", "check_booted", "=", "True", ",", "*", "*", "boot_kwargs", ")", ":", "# Report deprecated width/height arguments", "if", "width", "is", "not", "None", "or", "height", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"Machine width and height are no longer needed when \"", "\"booting a machine.\"", ",", "DeprecationWarning", ")", "# Check to see if the machine is already booted first", "if", "only_if_needed", ":", "# We create a new MachineController which fails quickly if it", "# doesn't receieve a reply (since typically the machine is already", "# booted).", "quick_fail_mc", "=", "MachineController", "(", "self", ".", "initial_host", ",", "n_tries", "=", "1", ")", "try", ":", "info", "=", "quick_fail_mc", ".", "get_software_version", "(", "255", ",", "255", ",", "0", ")", "if", "\"SpiNNaker\"", "not", "in", "info", ".", "version_string", ":", "raise", "SpiNNakerBootError", "(", "\"Remote host is not a SpiNNaker machine and so cannot \"", "\"be booted. (Are you using a BMP IP/hostname?)\"", ")", "# Machine did not need booting", "return", "False", "except", "SCPError", ":", "# The machine is not responding to SCP so it needs booting.", "pass", "# Actually boot the machine", "boot_kwargs", ".", "setdefault", "(", "\"boot_port\"", ",", "self", ".", "boot_port", ")", "self", ".", "structs", "=", "boot", ".", "boot", "(", "self", ".", "initial_host", ",", "*", "*", "boot_kwargs", ")", "assert", "len", "(", "self", ".", "structs", ")", ">", "0", "# Wait for the machine to completely boot", "if", "check_booted", ":", "try", ":", "p2p_address", "=", "(", "255", ",", "255", ")", "while", "p2p_address", "==", "(", "255", ",", "255", ")", ":", "time", ".", "sleep", "(", "0.1", ")", "p2p_address", "=", "self", ".", "get_software_version", "(", "255", ",", "255", ",", "0", ")", ".", "position", "except", "SCPError", ":", "# Machine did not respond", "raise", "SpiNNakerBootError", "(", "\"The remote machine could not be booted.\"", ")", "# The machine was sent boot commands", "return", "True" ]
Boot a SpiNNaker machine. The system will be booted from the Ethernet connected chip whose hostname was given as the argument to the MachineController. With the default arguments this method will only boot systems which have not already been booted and will wait until the machine is completely booted (and raise a :py:exc:`.SpiNNakerBootError` on failure). This method uses :py:func:`rig.machine_control.boot.boot` to send boot commands to the machine and update the struct files contained within this object according to those used during boot. .. warning:: Booting the system over the open internet is likely to fail due to the port number being blocked by most ISPs and UDP not being reliable. A proxy such as `spinnaker_proxy <https://github.com/project-rig/spinnaker_proxy>`_ may be useful in this situation. Parameters ---------- width, height : *Deprecated* **Deprecated.** In older versions of SC&MP, it was necessary to indicate the size of the machine being booted. These parameters are now ignored and setting them will produce a deprecation warning. scamp_binary : filename or None Filename of the binary to boot the machine with or None to use the SC&MP binary bundled with Rig. sark_struct : filename or None The 'sark.struct' file which defines the data structures or None to use the one bundled with Rig. boot_delay : float Number of seconds to pause between sending boot data packets. post_boot_delay : float Number of seconds to wait after sending last piece of boot data to give SC&MP time to re-initialise the Ethernet interface. only_if_needed : bool If ``only_if_needed`` is True (the default), this method checks to see if the machine is already booted and only attempts to boot the machine if necessary. If ``only_if_needed`` is False, the boot commands will be sent to the target machine without checking if it is already booted or not. .. warning:: If the machine has already been booted, sending the boot commands again will not 'reboot' the machine with the newly supplied boot image, even if ``only_if_needed`` is False. check_booted : bool If ``check_booted`` is True this method waits for the machine to be fully booted before returning. If False, this check is skipped and the function returns as soon as the machine's Ethernet interface is likely to be up (but not necessarily before booting has completed). sv_overrides : {name: value, ...} Additional arguments used to override the default values in the 'sv' struct defined in the struct file. Returns ------- bool Returns True if the machine was sent boot commands, False if the machine was already booted. Raises ------ rig.machine_control.machine_controller.SpiNNakerBootError Raised when ``check_booted`` is True and the boot process was unable to boot the machine. Also raised when ``only_if_needed`` is True and the remote host is a BMP. Notes ----- The constants `rig.machine_control.boot.spinX_boot_options` can be used to specify boot parameters, for example:: controller.boot(**spin3_boot_options) This is necessary on boards such as SpiNN-3 boards if more than LED 0 is required by an application since, by default, only LED 0 is enabled.
[ "Boot", "a", "SpiNNaker", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L248-L376
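A typical boot sequence, combining this method with the `spinX_boot_options` constants named in its Notes section (the hostname is a placeholder for a real Ethernet-attached board)::

    from rig.machine_control import MachineController
    from rig.machine_control.boot import spin3_boot_options

    mc = MachineController("spinn-board")
    if mc.boot(**spin3_boot_options):     # True if boot commands were sent
        print("Boot commands sent; machine is now booted.")
    else:
        print("Machine was already booted.")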
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.discover_connections
def discover_connections(self, x=255, y=255): """Attempt to discover all available Ethernet connections to a machine. After calling this method, :py:class:`.MachineController` will attempt to communicate via the Ethernet connection on the same board as the destination chip for all commands. If called multiple times, existing connections will be retained in preference to new ones. .. note:: The system must be booted for this command to succeed. .. note:: Currently, only systems comprised of multiple Ethernet-connected SpiNN-5 boards are supported. Parameters ---------- x : int y : int (Optional) The coordinates of the chip to initially use to query the system for the set of live chips. Returns ------- int The number of new connections established. """ working_chips = set( (x, y) for (x, y), route in iteritems(self.get_p2p_routing_table(x, y)) if route != consts.P2PTableEntry.none) self._width = max(x for x, y in working_chips) + 1 self._height = max(y for x, y in working_chips) + 1 num_new_connections = 0 for x, y in spinn5_eth_coords(self._width, self._height, *self.root_chip): if (x, y) in working_chips and (x, y) not in self.connections: # Discover the chip's IP address try: ip = self.get_ip_address(x, y) except SCPError: continue if ip is not None: # Create a connection to the IP self.connections[(x, y)] = \ SCPConnection(ip, self.scp_port, self.n_tries, self.timeout) # Attempt to use the connection (and remove it if it # doesn't work) try: self.get_software_version(x, y, 0) num_new_connections += 1 except SCPError: self.connections.pop((x, y)).close() return num_new_connections
python
def discover_connections(self, x=255, y=255): """Attempt to discover all available Ethernet connections to a machine. After calling this method, :py:class:`.MachineController` will attempt to communicate via the Ethernet connection on the same board as the destination chip for all commands. If called multiple times, existing connections will be retained in preference to new ones. .. note:: The system must be booted for this command to succeed. .. note:: Currently, only systems comprised of multiple Ethernet-connected SpiNN-5 boards are supported. Parameters ---------- x : int y : int (Optional) The coordinates of the chip to initially use to query the system for the set of live chips. Returns ------- int The number of new connections established. """ working_chips = set( (x, y) for (x, y), route in iteritems(self.get_p2p_routing_table(x, y)) if route != consts.P2PTableEntry.none) self._width = max(x for x, y in working_chips) + 1 self._height = max(y for x, y in working_chips) + 1 num_new_connections = 0 for x, y in spinn5_eth_coords(self._width, self._height, *self.root_chip): if (x, y) in working_chips and (x, y) not in self.connections: # Discover the chip's IP address try: ip = self.get_ip_address(x, y) except SCPError: continue if ip is not None: # Create a connection to the IP self.connections[(x, y)] = \ SCPConnection(ip, self.scp_port, self.n_tries, self.timeout) # Attempt to use the connection (and remove it if it # doesn't work) try: self.get_software_version(x, y, 0) num_new_connections += 1 except SCPError: self.connections.pop((x, y)).close() return num_new_connections
[ "def", "discover_connections", "(", "self", ",", "x", "=", "255", ",", "y", "=", "255", ")", ":", "working_chips", "=", "set", "(", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", ",", "route", "in", "iteritems", "(", "self", ".", "get_p2p_routing_table", "(", "x", ",", "y", ")", ")", "if", "route", "!=", "consts", ".", "P2PTableEntry", ".", "none", ")", "self", ".", "_width", "=", "max", "(", "x", "for", "x", ",", "y", "in", "working_chips", ")", "+", "1", "self", ".", "_height", "=", "max", "(", "y", "for", "x", ",", "y", "in", "working_chips", ")", "+", "1", "num_new_connections", "=", "0", "for", "x", ",", "y", "in", "spinn5_eth_coords", "(", "self", ".", "_width", ",", "self", ".", "_height", ",", "*", "self", ".", "root_chip", ")", ":", "if", "(", "x", ",", "y", ")", "in", "working_chips", "and", "(", "x", ",", "y", ")", "not", "in", "self", ".", "connections", ":", "# Discover the chip's IP address", "try", ":", "ip", "=", "self", ".", "get_ip_address", "(", "x", ",", "y", ")", "except", "SCPError", ":", "continue", "if", "ip", "is", "not", "None", ":", "# Create a connection to the IP", "self", ".", "connections", "[", "(", "x", ",", "y", ")", "]", "=", "SCPConnection", "(", "ip", ",", "self", ".", "scp_port", ",", "self", ".", "n_tries", ",", "self", ".", "timeout", ")", "# Attempt to use the connection (and remove it if it", "# doesn't work)", "try", ":", "self", ".", "get_software_version", "(", "x", ",", "y", ",", "0", ")", "num_new_connections", "+=", "1", "except", "SCPError", ":", "self", ".", "connections", ".", "pop", "(", "(", "x", ",", "y", ")", ")", ".", "close", "(", ")", "return", "num_new_connections" ]
Attempt to discover all available Ethernet connections to a machine. After calling this method, :py:class:`.MachineController` will attempt to communicate via the Ethernet connection on the same board as the destination chip for all commands. If called multiple times, existing connections will be retained in preference to new ones. .. note:: The system must be booted for this command to succeed. .. note:: Currently, only systems comprised of multiple Ethernet-connected SpiNN-5 boards are supported. Parameters ---------- x : int y : int (Optional) The coordinates of the chip to initially use to query the system for the set of live chips. Returns ------- int The number of new connections established.
[ "Attempt", "to", "discover", "all", "available", "Ethernet", "connections", "to", "a", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L379-L440
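Since the system must be booted before connections can be discovered, the usual pattern is to boot first and then probe (hostname again a placeholder)::

    mc = MachineController("spinn-board")
    mc.boot()                             # the machine must be booted first
    opened = mc.discover_connections()    # probe each board's Ethernet chip
    print("Opened", opened, "additional Ethernet connection(s)")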
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.application
def application(self, app_id): """Update the context to use the given application ID and stop the application when done. For example:: with cn.application(54): # All commands in this block will use app_id=54. # On leaving the block `cn.send_signal("stop", 54)` is # automatically called. """ # Get a new context and add a method that will be called before the # context is removed from the stack. context = self(app_id=app_id) context.before_close(lambda: self.send_signal("stop")) return context
python
def application(self, app_id): """Update the context to use the given application ID and stop the application when done. For example:: with cn.application(54): # All commands in this block will use app_id=54. # On leaving the block `cn.send_signal("stop", 54)` is # automatically called. """ # Get a new context and add a method that will be called before the # context is removed from the stack. context = self(app_id=app_id) context.before_close(lambda: self.send_signal("stop")) return context
[ "def", "application", "(", "self", ",", "app_id", ")", ":", "# Get a new context and add a method that will be called before the", "# context is removed from the stack.", "context", "=", "self", "(", "app_id", "=", "app_id", ")", "context", ".", "before_close", "(", "lambda", ":", "self", ".", "send_signal", "(", "\"stop\"", ")", ")", "return", "context" ]
Update the context to use the given application ID and stop the application when done. For example:: with cn.application(54): # All commands in this block will use app_id=54. # On leaving the block `cn.send_signal("stop", 54)` is # automatically called.
[ "Update", "the", "context", "to", "use", "the", "given", "application", "ID", "and", "stop", "the", "application", "when", "done", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L443-L458
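Expanding the docstring's example into a fuller sketch; the raw `0x60000000` address is illustrative only (real code should obtain a block from `sdram_alloc`, as the `write` docstring below advises)::

    cn = MachineController("spinn-board")   # placeholder hostname
    with cn.application(54):
        cn.write(0x60000000, b"\x00" * 4, x=0, y=0)   # runs with app_id=54
    # Leaving the block sends the "stop" signal to app_id 54 automatically.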
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_software_version
def get_software_version(self, x=255, y=255, processor=0): """Get the software version for a given SpiNNaker core. Returns ------- :py:class:`.CoreInfo` Information about the software running on a core. """ sver = self._send_scp(x, y, processor, SCPCommands.sver) # Format the result # arg1 => p2p address, physical cpu, virtual cpu p2p = sver.arg1 >> 16 p2p_address = (p2p >> 8, p2p & 0x00ff) pcpu = (sver.arg1 >> 8) & 0xff vcpu = sver.arg1 & 0xff # arg2 => version number (parsed separately) and buffer size buffer_size = (sver.arg2 & 0xffff) software_name, version, version_labels = \ unpack_sver_response_version(sver) return CoreInfo(p2p_address, pcpu, vcpu, version, buffer_size, sver.arg3, software_name, version_labels)
python
def get_software_version(self, x=255, y=255, processor=0): """Get the software version for a given SpiNNaker core. Returns ------- :py:class:`.CoreInfo` Information about the software running on a core. """ sver = self._send_scp(x, y, processor, SCPCommands.sver) # Format the result # arg1 => p2p address, physical cpu, virtual cpu p2p = sver.arg1 >> 16 p2p_address = (p2p >> 8, p2p & 0x00ff) pcpu = (sver.arg1 >> 8) & 0xff vcpu = sver.arg1 & 0xff # arg2 => version number (parsed separately) and buffer size buffer_size = (sver.arg2 & 0xffff) software_name, version, version_labels = \ unpack_sver_response_version(sver) return CoreInfo(p2p_address, pcpu, vcpu, version, buffer_size, sver.arg3, software_name, version_labels)
[ "def", "get_software_version", "(", "self", ",", "x", "=", "255", ",", "y", "=", "255", ",", "processor", "=", "0", ")", ":", "sver", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "processor", ",", "SCPCommands", ".", "sver", ")", "# Format the result", "# arg1 => p2p address, physical cpu, virtual cpu", "p2p", "=", "sver", ".", "arg1", ">>", "16", "p2p_address", "=", "(", "p2p", ">>", "8", ",", "p2p", "&", "0x00ff", ")", "pcpu", "=", "(", "sver", ".", "arg1", ">>", "8", ")", "&", "0xff", "vcpu", "=", "sver", ".", "arg1", "&", "0xff", "# arg2 => version number (parsed separately) and buffer size", "buffer_size", "=", "(", "sver", ".", "arg2", "&", "0xffff", ")", "software_name", ",", "version", ",", "version_labels", "=", "unpack_sver_response_version", "(", "sver", ")", "return", "CoreInfo", "(", "p2p_address", ",", "pcpu", ",", "vcpu", ",", "version", ",", "buffer_size", ",", "sver", ".", "arg3", ",", "software_name", ",", "version_labels", ")" ]
Get the software version for a given SpiNNaker core. Returns ------- :py:class:`.CoreInfo` Information about the software running on a core.
[ "Get", "the", "software", "version", "for", "a", "given", "SpiNNaker", "core", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L461-L485
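A sketch of reading the returned :py:class:`.CoreInfo`, using only attribute names that appear elsewhere in this file (`version_string`, `position`, `buffer_size`); `mc` is assumed to be a booted MachineController::

    info = mc.get_software_version(255, 255, 0)
    print(info.version_string)   # software name/version text
    print(info.position)         # (x, y) of the chip that booted the machine
    print(info.buffer_size)      # the value cached as scp_data_length above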
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_ip_address
def get_ip_address(self, x, y): """Get the IP address of a particular SpiNNaker chip's Ethernet link. Returns ------- str or None The IPv4 address (as a string) of the chip's Ethernet link or None if the chip does not have an Ethernet connection or the link is currently down. """ chip_info = self.get_chip_info(x=x, y=y) return chip_info.ip_address if chip_info.ethernet_up else None
python
def get_ip_address(self, x, y): """Get the IP address of a particular SpiNNaker chip's Ethernet link. Returns ------- str or None The IPv4 address (as a string) of the chip's Ethernet link or None if the chip does not have an Ethernet connection or the link is currently down. """ chip_info = self.get_chip_info(x=x, y=y) return chip_info.ip_address if chip_info.ethernet_up else None
[ "def", "get_ip_address", "(", "self", ",", "x", ",", "y", ")", ":", "chip_info", "=", "self", ".", "get_chip_info", "(", "x", "=", "x", ",", "y", "=", "y", ")", "return", "chip_info", ".", "ip_address", "if", "chip_info", ".", "ethernet_up", "else", "None" ]
Get the IP address of a particular SpiNNaker chip's Ethernet link. Returns ------- str or None The IPv4 address (as a string) of the chip's Ethernet link or None if the chip does not have an Ethernet connection or the link is currently down.
[ "Get", "the", "IP", "address", "of", "a", "particular", "SpiNNaker", "chip", "s", "Ethernet", "link", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L488-L499
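A small liveness check built on the method above, with `mc` again a booted controller::

    ip = mc.get_ip_address(0, 0)
    if ip is None:
        print("Chip (0, 0) has no Ethernet connection, or its link is down.")
    else:
        print("Board at (0, 0) answers on", ip)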
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.write
def write(self, address, data, x, y, p=0): """Write a bytestring to an address in memory. It is strongly encouraged to only read and write to blocks of memory allocated using :py:meth:`.sdram_alloc`. Additionally, :py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap read/write access to memory with a file-like interface and prevent accidental access to areas outside the allocated block. Parameters ---------- address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands. """ # Call the SCPConnection to perform the write on our behalf connection = self._get_connection(x, y) return connection.write(self.scp_data_length, self.scp_window_size, x, y, p, address, data)
python
def write(self, address, data, x, y, p=0): """Write a bytestring to an address in memory. It is strongly encouraged to only read and write to blocks of memory allocated using :py:meth:`.sdram_alloc`. Additionally, :py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap read/write access to memory with a file-like interface and prevent accidental access to areas outside the allocated block. Parameters ---------- address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands. """ # Call the SCPConnection to perform the write on our behalf connection = self._get_connection(x, y) return connection.write(self.scp_data_length, self.scp_window_size, x, y, p, address, data)
[ "def", "write", "(", "self", ",", "address", ",", "data", ",", "x", ",", "y", ",", "p", "=", "0", ")", ":", "# Call the SCPConnection to perform the write on our behalf", "connection", "=", "self", ".", "_get_connection", "(", "x", ",", "y", ")", "return", "connection", ".", "write", "(", "self", ".", "scp_data_length", ",", "self", ".", "scp_window_size", ",", "x", ",", "y", ",", "p", ",", "address", ",", "data", ")" ]
Write a bytestring to an address in memory. It is strongly encouraged to only read and write to blocks of memory allocated using :py:meth:`.sdram_alloc`. Additionally, :py:meth:`.sdram_alloc_as_filelike` can be used to safely wrap read/write access to memory with a file-like interface and prevent accidental access to areas outside the allocated block. Parameters ---------- address : int The address at which to start writing the data. Addresses are given within the address space of a SpiNNaker core. See the SpiNNaker datasheet for more information. data : :py:class:`bytes` Data to write into memory. Writes are automatically broken into a sequence of SCP write commands.
[ "Write", "a", "bytestring", "to", "an", "address", "in", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L502-L524
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.read
def read(self, address, length_bytes, x, y, p=0): """Read a bytestring from an address in memory. Parameters ---------- address : int The address at which to start reading the data. length_bytes : int The number of bytes to read from memory. Large reads are transparently broken into multiple SCP read commands. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring. """ # Call the SCPConnection to perform the read on our behalf connection = self._get_connection(x, y) return connection.read(self.scp_data_length, self.scp_window_size, x, y, p, address, length_bytes)
python
def read(self, address, length_bytes, x, y, p=0): """Read a bytestring from an address in memory. Parameters ---------- address : int The address at which to start reading the data. length_bytes : int The number of bytes to read from memory. Large reads are transparently broken into multiple SCP read commands. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring. """ # Call the SCPConnection to perform the read on our behalf connection = self._get_connection(x, y) return connection.read(self.scp_data_length, self.scp_window_size, x, y, p, address, length_bytes)
[ "def", "read", "(", "self", ",", "address", ",", "length_bytes", ",", "x", ",", "y", ",", "p", "=", "0", ")", ":", "# Call the SCPConnection to perform the read on our behalf", "connection", "=", "self", ".", "_get_connection", "(", "x", ",", "y", ")", "return", "connection", ".", "read", "(", "self", ".", "scp_data_length", ",", "self", ".", "scp_window_size", ",", "x", ",", "y", ",", "p", ",", "address", ",", "length_bytes", ")" ]
Read a bytestring from an address in memory. Parameters ---------- address : int The address at which to start reading the data. length_bytes : int The number of bytes to read from memory. Large reads are transparently broken into multiple SCP read commands. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring.
[ "Read", "a", "bytestring", "from", "an", "address", "in", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L527-L546
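A round trip through the `write`/`read` pair above. As both docstrings advise, the block is obtained from `sdram_alloc`; its exact signature is an assumption here (`sdram_alloc(size, x=..., y=...)`), and the hostname is a placeholder::

    mc = MachineController("spinn-board")
    addr = mc.sdram_alloc(64, x=0, y=0)   # assumed signature
    payload = b"rig!" * 16
    mc.write(addr, payload, 0, 0)
    assert mc.read(addr, len(payload), 0, 0) == payload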
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.write_across_link
def write_across_link(self, address, data, x, y, link): """Write a bytestring to an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'POKE' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start writing the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. data : :py:class:`bytes` Data to write into memory. Must be a whole number of words in length. Large writes are automatically broken into a sequence of SCP link-write commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the write will be performed. link : :py:class:`rig.links.Links` The link down which the write should be sent. """ if address % 4: raise ValueError("Addresses must be word-aligned.") if len(data) % 4: raise ValueError("Data must be a whole number of words.") length_bytes = len(data) cur_byte = 0 # Write the requested data, one SCP packet worth at a time while length_bytes > 0: to_write = min(length_bytes, (self.scp_data_length & ~0b11)) cur_data = data[cur_byte:cur_byte + to_write] self._send_scp(x, y, 0, SCPCommands.link_write, arg1=address, arg2=to_write, arg3=int(link), data=cur_data, expected_args=0) # Move to the next block to write address += to_write cur_byte += to_write length_bytes -= to_write
python
def write_across_link(self, address, data, x, y, link): """Write a bytestring to an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'POKE' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start writing the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. data : :py:class:`bytes` Data to write into memory. Must be a whole number of words in length. Large writes are automatically broken into a sequence of SCP link-write commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the write will be performed. link : :py:class:`rig.links.Links` The link down which the write should be sent. """ if address % 4: raise ValueError("Addresses must be word-aligned.") if len(data) % 4: raise ValueError("Data must be a whole number of words.") length_bytes = len(data) cur_byte = 0 # Write the requested data, one SCP packet worth at a time while length_bytes > 0: to_write = min(length_bytes, (self.scp_data_length & ~0b11)) cur_data = data[cur_byte:cur_byte + to_write] self._send_scp(x, y, 0, SCPCommands.link_write, arg1=address, arg2=to_write, arg3=int(link), data=cur_data, expected_args=0) # Move to the next block to write address += to_write cur_byte += to_write length_bytes -= to_write
[ "def", "write_across_link", "(", "self", ",", "address", ",", "data", ",", "x", ",", "y", ",", "link", ")", ":", "if", "address", "%", "4", ":", "raise", "ValueError", "(", "\"Addresses must be word-aligned.\"", ")", "if", "len", "(", "data", ")", "%", "4", ":", "raise", "ValueError", "(", "\"Data must be a whole number of words.\"", ")", "length_bytes", "=", "len", "(", "data", ")", "cur_byte", "=", "0", "# Write the requested data, one SCP packet worth at a time", "while", "length_bytes", ">", "0", ":", "to_write", "=", "min", "(", "length_bytes", ",", "(", "self", ".", "scp_data_length", "&", "~", "0b11", ")", ")", "cur_data", "=", "data", "[", "cur_byte", ":", "cur_byte", "+", "to_write", "]", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "link_write", ",", "arg1", "=", "address", ",", "arg2", "=", "to_write", ",", "arg3", "=", "int", "(", "link", ")", ",", "data", "=", "cur_data", ",", "expected_args", "=", "0", ")", "# Move to the next block to write", "address", "+=", "to_write", "cur_byte", "+=", "to_write", "length_bytes", "-=", "to_write" ]
Write a bytestring to an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'POKE' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start writing the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. data : :py:class:`bytes` Data to write into memory. Must be a whole number of words in length. Large writes are automatically broken into a sequence of SCP link-write commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the write will be performed. link : :py:class:`rig.links.Links` The link down which the write should be sent.
[ "Write", "a", "bytestring", "to", "an", "address", "in", "memory", "on", "a", "neigbouring", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L549-L600
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.read_across_link
def read_across_link(self, address, length_bytes, x, y, link): """Read a bytestring from an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'PEEK' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start reading the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. length_bytes : int The number of bytes to read from memory. Must be a multiple of four (i.e. a whole number of words). Large reads are transparently broken into multiple SCP link-read commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the read will be performed. link : :py:class:`rig.links.Links` The link down which the read should be sent. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring. """ if address % 4: raise ValueError("Addresses must be word-aligned.") if length_bytes % 4: raise ValueError("Lengths must be multiples of words.") # Prepare the buffer to receive the incoming data data = bytearray(length_bytes) mem = memoryview(data) # Read the requested data, one SCP packet worth at a time while length_bytes > 0: to_read = min(length_bytes, (self.scp_data_length & ~0b11)) response = self._send_scp(x, y, 0, SCPCommands.link_read, arg1=address, arg2=to_read, arg3=int(link), expected_args=0) # Accumulate the incoming data and advance the memoryview through # the buffer. mem[:to_read] = response.data mem = mem[to_read:] # Move to the next block to read address += to_read length_bytes -= to_read return bytes(data)
python
def read_across_link(self, address, length_bytes, x, y, link): """Read a bytestring from an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'PEEK' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start reading the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. length_bytes : int The number of bytes to read from memory. Must be a multiple of four (i.e. a whole number of words). Large reads are transparently broken into multiple SCP link-read commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the read will be performed. link : :py:class:`rig.links.Links` The link down which the read should be sent. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring. """ if address % 4: raise ValueError("Addresses must be word-aligned.") if length_bytes % 4: raise ValueError("Lengths must be multiples of words.") # Prepare the buffer to receive the incoming data data = bytearray(length_bytes) mem = memoryview(data) # Read the requested data, one SCP packet worth at a time while length_bytes > 0: to_read = min(length_bytes, (self.scp_data_length & ~0b11)) response = self._send_scp(x, y, 0, SCPCommands.link_read, arg1=address, arg2=to_read, arg3=int(link), expected_args=0) # Accumulate the incoming data and advance the memoryview through # the buffer. mem[:to_read] = response.data mem = mem[to_read:] # Move to the next block to read address += to_read length_bytes -= to_read return bytes(data)
[ "def", "read_across_link", "(", "self", ",", "address", ",", "length_bytes", ",", "x", ",", "y", ",", "link", ")", ":", "if", "address", "%", "4", ":", "raise", "ValueError", "(", "\"Addresses must be word-aligned.\"", ")", "if", "length_bytes", "%", "4", ":", "raise", "ValueError", "(", "\"Lengths must be multiples of words.\"", ")", "# Prepare the buffer to receive the incoming data", "data", "=", "bytearray", "(", "length_bytes", ")", "mem", "=", "memoryview", "(", "data", ")", "# Read the requested data, one SCP packet worth at a time", "while", "length_bytes", ">", "0", ":", "to_read", "=", "min", "(", "length_bytes", ",", "(", "self", ".", "scp_data_length", "&", "~", "0b11", ")", ")", "response", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "link_read", ",", "arg1", "=", "address", ",", "arg2", "=", "to_read", ",", "arg3", "=", "int", "(", "link", ")", ",", "expected_args", "=", "0", ")", "# Accumulate the incoming data and advance the memoryview through", "# the buffer.", "mem", "[", ":", "to_read", "]", "=", "response", ".", "data", "mem", "=", "mem", "[", "to_read", ":", "]", "# Move to the next block to read", "address", "+=", "to_read", "length_bytes", "-=", "to_read", "return", "bytes", "(", "data", ")" ]
Read a bytestring from an address in memory on a neighbouring chip. .. warning:: This function is intended for low-level debug use only and is not optimised for performance nor intended for more general use. This method instructs a monitor processor to send 'PEEK' nearest-neighbour packets to a neighbouring chip. These packets are handled directly by the SpiNNaker router in the neighbouring chip, potentially allowing advanced debug or recovery of a chip rendered otherwise unreachable. Parameters ---------- address : int The address at which to start reading the data. Only addresses in the system-wide address map may be accessed. Addresses must be word aligned. length_bytes : int The number of bytes to read from memory. Must be a multiple of four (i.e. a whole number of words). Large reads are transparently broken into multiple SCP link-read commands. x : int y : int The coordinates of the chip from which the command will be sent, *not* the coordinates of the chip on which the read will be performed. link : :py:class:`rig.links.Links` The link down which the read should be sent. Returns ------- :py:class:`bytes` The data is read back from memory as a bytestring.
[ "Read", "a", "bytestring", "from", "an", "address", "in", "memory", "on", "a", "neigbouring", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L603-L667
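Both link methods above validate word alignment and then move data one SCP payload at a time, rounding the per-packet limit down to a whole number of words with `& ~0b11`. The core loop in isolation, with an example payload limit::

    scp_data_length = 256             # example per-packet payload limit (bytes)
    chunk = scp_data_length & ~0b11   # largest whole-word chunk

    address, remaining = 0x60000000, 1000
    if address % 4 or remaining % 4:
        raise ValueError("Link transfers must be word-aligned whole words.")
    while remaining > 0:
        step = min(remaining, chunk)
        # ... one SCP link_read/link_write of `step` bytes would go here ...
        address += step
        remaining -= step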
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.read_struct_field
def read_struct_field(self, struct_name, field_name, x, y, p=0): """Read the value out of a struct maintained by SARK. This method is particularly useful for reading fields from the ``sv`` struct which, for example, holds information about system status. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to read from, e.g., `"sv"` field_name : string Name of the field to read, e.g., `"eth_addr"` Returns ------- value The value returned is unpacked given the struct specification. Currently arrays are returned as tuples, e.g.:: # Returns a 20-tuple. cn.read_struct_field("sv", "status_map") # Fails cn.read_struct_field("sv", "status_map[1]") """ # Look up the struct and field field, address, pack_chars = \ self._get_struct_field_and_address(struct_name, field_name) length = struct.calcsize(pack_chars) # Perform the read data = self.read(address, length, x, y, p) # Unpack the data unpacked = struct.unpack(pack_chars, data) if field.length == 1: return unpacked[0] else: return unpacked
python
def read_struct_field(self, struct_name, field_name, x, y, p=0): """Read the value out of a struct maintained by SARK. This method is particularly useful for reading fields from the ``sv`` struct which, for example, holds information about system status. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to read from, e.g., `"sv"` field_name : string Name of the field to read, e.g., `"eth_addr"` Returns ------- value The value returned is unpacked given the struct specification. Currently arrays are returned as tuples, e.g.:: # Returns a 20-tuple. cn.read_struct_field("sv", "status_map") # Fails cn.read_struct_field("sv", "status_map[1]") """ # Look up the struct and field field, address, pack_chars = \ self._get_struct_field_and_address(struct_name, field_name) length = struct.calcsize(pack_chars) # Perform the read data = self.read(address, length, x, y, p) # Unpack the data unpacked = struct.unpack(pack_chars, data) if field.length == 1: return unpacked[0] else: return unpacked
[ "def", "read_struct_field", "(", "self", ",", "struct_name", ",", "field_name", ",", "x", ",", "y", ",", "p", "=", "0", ")", ":", "# Look up the struct and field", "field", ",", "address", ",", "pack_chars", "=", "self", ".", "_get_struct_field_and_address", "(", "struct_name", ",", "field_name", ")", "length", "=", "struct", ".", "calcsize", "(", "pack_chars", ")", "# Perform the read", "data", "=", "self", ".", "read", "(", "address", ",", "length", ",", "x", ",", "y", ",", "p", ")", "# Unpack the data", "unpacked", "=", "struct", ".", "unpack", "(", "pack_chars", ",", "data", ")", "if", "field", ".", "length", "==", "1", ":", "return", "unpacked", "[", "0", "]", "else", ":", "return", "unpacked" ]
Read the value out of a struct maintained by SARK. This method is particularly useful for reading fields from the ``sv`` struct which, for example, holds information about system status. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to read from, e.g., `"sv"` field_name : string Name of the field to read, e.g., `"eth_addr"` Returns ------- value The value returned is unpacked given the struct specification. Currently arrays are returned as tuples, e.g.:: # Returns a 20-tuple. cn.read_struct_field("sv", "status_map") # Fails cn.read_struct_field("sv", "status_map[1]")
[ "Read", "the", "value", "out", "of", "a", "struct", "maintained", "by", "SARK", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L677-L718
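A minimal usage sketch for ``read_struct_field``, assuming ``mc`` is a :py:class:`MachineController` connected to a booted machine (the hostname below is a placeholder)::

    from rig.machine_control import MachineController

    mc = MachineController("spinnaker-hostname")  # placeholder hostname

    # Scalar fields come back as a single unpacked value.
    iobuf_size = mc.read_struct_field("sv", "iobuf_size", x=0, y=0)

    # Array fields come back as tuples, as the docstring notes.
    status_map = mc.read_struct_field("sv", "status_map", x=0, y=0)
    print(iobuf_size, len(status_map))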
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.write_struct_field
def write_struct_field(self, struct_name, field_name, values, x, y, p=0): """Write a value into a struct. This method is particularly useful for writing values into the ``sv`` struct which contains some configuration data. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to write to, e.g., `"sv"` field_name : string Name of the field to write, e.g., `"random"` values : Value(s) to be written into the field. .. warning:: Fields which are arrays must currently be written in their entirety. """ # Look up the struct and field field, address, pack_chars = \ self._get_struct_field_and_address(struct_name, field_name) if field.length != 1: assert len(values) == field.length data = struct.pack(pack_chars, *values) else: data = struct.pack(pack_chars, values) # Perform the write self.write(address, data, x, y, p)
python
def write_struct_field(self, struct_name, field_name, values, x, y, p=0): """Write a value into a struct. This method is particularly useful for writing values into the ``sv`` struct which contains some configuration data. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to write to, e.g., `"sv"` field_name : string Name of the field to write, e.g., `"random"` values : Value(s) to be written into the field. .. warning:: Fields which are arrays must currently be written in their entirety. """ # Look up the struct and field field, address, pack_chars = \ self._get_struct_field_and_address(struct_name, field_name) if field.length != 1: assert len(values) == field.length data = struct.pack(pack_chars, *values) else: data = struct.pack(pack_chars, values) # Perform the write self.write(address, data, x, y, p)
[ "def", "write_struct_field", "(", "self", ",", "struct_name", ",", "field_name", ",", "values", ",", "x", ",", "y", ",", "p", "=", "0", ")", ":", "# Look up the struct and field", "field", ",", "address", ",", "pack_chars", "=", "self", ".", "_get_struct_field_and_address", "(", "struct_name", ",", "field_name", ")", "if", "field", ".", "length", "!=", "1", ":", "assert", "len", "(", "values", ")", "==", "field", ".", "length", "data", "=", "struct", ".", "pack", "(", "pack_chars", ",", "*", "values", ")", "else", ":", "data", "=", "struct", ".", "pack", "(", "pack_chars", ",", "values", ")", "# Perform the write", "self", ".", "write", "(", "address", ",", "data", ",", "x", ",", "y", ",", "p", ")" ]
Write a value into a struct. This method is particularly useful for writing values into the ``sv`` struct which contains some configuration data. See ``sark.h`` for details. Parameters ---------- struct_name : string Name of the struct to write to, e.g., `"sv"` field_name : string Name of the field to write, e.g., `"random"` values : Value(s) to be written into the field. .. warning:: Fields which are arrays must currently be written in their entirety.
[ "Write", "a", "value", "into", "a", "struct", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L721-L752
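A matching sketch for the write direction, reusing ``mc`` from the previous example; ``sv.random`` is the scalar field named in the docstring above::

    # Scalar field: pass a single value.
    mc.write_struct_field("sv", "random", 0x12345678, x=0, y=0)

    # Array fields must be written in their entirety (see the warning
    # above), so `values` must be a sequence of exactly `field.length`
    # items.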
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._get_vcpu_field_and_address
def _get_vcpu_field_and_address(self, field_name, x, y, p): """Get the field and address for a VCPU struct field.""" vcpu_struct = self.structs[b"vcpu"] field = vcpu_struct[six.b(field_name)] address = (self.read_struct_field("sv", "vcpu_base", x, y) + vcpu_struct.size * p) + field.offset pack_chars = b"<" + field.pack_chars return field, address, pack_chars
python
def _get_vcpu_field_and_address(self, field_name, x, y, p): """Get the field and address for a VCPU struct field.""" vcpu_struct = self.structs[b"vcpu"] field = vcpu_struct[six.b(field_name)] address = (self.read_struct_field("sv", "vcpu_base", x, y) + vcpu_struct.size * p) + field.offset pack_chars = b"<" + field.pack_chars return field, address, pack_chars
[ "def", "_get_vcpu_field_and_address", "(", "self", ",", "field_name", ",", "x", ",", "y", ",", "p", ")", ":", "vcpu_struct", "=", "self", ".", "structs", "[", "b\"vcpu\"", "]", "field", "=", "vcpu_struct", "[", "six", ".", "b", "(", "field_name", ")", "]", "address", "=", "(", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"vcpu_base\"", ",", "x", ",", "y", ")", "+", "vcpu_struct", ".", "size", "*", "p", ")", "+", "field", ".", "offset", "pack_chars", "=", "b\"<\"", "+", "field", ".", "pack_chars", "return", "field", ",", "address", ",", "pack_chars" ]
Get the field and address for a VCPU struct field.
[ "Get", "the", "field", "and", "address", "for", "a", "VCPU", "struct", "field", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L754-L761
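The address arithmetic above can be reproduced by hand, which makes the VCPU memory layout explicit: one fixed-size struct per core, starting at ``sv.vcpu_base``. A sketch reusing ``mc``::

    import six

    x, y, p = 0, 0, 1
    vcpu_struct = mc.structs[b"vcpu"]
    field = vcpu_struct[six.b("cpu_state")]

    # Base of the per-chip VCPU array, indexed by core number, plus
    # the field's offset within a single VCPU struct.
    vcpu_base = mc.read_struct_field("sv", "vcpu_base", x, y)
    address = vcpu_base + vcpu_struct.size * p + field.offset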
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.read_vcpu_struct_field
def read_vcpu_struct_field(self, field_name, x, y, p): """Read a value out of the VCPU struct for a specific core. Similar to :py:meth:`.read_struct_field` except this method accesses the individual VCPU struct for each core, which contains application runtime status. Parameters ---------- field_name : string Name of the field to read from the struct (e.g. `"cpu_state"`) Returns ------- value A value of the type contained in the specified struct field. """ # Get the base address of the VCPU struct for this chip, then advance # to get the correct VCPU struct for the requested core. field, address, pack_chars = \ self._get_vcpu_field_and_address(field_name, x, y, p) # Perform the read length = struct.calcsize(pack_chars) data = self.read(address, length, x, y) # Unpack and return unpacked = struct.unpack(pack_chars, data) if field.length == 1: return unpacked[0] else: # If the field is a string then truncate it and return if b"s" in pack_chars: return unpacked[0].strip(b"\x00").decode("utf-8") # Otherwise just return. (Note: at the time of writing, no fields # in the VCPU struct are of this form.) return unpacked
python
def read_vcpu_struct_field(self, field_name, x, y, p): """Read a value out of the VCPU struct for a specific core. Similar to :py:meth:`.read_struct_field` except this method accesses the individual VCPU struct for each core, which contains application runtime status. Parameters ---------- field_name : string Name of the field to read from the struct (e.g. `"cpu_state"`) Returns ------- value A value of the type contained in the specified struct field. """ # Get the base address of the VCPU struct for this chip, then advance # to get the correct VCPU struct for the requested core. field, address, pack_chars = \ self._get_vcpu_field_and_address(field_name, x, y, p) # Perform the read length = struct.calcsize(pack_chars) data = self.read(address, length, x, y) # Unpack and return unpacked = struct.unpack(pack_chars, data) if field.length == 1: return unpacked[0] else: # If the field is a string then truncate it and return if b"s" in pack_chars: return unpacked[0].strip(b"\x00").decode("utf-8") # Otherwise just return. (Note: at the time of writing, no fields # in the VCPU struct are of this form.) return unpacked
[ "def", "read_vcpu_struct_field", "(", "self", ",", "field_name", ",", "x", ",", "y", ",", "p", ")", ":", "# Get the base address of the VCPU struct for this chip, then advance", "# to get the correct VCPU struct for the requested core.", "field", ",", "address", ",", "pack_chars", "=", "self", ".", "_get_vcpu_field_and_address", "(", "field_name", ",", "x", ",", "y", ",", "p", ")", "# Perform the read", "length", "=", "struct", ".", "calcsize", "(", "pack_chars", ")", "data", "=", "self", ".", "read", "(", "address", ",", "length", ",", "x", ",", "y", ")", "# Unpack and return", "unpacked", "=", "struct", ".", "unpack", "(", "pack_chars", ",", "data", ")", "if", "field", ".", "length", "==", "1", ":", "return", "unpacked", "[", "0", "]", "else", ":", "# If the field is a string then truncate it and return", "if", "b\"s\"", "in", "pack_chars", ":", "return", "unpacked", "[", "0", "]", ".", "strip", "(", "b\"\\x00\"", ")", ".", "decode", "(", "\"utf-8\"", ")", "# Otherwise just return. (Note: at the time of writing, no fields", "# in the VCPU struct are of this form.)", "return", "unpacked" ]
Read a value out of the VCPU struct for a specific core. Similar to :py:meth:`.read_struct_field` except this method accesses the individual VCPU struct for each core, which contains application runtime status. Parameters ---------- field_name : string Name of the field to read from the struct (e.g. `"cpu_state"`) Returns ------- value A value of the type contained in the specified struct field.
[ "Read", "a", "value", "out", "of", "the", "VCPU", "struct", "for", "a", "specific", "core", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L764-L802
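As a sketch, per-core run states can be polled with this method. Note that the raw integer is returned as-is; wrapping it in ``consts.AppState`` (import path assumed from this module's own use of ``consts``) mirrors what ``get_processor_status`` does::

    from rig.machine_control import consts

    for p in range(1, 4):
        raw = mc.read_vcpu_struct_field("cpu_state", x=0, y=0, p=p)
        print(p, consts.AppState(raw))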
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.write_vcpu_struct_field
def write_vcpu_struct_field(self, field_name, value, x, y, p): """Write a value to the VCPU struct for a specific core. Parameters ---------- field_name : string Name of the field to write (e.g. `"user0"`) value : Value to write to this field. """ field, address, pack_chars = \ self._get_vcpu_field_and_address(field_name, x, y, p) # Pack the data if b"s" in pack_chars: data = struct.pack(pack_chars, value.encode('utf-8')) elif field.length == 1: data = struct.pack(pack_chars, value) else: # NOTE: At the time of writing no VCPU struct fields are of this # form. data = struct.pack(pack_chars, *value) # pragma: no cover # Perform the write self.write(address, data, x, y)
python
def write_vcpu_struct_field(self, field_name, value, x, y, p): """Write a value to the VCPU struct for a specific core. Parameters ---------- field_name : string Name of the field to write (e.g. `"user0"`) value : Value to write to this field. """ field, address, pack_chars = \ self._get_vcpu_field_and_address(field_name, x, y, p) # Pack the data if b"s" in pack_chars: data = struct.pack(pack_chars, value.encode('utf-8')) elif field.length == 1: data = struct.pack(pack_chars, value) else: # NOTE: At the time of writing no VCPU struct fields are of this # form. data = struct.pack(pack_chars, *value) # pragma: no cover # Perform the write self.write(address, data, x, y)
[ "def", "write_vcpu_struct_field", "(", "self", ",", "field_name", ",", "value", ",", "x", ",", "y", ",", "p", ")", ":", "field", ",", "address", ",", "pack_chars", "=", "self", ".", "_get_vcpu_field_and_address", "(", "field_name", ",", "x", ",", "y", ",", "p", ")", "# Pack the data", "if", "b\"s\"", "in", "pack_chars", ":", "data", "=", "struct", ".", "pack", "(", "pack_chars", ",", "value", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "field", ".", "length", "==", "1", ":", "data", "=", "struct", ".", "pack", "(", "pack_chars", ",", "value", ")", "else", ":", "# NOTE: At the time of writing no VCPU struct fields are of this", "# form.", "data", "=", "struct", ".", "pack", "(", "pack_chars", ",", "*", "value", ")", "# pragma: no cover", "# Perform the write", "self", ".", "write", "(", "address", ",", "data", ",", "x", ",", "y", ")" ]
Write a value to the VCPU struct for a specific core. Parameters ---------- field_name : string Name of the field to write (e.g. `"user0"`) value : Value to write to this field.
[ "Write", "a", "value", "to", "the", "VCPU", "struct", "for", "a", "specific", "core", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L805-L829
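A quick round-trip sketch using the ``user0`` field named in the docstring, assuming core 1 on chip (0, 0) is in use::

    mc.write_vcpu_struct_field("user0", 0xCAFE, x=0, y=0, p=1)
    assert mc.read_vcpu_struct_field("user0", x=0, y=0, p=1) == 0xCAFE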
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_processor_status
def get_processor_status(self, p, x, y): """Get the status of a given core and the application executing on it. Returns ------- :py:class:`.ProcessorStatus` Representation of the current state of the processor. """ # Get the VCPU base address = (self.read_struct_field("sv", "vcpu_base", x, y) + self.structs[b"vcpu"].size * p) # Get the VCPU data data = self.read(address, self.structs[b"vcpu"].size, x, y) # Build the kwargs that describe the current state state = { name.decode('utf-8'): struct.unpack( f.pack_chars, data[f.offset:f.offset+struct.calcsize(f.pack_chars)] )[0] for (name, f) in iteritems(self.structs[b"vcpu"].fields) } state["registers"] = [state.pop("r{}".format(i)) for i in range(8)] state["user_vars"] = [state.pop("user{}".format(i)) for i in range(4)] state["app_name"] = state["app_name"].strip(b'\x00').decode('utf-8') state["cpu_state"] = consts.AppState(state["cpu_state"]) state["rt_code"] = consts.RuntimeException(state["rt_code"]) sw_ver = state.pop("sw_ver") state["version"] = ((sw_ver >> 16) & 0xFF, (sw_ver >> 8) & 0xFF, (sw_ver >> 0) & 0xFF) for newname, oldname in [("iobuf_address", "iobuf"), ("program_state_register", "psr"), ("stack_pointer", "sp"), ("link_register", "lr"), ]: state[newname] = state.pop(oldname) state.pop("__PAD") return ProcessorStatus(**state)
python
def get_processor_status(self, p, x, y): """Get the status of a given core and the application executing on it. Returns ------- :py:class:`.ProcessorStatus` Representation of the current state of the processor. """ # Get the VCPU base address = (self.read_struct_field("sv", "vcpu_base", x, y) + self.structs[b"vcpu"].size * p) # Get the VCPU data data = self.read(address, self.structs[b"vcpu"].size, x, y) # Build the kwargs that describe the current state state = { name.decode('utf-8'): struct.unpack( f.pack_chars, data[f.offset:f.offset+struct.calcsize(f.pack_chars)] )[0] for (name, f) in iteritems(self.structs[b"vcpu"].fields) } state["registers"] = [state.pop("r{}".format(i)) for i in range(8)] state["user_vars"] = [state.pop("user{}".format(i)) for i in range(4)] state["app_name"] = state["app_name"].strip(b'\x00').decode('utf-8') state["cpu_state"] = consts.AppState(state["cpu_state"]) state["rt_code"] = consts.RuntimeException(state["rt_code"]) sw_ver = state.pop("sw_ver") state["version"] = ((sw_ver >> 16) & 0xFF, (sw_ver >> 8) & 0xFF, (sw_ver >> 0) & 0xFF) for newname, oldname in [("iobuf_address", "iobuf"), ("program_state_register", "psr"), ("stack_pointer", "sp"), ("link_register", "lr"), ]: state[newname] = state.pop(oldname) state.pop("__PAD") return ProcessorStatus(**state)
[ "def", "get_processor_status", "(", "self", ",", "p", ",", "x", ",", "y", ")", ":", "# Get the VCPU base", "address", "=", "(", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"vcpu_base\"", ",", "x", ",", "y", ")", "+", "self", ".", "structs", "[", "b\"vcpu\"", "]", ".", "size", "*", "p", ")", "# Get the VCPU data", "data", "=", "self", ".", "read", "(", "address", ",", "self", ".", "structs", "[", "b\"vcpu\"", "]", ".", "size", ",", "x", ",", "y", ")", "# Build the kwargs that describe the current state", "state", "=", "{", "name", ".", "decode", "(", "'utf-8'", ")", ":", "struct", ".", "unpack", "(", "f", ".", "pack_chars", ",", "data", "[", "f", ".", "offset", ":", "f", ".", "offset", "+", "struct", ".", "calcsize", "(", "f", ".", "pack_chars", ")", "]", ")", "[", "0", "]", "for", "(", "name", ",", "f", ")", "in", "iteritems", "(", "self", ".", "structs", "[", "b\"vcpu\"", "]", ".", "fields", ")", "}", "state", "[", "\"registers\"", "]", "=", "[", "state", ".", "pop", "(", "\"r{}\"", ".", "format", "(", "i", ")", ")", "for", "i", "in", "range", "(", "8", ")", "]", "state", "[", "\"user_vars\"", "]", "=", "[", "state", ".", "pop", "(", "\"user{}\"", ".", "format", "(", "i", ")", ")", "for", "i", "in", "range", "(", "4", ")", "]", "state", "[", "\"app_name\"", "]", "=", "state", "[", "\"app_name\"", "]", ".", "strip", "(", "b'\\x00'", ")", ".", "decode", "(", "'utf-8'", ")", "state", "[", "\"cpu_state\"", "]", "=", "consts", ".", "AppState", "(", "state", "[", "\"cpu_state\"", "]", ")", "state", "[", "\"rt_code\"", "]", "=", "consts", ".", "RuntimeException", "(", "state", "[", "\"rt_code\"", "]", ")", "sw_ver", "=", "state", ".", "pop", "(", "\"sw_ver\"", ")", "state", "[", "\"version\"", "]", "=", "(", "(", "sw_ver", ">>", "16", ")", "&", "0xFF", ",", "(", "sw_ver", ">>", "8", ")", "&", "0xFF", ",", "(", "sw_ver", ">>", "0", ")", "&", "0xFF", ")", "for", "newname", ",", "oldname", "in", "[", "(", "\"iobuf_address\"", ",", "\"iobuf\"", ")", ",", "(", "\"program_state_register\"", ",", "\"psr\"", ")", ",", "(", "\"stack_pointer\"", ",", "\"sp\"", ")", ",", "(", "\"link_register\"", ",", "\"lr\"", ")", ",", "]", ":", "state", "[", "newname", "]", "=", "state", ".", "pop", "(", "oldname", ")", "state", ".", "pop", "(", "\"__PAD\"", ")", "return", "ProcessorStatus", "(", "*", "*", "state", ")" ]
Get the status of a given core and the application executing on it. Returns ------- :py:class:`.ProcessorStatus` Representation of the current state of the processor.
[ "Get", "the", "status", "of", "a", "given", "core", "and", "the", "application", "executing", "on", "it", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L832-L871
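A usage sketch printing a few of the :py:class:`.ProcessorStatus` fields assembled above (attribute access assumes ``ProcessorStatus`` is a named tuple, as its keyword construction suggests)::

    status = mc.get_processor_status(p=1, x=0, y=0)
    print(status.app_name, status.cpu_state, status.rt_code)
    print("software version: {}.{}.{}".format(*status.version))
    print("r0-r7:", status.registers)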
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_iobuf
def get_iobuf(self, p, x, y): """Read the messages ``io_printf``'d into the ``IOBUF`` buffer on a specified core. See also: :py:meth:`.get_iobuf_bytes` which returns the undecoded raw bytes in the ``IOBUF``. Useful if the IOBUF contains non-text or non-UTF-8 encoded text. Returns ------- str The string in the ``IOBUF``, decoded from UTF-8. """ return self.get_iobuf_bytes(p, x, y).decode("utf-8")
python
def get_iobuf(self, p, x, y): """Read the messages ``io_printf``'d into the ``IOBUF`` buffer on a specified core. See also: :py:meth:`.get_iobuf_bytes` which returns the undecoded raw bytes in the ``IOBUF``. Useful if the IOBUF contains non-text or non-UTF-8 encoded text. Returns ------- str The string in the ``IOBUF``, decoded from UTF-8. """ return self.get_iobuf_bytes(p, x, y).decode("utf-8")
[ "def", "get_iobuf", "(", "self", ",", "p", ",", "x", ",", "y", ")", ":", "return", "self", ".", "get_iobuf_bytes", "(", "p", ",", "x", ",", "y", ")", ".", "decode", "(", "\"utf-8\"", ")" ]
Read the messages ``io_printf``'d into the ``IOBUF`` buffer on a specified core. See also: :py:meth:`.get_iobuf_bytes` which returns the undecoded raw bytes in the ``IOBUF``. Useful if the IOBUF contains non-text or non-UTF-8 encoded text. Returns ------- str The string in the ``IOBUF``, decoded from UTF-8.
[ "Read", "the", "messages", "io_printf", "d", "into", "the", "IOBUF", "buffer", "on", "a", "specified", "core", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L874-L887
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_iobuf_bytes
def get_iobuf_bytes(self, p, x, y): """Read raw bytes ``io_printf``'d into the ``IOBUF`` buffer on a specified core. This may be useful when the data contained in the ``IOBUF`` is not UTF-8 encoded text. See also: :py:meth:`.get_iobuf` which returns a decoded string rather than raw bytes. Returns ------- bytes The raw, undecoded string data in the buffer. """ # The IOBUF data is stored in a linked-list of blocks of memory in # SDRAM. The size of each block is given in SV iobuf_size = self.read_struct_field("sv", "iobuf_size", x, y) # The first block in the list is given in the core's VCPU field address = self.read_vcpu_struct_field("iobuf", x, y, p) iobuf = b"" while address: # The IOBUF data is preceded by a header which gives the next # address and also the length of the string in the current buffer. iobuf_data = self.read(address, iobuf_size + 16, x, y) address, time, ms, length = struct.unpack("<4I", iobuf_data[:16]) iobuf += iobuf_data[16:16 + length] return iobuf
python
def get_iobuf_bytes(self, p, x, y): """Read raw bytes ``io_printf``'d into the ``IOBUF`` buffer on a specified core. This may be useful when the data contained in the ``IOBUF`` is not UTF-8 encoded text. See also: :py:meth:`.get_iobuf` which returns a decoded string rather than raw bytes. Returns ------- bytes The raw, undecoded string data in the buffer. """ # The IOBUF data is stored in a linked-list of blocks of memory in # SDRAM. The size of each block is given in SV iobuf_size = self.read_struct_field("sv", "iobuf_size", x, y) # The first block in the list is given in the core's VCPU field address = self.read_vcpu_struct_field("iobuf", x, y, p) iobuf = b"" while address: # The IOBUF data is preceded by a header which gives the next # address and also the length of the string in the current buffer. iobuf_data = self.read(address, iobuf_size + 16, x, y) address, time, ms, length = struct.unpack("<4I", iobuf_data[:16]) iobuf += iobuf_data[16:16 + length] return iobuf
[ "def", "get_iobuf_bytes", "(", "self", ",", "p", ",", "x", ",", "y", ")", ":", "# The IOBUF data is stored in a linked-list of blocks of memory in", "# SDRAM. The size of each block is given in SV", "iobuf_size", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"iobuf_size\"", ",", "x", ",", "y", ")", "# The first block in the list is given in the core's VCPU field", "address", "=", "self", ".", "read_vcpu_struct_field", "(", "\"iobuf\"", ",", "x", ",", "y", ",", "p", ")", "iobuf", "=", "b\"\"", "while", "address", ":", "# The IOBUF data is proceeded by a header which gives the next", "# address and also the length of the string in the current buffer.", "iobuf_data", "=", "self", ".", "read", "(", "address", ",", "iobuf_size", "+", "16", ",", "x", ",", "y", ")", "address", ",", "time", ",", "ms", ",", "length", "=", "struct", ".", "unpack", "(", "\"<4I\"", ",", "iobuf_data", "[", ":", "16", "]", ")", "iobuf", "+=", "iobuf_data", "[", "16", ":", "16", "+", "length", "]", "return", "iobuf" ]
Read raw bytes ``io_printf``'d into the ``IOBUF`` buffer on a specified core. This may be useful when the data contained in the ``IOBUF`` is not UTF-8 encoded text. See also: :py:meth:`.get_iobuf` which returns a decoded string rather than raw bytes. Returns ------- bytes The raw, undecoded string data in the buffer.
[ "Read", "raw", "bytes", "io_printf", "d", "into", "the", "IOBUF", "buffer", "on", "a", "specified", "core", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L890-L921
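The 16-byte block header decoded above (``<4I``: next-block address, timestamp seconds, milliseconds, payload length) can be walked manually. This sketch re-implements the traversal as a generator, using only methods shown in this file::

    import struct

    def iter_iobuf_blocks(mc, p, x, y):
        # Yield the payload of each IOBUF block for core (x, y, p).
        block_size = mc.read_struct_field("sv", "iobuf_size", x, y)
        address = mc.read_vcpu_struct_field("iobuf", x, y, p)
        while address:  # a NULL next-pointer terminates the list
            raw = mc.read(address, block_size + 16, x, y)
            address, time, ms, length = struct.unpack("<4I", raw[:16])
            yield raw[16:16 + length]

    print(b"".join(iter_iobuf_blocks(mc, 1, 0, 0)).decode("utf-8"))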
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_router_diagnostics
def get_router_diagnostics(self, x, y): """Get the values of the router diagnostic counters. Returns ------- :py:class:`~.RouterDiagnostics` Description of the state of the counters. """ # Read the block of memory data = self.read(0xe1000300, 64, x=x, y=y) # Convert to 16 ints, then process that as the appropriate tuple type return RouterDiagnostics(*struct.unpack("<16I", data))
python
def get_router_diagnostics(self, x, y): """Get the values of the router diagnostic counters. Returns ------- :py:class:`~.RouterDiagnostics` Description of the state of the counters. """ # Read the block of memory data = self.read(0xe1000300, 64, x=x, y=y) # Convert to 16 ints, then process that as the appropriate tuple type return RouterDiagnostics(*struct.unpack("<16I", data))
[ "def", "get_router_diagnostics", "(", "self", ",", "x", ",", "y", ")", ":", "# Read the block of memory", "data", "=", "self", ".", "read", "(", "0xe1000300", ",", "64", ",", "x", "=", "x", ",", "y", "=", "y", ")", "# Convert to 16 ints, then process that as the appropriate tuple type", "return", "RouterDiagnostics", "(", "*", "struct", ".", "unpack", "(", "\"<16I\"", ",", "data", ")", ")" ]
Get the values of the router diagnostic counters. Returns ------- :py:class:`~.RouterDiagnostics` Description of the state of the counters.
[ "Get", "the", "values", "of", "the", "router", "diagnostic", "counters", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L924-L936
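Since the diagnostics are sixteen 32-bit hardware counters, a before/after diff is often the most useful view. A sketch (iterating ``_fields`` assumes ``RouterDiagnostics`` is a named tuple, as its construction from ``struct.unpack`` suggests)::

    before = mc.get_router_diagnostics(x=0, y=0)
    # ... run the experiment / send traffic here ...
    after = mc.get_router_diagnostics(x=0, y=0)

    for name, b, a in zip(before._fields, before, after):
        if a != b:
            print("{}: +{}".format(name, a - b))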
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.iptag_set
def iptag_set(self, iptag, addr, port, x, y): """Set the value of an IPTag. Forward SDP packets with the specified IP tag sent by a SpiNNaker application to a given external IP address. A :ref:`tutorial example <scp-and-sdp-tutorial>` of the use of IP Tags to send and receive SDP packets to and from applications is also available. Parameters ---------- iptag : int Index of the IPTag to set addr : string IP address or hostname that the IPTag should point at. port : int UDP port that the IPTag should direct packets to. """ # Format the IP address ip_addr = struct.pack('!4B', *map(int, socket.gethostbyname(addr).split('.'))) self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.set) << 16 | iptag, port, struct.unpack('<I', ip_addr)[0])
python
def iptag_set(self, iptag, addr, port, x, y): """Set the value of an IPTag. Forward SDP packets with the specified IP tag sent by a SpiNNaker application to a given external IP address. A :ref:`tutorial example <scp-and-sdp-tutorial>` of the use of IP Tags to send and receive SDP packets to and from applications is also available. Parameters ---------- iptag : int Index of the IPTag to set addr : string IP address or hostname that the IPTag should point at. port : int UDP port that the IPTag should direct packets to. """ # Format the IP address ip_addr = struct.pack('!4B', *map(int, socket.gethostbyname(addr).split('.'))) self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.set) << 16 | iptag, port, struct.unpack('<I', ip_addr)[0])
[ "def", "iptag_set", "(", "self", ",", "iptag", ",", "addr", ",", "port", ",", "x", ",", "y", ")", ":", "# Format the IP address", "ip_addr", "=", "struct", ".", "pack", "(", "'!4B'", ",", "*", "map", "(", "int", ",", "socket", ".", "gethostbyname", "(", "addr", ")", ".", "split", "(", "'.'", ")", ")", ")", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "iptag", ",", "int", "(", "consts", ".", "IPTagCommands", ".", "set", ")", "<<", "16", "|", "iptag", ",", "port", ",", "struct", ".", "unpack", "(", "'<I'", ",", "ip_addr", ")", "[", "0", "]", ")" ]
Set the value of an IPTag. Forward SDP packets with the specified IP tag sent by a SpiNNaker application to a given external IP address. A :ref:`tutorial example <scp-and-sdp-tutorial>` of the use of IP Tags to send and receive SDP packets to and from applications is also available. Parameters ---------- iptag : int Index of the IPTag to set addr : string IP address or hostname that the IPTag should point at. port : int UDP port that the IPTag should direct packets to.
[ "Set", "the", "value", "of", "an", "IPTag", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L939-L963
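A sketch of the receive side: point an IP tag at this host and listen on an ordinary UDP socket. The tag index, address and port are placeholders; note that forwarded datagrams still carry their SDP framing ahead of the payload::

    import socket

    mc.iptag_set(1, "192.168.240.254", 50007, x=0, y=0)  # placeholders

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", 50007))
    data, _addr = sock.recvfrom(512)
    print(len(data), "bytes received (SDP header + payload)")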
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.iptag_get
def iptag_get(self, iptag, x, y): """Get the value of an IPTag. Parameters ---------- iptag : int Index of the IPTag to get Returns ------- :py:class:`.IPTag` The IPTag returned from SpiNNaker. """ ack = self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.get) << 16 | iptag, 1, expected_args=0) return IPTag.from_bytestring(ack.data)
python
def iptag_get(self, iptag, x, y): """Get the value of an IPTag. Parameters ---------- iptag : int Index of the IPTag to get Returns ------- :py:class:`.IPTag` The IPTag returned from SpiNNaker. """ ack = self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.get) << 16 | iptag, 1, expected_args=0) return IPTag.from_bytestring(ack.data)
[ "def", "iptag_get", "(", "self", ",", "iptag", ",", "x", ",", "y", ")", ":", "ack", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "iptag", ",", "int", "(", "consts", ".", "IPTagCommands", ".", "get", ")", "<<", "16", "|", "iptag", ",", "1", ",", "expected_args", "=", "0", ")", "return", "IPTag", ".", "from_bytestring", "(", "ack", ".", "data", ")" ]
Get the value of an IPTag. Parameters ---------- iptag : int Index of the IPTag to get Returns ------- :py:class:`.IPTag` The IPTag returned from SpiNNaker.
[ "Get", "the", "value", "of", "an", "IPTag", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L966-L982
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.iptag_clear
def iptag_clear(self, iptag, x, y): """Clear an IPTag. Parameters ---------- iptag : int Index of the IPTag to clear. """ self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.clear) << 16 | iptag)
python
def iptag_clear(self, iptag, x, y): """Clear an IPTag. Parameters ---------- iptag : int Index of the IPTag to clear. """ self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.clear) << 16 | iptag)
[ "def", "iptag_clear", "(", "self", ",", "iptag", ",", "x", ",", "y", ")", ":", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "iptag", ",", "int", "(", "consts", ".", "IPTagCommands", ".", "clear", ")", "<<", "16", "|", "iptag", ")" ]
Clear an IPTag. Parameters ---------- iptag : int Index of the IPTag to clear.
[ "Clear", "an", "IPTag", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L985-L994
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.set_led
def set_led(self, led, action=None, x=Required, y=Required): """Set or toggle the state of an LED. .. note:: By default, SARK takes control of LED 0 and so changes to this LED will not typically last long enough to be useful. Parameters ---------- led : int or iterable Number of the LED or an iterable of LEDs to set the state of (0-3) action : bool or None State to set the LED to. True for on, False for off, None to toggle (default). """ if isinstance(led, int): leds = [led] else: leds = led arg1 = sum(LEDAction.from_bool(action) << (led * 2) for led in leds) self._send_scp(x, y, 0, SCPCommands.led, arg1=arg1, expected_args=0)
python
def set_led(self, led, action=None, x=Required, y=Required): """Set or toggle the state of an LED. .. note:: By default, SARK takes control of LED 0 and so changes to this LED will not typically last long enough to be useful. Parameters ---------- led : int or iterable Number of the LED or an iterable of LEDs to set the state of (0-3) action : bool or None State to set the LED to. True for on, False for off, None to toggle (default). """ if isinstance(led, int): leds = [led] else: leds = led arg1 = sum(LEDAction.from_bool(action) << (led * 2) for led in leds) self._send_scp(x, y, 0, SCPCommands.led, arg1=arg1, expected_args=0)
[ "def", "set_led", "(", "self", ",", "led", ",", "action", "=", "None", ",", "x", "=", "Required", ",", "y", "=", "Required", ")", ":", "if", "isinstance", "(", "led", ",", "int", ")", ":", "leds", "=", "[", "led", "]", "else", ":", "leds", "=", "led", "arg1", "=", "sum", "(", "LEDAction", ".", "from_bool", "(", "action", ")", "<<", "(", "led", "*", "2", ")", "for", "led", "in", "leds", ")", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "led", ",", "arg1", "=", "arg1", ",", "expected_args", "=", "0", ")" ]
Set or toggle the state of an LED. .. note:: By default, SARK takes control of LED 0 and so changes to this LED will not typically last long enough to be useful. Parameters ---------- led : int or iterable Number of the LED or an iterable of LEDs to set the state of (0-3) action : bool or None State to set the LED to. True for on, False for off, None to toggle (default).
[ "Set", "or", "toggle", "the", "state", "of", "an", "LED", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L997-L1017
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.fill
def fill(self, address, data, size, x, y, p): """Fill a region of memory with the specified byte. Parameters ---------- data : int Data with which to fill memory. If `address` and `size` are word aligned then `data` is assumed to be a word; otherwise it is assumed to be a byte. Notes ----- If the address and size are word aligned then a fast fill method will be used, otherwise a much slower write will be incurred. """ if size % 4 or address % 4: # If either the size or the address is not word aligned we have to # use `write` as `sark_word_set` can only work with words. # Convert the data into a string and then write: data = struct.pack('<B', data) * size self.write(address, data, x, y, p) else: # We can perform a fill, this will call `sark_word_set` internally. self._send_scp(x, y, p, SCPCommands.fill, address, data, size)
python
def fill(self, address, data, size, x, y, p): """Fill a region of memory with the specified byte. Parameters ---------- data : int Data with which to fill memory. If `address` and `size` are word aligned then `data` is assumed to be a word; otherwise it is assumed to be a byte. Notes ----- If the address and size are word aligned then a fast fill method will be used, otherwise a much slower write will be incurred. """ if size % 4 or address % 4: # If either the size or the address is not word aligned we have to # use `write` as `sark_word_set` can only work with words. # Convert the data into a string and then write: data = struct.pack('<B', data) * size self.write(address, data, x, y, p) else: # We can perform a fill, this will call `sark_word_set` internally. self._send_scp(x, y, p, SCPCommands.fill, address, data, size)
[ "def", "fill", "(", "self", ",", "address", ",", "data", ",", "size", ",", "x", ",", "y", ",", "p", ")", ":", "if", "size", "%", "4", "or", "address", "%", "4", ":", "# If neither the size nor the address are word aligned we have to", "# use `write` as `sark_word_set` can only work with words.", "# Convert the data into a string and then write:", "data", "=", "struct", ".", "pack", "(", "'<B'", ",", "data", ")", "*", "size", "self", ".", "write", "(", "address", ",", "data", ",", "x", ",", "y", ",", "p", ")", "else", ":", "# We can perform a fill, this will call `sark_word_set` internally.", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "p", ",", "SCPCommands", ".", "fill", ",", "address", ",", "data", ",", "size", ")" ]
Fill a region of memory with the specified byte. Parameters ---------- data : int Data with which to fill memory. If `address` and `size` are word aligned then `data` is assumed to be a word; otherwise it is assumed to be a byte. Notes ----- If the address and size are word aligned then a fast fill method will be used, otherwise a much slower write will be incurred.
[ "Fill", "a", "region", "of", "memory", "with", "the", "specified", "byte", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1020-L1043
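A sketch exercising both paths through ``fill``, with the subtlety the docstring flags: on the word-aligned fast path ``data`` is a 32-bit word, on the byte-wise fallback it is a single byte. The address comes from ``sdram_alloc`` rather than being hard-coded; the ``app_id`` is a placeholder::

    addr = mc.sdram_alloc(260, x=0, y=0, app_id=66)

    # Word-aligned address and size: fast path, `data` is a word.
    mc.fill(addr, 0xDEADBEEF, 256, x=0, y=0, p=0)

    # Size not a multiple of four: byte-wise `write`, `data` is a byte.
    mc.fill(addr + 256, 0xAB, 3, x=0, y=0, p=0)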
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.sdram_alloc
def sdram_alloc(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False): """Allocate a region of SDRAM for an application. Requests SARK to allocate a block of SDRAM for an application and raises a :py:exc:`.SpiNNakerMemoryError` on failure. This allocation will be freed when the application is stopped. Parameters ---------- size : int Number of bytes to attempt to allocate in SDRAM. tag : int 8-bit tag that can be looked up by a SpiNNaker application to discover the address of the allocated block. The tag must be unique for this ``app_id`` on this chip. Attempting to allocate two blocks on the same chip and for the same ``app_id`` will fail. If ``0`` (the default) then no tag is applied. For example, if some SDRAM is allocated with ``tag=12``, a SpiNNaker application can later discover the address using:: void *allocated_data = sark_tag_ptr(12, 0); A common convention is to allocate one block of SDRAM per application core and give each allocation the associated core number as its tag. This way the underlying SpiNNaker applications can simply call:: void *allocated_data = sark_tag_ptr(sark_core_id(), 0); clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Returns ------- int Address of the start of the region. The allocated SDRAM remains valid until either the 'stop' signal is sent to the application ID associated with the allocation or :py:meth:`.sdram_free` is called on the address returned. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, the tag is already taken or it is invalid. """ assert 0 <= tag < 256 # Construct arg1 (app_id << 8) | op code arg1 = app_id << 8 | consts.AllocOperations.alloc_sdram # Send the packet and retrieve the address rv = self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, size, tag) if rv.arg1 == 0: # Allocation failed tag_in_use = False if tag != 0: # If a tag was specified then read the allocation table to see # if the tag was already in use or whether we ran out of # memory. alloc_tags = self.read_struct_field("sv", "alloc_tag", x, y) index = (app_id << 8) + tag entry = self.read(alloc_tags + index, 4, x, y) tag_in_use = (entry != 0) raise SpiNNakerMemoryError(size, x, y, tag, tag_in_use) # Get the address address = rv.arg1 if clear: # Clear the memory if so desired self.fill(address, 0, size, x, y, 0) return address
python
def sdram_alloc(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False): """Allocate a region of SDRAM for an application. Requests SARK to allocate a block of SDRAM for an application and raises a :py:exc:`.SpiNNakerMemoryError` on failure. This allocation will be freed when the application is stopped. Parameters ---------- size : int Number of bytes to attempt to allocate in SDRAM. tag : int 8-bit tag that can be looked up by a SpiNNaker application to discover the address of the allocated block. The tag must be unique for this ``app_id`` on this chip. Attempting to allocate two blocks on the same chip and for the same ``app_id`` will fail. If ``0`` (the default) then no tag is applied. For example, if some SDRAM is allocated with ``tag=12``, a SpiNNaker application can later discover the address using:: void *allocated_data = sark_tag_ptr(12, 0); A common convention is to allocate one block of SDRAM per application core and give each allocation the associated core number as its tag. This way the underlying SpiNNaker applications can simply call:: void *allocated_data = sark_tag_ptr(sark_core_id(), 0); clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Returns ------- int Address of the start of the region. The allocated SDRAM remains valid until either the 'stop' signal is sent to the application ID associated with the allocation or :py:meth:`.sdram_free` is called on the address returned. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, the tag is already taken or it is invalid. """ assert 0 <= tag < 256 # Construct arg1 (app_id << 8) | op code arg1 = app_id << 8 | consts.AllocOperations.alloc_sdram # Send the packet and retrieve the address rv = self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, size, tag) if rv.arg1 == 0: # Allocation failed tag_in_use = False if tag != 0: # If a tag was specified then read the allocation table to see # if the tag was already in use or whether we ran out of # memory. alloc_tags = self.read_struct_field("sv", "alloc_tag", x, y) index = (app_id << 8) + tag entry = self.read(alloc_tags + index, 4, x, y) tag_in_use = (entry != 0) raise SpiNNakerMemoryError(size, x, y, tag, tag_in_use) # Get the address address = rv.arg1 if clear: # Clear the memory if so desired self.fill(address, 0, size, x, y, 0) return address
[ "def", "sdram_alloc", "(", "self", ",", "size", ",", "tag", "=", "0", ",", "x", "=", "Required", ",", "y", "=", "Required", ",", "app_id", "=", "Required", ",", "clear", "=", "False", ")", ":", "assert", "0", "<=", "tag", "<", "256", "# Construct arg1 (app_id << 8) | op code", "arg1", "=", "app_id", "<<", "8", "|", "consts", ".", "AllocOperations", ".", "alloc_sdram", "# Send the packet and retrieve the address", "rv", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "alloc_free", ",", "arg1", ",", "size", ",", "tag", ")", "if", "rv", ".", "arg1", "==", "0", ":", "# Allocation failed", "tag_in_use", "=", "False", "if", "tag", "!=", "0", ":", "# If a tag was specified then read the allocation table to see", "# if the tag was already in use or whether we ran out of", "# memory.", "alloc_tags", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"alloc_tag\"", ",", "x", ",", "y", ")", "index", "=", "(", "app_id", "<<", "8", ")", "+", "tag", "entry", "=", "self", ".", "read", "(", "alloc_tags", "+", "index", ",", "4", ",", "x", ",", "y", ")", "tag_in_use", "=", "(", "entry", "!=", "0", ")", "raise", "SpiNNakerMemoryError", "(", "size", ",", "x", ",", "y", ",", "tag", ",", "tag_in_use", ")", "# Get the address", "address", "=", "rv", ".", "arg1", "if", "clear", ":", "# Clear the memory if so desired", "self", ".", "fill", "(", "address", ",", "0", ",", "size", ",", "x", ",", "y", ",", "0", ")", "return", "address" ]
Allocate a region of SDRAM for an application. Requests SARK to allocate a block of SDRAM for an application and raises a :py:exc:`.SpiNNakerMemoryError` on failure. This allocation will be freed when the application is stopped. Parameters ---------- size : int Number of bytes to attempt to allocate in SDRAM. tag : int 8-bit tag that can be looked up by a SpiNNaker application to discover the address of the allocated block. The tag must be unique for this ``app_id`` on this chip. Attempting to allocate two blocks on the same chip and for the same ``app_id`` will fail. If ``0`` (the default) then no tag is applied. For example, if some SDRAM is allocated with ``tag=12``, a SpiNNaker application can later discover the address using:: void *allocated_data = sark_tag_ptr(12, 0); A common convention is to allocate one block of SDRAM per application core and give each allocation the associated core number as its tag. This way the underlying SpiNNaker applications can simply call:: void *allocated_data = sark_tag_ptr(sark_core_id(), 0); clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Returns ------- int Address of the start of the region. The allocated SDRAM remains valid until either the 'stop' signal is sent to the application ID associated with the allocation or :py:meth:`.sdram_free` is called on the address returned. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, the tag is already taken or it is invalid.
[ "Allocate", "a", "region", "of", "SDRAM", "for", "an", "application", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1046-L1124
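The per-core tagging convention described in the docstring, sketched from the host side; each application core can then locate its own block with ``sark_tag_ptr(sark_core_id(), 0)``. The ``app_id`` is a placeholder::

    from rig.machine_control.machine_controller import SpiNNakerMemoryError

    app_id = 66
    for core in range(1, 5):
        try:
            addr = mc.sdram_alloc(1024, tag=core, x=0, y=0,
                                  app_id=app_id, clear=True)
            print(core, hex(addr))
        except SpiNNakerMemoryError as e:
            print("allocation failed:", e)
            break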
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.sdram_alloc_as_filelike
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False): """Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like object <.MemoryIO>` which allows safe reading and writing to the block that is allocated. Returns ------- :py:class:`.MemoryIO` File-like object which allows accessing the newly allocated region of memory. For example:: >>> # Read, write and seek through the allocated memory just >>> # like a file >>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP >>> mem.write(b"Hello, world") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(5) # doctest: +SKIP b"Hello" >>> mem.read(7) # doctest: +SKIP b", world" >>> # Reads and writes are truncated to the allocated region, >>> # preventing accidental clobbering/access of memory. >>> mem.seek(0) # doctest: +SKIP >>> mem.write(b"How are you today?") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(100) # doctest: +SKIP b"How are you " See the :py:class:`.MemoryIO` class for details of other features of these file-like views of SpiNNaker's memory. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or the tag is already taken or invalid. """ # Perform the malloc start_address = self.sdram_alloc(size, tag, x, y, app_id, clear) return MemoryIO(self, x, y, start_address, start_address + size)
python
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False): """Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like object <.MemoryIO>` which allows safe reading and writing to the block that is allocated. Returns ------- :py:class:`.MemoryIO` File-like object which allows accessing the newly allocated region of memory. For example:: >>> # Read, write and seek through the allocated memory just >>> # like a file >>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP >>> mem.write(b"Hello, world") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(5) # doctest: +SKIP b"Hello" >>> mem.read(7) # doctest: +SKIP b", world" >>> # Reads and writes are truncated to the allocated region, >>> # preventing accidental clobbering/access of memory. >>> mem.seek(0) # doctest: +SKIP >>> mem.write(b"How are you today?") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(100) # doctest: +SKIP b"How are you " See the :py:class:`.MemoryIO` class for details of other features of these file-like views of SpiNNaker's memory. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or the tag is already taken or invalid. """ # Perform the malloc start_address = self.sdram_alloc(size, tag, x, y, app_id, clear) return MemoryIO(self, x, y, start_address, start_address + size)
[ "def", "sdram_alloc_as_filelike", "(", "self", ",", "size", ",", "tag", "=", "0", ",", "x", "=", "Required", ",", "y", "=", "Required", ",", "app_id", "=", "Required", ",", "clear", "=", "False", ")", ":", "# Perform the malloc", "start_address", "=", "self", ".", "sdram_alloc", "(", "size", ",", "tag", ",", "x", ",", "y", ",", "app_id", ",", "clear", ")", "return", "MemoryIO", "(", "self", ",", "x", ",", "y", ",", "start_address", ",", "start_address", "+", "size", ")" ]
Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like object <.MemoryIO>` which allows safe reading and writing to the block that is allocated. Returns ------- :py:class:`.MemoryIO` File-like object which allows accessing the newly allocated region of memory. For example:: >>> # Read, write and seek through the allocated memory just >>> # like a file >>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP >>> mem.write(b"Hello, world") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(5) # doctest: +SKIP b"Hello" >>> mem.read(7) # doctest: +SKIP b", world" >>> # Reads and writes are truncated to the allocated region, >>> # preventing accidental clobbering/access of memory. >>> mem.seek(0) # doctest: +SKIP >>> mem.write(b"How are you today?") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(100) # doctest: +SKIP b"How are you " See the :py:class:`.MemoryIO` class for details of other features of these file-like views of SpiNNaker's memory. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or the tag is already taken or invalid.
[ "Like", ":", "py", ":", "meth", ":", ".", "sdram_alloc", "but", "returns", "a", ":", "py", ":", "class", ":", "file", "-", "like", "object", "<", ".", "MemoryIO", ">", "which", "allows", "safe", "reading", "and", "writing", "to", "the", "block", "that", "is", "allocated", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1127-L1170
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.sdram_free
def sdram_free(self, ptr, x=Required, y=Required): """Free an allocated block of memory in SDRAM. .. note:: All unfreed SDRAM allocations associated with an application are automatically freed when the 'stop' signal is sent (e.g. after leaving a :py:meth:`.application` block). As such, this method is only useful when specific blocks are to be freed while retaining others. Parameters ---------- ptr : int Address of the block of memory to free. """ self._send_scp(x, y, 0, SCPCommands.alloc_free, consts.AllocOperations.free_sdram_by_ptr, ptr)
python
def sdram_free(self, ptr, x=Required, y=Required): """Free an allocated block of memory in SDRAM. .. note:: All unfreed SDRAM allocations associated with an application are automatically freed when the 'stop' signal is sent (e.g. after leaving a :py:meth:`.application` block). As such, this method is only useful when specific blocks are to be freed while retaining others. Parameters ---------- ptr : int Address of the block of memory to free. """ self._send_scp(x, y, 0, SCPCommands.alloc_free, consts.AllocOperations.free_sdram_by_ptr, ptr)
[ "def", "sdram_free", "(", "self", ",", "ptr", ",", "x", "=", "Required", ",", "y", "=", "Required", ")", ":", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "alloc_free", ",", "consts", ".", "AllocOperations", ".", "free_sdram_by_ptr", ",", "ptr", ")" ]
Free an allocated block of memory in SDRAM. .. note:: All unfreed SDRAM allocations associated with an application are automatically freed when the 'stop' signal is sent (e.g. after leaving a :py:meth:`.application` block). As such, this method is only useful when specific blocks are to be freed while retaining others. Parameters ---------- ptr : int Address of the block of memory to free.
[ "Free", "an", "allocated", "block", "of", "memory", "in", "SDRAM", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1173-L1190
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._get_next_nn_id
def _get_next_nn_id(self): """Get the next nearest neighbour ID.""" self._nn_id = self._nn_id + 1 if self._nn_id < 126 else 1 return self._nn_id * 2
python
def _get_next_nn_id(self): """Get the next nearest neighbour ID.""" self._nn_id = self._nn_id + 1 if self._nn_id < 126 else 1 return self._nn_id * 2
[ "def", "_get_next_nn_id", "(", "self", ")", ":", "self", ".", "_nn_id", "=", "self", ".", "_nn_id", "+", "1", "if", "self", ".", "_nn_id", "<", "126", "else", "1", "return", "self", ".", "_nn_id", "*", "2" ]
Get the next nearest neighbour ID.
[ "Get", "the", "next", "nearest", "neighbour", "ID", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1192-L1195
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._send_ffs
def _send_ffs(self, pid, n_blocks, fr): """Send a flood-fill start packet. The cores and regions that the application should be loaded to will be specified by a stream of flood-fill core select packets (FFCS). """ sfr = fr | (1 << 31) self._send_scp( 255, 255, 0, SCPCommands.nearest_neighbour_packet, (NNCommands.flood_fill_start << 24) | (pid << 16) | (n_blocks << 8), 0x0, sfr )
python
def _send_ffs(self, pid, n_blocks, fr): """Send a flood-fill start packet. The cores and regions that the application should be loaded to will be specified by a stream of flood-fill core select packets (FFCS). """ sfr = fr | (1 << 31) self._send_scp( 255, 255, 0, SCPCommands.nearest_neighbour_packet, (NNCommands.flood_fill_start << 24) | (pid << 16) | (n_blocks << 8), 0x0, sfr )
[ "def", "_send_ffs", "(", "self", ",", "pid", ",", "n_blocks", ",", "fr", ")", ":", "sfr", "=", "fr", "|", "(", "1", "<<", "31", ")", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "nearest_neighbour_packet", ",", "(", "NNCommands", ".", "flood_fill_start", "<<", "24", ")", "|", "(", "pid", "<<", "16", ")", "|", "(", "n_blocks", "<<", "8", ")", ",", "0x0", ",", "sfr", ")" ]
Send a flood-fill start packet. The cores and regions that the application should be loaded to will be specified by a stream of flood-fill core select packets (FFCS).
[ "Send", "a", "flood", "-", "fill", "start", "packet", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1197-L1208
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._send_ffcs
def _send_ffcs(self, region, core_mask, fr): """Send a flood-fill core select packet. This packet was added in a patched SC&MP 1.34*. Each packet includes a region and a core mask; every core that is in the region ORs the core mask with a mask it stores locally. On receiving a flood-fill end (FFE) packet the application is loaded to the cores specified by this composed core mask. FFCS packets should be sent in ascending order of `(region << 18) | core`. * See https://bitbucket.org/mundya/scamp/branch/new-ff2 """ arg1 = (NNCommands.flood_fill_core_select << 24) | core_mask arg2 = region self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet, arg1, arg2, fr)
python
def _send_ffcs(self, region, core_mask, fr): """Send a flood-fill core select packet. This packet was added in a patched SC&MP 1.34*. Each packet includes a region and a core mask; every core that is in the region ORs the core mask with a mask it stores locally. On receiving a flood-fill end (FFE) packet the application is loaded to the cores specified by this composed core mask. FFCS packets should be sent in ascending order of `(region << 18) | core`. * See https://bitbucket.org/mundya/scamp/branch/new-ff2 """ arg1 = (NNCommands.flood_fill_core_select << 24) | core_mask arg2 = region self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet, arg1, arg2, fr)
[ "def", "_send_ffcs", "(", "self", ",", "region", ",", "core_mask", ",", "fr", ")", ":", "arg1", "=", "(", "NNCommands", ".", "flood_fill_core_select", "<<", "24", ")", "|", "core_mask", "arg2", "=", "region", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "nearest_neighbour_packet", ",", "arg1", ",", "arg2", ",", "fr", ")" ]
Send a flood-fill core select packet. This packet was added in a patched SC&MP 1.34*. Each packet includes a region and a core mask; every core that is in the region ORs the core mask with a mask it stores locally. On receiving a flood-fill end (FFE) packet the application is loaded to the cores specified by this composed core mask. FFCS packets should be sent in ascending order of `(region << 18) | core`. * See https://bitbucket.org/mundya/scamp/branch/new-ff2
[ "Send", "a", "flood", "-", "fill", "core", "select", "packet", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1210-L1227
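The ordering constraint in the docstring can be expressed directly as a sort key. A small sketch with made-up (region, core) pairs:

# FFCS packets must be emitted in ascending (region << 18) | core order.
def ffcs_sort_key(region, core):
    return (region << 18) | core

packets = [(2, 1), (1, 17), (1, 3)]   # made-up (region, core) pairs
packets.sort(key=lambda rc: ffcs_sort_key(*rc))
assert packets == [(1, 3), (1, 17), (2, 1)]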
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._send_ffd
def _send_ffd(self, pid, aplx_data, address): """Send flood-fill data packets.""" block = 0 pos = 0 aplx_size = len(aplx_data) while pos < aplx_size: # Get the next block of data, send and progress the block # counter and the address data = aplx_data[pos:pos + self.scp_data_length] data_size = len(data) size = data_size // 4 - 1 arg1 = (NNConstants.forward << 24 | NNConstants.retry << 16 | pid) arg2 = (block << 16) | (size << 8) self._send_scp(255, 255, 0, SCPCommands.flood_fill_data, arg1, arg2, address, data) # Increment the address and the block counter block += 1 address += data_size pos += data_size
python
def _send_ffd(self, pid, aplx_data, address): """Send flood-fill data packets.""" block = 0 pos = 0 aplx_size = len(aplx_data) while pos < aplx_size: # Get the next block of data, send and progress the block # counter and the address data = aplx_data[pos:pos + self.scp_data_length] data_size = len(data) size = data_size // 4 - 1 arg1 = (NNConstants.forward << 24 | NNConstants.retry << 16 | pid) arg2 = (block << 16) | (size << 8) self._send_scp(255, 255, 0, SCPCommands.flood_fill_data, arg1, arg2, address, data) # Increment the address and the block counter block += 1 address += data_size pos += data_size
[ "def", "_send_ffd", "(", "self", ",", "pid", ",", "aplx_data", ",", "address", ")", ":", "block", "=", "0", "pos", "=", "0", "aplx_size", "=", "len", "(", "aplx_data", ")", "while", "pos", "<", "aplx_size", ":", "# Get the next block of data, send and progress the block", "# counter and the address", "data", "=", "aplx_data", "[", "pos", ":", "pos", "+", "self", ".", "scp_data_length", "]", "data_size", "=", "len", "(", "data", ")", "size", "=", "data_size", "//", "4", "-", "1", "arg1", "=", "(", "NNConstants", ".", "forward", "<<", "24", "|", "NNConstants", ".", "retry", "<<", "16", "|", "pid", ")", "arg2", "=", "(", "block", "<<", "16", ")", "|", "(", "size", "<<", "8", ")", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "flood_fill_data", ",", "arg1", ",", "arg2", ",", "address", ",", "data", ")", "# Increment the address and the block counter", "block", "+=", "1", "address", "+=", "data_size", "pos", "+=", "data_size" ]
Send flood-fill data packets.
[ "Send", "flood", "-", "fill", "data", "packets", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1229-L1250
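A standalone sketch of the chunking arithmetic in _send_ffd, assuming a 256-byte SCP payload (the real value comes from scp_data_length at runtime):

scp_data_length = 256          # assumed payload size for this sketch
aplx_data = bytes(600)         # stand-in for real APLX contents

sizes = []
pos = 0
while pos < len(aplx_data):
    data = aplx_data[pos:pos + scp_data_length]
    sizes.append(len(data) // 4 - 1)   # the size field counts words, minus one
    pos += len(data)

# 600 bytes split into 256 + 256 + 88; size fields are 63, 63 and 21
assert sizes == [63, 63, 21]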
project-rig/rig
rig/machine_control/machine_controller.py
MachineController._send_ffe
def _send_ffe(self, pid, app_id, app_flags, fr): """Send a flood-fill end packet. The cores and regions that the application should be loaded to will have been specified by a stream of flood-fill core select packets (FFCS). """ arg1 = (NNCommands.flood_fill_end << 24) | pid arg2 = (app_id << 24) | (app_flags << 18) self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet, arg1, arg2, fr)
python
def _send_ffe(self, pid, app_id, app_flags, fr): """Send a flood-fill end packet. The cores and regions that the application should be loaded to will have been specified by a stream of flood-fill core select packets (FFCS). """ arg1 = (NNCommands.flood_fill_end << 24) | pid arg2 = (app_id << 24) | (app_flags << 18) self._send_scp(255, 255, 0, SCPCommands.nearest_neighbour_packet, arg1, arg2, fr)
[ "def", "_send_ffe", "(", "self", ",", "pid", ",", "app_id", ",", "app_flags", ",", "fr", ")", ":", "arg1", "=", "(", "NNCommands", ".", "flood_fill_end", "<<", "24", ")", "|", "pid", "arg2", "=", "(", "app_id", "<<", "24", ")", "|", "(", "app_flags", "<<", "18", ")", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "nearest_neighbour_packet", ",", "arg1", ",", "arg2", ",", "fr", ")" ]
Send a flood-fill end packet. The cores and regions that the application should be loaded to will have been specified by a stream of flood-fill core select packets (FFCS).
[ "Send", "a", "flood", "-", "fill", "end", "packet", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1252-L1262
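As with the FFS packet, the FFE argument packing is clearer with example numbers; the command code and wait-flag bit below are hypothetical stand-ins for the real constants:

FLOOD_FILL_END = 15       # hypothetical stand-in for NNCommands.flood_fill_end
WAIT_FLAG = 1             # hypothetical stand-in for the AppFlags.wait bit
pid, app_id = 2, 66       # example packet ID and application ID

arg1 = (FLOOD_FILL_END << 24) | pid
arg2 = (app_id << 24) | (WAIT_FLAG << 18)   # app ID in the top byte, flags at bit 18
assert arg2 == 0x42040000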
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.flood_fill_aplx
def flood_fill_aplx(self, *args, **kwargs): """Unreliably flood-fill APLX to a set of application cores. .. note:: Most users should use the :py:meth:`.load_application` wrapper around this method which guarantees successful loading. This method can be called in either of the following ways:: flood_fill_aplx("/path/to/app.aplx", {(x, y): {core, ...}, ...}) flood_fill_aplx({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. .. warning:: The loading process is likely, but not guaranteed, to succeed. This is because the flood-fill packets used during loading are not guaranteed to arrive. The effect is that some chips may not receive the complete application binary and will silently ignore the application loading request. As a result, the user is responsible for checking that each core was successfully loaded with the correct binary. At present, the two recommended approaches to this are: * If the ``wait`` argument is given then the user should check that the correct number of application binaries reach the initial barrier (i.e., the ``wait`` state). If the number does not match the expected number of loaded cores the next approach must be used: * The user can check the process list of each chip to ensure the application was loaded into the correct set of cores. See :py:meth:`.read_vcpu_struct_field`. Parameters ---------- app_id : int wait : bool (Default: True) Should the application await the AppSignal.start signal after it has been loaded? """ # Coerce the arguments into a single form. If there are two arguments # then assume that we have filename and a map of chips and cores; # otherwise there should be ONE argument which is of the form of the # return value of `build_application_map`. application_map = {} if len(args) == 1: application_map = args[0] elif len(args) == 2: application_map = {args[0]: args[1]} else: raise TypeError( "flood_fill_aplx: accepts either 1 or 2 positional arguments: " "a map of filenames to targets OR a single filename and its" "targets" ) # Get the application ID, the context system will guarantee that this # is available app_id = kwargs.pop("app_id") flags = 0x0000 if kwargs.pop("wait"): flags |= AppFlags.wait # The forward and retry parameters fr = NNConstants.forward << 8 | NNConstants.retry # Load each APLX in turn for (aplx, targets) in iteritems(application_map): # Determine the minimum number of flood-fills that are necessary to # load the APLX. The regions and cores should be sorted into # ascending order, `compress_flood_fill_regions` ensures this is # done. fills = regions.compress_flood_fill_regions(targets) # Load the APLX data with open(aplx, "rb") as f: aplx_data = f.read() n_blocks = ((len(aplx_data) + self.scp_data_length - 1) // self.scp_data_length) # Start the flood fill for this application # Get an index for the nearest neighbour operation pid = self._get_next_nn_id() # Send the flood-fill start packet self._send_ffs(pid, n_blocks, fr) # Send the core select packets for (region, cores) in fills: self._send_ffcs(region, cores, fr) # Send the data base_address = self.read_struct_field( "sv", "sdram_sys", 255, 255) self._send_ffd(pid, aplx_data, base_address) # Send the flood-fill END packet self._send_ffe(pid, app_id, flags, fr)
python
def flood_fill_aplx(self, *args, **kwargs): """Unreliably flood-fill APLX to a set of application cores. .. note:: Most users should use the :py:meth:`.load_application` wrapper around this method which guarantees successful loading. This method can be called in either of the following ways:: flood_fill_aplx("/path/to/app.aplx", {(x, y): {core, ...}, ...}) flood_fill_aplx({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. .. warning:: The loading process is likely, but not guaranteed, to succeed. This is because the flood-fill packets used during loading are not guaranteed to arrive. The effect is that some chips may not receive the complete application binary and will silently ignore the application loading request. As a result, the user is responsible for checking that each core was successfully loaded with the correct binary. At present, the two recommended approaches to this are: * If the ``wait`` argument is given then the user should check that the correct number of application binaries reach the initial barrier (i.e., the ``wait`` state). If the number does not match the expected number of loaded cores the next approach must be used: * The user can check the process list of each chip to ensure the application was loaded into the correct set of cores. See :py:meth:`.read_vcpu_struct_field`. Parameters ---------- app_id : int wait : bool (Default: True) Should the application await the AppSignal.start signal after it has been loaded? """ # Coerce the arguments into a single form. If there are two arguments # then assume that we have filename and a map of chips and cores; # otherwise there should be ONE argument which is of the form of the # return value of `build_application_map`. application_map = {} if len(args) == 1: application_map = args[0] elif len(args) == 2: application_map = {args[0]: args[1]} else: raise TypeError( "flood_fill_aplx: accepts either 1 or 2 positional arguments: " "a map of filenames to targets OR a single filename and its" "targets" ) # Get the application ID, the context system will guarantee that this # is available app_id = kwargs.pop("app_id") flags = 0x0000 if kwargs.pop("wait"): flags |= AppFlags.wait # The forward and retry parameters fr = NNConstants.forward << 8 | NNConstants.retry # Load each APLX in turn for (aplx, targets) in iteritems(application_map): # Determine the minimum number of flood-fills that are necessary to # load the APLX. The regions and cores should be sorted into # ascending order, `compress_flood_fill_regions` ensures this is # done. fills = regions.compress_flood_fill_regions(targets) # Load the APLX data with open(aplx, "rb") as f: aplx_data = f.read() n_blocks = ((len(aplx_data) + self.scp_data_length - 1) // self.scp_data_length) # Start the flood fill for this application # Get an index for the nearest neighbour operation pid = self._get_next_nn_id() # Send the flood-fill start packet self._send_ffs(pid, n_blocks, fr) # Send the core select packets for (region, cores) in fills: self._send_ffcs(region, cores, fr) # Send the data base_address = self.read_struct_field( "sv", "sdram_sys", 255, 255) self._send_ffd(pid, aplx_data, base_address) # Send the flood-fill END packet self._send_ffe(pid, app_id, flags, fr)
[ "def", "flood_fill_aplx", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Coerce the arguments into a single form. If there are two arguments", "# then assume that we have filename and a map of chips and cores;", "# otherwise there should be ONE argument which is of the form of the", "# return value of `build_application_map`.", "application_map", "=", "{", "}", "if", "len", "(", "args", ")", "==", "1", ":", "application_map", "=", "args", "[", "0", "]", "elif", "len", "(", "args", ")", "==", "2", ":", "application_map", "=", "{", "args", "[", "0", "]", ":", "args", "[", "1", "]", "}", "else", ":", "raise", "TypeError", "(", "\"flood_fill_aplx: accepts either 1 or 2 positional arguments: \"", "\"a map of filenames to targets OR a single filename and its\"", "\"targets\"", ")", "# Get the application ID, the context system will guarantee that this", "# is available", "app_id", "=", "kwargs", ".", "pop", "(", "\"app_id\"", ")", "flags", "=", "0x0000", "if", "kwargs", ".", "pop", "(", "\"wait\"", ")", ":", "flags", "|=", "AppFlags", ".", "wait", "# The forward and retry parameters", "fr", "=", "NNConstants", ".", "forward", "<<", "8", "|", "NNConstants", ".", "retry", "# Load each APLX in turn", "for", "(", "aplx", ",", "targets", ")", "in", "iteritems", "(", "application_map", ")", ":", "# Determine the minimum number of flood-fills that are necessary to", "# load the APLX. The regions and cores should be sorted into", "# ascending order, `compress_flood_fill_regions` ensures this is", "# done.", "fills", "=", "regions", ".", "compress_flood_fill_regions", "(", "targets", ")", "# Load the APLX data", "with", "open", "(", "aplx", ",", "\"rb\"", ")", "as", "f", ":", "aplx_data", "=", "f", ".", "read", "(", ")", "n_blocks", "=", "(", "(", "len", "(", "aplx_data", ")", "+", "self", ".", "scp_data_length", "-", "1", ")", "//", "self", ".", "scp_data_length", ")", "# Start the flood fill for this application", "# Get an index for the nearest neighbour operation", "pid", "=", "self", ".", "_get_next_nn_id", "(", ")", "# Send the flood-fill start packet", "self", ".", "_send_ffs", "(", "pid", ",", "n_blocks", ",", "fr", ")", "# Send the core select packets", "for", "(", "region", ",", "cores", ")", "in", "fills", ":", "self", ".", "_send_ffcs", "(", "region", ",", "cores", ",", "fr", ")", "# Send the data", "base_address", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"sdram_sys\"", ",", "255", ",", "255", ")", "self", ".", "_send_ffd", "(", "pid", ",", "aplx_data", ",", "base_address", ")", "# Send the flood-fill END packet", "self", ".", "_send_ffe", "(", "pid", ",", "app_id", ",", "flags", ",", "fr", ")" ]
Unreliably flood-fill APLX to a set of application cores. .. note:: Most users should use the :py:meth:`.load_application` wrapper around this method which guarantees successful loading. This method can be called in either of the following ways:: flood_fill_aplx("/path/to/app.aplx", {(x, y): {core, ...}, ...}) flood_fill_aplx({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. .. warning:: The loading process is likely, but not guaranteed, to succeed. This is because the flood-fill packets used during loading are not guaranteed to arrive. The effect is that some chips may not receive the complete application binary and will silently ignore the application loading request. As a result, the user is responsible for checking that each core was successfully loaded with the correct binary. At present, the two recommended approaches to this are: * If the ``wait`` argument is given then the user should check that the correct number of application binaries reach the initial barrier (i.e., the ``wait`` state). If the number does not match the expected number of loaded cores the next approach must be used: * The user can check the process list of each chip to ensure the application was loaded into the correct set of cores. See :py:meth:`.read_vcpu_struct_field`. Parameters ---------- app_id : int wait : bool (Default: True) Should the application await the AppSignal.start signal after it has been loaded?
[ "Unreliably", "flood", "-", "fill", "APLX", "to", "a", "set", "of", "application", "cores", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1265-L1368
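A hedged usage sketch (not part of the record above): the hostname, APLX path and app_id are placeholders. Per the docstring's warning, this call alone does not verify the load, so load_application is normally preferable.

from rig.machine_control import MachineController

mc = MachineController("spinnaker-board")    # hypothetical hostname
# Unreliably flood-fill one APLX to three cores of chip (0, 0).
mc.flood_fill_aplx("network.aplx", {(0, 0): {1, 2, 3}},
                   app_id=66, wait=True)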
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.load_application
def load_application(self, *args, **kwargs): """Load an application to a set of application cores. This method guarantees that once it returns, all required cores will have been loaded. If this is not possible after a small number of attempts, a :py:exc:`.SpiNNakerLoadingError` will be raised. This method can be called in either of the following ways:: load_application("/path/to/app.aplx", {(x, y): {core, ...}, ...}) load_application({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. Parameters ---------- app_id : int wait : bool Leave the application in a wait state after successfully loading it. n_tries : int Number attempts to make to load the application. app_start_delay : float Time to pause (in seconds) after loading to ensure that the application successfully reaches the wait state before checking for success. use_count : bool If True (the default) then the targets dictionary will be assumed to represent _all_ the cores that will be loaded and a faster method to determine whether all applications have been loaded correctly will be used. If False a fallback method will be used. Raises ------ rig.machine_control.machine_controller.SpiNNakerLoadingError This exception is raised after some cores failed to load after ``n_tries`` attempts. """ # Get keyword arguments app_id = kwargs.pop("app_id") wait = kwargs.pop("wait") n_tries = kwargs.pop("n_tries") app_start_delay = kwargs.pop("app_start_delay") use_count = kwargs.pop("use_count", True) # Coerce the arguments into a single form. If there are two arguments # then assume that we have filename and a map of chips and cores; # otherwise there should be ONE argument which is of the form of the # return value of `build_application_map`. application_map = {} if len(args) == 1: application_map = args[0] elif len(args) == 2: application_map = {args[0]: args[1]} else: raise TypeError( "load_application: accepts either 1 or 2 positional arguments:" "a map of filenames to targets OR a single filename and its" "targets" ) # Count the number of cores being loaded core_count = sum( len(cores) for ts in six.itervalues(application_map) for cores in six.itervalues(ts) ) # Mark all targets as unloaded unloaded = application_map # Try to load the applications, then determine which are unloaded tries = 0 while unloaded != {} and tries <= n_tries: tries += 1 # Load all unloaded applications, then pause to ensure they reach # the wait state self.flood_fill_aplx(unloaded, app_id=app_id, wait=True) time.sleep(app_start_delay) # If running in "fast" mode then check that the correct number of # cores are in the "wait" state, if so then break out of this loop. if (use_count and core_count == self.count_cores_in_state("wait", app_id)): unloaded = {} continue # Query each target in turn to determine if it is loaded or # otherwise. If it is loaded (in the wait state) then remove it # from the unloaded list. new_unloadeds = dict() for app_name, targets in iteritems(unloaded): unloaded_targets = {} for (x, y), cores in iteritems(targets): unloaded_cores = set() for p in cores: # Read the struct value vcpu->cpu_state, if it is # anything BUT wait then we mark this core as unloaded. state = consts.AppState( self.read_vcpu_struct_field("cpu_state", x, y, p) ) if state is not consts.AppState.wait: unloaded_cores.add(p) if len(unloaded_cores) > 0: unloaded_targets[(x, y)] = unloaded_cores if len(unloaded_targets) > 0: new_unloadeds[app_name] = unloaded_targets unloaded = new_unloadeds # If there are still unloaded cores then we bail if unloaded != {}: raise SpiNNakerLoadingError(unloaded) # If not waiting then send the start signal if not wait: self.send_signal("start", app_id)
python
def load_application(self, *args, **kwargs): """Load an application to a set of application cores. This method guarantees that once it returns, all required cores will have been loaded. If this is not possible after a small number of attempts, a :py:exc:`.SpiNNakerLoadingError` will be raised. This method can be called in either of the following ways:: load_application("/path/to/app.aplx", {(x, y): {core, ...}, ...}) load_application({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. Parameters ---------- app_id : int wait : bool Leave the application in a wait state after successfully loading it. n_tries : int Number attempts to make to load the application. app_start_delay : float Time to pause (in seconds) after loading to ensure that the application successfully reaches the wait state before checking for success. use_count : bool If True (the default) then the targets dictionary will be assumed to represent _all_ the cores that will be loaded and a faster method to determine whether all applications have been loaded correctly will be used. If False a fallback method will be used. Raises ------ rig.machine_control.machine_controller.SpiNNakerLoadingError This exception is raised after some cores failed to load after ``n_tries`` attempts. """ # Get keyword arguments app_id = kwargs.pop("app_id") wait = kwargs.pop("wait") n_tries = kwargs.pop("n_tries") app_start_delay = kwargs.pop("app_start_delay") use_count = kwargs.pop("use_count", True) # Coerce the arguments into a single form. If there are two arguments # then assume that we have filename and a map of chips and cores; # otherwise there should be ONE argument which is of the form of the # return value of `build_application_map`. application_map = {} if len(args) == 1: application_map = args[0] elif len(args) == 2: application_map = {args[0]: args[1]} else: raise TypeError( "load_application: accepts either 1 or 2 positional arguments:" "a map of filenames to targets OR a single filename and its" "targets" ) # Count the number of cores being loaded core_count = sum( len(cores) for ts in six.itervalues(application_map) for cores in six.itervalues(ts) ) # Mark all targets as unloaded unloaded = application_map # Try to load the applications, then determine which are unloaded tries = 0 while unloaded != {} and tries <= n_tries: tries += 1 # Load all unloaded applications, then pause to ensure they reach # the wait state self.flood_fill_aplx(unloaded, app_id=app_id, wait=True) time.sleep(app_start_delay) # If running in "fast" mode then check that the correct number of # cores are in the "wait" state, if so then break out of this loop. if (use_count and core_count == self.count_cores_in_state("wait", app_id)): unloaded = {} continue # Query each target in turn to determine if it is loaded or # otherwise. If it is loaded (in the wait state) then remove it # from the unloaded list. new_unloadeds = dict() for app_name, targets in iteritems(unloaded): unloaded_targets = {} for (x, y), cores in iteritems(targets): unloaded_cores = set() for p in cores: # Read the struct value vcpu->cpu_state, if it is # anything BUT wait then we mark this core as unloaded. state = consts.AppState( self.read_vcpu_struct_field("cpu_state", x, y, p) ) if state is not consts.AppState.wait: unloaded_cores.add(p) if len(unloaded_cores) > 0: unloaded_targets[(x, y)] = unloaded_cores if len(unloaded_targets) > 0: new_unloadeds[app_name] = unloaded_targets unloaded = new_unloadeds # If there are still unloaded cores then we bail if unloaded != {}: raise SpiNNakerLoadingError(unloaded) # If not waiting then send the start signal if not wait: self.send_signal("start", app_id)
[ "def", "load_application", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get keyword arguments", "app_id", "=", "kwargs", ".", "pop", "(", "\"app_id\"", ")", "wait", "=", "kwargs", ".", "pop", "(", "\"wait\"", ")", "n_tries", "=", "kwargs", ".", "pop", "(", "\"n_tries\"", ")", "app_start_delay", "=", "kwargs", ".", "pop", "(", "\"app_start_delay\"", ")", "use_count", "=", "kwargs", ".", "pop", "(", "\"use_count\"", ",", "True", ")", "# Coerce the arguments into a single form. If there are two arguments", "# then assume that we have filename and a map of chips and cores;", "# otherwise there should be ONE argument which is of the form of the", "# return value of `build_application_map`.", "application_map", "=", "{", "}", "if", "len", "(", "args", ")", "==", "1", ":", "application_map", "=", "args", "[", "0", "]", "elif", "len", "(", "args", ")", "==", "2", ":", "application_map", "=", "{", "args", "[", "0", "]", ":", "args", "[", "1", "]", "}", "else", ":", "raise", "TypeError", "(", "\"load_application: accepts either 1 or 2 positional arguments:\"", "\"a map of filenames to targets OR a single filename and its\"", "\"targets\"", ")", "# Count the number of cores being loaded", "core_count", "=", "sum", "(", "len", "(", "cores", ")", "for", "ts", "in", "six", ".", "itervalues", "(", "application_map", ")", "for", "cores", "in", "six", ".", "itervalues", "(", "ts", ")", ")", "# Mark all targets as unloaded", "unloaded", "=", "application_map", "# Try to load the applications, then determine which are unloaded", "tries", "=", "0", "while", "unloaded", "!=", "{", "}", "and", "tries", "<=", "n_tries", ":", "tries", "+=", "1", "# Load all unloaded applications, then pause to ensure they reach", "# the wait state", "self", ".", "flood_fill_aplx", "(", "unloaded", ",", "app_id", "=", "app_id", ",", "wait", "=", "True", ")", "time", ".", "sleep", "(", "app_start_delay", ")", "# If running in \"fast\" mode then check that the correct number of", "# cores are in the \"wait\" state, if so then break out of this loop.", "if", "(", "use_count", "and", "core_count", "==", "self", ".", "count_cores_in_state", "(", "\"wait\"", ",", "app_id", ")", ")", ":", "unloaded", "=", "{", "}", "continue", "# Query each target in turn to determine if it is loaded or", "# otherwise. 
If it is loaded (in the wait state) then remove it", "# from the unloaded list.", "new_unloadeds", "=", "dict", "(", ")", "for", "app_name", ",", "targets", "in", "iteritems", "(", "unloaded", ")", ":", "unloaded_targets", "=", "{", "}", "for", "(", "x", ",", "y", ")", ",", "cores", "in", "iteritems", "(", "targets", ")", ":", "unloaded_cores", "=", "set", "(", ")", "for", "p", "in", "cores", ":", "# Read the struct value vcpu->cpu_state, if it is", "# anything BUT wait then we mark this core as unloaded.", "state", "=", "consts", ".", "AppState", "(", "self", ".", "read_vcpu_struct_field", "(", "\"cpu_state\"", ",", "x", ",", "y", ",", "p", ")", ")", "if", "state", "is", "not", "consts", ".", "AppState", ".", "wait", ":", "unloaded_cores", ".", "add", "(", "p", ")", "if", "len", "(", "unloaded_cores", ")", ">", "0", ":", "unloaded_targets", "[", "(", "x", ",", "y", ")", "]", "=", "unloaded_cores", "if", "len", "(", "unloaded_targets", ")", ">", "0", ":", "new_unloadeds", "[", "app_name", "]", "=", "unloaded_targets", "unloaded", "=", "new_unloadeds", "# If there are still unloaded cores then we bail", "if", "unloaded", "!=", "{", "}", ":", "raise", "SpiNNakerLoadingError", "(", "unloaded", ")", "# If not waiting then send the start signal", "if", "not", "wait", ":", "self", ".", "send_signal", "(", "\"start\"", ",", "app_id", ")" ]
Load an application to a set of application cores. This method guarantees that once it returns, all required cores will have been loaded. If this is not possible after a small number of attempts, a :py:exc:`.SpiNNakerLoadingError` will be raised. This method can be called in either of the following ways:: load_application("/path/to/app.aplx", {(x, y): {core, ...}, ...}) load_application({"/path/to/app.aplx": {(x, y): {core, ...}, ...}, ...}) Note that the latter format is the same format produced by :py:func:`~rig.place_and_route.util.build_application_map`. Parameters ---------- app_id : int wait : bool Leave the application in a wait state after successfully loading it. n_tries : int Number of attempts to make to load the application. app_start_delay : float Time to pause (in seconds) after loading to ensure that the application successfully reaches the wait state before checking for success. use_count : bool If True (the default) then the targets dictionary will be assumed to represent _all_ the cores that will be loaded and a faster method to determine whether all applications have been loaded correctly will be used. If False a fallback method will be used. Raises ------ rig.machine_control.machine_controller.SpiNNakerLoadingError This exception is raised after some cores failed to load after ``n_tries`` attempts.
[ "Load", "an", "application", "to", "a", "set", "of", "application", "cores", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1373-L1491
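A hedged usage sketch for the reliable loader; hostname, APLX path and keyword values are placeholders (in practice rig's context system can supply the keyword defaults):

from rig.machine_control import MachineController
from rig.machine_control.machine_controller import SpiNNakerLoadingError

mc = MachineController("spinnaker-board")    # hypothetical hostname
targets = {"network.aplx": {(0, 0): {1, 2}}}
try:
    # Retries up to n_tries times; raises if any core still fails to load.
    mc.load_application(targets, app_id=66, wait=False,
                        n_tries=5, app_start_delay=0.1)
except SpiNNakerLoadingError as e:
    print("cores failed to load:", e)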
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.send_signal
def send_signal(self, signal, app_id): """Transmit a signal to applications. .. warning:: In current implementations of SARK, signals are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- signal : string or :py:class:`~rig.machine_control.consts.AppSignal` Signal to transmit. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppSignal` enum or, for convenience, the name of a signal (defined in :py:class:`~rig.machine_control.consts.AppSignal`) as a string. """ if isinstance(signal, str): try: signal = getattr(consts.AppSignal, signal) except AttributeError: # The signal name is not present in consts.AppSignal! The next # test will throw an appropriate exception since no string can # be "in" an IntEnum. pass if signal not in consts.AppSignal: raise ValueError( "send_signal: Cannot transmit signal of type {}".format( repr(signal))) # Construct the packet for transmission arg1 = consts.signal_types[signal] arg2 = (signal << 16) | 0xff00 | app_id arg3 = 0x0000ffff # Meaning "transmit to all" self._send_scp(255, 255, 0, SCPCommands.signal, arg1, arg2, arg3)
python
def send_signal(self, signal, app_id): """Transmit a signal to applications. .. warning:: In current implementations of SARK, signals are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- signal : string or :py:class:`~rig.machine_control.consts.AppSignal` Signal to transmit. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppSignal` enum or, for convenience, the name of a signal (defined in :py:class:`~rig.machine_control.consts.AppSignal`) as a string. """ if isinstance(signal, str): try: signal = getattr(consts.AppSignal, signal) except AttributeError: # The signal name is not present in consts.AppSignal! The next # test will throw an appropriate exception since no string can # be "in" an IntEnum. pass if signal not in consts.AppSignal: raise ValueError( "send_signal: Cannot transmit signal of type {}".format( repr(signal))) # Construct the packet for transmission arg1 = consts.signal_types[signal] arg2 = (signal << 16) | 0xff00 | app_id arg3 = 0x0000ffff # Meaning "transmit to all" self._send_scp(255, 255, 0, SCPCommands.signal, arg1, arg2, arg3)
[ "def", "send_signal", "(", "self", ",", "signal", ",", "app_id", ")", ":", "if", "isinstance", "(", "signal", ",", "str", ")", ":", "try", ":", "signal", "=", "getattr", "(", "consts", ".", "AppSignal", ",", "signal", ")", "except", "AttributeError", ":", "# The signal name is not present in consts.AppSignal! The next", "# test will throw an appropriate exception since no string can", "# be \"in\" an IntEnum.", "pass", "if", "signal", "not", "in", "consts", ".", "AppSignal", ":", "raise", "ValueError", "(", "\"send_signal: Cannot transmit signal of type {}\"", ".", "format", "(", "repr", "(", "signal", ")", ")", ")", "# Construct the packet for transmission", "arg1", "=", "consts", ".", "signal_types", "[", "signal", "]", "arg2", "=", "(", "signal", "<<", "16", ")", "|", "0xff00", "|", "app_id", "arg3", "=", "0x0000ffff", "# Meaning \"transmit to all\"", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "signal", ",", "arg1", ",", "arg2", ",", "arg3", ")" ]
Transmit a signal to applications. .. warning:: In current implementations of SARK, signals are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- signal : string or :py:class:`~rig.machine_control.consts.AppSignal` Signal to transmit. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppSignal` enum or, for convenience, the name of a signal (defined in :py:class:`~rig.machine_control.consts.AppSignal`) as a string.
[ "Transmit", "a", "signal", "to", "applications", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1494-L1528
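A short sketch of the two equivalent calling forms described above; the hostname and app_id are placeholders:

from rig.machine_control import MachineController, consts

mc = MachineController("spinnaker-board")    # hypothetical hostname
mc.send_signal(consts.AppSignal.start, 66)   # enum form
mc.send_signal("start", 66)                  # equivalent string form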
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.count_cores_in_state
def count_cores_in_state(self, state, app_id): """Count the number of cores in a given state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` or iterable Count the number of cores currently in this state. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string or an iterable of these, in which case the total count will be returned. """ if (isinstance(state, collections.Iterable) and not isinstance(state, str)): # If the state is iterable then call for each state and return the # sum. return sum(self.count_cores_in_state(s, app_id) for s in state) if isinstance(state, str): try: state = getattr(consts.AppState, state) except AttributeError: # The state name is not present in consts.AppSignal! The next # test will throw an appropriate exception since no string can # be "in" an IntEnum. pass if state not in consts.AppState: raise ValueError( "count_cores_in_state: Unknown state {}".format( repr(state))) # TODO Determine a way to nicely express a way to use the region data # stored in arg3. region = 0x0000ffff # Largest possible machine, level 0 level = (region >> 16) & 0x3 mask = region & 0x0000ffff # Construct the packet arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count] arg2 = ((level << 26) | (1 << 22) | (consts.AppDiagnosticSignal.count << 20) | (state << 16) | (0xff << 8) | app_id) # App mask for 1 app_id = 0xff arg3 = mask # Transmit and return the count return self._send_scp( 255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1
python
def count_cores_in_state(self, state, app_id): """Count the number of cores in a given state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` or iterable Count the number of cores currently in this state. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string or an iterable of these, in which case the total count will be returned. """ if (isinstance(state, collections.Iterable) and not isinstance(state, str)): # If the state is iterable then call for each state and return the # sum. return sum(self.count_cores_in_state(s, app_id) for s in state) if isinstance(state, str): try: state = getattr(consts.AppState, state) except AttributeError: # The state name is not present in consts.AppSignal! The next # test will throw an appropriate exception since no string can # be "in" an IntEnum. pass if state not in consts.AppState: raise ValueError( "count_cores_in_state: Unknown state {}".format( repr(state))) # TODO Determine a way to nicely express a way to use the region data # stored in arg3. region = 0x0000ffff # Largest possible machine, level 0 level = (region >> 16) & 0x3 mask = region & 0x0000ffff # Construct the packet arg1 = consts.diagnostic_signal_types[consts.AppDiagnosticSignal.count] arg2 = ((level << 26) | (1 << 22) | (consts.AppDiagnosticSignal.count << 20) | (state << 16) | (0xff << 8) | app_id) # App mask for 1 app_id = 0xff arg3 = mask # Transmit and return the count return self._send_scp( 255, 255, 0, SCPCommands.signal, arg1, arg2, arg3).arg1
[ "def", "count_cores_in_state", "(", "self", ",", "state", ",", "app_id", ")", ":", "if", "(", "isinstance", "(", "state", ",", "collections", ".", "Iterable", ")", "and", "not", "isinstance", "(", "state", ",", "str", ")", ")", ":", "# If the state is iterable then call for each state and return the", "# sum.", "return", "sum", "(", "self", ".", "count_cores_in_state", "(", "s", ",", "app_id", ")", "for", "s", "in", "state", ")", "if", "isinstance", "(", "state", ",", "str", ")", ":", "try", ":", "state", "=", "getattr", "(", "consts", ".", "AppState", ",", "state", ")", "except", "AttributeError", ":", "# The state name is not present in consts.AppSignal! The next", "# test will throw an appropriate exception since no string can", "# be \"in\" an IntEnum.", "pass", "if", "state", "not", "in", "consts", ".", "AppState", ":", "raise", "ValueError", "(", "\"count_cores_in_state: Unknown state {}\"", ".", "format", "(", "repr", "(", "state", ")", ")", ")", "# TODO Determine a way to nicely express a way to use the region data", "# stored in arg3.", "region", "=", "0x0000ffff", "# Largest possible machine, level 0", "level", "=", "(", "region", ">>", "16", ")", "&", "0x3", "mask", "=", "region", "&", "0x0000ffff", "# Construct the packet", "arg1", "=", "consts", ".", "diagnostic_signal_types", "[", "consts", ".", "AppDiagnosticSignal", ".", "count", "]", "arg2", "=", "(", "(", "level", "<<", "26", ")", "|", "(", "1", "<<", "22", ")", "|", "(", "consts", ".", "AppDiagnosticSignal", ".", "count", "<<", "20", ")", "|", "(", "state", "<<", "16", ")", "|", "(", "0xff", "<<", "8", ")", "|", "app_id", ")", "# App mask for 1 app_id = 0xff", "arg3", "=", "mask", "# Transmit and return the count", "return", "self", ".", "_send_scp", "(", "255", ",", "255", ",", "0", ",", "SCPCommands", ".", "signal", ",", "arg1", ",", "arg2", ",", "arg3", ")", ".", "arg1" ]
Count the number of cores in a given state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). Users should treat this mechanism with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` or iterable Count the number of cores currently in this state. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string or an iterable of these, in which case the total count will be returned.
[ "Count", "the", "number", "of", "cores", "in", "a", "given", "state", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1531-L1587
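A sketch of the single-state and iterable forms; the hostname and app_id are example values, and the state names are assumed to be members of consts.AppState:

from rig.machine_control import MachineController

mc = MachineController("spinnaker-board")    # hypothetical hostname
n_waiting = mc.count_cores_in_state("wait", 66)
# Passing an iterable sums the per-state counts, per the docstring above.
n_done = mc.count_cores_in_state(("run", "exit"), 66)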
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.wait_for_cores_to_reach_state
def wait_for_cores_to_reach_state(self, state, count, app_id, poll_interval=0.1, timeout=None): """Block until the specified number of cores reach the specified state. This is a simple utility-wrapper around the :py:meth:`.count_cores_in_state` method which polls the machine until (at least) the supplied number of cores has reached the specified state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). As a result, in uncommon-but-possible circumstances, this function may never exit. Users should treat this function with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` The state to wait for cores to enter. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string. count : int The (minimum) number of cores reach the specified state before this method terminates. poll_interval : float Number of seconds between state counting requests sent to the machine. timeout : float or Null Maximum number of seconds which may elapse before giving up. If None, keep trying forever. Returns ------- int The number of cores in the given state (which will be less than the number required if the method timed out). """ if timeout is not None: timeout_time = time.time() + timeout while True: cur_count = self.count_cores_in_state(state, app_id) if cur_count >= count: break # Stop if timeout elapsed if timeout is not None and time.time() > timeout_time: break # Pause before retrying time.sleep(poll_interval) return cur_count
python
def wait_for_cores_to_reach_state(self, state, count, app_id, poll_interval=0.1, timeout=None): """Block until the specified number of cores reach the specified state. This is a simple utility-wrapper around the :py:meth:`.count_cores_in_state` method which polls the machine until (at least) the supplied number of cores has reached the specified state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). As a result, in uncommon-but-possible circumstances, this function may never exit. Users should treat this function with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` The state to wait for cores to enter. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string. count : int The (minimum) number of cores reach the specified state before this method terminates. poll_interval : float Number of seconds between state counting requests sent to the machine. timeout : float or Null Maximum number of seconds which may elapse before giving up. If None, keep trying forever. Returns ------- int The number of cores in the given state (which will be less than the number required if the method timed out). """ if timeout is not None: timeout_time = time.time() + timeout while True: cur_count = self.count_cores_in_state(state, app_id) if cur_count >= count: break # Stop if timeout elapsed if timeout is not None and time.time() > timeout_time: break # Pause before retrying time.sleep(poll_interval) return cur_count
[ "def", "wait_for_cores_to_reach_state", "(", "self", ",", "state", ",", "count", ",", "app_id", ",", "poll_interval", "=", "0.1", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "timeout_time", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "True", ":", "cur_count", "=", "self", ".", "count_cores_in_state", "(", "state", ",", "app_id", ")", "if", "cur_count", ">=", "count", ":", "break", "# Stop if timeout elapsed", "if", "timeout", "is", "not", "None", "and", "time", ".", "time", "(", ")", ">", "timeout_time", ":", "break", "# Pause before retrying", "time", ".", "sleep", "(", "poll_interval", ")", "return", "cur_count" ]
Block until the specified number of cores reach the specified state. This is a simple utility-wrapper around the :py:meth:`.count_cores_in_state` method which polls the machine until (at least) the supplied number of cores has reached the specified state. .. warning:: In current implementations of SARK, signals (which are used to determine the state of cores) are highly likely to arrive but this is not guaranteed (especially when the system's network is heavily utilised). As a result, in uncommon-but-possible circumstances, this function may never exit. Users should treat this function with caution. Future versions of SARK may resolve this issue. Parameters ---------- state : string or :py:class:`~rig.machine_control.consts.AppState` The state to wait for cores to enter. This may be either an entry of the :py:class:`~rig.machine_control.consts.AppState` enum or, for convenience, the name of a state (defined in :py:class:`~rig.machine_control.consts.AppState`) as a string. count : int The (minimum) number of cores to reach the specified state before this method terminates. poll_interval : float Number of seconds between state counting requests sent to the machine. timeout : float or None Maximum number of seconds which may elapse before giving up. If None, keep trying forever. Returns ------- int The number of cores in the given state (which will be less than the number required if the method timed out).
[ "Block", "until", "the", "specified", "number", "of", "cores", "reach", "the", "specified", "state", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1590-L1646
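A sketch showing a bounded wait; the hostname, counts and app_id are example values:

from rig.machine_control import MachineController

mc = MachineController("spinnaker-board")    # hypothetical hostname
# Block until 4 cores reach "wait", giving up after 5 seconds.
n = mc.wait_for_cores_to_reach_state("wait", 4, 66, timeout=5.0)
if n < 4:
    raise RuntimeError("only {} cores reached wait".format(n))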
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.load_routing_tables
def load_routing_tables(self, routing_tables, app_id): """Allocate space for an load multicast routing tables. The routing table entries will be removed automatically when the associated application is stopped. Parameters ---------- routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`\ (...), ...], ...} Map of chip co-ordinates to routing table entries, as produced, for example by :py:func:`~rig.routing_table.routing_tree_to_tables` and :py:func:`~rig.routing_table.minimise_tables`. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries. """ for (x, y), table in iteritems(routing_tables): self.load_routing_table_entries(table, x=x, y=y, app_id=app_id)
python
def load_routing_tables(self, routing_tables, app_id): """Allocate space for an load multicast routing tables. The routing table entries will be removed automatically when the associated application is stopped. Parameters ---------- routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`\ (...), ...], ...} Map of chip co-ordinates to routing table entries, as produced, for example by :py:func:`~rig.routing_table.routing_tree_to_tables` and :py:func:`~rig.routing_table.minimise_tables`. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries. """ for (x, y), table in iteritems(routing_tables): self.load_routing_table_entries(table, x=x, y=y, app_id=app_id)
[ "def", "load_routing_tables", "(", "self", ",", "routing_tables", ",", "app_id", ")", ":", "for", "(", "x", ",", "y", ")", ",", "table", "in", "iteritems", "(", "routing_tables", ")", ":", "self", ".", "load_routing_table_entries", "(", "table", ",", "x", "=", "x", ",", "y", "=", "y", ",", "app_id", "=", "app_id", ")" ]
Allocate space for and load multicast routing tables. The routing table entries will be removed automatically when the associated application is stopped. Parameters ---------- routing_tables : {(x, y): \ [:py:class:`~rig.routing_table.RoutingTableEntry`\ (...), ...], ...} Map of chip co-ordinates to routing table entries, as produced, for example by :py:func:`~rig.routing_table.routing_tree_to_tables` and :py:func:`~rig.routing_table.minimise_tables`. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries.
[ "Allocate", "space", "for", "an", "load", "multicast", "routing", "tables", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1649-L1671
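A hedged sketch loading a single made-up entry to chip (0, 0); RoutingTableEntry and Routes are rig's real types, while the hostname, key, mask and app_id are placeholders:

from rig.machine_control import MachineController
from rig.routing_table import Routes, RoutingTableEntry

mc = MachineController("spinnaker-board")    # hypothetical hostname
tables = {(0, 0): [RoutingTableEntry({Routes.north},      # route
                                     0xbeef0000,          # key
                                     0xffff0000)]}        # mask
mc.load_routing_tables(tables, app_id=66)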
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.load_routing_table_entries
def load_routing_table_entries(self, entries, x, y, app_id): """Allocate space for and load multicast routing table entries into the router of a SpiNNaker chip. .. note:: This method only loads routing table entries for a single chip. Most users should use :py:meth:`.load_routing_tables` which loads routing tables to multiple chips. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] List of :py:class:`rig.routing_table.RoutingTableEntry`\ s. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries. """ count = len(entries) # Try to allocate room for the entries rv = self._send_scp( x, y, 0, SCPCommands.alloc_free, (app_id << 8) | consts.AllocOperations.alloc_rtr, count ) rtr_base = rv.arg1 # Index of the first allocated entry, 0 if failed if rtr_base == 0: raise SpiNNakerRouterError(count, x, y) # Determine where to write into memory buf = self.read_struct_field("sv", "sdram_sys", x, y) # Build the data to write in, then perform the write data = bytearray(16 * len(entries)) for i, entry in enumerate(entries): # Build the route as a 32-bit value route = 0x00000000 for r in entry.route: route |= 1 << r struct.pack_into(consts.RTE_PACK_STRING, data, i*16, i, 0, route, entry.key, entry.mask) self.write(buf, data, x, y) # Perform the load of the data into the router self._send_scp( x, y, 0, SCPCommands.router, (count << 16) | (app_id << 8) | consts.RouterOperations.load, buf, rtr_base )
python
def load_routing_table_entries(self, entries, x, y, app_id): """Allocate space for and load multicast routing table entries into the router of a SpiNNaker chip. .. note:: This method only loads routing table entries for a single chip. Most users should use :py:meth:`.load_routing_tables` which loads routing tables to multiple chips. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] List of :py:class:`rig.routing_table.RoutingTableEntry`\ s. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries. """ count = len(entries) # Try to allocate room for the entries rv = self._send_scp( x, y, 0, SCPCommands.alloc_free, (app_id << 8) | consts.AllocOperations.alloc_rtr, count ) rtr_base = rv.arg1 # Index of the first allocated entry, 0 if failed if rtr_base == 0: raise SpiNNakerRouterError(count, x, y) # Determine where to write into memory buf = self.read_struct_field("sv", "sdram_sys", x, y) # Build the data to write in, then perform the write data = bytearray(16 * len(entries)) for i, entry in enumerate(entries): # Build the route as a 32-bit value route = 0x00000000 for r in entry.route: route |= 1 << r struct.pack_into(consts.RTE_PACK_STRING, data, i*16, i, 0, route, entry.key, entry.mask) self.write(buf, data, x, y) # Perform the load of the data into the router self._send_scp( x, y, 0, SCPCommands.router, (count << 16) | (app_id << 8) | consts.RouterOperations.load, buf, rtr_base )
[ "def", "load_routing_table_entries", "(", "self", ",", "entries", ",", "x", ",", "y", ",", "app_id", ")", ":", "count", "=", "len", "(", "entries", ")", "# Try to allocate room for the entries", "rv", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "alloc_free", ",", "(", "app_id", "<<", "8", ")", "|", "consts", ".", "AllocOperations", ".", "alloc_rtr", ",", "count", ")", "rtr_base", "=", "rv", ".", "arg1", "# Index of the first allocated entry, 0 if failed", "if", "rtr_base", "==", "0", ":", "raise", "SpiNNakerRouterError", "(", "count", ",", "x", ",", "y", ")", "# Determine where to write into memory", "buf", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"sdram_sys\"", ",", "x", ",", "y", ")", "# Build the data to write in, then perform the write", "data", "=", "bytearray", "(", "16", "*", "len", "(", "entries", ")", ")", "for", "i", ",", "entry", "in", "enumerate", "(", "entries", ")", ":", "# Build the route as a 32-bit value", "route", "=", "0x00000000", "for", "r", "in", "entry", ".", "route", ":", "route", "|=", "1", "<<", "r", "struct", ".", "pack_into", "(", "consts", ".", "RTE_PACK_STRING", ",", "data", ",", "i", "*", "16", ",", "i", ",", "0", ",", "route", ",", "entry", ".", "key", ",", "entry", ".", "mask", ")", "self", ".", "write", "(", "buf", ",", "data", ",", "x", ",", "y", ")", "# Perform the load of the data into the router", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "router", ",", "(", "count", "<<", "16", ")", "|", "(", "app_id", "<<", "8", ")", "|", "consts", ".", "RouterOperations", ".", "load", ",", "buf", ",", "rtr_base", ")" ]
Allocate space for and load multicast routing table entries into the router of a SpiNNaker chip. .. note:: This method only loads routing table entries for a single chip. Most users should use :py:meth:`.load_routing_tables` which loads routing tables to multiple chips. Parameters ---------- entries : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] List of :py:class:`rig.routing_table.RoutingTableEntry`\ s. Raises ------ rig.machine_control.machine_controller.SpiNNakerRouterError If it is not possible to allocate sufficient routing table entries.
[ "Allocate", "space", "for", "and", "load", "multicast", "routing", "table", "entries", "into", "the", "router", "of", "a", "SpiNNaker", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1674-L1725
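The route word built in the inner loop above simply sets one bit per Routes member; a standalone sketch:

from rig.routing_table import Routes

route_word = 0
for r in {Routes.north, Routes.east}:
    route_word |= 1 << r    # Routes is an IntEnum, so each member selects a bit

assert route_word == (1 << Routes.north) | (1 << Routes.east)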
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_routing_table_entries
def get_routing_table_entries(self, x, y): """Dump the multicast routing table of a given chip. Returns ------- [(:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) \ or None, ...] Ordered list of routing table entries with app_ids and core numbers. """ # Determine where to read from, perform the read rtr_addr = self.read_struct_field("sv", "rtr_copy", x, y) read_size = struct.calcsize(consts.RTE_PACK_STRING) rtr_data = self.read(rtr_addr, consts.RTR_ENTRIES * read_size, x, y) # Read each routing table entry in turn table = list() while len(rtr_data) > 0: entry, rtr_data = rtr_data[:read_size], rtr_data[read_size:] table.append(unpack_routing_table_entry(entry)) return table
python
def get_routing_table_entries(self, x, y): """Dump the multicast routing table of a given chip. Returns ------- [(:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) \ or None, ...] Ordered list of routing table entries with app_ids and core numbers. """ # Determine where to read from, perform the read rtr_addr = self.read_struct_field("sv", "rtr_copy", x, y) read_size = struct.calcsize(consts.RTE_PACK_STRING) rtr_data = self.read(rtr_addr, consts.RTR_ENTRIES * read_size, x, y) # Read each routing table entry in turn table = list() while len(rtr_data) > 0: entry, rtr_data = rtr_data[:read_size], rtr_data[read_size:] table.append(unpack_routing_table_entry(entry)) return table
[ "def", "get_routing_table_entries", "(", "self", ",", "x", ",", "y", ")", ":", "# Determine where to read from, perform the read", "rtr_addr", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"rtr_copy\"", ",", "x", ",", "y", ")", "read_size", "=", "struct", ".", "calcsize", "(", "consts", ".", "RTE_PACK_STRING", ")", "rtr_data", "=", "self", ".", "read", "(", "rtr_addr", ",", "consts", ".", "RTR_ENTRIES", "*", "read_size", ",", "x", ",", "y", ")", "# Read each routing table entry in turn", "table", "=", "list", "(", ")", "while", "len", "(", "rtr_data", ")", ">", "0", ":", "entry", ",", "rtr_data", "=", "rtr_data", "[", ":", "read_size", "]", ",", "rtr_data", "[", "read_size", ":", "]", "table", ".", "append", "(", "unpack_routing_table_entry", "(", "entry", ")", ")", "return", "table" ]
Dump the multicast routing table of a given chip. Returns ------- [(:py:class:`~rig.routing_table.RoutingTableEntry`, app_id, core) \ or None, ...] Ordered list of routing table entries with app_ids and core numbers.
[ "Dump", "the", "multicast", "routing", "table", "of", "a", "given", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1728-L1748
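Reading the table back with the same assumed `mc`; per the docstring, unused slots are returned as `None`:

    table = mc.get_routing_table_entries(0, 0)
    for index, slot in enumerate(table):
        if slot is not None:
            entry, app_id, core = slot
            print(index, hex(entry.key), hex(entry.mask), app_id, core)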
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.clear_routing_table_entries
def clear_routing_table_entries(self, x, y, app_id): """Clear the routing table entries associated with a given application. """ # Construct the arguments arg1 = (app_id << 8) | consts.AllocOperations.free_rtr_by_app self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, 0x1)
python
def clear_routing_table_entries(self, x, y, app_id): """Clear the routing table entries associated with a given application. """ # Construct the arguments arg1 = (app_id << 8) | consts.AllocOperations.free_rtr_by_app self._send_scp(x, y, 0, SCPCommands.alloc_free, arg1, 0x1)
[ "def", "clear_routing_table_entries", "(", "self", ",", "x", ",", "y", ",", "app_id", ")", ":", "# Construct the arguments", "arg1", "=", "(", "app_id", "<<", "8", ")", "|", "consts", ".", "AllocOperations", ".", "free_rtr_by_app", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "alloc_free", ",", "arg1", ",", "0x1", ")" ]
Clear the routing table entries associated with a given application.
[ "Clear", "the", "routing", "table", "entries", "associated", "with", "a", "given", "application", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1751-L1756
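Cleaning up afterwards is a single call; the chip coordinates and app_id here are assumed to match the earlier illustrative load:

    # Free every routing table entry owned by application 66 on chip (0, 0).
    mc.clear_routing_table_entries(0, 0, app_id=66)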
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_p2p_routing_table
def get_p2p_routing_table(self, x, y):
        """Dump the contents of a chip's P2P routing table.

        This method can be indirectly used to get a list of functioning chips.

        .. note::
            This method only returns the entries for chips within the bounds
            of the system. E.g. if booted with 8x8, only entries for these
            8x8 chips will be returned.

        Returns
        -------
        {(x, y): :py:class:`~rig.machine_control.consts.P2PTableEntry`, ...}
        """
        table = {}

        # Get the dimensions of the system
        p2p_dims = self.read_struct_field("sv", "p2p_dims", x, y)
        width = (p2p_dims >> 8) & 0xFF
        height = (p2p_dims >> 0) & 0xFF

        # Read out the P2P table data, one column at a time (note that eight
        # entries are packed into each 32-bit word)
        col_words = (((height + 7) // 8) * 4)
        for col in range(width):
            # Read the entries for this column
            raw_table_col = self.read(
                consts.SPINNAKER_RTR_P2P + (((256 * col) // 8) * 4),
                col_words,
                x, y
            )

            row = 0
            while row < height:
                raw_word, raw_table_col = raw_table_col[:4], raw_table_col[4:]
                word, = struct.unpack("<I", raw_word)
                for entry in range(min(8, height - row)):
                    table[(col, row)] = \
                        consts.P2PTableEntry((word >> (3*entry)) & 0b111)
                    row += 1

        return table
python
def get_p2p_routing_table(self, x, y):
        """Dump the contents of a chip's P2P routing table.

        This method can be indirectly used to get a list of functioning chips.

        .. note::
            This method only returns the entries for chips within the bounds
            of the system. E.g. if booted with 8x8, only entries for these
            8x8 chips will be returned.

        Returns
        -------
        {(x, y): :py:class:`~rig.machine_control.consts.P2PTableEntry`, ...}
        """
        table = {}

        # Get the dimensions of the system
        p2p_dims = self.read_struct_field("sv", "p2p_dims", x, y)
        width = (p2p_dims >> 8) & 0xFF
        height = (p2p_dims >> 0) & 0xFF

        # Read out the P2P table data, one column at a time (note that eight
        # entries are packed into each 32-bit word)
        col_words = (((height + 7) // 8) * 4)
        for col in range(width):
            # Read the entries for this column
            raw_table_col = self.read(
                consts.SPINNAKER_RTR_P2P + (((256 * col) // 8) * 4),
                col_words,
                x, y
            )

            row = 0
            while row < height:
                raw_word, raw_table_col = raw_table_col[:4], raw_table_col[4:]
                word, = struct.unpack("<I", raw_word)
                for entry in range(min(8, height - row)):
                    table[(col, row)] = \
                        consts.P2PTableEntry((word >> (3*entry)) & 0b111)
                    row += 1

        return table
[ "def", "get_p2p_routing_table", "(", "self", ",", "x", ",", "y", ")", ":", "table", "=", "{", "}", "# Get the dimensions of the system", "p2p_dims", "=", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"p2p_dims\"", ",", "x", ",", "y", ")", "width", "=", "(", "p2p_dims", ">>", "8", ")", "&", "0xFF", "height", "=", "(", "p2p_dims", ">>", "0", ")", "&", "0xFF", "# Read out the P2P table data, one column at a time (note that eight", "# entries are packed into each 32-bit word)", "col_words", "=", "(", "(", "(", "height", "+", "7", ")", "//", "8", ")", "*", "4", ")", "for", "col", "in", "range", "(", "width", ")", ":", "# Read the entries for this row", "raw_table_col", "=", "self", ".", "read", "(", "consts", ".", "SPINNAKER_RTR_P2P", "+", "(", "(", "(", "256", "*", "col", ")", "//", "8", ")", "*", "4", ")", ",", "col_words", ",", "x", ",", "y", ")", "row", "=", "0", "while", "row", "<", "height", ":", "raw_word", ",", "raw_table_col", "=", "raw_table_col", "[", ":", "4", "]", ",", "raw_table_col", "[", "4", ":", "]", "word", ",", "=", "struct", ".", "unpack", "(", "\"<I\"", ",", "raw_word", ")", "for", "entry", "in", "range", "(", "min", "(", "8", ",", "height", "-", "row", ")", ")", ":", "table", "[", "(", "col", ",", "row", ")", "]", "=", "consts", ".", "P2PTableEntry", "(", "(", "word", ">>", "(", "3", "*", "entry", ")", ")", "&", "0b111", ")", "row", "+=", "1", "return", "table" ]
Dump the contents of a chip's P2P routing table.

        This method can be indirectly used to get a list of functioning chips.

        .. note::
            This method only returns the entries for chips within the bounds
            of the system. E.g. if booted with 8x8, only entries for these
            8x8 chips will be returned.

        Returns
        -------
        {(x, y): :py:class:`~rig.machine_control.consts.P2PTableEntry`, ...}
[ "Dump", "the", "contents", "of", "a", "chip", "s", "P2P", "routing", "table", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1759-L1800
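As the docstring hints, the P2P table doubles as a chip liveness map; a sketch assuming the same `mc`:

    from rig.machine_control import consts

    p2p = mc.get_p2p_routing_table(255, 255)
    alive = [xy for xy, route in p2p.items()
             if route != consts.P2PTableEntry.none]
    print("{} routable chips".format(len(alive)))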
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_chip_info
def get_chip_info(self, x, y): """Get general information about the resources available on a chip. Returns ------- :py:class:`.ChipInfo` A named tuple indicating the number of working cores, the states of all working cores, the set of working links and the size of the largest free block in SDRAM and SRAM. """ info = self._send_scp(x, y, 0, SCPCommands.info, expected_args=3) # Unpack values encoded in the argument fields num_cores = info.arg1 & 0x1F working_links = set(link for link in Links if (info.arg1 >> (8 + link)) & 1) largest_free_rtr_mc_block = (info.arg1 >> 14) & 0x7FF ethernet_up = bool(info.arg1 & (1 << 25)) # Unpack the values in the data payload data = struct.unpack_from("<18BHI", info.data) core_states = [consts.AppState(c) for c in data[:18]] local_ethernet_chip = ((data[18] >> 8) & 0xFF, (data[18] >> 0) & 0xFF) ip_address = ".".join(str((data[19] >> i) & 0xFF) for i in range(0, 32, 8)) return ChipInfo( num_cores=num_cores, core_states=core_states[:num_cores], working_links=working_links, largest_free_sdram_block=info.arg2, largest_free_sram_block=info.arg3, largest_free_rtr_mc_block=largest_free_rtr_mc_block, ethernet_up=ethernet_up, ip_address=ip_address, local_ethernet_chip=local_ethernet_chip, )
python
def get_chip_info(self, x, y): """Get general information about the resources available on a chip. Returns ------- :py:class:`.ChipInfo` A named tuple indicating the number of working cores, the states of all working cores, the set of working links and the size of the largest free block in SDRAM and SRAM. """ info = self._send_scp(x, y, 0, SCPCommands.info, expected_args=3) # Unpack values encoded in the argument fields num_cores = info.arg1 & 0x1F working_links = set(link for link in Links if (info.arg1 >> (8 + link)) & 1) largest_free_rtr_mc_block = (info.arg1 >> 14) & 0x7FF ethernet_up = bool(info.arg1 & (1 << 25)) # Unpack the values in the data payload data = struct.unpack_from("<18BHI", info.data) core_states = [consts.AppState(c) for c in data[:18]] local_ethernet_chip = ((data[18] >> 8) & 0xFF, (data[18] >> 0) & 0xFF) ip_address = ".".join(str((data[19] >> i) & 0xFF) for i in range(0, 32, 8)) return ChipInfo( num_cores=num_cores, core_states=core_states[:num_cores], working_links=working_links, largest_free_sdram_block=info.arg2, largest_free_sram_block=info.arg3, largest_free_rtr_mc_block=largest_free_rtr_mc_block, ethernet_up=ethernet_up, ip_address=ip_address, local_ethernet_chip=local_ethernet_chip, )
[ "def", "get_chip_info", "(", "self", ",", "x", ",", "y", ")", ":", "info", "=", "self", ".", "_send_scp", "(", "x", ",", "y", ",", "0", ",", "SCPCommands", ".", "info", ",", "expected_args", "=", "3", ")", "# Unpack values encoded in the argument fields", "num_cores", "=", "info", ".", "arg1", "&", "0x1F", "working_links", "=", "set", "(", "link", "for", "link", "in", "Links", "if", "(", "info", ".", "arg1", ">>", "(", "8", "+", "link", ")", ")", "&", "1", ")", "largest_free_rtr_mc_block", "=", "(", "info", ".", "arg1", ">>", "14", ")", "&", "0x7FF", "ethernet_up", "=", "bool", "(", "info", ".", "arg1", "&", "(", "1", "<<", "25", ")", ")", "# Unpack the values in the data payload", "data", "=", "struct", ".", "unpack_from", "(", "\"<18BHI\"", ",", "info", ".", "data", ")", "core_states", "=", "[", "consts", ".", "AppState", "(", "c", ")", "for", "c", "in", "data", "[", ":", "18", "]", "]", "local_ethernet_chip", "=", "(", "(", "data", "[", "18", "]", ">>", "8", ")", "&", "0xFF", ",", "(", "data", "[", "18", "]", ">>", "0", ")", "&", "0xFF", ")", "ip_address", "=", "\".\"", ".", "join", "(", "str", "(", "(", "data", "[", "19", "]", ">>", "i", ")", "&", "0xFF", ")", "for", "i", "in", "range", "(", "0", ",", "32", ",", "8", ")", ")", "return", "ChipInfo", "(", "num_cores", "=", "num_cores", ",", "core_states", "=", "core_states", "[", ":", "num_cores", "]", ",", "working_links", "=", "working_links", ",", "largest_free_sdram_block", "=", "info", ".", "arg2", ",", "largest_free_sram_block", "=", "info", ".", "arg3", ",", "largest_free_rtr_mc_block", "=", "largest_free_rtr_mc_block", ",", "ethernet_up", "=", "ethernet_up", ",", "ip_address", "=", "ip_address", ",", "local_ethernet_chip", "=", "local_ethernet_chip", ",", ")" ]
Get general information about the resources available on a chip. Returns ------- :py:class:`.ChipInfo` A named tuple indicating the number of working cores, the states of all working cores, the set of working links and the size of the largest free block in SDRAM and SRAM.
[ "Get", "general", "information", "about", "the", "resources", "available", "on", "a", "chip", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1803-L1840
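Inspecting a single chip's resources, using the `ChipInfo` fields named in the docstring (still assuming a connected `mc`):

    info = mc.get_chip_info(0, 0)
    print("working cores:", info.num_cores)
    print("working links:", sorted(link.name for link in info.working_links))
    print("largest free SDRAM block:", info.largest_free_sdram_block, "bytes")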
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_system_info
def get_system_info(self, x=255, y=255):
        """Discover the integrity and resource availability of a whole
        SpiNNaker system.

        This command performs :py:meth:`.get_chip_info` on all working chips
        in the system returning an enhanced :py:class:`dict`
        (:py:class:`.SystemInfo`) containing a look-up from chip coordinate
        to :py:class:`.ChipInfo`. In addition to standard dictionary
        functionality, :py:class:`.SystemInfo` provides a number of
        convenience methods, which allow convenient iteration over various
        aspects of the information stored.

        .. note::
            This method replaces the deprecated :py:meth:`.get_machine`
            method. To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

        Parameters
        ----------
        x : int
        y : int
            The coordinates of the chip from which system exploration should
            begin, by default (255, 255). Most users will not need to change
            these parameters.

        Returns
        -------
        :py:class:`.SystemInfo`
            An enhanced :py:class:`dict` object {(x, y):
            :py:class:`.ChipInfo`, ...} with a number of utility methods for
            accessing higher-level system information.
        """
        # A quick way of getting a list of working chips
        p2p_tables = self.get_p2p_routing_table(x, y)

        # Calculate the extent of the system
        max_x = max(x_ for (x_, y_), r in iteritems(p2p_tables)
                    if r != consts.P2PTableEntry.none)
        max_y = max(y_ for (x_, y_), r in iteritems(p2p_tables)
                    if r != consts.P2PTableEntry.none)

        sys_info = SystemInfo(max_x + 1, max_y + 1)
        for (x, y), p2p_route in iteritems(p2p_tables):
            if p2p_route != consts.P2PTableEntry.none:
                try:
                    sys_info[(x, y)] = self.get_chip_info(x, y)
                except SCPError:
                    # The chip was listed in the P2P table but is not
                    # responding. Assume it is dead and don't include it in
                    # the info returned.
                    pass

        return sys_info
python
def get_system_info(self, x=255, y=255):
        """Discover the integrity and resource availability of a whole
        SpiNNaker system.

        This command performs :py:meth:`.get_chip_info` on all working chips
        in the system returning an enhanced :py:class:`dict`
        (:py:class:`.SystemInfo`) containing a look-up from chip coordinate
        to :py:class:`.ChipInfo`. In addition to standard dictionary
        functionality, :py:class:`.SystemInfo` provides a number of
        convenience methods, which allow convenient iteration over various
        aspects of the information stored.

        .. note::
            This method replaces the deprecated :py:meth:`.get_machine`
            method. To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

        Parameters
        ----------
        x : int
        y : int
            The coordinates of the chip from which system exploration should
            begin, by default (255, 255). Most users will not need to change
            these parameters.

        Returns
        -------
        :py:class:`.SystemInfo`
            An enhanced :py:class:`dict` object {(x, y):
            :py:class:`.ChipInfo`, ...} with a number of utility methods for
            accessing higher-level system information.
        """
        # A quick way of getting a list of working chips
        p2p_tables = self.get_p2p_routing_table(x, y)

        # Calculate the extent of the system
        max_x = max(x_ for (x_, y_), r in iteritems(p2p_tables)
                    if r != consts.P2PTableEntry.none)
        max_y = max(y_ for (x_, y_), r in iteritems(p2p_tables)
                    if r != consts.P2PTableEntry.none)

        sys_info = SystemInfo(max_x + 1, max_y + 1)
        for (x, y), p2p_route in iteritems(p2p_tables):
            if p2p_route != consts.P2PTableEntry.none:
                try:
                    sys_info[(x, y)] = self.get_chip_info(x, y)
                except SCPError:
                    # The chip was listed in the P2P table but is not
                    # responding. Assume it is dead and don't include it in
                    # the info returned.
                    pass

        return sys_info
[ "def", "get_system_info", "(", "self", ",", "x", "=", "255", ",", "y", "=", "255", ")", ":", "# A quick way of getting a list of working chips", "p2p_tables", "=", "self", ".", "get_p2p_routing_table", "(", "x", ",", "y", ")", "# Calculate the extent of the system", "max_x", "=", "max", "(", "x_", "for", "(", "x_", ",", "y_", ")", ",", "r", "in", "iteritems", "(", "p2p_tables", ")", "if", "r", "!=", "consts", ".", "P2PTableEntry", ".", "none", ")", "max_y", "=", "max", "(", "y_", "for", "(", "x_", ",", "y_", ")", ",", "r", "in", "iteritems", "(", "p2p_tables", ")", "if", "r", "!=", "consts", ".", "P2PTableEntry", ".", "none", ")", "sys_info", "=", "SystemInfo", "(", "max_x", "+", "1", ",", "max_y", "+", "1", ")", "for", "(", "x", ",", "y", ")", ",", "p2p_route", "in", "iteritems", "(", "p2p_tables", ")", ":", "if", "p2p_route", "!=", "consts", ".", "P2PTableEntry", ".", "none", ":", "try", ":", "sys_info", "[", "(", "x", ",", "y", ")", "]", "=", "self", ".", "get_chip_info", "(", "x", ",", "y", ")", "except", "SCPError", ":", "# The chip was listed in the P2P table but is not", "# responding. Assume it is dead and don't include it in", "# the info returned.", "pass", "return", "sys_info" ]
Discover the integrity and resource availability of a whole
        SpiNNaker system.

        This command performs :py:meth:`.get_chip_info` on all working chips
        in the system returning an enhanced :py:class:`dict`
        (:py:class:`.SystemInfo`) containing a look-up from chip coordinate
        to :py:class:`.ChipInfo`. In addition to standard dictionary
        functionality, :py:class:`.SystemInfo` provides a number of
        convenience methods, which allow convenient iteration over various
        aspects of the information stored.

        .. note::
            This method replaces the deprecated :py:meth:`.get_machine`
            method. To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

        Parameters
        ----------
        x : int
        y : int
            The coordinates of the chip from which system exploration should
            begin, by default (255, 255). Most users will not need to change
            these parameters.

        Returns
        -------
        :py:class:`.SystemInfo`
            An enhanced :py:class:`dict` object {(x, y):
            :py:class:`.ChipInfo`, ...} with a number of utility methods for
            accessing higher-level system information.
[ "Discover", "the", "integrity", "and", "resource", "availability", "of", "a", "whole", "SpiNNaker", "system", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1869-L1928
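Beyond the `build_machine` recipe shown in the docstring, the returned `SystemInfo` can be queried directly as a dict; a sketch assuming `mc` again:

    sys_info = mc.get_system_info()
    print("system size: {}x{}".format(sys_info.width, sys_info.height))
    for (x, y), chip in sys_info.items():
        if chip.num_cores < 18:
            print("({}, {}) has only {} working cores".format(
                x, y, chip.num_cores))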
project-rig/rig
rig/machine_control/machine_controller.py
MachineController.get_machine
def get_machine(self, x=255, y=255, default_num_cores=18):
        """**Deprecated.** Probe the machine to discover which cores and
        links are working.

        .. warning::
            This method has been deprecated in favour of
            :py:meth:`.get_system_info` for getting information about the
            general resources available in a SpiNNaker machine. This method
            may be removed in the future.

            To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

            This method also historically used the size of the SDRAM and
            SRAM heaps to set the respective resource values in the
            :py:class:`~rig.place_and_route.Machine`.
            :py:meth:`.get_machine` has since changed to report the size of
            the largest free block in the SDRAM and SRAM heaps on each chip.
            Most applications should not be negatively impacted by this
            change.

        .. note::
            The chip (x, y) supplied is the one where the search for working
            chips begins. Selecting anything other than (255, 255), the
            default, may be useful when debugging very broken machines.

        Parameters
        ----------
        default_num_cores : int
            This argument is ignored.

        Returns
        -------
        :py:class:`~rig.place_and_route.Machine`
            This Machine will include all cores reported as working by the
            system software with the following resources defined:

            :py:data:`~rig.place_and_route.Cores`
                Number of working cores on each chip (including the monitor
                core, any cores already running applications and idle cores).

            :py:data:`~rig.place_and_route.SDRAM`
                The size of the largest free block of SDRAM on the heap. This
                gives a conservative measure of how much SDRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).

            :py:data:`~rig.place_and_route.SRAM`
                The size of the largest free block of SRAM on the heap. This
                gives a conservative measure of how much SRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).
        """
        warnings.warn(
            "MachineController.get_machine() is deprecated, "
            "see get_system_info().", DeprecationWarning)

        from rig.place_and_route.utils import build_machine

        system_info = self.get_system_info(x, y)

        return build_machine(system_info)
python
def get_machine(self, x=255, y=255, default_num_cores=18):
        """**Deprecated.** Probe the machine to discover which cores and
        links are working.

        .. warning::
            This method has been deprecated in favour of
            :py:meth:`.get_system_info` for getting information about the
            general resources available in a SpiNNaker machine. This method
            may be removed in the future.

            To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

            This method also historically used the size of the SDRAM and
            SRAM heaps to set the respective resource values in the
            :py:class:`~rig.place_and_route.Machine`.
            :py:meth:`.get_machine` has since changed to report the size of
            the largest free block in the SDRAM and SRAM heaps on each chip.
            Most applications should not be negatively impacted by this
            change.

        .. note::
            The chip (x, y) supplied is the one where the search for working
            chips begins. Selecting anything other than (255, 255), the
            default, may be useful when debugging very broken machines.

        Parameters
        ----------
        default_num_cores : int
            This argument is ignored.

        Returns
        -------
        :py:class:`~rig.place_and_route.Machine`
            This Machine will include all cores reported as working by the
            system software with the following resources defined:

            :py:data:`~rig.place_and_route.Cores`
                Number of working cores on each chip (including the monitor
                core, any cores already running applications and idle cores).

            :py:data:`~rig.place_and_route.SDRAM`
                The size of the largest free block of SDRAM on the heap. This
                gives a conservative measure of how much SDRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).

            :py:data:`~rig.place_and_route.SRAM`
                The size of the largest free block of SRAM on the heap. This
                gives a conservative measure of how much SRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).
        """
        warnings.warn(
            "MachineController.get_machine() is deprecated, "
            "see get_system_info().", DeprecationWarning)

        from rig.place_and_route.utils import build_machine

        system_info = self.get_system_info(x, y)

        return build_machine(system_info)
[ "def", "get_machine", "(", "self", ",", "x", "=", "255", ",", "y", "=", "255", ",", "default_num_cores", "=", "18", ")", ":", "warnings", ".", "warn", "(", "\"MachineController.get_machine() is deprecated, \"", "\"see get_system_info().\"", ",", "DeprecationWarning", ")", "from", "rig", ".", "place_and_route", ".", "utils", "import", "build_machine", "system_info", "=", "self", ".", "get_system_info", "(", "x", ",", "y", ")", "return", "build_machine", "(", "system_info", ")" ]
**Deprecated.** Probe the machine to discover which cores and
        links are working.

        .. warning::
            This method has been deprecated in favour of
            :py:meth:`.get_system_info` for getting information about the
            general resources available in a SpiNNaker machine. This method
            may be removed in the future.

            To build a :py:class:`~rig.place_and_route.Machine` for
            place-and-route purposes, the
            :py:func:`rig.place_and_route.utils.build_machine` utility
            function may be used with :py:meth:`.get_system_info` like so::

                >>> from rig.place_and_route.utils import build_machine

                >>> sys_info = mc.get_system_info()
                >>> machine = build_machine(sys_info)

            This method also historically used the size of the SDRAM and
            SRAM heaps to set the respective resource values in the
            :py:class:`~rig.place_and_route.Machine`.
            :py:meth:`.get_machine` has since changed to report the size of
            the largest free block in the SDRAM and SRAM heaps on each chip.
            Most applications should not be negatively impacted by this
            change.

        .. note::
            The chip (x, y) supplied is the one where the search for working
            chips begins. Selecting anything other than (255, 255), the
            default, may be useful when debugging very broken machines.

        Parameters
        ----------
        default_num_cores : int
            This argument is ignored.

        Returns
        -------
        :py:class:`~rig.place_and_route.Machine`
            This Machine will include all cores reported as working by the
            system software with the following resources defined:

            :py:data:`~rig.place_and_route.Cores`
                Number of working cores on each chip (including the monitor
                core, any cores already running applications and idle cores).

            :py:data:`~rig.place_and_route.SDRAM`
                The size of the largest free block of SDRAM on the heap. This
                gives a conservative measure of how much SDRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).

            :py:data:`~rig.place_and_route.SRAM`
                The size of the largest free block of SRAM on the heap. This
                gives a conservative measure of how much SRAM is free on a
                given chip (which will underestimate availability if the
                system's memory is highly fragmented).
[ "**", "Deprecated", ".", "**", "Probe", "the", "machine", "to", "discover", "which", "cores", "and", "links", "are", "working", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1930-L1993
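Since the method is deprecated, here is the migration its own docstring recommends, as a two-liner (assuming `mc` as before):

    from rig.place_and_route.utils import build_machine

    machine = build_machine(mc.get_system_info())  # instead of mc.get_machine()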
project-rig/rig
rig/machine_control/machine_controller.py
SystemInfo.ethernet_connected_chips
def ethernet_connected_chips(self): """Iterate over the coordinates of Ethernet connected chips. Yields ------ ((x, y), str) The coordinate and IP address of each Ethernet connected chip in the system. """ for xy, chip_info in six.iteritems(self): if chip_info.ethernet_up: yield (xy, chip_info.ip_address)
python
def ethernet_connected_chips(self): """Iterate over the coordinates of Ethernet connected chips. Yields ------ ((x, y), str) The coordinate and IP address of each Ethernet connected chip in the system. """ for xy, chip_info in six.iteritems(self): if chip_info.ethernet_up: yield (xy, chip_info.ip_address)
[ "def", "ethernet_connected_chips", "(", "self", ")", ":", "for", "xy", ",", "chip_info", "in", "six", ".", "iteritems", "(", "self", ")", ":", "if", "chip_info", ".", "ethernet_up", ":", "yield", "(", "xy", ",", "chip_info", ".", "ip_address", ")" ]
Iterate over the coordinates of Ethernet connected chips. Yields ------ ((x, y), str) The coordinate and IP address of each Ethernet connected chip in the system.
[ "Iterate", "over", "the", "coordinates", "of", "Ethernet", "connected", "chips", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2146-L2157
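Given a `sys_info` previously obtained from `get_system_info` (an assumption carried through the sketches below), listing the boards' Ethernet attachments:

    for (x, y), ip in sys_info.ethernet_connected_chips():
        print("Ethernet chip ({}, {}) at {}".format(x, y, ip))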
project-rig/rig
rig/machine_control/machine_controller.py
SystemInfo.dead_chips
def dead_chips(self): """Generate the coordinates of all dead chips. Yields ------ (x, y) The coordinate of a dead chip. """ for x in range(self.width): for y in range(self.height): if (x, y) not in self: yield (x, y)
python
def dead_chips(self): """Generate the coordinates of all dead chips. Yields ------ (x, y) The coordinate of a dead chip. """ for x in range(self.width): for y in range(self.height): if (x, y) not in self: yield (x, y)
[ "def", "dead_chips", "(", "self", ")", ":", "for", "x", "in", "range", "(", "self", ".", "width", ")", ":", "for", "y", "in", "range", "(", "self", ".", "height", ")", ":", "if", "(", "x", ",", "y", ")", "not", "in", "self", ":", "yield", "(", "x", ",", "y", ")" ]
Generate the coordinates of all dead chips. Yields ------ (x, y) The coordinate of a dead chip.
[ "Generate", "the", "coordinates", "of", "all", "dead", "chips", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2159-L2170
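Dead chips are simply those coordinates absent from the mapping; with the same assumed `sys_info`:

    dead = list(sys_info.dead_chips())
    print("{} dead chips: {}".format(len(dead), dead))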
project-rig/rig
rig/machine_control/machine_controller.py
SystemInfo.links
def links(self): """Generate the coordinates of all working links. Yields ------ (x, y, :py:class:`rig.links.Links`) A working link leaving a chip from the perspective of the chip. For example ``(0, 0, Links.north)`` would be the link going north from chip (0, 0) to chip (0, 1). """ for (x, y), chip_info in iteritems(self): for link in chip_info.working_links: yield (x, y, link)
python
def links(self): """Generate the coordinates of all working links. Yields ------ (x, y, :py:class:`rig.links.Links`) A working link leaving a chip from the perspective of the chip. For example ``(0, 0, Links.north)`` would be the link going north from chip (0, 0) to chip (0, 1). """ for (x, y), chip_info in iteritems(self): for link in chip_info.working_links: yield (x, y, link)
[ "def", "links", "(", "self", ")", ":", "for", "(", "x", ",", "y", ")", ",", "chip_info", "in", "iteritems", "(", "self", ")", ":", "for", "link", "in", "chip_info", ".", "working_links", ":", "yield", "(", "x", ",", "y", ",", "link", ")" ]
Generate the coordinates of all working links. Yields ------ (x, y, :py:class:`rig.links.Links`) A working link leaving a chip from the perspective of the chip. For example ``(0, 0, Links.north)`` would be the link going north from chip (0, 0) to chip (0, 1).
[ "Generate", "the", "coordinates", "of", "all", "working", "links", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2172-L2184
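Membership tests on the working-link set are a common use; `Links` comes from `rig.links`:

    from rig.links import Links

    working = set(sys_info.links())
    print((0, 0, Links.north) in working)  # True if the link is up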
project-rig/rig
rig/machine_control/machine_controller.py
SystemInfo.dead_links
def dead_links(self):
        """Generate the coordinates of all dead links leaving working chips.

        Any link leading to a dead chip will also be included in the list of
        dead links. In non-toroidal SpiNNaker systems (e.g. single SpiNN-5
        boards), links on the periphery of the system will be marked as dead.

        Yields
        ------
        (x, y, :py:class:`rig.links.Links`)
            A dead link leaving a chip from the perspective of the chip. For
            example ``(0, 0, Links.north)`` would be the link going north
            from chip (0, 0) to chip (0, 1).
        """
        for (x, y), chip_info in iteritems(self):
            for link in Links:
                if link not in chip_info.working_links:
                    yield (x, y, link)
python
def dead_links(self):
        """Generate the coordinates of all dead links leaving working chips.

        Any link leading to a dead chip will also be included in the list of
        dead links. In non-toroidal SpiNNaker systems (e.g. single SpiNN-5
        boards), links on the periphery of the system will be marked as dead.

        Yields
        ------
        (x, y, :py:class:`rig.links.Links`)
            A dead link leaving a chip from the perspective of the chip. For
            example ``(0, 0, Links.north)`` would be the link going north
            from chip (0, 0) to chip (0, 1).
        """
        for (x, y), chip_info in iteritems(self):
            for link in Links:
                if link not in chip_info.working_links:
                    yield (x, y, link)
[ "def", "dead_links", "(", "self", ")", ":", "for", "(", "x", ",", "y", ")", ",", "chip_info", "in", "iteritems", "(", "self", ")", ":", "for", "link", "in", "Links", ":", "if", "link", "not", "in", "chip_info", ".", "working_links", ":", "yield", "(", "x", ",", "y", ",", "link", ")" ]
Generate the coordinates of all dead links leaving working chips.

        Any link leading to a dead chip will also be included in the list of
        dead links. In non-toroidal SpiNNaker systems (e.g. single SpiNN-5
        boards), links on the periphery of the system will be marked as dead.

        Yields
        ------
        (x, y, :py:class:`rig.links.Links`)
            A dead link leaving a chip from the perspective of the chip. For
            example ``(0, 0, Links.north)`` would be the link going north
            from chip (0, 0) to chip (0, 1).
[ "Generate", "the", "coordinates", "of", "all", "dead", "links", "leaving", "working", "chips", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2186-L2203
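The complement view, useful when diagnosing a partially broken board (same assumed `sys_info`):

    for x, y, link in sys_info.dead_links():
        print("dead link: ({}, {}) {}".format(x, y, link.name))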
project-rig/rig
rig/machine_control/machine_controller.py
SystemInfo.cores
def cores(self): """Generate the set of all cores in the system. Yields ------ (x, y, p, :py:class:`~rig.machine_control.consts.AppState`) A core in the machine, and its state. Cores related to a specific chip are yielded consecutively in ascending order of core number. """ for (x, y), chip_info in iteritems(self): for p, state in enumerate(chip_info.core_states): yield (x, y, p, state)
python
def cores(self): """Generate the set of all cores in the system. Yields ------ (x, y, p, :py:class:`~rig.machine_control.consts.AppState`) A core in the machine, and its state. Cores related to a specific chip are yielded consecutively in ascending order of core number. """ for (x, y), chip_info in iteritems(self): for p, state in enumerate(chip_info.core_states): yield (x, y, p, state)
[ "def", "cores", "(", "self", ")", ":", "for", "(", "x", ",", "y", ")", ",", "chip_info", "in", "iteritems", "(", "self", ")", ":", "for", "p", ",", "state", "in", "enumerate", "(", "chip_info", ".", "core_states", ")", ":", "yield", "(", "x", ",", "y", ",", "p", ",", "state", ")" ]
Generate the set of all cores in the system. Yields ------ (x, y, p, :py:class:`~rig.machine_control.consts.AppState`) A core in the machine, and its state. Cores related to a specific chip are yielded consecutively in ascending order of core number.
[ "Generate", "the", "set", "of", "all", "cores", "in", "the", "system", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2205-L2216
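Counting idle cores across the machine; `AppState` is the enum referenced in the docstring:

    from rig.machine_control.consts import AppState

    idle = sum(1 for _x, _y, _p, state in sys_info.cores()
               if state == AppState.idle)
    print("{} idle cores".format(idle))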
project-rig/rig
rig/machine_control/machine_controller.py
SlicedMemoryIO.read
def read(self, n_bytes=-1):
        """Read a number of bytes from the memory.

        .. note::
            Reads beyond the specified memory range will be truncated.

        .. note::
            Produces a :py:exc:`.TruncationWarning` if fewer bytes are read
            than requested. These warnings can be converted into exceptions
            using :py:func:`warnings.simplefilter`::

                >>> import warnings
                >>> from rig.machine_control.machine_controller \\
                ...     import TruncationWarning
                >>> warnings.simplefilter('error', TruncationWarning)

        Parameters
        ----------
        n_bytes : int
            A number of bytes to read. If the number of bytes is negative or
            omitted then read all data until the end of the memory region.

        Returns
        -------
        :py:class:`bytes`
            Data read from SpiNNaker as a bytestring.
        """
        # If n_bytes is negative then calculate it as the number of bytes left
        if n_bytes < 0:
            n_bytes = self._end_address - self.address

        # Determine how far to read, then read nothing beyond that point.
        if self.address + n_bytes > self._end_address:
            new_n_bytes = self._end_address - self.address
            warnings.warn("read truncated from {} to {} bytes".format(
                n_bytes, new_n_bytes), TruncationWarning, stacklevel=3)
            n_bytes = new_n_bytes

        if n_bytes <= 0:
            return b''

        # Perform the read and increment the offset
        data = self._parent._perform_read(self.address, n_bytes)
        self._offset += n_bytes
        return data
python
def read(self, n_bytes=-1):
        """Read a number of bytes from the memory.

        .. note::
            Reads beyond the specified memory range will be truncated.

        .. note::
            Produces a :py:exc:`.TruncationWarning` if fewer bytes are read
            than requested. These warnings can be converted into exceptions
            using :py:func:`warnings.simplefilter`::

                >>> import warnings
                >>> from rig.machine_control.machine_controller \\
                ...     import TruncationWarning
                >>> warnings.simplefilter('error', TruncationWarning)

        Parameters
        ----------
        n_bytes : int
            A number of bytes to read. If the number of bytes is negative or
            omitted then read all data until the end of the memory region.

        Returns
        -------
        :py:class:`bytes`
            Data read from SpiNNaker as a bytestring.
        """
        # If n_bytes is negative then calculate it as the number of bytes left
        if n_bytes < 0:
            n_bytes = self._end_address - self.address

        # Determine how far to read, then read nothing beyond that point.
        if self.address + n_bytes > self._end_address:
            new_n_bytes = self._end_address - self.address
            warnings.warn("read truncated from {} to {} bytes".format(
                n_bytes, new_n_bytes), TruncationWarning, stacklevel=3)
            n_bytes = new_n_bytes

        if n_bytes <= 0:
            return b''

        # Perform the read and increment the offset
        data = self._parent._perform_read(self.address, n_bytes)
        self._offset += n_bytes
        return data
[ "def", "read", "(", "self", ",", "n_bytes", "=", "-", "1", ")", ":", "# If n_bytes is negative then calculate it as the number of bytes left", "if", "n_bytes", "<", "0", ":", "n_bytes", "=", "self", ".", "_end_address", "-", "self", ".", "address", "# Determine how far to read, then read nothing beyond that point.", "if", "self", ".", "address", "+", "n_bytes", ">", "self", ".", "_end_address", ":", "new_n_bytes", "=", "self", ".", "_end_address", "-", "self", ".", "address", "warnings", ".", "warn", "(", "\"read truncated from {} to {} bytes\"", ".", "format", "(", "n_bytes", ",", "new_n_bytes", ")", ",", "TruncationWarning", ",", "stacklevel", "=", "3", ")", "n_bytes", "=", "new_n_bytes", "if", "n_bytes", "<=", "0", ":", "return", "b''", "# Perform the read and increment the offset", "data", "=", "self", ".", "_parent", ".", "_perform_read", "(", "self", ".", "address", ",", "n_bytes", ")", "self", ".", "_offset", "+=", "n_bytes", "return", "data" ]
Read a number of bytes from the memory.

        .. note::
            Reads beyond the specified memory range will be truncated.

        .. note::
            Produces a :py:exc:`.TruncationWarning` if fewer bytes are read
            than requested. These warnings can be converted into exceptions
            using :py:func:`warnings.simplefilter`::

                >>> import warnings
                >>> from rig.machine_control.machine_controller \\
                ...     import TruncationWarning
                >>> warnings.simplefilter('error', TruncationWarning)

        Parameters
        ----------
        n_bytes : int
            A number of bytes to read. If the number of bytes is negative or
            omitted then read all data until the end of the memory region.

        Returns
        -------
        :py:class:`bytes`
            Data read from SpiNNaker as a bytestring.
[ "Read", "a", "number", "of", "bytes", "from", "the", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2588-L2632
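A sketch of the truncation behaviour on a sliced file-like view of SDRAM; `sdram_alloc_as_filelike` and slicing of the returned object are assumed from the wider `rig` API, and the sizes are arbitrary:

    import warnings
    from rig.machine_control.machine_controller import TruncationWarning

    mem = mc.sdram_alloc_as_filelike(64, x=0, y=0)  # file-like over 64 bytes
    window = mem[0:16]                              # sliced 16-byte view
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always", TruncationWarning)
        data = window.read(32)  # only 16 bytes available, so truncated
    assert len(data) == 16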