Dataset columns (one row per function):

| column | type | size range |
|---|---|---|
| repository_name | string | 7-55 chars |
| func_path_in_repository | string | 4-223 chars |
| func_name | string | 1-134 chars |
| whole_func_string | string | 75-104k chars |
| language | string (1 class) | |
| func_code_string | string | 75-104k chars |
| func_code_tokens | sequence | 19-28.4k tokens |
| func_documentation_string | string | 1-46.9k chars |
| func_documentation_tokens | sequence | 1-1.97k tokens |
| split_name | string (1 class) | |
| func_code_url | string | 87-315 chars |

project-rig/rig | rig/machine_control/machine_controller.py | SlicedMemoryIO.write | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2635-L2671

def write(self, bytes):
    """Write data to the memory.

    .. note::
        Writes beyond the specified memory range will be truncated and a
        :py:exc:`.TruncationWarning` is produced. These warnings can be
        converted into exceptions using :py:func:`warnings.simplefilter`::

            >>> import warnings
            >>> from rig.machine_control.machine_controller \\
            ...     import TruncationWarning
            >>> warnings.simplefilter('error', TruncationWarning)

    Parameters
    ----------
    bytes : :py:class:`bytes`
        Data to write to the memory as a bytestring.

    Returns
    -------
    int
        Number of bytes written.
    """
    if self.address + len(bytes) > self._end_address:
        n_bytes = self._end_address - self.address
        warnings.warn("write truncated from {} to {} bytes".format(
            len(bytes), n_bytes), TruncationWarning, stacklevel=3)
        bytes = bytes[:n_bytes]

    if len(bytes) == 0:
        return 0

    # Perform the write and increment the offset
    self._parent._perform_write(self.address, bytes)
    self._offset += len(bytes)
    return len(bytes)
"""Write data to the memory.
.. note::
Writes beyond the specified memory range will be truncated and a
:py:exc:`.TruncationWarning` is produced. These warnings can be
converted into exceptions using :py:func:`warnings.simplefilter`::
>>> import warnings
>>> from rig.machine_control.machine_controller \\
... import TruncationWarning
>>> warnings.simplefilter('error', TruncationWarning)
Parameters
----------
bytes : :py:class:`bytes`
Data to write to the memory as a bytestring.
Returns
-------
int
Number of bytes written.
"""
if self.address + len(bytes) > self._end_address:
n_bytes = self._end_address - self.address
warnings.warn("write truncated from {} to {} bytes".format(
len(bytes), n_bytes), TruncationWarning, stacklevel=3)
bytes = bytes[:n_bytes]
if len(bytes) == 0:
return 0
# Perform the write and increment the offset
self._parent._perform_write(self.address, bytes)
self._offset += len(bytes)
return len(bytes) | [
"def",
"write",
"(",
"self",
",",
"bytes",
")",
":",
"if",
"self",
".",
"address",
"+",
"len",
"(",
"bytes",
")",
">",
"self",
".",
"_end_address",
":",
"n_bytes",
"=",
"self",
".",
"_end_address",
"-",
"self",
".",
"address",
"warnings",
".",
"warn",
"(",
"\"write truncated from {} to {} bytes\"",
".",
"format",
"(",
"len",
"(",
"bytes",
")",
",",
"n_bytes",
")",
",",
"TruncationWarning",
",",
"stacklevel",
"=",
"3",
")",
"bytes",
"=",
"bytes",
"[",
":",
"n_bytes",
"]",
"if",
"len",
"(",
"bytes",
")",
"==",
"0",
":",
"return",
"0",
"# Perform the write and increment the offset",
"self",
".",
"_parent",
".",
"_perform_write",
"(",
"self",
".",
"address",
",",
"bytes",
")",
"self",
".",
"_offset",
"+=",
"len",
"(",
"bytes",
")",
"return",
"len",
"(",
"bytes",
")"
] | Write data to the memory.
.. note::
Writes beyond the specified memory range will be truncated and a
:py:exc:`.TruncationWarning` is produced. These warnings can be
converted into exceptions using :py:func:`warnings.simplefilter`::
>>> import warnings
>>> from rig.machine_control.machine_controller \\
... import TruncationWarning
>>> warnings.simplefilter('error', TruncationWarning)
Parameters
----------
bytes : :py:class:`bytes`
Data to write to the memory as a bytestring.
Returns
-------
int
Number of bytes written. | [
"Write",
"data",
"to",
"the",
"memory",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2635-L2671 |

project-rig/rig | rig/machine_control/machine_controller.py | SlicedMemoryIO.seek | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2706-L2735

def seek(self, n_bytes, from_what=os.SEEK_SET):
    """Seek to a new position in the memory region.

    Parameters
    ----------
    n_bytes : int
        Number of bytes to seek.
    from_what : int
        As in the Python standard: `0` seeks from the start of the memory
        region, `1` seeks from the current position and `2` seeks from the
        end of the memory region. For example::

            mem.seek(-1, 2)  # Goes to the last byte in the region
            mem.seek(-5, 1)  # Goes 5 bytes before that point
            mem.seek(0)      # Returns to the start of the region

        Note that `os.SEEK_END`, `os.SEEK_CUR` and `os.SEEK_SET` are also
        valid arguments.
    """
    if from_what == 0:
        self._offset = n_bytes
    elif from_what == 1:
        self._offset += n_bytes
    elif from_what == 2:
        self._offset = (self._end_address - self._start_address) - n_bytes
    else:
        raise ValueError(
            "from_what: can only take values 0 (from start), "
            "1 (from current) or 2 (from end) not {}".format(from_what)
        )
"""Seek to a new position in the memory region.
Parameters
----------
n_bytes : int
Number of bytes to seek.
from_what : int
As in the Python standard: `0` seeks from the start of the memory
region, `1` seeks from the current position and `2` seeks from the
end of the memory region. For example::
mem.seek(-1, 2) # Goes to the last byte in the region
mem.seek(-5, 1) # Goes 5 bytes before that point
mem.seek(0) # Returns to the start of the region
Note that `os.SEEK_END`, `os.SEEK_CUR` and `os.SEEK_SET` are also
valid arguments.
"""
if from_what == 0:
self._offset = n_bytes
elif from_what == 1:
self._offset += n_bytes
elif from_what == 2:
self._offset = (self._end_address - self._start_address) - n_bytes
else:
raise ValueError(
"from_what: can only take values 0 (from start), "
"1 (from current) or 2 (from end) not {}".format(from_what)
) | [
"def",
"seek",
"(",
"self",
",",
"n_bytes",
",",
"from_what",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"if",
"from_what",
"==",
"0",
":",
"self",
".",
"_offset",
"=",
"n_bytes",
"elif",
"from_what",
"==",
"1",
":",
"self",
".",
"_offset",
"+=",
"n_bytes",
"elif",
"from_what",
"==",
"2",
":",
"self",
".",
"_offset",
"=",
"(",
"self",
".",
"_end_address",
"-",
"self",
".",
"_start_address",
")",
"-",
"n_bytes",
"else",
":",
"raise",
"ValueError",
"(",
"\"from_what: can only take values 0 (from start), \"",
"\"1 (from current) or 2 (from end) not {}\"",
".",
"format",
"(",
"from_what",
")",
")"
] | Seek to a new position in the memory region.
Parameters
----------
n_bytes : int
Number of bytes to seek.
from_what : int
As in the Python standard: `0` seeks from the start of the memory
region, `1` seeks from the current position and `2` seeks from the
end of the memory region. For example::
mem.seek(-1, 2) # Goes to the last byte in the region
mem.seek(-5, 1) # Goes 5 bytes before that point
mem.seek(0) # Returns to the start of the region
Note that `os.SEEK_END`, `os.SEEK_CUR` and `os.SEEK_SET` are also
valid arguments. | [
"Seek",
"to",
"a",
"new",
"position",
"in",
"the",
"memory",
"region",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2706-L2735 |
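
The `from_what` values mirror the standard file-object `whence` semantics. A quick, self-contained illustration of the same semantics using a plain `io.BytesIO` buffer (standard library only; rig's memory file-likes behave analogously but address SpiNNaker SDRAM):

import io
import os

buf = io.BytesIO(b"0123456789")
buf.seek(-1, os.SEEK_END)     # position 9, the last byte
print(buf.tell(), buf.read()) # 9 b'9'  (reading moves the position to 10)
buf.seek(-5, os.SEEK_CUR)     # 5 bytes before the current position
print(buf.tell())             # 5
buf.seek(0, os.SEEK_SET)      # back to the start
print(buf.tell())             # 0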

project-rig/rig | rig/machine_control/machine_controller.py | MemoryIO.free | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2810-L2819

def free(self):
    """Free the memory referred to by the file-like; any subsequent
    operations on this file-like or slices of it will fail.
    """
    # Free the memory
    self._machine_controller.sdram_free(self._start_address,
                                        self._x, self._y)

    # Mark as freed
    self._freed = True
"""Free the memory referred to by the file-like, any subsequent
operations on this file-like or slices of it will fail.
"""
# Free the memory
self._machine_controller.sdram_free(self._start_address,
self._x, self._y)
# Mark as freed
self._freed = True | [
"def",
"free",
"(",
"self",
")",
":",
"# Free the memory",
"self",
".",
"_machine_controller",
".",
"sdram_free",
"(",
"self",
".",
"_start_address",
",",
"self",
".",
"_x",
",",
"self",
".",
"_y",
")",
"# Mark as freed",
"self",
".",
"_freed",
"=",
"True"
] | Free the memory referred to by the file-like, any subsequent
operations on this file-like or slices of it will fail. | [
"Free",
"the",
"memory",
"referred",
"to",
"by",
"the",
"file",
"-",
"like",
"any",
"subsequent",
"operations",
"on",
"this",
"file",
"-",
"like",
"or",
"slices",
"of",
"it",
"will",
"fail",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2810-L2819 |

project-rig/rig | rig/machine_control/machine_controller.py | MemoryIO._perform_read | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2822-L2824

def _perform_read(self, addr, size):
    """Perform a read using the machine controller."""
    return self._machine_controller.read(addr, size, self._x, self._y, 0)
"""Perform a read using the machine controller."""
return self._machine_controller.read(addr, size, self._x, self._y, 0) | [
"def",
"_perform_read",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"return",
"self",
".",
"_machine_controller",
".",
"read",
"(",
"addr",
",",
"size",
",",
"self",
".",
"_x",
",",
"self",
".",
"_y",
",",
"0",
")"
] | Perform a read using the machine controller. | [
"Perform",
"a",
"read",
"using",
"the",
"machine",
"controller",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2822-L2824 |

project-rig/rig | rig/machine_control/machine_controller.py | MemoryIO._perform_write | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2827-L2829

def _perform_write(self, addr, data):
    """Perform a write using the machine controller."""
    return self._machine_controller.write(addr, data, self._x, self._y, 0)
"""Perform a write using the machine controller."""
return self._machine_controller.write(addr, data, self._x, self._y, 0) | [
"def",
"_perform_write",
"(",
"self",
",",
"addr",
",",
"data",
")",
":",
"return",
"self",
".",
"_machine_controller",
".",
"write",
"(",
"addr",
",",
"data",
",",
"self",
".",
"_x",
",",
"self",
".",
"_y",
",",
"0",
")"
] | Perform a write using the machine controller. | [
"Perform",
"a",
"write",
"using",
"the",
"machine",
"controller",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L2827-L2829 |

project-rig/rig | rig/machine_control/packets.py | _unpack_sdp_into_packet | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/packets.py#L237-L260

def _unpack_sdp_into_packet(packet, bytestring):
    """Unpack the SDP header from a bytestring into a packet.

    Parameters
    ----------
    packet : :py:class:`.SDPPacket`
        Packet into which to store the unpacked header.
    bytestring : bytes
        Bytes from which to unpack the header data.
    """
    # Extract the header and the data from the packet
    packet.data = bytestring[10:]  # Everything but the header

    # Unpack the header
    (flags, packet.tag, dest_cpu_port, src_cpu_port,
     packet.dest_y, packet.dest_x,
     packet.src_y, packet.src_x) = struct.unpack_from('<2x8B', bytestring)
    packet.reply_expected = flags == FLAG_REPLY

    # Neaten up the combined VCPU and port fields
    packet.dest_cpu = dest_cpu_port & 0x1f
    packet.dest_port = (dest_cpu_port >> 5)  # & 0x07
    packet.src_cpu = src_cpu_port & 0x1f
    packet.src_port = (src_cpu_port >> 5)
"""Unpack the SDP header from a bytestring into a packet.
Parameters
----------
packet : :py:class:`.SDPPacket`
Packet into which to store the unpacked header.
bytestring : bytes
Bytes from which to unpack the header data.
"""
# Extract the header and the data from the packet
packet.data = bytestring[10:] # Everything but the header
# Unpack the header
(flags, packet.tag, dest_cpu_port, src_cpu_port,
packet.dest_y, packet.dest_x,
packet.src_y, packet.src_x) = struct.unpack_from('<2x8B', bytestring)
packet.reply_expected = flags == FLAG_REPLY
# Neaten up the combined VCPU and port fields
packet.dest_cpu = dest_cpu_port & 0x1f
packet.dest_port = (dest_cpu_port >> 5) # & 0x07
packet.src_cpu = src_cpu_port & 0x1f
packet.src_port = (src_cpu_port >> 5) | [
"def",
"_unpack_sdp_into_packet",
"(",
"packet",
",",
"bytestring",
")",
":",
"# Extract the header and the data from the packet",
"packet",
".",
"data",
"=",
"bytestring",
"[",
"10",
":",
"]",
"# Everything but the header",
"# Unpack the header",
"(",
"flags",
",",
"packet",
".",
"tag",
",",
"dest_cpu_port",
",",
"src_cpu_port",
",",
"packet",
".",
"dest_y",
",",
"packet",
".",
"dest_x",
",",
"packet",
".",
"src_y",
",",
"packet",
".",
"src_x",
")",
"=",
"struct",
".",
"unpack_from",
"(",
"'<2x8B'",
",",
"bytestring",
")",
"packet",
".",
"reply_expected",
"=",
"flags",
"==",
"FLAG_REPLY",
"# Neaten up the combined VCPU and port fields",
"packet",
".",
"dest_cpu",
"=",
"dest_cpu_port",
"&",
"0x1f",
"packet",
".",
"dest_port",
"=",
"(",
"dest_cpu_port",
">>",
"5",
")",
"# & 0x07",
"packet",
".",
"src_cpu",
"=",
"src_cpu_port",
"&",
"0x1f",
"packet",
".",
"src_port",
"=",
"(",
"src_cpu_port",
">>",
"5",
")"
] | Unpack the SDP header from a bytestring into a packet.
Parameters
----------
packet : :py:class:`.SDPPacket`
Packet into which to store the unpacked header.
bytestring : bytes
Bytes from which to unpack the header data. | [
"Unpack",
"the",
"SDP",
"header",
"from",
"a",
"bytestring",
"into",
"a",
"packet",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/packets.py#L237-L260 |
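
For reference, a self-contained sketch of the 10-byte header layout this function expects (`'<2x8B'`: two padding bytes followed by eight unsigned bytes, with the CPU number and port sharing one byte). The field values below are made up purely for illustration:

import struct

# Build a fake 10-byte SDP header followed by a 4-byte payload.
flags, tag = 0x87, 0xFF
dest_cpu_port = (1 << 5) | 3   # port 1, virtual CPU 3
src_cpu_port = (7 << 5) | 0    # port 7, virtual CPU 0
dest_y, dest_x, src_y, src_x = 1, 2, 255, 255
header = struct.pack('<2x8B', flags, tag, dest_cpu_port, src_cpu_port,
                     dest_y, dest_x, src_y, src_x)
bytestring = header + b'\x01\x02\x03\x04'

fields = struct.unpack_from('<2x8B', bytestring)
print(fields)                              # (135, 255, 35, 224, 1, 2, 255, 255)
print(bytestring[10:])                     # b'\x01\x02\x03\x04' -- the payload
print(fields[2] & 0x1f, fields[2] >> 5)    # dest_cpu=3, dest_port=1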

project-rig/rig | rig/machine_control/packets.py | SCPPacket.packed_data | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/packets.py#L208-L222

def packed_data(self):
    """Pack the data for the SCP packet."""
    # Pack the header
    scp_header = struct.pack("<2H", self.cmd_rc, self.seq)

    # Potential loop intentionally unrolled
    if self.arg1 is not None:
        scp_header += struct.pack('<I', self.arg1)
    if self.arg2 is not None:
        scp_header += struct.pack('<I', self.arg2)
    if self.arg3 is not None:
        scp_header += struct.pack('<I', self.arg3)

    # Return the SCP header and the rest of the data
    return scp_header + self.data
"""Pack the data for the SCP packet."""
# Pack the header
scp_header = struct.pack("<2H", self.cmd_rc, self.seq)
# Potential loop intentionally unrolled
if self.arg1 is not None:
scp_header += struct.pack('<I', self.arg1)
if self.arg2 is not None:
scp_header += struct.pack('<I', self.arg2)
if self.arg3 is not None:
scp_header += struct.pack('<I', self.arg3)
# Return the SCP header and the rest of the data
return scp_header + self.data | [
"def",
"packed_data",
"(",
"self",
")",
":",
"# Pack the header",
"scp_header",
"=",
"struct",
".",
"pack",
"(",
"\"<2H\"",
",",
"self",
".",
"cmd_rc",
",",
"self",
".",
"seq",
")",
"# Potential loop intentionally unrolled",
"if",
"self",
".",
"arg1",
"is",
"not",
"None",
":",
"scp_header",
"+=",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"self",
".",
"arg1",
")",
"if",
"self",
".",
"arg2",
"is",
"not",
"None",
":",
"scp_header",
"+=",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"self",
".",
"arg2",
")",
"if",
"self",
".",
"arg3",
"is",
"not",
"None",
":",
"scp_header",
"+=",
"struct",
".",
"pack",
"(",
"'<I'",
",",
"self",
".",
"arg3",
")",
"# Return the SCP header and the rest of the data",
"return",
"scp_header",
"+",
"self",
".",
"data"
] | Pack the data for the SCP packet. | [
"Pack",
"the",
"data",
"for",
"the",
"SCP",
"packet",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/packets.py#L208-L222 |

Parsely/probably | probably/hashfunctions.py | hash64 | python | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hashfunctions.py#L8-L17

def hash64(key, seed):
    """
    Wrapper around mmh3.hash64 to get us a single 64-bit value.

    This also does the extra work of ensuring that we always treat the
    returned values as big-endian unsigned long, like smhasher used to
    do.
    """
    hash_val = mmh3.hash64(key, seed)[0]
    return struct.unpack('>Q', struct.pack('q', hash_val))[0]
"""
Wrapper around mmh3.hash64 to get us single 64-bit value.
This also does the extra work of ensuring that we always treat the
returned values as big-endian unsigned long, like smhasher used to
do.
"""
hash_val = mmh3.hash64(key, seed)[0]
return struct.unpack('>Q', struct.pack('q', hash_val))[0] | [
"def",
"hash64",
"(",
"key",
",",
"seed",
")",
":",
"hash_val",
"=",
"mmh3",
".",
"hash64",
"(",
"key",
",",
"seed",
")",
"[",
"0",
"]",
"return",
"struct",
".",
"unpack",
"(",
"'>Q'",
",",
"struct",
".",
"pack",
"(",
"'q'",
",",
"hash_val",
")",
")",
"[",
"0",
"]"
] | Wrapper around mmh3.hash64 to get us single 64-bit value.
This also does the extra work of ensuring that we always treat the
returned values as big-endian unsigned long, like smhasher used to
do. | [
"Wrapper",
"around",
"mmh3",
".",
"hash64",
"to",
"get",
"us",
"single",
"64",
"-",
"bit",
"value",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hashfunctions.py#L8-L17 |
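
The struct round-trip converts the signed value mmh3 returns into an unsigned one and, on a little-endian host, also byte-swaps it so the result matches the big-endian values smhasher produced. A small standard-library-only illustration of just that conversion (no mmh3 required):

import struct

hash_val = -2                                   # a signed value such as mmh3 might return
native = struct.pack('q', hash_val)             # native-endian signed 64-bit bytes
big_unsigned = struct.unpack('>Q', native)[0]   # reread as big-endian unsigned
plain_unsigned = hash_val % 2**64               # two's-complement reinterpretation only
# On a little-endian host big_unsigned differs from plain_unsigned because the
# bytes are also swapped; that swap is what reproduces smhasher's output.
print(big_unsigned, plain_unsigned)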

Parsely/probably | probably/hashfunctions.py | generate_hashfunctions | python | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hashfunctions.py#L20-L38

def generate_hashfunctions(nbr_bits, nbr_slices):
    """Generate a set of hash functions.

    The core method is a 64-bit murmur3 hash which has a good distribution.
    Multiple hashes can be generated using the previous hash value as a seed.
    """
    def _make_hashfuncs(key):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        else:
            key = str(key)
        rval = []
        current_hash = 0
        for i in range(nbr_slices):
            seed = current_hash
            current_hash = hash64(key, seed)
            rval.append(current_hash % nbr_bits)
        return rval
    return _make_hashfuncs
"""Generate a set of hash functions.
The core method is a 64-bit murmur3 hash which has a good distribution.
Multiple hashes can be generate using the previous hash value as a seed.
"""
def _make_hashfuncs(key):
if isinstance(key, text_type):
key = key.encode('utf-8')
else:
key = str(key)
rval = []
current_hash = 0
for i in range(nbr_slices):
seed = current_hash
current_hash = hash64(key, seed)
rval.append(current_hash % nbr_bits)
return rval
return _make_hashfuncs | [
"def",
"generate_hashfunctions",
"(",
"nbr_bits",
",",
"nbr_slices",
")",
":",
"def",
"_make_hashfuncs",
"(",
"key",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"text_type",
")",
":",
"key",
"=",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"key",
"=",
"str",
"(",
"key",
")",
"rval",
"=",
"[",
"]",
"current_hash",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"nbr_slices",
")",
":",
"seed",
"=",
"current_hash",
"current_hash",
"=",
"hash64",
"(",
"key",
",",
"seed",
")",
"rval",
".",
"append",
"(",
"current_hash",
"%",
"nbr_bits",
")",
"return",
"rval",
"return",
"_make_hashfuncs"
] | Generate a set of hash functions.
The core method is a 64-bit murmur3 hash which has a good distribution.
Multiple hashes can be generate using the previous hash value as a seed. | [
"Generate",
"a",
"set",
"of",
"hash",
"functions",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/hashfunctions.py#L20-L38 |
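
The same chained-seed pattern, sketched with the standard library's hashlib instead of mmh3 (purely illustrative; the real module uses the murmur3-based hash64 above, and the function and parameter names here are made up):

import hashlib

def make_hashfuncs(nbr_bits, nbr_slices):
    def hashes(key):
        if isinstance(key, str):
            key = key.encode('utf-8')
        indices = []
        current = 0
        for _ in range(nbr_slices):
            # Each round is seeded (via the salt) with the previous hash value.
            digest = hashlib.blake2b(key, digest_size=8,
                                     salt=current.to_bytes(8, 'big')).digest()
            current = int.from_bytes(digest, 'big')
            indices.append(current % nbr_bits)
        return indices
    return hashes

bloom_indices = make_hashfuncs(nbr_bits=1024, nbr_slices=4)
print(bloom_indices("example-key"))   # four bit positions in a 1024-bit filter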

project-rig/rig | rig/utils/contexts.py | ContextMixin.get_context_arguments | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/contexts.py#L83-L88

def get_context_arguments(self):
    """Return a dictionary containing the current context arguments."""
    cargs = {}
    for context in self.__context_stack:
        cargs.update(context.context_arguments)
    return cargs
"""Return a dictionary containing the current context arguments."""
cargs = {}
for context in self.__context_stack:
cargs.update(context.context_arguments)
return cargs | [
"def",
"get_context_arguments",
"(",
"self",
")",
":",
"cargs",
"=",
"{",
"}",
"for",
"context",
"in",
"self",
".",
"__context_stack",
":",
"cargs",
".",
"update",
"(",
"context",
".",
"context_arguments",
")",
"return",
"cargs"
] | Return a dictionary containing the current context arguments. | [
"Return",
"a",
"dictionary",
"containing",
"the",
"current",
"context",
"arguments",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/contexts.py#L83-L88 |

project-rig/rig | rig/utils/contexts.py | ContextMixin.use_contextual_arguments | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/contexts.py#L91-L175

def use_contextual_arguments(**kw_only_args_defaults):
    """Decorator function which allows the wrapped function to accept
    arguments not specified in the call from the context.

    Arguments whose default value is set to the Required sentinel must be
    supplied either by the context or the caller and a TypeError is raised
    if not.

    .. warning::
        Due to a limitation in the Python 2 version of the introspection
        library, this decorator only works with functions which do not have
        any keyword-only arguments. For example this function cannot be
        handled::

            def f(*args, kw_only_arg=123)

        Note, however, that the decorated function *can* accept and pass-on
        keyword-only arguments specified via `**kw_only_args_defaults`.

    Parameters
    ----------
    **kw_only_args_defaults : {name: default, ...}
        Specifies the set of keyword-only arguments (and their default
        values) accepted by the underlying function. These will be passed
        via the kwargs to the underlying function, e.g.::

            @ContextMixin.use_contextual_arguments(kw_only_arg=123)
            def f(self, **kwargs):
                kw_only_arg = kwargs.pop("kw_only_arg")

            # Wrapped function can be called with keyword-only-arguments:
            spam.f(*[], kw_only_arg=12)

        Keyword-only arguments can be made mandatory by setting their
        default value to the Required sentinel.
    """
    def decorator(f):
        # Extract any positional and positional-and-key-word arguments
        # which may be set.
        arg_names, varargs, keywords, defaults = inspect.getargspec(f)

        # Sanity check: non-keyword-only arguments shouldn't be present in
        # the keyword-only-arguments list.
        assert set(keywords or {}).isdisjoint(set(kw_only_args_defaults))

        # Fully populate the default argument values list, setting the
        # default for mandatory arguments to the 'Required' sentinel.
        if defaults is None:
            defaults = []
        defaults = (([Required] * (len(arg_names) - len(defaults))) +
                    list(defaults))

        # Update the docstring signature to include the specified arguments
        @add_signature_to_docstring(f, kw_only_args=kw_only_args_defaults)
        @functools.wraps(f)
        def f_(self, *args, **kwargs):
            # Construct a dictionary of arguments (and their default
            # values) which may potentially be set by the context. This
            # includes any non-supplied positional arguments and any
            # keyword-only arguments.
            new_kwargs = dict(zip(arg_names[1 + len(args):],
                                  defaults[1 + len(args):]))
            new_kwargs.update(kw_only_args_defaults)

            # Values from the context take priority over default argument
            # values.
            context = self.get_context_arguments()
            for name, val in iteritems(context):
                if name in new_kwargs:
                    new_kwargs[name] = val

            # Finally, the values actually passed to the function call take
            # ultimate priority.
            new_kwargs.update(kwargs)

            # Raise a TypeError if any `Required` sentinels remain
            for k, v in iteritems(new_kwargs):
                if v is Required:
                    raise TypeError(
                        "{!s}: missing argument {}".format(f.__name__, k))

            return f(self, *args, **new_kwargs)
        return f_
    return decorator
"""Decorator function which allows the wrapped function to accept
arguments not specified in the call from the context.
Arguments whose default value is set to the Required sentinel must be
supplied either by the context or the caller and a TypeError is raised
if not.
.. warning::
Due to a limitation in the Python 2 version of the introspection
library, this decorator only works with functions which do not have
any keyword-only arguments. For example this function cannot be
handled::
def f(*args, kw_only_arg=123)
Note, however, that the decorated function *can* accept and pass-on
keyword-only arguments specified via `**kw_only_args_defaults`.
Parameters
----------
**kw_only_args_defaults : {name: default, ...}
Specifies the set of keyword-only arguments (and their default
values) accepted by the underlying function. These will be passed
via the kwargs to the underlying function, e.g.::
@ContextMixin.use_contextual_arguments(kw_only_arg=123)
def f(self, **kwargs):
kw_only_arg = kwargs.pop("kw_only_arg")
# Wrapped function can be called with keyword-only-arguments:
spam.f(*[], kw_only_arg=12)
Keyword-only arguments can be made mandatory by setting their
default value to the Required sentinel.
"""
def decorator(f):
# Extract any positional and positional-and-key-word arguments
# which may be set.
arg_names, varargs, keywords, defaults = inspect.getargspec(f)
# Sanity check: non-keyword-only arguments should't be present in
# the keyword-only-arguments list.
assert set(keywords or {}).isdisjoint(set(kw_only_args_defaults))
# Fully populate the default argument values list, setting the
# default for mandatory arguments to the 'Required' sentinel.
if defaults is None:
defaults = []
defaults = (([Required] * (len(arg_names) - len(defaults))) +
list(defaults))
# Update the docstring signature to include the specified arguments
@add_signature_to_docstring(f, kw_only_args=kw_only_args_defaults)
@functools.wraps(f)
def f_(self, *args, **kwargs):
# Construct a dictionary of arguments (and their default
# values) which may potentially be set by the context. This
# includes any non-supplied positional arguments and any
# keyword-only arguments.
new_kwargs = dict(zip(arg_names[1 + len(args):],
defaults[1 + len(args):]))
new_kwargs.update(kw_only_args_defaults)
# Values from the context take priority over default argument
# values.
context = self.get_context_arguments()
for name, val in iteritems(context):
if name in new_kwargs:
new_kwargs[name] = val
# Finally, the values actually pased to the function call take
# ultimate priority.
new_kwargs.update(kwargs)
# Raise a TypeError if any `Required` sentinels remain
for k, v in iteritems(new_kwargs):
if v is Required:
raise TypeError(
"{!s}: missing argument {}".format(f.__name__, k))
return f(self, *args, **new_kwargs)
return f_
return decorator | [
"def",
"use_contextual_arguments",
"(",
"*",
"*",
"kw_only_args_defaults",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"# Extract any positional and positional-and-key-word arguments",
"# which may be set.",
"arg_names",
",",
"varargs",
",",
"keywords",
",",
"defaults",
"=",
"inspect",
".",
"getargspec",
"(",
"f",
")",
"# Sanity check: non-keyword-only arguments should't be present in",
"# the keyword-only-arguments list.",
"assert",
"set",
"(",
"keywords",
"or",
"{",
"}",
")",
".",
"isdisjoint",
"(",
"set",
"(",
"kw_only_args_defaults",
")",
")",
"# Fully populate the default argument values list, setting the",
"# default for mandatory arguments to the 'Required' sentinel.",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"[",
"]",
"defaults",
"=",
"(",
"(",
"[",
"Required",
"]",
"*",
"(",
"len",
"(",
"arg_names",
")",
"-",
"len",
"(",
"defaults",
")",
")",
")",
"+",
"list",
"(",
"defaults",
")",
")",
"# Update the docstring signature to include the specified arguments",
"@",
"add_signature_to_docstring",
"(",
"f",
",",
"kw_only_args",
"=",
"kw_only_args_defaults",
")",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"f_",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Construct a dictionary of arguments (and their default",
"# values) which may potentially be set by the context. This",
"# includes any non-supplied positional arguments and any",
"# keyword-only arguments.",
"new_kwargs",
"=",
"dict",
"(",
"zip",
"(",
"arg_names",
"[",
"1",
"+",
"len",
"(",
"args",
")",
":",
"]",
",",
"defaults",
"[",
"1",
"+",
"len",
"(",
"args",
")",
":",
"]",
")",
")",
"new_kwargs",
".",
"update",
"(",
"kw_only_args_defaults",
")",
"# Values from the context take priority over default argument",
"# values.",
"context",
"=",
"self",
".",
"get_context_arguments",
"(",
")",
"for",
"name",
",",
"val",
"in",
"iteritems",
"(",
"context",
")",
":",
"if",
"name",
"in",
"new_kwargs",
":",
"new_kwargs",
"[",
"name",
"]",
"=",
"val",
"# Finally, the values actually pased to the function call take",
"# ultimate priority.",
"new_kwargs",
".",
"update",
"(",
"kwargs",
")",
"# Raise a TypeError if any `Required` sentinels remain",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"new_kwargs",
")",
":",
"if",
"v",
"is",
"Required",
":",
"raise",
"TypeError",
"(",
"\"{!s}: missing argument {}\"",
".",
"format",
"(",
"f",
".",
"__name__",
",",
"k",
")",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"new_kwargs",
")",
"return",
"f_",
"return",
"decorator"
] | Decorator function which allows the wrapped function to accept
arguments not specified in the call from the context.
Arguments whose default value is set to the Required sentinel must be
supplied either by the context or the caller and a TypeError is raised
if not.
.. warning::
Due to a limitation in the Python 2 version of the introspection
library, this decorator only works with functions which do not have
any keyword-only arguments. For example this function cannot be
handled::
def f(*args, kw_only_arg=123)
Note, however, that the decorated function *can* accept and pass-on
keyword-only arguments specified via `**kw_only_args_defaults`.
Parameters
----------
**kw_only_args_defaults : {name: default, ...}
Specifies the set of keyword-only arguments (and their default
values) accepted by the underlying function. These will be passed
via the kwargs to the underlying function, e.g.::
@ContextMixin.use_contextual_arguments(kw_only_arg=123)
def f(self, **kwargs):
kw_only_arg = kwargs.pop("kw_only_arg")
# Wrapped function can be called with keyword-only-arguments:
spam.f(*[], kw_only_arg=12)
Keyword-only arguments can be made mandatory by setting their
default value to the Required sentinel. | [
"Decorator",
"function",
"which",
"allows",
"the",
"wrapped",
"function",
"to",
"accept",
"arguments",
"not",
"specified",
"in",
"the",
"call",
"from",
"the",
"context",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/utils/contexts.py#L91-L175 |
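
A simplified, self-contained sketch of the same idea (not the rig implementation: it skips the Required sentinel, docstring rewriting and Python 2 support, and the class and method names are invented) showing how a context stack can fill in missing keyword arguments:

import functools
import inspect
from contextlib import contextmanager

class ContextMixin(object):
    def __init__(self):
        self._context_stack = []

    @contextmanager
    def get_new_context(self, **kwargs):
        # Push a dict of default arguments; pop it when the with-block ends.
        self._context_stack.append(kwargs)
        try:
            yield
        finally:
            self._context_stack.pop()

    def _context_arguments(self):
        merged = {}
        for ctx in self._context_stack:
            merged.update(ctx)
        return merged

def use_contextual_arguments(f):
    sig = inspect.signature(f)
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        bound = sig.bind_partial(self, *args, **kwargs)
        # Fill any parameter the caller did not supply from the context.
        for name, value in self._context_arguments().items():
            if name in sig.parameters and name not in bound.arguments:
                kwargs[name] = value
        return f(self, *args, **kwargs)
    return wrapper

class Controller(ContextMixin):
    @use_contextual_arguments
    def read(self, address, x=0, y=0):
        return (address, x, y)

c = Controller()
with c.get_new_context(x=3, y=4):
    print(c.read(0x1000))         # (4096, 3, 4) -- x and y filled in from the context
    print(c.read(0x1000, x=1))    # (4096, 1, 4) -- explicit arguments take priority
print(c.read(0x1000))             # (4096, 0, 0) -- outside the context, defaults apply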

project-rig/rig | rig/routing_table/ordered_covering.py | minimise | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L134-L185

def minimise(routing_table, target_length):
    """Reduce the size of a routing table by merging together entries where
    possible and by removing any remaining default routes.

    .. warning::
        The input routing table *must* also include entries which could be
        removed and replaced by default routing.

    .. warning::
        It is assumed that the input routing table is not in any particular
        order and may be reordered into ascending order of generality (number
        of don't cares/Xs in the key-mask) without affecting routing
        correctness. It is also assumed that if this table is unordered it is
        at least orthogonal (i.e., there are no two entries which would match
        the same key) and reorderable.

    .. note::
        If *all* the keys in the table are derived from a single instance
        of :py:class:`~rig.bitfield.BitField` then the table is guaranteed
        to be orthogonal and reorderable.

    .. note::
        Use :py:meth:`~rig.routing_table.expand_entries` to generate an
        orthogonal table and receive warnings if the input table is not
        orthogonal.

    Parameters
    ----------
    routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Routing entries to be merged.
    target_length : int or None
        Target length of the routing table; the minimisation procedure will
        halt once either this target is reached or no further minimisation is
        possible. If None then the table will be made as small as possible.

    Raises
    ------
    MinimisationFailedError
        If the smallest table that can be produced is larger than
        `target_length`.

    Returns
    -------
    [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Reduced routing table entries.
    """
    table, _ = ordered_covering(routing_table, target_length, no_raise=True)
    return remove_default_routes(table, target_length)
"""Reduce the size of a routing table by merging together entries where
possible and by removing any remaining default routes.
.. warning::
The input routing table *must* also include entries which could be
removed and replaced by default routing.
.. warning::
It is assumed that the input routing table is not in any particular
order and may be reordered into ascending order of generality (number
of don't cares/Xs in the key-mask) without affecting routing
correctness. It is also assumed that if this table is unordered it is
at least orthogonal (i.e., there are no two entries which would match
the same key) and reorderable.
.. note::
If *all* the keys in the table are derived from a single instance
of :py:class:`~rig.bitfield.BitField` then the table is guaranteed
to be orthogonal and reorderable.
.. note::
Use :py:meth:`~rig.routing_table.expand_entries` to generate an
orthogonal table and receive warnings if the input table is not
orthogonal.
Parameters
----------
routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing entries to be merged.
target_length : int or None
Target length of the routing table; the minimisation procedure will
halt once either this target is reached or no further minimisation is
possible. If None then the table will be made as small as possible.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length`.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries.
"""
table, _ = ordered_covering(routing_table, target_length, no_raise=True)
return remove_default_routes(table, target_length) | [
"def",
"minimise",
"(",
"routing_table",
",",
"target_length",
")",
":",
"table",
",",
"_",
"=",
"ordered_covering",
"(",
"routing_table",
",",
"target_length",
",",
"no_raise",
"=",
"True",
")",
"return",
"remove_default_routes",
"(",
"table",
",",
"target_length",
")"
] | Reduce the size of a routing table by merging together entries where
possible and by removing any remaining default routes.
.. warning::
The input routing table *must* also include entries which could be
removed and replaced by default routing.
.. warning::
It is assumed that the input routing table is not in any particular
order and may be reordered into ascending order of generality (number
of don't cares/Xs in the key-mask) without affecting routing
correctness. It is also assumed that if this table is unordered it is
at least orthogonal (i.e., there are no two entries which would match
the same key) and reorderable.
.. note::
If *all* the keys in the table are derived from a single instance
of :py:class:`~rig.bitfield.BitField` then the table is guaranteed
to be orthogonal and reorderable.
.. note::
Use :py:meth:`~rig.routing_table.expand_entries` to generate an
orthogonal table and receive warnings if the input table is not
orthogonal.
Parameters
----------
routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing entries to be merged.
target_length : int or None
Target length of the routing table; the minimisation procedure will
halt once either this target is reached or no further minimisation is
possible. If None then the table will be made as small as possible.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length`.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries. | [
"Reduce",
"the",
"size",
"of",
"a",
"routing",
"table",
"by",
"merging",
"together",
"entries",
"where",
"possible",
"and",
"by",
"removing",
"any",
"remaining",
"default",
"routes",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L134-L185 |

project-rig/rig | rig/routing_table/ordered_covering.py | ordered_covering | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L188-L285

def ordered_covering(routing_table, target_length, aliases=dict(),
                     no_raise=False):
    """Reduce the size of a routing table by merging together entries where
    possible.

    .. warning::
        The input routing table *must* also include entries which could be
        removed and replaced by default routing.

    .. warning::
        It is assumed that the input routing table is not in any particular
        order and may be reordered into ascending order of generality (number
        of don't cares/Xs in the key-mask) without affecting routing
        correctness. It is also assumed that if this table is unordered it is
        at least orthogonal (i.e., there are no two entries which would match
        the same key) and reorderable.

    .. note::
        If *all* the keys in the table are derived from a single instance
        of :py:class:`~rig.bitfield.BitField` then the table is guaranteed
        to be orthogonal and reorderable.

    .. note::
        Use :py:meth:`~rig.routing_table.expand_entries` to generate an
        orthogonal table and receive warnings if the input table is not
        orthogonal.

    Parameters
    ----------
    routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Routing entries to be merged.
    target_length : int or None
        Target length of the routing table; the minimisation procedure will
        halt once either this target is reached or no further minimisation is
        possible. If None then the table will be made as small as possible.

    Other Parameters
    ----------------
    aliases : {(key, mask): {(key, mask), ...}, ...}
        Dictionary of which keys and masks in the routing table are
        combinations of other (now removed) keys and masks; this allows us to
        consider only the keys and masks the user actually cares about when
        determining if inserting a new entry will break the correctness of the
        table. This should be supplied when using this method to update an
        already minimised table.
    no_raise : bool
        If False (the default) then an error will be raised if the table
        cannot be minimised to be smaller than `target_length` and
        `target_length` is not None. If True then a table will be returned
        regardless of the size of the final table.

    Raises
    ------
    MinimisationFailedError
        If the smallest table that can be produced is larger than
        `target_length` and `no_raise` is False.

    Returns
    -------
    [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
        Reduced routing table entries.
    {(key, mask): {(key, mask), ...}, ...}
        A new aliases dictionary.
    """
    # Copy the aliases dictionary
    aliases = dict(aliases)

    # Perform an initial sort of the routing table in order of increasing
    # generality.
    routing_table = sorted(
        routing_table,
        key=lambda entry: _get_generality(entry.key, entry.mask)
    )

    while target_length is None or len(routing_table) > target_length:
        # Get the best merge
        merge = _get_best_merge(routing_table, aliases)

        # If there is no merge then stop
        if merge.goodness <= 0:
            break

        # Otherwise apply the merge; this returns a new routing table and a
        # new aliases dictionary.
        routing_table, aliases = merge.apply(aliases)

    # If the table is still too big then raise an error
    if (not no_raise and
            target_length is not None and
            len(routing_table) > target_length):
        raise MinimisationFailedError(target_length, len(routing_table))

    # Return the finished routing table and aliases table
    return routing_table, aliases
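
The core operation in ordered covering is replacing a set of same-route entries with one more general key-mask pair. A self-contained sketch of that merge for plain (key, mask) integer pairs (illustrative only; the library's _Merge class additionally tracks table positions and checks that the new entry does not alias entries it should not cover):

def merge_key_mask(km_pairs):
    """Merge (key, mask) pairs: bits where every entry has mask=1 and agrees
    on the key keep their value; every other bit becomes an X (mask=0)."""
    any_ones = 0                # bits set in at least one key
    all_ones = 0xFFFFFFFF       # bits set in every key
    all_selected = 0xFFFFFFFF   # bits with mask=1 in every entry
    for key, mask in km_pairs:
        any_ones |= key
        all_ones &= key
        all_selected &= mask
    new_xs = any_ones ^ all_ones      # bits on which the keys disagree
    mask = all_selected & ~new_xs     # keep only bits everyone cares about and agrees on
    return all_ones & mask, mask

# Two entries with the same route, differing only in bit 1:
print(merge_key_mask([(0b1010, 0b1111), (0b1000, 0b1111)]))
# (8, 13) -- i.e. key 0b1000 with mask 0b1101: bit 1 has become an X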

project-rig/rig | rig/routing_table/ordered_covering.py | _get_generality | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L288-L302

def _get_generality(key, mask):
    """Count the number of Xs in the key-mask pair.

    For example, there are 32 Xs in ``0x00000000/0x00000000``::

        >>> _get_generality(0x0, 0x0)
        32

    And no Xs in ``0xffffffff/0xffffffff``::

        >>> _get_generality(0xffffffff, 0xffffffff)
        0
    """
    xs = (~key) & (~mask)
    return sum(1 for i in range(32) if xs & (1 << i))
"""Count the number of Xs in the key-mask pair.
For example, there are 32 Xs in ``0x00000000/0x00000000``::
>>> _get_generality(0x0, 0x0)
32
And no Xs in ``0xffffffff/0xffffffff``::
>>> _get_generality(0xffffffff, 0xffffffff)
0
"""
xs = (~key) & (~mask)
return sum(1 for i in range(32) if xs & (1 << i)) | [
"def",
"_get_generality",
"(",
"key",
",",
"mask",
")",
":",
"xs",
"=",
"(",
"~",
"key",
")",
"&",
"(",
"~",
"mask",
")",
"return",
"sum",
"(",
"1",
"for",
"i",
"in",
"range",
"(",
"32",
")",
"if",
"xs",
"&",
"(",
"1",
"<<",
"i",
")",
")"
] | Count the number of Xs in the key-mask pair.
For example, there are 32 Xs in ``0x00000000/0x00000000``::
>>> _get_generality(0x0, 0x0)
32
And no Xs in ``0xffffffff/0xffffffff``::
>>> _get_generality(0xffffffff, 0xffffffff)
0 | [
"Count",
"the",
"number",
"of",
"Xs",
"in",
"the",
"key",
"-",
"mask",
"pair",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L288-L302 |
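
An equivalent one-line popcount over the 32-bit word, for comparison (an X is any bit position where both key and mask are 0):

def generality(key, mask):
    # Bits that are 0 in both key and mask are "don't cares" (Xs).
    return bin(~(key | mask) & 0xFFFFFFFF).count("1")

assert generality(0x0, 0x0) == 32
assert generality(0xFFFFFFFF, 0xFFFFFFFF) == 0
assert generality(0x80000000, 0xFFFF0000) == 16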

project-rig/rig | rig/routing_table/ordered_covering.py | _get_best_merge | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L305-L336

def _get_best_merge(routing_table, aliases):
    """Inspect all possible merges for the routing table and return the merge
    which would combine the greatest number of entries.

    Returns
    -------
    :py:class:`~.Merge`
    """
    # Create an empty merge to start with
    best_merge = _Merge(routing_table)
    best_goodness = 0

    # Look through every merge, discarding those that are no better than the
    # best we currently know about.
    for merge in _get_all_merges(routing_table):
        # If the merge isn't sufficiently good ignore it and move on
        if merge.goodness <= best_goodness:
            continue

        # The merge is refined to remove entries which would either be aliased
        # under other entries or which would cause the aliasing of other
        # entries; we then check if it is better than the current best merge
        # and reject it if it isn't.
        merge = _refine_merge(merge, aliases, min_goodness=best_goodness)
        if merge.goodness > best_goodness:
            # The merge we now have a reference to is better than the best
            # merge that we've previously encountered.
            best_merge = merge
            best_goodness = merge.goodness

    # Return the best merge and the best goodness for the calling method
    return best_merge
"""Inspect all possible merges for the routing table and return the merge
which would combine the greatest number of entries.
Returns
-------
:py:class:`~.Merge`
"""
# Create an empty merge to start with
best_merge = _Merge(routing_table)
best_goodness = 0
# Look through every merge, discarding those that are no better than the
# best we currently know about.
for merge in _get_all_merges(routing_table):
# If the merge isn't sufficiently good ignore it and move on
if merge.goodness <= best_goodness:
continue
# After the merge refines itself to remove entries which would either
# be aliased under other entries or entries which would cause the
# aliasing of other entries we check if it is better than the current
# best merge and reject it if it isn't.
merge = _refine_merge(merge, aliases, min_goodness=best_goodness)
if merge.goodness > best_goodness:
# The merge we now have a reference to is better than the best
# merge that we've previously encountered.
best_merge = merge
best_goodness = merge.goodness
# Return the best merge and the best goodness for the calling method
return best_merge | [
"def",
"_get_best_merge",
"(",
"routing_table",
",",
"aliases",
")",
":",
"# Create an empty merge to start with",
"best_merge",
"=",
"_Merge",
"(",
"routing_table",
")",
"best_goodness",
"=",
"0",
"# Look through every merge, discarding those that are no better than the",
"# best we currently know about.",
"for",
"merge",
"in",
"_get_all_merges",
"(",
"routing_table",
")",
":",
"# If the merge isn't sufficiently good ignore it and move on",
"if",
"merge",
".",
"goodness",
"<=",
"best_goodness",
":",
"continue",
"# After the merge refines itself to remove entries which would either",
"# be aliased under other entries or entries which would cause the",
"# aliasing of other entries we check if it is better than the current",
"# best merge and reject it if it isn't.",
"merge",
"=",
"_refine_merge",
"(",
"merge",
",",
"aliases",
",",
"min_goodness",
"=",
"best_goodness",
")",
"if",
"merge",
".",
"goodness",
">",
"best_goodness",
":",
"# The merge we now have a reference to is better than the best",
"# merge that we've previously encountered.",
"best_merge",
"=",
"merge",
"best_goodness",
"=",
"merge",
".",
"goodness",
"# Return the best merge and the best goodness for the calling method",
"return",
"best_merge"
] | Inspect all possible merges for the routing table and return the merge
which would combine the greatest number of entries.
Returns
-------
:py:class:`~.Merge` | [
"Inspect",
"all",
"possible",
"merges",
"for",
"the",
"routing",
"table",
"and",
"return",
"the",
"merge",
"which",
"would",
"combine",
"the",
"greatest",
"number",
"of",
"entries",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L305-L336 |

project-rig/rig | rig/routing_table/ordered_covering.py | _get_all_merges | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L339-L367

def _get_all_merges(routing_table):
    """Get possible sets of entries to merge.

    Yields
    ------
    :py:class:`~.Merge`
    """
    # Memorise entries that have been considered as part of a merge
    considered_entries = set()

    for i, entry in enumerate(routing_table):
        # If we've already considered this entry then skip
        if i in considered_entries:
            continue

        # Construct a merge by including other routing table entries below
        # this one which have equivalent routes.
        merge = set([i])
        merge.update(
            j for j, other_entry in enumerate(routing_table[i+1:], start=i+1)
            if entry.route == other_entry.route
        )

        # Mark all these entries as considered
        considered_entries.update(merge)

        # If the merge contains multiple entries then yield it
        if len(merge) > 1:
            yield _Merge(routing_table, merge)
"""Get possible sets of entries to merge.
Yields
------
:py:class:`~.Merge`
"""
# Memorise entries that have been considered as part of a merge
considered_entries = set()
for i, entry in enumerate(routing_table):
# If we've already considered this entry then skip
if i in considered_entries:
continue
# Construct a merge by including other routing table entries below this
# one which have equivalent routes.
merge = set([i])
merge.update(
j for j, other_entry in enumerate(routing_table[i+1:], start=i+1)
if entry.route == other_entry.route
)
# Mark all these entries as considered
considered_entries.update(merge)
# If the merge contains multiple entries then yield it
if len(merge) > 1:
yield _Merge(routing_table, merge) | [
"def",
"_get_all_merges",
"(",
"routing_table",
")",
":",
"# Memorise entries that have been considered as part of a merge",
"considered_entries",
"=",
"set",
"(",
")",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"routing_table",
")",
":",
"# If we've already considered this entry then skip",
"if",
"i",
"in",
"considered_entries",
":",
"continue",
"# Construct a merge by including other routing table entries below this",
"# one which have equivalent routes.",
"merge",
"=",
"set",
"(",
"[",
"i",
"]",
")",
"merge",
".",
"update",
"(",
"j",
"for",
"j",
",",
"other_entry",
"in",
"enumerate",
"(",
"routing_table",
"[",
"i",
"+",
"1",
":",
"]",
",",
"start",
"=",
"i",
"+",
"1",
")",
"if",
"entry",
".",
"route",
"==",
"other_entry",
".",
"route",
")",
"# Mark all these entries as considered",
"considered_entries",
".",
"update",
"(",
"merge",
")",
"# If the merge contains multiple entries then yield it",
"if",
"len",
"(",
"merge",
")",
">",
"1",
":",
"yield",
"_Merge",
"(",
"routing_table",
",",
"merge",
")"
] | Get possible sets of entries to merge.
Yields
------
:py:class:`~.Merge` | [
"Get",
"possible",
"sets",
"of",
"entries",
"to",
"merge",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L339-L367 |

project-rig/rig | rig/routing_table/ordered_covering.py | _get_insertion_index | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L370-L402

def _get_insertion_index(routing_table, generality):
    """Determine the index in the routing table where a new entry should be
    inserted.
    """
    # We insert before blocks of equivalent generality, so decrement the given
    # generality.
    generality -= 1

    # Wrapper for _get_generality which accepts a routing entry
    def gg(entry):
        return _get_generality(entry.key, entry.mask)

    # Perform a binary search through the routing table
    bottom = 0
    top = len(routing_table)
    pos = (top - bottom) // 2

    pg = gg(routing_table[pos])
    while pg != generality and bottom < pos < top:
        if pg < generality:
            bottom = pos  # Move up
        else:  # pg > generality
            top = pos  # Move down

        # Compute a new position
        pos = bottom + (top - bottom) // 2
        pg = gg(routing_table[pos])

    while (pos < len(routing_table) and
           gg(routing_table[pos]) <= generality):
        pos += 1

    return pos
"""Determine the index in the routing table where a new entry should be
inserted.
"""
# We insert before blocks of equivalent generality, so decrement the given
# generality.
generality -= 1
# Wrapper for _get_generality which accepts a routing entry
def gg(entry):
return _get_generality(entry.key, entry.mask)
# Perform a binary search through the routing table
bottom = 0
top = len(routing_table)
pos = (top - bottom) // 2
pg = gg(routing_table[pos])
while pg != generality and bottom < pos < top:
if pg < generality:
bottom = pos # Move up
else: # pg > generality
top = pos # Move down
# Compute a new position
pos = bottom + (top - bottom) // 2
pg = gg(routing_table[pos])
while (pos < len(routing_table) and
gg(routing_table[pos]) <= generality):
pos += 1
return pos | [
"def",
"_get_insertion_index",
"(",
"routing_table",
",",
"generality",
")",
":",
"# We insert before blocks of equivalent generality, so decrement the given",
"# generality.",
"generality",
"-=",
"1",
"# Wrapper for _get_generality which accepts a routing entry",
"def",
"gg",
"(",
"entry",
")",
":",
"return",
"_get_generality",
"(",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
")",
"# Perform a binary search through the routing table",
"bottom",
"=",
"0",
"top",
"=",
"len",
"(",
"routing_table",
")",
"pos",
"=",
"(",
"top",
"-",
"bottom",
")",
"//",
"2",
"pg",
"=",
"gg",
"(",
"routing_table",
"[",
"pos",
"]",
")",
"while",
"pg",
"!=",
"generality",
"and",
"bottom",
"<",
"pos",
"<",
"top",
":",
"if",
"pg",
"<",
"generality",
":",
"bottom",
"=",
"pos",
"# Move up",
"else",
":",
"# pg > generality",
"top",
"=",
"pos",
"# Move down",
"# Compute a new position",
"pos",
"=",
"bottom",
"+",
"(",
"top",
"-",
"bottom",
")",
"//",
"2",
"pg",
"=",
"gg",
"(",
"routing_table",
"[",
"pos",
"]",
")",
"while",
"(",
"pos",
"<",
"len",
"(",
"routing_table",
")",
"and",
"gg",
"(",
"routing_table",
"[",
"pos",
"]",
")",
"<=",
"generality",
")",
":",
"pos",
"+=",
"1",
"return",
"pos"
] | Determine the index in the routing table where a new entry should be
inserted. | [
"Determine",
"the",
"index",
"in",
"the",
"routing",
"table",
"where",
"a",
"new",
"entry",
"should",
"be",
"inserted",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L370-L402 |
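Because the table is kept sorted by non-decreasing generality, the hand-rolled binary search above behaves like a left bisection over the generality values. A small sketch under that assumption, using only the standard library rather than the package's code:

import bisect

def insertion_index(generalities, generality):
    # `generalities` holds the generality of each existing entry in
    # non-decreasing order; insert before the first block that is at least
    # as general as the new entry.
    return bisect.bisect_left(generalities, generality)

assert insertion_index([0, 1, 1, 3, 3, 4], 3) == 3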
project-rig/rig | rig/routing_table/ordered_covering.py | _refine_merge | def _refine_merge(merge, aliases, min_goodness):
"""Remove entries from a merge to generate a valid merge which may be
applied to the routing table.
Parameters
----------
merge : :py:class:`~.Merge`
Initial merge to refine.
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
min_goodness : int
Reject merges which are worse than the minimum goodness.
Returns
-------
:py:class:`~.Merge`
Valid merge which may be applied to the routing table.
"""
# Perform the down-check
merge = _refine_downcheck(merge, aliases, min_goodness)
# If the merge is still sufficiently good then continue to refine it.
if merge.goodness > min_goodness:
# Perform the up-check
merge, changed = _refine_upcheck(merge, min_goodness)
if changed and merge.goodness > min_goodness:
# If the up-check removed any entries we need to re-perform the
# down-check; but we do not need to re-perform the up-check as the
# down check can only move the resultant merge nearer the top of
# the routing table.
merge = _refine_downcheck(merge, aliases, min_goodness)
return merge | python | def _refine_merge(merge, aliases, min_goodness):
"""Remove entries from a merge to generate a valid merge which may be
applied to the routing table.
Parameters
----------
merge : :py:class:`~.Merge`
Initial merge to refine.
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
min_goodness : int
Reject merges which are worse than the minimum goodness.
Returns
-------
:py:class:`~.Merge`
Valid merge which may be applied to the routing table.
"""
# Perform the down-check
merge = _refine_downcheck(merge, aliases, min_goodness)
# If the merge is still sufficiently good then continue to refine it.
if merge.goodness > min_goodness:
# Perform the up-check
merge, changed = _refine_upcheck(merge, min_goodness)
if changed and merge.goodness > min_goodness:
# If the up-check removed any entries we need to re-perform the
# down-check; but we do not need to re-perform the up-check as the
# down check can only move the resultant merge nearer the top of
# the routing table.
merge = _refine_downcheck(merge, aliases, min_goodness)
return merge | [
"def",
"_refine_merge",
"(",
"merge",
",",
"aliases",
",",
"min_goodness",
")",
":",
"# Perform the down-check",
"merge",
"=",
"_refine_downcheck",
"(",
"merge",
",",
"aliases",
",",
"min_goodness",
")",
"# If the merge is still sufficiently good then continue to refine it.",
"if",
"merge",
".",
"goodness",
">",
"min_goodness",
":",
"# Perform the up-check",
"merge",
",",
"changed",
"=",
"_refine_upcheck",
"(",
"merge",
",",
"min_goodness",
")",
"if",
"changed",
"and",
"merge",
".",
"goodness",
">",
"min_goodness",
":",
"# If the up-check removed any entries we need to re-perform the",
"# down-check; but we do not need to re-perform the up-check as the",
"# down check can only move the resultant merge nearer the top of",
"# the routing table.",
"merge",
"=",
"_refine_downcheck",
"(",
"merge",
",",
"aliases",
",",
"min_goodness",
")",
"return",
"merge"
] | Remove entries from a merge to generate a valid merge which may be
applied to the routing table.
Parameters
----------
merge : :py:class:`~.Merge`
Initial merge to refine.
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
min_goodness : int
Reject merges which are worse than the minimum goodness.
Returns
-------
:py:class:`~.Merge`
Valid merge which may be applied to the routing table. | [
"Remove",
"entries",
"from",
"a",
"merge",
"to",
"generate",
"a",
"valid",
"merge",
"which",
"may",
"be",
"applied",
"to",
"the",
"routing",
"table",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L516-L550 |
project-rig/rig | rig/routing_table/ordered_covering.py | _refine_upcheck | def _refine_upcheck(merge, min_goodness):
"""Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
"""
# Remove any entries which would be covered by entries above the merge
# position.
changed = False
for i in sorted(merge.entries, reverse=True):
# Get all the entries that are between the entry we're looking at and the
# insertion index of the proposed merged entry. If this entry would be
# covered up by any of them then we remove it from the merge.
entry = merge.routing_table[i]
key, mask = entry.key, entry.mask
if any(intersect(key, mask, other.key, other.mask) for other in
merge.routing_table[i+1:merge.insertion_index]):
# The entry would be partially or wholly covered by another entry,
# remove it from the merge and return a new merge.
merge = _Merge(merge.routing_table, merge.entries - {i})
changed = True
# Check if the merge is sufficiently good
if merge.goodness <= min_goodness:
merge = _Merge(merge.routing_table) # Replace with empty merge
break
# Return the final merge
return merge, changed | python | def _refine_upcheck(merge, min_goodness):
"""Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
"""
# Remove any entries which would be covered by entries above the merge
# position.
changed = False
for i in sorted(merge.entries, reverse=True):
# Get all the entries that are between the entry we're looking at and the
# insertion index of the proposed merged entry. If this entry would be
# covered up by any of them then we remove it from the merge.
entry = merge.routing_table[i]
key, mask = entry.key, entry.mask
if any(intersect(key, mask, other.key, other.mask) for other in
merge.routing_table[i+1:merge.insertion_index]):
# The entry would be partially or wholly covered by another entry,
# remove it from the merge and return a new merge.
merge = _Merge(merge.routing_table, merge.entries - {i})
changed = True
# Check if the merge is sufficiently good
if merge.goodness <= min_goodness:
merge = _Merge(merge.routing_table) # Replace with empty merge
break
# Return the final merge
return merge, changed | [
"def",
"_refine_upcheck",
"(",
"merge",
",",
"min_goodness",
")",
":",
"# Remove any entries which would be covered by entries above the merge",
"# position.",
"changed",
"=",
"False",
"for",
"i",
"in",
"sorted",
"(",
"merge",
".",
"entries",
",",
"reverse",
"=",
"True",
")",
":",
"# Get all the entries that are between the entry we're looking at the",
"# insertion index of the proposed merged index. If this entry would be",
"# covered up by any of them then we remove it from the merge.",
"entry",
"=",
"merge",
".",
"routing_table",
"[",
"i",
"]",
"key",
",",
"mask",
"=",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
"if",
"any",
"(",
"intersect",
"(",
"key",
",",
"mask",
",",
"other",
".",
"key",
",",
"other",
".",
"mask",
")",
"for",
"other",
"in",
"merge",
".",
"routing_table",
"[",
"i",
"+",
"1",
":",
"merge",
".",
"insertion_index",
"]",
")",
":",
"# The entry would be partially or wholly covered by another entry,",
"# remove it from the merge and return a new merge.",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
",",
"merge",
".",
"entries",
"-",
"{",
"i",
"}",
")",
"changed",
"=",
"True",
"# Check if the merge is sufficiently good",
"if",
"merge",
".",
"goodness",
"<=",
"min_goodness",
":",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
")",
"# Replace with empty merge",
"break",
"# Return the final merge",
"return",
"merge",
",",
"changed"
] | Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all. | [
"Remove",
"from",
"the",
"merge",
"any",
"entries",
"which",
"would",
"be",
"covered",
"by",
"entries",
"between",
"their",
"current",
"position",
"and",
"the",
"merge",
"insertion",
"position",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L553-L598 |
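The up-check relies on an `intersect` predicate over key-mask pairs that is used but not defined in the code above. A common formulation of such a test, given here as an assumption rather than the package's exact implementation, is that two pairs intersect exactly when their keys agree on every bit that is significant (non-X) in both:

def keymask_intersect(key_a, mask_a, key_b, mask_b):
    # Bits significant in both entries must carry the same value for some
    # key to be able to match both of them.
    common = mask_a & mask_b
    return (key_a & common) == (key_b & common)

assert keymask_intersect(0b1000, 0b1111, 0b0000, 0b0111)      # 1000 vs X000
assert not keymask_intersect(0b0011, 0b1111, 0b1000, 0b1111)  # 0011 vs 1000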
project-rig/rig | rig/routing_table/ordered_covering.py | _refine_downcheck | def _refine_downcheck(merge, aliases, min_goodness):
"""Prune the merge to avoid it covering up any entries which are below the
merge insertion position.
For example, in the (non-orthogonal) table::
00001 -> N S
00011 -> N S
00100 -> N S
00X00 -> N S
XX1XX -> 3 5
Merging the first four entries would generate the new key-mask ``00XXX``
which would be inserted above the entry with the key-mask ``XX1XX``.
However ``00XXX`` would stop the key ``00110`` from reaching its correct
route, that is ``00110`` would be covered by ``00XXX``. To avoid this one
could just abandon the merge entirely, but a better solution is to attempt
to reduce the merge such that it no longer covers any entries below it.
To do this we first identify the bits that ARE ``X`` s in the merged
key-mask but which are NOT ``X`` s in the entry that we're covering. For
this example this is the 3rd bit. We then look to remove from the merge any
entries which are either ``X`` s in this position OR have the same value
in this bit as the aliased entry. As the 4th entry in the table has an
``X`` in this position we remove it, and as the 3rd entry has a ``1`` we
also remove it. For this example we would then consider merging only the
first two entries, leading to a new key-mask pair of ``000X1`` which can be
safely inserted between ``00X00`` and ``XX1XX``::
00100 -> N S
00X00 -> N S
000X1 -> N S
XX1XX -> 3 5
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
"""
# Operation
# ---------
# While the merge is still better than `min_goodness` we determine which
# entries below it in the table it covers. For each of these covered
# entries we find which bits are Xs in the merged entry and are NOT Xs in
# the covered entry.
#
# For example:
#
# Merged entry: ...0XXX1...
# Covered entry: ...010XX...
# Bits of interest: ^^
# Label used below: mn
#
# NOTE:
# The covered entry may be of lower generality than the prospective
# merged entry if it is contained within the aliases dictionary (e.g.,
# ...010XX... may be part of
# ``aliases = {...XXXXX...: {..., ...010XX..., ...}, ...})``
#
# In this case there are 2 bits of interest highlighted. These are bits in
# the merge entry whose value can be set (by removing entries from the
# merge) to avoid covering the covered entry. Whenever we have multiple
# covered entries we care only about the entries with the fewest number of
# ``settable`` bits because these most constrain which entries we may
# remove from the merge to avoid covering up the lower entry.
#
# NOTE:
# * If there is only 1 ``settable`` bit then we are very constrained in
# terms of which entries must be removed from the merge to avoid
# covering a lower entry.
# * If there are no ``settable`` bits then we cannot possibly avoid
# covering the lower entry - the only correct action is to return an
# empty merge.
#
# Assuming that there were no covered entries without any ``settable`` bits
# (that is ``stringency > 0``) then ``bits_and_vals`` contains pairs of
# bits and boolean values which indicate which values need to be removed
# from which bit positions to avoid covering up lower entries. If the
# example above were the only covered entry then ``bits_and_vals`` would
# contain ``(m, True)`` to indicate that all entries containing Xs or 1s in
# the left-most bit of interest could be removed to avoid the covered entry
# and ``(n, False)`` to indicate that all entries containing Xs or 0s in
# the right-most bit of interest could be removed to avoid covering the
# entry.
#
# NOTE:
# ``bits_and_vals`` consists of a set of options (e.g., we *could* remove
# all entries with Xs or 1s in bit ``m`` *or* we could remove all entries
# with Xs or 0s in bit ``n``, either would resolve the above covering).
#
# To determine which course of action to take we build a dictionary mapping
# each of the pairs in ``bits_and_vals`` to the entries that would need to
# be removed to "set" that bit in the merged entry. For example, we might
# end up with:
#
# options = {(m, True): {1, 4, 5},
# (n, False): {3, 7}}
#
# Indicating that we'd need to remove entries 1, 4 and 5 from the merge to
# "set" the mth bit of the merged entry to 0 or that we'd need to remove
# entries 3 and 7 to set the nth bit of the merged entry to 1.
#
# NOTE:
# The boolean part of the pair indicates which value needs to be removed
# (True -> remove all 1s and Xs; False -> remove all 0s and Xs). If all
# Xs and 1s in a given bit position are removed from a merge then the
# merged entry is guaranteed to have a 0 in the bit position. Vice-versa
# removing all Xs and 0s in a given bit position from a merge will result
# in a merged entry with a 1 in that position.
#
# As we want to make our merges as large as possible we select the smallest
# set of entries to remove from the merge from ``options``.
#
# The whole process is then repeated since:
# * we ignored covered entries with more ``settable`` bits, so there may
# still be covered entries below the merged entry
# * after removing entries from the merge the merged entry is of lower
# generality and is therefore nearer the top of the table so new
# entries may have become covered
# Set of bit positions
all_bits = tuple(1 << i for i in range(32))
# While the merge is still worth considering continue to perform the
# down-check.
while merge.goodness > min_goodness:
covered = list(_get_covered_keys_and_masks(merge, aliases))
# If there are no covered entries (the merge is valid) then break out
# of the loop.
if not covered:
break
# For each covered entry work out which bits are not Xs in its key-mask
# pair but are Xs in the merge key-mask pair. Only
# keep track of the entries which have the fewest bits that we could
# set.
most_stringent = 33 # Not at all stringent
bits_and_vals = set()
for key, mask in covered:
# Get the bit positions where there ISN'T an X in the covered entry
# but there IS an X in the merged entry.
settable = mask & ~merge.mask
# Count the number of settable bits, if this is a more stringent
# constraint than the previous constraint then ensure that we
# record the new stringency and store which bits we need to set to
# meet the constraint.
n_settable = sum(1 for bit in all_bits if bit & settable)
if n_settable <= most_stringent:
if n_settable < most_stringent:
most_stringent = n_settable
bits_and_vals = set()
# Add this settable mask and the required values to the
# settables list.
bits_and_vals.update((bit, not (key & bit)) for bit in
all_bits if bit & settable)
if most_stringent == 0:
# If there are any instances where we could not possibly change a
# bit to avoid aliasing an entry we'll return an empty merge and
# give up.
merge = _Merge(merge.routing_table, set())
break
else:
# Get the smallest number of entries to remove to modify the
# resultant key-mask to avoid covering a lower entry. Prefer to
# modify more significant bits of the key mask.
remove = set() # Entries to remove
for bit, val in sorted(bits_and_vals, reverse=True):
working_remove = set() # Holder for working remove set
for i in merge.entries:
entry = merge.routing_table[i]
if ((not entry.mask & bit) or
(bool(entry.key & bit) is (not val))):
# If the entry has an X in this position then it will
# need to be removed regardless of whether we want to
# set a 0 or a 1 in this position, likewise it will
# need to be removed if it is a 0 and we want a 1 or
# vice-versa.
working_remove.add(i)
# If the current remove set is empty or the new remove set is
# smaller update the remove set.
if not remove or len(working_remove) < len(remove):
remove = working_remove
# Remove the selected entries from the merge
merge = _Merge(merge.routing_table, merge.entries - remove)
else:
# NOTE: If there are no covered entries, that is, if the merge is
# better than min goodness AND valid this `else` clause is not reached.
# Ensure that an empty merge is returned if the above loop was aborted
# early with a non-empty merge.
merge = _Merge(merge.routing_table, set())
return merge | python | def _refine_downcheck(merge, aliases, min_goodness):
"""Prune the merge to avoid it covering up any entries which are below the
merge insertion position.
For example, in the (non-orthogonal) table::
00001 -> N S
00011 -> N S
00100 -> N S
00X00 -> N S
XX1XX -> 3 5
Merging the first four entries would generate the new key-mask ``00XXX``
which would be inserted above the entry with the key-mask ``XX1XX``.
However ``00XXX`` would stop the key ``00110`` from reaching its correct
route, that is ``00110`` would be covered by ``00XXX``. To avoid this one
could just abandon the merge entirely, but a better solution is to attempt
to reduce the merge such that it no longer covers any entries below it.
To do this we first identify the bits that ARE ``X`` s in the merged
key-mask but which are NOT ``X`` s in the entry that we're covering. For
this example this is the 3rd bit. We then look to remove from the merge any
entries which are either ``X`` s in this position OR have the same value
in this bit as the aliased entry. As the 4th entry in the table has an
``X`` in this position we remove it, and as the 3rd entry has a ``1`` we
also remove it. For this example we would then consider merging only the
first two entries, leading to a new key-mask pair of ``000X1`` which can be
safely inserted between ``00X00`` and ``XX1XX``::
00100 -> N S
00X00 -> N S
000X1 -> N S
XX1XX -> 3 5
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
"""
# Operation
# ---------
# While the merge is still better than `min_goodness` we determine which
# entries below it in the table it covers. For each of these covered
# entries we find which bits are Xs in the merged entry and are NOT Xs in
# the covered entry.
#
# For example:
#
# Merged entry: ...0XXX1...
# Covered entry: ...010XX...
# Bits of interest: ^^
# Label used below: mn
#
# NOTE:
# The covered entry may be of lower generality than the prospective
# merged entry if it is contained within the aliases dictionary (e.g.,
# ...010XX... may be part of
# ``aliases = {...XXXXX...: {..., ...010XX..., ...}, ...})``
#
# In this case there are 2 bits of interest highlighted. These are bits in
# the merge entry whose value can be set (by removing entries from the
# merge) to avoid covering the covered entry. Whenever we have multiple
# covered entries we care only about the entries with the fewest number of
# ``settable`` bits because these most constrain which entries we may
# remove from the merge to avoid covering up the lower entry.
#
# NOTE:
# * If there is only 1 ``settable`` bit then we are very constrained in
# terms of which entries must be removed from the merge to avoid
# covering a lower entry.
# * If there are no ``settable`` bits then we cannot possibly avoid
# covering the lower entry - the only correct action is to return an
# empty merge.
#
# Assuming that there were no covered entries without any ``settable`` bits
# (that is ``stringency > 0``) then ``bits_and_vals`` contains pairs of
# bits and boolean values which indicate which values need to be removed
# from which bit positions to avoid covering up lower entries. If the
# example above were the only covered entry then ``bits_and_vals`` would
# contain ``(m, True)`` to indicate that all entries containing Xs or 1s in
# the left-most bit of interest could be removed to avoid the covered entry
# and ``(n, False)`` to indicate that all entries containing Xs or 0s in
# the right-most bit of interest could be removed to avoid covering the
# entry.
#
# NOTE:
# ``bits_and_vals`` consists of a set of options (e.g., we *could* remove
# all entries with Xs or 1s in bit ``m`` *or* we could remove all entries
# with Xs or 0s in bit ``n``, either would resolve the above covering).
#
# To determine which course of action to take we build a dictionary mapping
# each of the pairs in ``bits_and_vals`` to the entries that would need to
# be removed to "set" that bit in the merged entry. For example, we might
# end up with:
#
# options = {(m, True): {1, 4, 5},
# (n, False): {3, 7}}
#
# Indicating that we'd need to remove entries 1, 4 and 5 from the merge to
# "set" the mth bit of the merged entry to 0 or that we'd need to remove
# entries 3 and 7 to set the nth bit of the merged entry to 1.
#
# NOTE:
# The boolean part of the pair indicates which value needs to be removed
# (True -> remove all 1s and Xs; False -> remove all 0s and Xs). If all
# Xs and 1s in a given bit position are removed from a merge then the
# merged entry is guaranteed to have a 0 in the bit position. Vice-versa
# removing all Xs and 0s in a given bit position from a merge will result
# in a merged entry with a 1 in that position.
#
# As we want to make our merges as large as possible we select the smallest
# set of entries to remove from the merge from ``options``.
#
# The whole process is then repeated since:
# * we ignored covered entries with more ``settable`` bits, so there may
# still be covered entries below the merged entry
# * after removing entries from the merge the merged entry is of lower
# generality and is therefore nearer the top of the table so new
# entries may have become covered
# Set of bit positions
all_bits = tuple(1 << i for i in range(32))
# While the merge is still worth considering continue to perform the
# down-check.
while merge.goodness > min_goodness:
covered = list(_get_covered_keys_and_masks(merge, aliases))
# If there are no covered entries (the merge is valid) then break out
# of the loop.
if not covered:
break
# For each covered entry work out which bits are not Xs in its key-mask
# pair but are Xs in the merge key-mask pair. Only
# keep track of the entries which have the fewest bits that we could
# set.
most_stringent = 33 # Not at all stringent
bits_and_vals = set()
for key, mask in covered:
# Get the bit positions where there ISN'T an X in the covered entry
# but there IS an X in the merged entry.
settable = mask & ~merge.mask
# Count the number of settable bits, if this is a more stringent
# constraint than the previous constraint then ensure that we
# record the new stringency and store which bits we need to set to
# meet the constraint.
n_settable = sum(1 for bit in all_bits if bit & settable)
if n_settable <= most_stringent:
if n_settable < most_stringent:
most_stringent = n_settable
bits_and_vals = set()
# Add this settable mask and the required values to the
# settables list.
bits_and_vals.update((bit, not (key & bit)) for bit in
all_bits if bit & settable)
if most_stringent == 0:
# If there are any instances where we could not possibly change a
# bit to avoid aliasing an entry we'll return an empty merge and
# give up.
merge = _Merge(merge.routing_table, set())
break
else:
# Get the smallest number of entries to remove to modify the
# resultant key-mask to avoid covering a lower entry. Prefer to
# modify more significant bits of the key mask.
remove = set() # Entries to remove
for bit, val in sorted(bits_and_vals, reverse=True):
working_remove = set() # Holder for working remove set
for i in merge.entries:
entry = merge.routing_table[i]
if ((not entry.mask & bit) or
(bool(entry.key & bit) is (not val))):
# If the entry has an X in this position then it will
# need to be removed regardless of whether we want to
# set a 0 or a 1 in this position, likewise it will
# need to be removed if it is a 0 and we want a 1 or
# vice-versa.
working_remove.add(i)
# If the current remove set is empty or the new remove set is
# smaller update the remove set.
if not remove or len(working_remove) < len(remove):
remove = working_remove
# Remove the selected entries from the merge
merge = _Merge(merge.routing_table, merge.entries - remove)
else:
# NOTE: If there are no covered entries, that is, if the merge is
# better than min goodness AND valid this `else` clause is not reached.
# Ensure that an empty merge is returned if the above loop was aborted
# early with a non-empty merge.
merge = _Merge(merge.routing_table, set())
return merge | [
"def",
"_refine_downcheck",
"(",
"merge",
",",
"aliases",
",",
"min_goodness",
")",
":",
"# Operation",
"# ---------",
"# While the merge is still better than `min_goodness` we determine which",
"# entries below it in the table it covers. For each of these covered",
"# entries we find which bits are Xs in the merged entry and are NOT Xs in",
"# the covered entry.",
"#",
"# For example:",
"#",
"# Merged entry: ...0XXX1...",
"# Covered entry: ...010XX...",
"# Bits of interest: ^^",
"# Label used below: mn",
"#",
"# NOTE:",
"# The covered entry may be of lower generality than the prospective",
"# merged entry if it is contained within the aliases dictionary (e.g.,",
"# ...010XX... may be part of",
"# ``aliases = {...XXXXX...: {..., ...010XX..., ...}, ...})``",
"#",
"# In this case there are 2 bits of interest highlighted. These are bits in",
"# the merge entry whose value can be set (by removing entries from the",
"# merge) to avoid covering the covered entry. Whenever we have multiple",
"# covered entries we care only about the entries with the fewest number of",
"# ``settable`` bits because these most constrain which entries we may",
"# remove from the merge to avoid covering up the lower entry.",
"#",
"# NOTE:",
"# * If there is only 1 ``settable`` bit then we are very constrained in",
"# terms of which entries must be removed from the merge to avoid",
"# covering a lower entry.",
"# * If there are no ``settable`` bits then we cannot possibly avoid",
"# covering the lower entry - the only correct action is to return an",
"# empty merge.",
"#",
"# Assuming that there were no covered entries without any ``settable`` bits",
"# (that is ``stringency > 0``) then ``bits_and_vals`` contains pairs of",
"# bits and boolean values which indicate which values need to be removed",
"# from which bit positions to avoid covering up lower entries. If the",
"# example above were the only covered entry then ``bits_and_vals`` would",
"# contain ``(m, True)`` to indicate that all entries containing Xs or 1s in",
"# the left-most bit of interest could be removed to avoid the covered entry",
"# and ``(n, False)`` to indicate that all entries containing Xs or 0s in",
"# the right-most bit of interest could be removed to avoid covering the",
"# entry.",
"#",
"# NOTE:",
"# ``bits_and_vals`` consists of a set of options (e.g., we *could* remove",
"# all entries with Xs or 1s in bit ``m`` *or* we could remove all entries",
"# with Xs or 0s in bit ``n``, either would resolve the above covering).",
"#",
"# To determine which course of action to take we build a dictionary mapping",
"# each of the pairs in ``bits_and_vals`` to the entries that would need to",
"# be removed to \"set\" that bit in the merged entry. For example, we might",
"# end up with:",
"#",
"# options = {(m, True): {1, 4, 5},",
"# (n, False): {3, 7}}",
"#",
"# Indicating that we'd need to remove entries 1, 4 and 5 from the merge to",
"# \"set\" the mth bit of the merged to 0 or that we'd need to remove entries",
"# 3 and 7 to set the nth bit of the merged entry to set the nth bit to 1.",
"#",
"# NOTE:",
"# The boolean part of the pair indicates which value needs to be removed",
"# (True -> remove all 1s and Xs; False -> remove all 0s and Xs). If all",
"# Xs and 1s in a given bit position are removed from a merge then the",
"# merged entry is guaranteed to have a 0 in the bit position. Vice-versa",
"# removing all Xs and 0s in a given bit position from a merge will result",
"# in a merged entry with a 1 in that position.",
"#",
"# As we want to make our merges as large as possible we select the smallest",
"# set of entries to remove from the merge from ``options``.",
"#",
"# The whole process is then repeated since:",
"# * we ignored covered entries with more ``settable`` bits there may",
"# still be covered entries below the merged entry",
"# * after removing entries from the merge the merged entry is of lower",
"# generality and is therefore nearer the top of the table so new",
"# entries may be have become covered",
"# Set of bit positions",
"all_bits",
"=",
"tuple",
"(",
"1",
"<<",
"i",
"for",
"i",
"in",
"range",
"(",
"32",
")",
")",
"# While the merge is still worth considering continue to perform the",
"# down-check.",
"while",
"merge",
".",
"goodness",
">",
"min_goodness",
":",
"covered",
"=",
"list",
"(",
"_get_covered_keys_and_masks",
"(",
"merge",
",",
"aliases",
")",
")",
"# If there are no covered entries (the merge is valid) then break out",
"# of the loop.",
"if",
"not",
"covered",
":",
"break",
"# For each covered entry work out which bits in the key-mask pair which",
"# are not Xs are not covered by Xs in the merge key-mask pair. Only",
"# keep track of the entries which have the fewest bits that we could",
"# set.",
"most_stringent",
"=",
"33",
"# Not at all stringent",
"bits_and_vals",
"=",
"set",
"(",
")",
"for",
"key",
",",
"mask",
"in",
"covered",
":",
"# Get the bit positions where there ISN'T an X in the covered entry",
"# but there IS an X in the merged entry.",
"settable",
"=",
"mask",
"&",
"~",
"merge",
".",
"mask",
"# Count the number of settable bits, if this is a more stringent",
"# constraint than the previous constraint then ensure that we",
"# record the new stringency and store which bits we need to set to",
"# meet the constraint.",
"n_settable",
"=",
"sum",
"(",
"1",
"for",
"bit",
"in",
"all_bits",
"if",
"bit",
"&",
"settable",
")",
"if",
"n_settable",
"<=",
"most_stringent",
":",
"if",
"n_settable",
"<",
"most_stringent",
":",
"most_stringent",
"=",
"n_settable",
"bits_and_vals",
"=",
"set",
"(",
")",
"# Add this settable mask and the required values to the",
"# settables list.",
"bits_and_vals",
".",
"update",
"(",
"(",
"bit",
",",
"not",
"(",
"key",
"&",
"bit",
")",
")",
"for",
"bit",
"in",
"all_bits",
"if",
"bit",
"&",
"settable",
")",
"if",
"most_stringent",
"==",
"0",
":",
"# If are there any instances where we could not possibly change a",
"# bit to avoid aliasing an entry we'll return an empty merge and",
"# give up.",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
",",
"set",
"(",
")",
")",
"break",
"else",
":",
"# Get the smallest number of entries to remove to modify the",
"# resultant key-mask to avoid covering a lower entry. Prefer to",
"# modify more significant bits of the key mask.",
"remove",
"=",
"set",
"(",
")",
"# Entries to remove",
"for",
"bit",
",",
"val",
"in",
"sorted",
"(",
"bits_and_vals",
",",
"reverse",
"=",
"True",
")",
":",
"working_remove",
"=",
"set",
"(",
")",
"# Holder for working remove set",
"for",
"i",
"in",
"merge",
".",
"entries",
":",
"entry",
"=",
"merge",
".",
"routing_table",
"[",
"i",
"]",
"if",
"(",
"(",
"not",
"entry",
".",
"mask",
"&",
"bit",
")",
"or",
"(",
"bool",
"(",
"entry",
".",
"key",
"&",
"bit",
")",
"is",
"(",
"not",
"val",
")",
")",
")",
":",
"# If the entry has an X in this position then it will",
"# need to be removed regardless of whether we want to",
"# set a 0 or a 1 in this position, likewise it will",
"# need to be removed if it is a 0 and we want a 1 or",
"# vice-versa.",
"working_remove",
".",
"add",
"(",
"i",
")",
"# If the current remove set is empty or the new remove set is",
"# smaller update the remove set.",
"if",
"not",
"remove",
"or",
"len",
"(",
"working_remove",
")",
"<",
"len",
"(",
"remove",
")",
":",
"remove",
"=",
"working_remove",
"# Remove the selected entries from the merge",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
",",
"merge",
".",
"entries",
"-",
"remove",
")",
"else",
":",
"# NOTE: If there are no covered entries, that is, if the merge is",
"# better than min goodness AND valid this `else` clause is not reached.",
"# Ensure than an empty merge is returned if the above loop was aborted",
"# early with a non-empty merge.",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
",",
"set",
"(",
")",
")",
"return",
"merge"
] | Prune the merge to avoid it covering up any entries which are below the
merge insertion position.
For example, in the (non-orthogonal) table::
00001 -> N S
00011 -> N S
00100 -> N S
00X00 -> N S
XX1XX -> 3 5
Merging the first four entries would generate the new key-mask ``00XXX``
which would be inserted above the entry with the key-mask ``XX1XX``.
However ``00XXX`` would stop the key ``00110`` from reaching its correct
route, that is ``00110`` would be covered by ``00XXX``. To avoid this one
could just abandon the merge entirely, but a better solution is to attempt
to reduce the merge such that it no longer covers any entries below it.
To do this we first identify the bits that ARE ``X`` s in the merged
key-mask but which are NOT ``X`` s in the entry that we're covering. For
this example this is the 3rd bit. We then look to remove from the merge any
entries which are either ``X`` s in this position OR have the same value
in this bit as the aliased entry. As the 4th entry in the table has an
``X`` in this position we remove it, and as the 3rd entry has a ``1`` we
also remove it. For this example we would then consider merging only the
first two entries, leading to a new key-mask pair of ``000X1`` which can be
safely inserted between ``00X00`` and ``XX1XX``::
00100 -> N S
00X00 -> N S
000X1 -> N S
XX1XX -> 3 5
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned. | [
"Prune",
"the",
"merge",
"to",
"avoid",
"it",
"covering",
"up",
"any",
"entries",
"which",
"are",
"below",
"the",
"merge",
"insertion",
"position",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L601-L801 |
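The pivotal quantity in the down-check above is `settable = mask & ~merge.mask`: the bit positions that are fixed in the covered entry but are Xs in the prospective merged entry. A tiny worked example with illustrative 5-bit values, not taken from the package:

covered_mask = 0b11111                  # covered entry has no Xs
merged_mask = 0b11011                   # merged entry has an X in bit 2

settable = covered_mask & ~merged_mask  # bits fixed below but X in the merge
assert settable == 0b00100              # only bit 2 could be forced to avoid the cover
assert bin(settable).count("1") == 1    # a single settable bit: a very stringent case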
project-rig/rig | rig/routing_table/ordered_covering.py | _get_covered_keys_and_masks | def _get_covered_keys_and_masks(merge, aliases):
"""Get keys and masks which would be covered by the entry resulting from
the merge.
Parameters
----------
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
Yields
------
(key, mask)
Pairs of keys and masks which would be covered if the given `merge`
were to be applied to the routing table.
"""
# For every entry in the table below the insertion index see which keys
# and masks would overlap with the key and mask of the merged entry.
for entry in merge.routing_table[merge.insertion_index:]:
key_mask = (entry.key, entry.mask)
keys_masks = aliases.get(key_mask, [key_mask])
for key, mask in keys_masks:
if intersect(merge.key, merge.mask, key, mask):
yield key, mask | python | def _get_covered_keys_and_masks(merge, aliases):
"""Get keys and masks which would be covered by the entry resulting from
the merge.
Parameters
----------
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
Yields
------
(key, mask)
Pairs of keys and masks which would be covered if the given `merge`
were to be applied to the routing table.
"""
# For every entry in the table below the insertion index see which keys
# and masks would overlap with the key and mask of the merged entry.
for entry in merge.routing_table[merge.insertion_index:]:
key_mask = (entry.key, entry.mask)
keys_masks = aliases.get(key_mask, [key_mask])
for key, mask in keys_masks:
if intersect(merge.key, merge.mask, key, mask):
yield key, mask | [
"def",
"_get_covered_keys_and_masks",
"(",
"merge",
",",
"aliases",
")",
":",
"# For every entry in the table below the insertion index see which keys",
"# and masks would overlap with the key and mask of the merged entry.",
"for",
"entry",
"in",
"merge",
".",
"routing_table",
"[",
"merge",
".",
"insertion_index",
":",
"]",
":",
"key_mask",
"=",
"(",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
")",
"keys_masks",
"=",
"aliases",
".",
"get",
"(",
"key_mask",
",",
"[",
"key_mask",
"]",
")",
"for",
"key",
",",
"mask",
"in",
"keys_masks",
":",
"if",
"intersect",
"(",
"merge",
".",
"key",
",",
"merge",
".",
"mask",
",",
"key",
",",
"mask",
")",
":",
"yield",
"key",
",",
"mask"
] | Get keys and masks which would be covered by the entry resulting from
the merge.
Parameters
----------
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
Yields
------
(key, mask)
Pairs of keys and masks which would be covered if the given `merge`
were to be applied to the routing table. | [
"Get",
"keys",
"and",
"masks",
"which",
"would",
"be",
"covered",
"by",
"the",
"entry",
"resulting",
"from",
"the",
"merge",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L804-L828 |
project-rig/rig | rig/routing_table/ordered_covering.py | _Merge.apply | def apply(self, aliases):
"""Apply the merge to the routing table it is defined against and get a
new routing table and alias dictionary.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
A new routing table which may be minimised further.
{(key, mask): {(key, mask), ...}}
A new aliases dictionary.
"""
# Create a new routing table of the correct size
new_size = len(self.routing_table) - len(self.entries) + 1
new_table = [None for _ in range(new_size)]
# Create a copy of the aliases dictionary
aliases = dict(aliases)
# Get the new entry
new_entry = RoutingTableEntry(
route=self.routing_table[next(iter(self.entries))].route,
key=self.key, mask=self.mask, sources=self.sources
)
aliases[(self.key, self.mask)] = our_aliases = set([])
# Iterate through the old table copying entries across
insert = 0
for i, entry in enumerate(self.routing_table):
# If this is the insertion point then insert
if i == self.insertion_index:
new_table[insert] = new_entry
insert += 1
if i not in self.entries:
# If this entry isn't to be removed then copy it across to the
# new table.
new_table[insert] = entry
insert += 1
else:
# If this entry is to be removed then add it to the aliases
# dictionary.
km = (entry.key, entry.mask)
our_aliases.update(aliases.pop(km, {km}))
# If inserting beyond the end of the old table then insert at the end
# of the new table.
if self.insertion_index == len(self.routing_table):
new_table[insert] = new_entry
return new_table, aliases | python | def apply(self, aliases):
"""Apply the merge to the routing table it is defined against and get a
new routing table and alias dictionary.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
A new routing table which may be minimised further.
{(key, mask): {(key, mask), ...}}
A new aliases dictionary.
"""
# Create a new routing table of the correct size
new_size = len(self.routing_table) - len(self.entries) + 1
new_table = [None for _ in range(new_size)]
# Create a copy of the aliases dictionary
aliases = dict(aliases)
# Get the new entry
new_entry = RoutingTableEntry(
route=self.routing_table[next(iter(self.entries))].route,
key=self.key, mask=self.mask, sources=self.sources
)
aliases[(self.key, self.mask)] = our_aliases = set([])
# Iterate through the old table copying entries across
insert = 0
for i, entry in enumerate(self.routing_table):
# If this is the insertion point then insert
if i == self.insertion_index:
new_table[insert] = new_entry
insert += 1
if i not in self.entries:
# If this entry isn't to be removed then copy it across to the
# new table.
new_table[insert] = entry
insert += 1
else:
# If this entry is to be removed then add it to the aliases
# dictionary.
km = (entry.key, entry.mask)
our_aliases.update(aliases.pop(km, {km}))
# If inserting beyond the end of the old table then insert at the end
# of the new table.
if self.insertion_index == len(self.routing_table):
new_table[insert] = new_entry
return new_table, aliases | [
"def",
"apply",
"(",
"self",
",",
"aliases",
")",
":",
"# Create a new routing table of the correct size",
"new_size",
"=",
"len",
"(",
"self",
".",
"routing_table",
")",
"-",
"len",
"(",
"self",
".",
"entries",
")",
"+",
"1",
"new_table",
"=",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"new_size",
")",
"]",
"# Create a copy of the aliases dictionary",
"aliases",
"=",
"dict",
"(",
"aliases",
")",
"# Get the new entry",
"new_entry",
"=",
"RoutingTableEntry",
"(",
"route",
"=",
"self",
".",
"routing_table",
"[",
"next",
"(",
"iter",
"(",
"self",
".",
"entries",
")",
")",
"]",
".",
"route",
",",
"key",
"=",
"self",
".",
"key",
",",
"mask",
"=",
"self",
".",
"mask",
",",
"sources",
"=",
"self",
".",
"sources",
")",
"aliases",
"[",
"(",
"self",
".",
"key",
",",
"self",
".",
"mask",
")",
"]",
"=",
"our_aliases",
"=",
"set",
"(",
"[",
"]",
")",
"# Iterate through the old table copying entries acrosss",
"insert",
"=",
"0",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"self",
".",
"routing_table",
")",
":",
"# If this is the insertion point then insert",
"if",
"i",
"==",
"self",
".",
"insertion_index",
":",
"new_table",
"[",
"insert",
"]",
"=",
"new_entry",
"insert",
"+=",
"1",
"if",
"i",
"not",
"in",
"self",
".",
"entries",
":",
"# If this entry isn't to be removed then copy it across to the",
"# new table.",
"new_table",
"[",
"insert",
"]",
"=",
"entry",
"insert",
"+=",
"1",
"else",
":",
"# If this entry is to be removed then add it to the aliases",
"# dictionary.",
"km",
"=",
"(",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
")",
"our_aliases",
".",
"update",
"(",
"aliases",
".",
"pop",
"(",
"km",
",",
"{",
"km",
"}",
")",
")",
"# If inserting beyond the end of the old table then insert at the end",
"# of the new table.",
"if",
"self",
".",
"insertion_index",
"==",
"len",
"(",
"self",
".",
"routing_table",
")",
":",
"new_table",
"[",
"insert",
"]",
"=",
"new_entry",
"return",
"new_table",
",",
"aliases"
] | Apply the merge to the routing table it is defined against and get a
new routing table and alias dictionary.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
A new routing table which may be minimised further.
{(key, mask): {(key, mask), ...}}
A new aliases dictionary. | [
"Apply",
"the",
"merge",
"to",
"the",
"routing",
"table",
"it",
"is",
"defined",
"against",
"and",
"get",
"a",
"new",
"routing",
"table",
"and",
"alias",
"dictionary",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L464-L513 |
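`apply` reads the merged `key`, `mask` and `sources` from properties of `_Merge` that are not shown in this record. A plausible sketch of how such a merged key-mask pair could be derived, assuming keys are zero outside their masks, is to turn into an X every bit that is an X in any entry or on which the keys disagree:

from functools import reduce
from operator import and_, or_

def merged_key_mask(key_mask_pairs, width=32):
    keys = [k for k, _ in key_mask_pairs]
    masks = [m for _, m in key_mask_pairs]
    any_ones = reduce(or_, keys)               # bit set in at least one key
    all_ones = reduce(and_, keys)              # bit set in every key
    any_xs = reduce(or_, (~m for m in masks))  # bit is an X in some entry
    new_xs = any_ones ^ all_ones               # keys disagree, so it becomes an X
    mask = ~(any_xs | new_xs) & ((1 << width) - 1)
    return all_ones & mask, mask

# 0001 and 0011 merge to 00X1: bit 1 differs between the keys, so it becomes an X.
assert merged_key_mask([(0b0001, 0b1111), (0b0011, 0b1111)], width=4) == (0b0001, 0b1101)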
Metatab/metapack | metapack/cli/install_file.py | find_packages | def find_packages(name, pkg_dir):
"""Locate pre-built packages in the _packages directory"""
for c in (FileSystemPackageBuilder, ZipPackageBuilder, ExcelPackageBuilder):
package_path, cache_path = c.make_package_path(pkg_dir, name)
if package_path.exists():
yield c.type_code, package_path, cache_path | python | def find_packages(name, pkg_dir):
"""Locate pre-built packages in the _packages directory"""
for c in (FileSystemPackageBuilder, ZipPackageBuilder, ExcelPackageBuilder):
package_path, cache_path = c.make_package_path(pkg_dir, name)
if package_path.exists():
yield c.type_code, package_path, cache_path | [
"def",
"find_packages",
"(",
"name",
",",
"pkg_dir",
")",
":",
"for",
"c",
"in",
"(",
"FileSystemPackageBuilder",
",",
"ZipPackageBuilder",
",",
"ExcelPackageBuilder",
")",
":",
"package_path",
",",
"cache_path",
"=",
"c",
".",
"make_package_path",
"(",
"pkg_dir",
",",
"name",
")",
"if",
"package_path",
".",
"exists",
"(",
")",
":",
"yield",
"c",
".",
"type_code",
",",
"package_path",
",",
"cache_path"
] | Locate pre-built packages in the _packages directory | [
"Locate",
"pre",
"-",
"built",
"packages",
"in",
"the",
"_packages",
"directory"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/install_file.py#L60-L68 |
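A hypothetical use of the generator above; the package name and the `_packages` directory below are placeholders rather than paths from the project:

# List any pre-built packages that already exist for a source package name.
for type_code, package_path, cache_path in find_packages('example.com-dataset-1', '_packages'):
    print(type_code, package_path, cache_path)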
Metatab/metapack | metapack/cli/new.py | new_args | def new_args(subparsers):
"""
The `mp new` command creates source package directories
with a proper name, a `.gitignore` file, and optionally, example data,
entries and code. Typical usage, for creating a new package with most
of the example options, is ::
mp new -o metatab.org -d tutorial -L -E -T "Quickstart Example Package"
The :option:`-C` option will set a configuration file, which is a
Metatab file that with terms that are copied into the `metadata.csv` file
of the new package. Currently, it copies a limited number of terms,
including:
- Terms in the Contacts section
- Root.Space
- Root.Time
- Root.Grain
- Root.Variant
- Root.Version
"""
parser = subparsers.add_parser(
'new',
help='Create new Metatab packages',
description=new_args.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(run_command=new_cmd)
parser.add_argument('-o', '--origin', help="Dataset origin, usually a second-level domain name. Required")
parser.add_argument('-d', '--dataset', help="Main dataset name. Required", required=True)
parser.add_argument('-t', '--time', help="Temporal extents, usually a year, ISO8601 time, or interval. ")
parser.add_argument('-s', '--space', help="Space, geographic extent, such as a name of a state or a Census geoid")
parser.add_argument('-g', '--grain', help="Grain, the type of entity a row represents")
parser.add_argument('-v', '--variant', help="Variant, any distinguishing string")
parser.add_argument('-r', '--revision', help="Version, defaults to 1", default=1)
parser.add_argument('-T', '--title', help="Set the title")
parser.add_argument('-L', '--pylib', help="Configure a pylib directory for Python code extensions",
action='store_true')
parser.add_argument('-E', '--example', help="Add examples of resources",
action='store_true')
parser.add_argument('-J', '--jupyter', help="Create a Jupyter notebook source package",
action='store_true')
parser.add_argument('--template', help="Metatab file template, defaults to 'metatab' ", default='metatab')
parser.add_argument('-C', '--config', help="Path to config file. "
"Defaults to ~/.metapack-defaults.csv or value of METAPACK_DEFAULTS env var. "
"Sets defaults for special root terms and the Contacts section.")
return parser | python | def new_args(subparsers):
"""
The `mp new` command creates source package directories
with a proper name, a `.gitignore` file, and optionally, example data,
entries and code. Typical usage, for creating a new package with most
of the example options, is ::
mp new -o metatab.org -d tutorial -L -E -T "Quickstart Example Package"
The :option:`-C` option will set a configuration file, which is a
Metatab file with terms that are copied into the `metadata.csv` file
of the new package. Currently, it copies a limited number of terms,
including:
- Terms in the Contacts section
- Root.Space
- Root.Time
- Root.Grain
- Root.Variant
- Root.Version
"""
parser = subparsers.add_parser(
'new',
help='Create new Metatab packages',
description=new_args.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(run_command=new_cmd)
parser.add_argument('-o', '--origin', help="Dataset origin, usually a second-level domain name. Required")
parser.add_argument('-d', '--dataset', help="Main dataset name. Required", required=True)
parser.add_argument('-t', '--time', help="Temporal extents, usually a year, ISO8601 time, or interval. ")
parser.add_argument('-s', '--space', help="Space, geographic extent, such as a name of a state or a Census geoid")
parser.add_argument('-g', '--grain', help="Grain, the type of entity a row represents")
parser.add_argument('-v', '--variant', help="Variant, any distinguishing string")
parser.add_argument('-r', '--revision', help="Version, defaults to 1", default=1)
parser.add_argument('-T', '--title', help="Set the title")
parser.add_argument('-L', '--pylib', help="Configure a pylib directory for Python code extensions",
action='store_true')
parser.add_argument('-E', '--example', help="Add examples of resources",
action='store_true')
parser.add_argument('-J', '--jupyter', help="Create a Jupyter notebook source package",
action='store_true')
parser.add_argument('--template', help="Metatab file template, defaults to 'metatab' ", default='metatab')
parser.add_argument('-C', '--config', help="Path to config file. "
"Defaults to ~/.metapack-defaults.csv or value of METAPACK_DEFAULTS env var. "
"Sets defaults for special root terms and the Contacts section.")
return parser | [
"def",
"new_args",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'new'",
",",
"help",
"=",
"'Create new Metatab packages'",
",",
"description",
"=",
"new_args",
".",
"__doc__",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
")",
"parser",
".",
"set_defaults",
"(",
"run_command",
"=",
"new_cmd",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--origin'",
",",
"help",
"=",
"\"Dataset origin, usually a second-level domain name. Required\"",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--dataset'",
",",
"help",
"=",
"\"Main dataset name. Required\"",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--time'",
",",
"help",
"=",
"\"Temporal extents, usually a year, ISO8601 time, or interval. \"",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--space'",
",",
"help",
"=",
"\"Space, geographic extent, such as a name of a state or a Census geoid\"",
")",
"parser",
".",
"add_argument",
"(",
"'-g'",
",",
"'--grain'",
",",
"help",
"=",
"\"Grain, the type of entity a row represents\"",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--variant'",
",",
"help",
"=",
"\"Variant, any distinguishing string\"",
")",
"parser",
".",
"add_argument",
"(",
"'-r'",
",",
"'--revision'",
",",
"help",
"=",
"\"Version, defaults to 1\"",
",",
"default",
"=",
"1",
")",
"parser",
".",
"add_argument",
"(",
"'-T'",
",",
"'--title'",
",",
"help",
"=",
"\"Set the title\"",
")",
"parser",
".",
"add_argument",
"(",
"'-L'",
",",
"'--pylib'",
",",
"help",
"=",
"\"Configure a pylib directory for Python code extensions\"",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'-E'",
",",
"'--example'",
",",
"help",
"=",
"\"Add examples of resources\"",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'-J'",
",",
"'--jupyter'",
",",
"help",
"=",
"\"Create a Jupyter notebook source package\"",
",",
"action",
"=",
"'store_true'",
")",
"parser",
".",
"add_argument",
"(",
"'--template'",
",",
"help",
"=",
"\"Metatab file template, defaults to 'metatab' \"",
",",
"default",
"=",
"'metatab'",
")",
"parser",
".",
"add_argument",
"(",
"'-C'",
",",
"'--config'",
",",
"help",
"=",
"\"Path to config file. \"",
"\"Defaults to ~/.metapack-defaults.csv or value of METAPACK_DEFAULTS env var.\"",
"\"Sets defaults for specia root terms and the Contacts section.\"",
")",
"return",
"parser"
] | The `mp new` command creates source package directories
with a proper name, a `.gitignore` file, and optionally, example data,
entries and code. Typical usage, for creating a new package with most
of the example options, is ::
mp new -o metatab.org -d tutorial -L -E -T "Quickstart Example Package"
The :option:`-C` option will set a configuration file, which is a
Metatab file with terms that are copied into the `metadata.csv` file
of the new package. Currently, it copies a limited number of terms,
including:
- Terms in the Contacts section
- Root.Space
- Root.Time
- Root.Grain
- Root.Variant
- Root.Version | [
"The",
"mp",
"new",
"command",
"creates",
"source",
"package",
"directories",
"with",
"a",
"proper",
"name",
"a",
".",
"gitignore",
"file",
"and",
"optionally",
"example",
"data",
"entries",
"and",
"code",
".",
"Typical",
"usage",
"for",
"creating",
"a",
"new",
"package",
"with",
"most",
"of",
"the",
"example",
"options",
"is",
"::",
"mp",
"new",
"-",
"o",
"metatab",
".",
"org",
"-",
"d",
"tutorial",
"-",
"L",
"-",
"E",
"-",
"T",
"Quickstart",
"Example",
"Package",
"The",
":",
"option",
":",
"-",
"C",
"option",
"will",
"set",
"a",
"configuration",
"file",
"which",
"is",
"a",
"Metatab",
"file",
"that",
"with",
"terms",
"that",
"are",
"copied",
"into",
"the",
"metadata",
".",
"csv",
"file",
"of",
"the",
"new",
"package",
".",
"Currently",
"it",
"copies",
"a",
"limited",
"number",
"of",
"terms",
"including",
":",
"-",
"Terms",
"in",
"the",
"Contacts",
"section",
"-",
"Root",
".",
"Space",
"-",
"Root",
".",
"Time",
"-",
"Root",
".",
"Grain",
"-",
"Root",
".",
"Variant",
"-",
"Root",
".",
"Version"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/new.py#L29-L83 |
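A sketch of how this subparser might be wired into a top-level argparse program; the program name and argument values are illustrative only:

import argparse

parser = argparse.ArgumentParser(prog='mp')
subparsers = parser.add_subparsers()
new_args(subparsers)  # assumes new_args from metapack.cli.new is in scope

args = parser.parse_args(['new', '-o', 'example.org', '-d', 'tutorial', '-L', '-E'])
# args.run_command is new_cmd, installed by set_defaults() above; calling
# args.run_command(args) would create the new package directory.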
Parsely/probably | probably/temporal_daily.py | DailyTemporalBloomFilter.initialize_bitarray | def initialize_bitarray(self):
"""Initialize both bitarray.
This BF contains two bit arrays instead of a single one like a plain BF. bitarray
is the main bit array where all the historical items are stored. It's the one
used for the membership query. The second one, current_day_bitarray is the one
used for creating the daily snapshot.
"""
self.bitarray = bitarray.bitarray(self.nbr_bits)
self.current_day_bitarray = bitarray.bitarray(self.nbr_bits)
self.bitarray.setall(False)
self.current_day_bitarray.setall(False) | python | def initialize_bitarray(self):
"""Initialize both bitarray.
This BF contains two bit arrays instead of a single one like a plain BF. bitarray
is the main bit array where all the historical items are stored. It's the one
used for the membership query. The second one, current_day_bitarray is the one
used for creating the daily snapshot.
"""
self.bitarray = bitarray.bitarray(self.nbr_bits)
self.current_day_bitarray = bitarray.bitarray(self.nbr_bits)
self.bitarray.setall(False)
self.current_day_bitarray.setall(False) | [
"def",
"initialize_bitarray",
"(",
"self",
")",
":",
"self",
".",
"bitarray",
"=",
"bitarray",
".",
"bitarray",
"(",
"self",
".",
"nbr_bits",
")",
"self",
".",
"current_day_bitarray",
"=",
"bitarray",
".",
"bitarray",
"(",
"self",
".",
"nbr_bits",
")",
"self",
".",
"bitarray",
".",
"setall",
"(",
"False",
")",
"self",
".",
"current_day_bitarray",
".",
"setall",
"(",
"False",
")"
] | Initialize both bitarray.
This BF contains two bit arrays instead of a single one like a plain BF. bitarray
is the main bit array where all the historical items are stored. It's the one
used for the membership query. The second one, current_day_bitarray is the one
used for creating the daily snapshot. | [
"Initialize",
"both",
"bitarray",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/temporal_daily.py#L51-L62 |
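To make the two-bitarray design above concrete, here is a minimal standalone sketch. The hashing scheme, the nbr_bits value and the add/contains helpers are illustrative assumptions rather than the library's actual implementation; only the idea of writing to both arrays while querying the historical one reflects the docstring::

    import bitarray
    import hashlib

    nbr_bits = 1024

    historical = bitarray.bitarray(nbr_bits)   # used for membership queries
    today = bitarray.bitarray(nbr_bits)        # used for the daily snapshot
    historical.setall(False)
    today.setall(False)

    def _positions(key, k=3):
        # Toy double-hashing scheme, for illustration only.
        h1 = int(hashlib.md5(key.encode()).hexdigest(), 16)
        h2 = int(hashlib.sha1(key.encode()).hexdigest(), 16)
        return [(h1 + i * h2) % nbr_bits for i in range(k)]

    def add(key):
        for pos in _positions(key):
            historical[pos] = True
            today[pos] = True   # only today's items end up in the snapshot

    def contains(key):
        return all(historical[pos] for pos in _positions(key))

    add("user-42")
    assert contains("user-42")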
Parsely/probably | probably/temporal_daily.py | DailyTemporalBloomFilter.initialize_period | def initialize_period(self, period=None):
"""Initialize the period of BF.
:period: datetime.datetime for setting the period explicitly.
"""
if not period:
self.current_period = dt.datetime.now()
else:
self.current_period = period
self.current_period = dt.datetime(self.current_period.year, self.current_period.month, self.current_period.day)
self.date = self.current_period.strftime("%Y-%m-%d") | python | def initialize_period(self, period=None):
"""Initialize the period of BF.
:period: datetime.datetime for setting the period explicitly.
"""
if not period:
self.current_period = dt.datetime.now()
else:
self.current_period = period
self.current_period = dt.datetime(self.current_period.year, self.current_period.month, self.current_period.day)
self.date = self.current_period.strftime("%Y-%m-%d") | [
"def",
"initialize_period",
"(",
"self",
",",
"period",
"=",
"None",
")",
":",
"if",
"not",
"period",
":",
"self",
".",
"current_period",
"=",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
"else",
":",
"self",
".",
"current_period",
"=",
"period",
"self",
".",
"current_period",
"=",
"dt",
".",
"datetime",
"(",
"self",
".",
"current_period",
".",
"year",
",",
"self",
".",
"current_period",
".",
"month",
",",
"self",
".",
"current_period",
".",
"day",
")",
"self",
".",
"date",
"=",
"self",
".",
"current_period",
".",
"strftime",
"(",
"\"%Y-%m-%d\"",
")"
] | Initialize the period of BF.
:period: datetime.datetime for setting the period explicitly. | [
"Initialize",
"the",
"period",
"of",
"BF",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/temporal_daily.py#L87-L97 |
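A small worked example of the period normalisation performed by initialize_period above; the chosen datetime is arbitrary::

    import datetime as dt

    period = dt.datetime(2013, 1, 15, 17, 42, 3)
    # Truncate to midnight, exactly as the method does, then derive the label.
    current_period = dt.datetime(period.year, period.month, period.day)
    date = current_period.strftime("%Y-%m-%d")
    print(current_period, date)   # 2013-01-15 00:00:00 2013-01-15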
Parsely/probably | probably/temporal_daily.py | DailyTemporalBloomFilter.warm | def warm(self, jittering_ratio=0.2):
"""Progressively load the previous snapshot during the day.
Loading all the snapshots at once can take a substantial amount of time. This method, if called
periodically during the day, will progressively load those snapshots one by one. Because many workers are
going to use this method at the same time, we add jitter to the period between loads to avoid
hammering the disk at the same time.
"""
if self.snapshot_to_load == None:
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
self.compute_refresh_period()
self.snapshot_to_load = []
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period >= last_period:
self.snapshot_to_load.append(filename)
self.ready = False
if self.snapshot_to_load and self._should_warm():
filename = self.snapshot_to_load.pop()
self._union_bf_from_file(filename)
jittering = self.warm_period * (np.random.random()-0.5) * jittering_ratio
self.next_snapshot_load = time.time() + self.warm_period + jittering
if not self.snapshot_to_load:
self.ready = True | python | def warm(self, jittering_ratio=0.2):
"""Progressively load the previous snapshot during the day.
Loading all the snapshots at once can take a substantial amount of time. This method, if called
periodically during the day, will progressively load those snapshots one by one. Because many workers are
going to use this method at the same time, we add jitter to the period between loads to avoid
hammering the disk at the same time.
"""
if self.snapshot_to_load == None:
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
self.compute_refresh_period()
self.snapshot_to_load = []
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period >= last_period:
self.snapshot_to_load.append(filename)
self.ready = False
if self.snapshot_to_load and self._should_warm():
filename = self.snapshot_to_load.pop()
self._union_bf_from_file(filename)
jittering = self.warm_period * (np.random.random()-0.5) * jittering_ratio
self.next_snapshot_load = time.time() + self.warm_period + jittering
if not self.snapshot_to_load:
self.ready = True | [
"def",
"warm",
"(",
"self",
",",
"jittering_ratio",
"=",
"0.2",
")",
":",
"if",
"self",
".",
"snapshot_to_load",
"==",
"None",
":",
"last_period",
"=",
"self",
".",
"current_period",
"-",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"self",
".",
"expiration",
"-",
"1",
")",
"self",
".",
"compute_refresh_period",
"(",
")",
"self",
".",
"snapshot_to_load",
"=",
"[",
"]",
"base_filename",
"=",
"\"%s/%s_%s_*.dat\"",
"%",
"(",
"self",
".",
"snapshot_path",
",",
"self",
".",
"name",
",",
"self",
".",
"expiration",
")",
"availables_snapshots",
"=",
"glob",
".",
"glob",
"(",
"base_filename",
")",
"for",
"filename",
"in",
"availables_snapshots",
":",
"snapshot_period",
"=",
"dt",
".",
"datetime",
".",
"strptime",
"(",
"filename",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"'.dat'",
")",
",",
"\"%Y-%m-%d\"",
")",
"if",
"snapshot_period",
">=",
"last_period",
":",
"self",
".",
"snapshot_to_load",
".",
"append",
"(",
"filename",
")",
"self",
".",
"ready",
"=",
"False",
"if",
"self",
".",
"snapshot_to_load",
"and",
"self",
".",
"_should_warm",
"(",
")",
":",
"filename",
"=",
"self",
".",
"snapshot_to_load",
".",
"pop",
"(",
")",
"self",
".",
"_union_bf_from_file",
"(",
"filename",
")",
"jittering",
"=",
"self",
".",
"warm_period",
"*",
"(",
"np",
".",
"random",
".",
"random",
"(",
")",
"-",
"0.5",
")",
"*",
"jittering_ratio",
"self",
".",
"next_snapshot_load",
"=",
"time",
".",
"time",
"(",
")",
"+",
"self",
".",
"warm_period",
"+",
"jittering",
"if",
"not",
"self",
".",
"snapshot_to_load",
":",
"self",
".",
"ready",
"=",
"True"
] | Progressively load the previous snapshot during the day.
Loading all the snapshots at once can take a substantial amount of time. This method, if called
periodically during the day, will progressively load those snapshots one by one. Because many workers are
going to use this method at the same time, we add jitter to the period between loads to avoid
hammering the disk at the same time. | [
"Progressively",
"load",
"the",
"previous",
"snapshot",
"during",
"the",
"day",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/temporal_daily.py#L115-L141 |
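The jittered scheduling used by warm can be illustrated in isolation; the warm_period value below is an assumption for the example::

    import time
    import numpy as np

    warm_period = 60.0        # seconds between snapshot loads (illustrative)
    jittering_ratio = 0.2

    # Offset each load by up to +/- (warm_period * jittering_ratio / 2) so
    # that many workers do not hit the disk at exactly the same moment.
    jitter = warm_period * (np.random.random() - 0.5) * jittering_ratio
    next_snapshot_load = time.time() + warm_period + jitter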
Parsely/probably | probably/temporal_daily.py | DailyTemporalBloomFilter.restore_from_disk | def restore_from_disk(self, clean_old_snapshot=False):
"""Restore the state of the BF using previous snapshots.
:clean_old_snapshot: Delete the old snapshot on the disk (period < current - expiration)
"""
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period < last_period and not clean_old_snapshot:
continue
else:
self._union_bf_from_file(filename)
if snapshot_period == self.current_period:
self._union_bf_from_file(filename, current=True)
if snapshot_period < last_period and clean_old_snapshot:
os.remove(filename)
self.ready = True | python | def restore_from_disk(self, clean_old_snapshot=False):
"""Restore the state of the BF using previous snapshots.
:clean_old_snapshot: Delete the old snapshot on the disk (period < current - expiration)
"""
base_filename = "%s/%s_%s_*.dat" % (self.snapshot_path, self.name, self.expiration)
availables_snapshots = glob.glob(base_filename)
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
for filename in availables_snapshots:
snapshot_period = dt.datetime.strptime(filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")
if snapshot_period < last_period and not clean_old_snapshot:
continue
else:
self._union_bf_from_file(filename)
if snapshot_period == self.current_period:
self._union_bf_from_file(filename, current=True)
if snapshot_period < last_period and clean_old_snapshot:
os.remove(filename)
self.ready = True | [
"def",
"restore_from_disk",
"(",
"self",
",",
"clean_old_snapshot",
"=",
"False",
")",
":",
"base_filename",
"=",
"\"%s/%s_%s_*.dat\"",
"%",
"(",
"self",
".",
"snapshot_path",
",",
"self",
".",
"name",
",",
"self",
".",
"expiration",
")",
"availables_snapshots",
"=",
"glob",
".",
"glob",
"(",
"base_filename",
")",
"last_period",
"=",
"self",
".",
"current_period",
"-",
"dt",
".",
"timedelta",
"(",
"days",
"=",
"self",
".",
"expiration",
"-",
"1",
")",
"for",
"filename",
"in",
"availables_snapshots",
":",
"snapshot_period",
"=",
"dt",
".",
"datetime",
".",
"strptime",
"(",
"filename",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
"'.dat'",
")",
",",
"\"%Y-%m-%d\"",
")",
"if",
"snapshot_period",
"<",
"last_period",
"and",
"not",
"clean_old_snapshot",
":",
"continue",
"else",
":",
"self",
".",
"_union_bf_from_file",
"(",
"filename",
")",
"if",
"snapshot_period",
"==",
"self",
".",
"current_period",
":",
"self",
".",
"_union_bf_from_file",
"(",
"filename",
",",
"current",
"=",
"True",
")",
"if",
"snapshot_period",
"<",
"last_period",
"and",
"clean_old_snapshot",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"self",
".",
"ready",
"=",
"True"
] | Restore the state of the BF using previous snapshots.
:clean_old_snapshot: Delete the old snapshot on the disk (period < current - expiration) | [
"Restore",
"the",
"state",
"of",
"the",
"BF",
"using",
"previous",
"snapshots",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/temporal_daily.py#L151-L170 |
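The snapshot selection in restore_from_disk above hinges on the filename convention <name>_<expiration>_<YYYY-MM-DD>.dat; the sketch below parses one such (made-up) filename and applies the same freshness test::

    import datetime as dt

    filename = "/tmp/bloom/visitors_8_2013-01-01.dat"
    # The period is the last underscore-separated field; .strip('.dat')
    # removes the extension characters from the end of the date string.
    snapshot_period = dt.datetime.strptime(
        filename.split('_')[-1].strip('.dat'), "%Y-%m-%d")

    expiration = 8
    current_period = dt.datetime(2013, 1, 10)
    last_period = current_period - dt.timedelta(days=expiration - 1)
    print(snapshot_period >= last_period)   # False: outside the retention window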
Parsely/probably | probably/temporal_daily.py | DailyTemporalBloomFilter.save_snaphot | def save_snaphot(self):
"""Save the current state of the current day bitarray on disk.
Save the internal representation (bitarray) into a binary file using this format:
filename : name_expiration_2013-01-01.dat
"""
filename = "%s/%s_%s_%s.dat" % (self.snapshot_path, self.name, self.expiration, self.date)
with open(filename, 'w') as f:
f.write(zlib.compress(pickle.dumps(self.current_day_bitarray, protocol=pickle.HIGHEST_PROTOCOL))) | python | def save_snaphot(self):
"""Save the current state of the current day bitarray on disk.
Save the internal representation (bitarray) into a binary file using this format:
filename : name_expiration_2013-01-01.dat
"""
filename = "%s/%s_%s_%s.dat" % (self.snapshot_path, self.name, self.expiration, self.date)
with open(filename, 'w') as f:
f.write(zlib.compress(pickle.dumps(self.current_day_bitarray, protocol=pickle.HIGHEST_PROTOCOL))) | [
"def",
"save_snaphot",
"(",
"self",
")",
":",
"filename",
"=",
"\"%s/%s_%s_%s.dat\"",
"%",
"(",
"self",
".",
"snapshot_path",
",",
"self",
".",
"name",
",",
"self",
".",
"expiration",
",",
"self",
".",
"date",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"zlib",
".",
"compress",
"(",
"pickle",
".",
"dumps",
"(",
"self",
".",
"current_day_bitarray",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
")",
")"
] | Save the current state of the current day bitarray on disk.
Save the internal representation (bitarray) into a binary file using this format:
filename : name_expiration_2013-01-01.dat | [
"Save",
"the",
"current",
"state",
"of",
"the",
"current",
"day",
"bitarray",
"on",
"disk",
"."
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/temporal_daily.py#L172-L180 |
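A round-trip sketch of the snapshot format written by save_snaphot above (a zlib-compressed pickle of the day's bitarray); the path is made up and binary file modes are used here for portability::

    import pickle
    import zlib
    import bitarray

    ba = bitarray.bitarray(1024)
    ba.setall(False)
    ba[3] = True

    filename = "/tmp/visitors_8_2013-01-01.dat"
    with open(filename, "wb") as f:
        f.write(zlib.compress(pickle.dumps(ba, protocol=pickle.HIGHEST_PROTOCOL)))

    with open(filename, "rb") as f:
        restored = pickle.loads(zlib.decompress(f.read()))
    assert restored == ba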
project-rig/rig | rig/place_and_route/place/sequential.py | place | def place(vertices_resources, nets, machine, constraints,
vertex_order=None, chip_order=None):
"""Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
chip). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attempted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existent or dead chips are also allowed (and will simply be
skipped).
"""
# If no vertices to place, just stop (from here on we presume that at least
# one vertex will be placed)
if len(vertices_resources) == 0:
return {}
# Within the algorithm we modify the resource availability values in the
# machine to account for the effects of the current placement. As a result,
# an internal copy of the structure must be made.
machine = machine.copy()
# {vertex: (x, y), ...} gives the location of all vertices, updated
# throughout the function.
placements = {}
# Handle constraints
vertices_resources, nets, constraints, substitutions = \
apply_same_chip_constraints(vertices_resources, nets, constraints)
for constraint in constraints:
if isinstance(constraint, LocationConstraint):
# Location constraints are handled by recording the set of fixed
# vertex locations and subtracting their resources from the chips
# they're allocated to.
location = constraint.location
if location not in machine:
raise InvalidConstraintError(
"Chip requested by {} unavailable".format(machine))
vertex = constraint.vertex
# Record the constrained vertex's location
placements[vertex] = location
# Make sure the vertex fits at the requested location (updating the
# resource availability after placement)
resources = vertices_resources[vertex]
machine[location] = subtract_resources(machine[location],
resources)
if overallocated(machine[location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
elif isinstance(constraint, # pragma: no branch
ReserveResourceConstraint):
apply_reserve_resource_constraint(machine, constraint)
if vertex_order is not None:
# Must modify the vertex_order to substitute the merged vertices
# inserted by apply_reserve_resource_constraint.
vertex_order = list(vertex_order)
for merged_vertex in substitutions:
# Swap the first merged vertex for its MergedVertex object and
# remove all other vertices from the merged set
vertex_order[vertex_order.index(merged_vertex.vertices[0])] \
= merged_vertex
# Remove all other vertices in the MergedVertex
already_removed = set([merged_vertex.vertices[0]])
for vertex in merged_vertex.vertices[1:]:
if vertex not in already_removed:
vertex_order.remove(vertex)
already_removed.add(vertex)
# The set of vertices which have not been constrained, in iteration order
movable_vertices = (v for v in (vertices_resources
if vertex_order is None
else vertex_order)
if v not in placements)
# A cyclic iterator over all available chips
chips = cycle(c for c in (machine if chip_order is None else chip_order)
if c in machine)
chips_iter = iter(chips)
try:
cur_chip = next(chips_iter)
except StopIteration:
raise InsufficientResourceError("No working chips in machine.")
# The last chip that we successfully placed something on. Used to detect
# when we've tried all available chips and not found a suitable candidate
last_successful_chip = cur_chip
# Place each vertex in turn
for vertex in movable_vertices:
while True:
resources_if_placed = subtract_resources(
machine[cur_chip], vertices_resources[vertex])
if not overallocated(resources_if_placed):
# The vertex fits: record the resources consumed and move on to
# the next vertex.
placements[vertex] = cur_chip
machine[cur_chip] = resources_if_placed
last_successful_chip = cur_chip
break
else:
# The vertex won't fit on this chip, move onto the next one
# available.
cur_chip = next(chips_iter)
# If we've looped around all the available chips without
# managing to place the vertex, give up!
if cur_chip == last_successful_chip:
raise InsufficientResourceError(
"Ran out of chips while attempting to place vertex "
"{}".format(vertex))
finalise_same_chip_constraints(substitutions, placements)
return placements | python | def place(vertices_resources, nets, machine, constraints,
vertex_order=None, chip_order=None):
"""Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
chip). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attempted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existent or dead chips are also allowed (and will simply be
skipped).
"""
# If no vertices to place, just stop (from here on we presume that at least
# one vertex will be placed)
if len(vertices_resources) == 0:
return {}
# Within the algorithm we modify the resource availability values in the
# machine to account for the effects of the current placement. As a result,
# an internal copy of the structure must be made.
machine = machine.copy()
# {vertex: (x, y), ...} gives the location of all vertices, updated
# throughout the function.
placements = {}
# Handle constraints
vertices_resources, nets, constraints, substitutions = \
apply_same_chip_constraints(vertices_resources, nets, constraints)
for constraint in constraints:
if isinstance(constraint, LocationConstraint):
# Location constraints are handled by recording the set of fixed
# vertex locations and subtracting their resources from the chips
# they're allocated to.
location = constraint.location
if location not in machine:
raise InvalidConstraintError(
"Chip requested by {} unavailable".format(machine))
vertex = constraint.vertex
# Record the constrained vertex's location
placements[vertex] = location
# Make sure the vertex fits at the requested location (updating the
# resource availability after placement)
resources = vertices_resources[vertex]
machine[location] = subtract_resources(machine[location],
resources)
if overallocated(machine[location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
elif isinstance(constraint, # pragma: no branch
ReserveResourceConstraint):
apply_reserve_resource_constraint(machine, constraint)
if vertex_order is not None:
# Must modify the vertex_order to substitute the merged vertices
# inserted by apply_reserve_resource_constraint.
vertex_order = list(vertex_order)
for merged_vertex in substitutions:
# Swap the first merged vertex for its MergedVertex object and
# remove all other vertices from the merged set
vertex_order[vertex_order.index(merged_vertex.vertices[0])] \
= merged_vertex
# Remove all other vertices in the MergedVertex
already_removed = set([merged_vertex.vertices[0]])
for vertex in merged_vertex.vertices[1:]:
if vertex not in already_removed:
vertex_order.remove(vertex)
already_removed.add(vertex)
# The set of vertices which have not been constrained, in iteration order
movable_vertices = (v for v in (vertices_resources
if vertex_order is None
else vertex_order)
if v not in placements)
# A cyclic iterator over all available chips
chips = cycle(c for c in (machine if chip_order is None else chip_order)
if c in machine)
chips_iter = iter(chips)
try:
cur_chip = next(chips_iter)
except StopIteration:
raise InsufficientResourceError("No working chips in machine.")
# The last chip that we successfully placed something on. Used to detect
# when we've tried all available chips and not found a suitable candidate
last_successful_chip = cur_chip
# Place each vertex in turn
for vertex in movable_vertices:
while True:
resources_if_placed = subtract_resources(
machine[cur_chip], vertices_resources[vertex])
if not overallocated(resources_if_placed):
# The vertex fits: record the resources consumed and move on to
# the next vertex.
placements[vertex] = cur_chip
machine[cur_chip] = resources_if_placed
last_successful_chip = cur_chip
break
else:
# The vertex won't fit on this chip, move onto the next one
# available.
cur_chip = next(chips_iter)
# If we've looped around all the available chips without
# managing to place the vertex, give up!
if cur_chip == last_successful_chip:
raise InsufficientResourceError(
"Ran out of chips while attempting to place vertex "
"{}".format(vertex))
finalise_same_chip_constraints(substitutions, placements)
return placements | [
"def",
"place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"vertex_order",
"=",
"None",
",",
"chip_order",
"=",
"None",
")",
":",
"# If no vertices to place, just stop (from here on we presume that at least",
"# one vertex will be placed)",
"if",
"len",
"(",
"vertices_resources",
")",
"==",
"0",
":",
"return",
"{",
"}",
"# Within the algorithm we modify the resource availability values in the",
"# machine to account for the effects of the current placement. As a result,",
"# an internal copy of the structure must be made.",
"machine",
"=",
"machine",
".",
"copy",
"(",
")",
"# {vertex: (x, y), ...} gives the location of all vertices, updated",
"# throughout the function.",
"placements",
"=",
"{",
"}",
"# Handle constraints",
"vertices_resources",
",",
"nets",
",",
"constraints",
",",
"substitutions",
"=",
"apply_same_chip_constraints",
"(",
"vertices_resources",
",",
"nets",
",",
"constraints",
")",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"LocationConstraint",
")",
":",
"# Location constraints are handled by recording the set of fixed",
"# vertex locations and subtracting their resources from the chips",
"# they're allocated to.",
"location",
"=",
"constraint",
".",
"location",
"if",
"location",
"not",
"in",
"machine",
":",
"raise",
"InvalidConstraintError",
"(",
"\"Chip requested by {} unavailable\"",
".",
"format",
"(",
"machine",
")",
")",
"vertex",
"=",
"constraint",
".",
"vertex",
"# Record the constrained vertex's location",
"placements",
"[",
"vertex",
"]",
"=",
"location",
"# Make sure the vertex fits at the requested location (updating the",
"# resource availability after placement)",
"resources",
"=",
"vertices_resources",
"[",
"vertex",
"]",
"machine",
"[",
"location",
"]",
"=",
"subtract_resources",
"(",
"machine",
"[",
"location",
"]",
",",
"resources",
")",
"if",
"overallocated",
"(",
"machine",
"[",
"location",
"]",
")",
":",
"raise",
"InsufficientResourceError",
"(",
"\"Cannot meet {}\"",
".",
"format",
"(",
"constraint",
")",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"# pragma: no branch",
"ReserveResourceConstraint",
")",
":",
"apply_reserve_resource_constraint",
"(",
"machine",
",",
"constraint",
")",
"if",
"vertex_order",
"is",
"not",
"None",
":",
"# Must modify the vertex_order to substitute the merged vertices",
"# inserted by apply_reserve_resource_constraint.",
"vertex_order",
"=",
"list",
"(",
"vertex_order",
")",
"for",
"merged_vertex",
"in",
"substitutions",
":",
"# Swap the first merged vertex for its MergedVertex object and",
"# remove all other vertices from the merged set",
"vertex_order",
"[",
"vertex_order",
".",
"index",
"(",
"merged_vertex",
".",
"vertices",
"[",
"0",
"]",
")",
"]",
"=",
"merged_vertex",
"# Remove all other vertices in the MergedVertex",
"already_removed",
"=",
"set",
"(",
"[",
"merged_vertex",
".",
"vertices",
"[",
"0",
"]",
"]",
")",
"for",
"vertex",
"in",
"merged_vertex",
".",
"vertices",
"[",
"1",
":",
"]",
":",
"if",
"vertex",
"not",
"in",
"already_removed",
":",
"vertex_order",
".",
"remove",
"(",
"vertex",
")",
"already_removed",
".",
"add",
"(",
"vertex",
")",
"# The set of vertices which have not been constrained, in iteration order",
"movable_vertices",
"=",
"(",
"v",
"for",
"v",
"in",
"(",
"vertices_resources",
"if",
"vertex_order",
"is",
"None",
"else",
"vertex_order",
")",
"if",
"v",
"not",
"in",
"placements",
")",
"# A cyclic iterator over all available chips",
"chips",
"=",
"cycle",
"(",
"c",
"for",
"c",
"in",
"(",
"machine",
"if",
"chip_order",
"is",
"None",
"else",
"chip_order",
")",
"if",
"c",
"in",
"machine",
")",
"chips_iter",
"=",
"iter",
"(",
"chips",
")",
"try",
":",
"cur_chip",
"=",
"next",
"(",
"chips_iter",
")",
"except",
"StopIteration",
":",
"raise",
"InsufficientResourceError",
"(",
"\"No working chips in machine.\"",
")",
"# The last chip that we successfully placed something on. Used to detect",
"# when we've tried all available chips and not found a suitable candidate",
"last_successful_chip",
"=",
"cur_chip",
"# Place each vertex in turn",
"for",
"vertex",
"in",
"movable_vertices",
":",
"while",
"True",
":",
"resources_if_placed",
"=",
"subtract_resources",
"(",
"machine",
"[",
"cur_chip",
"]",
",",
"vertices_resources",
"[",
"vertex",
"]",
")",
"if",
"not",
"overallocated",
"(",
"resources_if_placed",
")",
":",
"# The vertex fits: record the resources consumed and move on to",
"# the next vertex.",
"placements",
"[",
"vertex",
"]",
"=",
"cur_chip",
"machine",
"[",
"cur_chip",
"]",
"=",
"resources_if_placed",
"last_successful_chip",
"=",
"cur_chip",
"break",
"else",
":",
"# The vertex won't fit on this chip, move onto the next one",
"# available.",
"cur_chip",
"=",
"next",
"(",
"chips_iter",
")",
"# If we've looped around all the available chips without",
"# managing to place the vertex, give up!",
"if",
"cur_chip",
"==",
"last_successful_chip",
":",
"raise",
"InsufficientResourceError",
"(",
"\"Ran out of chips while attempting to place vertex \"",
"\"{}\"",
".",
"format",
"(",
"vertex",
")",
")",
"finalise_same_chip_constraints",
"(",
"substitutions",
",",
"placements",
")",
"return",
"placements"
] | Blindly places vertices in sequential order onto chips in the machine.
This algorithm sequentially places vertices onto chips in the order
specified (or in an undefined order if not specified). This algorithm is
essentially the simplest possible valid placement algorithm and is intended
to form the basis of other simple sequential and greedy placers.
The algorithm proceeds by attempting to place each vertex on a chip. If
the vertex fits we move onto the next vertex (but keep filling the same
chip). If the vertex does not fit we move onto the next candidate chip
until we find somewhere the vertex fits. The algorithm will raise an
:py:exc:`rig.place_and_route.exceptions.InsufficientResourceError`
if it has failed to fit a vertex on every chip.
Parameters
----------
vertex_order : None or iterable
The order in which the vertices should be attempted to be placed.
If None (the default), the vertices will be placed in the default
iteration order of the ``vertices_resources`` argument. If an iterable,
the iteration sequence should produce each vertex in vertices_resources
*exactly once*.
chip_order : None or iterable
The order in which chips should be tried as a candidate location for a
vertex.
If None (the default), the chips will be used in the default iteration
order of the ``machine`` object (a raster scan). If an iterable, the
iteration sequence should produce (x, y) pairs giving the coordinates
of chips to use. All working chip coordinates must be included in the
iteration sequence *exactly once*. Additional chip coordinates of
non-existent or dead chips are also allowed (and will simply be
skipped). | [
"Blindly",
"places",
"vertices",
"in",
"sequential",
"order",
"onto",
"chips",
"in",
"the",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sequential.py#L19-L163 |
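A hedged usage sketch for the sequential placer above. The 2x2 machine, core counts, vertex names and orderings are invented for the example; the import path follows the file path shown in this record::

    from rig.netlist import Net
    from rig.place_and_route import Cores, Machine
    from rig.place_and_route.place.sequential import place

    # A tiny 2x2 machine with two cores per chip (illustrative numbers).
    machine = Machine(2, 2, chip_resources={Cores: 2})

    vertices_resources = {"a": {Cores: 1}, "b": {Cores: 1}, "c": {Cores: 1}}
    nets = [Net("a", ["b", "c"])]

    placements = place(vertices_resources, nets, machine, constraints=[],
                       vertex_order=["a", "b", "c"],
                       chip_order=[(0, 0), (1, 0), (0, 1), (1, 1)])
    print(placements)   # e.g. {'a': (0, 0), 'b': (0, 0), 'c': (1, 0)}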
project-rig/rig | rig/place_and_route/wrapper.py | place_and_route_wrapper | def place_and_route_wrapper(vertices_resources, vertices_applications,
nets, net_keys,
system_info, constraints=[],
place=default_place, place_kwargs={},
allocate=default_allocate, allocate_kwargs={},
route=default_route, route_kwargs={},
minimise_tables_methods=(remove_default_entries,
ordered_covering),
core_resource=Cores, sdram_resource=SDRAM,
sram_resource=SRAM):
"""Wrapper for core place-and-route tasks for the common case.
This function takes a set of vertices and nets and produces placements,
allocations, minimised routing tables and application loading information.
.. note::
This function replaces the deprecated :py:func:`.wrapper` function and
makes use of the additional information provided by the
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object
to infer the constraints required by most applications such as
reserving non-idle cores such as the monitor processor.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
system_info : \
:py:class:`~rig.machine_control.machine_controller.SystemInfo`
A data structure which defines the resources available in the target
SpiNNaker machine, typically returned by
:py:meth:`rig.machine_control.MachineController.get_system_info`. This
information will be used internally to build a
:py:class:`~rig.place_and_route.Machine` and set of
:py:mod:`rig.place_and_route.constraints` which describe the SpiNNaker
machine used and ensure placement, allocation and routing only use
working and unused chips, cores, memory and links. If greater control
over these datastructures is required this wrapper may not be
appropriate.
constraints : [constraint, ...]
**Optional.** A list of additional constraints on placement, allocation
and routing. Available constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module. These constraints
will be added to those derived from the ``system_info`` argument which
restrict placement and allocation to only idle cores.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
minimise_tables_methods : [:py:func:`rig.routing_table.minimise`, ...]
**Optional.** An iterable of routing table minimisation algorithms to
use when routing tables outgrow the space available. Each method is
tried in the order presented and the first to meet the required target
length for a given chip is used. Consequently less computationally
costly algorithms should be nearer the start of the list. The default
methods will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
sram_resource : resource (Default: :py:data:`~rig.place_and_route.SRAM`)
**Optional.** The resource identifier used for SRAM (System RAM).
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
"""
# Infer place-and-route data-structures from SystemInfo
machine = build_machine(system_info,
core_resource=core_resource,
sdram_resource=sdram_resource,
sram_resource=sram_resource)
base_constraints = build_core_constraints(system_info, core_resource)
constraints = base_constraints + constraints
# Place/Allocate/Route
placements = place(vertices_resources, nets, machine, constraints,
**place_kwargs)
allocations = allocate(vertices_resources, nets, machine, constraints,
placements, **allocate_kwargs)
routes = route(vertices_resources, nets, machine, constraints, placements,
allocations, core_resource, **route_kwargs)
# Build data-structures ready to feed to the machine loading functions
application_map = build_application_map(vertices_applications, placements,
allocations, core_resource)
# Build routing tables from the generated routes
routing_tables = routing_tree_to_tables(routes, net_keys)
# Minimise the routing tables, if required
target_lengths = build_routing_table_target_lengths(system_info)
routing_tables = minimise_tables(routing_tables,
target_lengths,
minimise_tables_methods)
return placements, allocations, application_map, routing_tables | python | def place_and_route_wrapper(vertices_resources, vertices_applications,
nets, net_keys,
system_info, constraints=[],
place=default_place, place_kwargs={},
allocate=default_allocate, allocate_kwargs={},
route=default_route, route_kwargs={},
minimise_tables_methods=(remove_default_entries,
ordered_covering),
core_resource=Cores, sdram_resource=SDRAM,
sram_resource=SRAM):
"""Wrapper for core place-and-route tasks for the common case.
This function takes a set of vertices and nets and produces placements,
allocations, minimised routing tables and application loading information.
.. note::
This function replaces the deprecated :py:func:`.wrapper` function and
makes use of the additional information provided by the
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object
to infer the constraints required by most applications such as
reserving non-idle cores such as the monitor processor.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
system_info : \
:py:class:`~rig.machine_control.machine_controller.SystemInfo`
A data structure which defines the resources available in the target
SpiNNaker machine, typically returned by
:py:meth:`rig.machine_control.MachineController.get_system_info`. This
information will be used internally to build a
:py:class:`~rig.place_and_route.Machine` and set of
:py:mod:`rig.place_and_route.constraints` which describe the SpiNNaker
machine used and ensure placement, allocation and routing only use
working and unused chips, cores, memory and links. If greater control
over these datastructures is required this wrapper may not be
appropriate.
constraints : [constraint, ...]
**Optional.** A list of additional constraints on placement, allocation
and routing. Available constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module. These constraints
will be added to those derived from the ``system_info`` argument which
restrict placement and allocation to only idle cores.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
minimise_tables_methods : [:py:func:`rig.routing_table.minimise`, ...]
**Optional.** An iterable of routing table minimisation algorithms to
use when routing tables outgrow the space available. Each method is
tried in the order presented and the first to meet the required target
length for a given chip is used. Consequently less computationally
costly algorithms should be nearer the start of the list. The default
methods will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
sram_resource : resource (Default: :py:data:`~rig.place_and_route.SRAM`)
**Optional.** The resource identifier used for SRAM (System RAM).
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
"""
# Infer place-and-route data-structures from SystemInfo
machine = build_machine(system_info,
core_resource=core_resource,
sdram_resource=sdram_resource,
sram_resource=sram_resource)
base_constraints = build_core_constraints(system_info, core_resource)
constraints = base_constraints + constraints
# Place/Allocate/Route
placements = place(vertices_resources, nets, machine, constraints,
**place_kwargs)
allocations = allocate(vertices_resources, nets, machine, constraints,
placements, **allocate_kwargs)
routes = route(vertices_resources, nets, machine, constraints, placements,
allocations, core_resource, **route_kwargs)
# Build data-structures ready to feed to the machine loading functions
application_map = build_application_map(vertices_applications, placements,
allocations, core_resource)
# Build routing tables from the generated routes
routing_tables = routing_tree_to_tables(routes, net_keys)
# Minimise the routing tables, if required
target_lengths = build_routing_table_target_lengths(system_info)
routing_tables = minimise_tables(routing_tables,
target_lengths,
minimise_tables_methods)
return placements, allocations, application_map, routing_tables | [
"def",
"place_and_route_wrapper",
"(",
"vertices_resources",
",",
"vertices_applications",
",",
"nets",
",",
"net_keys",
",",
"system_info",
",",
"constraints",
"=",
"[",
"]",
",",
"place",
"=",
"default_place",
",",
"place_kwargs",
"=",
"{",
"}",
",",
"allocate",
"=",
"default_allocate",
",",
"allocate_kwargs",
"=",
"{",
"}",
",",
"route",
"=",
"default_route",
",",
"route_kwargs",
"=",
"{",
"}",
",",
"minimise_tables_methods",
"=",
"(",
"remove_default_entries",
",",
"ordered_covering",
")",
",",
"core_resource",
"=",
"Cores",
",",
"sdram_resource",
"=",
"SDRAM",
",",
"sram_resource",
"=",
"SRAM",
")",
":",
"# Infer place-and-route data-structures from SystemInfo",
"machine",
"=",
"build_machine",
"(",
"system_info",
",",
"core_resource",
"=",
"core_resource",
",",
"sdram_resource",
"=",
"sdram_resource",
",",
"sram_resource",
"=",
"sram_resource",
")",
"base_constraints",
"=",
"build_core_constraints",
"(",
"system_info",
",",
"core_resource",
")",
"constraints",
"=",
"base_constraints",
"+",
"constraints",
"# Place/Allocate/Route",
"placements",
"=",
"place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"*",
"*",
"place_kwargs",
")",
"allocations",
"=",
"allocate",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"placements",
",",
"*",
"*",
"allocate_kwargs",
")",
"routes",
"=",
"route",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"placements",
",",
"allocations",
",",
"core_resource",
",",
"*",
"*",
"route_kwargs",
")",
"# Build data-structures ready to feed to the machine loading functions",
"application_map",
"=",
"build_application_map",
"(",
"vertices_applications",
",",
"placements",
",",
"allocations",
",",
"core_resource",
")",
"# Build routing tables from the generated routes",
"routing_tables",
"=",
"routing_tree_to_tables",
"(",
"routes",
",",
"net_keys",
")",
"# Minimise the routing tables, if required",
"target_lengths",
"=",
"build_routing_table_target_lengths",
"(",
"system_info",
")",
"routing_tables",
"=",
"minimise_tables",
"(",
"routing_tables",
",",
"target_lengths",
",",
"minimise_tables_methods",
")",
"return",
"placements",
",",
"allocations",
",",
"application_map",
",",
"routing_tables"
] | Wrapper for core place-and-route tasks for the common case.
This function takes a set of vertices and nets and produces placements,
allocations, minimised routing tables and application loading information.
.. note::
This function replaces the deprecated :py:func:`.wrapper` function and
makes use of the additional information provided by the
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object
to infer the constraints required by most applications such as
reserving non-idle cores such as the monitor processor.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
system_info : \
:py:class:`~rig.machine_control.machine_controller.SystemInfo`
A data structure which defines the resources available in the target
SpiNNaker machine, typically returned by
:py:meth:`rig.machine_control.MachineController.get_system_info`. This
information will be used internally to build a
:py:class:`~rig.place_and_route.Machine` and set of
:py:mod:`rig.place_and_route.constraints` which describe the SpiNNaker
machine used and ensure placement, allocation and routing only use
working and unused chips, cores, memory and links. If greater control
over these datastructures is required this wrapper may not be
appropriate.
constraints : [constraint, ...]
**Optional.** A list of additional constraints on placement, allocation
and routing. Available constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module. These constraints
will be added to those derived from the ``system_info`` argument which
restrict placement and allocation to only idle cores.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
minimise_tables_methods : [:py:func:`rig.routing_table.minimise`, ...]
**Optional.** An iterable of routing table minimisation algorithms to
use when routing tables outgrow the space available. Each method is
tried in the order presented and the first to meet the required target
length for a given chip is used. Consequently less computationally
costly algorithms should be nearer the start of the list. The default
methods will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
sram_resource : resource (Default: :py:data:`~rig.place_and_route.SRAM`)
**Optional.** The resource identifier used for SRAM (System RAM).
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries. | [
"Wrapper",
"for",
"core",
"place",
"-",
"and",
"-",
"route",
"tasks",
"for",
"the",
"common",
"case",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/wrapper.py#L27-L168 |
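A hedged end-to-end sketch of calling place_and_route_wrapper above against a live machine; the hostname, vertex names, binary name and routing key are placeholders, and the import follows this record's module path::

    from rig.machine_control import MachineController
    from rig.netlist import Net
    from rig.place_and_route import Cores
    from rig.place_and_route.wrapper import place_and_route_wrapper

    mc = MachineController("spinnaker-hostname")      # placeholder hostname
    system_info = mc.get_system_info()

    vertices_resources = {"v0": {Cores: 1}, "v1": {Cores: 1}}
    vertices_applications = {"v0": "example.aplx", "v1": "example.aplx"}
    nets = [Net("v0", ["v1"])]
    net_keys = {nets[0]: (0x0000BEEF, 0xFFFFFFFF)}

    placements, allocations, application_map, routing_tables = \
        place_and_route_wrapper(vertices_resources, vertices_applications,
                                nets, net_keys, system_info)
    # The results are typically passed on to the machine loading functions
    # (e.g. loading routing tables and applications via the controller).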
project-rig/rig | rig/place_and_route/wrapper.py | wrapper | def wrapper(vertices_resources, vertices_applications,
nets, net_keys,
machine, constraints=[],
reserve_monitor=True, align_sdram=True,
place=default_place, place_kwargs={},
allocate=default_allocate, allocate_kwargs={},
route=default_route, route_kwargs={},
core_resource=Cores, sdram_resource=SDRAM):
"""Wrapper for core place-and-route tasks for the common case.
At a high level this function essentially takes a set of vertices and nets
and produces placements, memory allocations, routing tables and application
loading information.
.. warning::
This function is deprecated. New users should use
:py:func:`.place_and_route_wrapper` along with
:py:meth:`rig.machine_control.MachineController.get_system_info` in
place of this function. The new wrapper automatically reserves cores
and SDRAM already in use in the target machine, improving on the
behaviour of this wrapper which blindly reserves certain ranges of
resources presuming only core 0 (the monitor processor) is not idle.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
A dictionary from vertex to the required resources for that vertex.
This dictionary must include an entry for every vertex in the
application.
Resource requirements are specified by a dictionary `{resource:
quantity, ...}` where `resource` is some resource identifier and
`quantity` is a non-negative integer representing the quantity of that
resource required.
vertices_applications : {vertex: application, ...}
A dictionary from vertices to the application binary to load
onto cores associated with that vertex. Applications are given as a
string containing the file name of the binary to load.
nets : [:py:class:`~rig.netlist.Net`, ...]
A list (in no particular order) defining the nets connecting vertices.
net_keys : {:py:class:`~rig.netlist.Net`: (key, mask), ...}
A dictionary from nets to (key, mask) tuples to be used in SpiNNaker
routing tables for routes implementing this net. The key and mask
should be given as 32-bit integers.
machine : :py:class:`rig.place_and_route.Machine`
A data structure which defines the resources available in the target
SpiNNaker machine.
constraints : [constraint, ...]
A list of constraints on placement, allocation and routing. Available
constraints are provided in the
:py:mod:`rig.place_and_route.constraints` module.
reserve_monitor : bool (Default: True)
**Optional.** If True, reserve core zero since it will be used as the
monitor processor using a
:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`.
align_sdram : bool (Default: True)
**Optional.** If True, SDRAM allocations will be aligned to 4-byte
addresses. Specifically, the supplied constraints will be augmented
with an `AlignResourceConstraint(sdram_resource, 4)`.
place : function (Default: :py:func:`rig.place_and_route.place`)
**Optional.** Placement algorithm to use.
place_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the placer.
allocate : function (Default: :py:func:`rig.place_and_route.allocate`)
**Optional.** Allocation algorithm to use.
allocate_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the allocator.
route : function (Default: :py:func:`rig.place_and_route.route`)
**Optional.** Routing algorithm to use.
route_kwargs : dict (Default: {})
**Optional.** Algorithm-specific arguments for the router.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
**Optional.** The resource identifier used for cores.
sdram_resource : resource (Default: :py:data:`~rig.place_and_route.SDRAM`)
**Optional.** The resource identifier used for SDRAM.
Returns
-------
placements : {vertex: (x, y), ...}
A dictionary from vertices to the chip coordinate produced by
placement.
allocations : {vertex: {resource: slice, ...}, ...}
A dictionary from vertices to the resources allocated to it. Resource
allocations are dictionaries from resources to a :py:class:`slice`
defining the range of the given resource type allocated to the vertex.
These :py:class:`slice` objects have `start` <= `end` and `step` set to
None.
application_map : {application: {(x, y): set([core_num, ...]), ...}, ...}
A dictionary from application to the set of cores it should be loaded
onto. The set of cores is given as a dictionary from chip to sets of
core numbers.
routing_tables : {(x, y): \
[:py:class:`~rig.routing_table.RoutingTableEntry`, \
...], ...}
The generated routing tables. Provided as a dictionary from chip to a
list of routing table entries.
"""
warnings.warn("rig.place_and_route.wrapper is deprecated "
"use rig.place_and_route.place_and_route_wrapper instead in "
"new applications.",
DeprecationWarning)
constraints = constraints[:]
# Augment constraints with (historically) commonly used constraints
if reserve_monitor:
constraints.append(
ReserveResourceConstraint(core_resource, slice(0, 1)))
if align_sdram:
constraints.append(AlignResourceConstraint(sdram_resource, 4))
# Place/Allocate/Route
placements = place(vertices_resources, nets, machine, constraints,
**place_kwargs)
allocations = allocate(vertices_resources, nets, machine, constraints,
placements, **allocate_kwargs)
routes = route(vertices_resources, nets, machine, constraints, placements,
allocations, core_resource, **route_kwargs)
# Build data-structures ready to feed to the machine loading functions
application_map = build_application_map(vertices_applications, placements,
allocations, core_resource)
# Build data-structures ready to feed to the machine loading functions
from rig.place_and_route.utils import build_routing_tables
routing_tables = build_routing_tables(routes, net_keys)
return placements, allocations, application_map, routing_tables | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/wrapper.py#L171-L296
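
A minimal usage sketch of the deprecated wrapper above. The netlist, resource quantities, routing keys and the 2x2 machine are hypothetical placeholders; a real application would build these from its own data and from the target machine.

from rig.netlist import Net
from rig.place_and_route import Machine, Cores, SDRAM
from rig.place_and_route.wrapper import wrapper

# Hypothetical two-vertex netlist; names, quantities and keys are illustrative.
vertices_resources = {"v0": {Cores: 1, SDRAM: 1024},
                      "v1": {Cores: 1, SDRAM: 2048}}
vertices_applications = {"v0": "app.aplx", "v1": "app.aplx"}
nets = [Net("v0", ["v1"])]
net_keys = {nets[0]: (0x00010000, 0xFFFF0000)}

machine = Machine(2, 2)  # assumed: a 2x2 machine with rig's default chip resources

placements, allocations, application_map, routing_tables = \
    wrapper(vertices_resources, vertices_applications,
            nets, net_keys, machine)
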
project-rig/rig | rig/machine_control/regions.py | get_region_for_chip | def get_region_for_chip(x, y, level=3):
"""Get the region word for the given chip co-ordinates.
Parameters
----------
x : int
x co-ordinate
y : int
y co-ordinate
level : int
Level of region to build. 0 is the most coarse and 3 is the finest.
When 3 is used the specified region will ONLY select the given chip; for
coarser levels, surrounding chips will also be selected.
Returns
-------
int
A 32-bit value representing the co-ordinates of the chunk of SpiNNaker
chips that should be selected and the blocks within this chunk that are
selected. As long as bits (31:16) are the same these values may be
OR-ed together to increase the number of sub-blocks selected.
"""
shift = 6 - 2*level
bit = ((x >> shift) & 3) + 4*((y >> shift) & 3) # bit in bits 15:0 to set
mask = 0xffff ^ ((4 << shift) - 1) # in {0xfffc, 0xfff0, 0xffc0, 0xff00}
nx = x & mask # The mask guarantees that bits 1:0 will be cleared
ny = y & mask # The mask guarantees that bits 1:0 will be cleared
# sig bits x | sig bits y | 2-bit level | region select bits
region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
return region | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/regions.py#L20-L52
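
As an informal check of the encoding described above: level-3 region words for chips in the same aligned 4x4 block share bits 31:16 and may therefore be OR-ed together.

from rig.machine_control.regions import get_region_for_chip

r0 = get_region_for_chip(0, 0)  # level 3: selects only chip (0, 0)
r1 = get_region_for_chip(1, 0)  # same 4x4 block, different select bit
assert (r0 & 0xffff0000) == (r1 & 0xffff0000)
combined = r0 | r1              # one region word selecting both chips
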
project-rig/rig | rig/machine_control/regions.py | compress_flood_fill_regions | def compress_flood_fill_regions(targets):
"""Generate a reduced set of flood fill parameters.
Parameters
----------
targets : {(x, y) : set([c, ...]), ...}
For each used chip a set of core numbers onto which an application
should be loaded. E.g., the output of
:py:func:`~rig.place_and_route.util.build_application_map` when indexed
by an application.
Yields
------
(region, core mask)
Pair of integers which represent a region of a SpiNNaker machine and a
core mask of selected cores within that region for use in flood-filling
an application. `region` and `core_mask` are both integer
representations of bit fields that are understood by SCAMP.
The pairs are yielded in an order suitable for direct use with SCAMP's
flood-fill core select (FFCS) method of loading.
"""
t = RegionCoreTree()
for (x, y), cores in iteritems(targets):
for p in cores:
t.add_core(x, y, p)
return sorted(t.get_regions_and_coremasks()) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/regions.py#L55-L83
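
A small, hypothetical targets dictionary of the kind produced by build_application_map, run through the compression above:

from rig.machine_control.regions import compress_flood_fill_regions

# Core 1 on chips (0, 0) and (1, 0); core 2 on chip (0, 1).
targets = {(0, 0): {1}, (1, 0): {1}, (0, 1): {2}}

for region, core_mask in compress_flood_fill_regions(targets):
    print(hex(region), hex(core_mask))
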
project-rig/rig | rig/machine_control/regions.py | RegionCoreTree.get_regions_and_coremasks | def get_regions_and_coremasks(self):
"""Generate a set of ordered paired region and core mask representations.
.. note::
The region and core masks are ordered such that ``(region << 32) |
core_mask`` is monotonically increasing. Consequently region and
core masks generated by this method can be used with SCAMP's
Flood-Fill Core Select (FFSC) method.
Yields
------
(region, core mask)
Pair of integers which represent a region of a SpiNNaker machine
and a core mask of selected cores within that region.
"""
region_code = ((self.base_x << 24) | (self.base_y << 16) |
(self.level << 16))
# Generate core masks for any regions which are selected at this level
# Create a mapping from subregion mask to core numbers
subregions_cores = collections.defaultdict(lambda: 0x0)
for core, subregions in enumerate(self.locally_selected):
if subregions: # If any subregions are selected on this level
subregions_cores[subregions] |= 1 << core
# Order the locally selected items and then yield them
for (subregions, coremask) in sorted(subregions_cores.items()):
yield (region_code | subregions), coremask
if self.level < 3:
# Iterate through the subregions and recurse, we iterate through in
# the order which ensures that anything we yield is in increasing
# order.
for i in (4*x + y for y in range(4) for x in range(4)):
subregion = self.subregions[i]
if subregion is not None:
for (region, coremask) in \
subregion.get_regions_and_coremasks():
yield (region, coremask) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/regions.py#L129-L167
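
Sketch of driving the tree directly; the chip coordinates and core numbers are arbitrary examples.

from rig.machine_control.regions import RegionCoreTree

tree = RegionCoreTree()
tree.add_core(0, 0, 1)
tree.add_core(4, 4, 2)

# (region, core mask) pairs suitable for SCAMP's flood-fill core select.
for region, core_mask in tree.get_regions_and_coremasks():
    print(hex(region), hex(core_mask))
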
project-rig/rig | rig/machine_control/regions.py | RegionCoreTree.add_core | def add_core(self, x, y, p):
"""Add a new core to the region tree.
Raises
------
ValueError
If the co-ordinate is not contained within this part of the tree or
the core number is out of range.
Returns
-------
bool
True if the specified core is to be loaded to all subregions.
"""
# Check that the co-ordinate is contained in this region
if ((p < 0 or p > 17) or
(x < self.base_x or x >= self.base_x + self.scale) or
(y < self.base_y or y >= self.base_y + self.scale)):
raise ValueError((x, y, p))
# Determine which subregion this refers to
subregion = ((x >> self.shift) & 0x3) + 4*((y >> self.shift) & 0x3)
if self.level == 3:
# If level-3 then we just add to the locally selected regions
self.locally_selected[p] |= 1 << subregion
elif not self.locally_selected[p] & (1 << subregion):
# If the subregion isn't in `locally_selected` for this core number
# then add the core to the subregion.
if self.subregions[subregion] is None:
# "Lazy": if the subtree doesn't exist yet then add it
base_x = int(self.base_x + (self.scale / 4) * (subregion % 4))
base_y = int(self.base_y + (self.scale / 4) * (subregion // 4))
self.subregions[subregion] = RegionCoreTree(base_x, base_y,
self.level + 1)
# If the subregion reports that all of its subregions for this core
# are selected then we need to add it to `locally_selected`.
if self.subregions[subregion].add_core(x, y, p):
self.locally_selected[p] |= 1 << subregion
# If all subregions are selected for this core and this is not the top
# level in the hierarchy then return True after emptying the local
# selection for the core.
if self.locally_selected[p] == 0xffff and self.level != 0:
self.locally_selected[p] = 0x0
return True
else:
return False | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/regions.py#L169-L217
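
A sketch of the propagation rule described above: once core p has been added on every chip of an aligned 4x4 block, the level-3 node clears its local selection and reports the block to its parent, so the whole block collapses into a single coarser entry.

from rig.machine_control.regions import RegionCoreTree

tree = RegionCoreTree()
# Select core 3 on every chip of the aligned 4x4 block at (0, 0)..(3, 3).
for x in range(4):
    for y in range(4):
        tree.add_core(x, y, 3)

# The sixteen chip-level selections are reported as one level-2 entry.
print(list(tree.get_regions_and_coremasks()))
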
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.send_scp | def send_scp(self, *args, **kwargs):
"""Transmit an SCP Packet to a specific board.
Automatically determines the appropriate connection to use.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
Parameters
----------
cabinet : int
frame : int
board : int
"""
# Retrieve contextual arguments from the keyword arguments. The
# context system ensures that these values are present.
cabinet = kwargs.pop("cabinet")
frame = kwargs.pop("frame")
board = kwargs.pop("board")
return self._send_scp(cabinet, frame, board, *args, **kwargs) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L125-L145
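
A hedged sketch of supplying the contextual cabinet/frame/board arguments required by the methods above. The hostname is a placeholder, and the with-block form assumes BMPController follows the same context mechanism as MachineController.

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname

# Coordinates may be passed explicitly with each call...
info = bc.get_software_version(cabinet=0, frame=0, board=0)

# ...or, assuming the usual context mechanism, supplied once for a block.
with bc(cabinet=0, frame=0, board=0):
    bc.set_led(7, action=True)
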
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController._send_scp | def _send_scp(self, cabinet, frame, board, *args, **kwargs):
"""Determine the best connection to use to send an SCP packet and use
it to transmit.
See the arguments for
:py:meth:`~rig.machine_control.scp_connection.SCPConnection` for
details.
"""
# Find the connection which best matches the specified coordinates,
# preferring direct connections to a board when available.
connection = self.connections.get((cabinet, frame, board), None)
if connection is None:
connection = self.connections.get((cabinet, frame), None)
assert connection is not None, \
"No connection available to ({}, {}, {})".format(cabinet,
frame,
board)
# Determine the size of packet we expect in return, this is usually the
# size that we are informed we should expect by SCAMP/SARK or else is
# the default.
if self._scp_data_length is None:
length = consts.SCP_SVER_RECEIVE_LENGTH_MAX
else:
length = self._scp_data_length
return connection.send_scp(length, 0, 0, board, *args, **kwargs) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L147-L173
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.get_software_version | def get_software_version(self, cabinet, frame, board):
"""Get the software version for a given BMP.
Returns
-------
:py:class:`.BMPInfo`
Information about the software running on a BMP.
"""
sver = self._send_scp(cabinet, frame, board, SCPCommands.sver)
# Format the result
# arg1
code_block = (sver.arg1 >> 24) & 0xff
frame_id = (sver.arg1 >> 16) & 0xff
can_id = (sver.arg1 >> 8) & 0xff
board_id = sver.arg1 & 0xff
# arg2 (version field unpacked separately)
buffer_size = (sver.arg2 & 0xffff)
software_name, version, version_labels = \
unpack_sver_response_version(sver)
return BMPInfo(code_block, frame_id, can_id, board_id, version,
buffer_size, sver.arg3, software_name, version_labels) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L176-L200
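
Minimal sketch, assuming a reachable BMP at a placeholder hostname:

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname
info = bc.get_software_version(0, 0, 0)
print(info)  # BMPInfo: board/frame/CAN IDs, software name, version and build info
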
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.set_power | def set_power(self, state, cabinet, frame, board,
delay=0.0, post_power_on_delay=5.0):
"""Control power to the SpiNNaker chips and FPGAs on a board.
Parameters
----------
state : bool
True for power on, False for power off.
board : int or iterable
Specifies the board to control the power of. This may also be an
iterable of multiple boards (in the same frame). The command will
actually be sent board 0, regardless of the set of boards
specified.
delay : float
Number of seconds delay between power state changes of different
boards.
post_power_on_delay : float
Number of seconds for this command to block once the power on
command has been carried out. A short delay (default) is useful at
this point since power-supplies and SpiNNaker chips may still be
coming on line immediately after the power-on command is sent.
.. warning::
If the set of boards to be powered-on does not include board 0,
this timeout should be extended by 2-3 seconds. This is due to
the fact that BMPs immediately acknowledge power-on commands to
boards other than board 0 but wait for the FPGAs to be loaded
before responding when board 0 is powered on.
"""
if isinstance(board, int):
boards = [board]
else:
boards = list(board)
arg1 = int(delay * 1000) << 16 | (1 if state else 0)
arg2 = sum(1 << b for b in boards)
# Allow additional time for response when powering on (since FPGAs must
# be loaded). Also, always send the command to board 0. This is
# required by the BMPs which do not correctly handle the power-on
# command being sent to anything but board 0. Though this is a bug in
# the BMP firmware, it is considered sufficiently easy to work-around
# that no fix is planned.
self._send_scp(cabinet, frame, 0, SCPCommands.power,
arg1=arg1, arg2=arg2,
timeout=consts.BMP_POWER_ON_TIMEOUT if state else 0.0,
expected_args=0)
if state:
time.sleep(post_power_on_delay) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L203-L251
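
Power-cycling several boards of one frame, following the notes above (the command is always addressed to board 0, and powering on blocks briefly while the FPGAs load). Hostname and board numbers are placeholders.

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname
boards = [0, 1, 2]

bc.set_power(False, cabinet=0, frame=0, board=boards)             # power off
bc.set_power(True, cabinet=0, frame=0, board=boards, delay=0.05)  # power on, staggered by 50 ms per board
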
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.set_led | def set_led(self, led, action=None,
cabinet=Required, frame=Required, board=Required):
"""Set or toggle the state of an LED.
.. note::
At the time of writing, LED 7 is only set by the BMP on start-up to
indicate that the watchdog timer reset the board. After this point,
the LED is available for use by applications.
Parameters
----------
led : int or iterable
Number of the LED or an iterable of LEDs to set the state of (0-7)
action : bool or None
State to set the LED to. True for on, False for off, None to
toggle (default).
board : int or iterable
Specifies the board to control the LEDs of. This may also be an
iterable of multiple boards (in the same frame). The command will
actually be sent to the first board in the iterable.
"""
if isinstance(led, int):
leds = [led]
else:
leds = led
if isinstance(board, int):
boards = [board]
else:
boards = list(board)
board = boards[0]
# LED setting actions
arg1 = sum(LEDAction.from_bool(action) << (led * 2) for led in leds)
# Bitmask of boards to control
arg2 = sum(1 << b for b in boards)
self._send_scp(cabinet, frame, board, SCPCommands.led, arg1=arg1,
arg2=arg2, expected_args=0) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L254-L292
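
Example LED manipulation, assuming a reachable BMP at a placeholder hostname:

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname

# Toggle LED 0 on boards 0-2 (action=None toggles).
bc.set_led(0, action=None, cabinet=0, frame=0, board=[0, 1, 2])

# Turn LEDs 0 and 1 on, on board 0 only.
bc.set_led([0, 1], action=True, cabinet=0, frame=0, board=0)
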
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.read_fpga_reg | def read_fpga_reg(self, fpga_num, addr, cabinet, frame, board):
"""Read the value of an FPGA (SPI) register.
See the SpI/O project's spinnaker_fpga design's `README`_ for a listing
of FPGA registers. The SpI/O project can be found on GitHub at:
https://github.com/SpiNNakerManchester/spio/
.. _README: https://github.com/SpiNNakerManchester/spio/\
blob/master/designs/spinnaker_fpgas/README.md#spi-interface
Parameters
----------
fpga_num : int
FPGA number (0, 1 or 2) to communicate with.
addr : int
Register address to read to (will be rounded down to the nearest
32-bit word boundary).
Returns
-------
int
The 32-bit value at that address.
"""
arg1 = addr & (~0x3)
arg2 = 4 # Read a 32-bit value
arg3 = fpga_num
response = self._send_scp(cabinet, frame, board, SCPCommands.link_read,
arg1=arg1, arg2=arg2, arg3=arg3,
expected_args=0)
return struct.unpack("<I", response.data)[0] | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L295-L324
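
Reading a register, with the register address left as a hypothetical placeholder (real addresses come from the spI/O README linked above):

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname
REG_ADDR = 0x0                      # hypothetical register address

value = bc.read_fpga_reg(0, REG_ADDR, cabinet=0, frame=0, board=0)
print("FPGA 0 reg 0x{:08x} = 0x{:08x}".format(REG_ADDR, value))
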
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.write_fpga_reg | def write_fpga_reg(self, fpga_num, addr, value, cabinet, frame, board):
"""Write the value of an FPGA (SPI) register.
See the SpI/O project's spinnaker_fpga design's `README`_ for a listing
of FPGA registers. The SpI/O project can be found on GitHub at:
https://github.com/SpiNNakerManchester/spio/
.. _README: https://github.com/SpiNNakerManchester/spio/\
blob/master/designs/spinnaker_fpgas/README.md#spi-interface
Parameters
----------
fpga_num : int
FPGA number (0, 1 or 2) to communicate with.
addr : int
Register address to read or write to (will be rounded down to the
nearest 32-bit word boundary).
value : int
A 32-bit int value to write to the register
"""
arg1 = addr & (~0x3)
arg2 = 4 # Write a 32-bit value
arg3 = fpga_num
self._send_scp(cabinet, frame, board, SCPCommands.link_write,
arg1=arg1, arg2=arg2, arg3=arg3,
data=struct.pack("<I", value), expected_args=0) | python | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L327-L352
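
A read-modify-write sketch combining the two register calls; the address and the bit being set are hypothetical placeholders:

from rig.machine_control import BMPController

bc = BMPController("bmp-hostname")  # placeholder hostname
REG_ADDR = 0x0                      # hypothetical register address

old = bc.read_fpga_reg(1, REG_ADDR, cabinet=0, frame=0, board=0)
bc.write_fpga_reg(1, REG_ADDR, old | 0x1, cabinet=0, frame=0, board=0)
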
project-rig/rig | rig/machine_control/bmp_controller.py | BMPController.read_adc | def read_adc(self, cabinet, frame, board):
"""Read ADC data from the BMP including voltages and temperature.
Returns
-------
:py:class:`.ADCInfo`
"""
response = self._send_scp(cabinet, frame, board, SCPCommands.bmp_info,
arg1=BMPInfoType.adc, expected_args=0)
data = struct.unpack("<" # Little-endian
"8H" # uint16_t adc[8]
"4h" # int16_t t_int[4]
"4h" # int16_t t_ext[4]
"4h" # int16_t fan[4]
"I" # uint32_t warning
"I", # uint32_t shutdown
response.data)
return ADCInfo(
voltage_1_2c=data[1] * BMP_V_SCALE_2_5,
voltage_1_2b=data[2] * BMP_V_SCALE_2_5,
voltage_1_2a=data[3] * BMP_V_SCALE_2_5,
voltage_1_8=data[4] * BMP_V_SCALE_2_5,
voltage_3_3=data[6] * BMP_V_SCALE_3_3,
voltage_supply=data[7] * BMP_V_SCALE_12,
temp_top=float(data[8]) * BMP_TEMP_SCALE,
temp_btm=float(data[9]) * BMP_TEMP_SCALE,
temp_ext_0=((float(data[12]) * BMP_TEMP_SCALE)
if data[12] != BMP_MISSING_TEMP else None),
temp_ext_1=((float(data[13]) * BMP_TEMP_SCALE)
if data[13] != BMP_MISSING_TEMP else None),
fan_0=float(data[16]) if data[16] != BMP_MISSING_FAN else None,
fan_1=float(data[17]) if data[17] != BMP_MISSING_FAN else None,
) | python | def read_adc(self, cabinet, frame, board):
"""Read ADC data from the BMP including voltages and temperature.
Returns
-------
:py:class:`.ADCInfo`
"""
response = self._send_scp(cabinet, frame, board, SCPCommands.bmp_info,
arg1=BMPInfoType.adc, expected_args=0)
data = struct.unpack("<" # Little-endian
"8H" # uint16_t adc[8]
"4h" # int16_t t_int[4]
"4h" # int16_t t_ext[4]
"4h" # int16_t fan[4]
"I" # uint32_t warning
"I", # uint32_t shutdown
response.data)
return ADCInfo(
voltage_1_2c=data[1] * BMP_V_SCALE_2_5,
voltage_1_2b=data[2] * BMP_V_SCALE_2_5,
voltage_1_2a=data[3] * BMP_V_SCALE_2_5,
voltage_1_8=data[4] * BMP_V_SCALE_2_5,
voltage_3_3=data[6] * BMP_V_SCALE_3_3,
voltage_supply=data[7] * BMP_V_SCALE_12,
temp_top=float(data[8]) * BMP_TEMP_SCALE,
temp_btm=float(data[9]) * BMP_TEMP_SCALE,
temp_ext_0=((float(data[12]) * BMP_TEMP_SCALE)
if data[12] != BMP_MISSING_TEMP else None),
temp_ext_1=((float(data[13]) * BMP_TEMP_SCALE)
if data[13] != BMP_MISSING_TEMP else None),
fan_0=float(data[16]) if data[16] != BMP_MISSING_FAN else None,
fan_1=float(data[17]) if data[17] != BMP_MISSING_FAN else None,
) | [
"def",
"read_adc",
"(",
"self",
",",
"cabinet",
",",
"frame",
",",
"board",
")",
":",
"response",
"=",
"self",
".",
"_send_scp",
"(",
"cabinet",
",",
"frame",
",",
"board",
",",
"SCPCommands",
".",
"bmp_info",
",",
"arg1",
"=",
"BMPInfoType",
".",
"adc",
",",
"expected_args",
"=",
"0",
")",
"data",
"=",
"struct",
".",
"unpack",
"(",
"\"<\"",
"# Little-endian",
"\"8H\"",
"# uint16_t adc[8]",
"\"4h\"",
"# int16_t t_int[4]",
"\"4h\"",
"# int16_t t_ext[4]",
"\"4h\"",
"# int16_t fan[4]",
"\"I\"",
"# uint32_t warning",
"\"I\"",
",",
"# uint32_t shutdown",
"response",
".",
"data",
")",
"return",
"ADCInfo",
"(",
"voltage_1_2c",
"=",
"data",
"[",
"1",
"]",
"*",
"BMP_V_SCALE_2_5",
",",
"voltage_1_2b",
"=",
"data",
"[",
"2",
"]",
"*",
"BMP_V_SCALE_2_5",
",",
"voltage_1_2a",
"=",
"data",
"[",
"3",
"]",
"*",
"BMP_V_SCALE_2_5",
",",
"voltage_1_8",
"=",
"data",
"[",
"4",
"]",
"*",
"BMP_V_SCALE_2_5",
",",
"voltage_3_3",
"=",
"data",
"[",
"6",
"]",
"*",
"BMP_V_SCALE_3_3",
",",
"voltage_supply",
"=",
"data",
"[",
"7",
"]",
"*",
"BMP_V_SCALE_12",
",",
"temp_top",
"=",
"float",
"(",
"data",
"[",
"8",
"]",
")",
"*",
"BMP_TEMP_SCALE",
",",
"temp_btm",
"=",
"float",
"(",
"data",
"[",
"9",
"]",
")",
"*",
"BMP_TEMP_SCALE",
",",
"temp_ext_0",
"=",
"(",
"(",
"float",
"(",
"data",
"[",
"12",
"]",
")",
"*",
"BMP_TEMP_SCALE",
")",
"if",
"data",
"[",
"12",
"]",
"!=",
"BMP_MISSING_TEMP",
"else",
"None",
")",
",",
"temp_ext_1",
"=",
"(",
"(",
"float",
"(",
"data",
"[",
"13",
"]",
")",
"*",
"BMP_TEMP_SCALE",
")",
"if",
"data",
"[",
"13",
"]",
"!=",
"BMP_MISSING_TEMP",
"else",
"None",
")",
",",
"fan_0",
"=",
"float",
"(",
"data",
"[",
"16",
"]",
")",
"if",
"data",
"[",
"16",
"]",
"!=",
"BMP_MISSING_FAN",
"else",
"None",
",",
"fan_1",
"=",
"float",
"(",
"data",
"[",
"17",
"]",
")",
"if",
"data",
"[",
"17",
"]",
"!=",
"BMP_MISSING_FAN",
"else",
"None",
",",
")"
] | Read ADC data from the BMP including voltages and temperature.
Returns
-------
:py:class:`.ADCInfo` | [
"Read",
"ADC",
"data",
"from",
"the",
"BMP",
"including",
"voltages",
"and",
"temperature",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/bmp_controller.py#L355-L388 |
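A short usage sketch for `read_adc`, assuming a connected `BMPController` instance `bc`; as the conversion code above shows, sensors that are not fitted come back as `None`::

    adc = bc.read_adc(cabinet=0, frame=0, board=0)
    print(adc.temp_top, adc.voltage_supply)
    if adc.fan_0 is not None:               # fan tachometer may be absent
        print("fan 0 speed:", adc.fan_0)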
project-rig/rig | rig/place_and_route/place/utils.py | add_resources | def add_resources(res_a, res_b):
"""Return the resources after adding res_b's resources to res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0.
"""
return {resource: value + res_b.get(resource, 0)
for resource, value in iteritems(res_a)} | python | def add_resources(res_a, res_b):
"""Return the resources after adding res_b's resources to res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0.
"""
return {resource: value + res_b.get(resource, 0)
for resource, value in iteritems(res_a)} | [
"def",
"add_resources",
"(",
"res_a",
",",
"res_b",
")",
":",
"return",
"{",
"resource",
":",
"value",
"+",
"res_b",
".",
"get",
"(",
"resource",
",",
"0",
")",
"for",
"resource",
",",
"value",
"in",
"iteritems",
"(",
"res_a",
")",
"}"
] | Return the resources after adding res_b's resources to res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0. | [
"Return",
"the",
"resources",
"after",
"adding",
"res_b",
"s",
"resources",
"to",
"res_a",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L13-L26 |
project-rig/rig | rig/place_and_route/place/utils.py | subtract_resources | def subtract_resources(res_a, res_b):
"""Return the resources remaining after subtracting res_b's resources from
res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0.
"""
return {resource: value - res_b.get(resource, 0)
for resource, value in iteritems(res_a)} | python | def subtract_resources(res_a, res_b):
"""Return the resources remaining after subtracting res_b's resources from
res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0.
"""
return {resource: value - res_b.get(resource, 0)
for resource, value in iteritems(res_a)} | [
"def",
"subtract_resources",
"(",
"res_a",
",",
"res_b",
")",
":",
"return",
"{",
"resource",
":",
"value",
"-",
"res_b",
".",
"get",
"(",
"resource",
",",
"0",
")",
"for",
"resource",
",",
"value",
"in",
"iteritems",
"(",
"res_a",
")",
"}"
] | Return the resources remaining after subtracting res_b's resources from
res_a.
Parameters
----------
res_a : dict
Dictionary `{resource: value, ...}`.
res_b : dict
Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of
res_a. If a resource is not present in res_b, the value is presumed to
be 0. | [
"Return",
"the",
"resources",
"remaining",
"after",
"subtracting",
"res_b",
"s",
"resources",
"from",
"res_a",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L29-L43 |
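Both helpers treat resources as plain dictionaries keyed by opaque resource identifiers, so a self-contained sketch needs nothing beyond the functions themselves (string keys are used here purely for illustration)::

    from rig.place_and_route.place.utils import add_resources, subtract_resources

    total = {"cores": 18, "sdram": 128}
    used = {"cores": 3}
    remaining = subtract_resources(total, used)   # {"cores": 15, "sdram": 128}
    restored = add_resources(remaining, used)     # back to the original totals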
project-rig/rig | rig/place_and_route/place/utils.py | resources_after_reservation | def resources_after_reservation(res, constraint):
"""Return the resources available after a specified
ReserveResourceConstraint has been applied.
Note: the caller is responsible for testing that the constraint is
applicable to the core whose resources are being constrained.
Note: this function does not pay attention to the specific position of the
reserved region, only its magnitude.
"""
res = res.copy()
res[constraint.resource] -= (constraint.reservation.stop -
constraint.reservation.start)
return res | python | def resources_after_reservation(res, constraint):
"""Return the resources available after a specified
ReserveResourceConstraint has been applied.
Note: the caller is responsible for testing that the constraint is
applicable to the core whose resources are being constrained.
Note: this function does not pay attention to the specific position of the
reserved region, only its magnitude.
"""
res = res.copy()
res[constraint.resource] -= (constraint.reservation.stop -
constraint.reservation.start)
return res | [
"def",
"resources_after_reservation",
"(",
"res",
",",
"constraint",
")",
":",
"res",
"=",
"res",
".",
"copy",
"(",
")",
"res",
"[",
"constraint",
".",
"resource",
"]",
"-=",
"(",
"constraint",
".",
"reservation",
".",
"stop",
"-",
"constraint",
".",
"reservation",
".",
"start",
")",
"return",
"res"
] | Return the resources available after a specified
ReserveResourceConstraint has been applied.
Note: the caller is responsible for testing that the constraint is
applicable to the core whose resources are being constrained.
Note: this function does not pay attention to the specific position of the
reserved region, only its magnitude. | [
"Return",
"the",
"resources",
"available",
"after",
"a",
"specified",
"ReserveResourceConstraint",
"has",
"been",
"applied",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L52-L65 |
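A sketch of the reservation arithmetic; the resource key is a plain string for illustration (rig normally uses its resource sentinels) and the constraint's `reservation` is a `slice`, whose `start`/`stop` the function reads::

    from rig.place_and_route.constraints import ReserveResourceConstraint

    chip = {"cores": 18}
    monitor = ReserveResourceConstraint("cores", slice(0, 1))   # reserve one core
    resources_after_reservation(chip, monitor)                  # -> {"cores": 17}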
project-rig/rig | rig/place_and_route/place/utils.py | apply_reserve_resource_constraint | def apply_reserve_resource_constraint(machine, constraint):
"""Apply the changes implied by a reserve resource constraint to a
machine model."""
if constraint.location is None:
# Compensate for globally reserved resources
machine.chip_resources \
= resources_after_reservation(
machine.chip_resources, constraint)
if overallocated(machine.chip_resources):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
for location in machine.chip_resource_exceptions:
machine.chip_resource_exceptions[location] \
= resources_after_reservation(
machine.chip_resource_exceptions[location],
constraint)
if overallocated(machine[location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
else:
# Compensate for reserved resources at a specified location
machine[constraint.location] = resources_after_reservation(
machine[constraint.location], constraint)
if overallocated(machine[constraint.location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint)) | python | def apply_reserve_resource_constraint(machine, constraint):
"""Apply the changes implied by a reserve resource constraint to a
machine model."""
if constraint.location is None:
# Compensate for globally reserved resources
machine.chip_resources \
= resources_after_reservation(
machine.chip_resources, constraint)
if overallocated(machine.chip_resources):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
for location in machine.chip_resource_exceptions:
machine.chip_resource_exceptions[location] \
= resources_after_reservation(
machine.chip_resource_exceptions[location],
constraint)
if overallocated(machine[location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint))
else:
# Compensate for reserved resources at a specified location
machine[constraint.location] = resources_after_reservation(
machine[constraint.location], constraint)
if overallocated(machine[constraint.location]):
raise InsufficientResourceError(
"Cannot meet {}".format(constraint)) | [
"def",
"apply_reserve_resource_constraint",
"(",
"machine",
",",
"constraint",
")",
":",
"if",
"constraint",
".",
"location",
"is",
"None",
":",
"# Compensate for globally reserved resources",
"machine",
".",
"chip_resources",
"=",
"resources_after_reservation",
"(",
"machine",
".",
"chip_resources",
",",
"constraint",
")",
"if",
"overallocated",
"(",
"machine",
".",
"chip_resources",
")",
":",
"raise",
"InsufficientResourceError",
"(",
"\"Cannot meet {}\"",
".",
"format",
"(",
"constraint",
")",
")",
"for",
"location",
"in",
"machine",
".",
"chip_resource_exceptions",
":",
"machine",
".",
"chip_resource_exceptions",
"[",
"location",
"]",
"=",
"resources_after_reservation",
"(",
"machine",
".",
"chip_resource_exceptions",
"[",
"location",
"]",
",",
"constraint",
")",
"if",
"overallocated",
"(",
"machine",
"[",
"location",
"]",
")",
":",
"raise",
"InsufficientResourceError",
"(",
"\"Cannot meet {}\"",
".",
"format",
"(",
"constraint",
")",
")",
"else",
":",
"# Compensate for reserved resources at a specified location",
"machine",
"[",
"constraint",
".",
"location",
"]",
"=",
"resources_after_reservation",
"(",
"machine",
"[",
"constraint",
".",
"location",
"]",
",",
"constraint",
")",
"if",
"overallocated",
"(",
"machine",
"[",
"constraint",
".",
"location",
"]",
")",
":",
"raise",
"InsufficientResourceError",
"(",
"\"Cannot meet {}\"",
".",
"format",
"(",
"constraint",
")",
")"
] | Apply the changes implied by a reserve resource constraint to a
machine model. | [
"Apply",
"the",
"changes",
"implied",
"by",
"a",
"reserve",
"resource",
"constraint",
"to",
"a",
"machine",
"model",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L68-L93 |
project-rig/rig | rig/place_and_route/place/utils.py | apply_same_chip_constraints | def apply_same_chip_constraints(vertices_resources, nets, constraints):
"""Modify a set of vertices_resources, nets and constraints to account for
all SameChipConstraints.
To allow placement algorithms to handle SameChipConstraints without any
special cases, Vertices identified in a SameChipConstraint are merged into
a new vertex whose vertices_resources are the sum total of their parts
which may be placed as if a single vertex. Once placed, the placement can
be expanded into a full placement of all the original vertices using
:py:func:`finalise_same_chip_constraints`.
A typical use pattern might look like::
def my_placer(vertices_resources, nets, machine, constraints):
# Should be done first thing since this may redefine
# vertices_resources, nets and constraints.
vertices_resources, nets, constraints, substitutions = \\
apply_same_chip_constraints(vertices_resources,
nets, constraints)
# ...deal with other types of constraint...
# ...perform placement...
finalise_same_chip_constraints(substitutions, placements)
return placements
Note that this function does not modify its arguments but rather returns
new copies of the structures supplied.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
nets : [:py:class:`~rig.netlist.Net`, ...]
constraints : [constraint, ...]
Returns
-------
(vertices_resources, nets, constraints, substitutions)
The vertices_resources, nets and constraints values contain modified
copies of the supplied data structures modified to contain a single
vertex in place of the individual constrained vertices.
substitutions is a list of :py:class:`MergedVertex` objects which
resulted from the combining of the constrained vertices. The order of
the list is the order the substitutions were carried out. The
:py:func:`finalise_same_chip_constraints` function can be used to
expand a set of substitutions.
"""
# Make a copy of the basic structures to be modified by this function
vertices_resources = vertices_resources.copy()
nets = nets[:]
constraints = constraints[:]
substitutions = []
for same_chip_constraint in constraints:
if not isinstance(same_chip_constraint, SameChipConstraint):
continue
# Skip constraints which don't actually merge anything...
if len(same_chip_constraint.vertices) <= 1:
continue
# The new (merged) vertex with which to replace the constrained
# vertices
merged_vertex = MergedVertex(same_chip_constraint.vertices)
substitutions.append(merged_vertex)
# A set containing the set of vertices to be merged (to remove
# duplicates)
merged_vertices = set(same_chip_constraint.vertices)
# Remove the merged vertices from the set of vertices resources and
# accumulate the total resources consumed. Note add_resources is not
# used since we don't know if the resources consumed by each vertex are
# overlapping.
total_resources = {}
for vertex in merged_vertices:
resources = vertices_resources.pop(vertex)
for resource, value in iteritems(resources):
total_resources[resource] = (total_resources.get(resource, 0) +
value)
vertices_resources[merged_vertex] = total_resources
# Update any nets which pointed to a merged vertex
for net_num, net in enumerate(nets):
net_changed = False
# Change net sources
if net.source in merged_vertices:
net_changed = True
net = Net(merged_vertex, net.sinks, net.weight)
# Change net sinks
for sink_num, sink in enumerate(net.sinks):
if sink in merged_vertices:
if not net_changed:
net = Net(net.source, net.sinks, net.weight)
net_changed = True
net.sinks[sink_num] = merged_vertex
if net_changed:
nets[net_num] = net
# Update any constraints which refer to a merged vertex
for constraint_num, constraint in enumerate(constraints):
if isinstance(constraint, LocationConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = LocationConstraint(
merged_vertex, constraint.location)
elif isinstance(constraint, SameChipConstraint):
if not set(constraint.vertices).isdisjoint(merged_vertices):
constraints[constraint_num] = SameChipConstraint([
merged_vertex if v in merged_vertices else v
for v in constraint.vertices
])
elif isinstance(constraint, RouteEndpointConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = RouteEndpointConstraint(
merged_vertex, constraint.route)
return (vertices_resources, nets, constraints, substitutions) | python | def apply_same_chip_constraints(vertices_resources, nets, constraints):
"""Modify a set of vertices_resources, nets and constraints to account for
all SameChipConstraints.
To allow placement algorithms to handle SameChipConstraints without any
special cases, Vertices identified in a SameChipConstraint are merged into
a new vertex whose vertices_resources are the sum total of their parts
which may be placed as if a single vertex. Once placed, the placement can
be expanded into a full placement of all the original vertices using
:py:func:`finalise_same_chip_constraints`.
A typical use pattern might look like::
def my_placer(vertices_resources, nets, machine, constraints):
# Should be done first thing since this may redefine
# vertices_resources, nets and constraints.
vertices_resources, nets, constraints, substitutions = \\
apply_same_chip_constraints(vertices_resources,
nets, constraints)
# ...deal with other types of constraint...
# ...perform placement...
finalise_same_chip_constraints(substitutions, placements)
return placements
Note that this function does not modify its arguments but rather returns
new copies of the structures supplied.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
nets : [:py:class:`~rig.netlist.Net`, ...]
constraints : [constraint, ...]
Returns
-------
(vertices_resources, nets, constraints, substitutions)
The vertices_resources, nets and constraints values contain modified
copies of the supplied data structures modified to contain a single
vertex in place of the individual constrained vertices.
substitutions is a list of :py:class:`MergedVertex` objects which
resulted from the combining of the constrained vertices. The order of
the list is the order the substitutions were carried out. The
:py:func:`finalise_same_chip_constraints` function can be used to
expand a set of substitutions.
"""
# Make a copy of the basic structures to be modified by this function
vertices_resources = vertices_resources.copy()
nets = nets[:]
constraints = constraints[:]
substitutions = []
for same_chip_constraint in constraints:
if not isinstance(same_chip_constraint, SameChipConstraint):
continue
# Skip constraints which don't actually merge anything...
if len(same_chip_constraint.vertices) <= 1:
continue
# The new (merged) vertex with which to replace the constrained
# vertices
merged_vertex = MergedVertex(same_chip_constraint.vertices)
substitutions.append(merged_vertex)
# A set containing the set of vertices to be merged (to remove
# duplicates)
merged_vertices = set(same_chip_constraint.vertices)
# Remove the merged vertices from the set of vertices resources and
# accumulate the total resources consumed. Note add_resources is not
# used since we don't know if the resources consumed by each vertex are
# overlapping.
total_resources = {}
for vertex in merged_vertices:
resources = vertices_resources.pop(vertex)
for resource, value in iteritems(resources):
total_resources[resource] = (total_resources.get(resource, 0) +
value)
vertices_resources[merged_vertex] = total_resources
# Update any nets which pointed to a merged vertex
for net_num, net in enumerate(nets):
net_changed = False
# Change net sources
if net.source in merged_vertices:
net_changed = True
net = Net(merged_vertex, net.sinks, net.weight)
# Change net sinks
for sink_num, sink in enumerate(net.sinks):
if sink in merged_vertices:
if not net_changed:
net = Net(net.source, net.sinks, net.weight)
net_changed = True
net.sinks[sink_num] = merged_vertex
if net_changed:
nets[net_num] = net
# Update any constraints which refer to a merged vertex
for constraint_num, constraint in enumerate(constraints):
if isinstance(constraint, LocationConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = LocationConstraint(
merged_vertex, constraint.location)
elif isinstance(constraint, SameChipConstraint):
if not set(constraint.vertices).isdisjoint(merged_vertices):
constraints[constraint_num] = SameChipConstraint([
merged_vertex if v in merged_vertices else v
for v in constraint.vertices
])
elif isinstance(constraint, RouteEndpointConstraint):
if constraint.vertex in merged_vertices:
constraints[constraint_num] = RouteEndpointConstraint(
merged_vertex, constraint.route)
return (vertices_resources, nets, constraints, substitutions) | [
"def",
"apply_same_chip_constraints",
"(",
"vertices_resources",
",",
"nets",
",",
"constraints",
")",
":",
"# Make a copy of the basic structures to be modified by this function",
"vertices_resources",
"=",
"vertices_resources",
".",
"copy",
"(",
")",
"nets",
"=",
"nets",
"[",
":",
"]",
"constraints",
"=",
"constraints",
"[",
":",
"]",
"substitutions",
"=",
"[",
"]",
"for",
"same_chip_constraint",
"in",
"constraints",
":",
"if",
"not",
"isinstance",
"(",
"same_chip_constraint",
",",
"SameChipConstraint",
")",
":",
"continue",
"# Skip constraints which don't actually merge anything...",
"if",
"len",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"<=",
"1",
":",
"continue",
"# The new (merged) vertex with which to replace the constrained",
"# vertices",
"merged_vertex",
"=",
"MergedVertex",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"substitutions",
".",
"append",
"(",
"merged_vertex",
")",
"# A set containing the set of vertices to be merged (to remove",
"# duplicates)",
"merged_vertices",
"=",
"set",
"(",
"same_chip_constraint",
".",
"vertices",
")",
"# Remove the merged vertices from the set of vertices resources and",
"# accumulate the total resources consumed. Note add_resources is not",
"# used since we don't know if the resources consumed by each vertex are",
"# overlapping.",
"total_resources",
"=",
"{",
"}",
"for",
"vertex",
"in",
"merged_vertices",
":",
"resources",
"=",
"vertices_resources",
".",
"pop",
"(",
"vertex",
")",
"for",
"resource",
",",
"value",
"in",
"iteritems",
"(",
"resources",
")",
":",
"total_resources",
"[",
"resource",
"]",
"=",
"(",
"total_resources",
".",
"get",
"(",
"resource",
",",
"0",
")",
"+",
"value",
")",
"vertices_resources",
"[",
"merged_vertex",
"]",
"=",
"total_resources",
"# Update any nets which pointed to a merged vertex",
"for",
"net_num",
",",
"net",
"in",
"enumerate",
"(",
"nets",
")",
":",
"net_changed",
"=",
"False",
"# Change net sources",
"if",
"net",
".",
"source",
"in",
"merged_vertices",
":",
"net_changed",
"=",
"True",
"net",
"=",
"Net",
"(",
"merged_vertex",
",",
"net",
".",
"sinks",
",",
"net",
".",
"weight",
")",
"# Change net sinks",
"for",
"sink_num",
",",
"sink",
"in",
"enumerate",
"(",
"net",
".",
"sinks",
")",
":",
"if",
"sink",
"in",
"merged_vertices",
":",
"if",
"not",
"net_changed",
":",
"net",
"=",
"Net",
"(",
"net",
".",
"source",
",",
"net",
".",
"sinks",
",",
"net",
".",
"weight",
")",
"net_changed",
"=",
"True",
"net",
".",
"sinks",
"[",
"sink_num",
"]",
"=",
"merged_vertex",
"if",
"net_changed",
":",
"nets",
"[",
"net_num",
"]",
"=",
"net",
"# Update any constraints which refer to a merged vertex",
"for",
"constraint_num",
",",
"constraint",
"in",
"enumerate",
"(",
"constraints",
")",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"LocationConstraint",
")",
":",
"if",
"constraint",
".",
"vertex",
"in",
"merged_vertices",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"LocationConstraint",
"(",
"merged_vertex",
",",
"constraint",
".",
"location",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"SameChipConstraint",
")",
":",
"if",
"not",
"set",
"(",
"constraint",
".",
"vertices",
")",
".",
"isdisjoint",
"(",
"merged_vertices",
")",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"SameChipConstraint",
"(",
"[",
"merged_vertex",
"if",
"v",
"in",
"merged_vertices",
"else",
"v",
"for",
"v",
"in",
"constraint",
".",
"vertices",
"]",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"RouteEndpointConstraint",
")",
":",
"if",
"constraint",
".",
"vertex",
"in",
"merged_vertices",
":",
"constraints",
"[",
"constraint_num",
"]",
"=",
"RouteEndpointConstraint",
"(",
"merged_vertex",
",",
"constraint",
".",
"route",
")",
"return",
"(",
"vertices_resources",
",",
"nets",
",",
"constraints",
",",
"substitutions",
")"
] | Modify a set of vertices_resources, nets and constraints to account for
all SameChipConstraints.
To allow placement algorithms to handle SameChipConstraints without any
special cases, Vertices identified in a SameChipConstraint are merged into
a new vertex whose vertices_resources are the sum total of their parts
which may be placed as if a single vertex. Once placed, the placement can
be expanded into a full placement of all the original vertices using
:py:func:`finalise_same_chip_constraints`.
A typical use pattern might look like::
def my_placer(vertices_resources, nets, machine, constraints):
# Should be done first thing since this may redefine
# vertices_resources, nets and constraints.
vertices_resources, nets, constraints, substitutions = \\
apply_same_chip_constraints(vertices_resources,
nets, constraints)
# ...deal with other types of constraint...
# ...perform placement...
finalise_same_chip_constraints(substitutions, placements)
return placements
Note that this function does not modify its arguments but rather returns
new copies of the structures supplied.
Parameters
----------
vertices_resources : {vertex: {resource: quantity, ...}, ...}
nets : [:py:class:`~rig.netlist.Net`, ...]
constraints : [constraint, ...]
Returns
-------
(vertices_resources, nets, constraints, substitutions)
The vertices_resources, nets and constraints values contain modified
copies of the supplied data structures modified to contain a single
vertex in place of the individual constrained vertices.
substitutions is a list of :py:class:`MergedVertex` objects which
resulted from the combining of the constrained vertices. The order of
the list is the order the substitutions were carried out. The
:py:func:`finalise_same_chip_constraints` function can be used to
expand a set of substitutions. | [
"Modify",
"a",
"set",
"of",
"vertices_resources",
"nets",
"and",
"constraints",
"to",
"account",
"for",
"all",
"SameChipConstraints",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L107-L229 |
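A concrete instance of the use pattern shown in the docstring above; the vertices are arbitrary hashable objects and the resource key is a plain string for illustration::

    from rig.netlist import Net
    from rig.place_and_route.constraints import SameChipConstraint

    v1, v2, v3 = object(), object(), object()
    vertices_resources = {v1: {"cores": 1}, v2: {"cores": 1}, v3: {"cores": 1}}
    nets = [Net(v1, [v2, v3])]
    constraints = [SameChipConstraint([v1, v2])]

    vr, new_nets, new_cons, subs = apply_same_chip_constraints(
        vertices_resources, nets, constraints)
    # vr now holds one MergedVertex (requiring {"cores": 2}) in place of v1 and v2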
project-rig/rig | rig/place_and_route/place/utils.py | finalise_same_chip_constraints | def finalise_same_chip_constraints(substitutions, placements):
"""Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace).
"""
for merged_vertex in reversed(substitutions):
placement = placements.pop(merged_vertex)
for v in merged_vertex.vertices:
placements[v] = placement | python | def finalise_same_chip_constraints(substitutions, placements):
"""Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace).
"""
for merged_vertex in reversed(substitutions):
placement = placements.pop(merged_vertex)
for v in merged_vertex.vertices:
placements[v] = placement | [
"def",
"finalise_same_chip_constraints",
"(",
"substitutions",
",",
"placements",
")",
":",
"for",
"merged_vertex",
"in",
"reversed",
"(",
"substitutions",
")",
":",
"placement",
"=",
"placements",
".",
"pop",
"(",
"merged_vertex",
")",
"for",
"v",
"in",
"merged_vertex",
".",
"vertices",
":",
"placements",
"[",
"v",
"]",
"=",
"placement"
] | Given a set of placements containing the supplied
:py:class:`MergedVertex`, remove the merged vertices replacing them with
their constituent vertices (changing the placements inplace). | [
"Given",
"a",
"set",
"of",
"placements",
"containing",
"the",
"supplied",
":",
"py",
":",
"class",
":",
"MergedVertex",
"remove",
"the",
"merged",
"vertices",
"replacing",
"them",
"with",
"their",
"constituent",
"vertices",
"(",
"changing",
"the",
"placements",
"inplace",
")",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/utils.py#L232-L240 |
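Continuing the sketch above: once the reduced problem has been placed, the recorded substitutions expand the merged vertex back into its members::

    placements = {subs[0]: (0, 0), v3: (1, 0)}   # e.g. the output of a placer
    finalise_same_chip_constraints(subs, placements)
    # placements now maps v1 and v2 individually to (0, 0); v3 keeps (1, 0)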
Metatab/metapack | metapack/appurl.py | MetapackDocumentUrl.doc | def doc(self):
"""Return the metatab document for the URL"""
from metapack import MetapackDoc
t = self.get_resource().get_target()
return MetapackDoc(t, package_url=self.package_url) | python | def doc(self):
"""Return the metatab document for the URL"""
from metapack import MetapackDoc
t = self.get_resource().get_target()
return MetapackDoc(t, package_url=self.package_url) | [
"def",
"doc",
"(",
"self",
")",
":",
"from",
"metapack",
"import",
"MetapackDoc",
"t",
"=",
"self",
".",
"get_resource",
"(",
")",
".",
"get_target",
"(",
")",
"return",
"MetapackDoc",
"(",
"t",
",",
"package_url",
"=",
"self",
".",
"package_url",
")"
] | Return the metatab document for the URL | [
"Return",
"the",
"metatab",
"document",
"for",
"the",
"URL"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L116-L120 |
Metatab/metapack | metapack/appurl.py | MetapackDocumentUrl.resolve_url | def resolve_url(self, resource_name):
"""Return a URL to a local copy of a resource, suitable for get_generator()"""
if self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
# For CSV packages, need to get the package and open it to get the resource URL, because
# they are always absolute web URLs and may not be related to the location of the metadata.
s = self.get_resource()
rs = s.doc.resource(resource_name)
return parse_app_url(rs.url)
else:
jt = self.join_target(resource_name)
rs = jt.get_resource()
t = rs.get_target()
return t | python | def resolve_url(self, resource_name):
"""Return a URL to a local copy of a resource, suitable for get_generator()"""
if self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
# For CSV packages, need to get the package and open it to get the resource URL, because
# they are always absolute web URLs and may not be related to the location of the metadata.
s = self.get_resource()
rs = s.doc.resource(resource_name)
return parse_app_url(rs.url)
else:
jt = self.join_target(resource_name)
rs = jt.get_resource()
t = rs.get_target()
return t | [
"def",
"resolve_url",
"(",
"self",
",",
"resource_name",
")",
":",
"if",
"self",
".",
"target_format",
"==",
"'csv'",
"and",
"self",
".",
"target_file",
"!=",
"DEFAULT_METATAB_FILE",
":",
"# For CSV packages, need to get the package and open it to get the resoruce URL, becuase",
"# they are always absolute web URLs and may not be related to the location of the metadata.",
"s",
"=",
"self",
".",
"get_resource",
"(",
")",
"rs",
"=",
"s",
".",
"doc",
".",
"resource",
"(",
"resource_name",
")",
"return",
"parse_app_url",
"(",
"rs",
".",
"url",
")",
"else",
":",
"jt",
"=",
"self",
".",
"join_target",
"(",
"resource_name",
")",
"rs",
"=",
"jt",
".",
"get_resource",
"(",
")",
"t",
"=",
"rs",
".",
"get_target",
"(",
")",
"return",
"t"
] | Return a URL to a local copy of a resource, suitable for get_generator() | [
"Return",
"a",
"URL",
"to",
"a",
"local",
"copy",
"of",
"a",
"resource",
"suitable",
"for",
"get_generator",
"()"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L136-L149 |
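A hedged sketch of the `doc` property and `resolve_url` together; the URL, resource name and the `dl` downloader instance are placeholders rather than values taken from this listing::

    from metapack.appurl import MetapackDocumentUrl

    u = MetapackDocumentUrl('http://example.com/example-pkg/metadata.csv',
                            downloader=dl)        # dl: an assumed Downloader
    doc = u.doc                                   # MetapackDoc built from the metadata
    local = u.resolve_url('demo-resource')        # local URL usable by get_generator()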
Metatab/metapack | metapack/appurl.py | MetapackDocumentUrl.package_url | def package_url(self):
"""Return the package URL associated with this metadata"""
if self.resource_file == DEFAULT_METATAB_FILE or self.target_format in ('txt','ipynb'):
u = self.inner.clone().clear_fragment()
u.path = dirname(self.path) + '/'
u.scheme_extension = 'metapack'
else:
u = self
return MetapackPackageUrl(str(u.clear_fragment()), downloader=self._downloader) | python | def package_url(self):
"""Return the package URL associated with this metadata"""
if self.resource_file == DEFAULT_METATAB_FILE or self.target_format in ('txt','ipynb'):
u = self.inner.clone().clear_fragment()
u.path = dirname(self.path) + '/'
u.scheme_extension = 'metapack'
else:
u = self
return MetapackPackageUrl(str(u.clear_fragment()), downloader=self._downloader) | [
"def",
"package_url",
"(",
"self",
")",
":",
"if",
"self",
".",
"resource_file",
"==",
"DEFAULT_METATAB_FILE",
"or",
"self",
".",
"target_format",
"in",
"(",
"'txt'",
",",
"'ipynb'",
")",
":",
"u",
"=",
"self",
".",
"inner",
".",
"clone",
"(",
")",
".",
"clear_fragment",
"(",
")",
"u",
".",
"path",
"=",
"dirname",
"(",
"self",
".",
"path",
")",
"+",
"'/'",
"u",
".",
"scheme_extension",
"=",
"'metapack'",
"else",
":",
"u",
"=",
"self",
"return",
"MetapackPackageUrl",
"(",
"str",
"(",
"u",
".",
"clear_fragment",
"(",
")",
")",
",",
"downloader",
"=",
"self",
".",
"_downloader",
")"
] | Return the package URL associated with this metadata | [
"Return",
"the",
"package",
"URL",
"associated",
"with",
"this",
"metadata"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L160-L170 |
Metatab/metapack | metapack/appurl.py | MetapackPackageUrl.join_resource_name | def join_resource_name(self, v):
"""Return a MetapackResourceUrl that includes a reference to the resource. Returns a
MetapackResourceUrl, which will have a fragment """
d = self.dict
d['fragment'] = [v, None]
return MetapackResourceUrl(downloader=self._downloader, **d) | python | def join_resource_name(self, v):
"""Return a MetapackResourceUrl that includes a reference to the resource. Returns a
MetapackResourceUrl, which will have a fragment """
d = self.dict
d['fragment'] = [v, None]
return MetapackResourceUrl(downloader=self._downloader, **d) | [
"def",
"join_resource_name",
"(",
"self",
",",
"v",
")",
":",
"d",
"=",
"self",
".",
"dict",
"d",
"[",
"'fragment'",
"]",
"=",
"[",
"v",
",",
"None",
"]",
"return",
"MetapackResourceUrl",
"(",
"downloader",
"=",
"self",
".",
"_downloader",
",",
"*",
"*",
"d",
")"
] | Return a MetapackResourceUrl that includes a reference to the resource. Returns a
MetapackResourceUrl, which will have a fragment | [
"Return",
"a",
"MetapackResourceUrl",
"that",
"includes",
"a",
"reference",
"to",
"the",
"resource",
".",
"Returns",
"a",
"MetapackResourceUrl",
"which",
"will",
"have",
"a",
"fragment"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L232-L237 |
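For illustration, attaching a resource name to a package URL might look like this; the package location, resource name and `dl` downloader are assumptions::

    from metapack.appurl import MetapackPackageUrl

    pkg = MetapackPackageUrl('http://example.com/example-pkg.csv', downloader=dl)
    r = pkg.join_resource_name('incidents')   # MetapackResourceUrl ending in '#incidents'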
Metatab/metapack | metapack/appurl.py | MetapackPackageUrl.resolve_url | def resolve_url(self, resource_name):
"""Return a URL to a local copy of a resource, suitable for get_generator()
For Package URLS, resolution involves generating a URL to a data file from the package URL and the
value of a resource. The resource value, the url, can be one of:
- An absolute URL, with a web scheme
- A relative URL, relative to the package, with a file scheme.
URLs with non-file schemes are returned. File schemes are assumed to be relative to the package,
and are resolved according to the type of resource.
"""
u = parse_app_url(resource_name)
if u.scheme != 'file':
t = u
elif self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
# There are two forms for CSV package URLs:
# - A CSV package, which can only have absolute URLs
# - A Filesystem package, which can have relative URLs.
# The complication is that the filesystem package usually has a metadata file named
# DEFAULT_METATAB_FILE, which can distinguish it from a CSV package, but it's also possible
# to have a filesystem package with a non standard package name.
# So, this clause can happen for two cases: A CSV package or a Filesystem package with a nonstandard
# metadata file name.
# For CSV packages, need to get the package and open it to get the resource URL, because
# they are always absolute web URLs and may not be related to the location of the metadata.
s = self.get_resource()
rs = s.metadata_url.doc.resource(resource_name)
if rs is not None:
t = parse_app_url(rs.url)
else:
raise ResourceError("No resource for '{}' in '{}' ".format(resource_name, self))
else:
jt = self.join_target(resource_name)
try:
rs = jt.get_resource()
except DownloadError:
raise ResourceError(
"Failed to download resource for '{}' for '{}' in '{}'".format(jt, resource_name, self))
t = rs.get_target()
return t | python | def resolve_url(self, resource_name):
"""Return a URL to a local copy of a resource, suitable for get_generator()
For Package URLS, resolution involves generating a URL to a data file from the package URL and the
value of a resource. The resource value, the url, can be one of:
- An absolute URL, with a web scheme
- A relative URL, relative to the package, with a file scheme.
URLs with non-file schemes are returned. File schemes are assumed to be relative to the package,
and are resolved according to the type of resource.
"""
u = parse_app_url(resource_name)
if u.scheme != 'file':
t = u
elif self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
# There are two forms for CSV package URLs:
# - A CSV package, which can only have absolute URLs
# - A Filesystem package, which can have relative URLs.
# The complication is that the filesystem package usually has a metadata file named
# DEFAULT_METATAB_FILE, which can distinguish it from a CSV package, but it's also possible
# to have a filesystem package with a non standard package name.
# So, this clause can happen for two cases: A CSV package or a Filesystem package with a nonstandard
# metadata file name.
# For CSV packages, need to get the package and open it to get the resource URL, because
# they are always absolute web URLs and may not be related to the location of the metadata.
s = self.get_resource()
rs = s.metadata_url.doc.resource(resource_name)
if rs is not None:
t = parse_app_url(rs.url)
else:
raise ResourceError("No resource for '{}' in '{}' ".format(resource_name, self))
else:
jt = self.join_target(resource_name)
try:
rs = jt.get_resource()
except DownloadError:
raise ResourceError(
"Failed to download resource for '{}' for '{}' in '{}'".format(jt, resource_name, self))
t = rs.get_target()
return t | [
"def",
"resolve_url",
"(",
"self",
",",
"resource_name",
")",
":",
"u",
"=",
"parse_app_url",
"(",
"resource_name",
")",
"if",
"u",
".",
"scheme",
"!=",
"'file'",
":",
"t",
"=",
"u",
"elif",
"self",
".",
"target_format",
"==",
"'csv'",
"and",
"self",
".",
"target_file",
"!=",
"DEFAULT_METATAB_FILE",
":",
"# Thre are two forms for CSV package URLS:",
"# - A CSV package, which can only have absolute URLs",
"# - A Filesystem package, which can have relative URLs.",
"# The complication is that the filsystem package usually has a metadata file named",
"# DEFAULT_METATAB_FILE, which can distinguish it from a CSV package, but it's also possible",
"# to have a filesystem package with a non standard package name.",
"# So, this clause can happed for two cases: A CSV package or a Filesystem package with a nonstandard",
"# metadata file name.",
"# For CSV packages, need to get the package and open it to get the resource URL, because",
"# they are always absolute web URLs and may not be related to the location of the metadata.",
"s",
"=",
"self",
".",
"get_resource",
"(",
")",
"rs",
"=",
"s",
".",
"metadata_url",
".",
"doc",
".",
"resource",
"(",
"resource_name",
")",
"if",
"rs",
"is",
"not",
"None",
":",
"t",
"=",
"parse_app_url",
"(",
"rs",
".",
"url",
")",
"else",
":",
"raise",
"ResourceError",
"(",
"\"No resource for '{}' in '{}' \"",
".",
"format",
"(",
"resource_name",
",",
"self",
")",
")",
"else",
":",
"jt",
"=",
"self",
".",
"join_target",
"(",
"resource_name",
")",
"try",
":",
"rs",
"=",
"jt",
".",
"get_resource",
"(",
")",
"except",
"DownloadError",
":",
"raise",
"ResourceError",
"(",
"\"Failed to download resource for '{}' for '{}' in '{}'\"",
".",
"format",
"(",
"jt",
",",
"resource_name",
",",
"self",
")",
")",
"t",
"=",
"rs",
".",
"get_target",
"(",
")",
"return",
"t"
] | Return a URL to a local copy of a resource, suitable for get_generator()
For Package URLS, resolution involves generating a URL to a data file from the package URL and the
value of a resource. The resource value, the url, can be one of:
- An absolute URL, with a web scheme
- A relative URL, relative to the package, with a file scheme.
URLs with non-file schemes are returned. File schemes are assumed to be relative to the package,
and are resolved according to the type of resource. | [
"Return",
"a",
"URL",
"to",
"a",
"local",
"copy",
"of",
"a",
"resource",
"suitable",
"for",
"get_generator",
"()"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L250-L301 |
Metatab/metapack | metapack/appurl.py | MetapackResourceUrl.package_url | def package_url(self):
"""Return the package URL associated with this metadata"""
return MetapackDocumentUrl(str(self.clear_fragment()), downloader=self._downloader).package_url | python | def package_url(self):
"""Return the package URL associated with this metadata"""
return MetapackDocumentUrl(str(self.clear_fragment()), downloader=self._downloader).package_url | [
"def",
"package_url",
"(",
"self",
")",
":",
"return",
"MetapackDocumentUrl",
"(",
"str",
"(",
"self",
".",
"clear_fragment",
"(",
")",
")",
",",
"downloader",
"=",
"self",
".",
"_downloader",
")",
".",
"package_url"
] | Return the package URL associated with this metadata | [
"Return",
"the",
"package",
"URL",
"associated",
"with",
"this",
"metadata"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L350-L352 |
Metatab/metapack | metapack/appurl.py | SearchUrl.search_json_indexed_directory | def search_json_indexed_directory(directory):
"""Return a search function for searching a directory of packages, which has an index.json file
created by the `mp install file` command.
This will only search the issued index; it will not return results for the source index
"""
from metapack.index import SearchIndex, search_index_file
idx = SearchIndex(search_index_file())
def _search_function(url):
packages = idx.search(url, format='issued')
if not packages:
return None
package = packages.pop(0)
try:
resource_str = '#' + url.target_file if url.fragment[0] else ''
return parse_app_url(package['url'] + resource_str, downloader=url.downloader)
except KeyError as e:
return None
return _search_function | python | def search_json_indexed_directory(directory):
"""Return a search function for searching a directory of packages, which has an index.json file
created by the `mp install file` command.
This will only search the issued index; it will not return results for the source index
"""
from metapack.index import SearchIndex, search_index_file
idx = SearchIndex(search_index_file())
def _search_function(url):
packages = idx.search(url, format='issued')
if not packages:
return None
package = packages.pop(0)
try:
resource_str = '#' + url.target_file if url.fragment[0] else ''
return parse_app_url(package['url'] + resource_str, downloader=url.downloader)
except KeyError as e:
return None
return _search_function | [
"def",
"search_json_indexed_directory",
"(",
"directory",
")",
":",
"from",
"metapack",
".",
"index",
"import",
"SearchIndex",
",",
"search_index_file",
"idx",
"=",
"SearchIndex",
"(",
"search_index_file",
"(",
")",
")",
"def",
"_search_function",
"(",
"url",
")",
":",
"packages",
"=",
"idx",
".",
"search",
"(",
"url",
",",
"format",
"=",
"'issued'",
")",
"if",
"not",
"packages",
":",
"return",
"None",
"package",
"=",
"packages",
".",
"pop",
"(",
"0",
")",
"try",
":",
"resource_str",
"=",
"'#'",
"+",
"url",
".",
"target_file",
"if",
"url",
".",
"fragment",
"[",
"0",
"]",
"else",
"''",
"return",
"parse_app_url",
"(",
"package",
"[",
"'url'",
"]",
"+",
"resource_str",
",",
"downloader",
"=",
"url",
".",
"downloader",
")",
"except",
"KeyError",
"as",
"e",
":",
"return",
"None",
"return",
"_search_function"
] | Return a search function for searching a directory of packages, which has an index.json file
created by the `mp install file` command.
This will only search the issued index; it will not return results for the source index | [
"Return",
"a",
"search",
"function",
"for",
"searching",
"a",
"directory",
"of",
"packages",
"which",
"has",
"an",
"index",
".",
"json",
"file",
"created",
"by",
"the",
"mp",
"install",
"file",
"command",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L467-L494 |
Metatab/metapack | metapack/appurl.py | SearchUrl.search | def search(self):
"""Search for a url by returning the value from the first callback that
returns a non-None value"""
for cb in SearchUrl.search_callbacks:
try:
v = cb(self)
if v is not None:
return v
except Exception as e:
raise | python | def search(self):
"""Search for a url by returning the value from the first callback that
returns a non-None value"""
for cb in SearchUrl.search_callbacks:
try:
v = cb(self)
if v is not None:
return v
except Exception as e:
raise | [
"def",
"search",
"(",
"self",
")",
":",
"for",
"cb",
"in",
"SearchUrl",
".",
"search_callbacks",
":",
"try",
":",
"v",
"=",
"cb",
"(",
"self",
")",
"if",
"v",
"is",
"not",
"None",
":",
"return",
"v",
"except",
"Exception",
"as",
"e",
":",
"raise"
] | Search for a url by returning the value from the first callback that
returns a non-None value | [
"Search",
"for",
"a",
"url",
"by",
"returning",
"the",
"value",
"from",
"the",
"first",
"callback",
"that",
"returns",
"a",
"non",
"-",
"None",
"value"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/appurl.py#L496-L507 |
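A sketch of wiring the directory-index search into `SearchUrl`; the registration step simply appends to the `search_callbacks` list consulted by `search()` above, and the index path, package name and `dl` downloader are made up::

    f = SearchUrl.search_json_indexed_directory('/path/to/index')   # hypothetical path
    SearchUrl.search_callbacks.append(f)

    u = SearchUrl('search:example-package', downloader=dl)
    resolved = u.search()    # first non-None callback result, or None if nothing matches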
Metatab/metapack | metapack/jupyter/core.py | ensure_source_package_dir | def ensure_source_package_dir(nb_path, pkg_name):
"""Ensure all of the important directories in a source package exist"""
pkg_path = join(dirname(nb_path), pkg_name)
makedirs(join(pkg_path,'notebooks'),exist_ok=True)
makedirs(join(pkg_path, 'docs'), exist_ok=True)
return pkg_path | python | def ensure_source_package_dir(nb_path, pkg_name):
"""Ensure all of the important directories in a source package exist"""
pkg_path = join(dirname(nb_path), pkg_name)
makedirs(join(pkg_path,'notebooks'),exist_ok=True)
makedirs(join(pkg_path, 'docs'), exist_ok=True)
return pkg_path | [
"def",
"ensure_source_package_dir",
"(",
"nb_path",
",",
"pkg_name",
")",
":",
"pkg_path",
"=",
"join",
"(",
"dirname",
"(",
"nb_path",
")",
",",
"pkg_name",
")",
"makedirs",
"(",
"join",
"(",
"pkg_path",
",",
"'notebooks'",
")",
",",
"exist_ok",
"=",
"True",
")",
"makedirs",
"(",
"join",
"(",
"pkg_path",
",",
"'docs'",
")",
",",
"exist_ok",
"=",
"True",
")",
"return",
"pkg_path"
] | Ensure all of the important directories in a source package exist | [
"Ensure",
"all",
"of",
"the",
"important",
"directories",
"in",
"a",
"source",
"package",
"exist"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/core.py#L57-L65 |
Metatab/metapack | metapack/jupyter/core.py | get_metatab_doc | def get_metatab_doc(nb_path):
"""Read a notebook and extract the metatab document. Only returns the first document"""
from metatab.generate import CsvDataRowGenerator
from metatab.rowgenerators import TextRowGenerator
from metatab import MetatabDoc
with open(nb_path) as f:
nb = nbformat.reads(f.read(), as_version=4)
for cell in nb.cells:
source = ''.join(cell['source']).strip()
if source.startswith('%%metatab'):
return MetatabDoc(TextRowGenerator(source)) | python | def get_metatab_doc(nb_path):
"""Read a notebook and extract the metatab document. Only returns the first document"""
from metatab.generate import CsvDataRowGenerator
from metatab.rowgenerators import TextRowGenerator
from metatab import MetatabDoc
with open(nb_path) as f:
nb = nbformat.reads(f.read(), as_version=4)
for cell in nb.cells:
source = ''.join(cell['source']).strip()
if source.startswith('%%metatab'):
return MetatabDoc(TextRowGenerator(source)) | [
"def",
"get_metatab_doc",
"(",
"nb_path",
")",
":",
"from",
"metatab",
".",
"generate",
"import",
"CsvDataRowGenerator",
"from",
"metatab",
".",
"rowgenerators",
"import",
"TextRowGenerator",
"from",
"metatab",
"import",
"MetatabDoc",
"with",
"open",
"(",
"nb_path",
")",
"as",
"f",
":",
"nb",
"=",
"nbformat",
".",
"reads",
"(",
"f",
".",
"read",
"(",
")",
",",
"as_version",
"=",
"4",
")",
"for",
"cell",
"in",
"nb",
".",
"cells",
":",
"source",
"=",
"''",
".",
"join",
"(",
"cell",
"[",
"'source'",
"]",
")",
".",
"strip",
"(",
")",
"if",
"source",
".",
"startswith",
"(",
"'%%metatab'",
")",
":",
"return",
"MetatabDoc",
"(",
"TextRowGenerator",
"(",
"source",
")",
")"
] | Read a notebook and extract the metatab document. Only returns the first document | [
"Read",
"a",
"notebook",
"and",
"extract",
"the",
"metatab",
"document",
".",
"Only",
"returns",
"the",
"first",
"document"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/core.py#L68-L81 |
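A short sketch of pulling the embedded Metatab document out of a notebook; the notebook path is a placeholder::

    doc = get_metatab_doc('notebooks/MyPackage.ipynb')    # hypothetical path
    if doc is not None:                                   # None when no %%metatab cell exists
        print(doc['Root'].get_value('Root.Name'))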
Metatab/metapack | metapack/jupyter/core.py | get_package_dir | def get_package_dir(nb_path):
"""Return the package directory for a Notebook that has an embeded Metatab doc, *not* for
notebooks that are part of a package """
doc = get_metatab_doc(nb_path)
doc.update_name(force=True, create_term=True)
pkg_name = doc['Root'].get_value('Root.Name')
assert pkg_name
return ensure_source_package_dir(nb_path, pkg_name), pkg_name | python | def get_package_dir(nb_path):
"""Return the package directory for a Notebook that has an embeded Metatab doc, *not* for
notebooks that are part of a package """
doc = get_metatab_doc(nb_path)
doc.update_name(force=True, create_term=True)
pkg_name = doc['Root'].get_value('Root.Name')
assert pkg_name
return ensure_source_package_dir(nb_path, pkg_name), pkg_name | [
"def",
"get_package_dir",
"(",
"nb_path",
")",
":",
"doc",
"=",
"get_metatab_doc",
"(",
"nb_path",
")",
"doc",
".",
"update_name",
"(",
"force",
"=",
"True",
",",
"create_term",
"=",
"True",
")",
"pkg_name",
"=",
"doc",
"[",
"'Root'",
"]",
".",
"get_value",
"(",
"'Root.Name'",
")",
"assert",
"pkg_name",
"return",
"ensure_source_package_dir",
"(",
"nb_path",
",",
"pkg_name",
")",
",",
"pkg_name"
] | Return the package directory for a Notebook that has an embedded Metatab doc, *not* for
notebooks that are part of a package | [
"Return",
"the",
"package",
"directory",
"for",
"a",
"Notebook",
"that",
"has",
"an",
"embeded",
"Metatab",
"doc",
"*",
"not",
"*",
"for",
"notebooks",
"that",
"are",
"part",
"of",
"a",
"package"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/core.py#L84-L92 |
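Used together, the helpers above locate (and create, if needed) the source-package directory next to a notebook; the path is illustrative::

    pkg_path, pkg_name = get_package_dir('notebooks/MyPackage.ipynb')   # hypothetical
    # pkg_path now exists and contains 'notebooks/' and 'docs/' subdirectories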
Metatab/metapack | metapack/jupyter/core.py | process_schema | def process_schema(doc, resource, df):
"""Add schema entiries to a metatab doc from a dataframe"""
from rowgenerators import SourceError
from requests.exceptions import ConnectionError
from metapack.cli.core import extract_path_name, alt_col_name, type_map
from tableintuit import TypeIntuiter
from rowgenerators.generator.python import PandasDataframeSource
from appurl import parse_app_url
try:
doc['Schema']
except KeyError:
doc.new_section('Schema', ['DataType', 'Altname', 'Description'])
schema_name = resource.get_value('schema', resource.get_value('name'))
schema_term = doc.find_first(term='Table', value=schema_name, section='Schema')
if schema_term:
logger.info("Found table for '{}'; skipping".format(schema_name))
return
path, name = extract_path_name(resource.url)
logger.info("Processing {}".format(resource.url))
si = PandasDataframeSource(parse_app_url(resource.url), df, cache=doc._cache, )
try:
ti = TypeIntuiter().run(si)
except SourceError as e:
logger.warn("Failed to process '{}'; {}".format(path, e))
return
except ConnectionError as e:
logger.warn("Failed to download '{}'; {}".format(path, e))
return
table = doc['Schema'].new_term('Table', schema_name)
logger.info("Adding table '{}' to metatab schema".format(schema_name))
for i, c in enumerate(ti.to_rows()):
raw_alt_name = alt_col_name(c['header'], i)
alt_name = raw_alt_name if raw_alt_name != c['header'] else ''
t = table.new_child('Column', c['header'],
datatype=type_map.get(c['resolved_type'], c['resolved_type']),
altname=alt_name,
description=df[c['header']].description \
if hasattr(df, 'description') and df[c['header']].description else ''
)
return table | python | def process_schema(doc, resource, df):
"""Add schema entiries to a metatab doc from a dataframe"""
from rowgenerators import SourceError
from requests.exceptions import ConnectionError
from metapack.cli.core import extract_path_name, alt_col_name, type_map
from tableintuit import TypeIntuiter
from rowgenerators.generator.python import PandasDataframeSource
from appurl import parse_app_url
try:
doc['Schema']
except KeyError:
doc.new_section('Schema', ['DataType', 'Altname', 'Description'])
schema_name = resource.get_value('schema', resource.get_value('name'))
schema_term = doc.find_first(term='Table', value=schema_name, section='Schema')
if schema_term:
logger.info("Found table for '{}'; skipping".format(schema_name))
return
path, name = extract_path_name(resource.url)
logger.info("Processing {}".format(resource.url))
si = PandasDataframeSource(parse_app_url(resource.url), df, cache=doc._cache, )
try:
ti = TypeIntuiter().run(si)
except SourceError as e:
logger.warn("Failed to process '{}'; {}".format(path, e))
return
except ConnectionError as e:
logger.warn("Failed to download '{}'; {}".format(path, e))
return
table = doc['Schema'].new_term('Table', schema_name)
logger.info("Adding table '{}' to metatab schema".format(schema_name))
for i, c in enumerate(ti.to_rows()):
raw_alt_name = alt_col_name(c['header'], i)
alt_name = raw_alt_name if raw_alt_name != c['header'] else ''
t = table.new_child('Column', c['header'],
datatype=type_map.get(c['resolved_type'], c['resolved_type']),
altname=alt_name,
description=df[c['header']].description \
if hasattr(df, 'description') and df[c['header']].description else ''
)
return table | [
"def",
"process_schema",
"(",
"doc",
",",
"resource",
",",
"df",
")",
":",
"from",
"rowgenerators",
"import",
"SourceError",
"from",
"requests",
".",
"exceptions",
"import",
"ConnectionError",
"from",
"metapack",
".",
"cli",
".",
"core",
"import",
"extract_path_name",
",",
"alt_col_name",
",",
"type_map",
"from",
"tableintuit",
"import",
"TypeIntuiter",
"from",
"rowgenerators",
".",
"generator",
".",
"python",
"import",
"PandasDataframeSource",
"from",
"appurl",
"import",
"parse_app_url",
"try",
":",
"doc",
"[",
"'Schema'",
"]",
"except",
"KeyError",
":",
"doc",
".",
"new_section",
"(",
"'Schema'",
",",
"[",
"'DataType'",
",",
"'Altname'",
",",
"'Description'",
"]",
")",
"schema_name",
"=",
"resource",
".",
"get_value",
"(",
"'schema'",
",",
"resource",
".",
"get_value",
"(",
"'name'",
")",
")",
"schema_term",
"=",
"doc",
".",
"find_first",
"(",
"term",
"=",
"'Table'",
",",
"value",
"=",
"schema_name",
",",
"section",
"=",
"'Schema'",
")",
"if",
"schema_term",
":",
"logger",
".",
"info",
"(",
"\"Found table for '{}'; skipping\"",
".",
"format",
"(",
"schema_name",
")",
")",
"return",
"path",
",",
"name",
"=",
"extract_path_name",
"(",
"resource",
".",
"url",
")",
"logger",
".",
"info",
"(",
"\"Processing {}\"",
".",
"format",
"(",
"resource",
".",
"url",
")",
")",
"si",
"=",
"PandasDataframeSource",
"(",
"parse_app_url",
"(",
"resource",
".",
"url",
")",
",",
"df",
",",
"cache",
"=",
"doc",
".",
"_cache",
",",
")",
"try",
":",
"ti",
"=",
"TypeIntuiter",
"(",
")",
".",
"run",
"(",
"si",
")",
"except",
"SourceError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"Failed to process '{}'; {}\"",
".",
"format",
"(",
"path",
",",
"e",
")",
")",
"return",
"except",
"ConnectionError",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"\"Failed to download '{}'; {}\"",
".",
"format",
"(",
"path",
",",
"e",
")",
")",
"return",
"table",
"=",
"doc",
"[",
"'Schema'",
"]",
".",
"new_term",
"(",
"'Table'",
",",
"schema_name",
")",
"logger",
".",
"info",
"(",
"\"Adding table '{}' to metatab schema\"",
".",
"format",
"(",
"schema_name",
")",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"ti",
".",
"to_rows",
"(",
")",
")",
":",
"raw_alt_name",
"=",
"alt_col_name",
"(",
"c",
"[",
"'header'",
"]",
",",
"i",
")",
"alt_name",
"=",
"raw_alt_name",
"if",
"raw_alt_name",
"!=",
"c",
"[",
"'header'",
"]",
"else",
"''",
"t",
"=",
"table",
".",
"new_child",
"(",
"'Column'",
",",
"c",
"[",
"'header'",
"]",
",",
"datatype",
"=",
"type_map",
".",
"get",
"(",
"c",
"[",
"'resolved_type'",
"]",
",",
"c",
"[",
"'resolved_type'",
"]",
")",
",",
"altname",
"=",
"alt_name",
",",
"description",
"=",
"df",
"[",
"c",
"[",
"'header'",
"]",
"]",
".",
"description",
"if",
"hasattr",
"(",
"df",
",",
"'description'",
")",
"and",
"df",
"[",
"c",
"[",
"'header'",
"]",
"]",
".",
"description",
"else",
"''",
")",
"return",
"table"
] | Add schema entries to a metatab doc from a dataframe | [
"Add",
"schema",
"entiries",
"to",
"a",
"metatab",
"doc",
"from",
"a",
"dataframe"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/core.py#L95-L148 |
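The per-column loop in process_schema only records an Altname when the sanitised header differs from the original header. A minimal, self-contained sketch of that pattern, with a stand-in sanitiser (the real alt_col_name lives in metapack.cli.core and may behave differently)::

    import re

    def alt_col_name(name, i):
        # Stand-in for metapack.cli.core.alt_col_name: lower-case the header,
        # squash non-identifier characters, fall back to a positional name.
        s = re.sub(r'\W+', '_', str(name).strip().lower()).strip('_')
        return s or 'col{}'.format(i)

    for i, header in enumerate(['Median Income ($)', 'tract', '']):
        raw_alt_name = alt_col_name(header, i)
        alt_name = raw_alt_name if raw_alt_name != header else ''
        print(repr(header), '->', repr(alt_name))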
project-rig/rig | rig/machine_control/boot.py | boot | def boot(hostname, boot_port=consts.BOOT_PORT,
scamp_binary=None, sark_struct=None,
boot_delay=0.05, post_boot_delay=2.0,
sv_overrides=dict(), **kwargs):
"""Boot a SpiNNaker machine of the given size.
Parameters
----------
hostname : str
Hostname or IP address of the SpiNNaker chip to use to boot the system.
boot_port : int
The port number to send boot packets to.
scamp_binary : filename or None
Filename of the binary to boot the machine with or None to use the
SC&MP binary bundled with Rig.
sark_struct : filename or None
The 'sark.struct' file which defines the datastructures or None to use
the one bundled with Rig.
boot_delay : float
Number of seconds to pause between sending boot data packets.
post_boot_delay : float
Number of seconds to wait after sending last piece of boot data to give
SC&MP time to re-initialise the Ethernet interface. Note that this does
*not* wait for the system to fully boot.
sv_overrides : {name: value, ...}
Values used to override the defaults in the 'sv' struct defined in the
struct file.
Notes
-----
The constants `rig.machine_control.boot.spinX_boot_options` provide boot
parameters for specific SpiNNaker board revisions, for example::
boot("board1", **spin3_boot_options)
Will boot the Spin3 board connected with hostname "board1".
Returns
-------
{struct_name: :py:class:`~rig.machine_control.struct_file.Struct`}
Layout of structs in memory.
"""
# Get the boot data if not specified.
scamp_binary = (scamp_binary if scamp_binary is not None else
pkg_resources.resource_filename("rig", "boot/scamp.boot"))
sark_struct = (sark_struct if sark_struct is not None else
pkg_resources.resource_filename("rig", "boot/sark.struct"))
with open(scamp_binary, "rb") as f:
boot_data = f.read()
# Read the struct file and modify the "sv" struct to contain the
# configuration values and write this into the boot data.
with open(sark_struct, "rb") as f:
struct_data = f.read()
structs = struct_file.read_struct_file(struct_data)
sv = structs[b"sv"]
sv_overrides.update(kwargs) # Allow non-explicit keyword arguments for SV
sv.update_default_values(**sv_overrides)
sv.update_default_values(unix_time=int(time.time()),
boot_sig=int(time.time()),
root_chip=1)
struct_packed = sv.pack()
assert len(struct_packed) >= 128 # Otherwise shoving this data in is nasty
buf = bytearray(boot_data)
buf[BOOT_DATA_OFFSET:BOOT_DATA_OFFSET+BOOT_DATA_LENGTH] = \
struct_packed[:BOOT_DATA_LENGTH]
assert len(buf) < DTCM_SIZE # Assert that we fit in DTCM
boot_data = bytes(buf)
# Create a socket to communicate with the board
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((hostname, boot_port))
# Transmit the boot data as a series of SDP packets. First determine
# how many blocks must be sent and transmit that, then transmit each
# block.
n_blocks = (len(buf) + BOOT_BYTE_SIZE - 1) // BOOT_BYTE_SIZE
assert n_blocks <= BOOT_MAX_BLOCKS
boot_packet(sock, BootCommand.start, arg3=n_blocks - 1)
time.sleep(boot_delay)
block = 0
while len(boot_data) > 0:
# Get the data to transmit
data, boot_data = (boot_data[:BOOT_BYTE_SIZE],
boot_data[BOOT_BYTE_SIZE:])
# Transmit, delay and increment the block count
a1 = ((BOOT_WORD_SIZE - 1) << 8) | block
boot_packet(sock, BootCommand.send_block, a1, data=data)
time.sleep(boot_delay)
block += 1
# Send the END command
boot_packet(sock, BootCommand.end, 1)
# Close the socket and give time to boot
sock.close()
time.sleep(post_boot_delay)
return structs | python | def boot(hostname, boot_port=consts.BOOT_PORT,
scamp_binary=None, sark_struct=None,
boot_delay=0.05, post_boot_delay=2.0,
sv_overrides=dict(), **kwargs):
"""Boot a SpiNNaker machine of the given size.
Parameters
----------
hostname : str
Hostname or IP address of the SpiNNaker chip to use to boot the system.
boot_port : int
The port number to send boot packets to.
scamp_binary : filename or None
Filename of the binary to boot the machine with or None to use the
SC&MP binary bundled with Rig.
sark_struct : filename or None
The 'sark.struct' file which defines the datastructures or None to use
the one bundled with Rig.
boot_delay : float
Number of seconds to pause between sending boot data packets.
post_boot_delay : float
Number of seconds to wait after sending last piece of boot data to give
SC&MP time to re-initialise the Ethernet interface. Note that this does
*not* wait for the system to fully boot.
sv_overrides : {name: value, ...}
Values used to override the defaults in the 'sv' struct defined in the
struct file.
Notes
-----
The constants `rig.machine_control.boot.spinX_boot_options` provide boot
parameters for specific SpiNNaker board revisions, for example::
boot("board1", **spin3_boot_options)
Will boot the Spin3 board connected with hostname "board1".
Returns
-------
{struct_name: :py:class:`~rig.machine_control.struct_file.Struct`}
Layout of structs in memory.
"""
# Get the boot data if not specified.
scamp_binary = (scamp_binary if scamp_binary is not None else
pkg_resources.resource_filename("rig", "boot/scamp.boot"))
sark_struct = (sark_struct if sark_struct is not None else
pkg_resources.resource_filename("rig", "boot/sark.struct"))
with open(scamp_binary, "rb") as f:
boot_data = f.read()
# Read the struct file and modify the "sv" struct to contain the
# configuration values and write this into the boot data.
with open(sark_struct, "rb") as f:
struct_data = f.read()
structs = struct_file.read_struct_file(struct_data)
sv = structs[b"sv"]
sv_overrides.update(kwargs) # Allow non-explicit keyword arguments for SV
sv.update_default_values(**sv_overrides)
sv.update_default_values(unix_time=int(time.time()),
boot_sig=int(time.time()),
root_chip=1)
struct_packed = sv.pack()
assert len(struct_packed) >= 128 # Otherwise shoving this data in is nasty
buf = bytearray(boot_data)
buf[BOOT_DATA_OFFSET:BOOT_DATA_OFFSET+BOOT_DATA_LENGTH] = \
struct_packed[:BOOT_DATA_LENGTH]
assert len(buf) < DTCM_SIZE # Assert that we fit in DTCM
boot_data = bytes(buf)
# Create a socket to communicate with the board
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((hostname, boot_port))
# Transmit the boot data as a series of SDP packets. First determine
# how many blocks must be sent and transmit that, then transmit each
# block.
n_blocks = (len(buf) + BOOT_BYTE_SIZE - 1) // BOOT_BYTE_SIZE
assert n_blocks <= BOOT_MAX_BLOCKS
boot_packet(sock, BootCommand.start, arg3=n_blocks - 1)
time.sleep(boot_delay)
block = 0
while len(boot_data) > 0:
# Get the data to transmit
data, boot_data = (boot_data[:BOOT_BYTE_SIZE],
boot_data[BOOT_BYTE_SIZE:])
# Transmit, delay and increment the block count
a1 = ((BOOT_WORD_SIZE - 1) << 8) | block
boot_packet(sock, BootCommand.send_block, a1, data=data)
time.sleep(boot_delay)
block += 1
# Send the END command
boot_packet(sock, BootCommand.end, 1)
# Close the socket and give time to boot
sock.close()
time.sleep(post_boot_delay)
return structs | [
"def",
"boot",
"(",
"hostname",
",",
"boot_port",
"=",
"consts",
".",
"BOOT_PORT",
",",
"scamp_binary",
"=",
"None",
",",
"sark_struct",
"=",
"None",
",",
"boot_delay",
"=",
"0.05",
",",
"post_boot_delay",
"=",
"2.0",
",",
"sv_overrides",
"=",
"dict",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the boot data if not specified.",
"scamp_binary",
"=",
"(",
"scamp_binary",
"if",
"scamp_binary",
"is",
"not",
"None",
"else",
"pkg_resources",
".",
"resource_filename",
"(",
"\"rig\"",
",",
"\"boot/scamp.boot\"",
")",
")",
"sark_struct",
"=",
"(",
"sark_struct",
"if",
"sark_struct",
"is",
"not",
"None",
"else",
"pkg_resources",
".",
"resource_filename",
"(",
"\"rig\"",
",",
"\"boot/sark.struct\"",
")",
")",
"with",
"open",
"(",
"scamp_binary",
",",
"\"rb\"",
")",
"as",
"f",
":",
"boot_data",
"=",
"f",
".",
"read",
"(",
")",
"# Read the struct file and modify the \"sv\" struct to contain the",
"# configuration values and write this into the boot data.",
"with",
"open",
"(",
"sark_struct",
",",
"\"rb\"",
")",
"as",
"f",
":",
"struct_data",
"=",
"f",
".",
"read",
"(",
")",
"structs",
"=",
"struct_file",
".",
"read_struct_file",
"(",
"struct_data",
")",
"sv",
"=",
"structs",
"[",
"b\"sv\"",
"]",
"sv_overrides",
".",
"update",
"(",
"kwargs",
")",
"# Allow non-explicit keyword arguments for SV",
"sv",
".",
"update_default_values",
"(",
"*",
"*",
"sv_overrides",
")",
"sv",
".",
"update_default_values",
"(",
"unix_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
",",
"boot_sig",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
",",
"root_chip",
"=",
"1",
")",
"struct_packed",
"=",
"sv",
".",
"pack",
"(",
")",
"assert",
"len",
"(",
"struct_packed",
")",
">=",
"128",
"# Otherwise shoving this data in is nasty",
"buf",
"=",
"bytearray",
"(",
"boot_data",
")",
"buf",
"[",
"BOOT_DATA_OFFSET",
":",
"BOOT_DATA_OFFSET",
"+",
"BOOT_DATA_LENGTH",
"]",
"=",
"struct_packed",
"[",
":",
"BOOT_DATA_LENGTH",
"]",
"assert",
"len",
"(",
"buf",
")",
"<",
"DTCM_SIZE",
"# Assert that we fit in DTCM",
"boot_data",
"=",
"bytes",
"(",
"buf",
")",
"# Create a socket to communicate with the board",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"sock",
".",
"connect",
"(",
"(",
"hostname",
",",
"boot_port",
")",
")",
"# Transmit the boot data as a series of SDP packets. First determine",
"# how many blocks must be sent and transmit that, then transmit each",
"# block.",
"n_blocks",
"=",
"(",
"len",
"(",
"buf",
")",
"+",
"BOOT_BYTE_SIZE",
"-",
"1",
")",
"//",
"BOOT_BYTE_SIZE",
"assert",
"n_blocks",
"<=",
"BOOT_MAX_BLOCKS",
"boot_packet",
"(",
"sock",
",",
"BootCommand",
".",
"start",
",",
"arg3",
"=",
"n_blocks",
"-",
"1",
")",
"time",
".",
"sleep",
"(",
"boot_delay",
")",
"block",
"=",
"0",
"while",
"len",
"(",
"boot_data",
")",
">",
"0",
":",
"# Get the data to transmit",
"data",
",",
"boot_data",
"=",
"(",
"boot_data",
"[",
":",
"BOOT_BYTE_SIZE",
"]",
",",
"boot_data",
"[",
"BOOT_BYTE_SIZE",
":",
"]",
")",
"# Transmit, delay and increment the block count",
"a1",
"=",
"(",
"(",
"BOOT_WORD_SIZE",
"-",
"1",
")",
"<<",
"8",
")",
"|",
"block",
"boot_packet",
"(",
"sock",
",",
"BootCommand",
".",
"send_block",
",",
"a1",
",",
"data",
"=",
"data",
")",
"time",
".",
"sleep",
"(",
"boot_delay",
")",
"block",
"+=",
"1",
"# Send the END command",
"boot_packet",
"(",
"sock",
",",
"BootCommand",
".",
"end",
",",
"1",
")",
"# Close the socket and give time to boot",
"sock",
".",
"close",
"(",
")",
"time",
".",
"sleep",
"(",
"post_boot_delay",
")",
"return",
"structs"
] | Boot a SpiNNaker machine of the given size.
Parameters
----------
hostname : str
Hostname or IP address of the SpiNNaker chip to use to boot the system.
boot_port : int
The port number to send boot packets to.
scamp_binary : filename or None
Filename of the binary to boot the machine with or None to use the
SC&MP binary bundled with Rig.
sark_struct : filename or None
The 'sark.struct' file which defines the datastructures or None to use
the one bundled with Rig.
boot_delay : float
Number of seconds to pause between sending boot data packets.
post_boot_delay : float
Number of seconds to wait after sending last piece of boot data to give
SC&MP time to re-initialise the Ethernet interface. Note that this does
*not* wait for the system to fully boot.
sv_overrides : {name: value, ...}
Values used to override the defaults in the 'sv' struct defined in the
struct file.
Notes
-----
The constants `rig.machine_control.boot.spinX_boot_options` provide boot
parameters for specific SpiNNaker board revisions, for example::
boot("board1", **spin3_boot_options)
Will boot the Spin3 board connected with hostname "board1".
Returns
-------
{struct_name: :py:class:`~rig.machine_control.struct_file.Struct`}
Layout of structs in memory. | [
"Boot",
"a",
"SpiNNaker",
"machine",
"of",
"the",
"given",
"size",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/boot.py#L62-L164 |
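The transmit loop in boot() chops the patched boot image into fixed-size blocks and packs the per-block word count and block index into the first argument of each send_block packet. A short sketch of that arithmetic with assumed constants (the real BOOT_BYTE_SIZE, BOOT_WORD_SIZE and BOOT_MAX_BLOCKS are module constants in rig.machine_control.boot and may differ)::

    BOOT_BYTE_SIZE = 1024                  # bytes per boot block (assumed)
    BOOT_WORD_SIZE = BOOT_BYTE_SIZE // 4   # 32-bit words per block
    BOOT_MAX_BLOCKS = 32                   # assumed upper bound

    boot_data = bytes(4000)                # stand-in for the patched SC&MP image

    # Same ceiling division as in boot() above.
    n_blocks = (len(boot_data) + BOOT_BYTE_SIZE - 1) // BOOT_BYTE_SIZE
    assert n_blocks <= BOOT_MAX_BLOCKS

    for block in range(n_blocks):
        chunk = boot_data[block * BOOT_BYTE_SIZE:(block + 1) * BOOT_BYTE_SIZE]
        # arg1: (words per block - 1) in the upper byte, block index below it.
        a1 = ((BOOT_WORD_SIZE - 1) << 8) | block
        print(block, len(chunk), hex(a1))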
project-rig/rig | rig/machine_control/boot.py | boot_packet | def boot_packet(sock, cmd, arg1=0, arg2=0, arg3=0, data=b""):
"""Create and transmit a packet to boot the machine.
Parameters
----------
sock : :py:class:`~socket.socket`
Connected socket to use to transmit the packet.
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : :py:class:`bytes`
Optional data to include in the packet.
"""
PROTOCOL_VERSION = 1
# Generate the (network-byte order) header
header = struct.pack("!H4I", PROTOCOL_VERSION, cmd, arg1, arg2, arg3)
assert len(data) % 4 == 0 # Data should always be word-sized
fdata = b""
# Format the data from little- to network-/big-endian
while len(data) > 0:
word, data = (data[:4], data[4:])
fdata += struct.pack("!I", struct.unpack("<I", word)[0])
# Transmit the packet
sock.send(header + fdata) | python | def boot_packet(sock, cmd, arg1=0, arg2=0, arg3=0, data=b""):
"""Create and transmit a packet to boot the machine.
Parameters
----------
sock : :py:class:`~socket.socket`
Connected socket to use to transmit the packet.
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : :py:class:`bytes`
Optional data to include in the packet.
"""
PROTOCOL_VERSION = 1
# Generate the (network-byte order) header
header = struct.pack("!H4I", PROTOCOL_VERSION, cmd, arg1, arg2, arg3)
assert len(data) % 4 == 0 # Data should always be word-sized
fdata = b""
# Format the data from little- to network-/big-endian
while len(data) > 0:
word, data = (data[:4], data[4:])
fdata += struct.pack("!I", struct.unpack("<I", word)[0])
# Transmit the packet
sock.send(header + fdata) | [
"def",
"boot_packet",
"(",
"sock",
",",
"cmd",
",",
"arg1",
"=",
"0",
",",
"arg2",
"=",
"0",
",",
"arg3",
"=",
"0",
",",
"data",
"=",
"b\"\"",
")",
":",
"PROTOCOL_VERSION",
"=",
"1",
"# Generate the (network-byte order) header",
"header",
"=",
"struct",
".",
"pack",
"(",
"\"!H4I\"",
",",
"PROTOCOL_VERSION",
",",
"cmd",
",",
"arg1",
",",
"arg2",
",",
"arg3",
")",
"assert",
"len",
"(",
"data",
")",
"%",
"4",
"==",
"0",
"# Data should always be word-sized",
"fdata",
"=",
"b\"\"",
"# Format the data from little- to network-/big-endian",
"while",
"len",
"(",
"data",
")",
">",
"0",
":",
"word",
",",
"data",
"=",
"(",
"data",
"[",
":",
"4",
"]",
",",
"data",
"[",
"4",
":",
"]",
")",
"fdata",
"+=",
"struct",
".",
"pack",
"(",
"\"!I\"",
",",
"struct",
".",
"unpack",
"(",
"\"<I\"",
",",
"word",
")",
"[",
"0",
"]",
")",
"# Transmit the packet",
"sock",
".",
"send",
"(",
"header",
"+",
"fdata",
")"
] | Create and transmit a packet to boot the machine.
Parameters
----------
sock : :py:class:`~socket.socket`
Connected socket to use to transmit the packet.
cmd : int
arg1 : int
arg2 : int
arg3 : int
data : :py:class:`bytes`
Optional data to include in the packet. | [
"Create",
"and",
"transmit",
"a",
"packet",
"to",
"boot",
"the",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/boot.py#L167-L195 |
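boot_packet() packs the header in network byte order while the payload arrives as little-endian words, so each word is re-packed before transmission. A standalone illustration of the packing (the command and argument values here are arbitrary, not real boot commands)::

    import struct

    PROTOCOL_VERSION = 1
    cmd, arg1, arg2, arg3 = 3, 0x0100, 0, 0        # illustrative values only
    data = b"\x01\x00\x00\x00\x02\x00\x00\x00"     # two little-endian words

    header = struct.pack("!H4I", PROTOCOL_VERSION, cmd, arg1, arg2, arg3)

    # Re-pack each 4-byte word from little-endian to network byte order,
    # mirroring the loop in boot_packet().
    fdata = b"".join(
        struct.pack("!I", struct.unpack("<I", data[i:i + 4])[0])
        for i in range(0, len(data), 4)
    )
    print((header + fdata).hex())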
project-rig/rig | rig/place_and_route/machine.py | Machine.copy | def copy(self):
"""Produce a copy of this datastructure."""
return Machine(
self.width, self.height,
self.chip_resources, self.chip_resource_exceptions,
self.dead_chips, self.dead_links) | python | def copy(self):
"""Produce a copy of this datastructure."""
return Machine(
self.width, self.height,
self.chip_resources, self.chip_resource_exceptions,
self.dead_chips, self.dead_links) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"Machine",
"(",
"self",
".",
"width",
",",
"self",
".",
"height",
",",
"self",
".",
"chip_resources",
",",
"self",
".",
"chip_resource_exceptions",
",",
"self",
".",
"dead_chips",
",",
"self",
".",
"dead_links",
")"
] | Produce a copy of this datastructure. | [
"Produce",
"a",
"copy",
"of",
"this",
"datastructure",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/machine.py#L115-L120 |
project-rig/rig | rig/place_and_route/machine.py | Machine.issubset | def issubset(self, other):
"""Test whether the resources available in this machine description are
a (non-strict) subset of those available in another machine.
.. note::
This test being False does not imply that this machine is
a superset of the other machine; machines may have disjoint
resources.
"""
return (set(self).issubset(set(other)) and
set(self.iter_links()).issubset(set(other.iter_links())) and
all(set(self[chip]).issubset(other[chip]) and
all(self[chip][r] <= other[chip][r]
for r in self[chip])
for chip in self)) | python | def issubset(self, other):
"""Test whether the resources available in this machine description are
a (non-strict) subset of those available in another machine.
.. note::
This test being False does not imply that this machine is
a superset of the other machine; machines may have disjoint
resources.
"""
return (set(self).issubset(set(other)) and
set(self.iter_links()).issubset(set(other.iter_links())) and
all(set(self[chip]).issubset(other[chip]) and
all(self[chip][r] <= other[chip][r]
for r in self[chip])
for chip in self)) | [
"def",
"issubset",
"(",
"self",
",",
"other",
")",
":",
"return",
"(",
"set",
"(",
"self",
")",
".",
"issubset",
"(",
"set",
"(",
"other",
")",
")",
"and",
"set",
"(",
"self",
".",
"iter_links",
"(",
")",
")",
".",
"issubset",
"(",
"set",
"(",
"other",
".",
"iter_links",
"(",
")",
")",
")",
"and",
"all",
"(",
"set",
"(",
"self",
"[",
"chip",
"]",
")",
".",
"issubset",
"(",
"other",
"[",
"chip",
"]",
")",
"and",
"all",
"(",
"self",
"[",
"chip",
"]",
"[",
"r",
"]",
"<=",
"other",
"[",
"chip",
"]",
"[",
"r",
"]",
"for",
"r",
"in",
"self",
"[",
"chip",
"]",
")",
"for",
"chip",
"in",
"self",
")",
")"
] | Test whether the resources available in this machine description are
a (non-strict) subset of those available in another machine.
.. note::
This test being False does not imply that this machine is
a superset of the other machine; machines may have disjoint
resources. | [
"Test",
"whether",
"the",
"resources",
"available",
"in",
"this",
"machine",
"description",
"are",
"a",
"(",
"non",
"-",
"strict",
")",
"subset",
"of",
"those",
"available",
"in",
"another",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/machine.py#L137-L152 |
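Machine.issubset() requires every chip and per-chip resource quantity in one machine to be present, and no larger, in the other. The same idea on plain dictionaries, without the Machine class or its link sets::

    # chip -> {resource: amount}; a toy stand-in for Machine's chip resources.
    small = {(0, 0): {'cores': 16, 'sdram': 64}}
    big = {(0, 0): {'cores': 18, 'sdram': 128}, (0, 1): {'cores': 18}}

    is_subset = (
        set(small) <= set(big) and
        all(set(small[chip]) <= set(big[chip]) and
            all(small[chip][r] <= big[chip][r] for r in small[chip])
            for chip in small)
    )
    print(is_subset)   # True: everything in `small` fits inside `big`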
project-rig/rig | rig/place_and_route/machine.py | Machine.iter_links | def iter_links(self):
"""An iterator over the working links in the machine.
Generates a series of (x, y, link) tuples.
"""
for x in range(self.width):
for y in range(self.height):
for link in Links:
if (x, y, link) in self:
yield (x, y, link) | python | def iter_links(self):
"""An iterator over the working links in the machine.
Generates a series of (x, y, link) tuples.
"""
for x in range(self.width):
for y in range(self.height):
for link in Links:
if (x, y, link) in self:
yield (x, y, link) | [
"def",
"iter_links",
"(",
"self",
")",
":",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"width",
")",
":",
"for",
"y",
"in",
"range",
"(",
"self",
".",
"height",
")",
":",
"for",
"link",
"in",
"Links",
":",
"if",
"(",
"x",
",",
"y",
",",
"link",
")",
"in",
"self",
":",
"yield",
"(",
"x",
",",
"y",
",",
"link",
")"
] | An iterator over the working links in the machine.
Generates a series of (x, y, link) tuples. | [
"An",
"iterator",
"over",
"the",
"working",
"links",
"in",
"the",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/machine.py#L209-L218 |
project-rig/rig | rig/place_and_route/machine.py | Machine.has_wrap_around_links | def has_wrap_around_links(self, minimum_working=0.9):
"""Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not.
"""
working = 0
for x in range(self.width):
if (x, 0, Links.south) in self:
working += 1
if (x, self.height - 1, Links.north) in self:
working += 1
if (x, 0, Links.south_west) in self:
working += 1
if (x, self.height - 1, Links.north_east) in self:
working += 1
for y in range(self.height):
if (0, y, Links.west) in self:
working += 1
if (self.width - 1, y, Links.east) in self:
working += 1
# Don't re-count links counted when scanning the x-axis
if y != 0 and (0, y, Links.south_west) in self:
working += 1
if (y != self.height - 1 and
(self.width - 1, y, Links.north_east) in self):
working += 1
total = (4 * self.width) + (4 * self.height) - 2
return (float(working) / float(total)) >= minimum_working | python | def has_wrap_around_links(self, minimum_working=0.9):
"""Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not.
"""
working = 0
for x in range(self.width):
if (x, 0, Links.south) in self:
working += 1
if (x, self.height - 1, Links.north) in self:
working += 1
if (x, 0, Links.south_west) in self:
working += 1
if (x, self.height - 1, Links.north_east) in self:
working += 1
for y in range(self.height):
if (0, y, Links.west) in self:
working += 1
if (self.width - 1, y, Links.east) in self:
working += 1
# Don't re-count links counted when scanning the x-axis
if y != 0 and (0, y, Links.south_west) in self:
working += 1
if (y != self.height - 1 and
(self.width - 1, y, Links.north_east) in self):
working += 1
total = (4 * self.width) + (4 * self.height) - 2
return (float(working) / float(total)) >= minimum_working | [
"def",
"has_wrap_around_links",
"(",
"self",
",",
"minimum_working",
"=",
"0.9",
")",
":",
"working",
"=",
"0",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"width",
")",
":",
"if",
"(",
"x",
",",
"0",
",",
"Links",
".",
"south",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"if",
"(",
"x",
",",
"self",
".",
"height",
"-",
"1",
",",
"Links",
".",
"north",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"if",
"(",
"x",
",",
"0",
",",
"Links",
".",
"south_west",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"if",
"(",
"x",
",",
"self",
".",
"height",
"-",
"1",
",",
"Links",
".",
"north_east",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"for",
"y",
"in",
"range",
"(",
"self",
".",
"height",
")",
":",
"if",
"(",
"0",
",",
"y",
",",
"Links",
".",
"west",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"if",
"(",
"self",
".",
"width",
"-",
"1",
",",
"y",
",",
"Links",
".",
"east",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"# Don't re-count links counted when scanning the x-axis",
"if",
"y",
"!=",
"0",
"and",
"(",
"0",
",",
"y",
",",
"Links",
".",
"south_west",
")",
"in",
"self",
":",
"working",
"+=",
"1",
"if",
"(",
"y",
"!=",
"self",
".",
"height",
"-",
"1",
"and",
"(",
"self",
".",
"width",
"-",
"1",
",",
"y",
",",
"Links",
".",
"north_east",
")",
"in",
"self",
")",
":",
"working",
"+=",
"1",
"total",
"=",
"(",
"4",
"*",
"self",
".",
"width",
")",
"+",
"(",
"4",
"*",
"self",
".",
"height",
")",
"-",
"2",
"return",
"(",
"float",
"(",
"working",
")",
"/",
"float",
"(",
"total",
")",
")",
">=",
"minimum_working"
] | Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not. | [
"Test",
"if",
"a",
"machine",
"has",
"wrap",
"-",
"around",
"connections",
"installed",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/machine.py#L220-L265 |
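The denominator in has_wrap_around_links() is 4 * width + 4 * height - 2 candidate wrap-around links, and the machine is treated as toroidal when the working fraction reaches the threshold. A quick numeric check::

    width, height = 8, 8
    total = (4 * width) + (4 * height) - 2    # 62 candidate wrap-around links
    minimum_working = 0.9
    working = 60                              # hypothetical count of live links
    print(working, total, working / total >= minimum_working)   # 60 62 True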
Metatab/metapack | metapack/cli/core.py | write_doc | def write_doc(doc : MetapackDoc, mt_file=None):
"""
Write a Metatab doc to a CSV file, and update the Modified time
:param doc:
:param mt_file:
:return:
"""
from rowgenerators import parse_app_url
if not mt_file:
mt_file = doc.ref
add_giturl(doc)
u = parse_app_url(mt_file)
if u.scheme == 'file':
doc.write(mt_file)
return True
else:
return False | python | def write_doc(doc : MetapackDoc, mt_file=None):
"""
Write a Metatab doc to a CSV file, and update the Modified time
:param doc:
:param mt_file:
:return:
"""
from rowgenerators import parse_app_url
if not mt_file:
mt_file = doc.ref
add_giturl(doc)
u = parse_app_url(mt_file)
if u.scheme == 'file':
doc.write(mt_file)
return True
else:
return False | [
"def",
"write_doc",
"(",
"doc",
":",
"MetapackDoc",
",",
"mt_file",
"=",
"None",
")",
":",
"from",
"rowgenerators",
"import",
"parse_app_url",
"if",
"not",
"mt_file",
":",
"mt_file",
"=",
"doc",
".",
"ref",
"add_giturl",
"(",
"doc",
")",
"u",
"=",
"parse_app_url",
"(",
"mt_file",
")",
"if",
"u",
".",
"scheme",
"==",
"'file'",
":",
"doc",
".",
"write",
"(",
"mt_file",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Write a Metatab doc to a CSV file, and update the Modified time
:param doc:
:param mt_file:
:return: | [
"Write",
"a",
"Metatab",
"doc",
"to",
"a",
"CSV",
"file",
"and",
"update",
"the",
"Modified",
"time",
":",
"param",
"doc",
":",
":",
"param",
"mt_file",
":",
":",
"return",
":"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/core.py#L315-L336 |
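A hedged usage sketch for write_doc(); it assumes metapack is installed and that 'metadata.csv' is a local Metatab document, so doc.ref is a file: URL and the write succeeds (for any other scheme the function returns False without writing)::

    from metapack import MetapackDoc

    doc = MetapackDoc('metadata.csv')
    if write_doc(doc):
        print('saved', doc.ref)
    else:
        print('not a local file; nothing written')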
Metatab/metapack | metapack/cli/core.py | update_resource_properties | def update_resource_properties(r, orig_columns={}, force=False):
"""Get descriptions and other properties from this, or upstream, packages, and add them to the schema. """
added = []
schema_term = r.schema_term
if not schema_term:
warn("No schema term for ", r.name)
return
rg = r.raw_row_generator
# Get columns information from the schema, or, if it is a package reference,
# from the upstream schema
upstream_columns = {e['name'].lower() if e['name'] else '': e for e in r.columns() or {}}
# Just from the local schema
schema_columns = {e['name'].lower() if e['name'] else '': e for e in r.schema_columns or {}}
# Ask the generator if it can provide column descriptions and types
generator_columns = {e['name'].lower() if e['name'] else '': e for e in rg.columns or {}}
def get_col_value(col_name, value_name):
v = None
if not col_name:
return None
for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
v_ = d.get(col_name.lower(), {}).get(value_name)
if v_:
v = v_
return v
# Look for new properties
extra_properties = set()
for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
for k, v in d.items():
for kk, vv in v.items():
extra_properties.add(kk)
# Remove the properties that are already accounted for
extra_properties = extra_properties - {'pos', 'header', 'name', ''}
# Add any extra properties, such as from upstream packages, to the schema.
for ep in extra_properties:
r.doc['Schema'].add_arg(ep)
for c in schema_term.find('Table.Column'):
for ep in extra_properties:
t = c.get_or_new_child(ep)
v = get_col_value(c.name, ep)
if v:
t.value = v
added.append((c.name, ep, v))
prt('Updated schema for {}. Set {} properties'.format(r.name, len(added))) | python | def update_resource_properties(r, orig_columns={}, force=False):
"""Get descriptions and other properties from this, or upstream, packages, and add them to the schema. """
added = []
schema_term = r.schema_term
if not schema_term:
warn("No schema term for ", r.name)
return
rg = r.raw_row_generator
# Get columns information from the schema, or, if it is a package reference,
# from the upstream schema
upstream_columns = {e['name'].lower() if e['name'] else '': e for e in r.columns() or {}}
# Just from the local schema
schema_columns = {e['name'].lower() if e['name'] else '': e for e in r.schema_columns or {}}
# Ask the generator if it can provide column descriptions and types
generator_columns = {e['name'].lower() if e['name'] else '': e for e in rg.columns or {}}
def get_col_value(col_name, value_name):
v = None
if not col_name:
return None
for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
v_ = d.get(col_name.lower(), {}).get(value_name)
if v_:
v = v_
return v
# Look for new properties
extra_properties = set()
for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
for k, v in d.items():
for kk, vv in v.items():
extra_properties.add(kk)
# Remove the properties that are already accounted for
extra_properties = extra_properties - {'pos', 'header', 'name', ''}
# Add any extra properties, such as from upstream packages, to the schema.
for ep in extra_properties:
r.doc['Schema'].add_arg(ep)
for c in schema_term.find('Table.Column'):
for ep in extra_properties:
t = c.get_or_new_child(ep)
v = get_col_value(c.name, ep)
if v:
t.value = v
added.append((c.name, ep, v))
prt('Updated schema for {}. Set {} properties'.format(r.name, len(added))) | [
"def",
"update_resource_properties",
"(",
"r",
",",
"orig_columns",
"=",
"{",
"}",
",",
"force",
"=",
"False",
")",
":",
"added",
"=",
"[",
"]",
"schema_term",
"=",
"r",
".",
"schema_term",
"if",
"not",
"schema_term",
":",
"warn",
"(",
"\"No schema term for \"",
",",
"r",
".",
"name",
")",
"return",
"rg",
"=",
"r",
".",
"raw_row_generator",
"# Get columns information from the schema, or, if it is a package reference,",
"# from the upstream schema",
"upstream_columns",
"=",
"{",
"e",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
"if",
"e",
"[",
"'name'",
"]",
"else",
"''",
":",
"e",
"for",
"e",
"in",
"r",
".",
"columns",
"(",
")",
"or",
"{",
"}",
"}",
"# Just from the local schema",
"schema_columns",
"=",
"{",
"e",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
"if",
"e",
"[",
"'name'",
"]",
"else",
"''",
":",
"e",
"for",
"e",
"in",
"r",
".",
"schema_columns",
"or",
"{",
"}",
"}",
"# Ask the generator if it can provide column descriptions and types",
"generator_columns",
"=",
"{",
"e",
"[",
"'name'",
"]",
".",
"lower",
"(",
")",
"if",
"e",
"[",
"'name'",
"]",
"else",
"''",
":",
"e",
"for",
"e",
"in",
"rg",
".",
"columns",
"or",
"{",
"}",
"}",
"def",
"get_col_value",
"(",
"col_name",
",",
"value_name",
")",
":",
"v",
"=",
"None",
"if",
"not",
"col_name",
":",
"return",
"None",
"for",
"d",
"in",
"[",
"generator_columns",
",",
"upstream_columns",
",",
"orig_columns",
",",
"schema_columns",
"]",
":",
"v_",
"=",
"d",
".",
"get",
"(",
"col_name",
".",
"lower",
"(",
")",
",",
"{",
"}",
")",
".",
"get",
"(",
"value_name",
")",
"if",
"v_",
":",
"v",
"=",
"v_",
"return",
"v",
"# Look for new properties",
"extra_properties",
"=",
"set",
"(",
")",
"for",
"d",
"in",
"[",
"generator_columns",
",",
"upstream_columns",
",",
"orig_columns",
",",
"schema_columns",
"]",
":",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"for",
"kk",
",",
"vv",
"in",
"v",
".",
"items",
"(",
")",
":",
"extra_properties",
".",
"add",
"(",
"kk",
")",
"# Remove the properties that are already accounted for",
"extra_properties",
"=",
"extra_properties",
"-",
"{",
"'pos'",
",",
"'header'",
",",
"'name'",
",",
"''",
"}",
"# Add any extra properties, such as from upstream packages, to the schema.",
"for",
"ep",
"in",
"extra_properties",
":",
"r",
".",
"doc",
"[",
"'Schema'",
"]",
".",
"add_arg",
"(",
"ep",
")",
"for",
"c",
"in",
"schema_term",
".",
"find",
"(",
"'Table.Column'",
")",
":",
"for",
"ep",
"in",
"extra_properties",
":",
"t",
"=",
"c",
".",
"get_or_new_child",
"(",
"ep",
")",
"v",
"=",
"get_col_value",
"(",
"c",
".",
"name",
",",
"ep",
")",
"if",
"v",
":",
"t",
".",
"value",
"=",
"v",
"added",
".",
"append",
"(",
"(",
"c",
".",
"name",
",",
"ep",
",",
"v",
")",
")",
"prt",
"(",
"'Updated schema for {}. Set {} properties'",
".",
"format",
"(",
"r",
".",
"name",
",",
"len",
"(",
"added",
")",
")",
")"
] | Get descriptions and other properties from this, or upstream, packages, and add them to the schema. | [
"Get",
"descriptions",
"and",
"other",
"properties",
"from",
"this",
"or",
"upstream",
"packages",
"and",
"add",
"them",
"to",
"the",
"schema",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/core.py#L458-L522 |
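The nested get_col_value() helper in update_resource_properties() is easy to misread: it keeps looping after a match, so the last non-empty value across the layered column dicts wins and the local schema takes precedence. A standalone illustration::

    generator_cols = {'geoid': {'description': ''}}
    upstream_cols = {'geoid': {'description': 'Census tract GEOID'}}
    schema_cols = {'geoid': {'description': 'Tract identifier'}}

    def get_col_value(col_name, value_name):
        v = None
        # Later non-empty values override earlier ones.
        for d in [generator_cols, upstream_cols, schema_cols]:
            v_ = d.get(col_name.lower(), {}).get(value_name)
            if v_:
                v = v_
        return v

    print(get_col_value('GEOID', 'description'))   # -> 'Tract identifier'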
Metatab/metapack | metapack/cli/core.py | get_config | def get_config():
"""Return a configuration dict"""
from os import environ
from os.path import expanduser
from pathlib import Path
import yaml
def pexp(p):
try:
return Path(p).expanduser()
except AttributeError:
# python 3.4
return Path(expanduser(p))
paths = [environ.get("METAPACK_CONFIG"), '~/.metapack.yaml', '/etc/metapack.yaml']
for p in paths:
if not p:
continue
p = pexp(p)
if p.exists():
with p.open() as f:
config = yaml.safe_load(f)
if not config:
config = {}
config['_loaded_from'] = str(p)
return config
return None | python | def get_config():
"""Return a configuration dict"""
from os import environ
from os.path import expanduser
from pathlib import Path
import yaml
def pexp(p):
try:
return Path(p).expanduser()
except AttributeError:
# python 3.4
return Path(expanduser(p))
paths = [environ.get("METAPACK_CONFIG"), '~/.metapack.yaml', '/etc/metapack.yaml']
for p in paths:
if not p:
continue
p = pexp(p)
if p.exists():
with p.open() as f:
config = yaml.safe_load(f)
if not config:
config = {}
config['_loaded_from'] = str(p)
return config
return None | [
"def",
"get_config",
"(",
")",
":",
"from",
"os",
"import",
"environ",
"from",
"os",
".",
"path",
"import",
"expanduser",
"from",
"pathlib",
"import",
"Path",
"import",
"yaml",
"def",
"pexp",
"(",
"p",
")",
":",
"try",
":",
"return",
"Path",
"(",
"p",
")",
".",
"expanduser",
"(",
")",
"except",
"AttributeError",
":",
"# python 3.4",
"return",
"Path",
"(",
"expanduser",
"(",
"p",
")",
")",
"paths",
"=",
"[",
"environ",
".",
"get",
"(",
"\"METAPACK_CONFIG\"",
")",
",",
"'~/.metapack.yaml'",
",",
"'/etc/metapack.yaml'",
"]",
"for",
"p",
"in",
"paths",
":",
"if",
"not",
"p",
":",
"continue",
"p",
"=",
"pexp",
"(",
"p",
")",
"if",
"p",
".",
"exists",
"(",
")",
":",
"with",
"p",
".",
"open",
"(",
")",
"as",
"f",
":",
"config",
"=",
"yaml",
".",
"safe_load",
"(",
"f",
")",
"if",
"not",
"config",
":",
"config",
"=",
"{",
"}",
"config",
"[",
"'_loaded_from'",
"]",
"=",
"str",
"(",
"p",
")",
"return",
"config",
"return",
"None"
] | Return a configuration dict | [
"Return",
"a",
"configuration",
"dict"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/core.py#L669-L701 |
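get_config() loads the first existing file among $METAPACK_CONFIG, ~/.metapack.yaml and /etc/metapack.yaml and records its path under the '_loaded_from' key; None means nothing was found. For example (any other keys depend entirely on your configuration file)::

    config = get_config()
    if config is None:
        print('no metapack configuration found')
    else:
        print('loaded from', config['_loaded_from'])
        print(config.get('s3', {}))   # hypothetical key; depends on your file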
Metatab/metapack | metapack/cli/core.py | find_csv_packages | def find_csv_packages(m, downloader):
"""Locate the build CSV package, which will have distributions if it was generated as
and S3 package"""
from metapack.package import CsvPackageBuilder
pkg_dir = m.package_root
name = m.doc.get_value('Root.Name')
package_path, cache_path = CsvPackageBuilder.make_package_path(pkg_dir, name)
if package_path.exists():
return open_package(package_path, downloader=downloader) | python | def find_csv_packages(m, downloader):
"""Locate the build CSV package, which will have distributions if it was generated as
and S3 package"""
from metapack.package import CsvPackageBuilder
pkg_dir = m.package_root
name = m.doc.get_value('Root.Name')
package_path, cache_path = CsvPackageBuilder.make_package_path(pkg_dir, name)
if package_path.exists():
return open_package(package_path, downloader=downloader) | [
"def",
"find_csv_packages",
"(",
"m",
",",
"downloader",
")",
":",
"from",
"metapack",
".",
"package",
"import",
"CsvPackageBuilder",
"pkg_dir",
"=",
"m",
".",
"package_root",
"name",
"=",
"m",
".",
"doc",
".",
"get_value",
"(",
"'Root.Name'",
")",
"package_path",
",",
"cache_path",
"=",
"CsvPackageBuilder",
".",
"make_package_path",
"(",
"pkg_dir",
",",
"name",
")",
"if",
"package_path",
".",
"exists",
"(",
")",
":",
"return",
"open_package",
"(",
"package_path",
",",
"downloader",
"=",
"downloader",
")"
] | Locate the build CSV package, which will have distributions if it was generated as
and S3 package | [
"Locate",
"the",
"build",
"CSV",
"package",
"which",
"will",
"have",
"distributions",
"if",
"it",
"was",
"generated",
"as",
"and",
"S3",
"package"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/core.py#L765-L776 |
Microsoft/vsts-cd-manager | continuous_delivery/continuous_delivery.py | ContinuousDelivery.provisioning_configuration | def provisioning_configuration(
self, body, custom_headers=None, raw=False, **operation_config):
"""ProvisioningConfiguration.
:param body:
:type body: :class:`ContinuousDeploymentConfiguration
<vsts_info_provider.models.ContinuousDeploymentConfiguration>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/_apis/continuousdelivery/provisioningconfigurations'
# Construct parameters
query_parameters = {}
if self.api_version:
query_parameters["api-version"] = self.api_version
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'ProvisioningConfiguration')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 202]:
print("POST", request.url, file=stderr)
print("response:", response.status_code, file=stderr)
print(response.text, file=stderr)
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if response.status_code == 202:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | python | def provisioning_configuration(
self, body, custom_headers=None, raw=False, **operation_config):
"""ProvisioningConfiguration.
:param body:
:type body: :class:`ContinuousDeploymentConfiguration
<vsts_info_provider.models.ContinuousDeploymentConfiguration>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/_apis/continuousdelivery/provisioningconfigurations'
# Construct parameters
query_parameters = {}
if self.api_version:
query_parameters["api-version"] = self.api_version
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(body, 'ProvisioningConfiguration')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 202]:
print("POST", request.url, file=stderr)
print("response:", response.status_code, file=stderr)
print(response.text, file=stderr)
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if response.status_code == 202:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | [
"def",
"provisioning_configuration",
"(",
"self",
",",
"body",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"*",
"*",
"operation_config",
")",
":",
"# Construct URL",
"url",
"=",
"'/_apis/continuousdelivery/provisioningconfigurations'",
"# Construct parameters",
"query_parameters",
"=",
"{",
"}",
"if",
"self",
".",
"api_version",
":",
"query_parameters",
"[",
"\"api-version\"",
"]",
"=",
"self",
".",
"api_version",
"# Construct headers",
"header_parameters",
"=",
"{",
"}",
"header_parameters",
"[",
"'Content-Type'",
"]",
"=",
"'application/json; charset=utf-8'",
"if",
"custom_headers",
":",
"header_parameters",
".",
"update",
"(",
"custom_headers",
")",
"# Construct body",
"body_content",
"=",
"self",
".",
"_serialize",
".",
"body",
"(",
"body",
",",
"'ProvisioningConfiguration'",
")",
"# Construct and send request",
"request",
"=",
"self",
".",
"_client",
".",
"post",
"(",
"url",
",",
"query_parameters",
")",
"response",
"=",
"self",
".",
"_client",
".",
"send",
"(",
"request",
",",
"header_parameters",
",",
"body_content",
",",
"*",
"*",
"operation_config",
")",
"if",
"response",
".",
"status_code",
"not",
"in",
"[",
"200",
",",
"202",
"]",
":",
"print",
"(",
"\"POST\"",
",",
"request",
".",
"url",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\"response:\"",
",",
"response",
".",
"status_code",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"response",
".",
"text",
",",
"file",
"=",
"stderr",
")",
"raise",
"HttpOperationError",
"(",
"self",
".",
"_deserialize",
",",
"response",
")",
"deserialized",
"=",
"None",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"deserialized",
"=",
"self",
".",
"_deserialize",
"(",
"'ProvisioningConfiguration'",
",",
"response",
")",
"if",
"response",
".",
"status_code",
"==",
"202",
":",
"deserialized",
"=",
"self",
".",
"_deserialize",
"(",
"'ProvisioningConfiguration'",
",",
"response",
")",
"if",
"raw",
":",
"client_raw_response",
"=",
"ClientRawResponse",
"(",
"deserialized",
",",
"response",
")",
"return",
"client_raw_response",
"return",
"deserialized"
] | ProvisioningConfiguration.
:param body:
:type body: :class:`ContinuousDeploymentConfiguration
<vsts_info_provider.models.ContinuousDeploymentConfiguration>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>` | [
"ProvisioningConfiguration",
"."
] | train | https://github.com/Microsoft/vsts-cd-manager/blob/2649d236be94d119b13e0ac607964c94a9e51fde/continuous_delivery/continuous_delivery.py#L69-L126 |
Microsoft/vsts-cd-manager | continuous_delivery/continuous_delivery.py | ContinuousDelivery.get_provisioning_configuration | def get_provisioning_configuration(
self, provisioning_configuration_id, custom_headers=None, raw=False, **operation_config):
"""GetContinuousDeploymentOperation.
:param provisioning_configuration_id:
:type provisioning_configuration_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/_apis/continuousdelivery/provisioningconfigurations/{provisioningConfigurationId}'
path_format_arguments = {
'provisioningConfigurationId': self._serialize.url("provisioning_configuration_id", provisioning_configuration_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
print("GET", request.url, file=stderr)
print("response:", response.status_code, file=stderr)
print(response.text, file=stderr)
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | python | def get_provisioning_configuration(
self, provisioning_configuration_id, custom_headers=None, raw=False, **operation_config):
"""GetContinuousDeploymentOperation.
:param provisioning_configuration_id:
:type provisioning_configuration_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/_apis/continuousdelivery/provisioningconfigurations/{provisioningConfigurationId}'
path_format_arguments = {
'provisioningConfigurationId': self._serialize.url("provisioning_configuration_id", provisioning_configuration_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
print("GET", request.url, file=stderr)
print("response:", response.status_code, file=stderr)
print(response.text, file=stderr)
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProvisioningConfiguration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized | [
"def",
"get_provisioning_configuration",
"(",
"self",
",",
"provisioning_configuration_id",
",",
"custom_headers",
"=",
"None",
",",
"raw",
"=",
"False",
",",
"*",
"*",
"operation_config",
")",
":",
"# Construct URL",
"url",
"=",
"'/_apis/continuousdelivery/provisioningconfigurations/{provisioningConfigurationId}'",
"path_format_arguments",
"=",
"{",
"'provisioningConfigurationId'",
":",
"self",
".",
"_serialize",
".",
"url",
"(",
"\"provisioning_configuration_id\"",
",",
"provisioning_configuration_id",
",",
"'str'",
")",
"}",
"url",
"=",
"self",
".",
"_client",
".",
"format_url",
"(",
"url",
",",
"*",
"*",
"path_format_arguments",
")",
"# Construct parameters",
"query_parameters",
"=",
"{",
"}",
"# Construct headers",
"header_parameters",
"=",
"{",
"}",
"header_parameters",
"[",
"'Content-Type'",
"]",
"=",
"'application/json; charset=utf-8'",
"if",
"custom_headers",
":",
"header_parameters",
".",
"update",
"(",
"custom_headers",
")",
"# Construct and send request",
"request",
"=",
"self",
".",
"_client",
".",
"get",
"(",
"url",
",",
"query_parameters",
")",
"response",
"=",
"self",
".",
"_client",
".",
"send",
"(",
"request",
",",
"header_parameters",
",",
"*",
"*",
"operation_config",
")",
"if",
"response",
".",
"status_code",
"not",
"in",
"[",
"200",
"]",
":",
"print",
"(",
"\"GET\"",
",",
"request",
".",
"url",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\"response:\"",
",",
"response",
".",
"status_code",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"response",
".",
"text",
",",
"file",
"=",
"stderr",
")",
"raise",
"HttpOperationError",
"(",
"self",
".",
"_deserialize",
",",
"response",
")",
"deserialized",
"=",
"None",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"deserialized",
"=",
"self",
".",
"_deserialize",
"(",
"'ProvisioningConfiguration'",
",",
"response",
")",
"if",
"raw",
":",
"client_raw_response",
"=",
"ClientRawResponse",
"(",
"deserialized",
",",
"response",
")",
"return",
"client_raw_response",
"return",
"deserialized"
] | GetContinuousDeploymentOperation.
:param provisioning_configuration_id:
:type provisioning_configuration_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ContinuousDeploymentOperation
<vsts_info_provider.models.ContinuousDeploymentOperation>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>` | [
"GetContinuousDeploymentOperation",
"."
] | train | https://github.com/Microsoft/vsts-cd-manager/blob/2649d236be94d119b13e0ac607964c94a9e51fde/continuous_delivery/continuous_delivery.py#L128-L180 |
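Because provisioning_configuration() can answer 202 for an operation that is still running, a client typically polls get_provisioning_configuration() with the returned id. A hedged sketch only: `cd` stands for an authenticated ContinuousDelivery client, `body` for a ProvisioningConfiguration request model, and the `id` and `status` attribute names on the result are assumptions to verify against the models module::

    import time

    op = cd.provisioning_configuration(body)           # may come back as a 202
    for _ in range(30):
        op = cd.get_provisioning_configuration(op.id)   # attribute name assumed
        status = getattr(op, 'status', None)            # attribute name assumed
        if status not in (None, 'queued', 'inProgress'):
            break
        time.sleep(10)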
NicolasLM/spinach | spinach/contrib/spinachd/mail.py | serialize_email_messages | def serialize_email_messages(messages: List[EmailMessage]):
"""Serialize EmailMessages to be passed as task argument.
Pickle is used because serializing an EmailMessage to json can be a bit
tricky and would probably break if Django modifies the structure of the
object in the future.
"""
return [
base64.b64encode(zlib.compress(pickle.dumps(m, protocol=4))).decode()
for m in messages
] | python | def serialize_email_messages(messages: List[EmailMessage]):
"""Serialize EmailMessages to be passed as task argument.
Pickle is used because serializing an EmailMessage to json can be a bit
tricky and would probably break if Django modifies the structure of the
object in the future.
"""
return [
base64.b64encode(zlib.compress(pickle.dumps(m, protocol=4))).decode()
for m in messages
] | [
"def",
"serialize_email_messages",
"(",
"messages",
":",
"List",
"[",
"EmailMessage",
"]",
")",
":",
"return",
"[",
"base64",
".",
"b64encode",
"(",
"zlib",
".",
"compress",
"(",
"pickle",
".",
"dumps",
"(",
"m",
",",
"protocol",
"=",
"4",
")",
")",
")",
".",
"decode",
"(",
")",
"for",
"m",
"in",
"messages",
"]"
] | Serialize EmailMessages to be passed as task argument.
Pickle is used because serializing an EmailMessage to json can be a bit
tricky and would probably break if Django modifies the structure of the
object in the future. | [
"Serialize",
"EmailMessages",
"to",
"be",
"passed",
"as",
"task",
"argument",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/spinachd/mail.py#L25-L35 |
NicolasLM/spinach | spinach/contrib/spinachd/mail.py | deserialize_email_messages | def deserialize_email_messages(messages: List[str]):
"""Deserialize EmailMessages passed as task argument."""
return [
pickle.loads(zlib.decompress(base64.b64decode(m)))
for m in messages
] | python | def deserialize_email_messages(messages: List[str]):
"""Deserialize EmailMessages passed as task argument."""
return [
pickle.loads(zlib.decompress(base64.b64decode(m)))
for m in messages
] | [
"def",
"deserialize_email_messages",
"(",
"messages",
":",
"List",
"[",
"str",
"]",
")",
":",
"return",
"[",
"pickle",
".",
"loads",
"(",
"zlib",
".",
"decompress",
"(",
"base64",
".",
"b64decode",
"(",
"m",
")",
")",
")",
"for",
"m",
"in",
"messages",
"]"
] | Deserialize EmailMessages passed as task argument. | [
"Deserialize",
"EmailMessages",
"passed",
"as",
"task",
"argument",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/spinachd/mail.py#L38-L43 |
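A round-trip sketch for the two helpers above; it assumes Django is installed and settings are configured far enough to build EmailMessage instances::

    from django.core.mail import EmailMessage

    msgs = [EmailMessage('Hello', 'body', 'noreply@example.com',
                         ['user@example.com'])]
    payload = serialize_email_messages(msgs)        # list of base64 strings
    restored = deserialize_email_messages(payload)
    print(restored[0].subject, restored[0].to)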
Parsely/probably | probably/cdbf.py | CountdownBloomFilter._estimate_count | def _estimate_count(self):
""" Update the count number using the estimation of the unset ratio """
if self.estimate_z == 0:
self.estimate_z = (1.0 / self.nbr_bits)
self.estimate_z = min(self.estimate_z, 0.999999)
self.count = int(-(self.nbr_bits / self.nbr_slices) * np.log(1 - self.estimate_z)) | python | def _estimate_count(self):
""" Update the count number using the estimation of the unset ratio """
if self.estimate_z == 0:
self.estimate_z = (1.0 / self.nbr_bits)
self.estimate_z = min(self.estimate_z, 0.999999)
self.count = int(-(self.nbr_bits / self.nbr_slices) * np.log(1 - self.estimate_z)) | [
"def",
"_estimate_count",
"(",
"self",
")",
":",
"if",
"self",
".",
"estimate_z",
"==",
"0",
":",
"self",
".",
"estimate_z",
"=",
"(",
"1.0",
"/",
"self",
".",
"nbr_bits",
")",
"self",
".",
"estimate_z",
"=",
"min",
"(",
"self",
".",
"estimate_z",
",",
"0.999999",
")",
"self",
".",
"count",
"=",
"int",
"(",
"-",
"(",
"self",
".",
"nbr_bits",
"/",
"self",
".",
"nbr_slices",
")",
"*",
"np",
".",
"log",
"(",
"1",
"-",
"self",
".",
"estimate_z",
")",
")"
] | Update the count number using the estimation of the unset ratio | [
"Update",
"the",
"count",
"number",
"using",
"the",
"estimation",
"of",
"the",
"unset",
"ratio"
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L42-L47 |
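
The estimate inverts the expected fill ratio of a Bloom filter: with m bits split into k slices and an observed ratio z of set cells, the element count is n ~ -(m/k) * ln(1 - z). A standalone numeric sketch of the same formula (function name and values are illustrative):

import numpy as np

def estimate_count(nbr_bits, nbr_slices, estimate_z):
    # Guard the degenerate cases exactly as the method above does.
    if estimate_z == 0:
        estimate_z = 1.0 / nbr_bits
    estimate_z = min(estimate_z, 0.999999)
    return int(-(nbr_bits / nbr_slices) * np.log(1 - estimate_z))

print(estimate_count(nbr_bits=8192, nbr_slices=4, estimate_z=0.2))  # -> 457
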
Parsely/probably | probably/cdbf.py | CountdownBloomFilter.expiration_maintenance | def expiration_maintenance(self):
""" Decrement cell value if not zero
        This maintenance process needs to be executed every self.compute_refresh_time()
"""
if self.cellarray[self.refresh_head] != 0:
self.cellarray[self.refresh_head] -= 1
self.refresh_head = (self.refresh_head + 1) % self.nbr_bits | python | def expiration_maintenance(self):
""" Decrement cell value if not zero
        This maintenance process needs to be executed every self.compute_refresh_time()
"""
if self.cellarray[self.refresh_head] != 0:
self.cellarray[self.refresh_head] -= 1
self.refresh_head = (self.refresh_head + 1) % self.nbr_bits | [
"def",
"expiration_maintenance",
"(",
"self",
")",
":",
"if",
"self",
".",
"cellarray",
"[",
"self",
".",
"refresh_head",
"]",
"!=",
"0",
":",
"self",
".",
"cellarray",
"[",
"self",
".",
"refresh_head",
"]",
"-=",
"1",
"self",
".",
"refresh_head",
"=",
"(",
"self",
".",
"refresh_head",
"+",
"1",
")",
"%",
"self",
".",
"nbr_bits"
] | Decrement cell value if not zero
        This maintenance process needs to be executed every self.compute_refresh_time() | [
"Decrement",
"cell",
"value",
"if",
"not",
"zero",
"This",
"maintenance",
"process",
"need",
"to",
"executed",
"each",
"self",
".",
"compute_refresh_time",
"()"
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L49-L55 |
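
The maintenance step treats the cell array as a ring buffer: decrement the cell under the head if it is non-zero, then advance the head. A tiny standalone sketch of one full sweep (array contents are illustrative):

import numpy as np

cells = np.array([3, 0, 2, 1], dtype=np.uint8)
head = 0

def expiration_step():
    # Decrement the current cell if non-zero, then move the ring pointer on.
    global head
    if cells[head] != 0:
        cells[head] -= 1
    head = (head + 1) % len(cells)

for _ in range(len(cells)):
    expiration_step()
print(cells)  # [2 0 1 0]
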
Parsely/probably | probably/cdbf.py | CountdownBloomFilter.batched_expiration_maintenance_dev | def batched_expiration_maintenance_dev(self, elapsed_time):
""" Batched version of expiration_maintenance() """
num_iterations = self.num_batched_maintenance(elapsed_time)
for i in range(num_iterations):
self.expiration_maintenance() | python | def batched_expiration_maintenance_dev(self, elapsed_time):
""" Batched version of expiration_maintenance() """
num_iterations = self.num_batched_maintenance(elapsed_time)
for i in range(num_iterations):
self.expiration_maintenance() | [
"def",
"batched_expiration_maintenance_dev",
"(",
"self",
",",
"elapsed_time",
")",
":",
"num_iterations",
"=",
"self",
".",
"num_batched_maintenance",
"(",
"elapsed_time",
")",
"for",
"i",
"in",
"range",
"(",
"num_iterations",
")",
":",
"self",
".",
"expiration_maintenance",
"(",
")"
] | Batched version of expiration_maintenance() | [
"Batched",
"version",
"of",
"expiration_maintenance",
"()"
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L57-L61 |
Parsely/probably | probably/cdbf.py | CountdownBloomFilter.batched_expiration_maintenance | def batched_expiration_maintenance(self, elapsed_time):
""" Batched version of expiration_maintenance()
Cython version
"""
num_iterations = self.num_batched_maintenance(elapsed_time)
self.refresh_head, nonzero = maintenance(self.cellarray, self.nbr_bits, num_iterations, self.refresh_head)
if num_iterations != 0:
self.estimate_z = float(nonzero) / float(num_iterations)
self._estimate_count()
processed_interval = num_iterations * self.compute_refresh_time()
return processed_interval | python | def batched_expiration_maintenance(self, elapsed_time):
""" Batched version of expiration_maintenance()
Cython version
"""
num_iterations = self.num_batched_maintenance(elapsed_time)
self.refresh_head, nonzero = maintenance(self.cellarray, self.nbr_bits, num_iterations, self.refresh_head)
if num_iterations != 0:
self.estimate_z = float(nonzero) / float(num_iterations)
self._estimate_count()
processed_interval = num_iterations * self.compute_refresh_time()
return processed_interval | [
"def",
"batched_expiration_maintenance",
"(",
"self",
",",
"elapsed_time",
")",
":",
"num_iterations",
"=",
"self",
".",
"num_batched_maintenance",
"(",
"elapsed_time",
")",
"self",
".",
"refresh_head",
",",
"nonzero",
"=",
"maintenance",
"(",
"self",
".",
"cellarray",
",",
"self",
".",
"nbr_bits",
",",
"num_iterations",
",",
"self",
".",
"refresh_head",
")",
"if",
"num_iterations",
"!=",
"0",
":",
"self",
".",
"estimate_z",
"=",
"float",
"(",
"nonzero",
")",
"/",
"float",
"(",
"num_iterations",
")",
"self",
".",
"_estimate_count",
"(",
")",
"processed_interval",
"=",
"num_iterations",
"*",
"self",
".",
"compute_refresh_time",
"(",
")",
"return",
"processed_interval"
] | Batched version of expiration_maintenance()
Cython version | [
"Batched",
"version",
"of",
"expiration_maintenance",
"()",
"Cython",
"version"
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L63-L73 |
Parsely/probably | probably/cdbf.py | CountdownBloomFilter.compute_refresh_time | def compute_refresh_time(self):
""" Compute the refresh period for the given expiration delay """
if self.z == 0:
self.z = 1E-10
s = float(self.expiration) * (1.0/(self.nbr_bits)) * (1.0/(self.counter_init - 1 + (1.0/(self.z * (self.nbr_slices + 1)))))
return s | python | def compute_refresh_time(self):
""" Compute the refresh period for the given expiration delay """
if self.z == 0:
self.z = 1E-10
s = float(self.expiration) * (1.0/(self.nbr_bits)) * (1.0/(self.counter_init - 1 + (1.0/(self.z * (self.nbr_slices + 1)))))
return s | [
"def",
"compute_refresh_time",
"(",
"self",
")",
":",
"if",
"self",
".",
"z",
"==",
"0",
":",
"self",
".",
"z",
"=",
"1E-10",
"s",
"=",
"float",
"(",
"self",
".",
"expiration",
")",
"*",
"(",
"1.0",
"/",
"(",
"self",
".",
"nbr_bits",
")",
")",
"*",
"(",
"1.0",
"/",
"(",
"self",
".",
"counter_init",
"-",
"1",
"+",
"(",
"1.0",
"/",
"(",
"self",
".",
"z",
"*",
"(",
"self",
".",
"nbr_slices",
"+",
"1",
")",
")",
")",
")",
")",
"return",
"s"
] | Compute the refresh period for the given expiration delay | [
"Compute",
"the",
"refresh",
"period",
"for",
"the",
"given",
"expiration",
"delay"
] | train | https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L75-L80 |
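
Written out, the refresh period is t = expiration / (m * (c0 - 1 + 1 / (z * (k + 1)))), with m total bits, k slices, c0 the initial counter value and z the set-cell estimate. A small numeric sketch (values are illustrative):

def refresh_time(expiration, nbr_bits, nbr_slices, counter_init, z):
    # Interval between single-cell maintenance steps so that counters decay
    # over roughly the requested expiration delay.
    if z == 0:
        z = 1e-10
    return float(expiration) / (nbr_bits * (counter_init - 1 + 1.0 / (z * (nbr_slices + 1))))

print(refresh_time(expiration=60.0, nbr_bits=8192, nbr_slices=4,
                   counter_init=15, z=0.5))  # ~5.09e-04 seconds
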
NeuroML/NeuroMLlite | examples/Example7.py | generate | def generate():
################################################################################
### Build new network
net = Network(id='Example7_Brunel2000')
net.notes = 'Example 7: based on network of Brunel 2000'
net.parameters = { 'g': 4,
'eta': 1,
'order': 5,
'epsilon': 0.1,
'J': 0.1,
'delay': 1.5,
'tauMem': 20.0,
'tauSyn': 0.1,
'tauRef': 2.0,
'U0': 0.0,
'theta': 20.0}
cell = Cell(id='ifcell', pynn_cell='IF_curr_alpha')
cell.parameters = { 'tau_m': 'tauMem',
'tau_refrac': 'tauRef',
'v_rest': 'U0',
'v_reset': 'U0',
'v_thresh': 'theta',
'cm': 0.001,
"i_offset": 0}
#cell = Cell(id='hhcell', neuroml2_source_file='test_files/hhcell.cell.nml')
net.cells.append(cell)
expoisson = Cell(id='expoisson', pynn_cell='SpikeSourcePoisson')
expoisson.parameters = { 'rate': '1000 * (eta*theta/(J*4*order*epsilon*tauMem)) * (4*order*epsilon)',
'start': 0,
'duration': 1e9}
net.cells.append(expoisson)
'''
input_source = InputSource(id='iclamp0',
pynn_input='DCSource',
parameters={'amplitude':0.002, 'start':100., 'stop':900.})
input_source = InputSource(id='poissonFiringSyn',
neuroml2_input='poissonFiringSynapse',
parameters={'average_rate':"eta", 'synapse':"ampa", 'spike_target':"./ampa"})
net.input_sources.append(input_source)'''
pE = Population(id='Epop', size='4*order', component=cell.id, properties={'color':'1 0 0'})
pEpoisson = Population(id='Einput', size='4*order', component=expoisson.id, properties={'color':'.5 0 0'})
pI = Population(id='Ipop', size='1*order', component=cell.id, properties={'color':'0 0 1'})
net.populations.append(pE)
net.populations.append(pEpoisson)
net.populations.append(pI)
net.synapses.append(Synapse(id='ampa',
pynn_receptor_type='excitatory',
pynn_synapse_type='curr_alpha',
parameters={'tau_syn':0.1}))
net.synapses.append(Synapse(id='gaba',
pynn_receptor_type='inhibitory',
pynn_synapse_type='curr_alpha',
parameters={'tau_syn':0.1}))
net.projections.append(Projection(id='projEinput',
presynaptic=pEpoisson.id,
postsynaptic=pE.id,
synapse='ampa',
delay=2,
weight=0.02,
one_to_one_connector=OneToOneConnector()))
'''
net.projections.append(Projection(id='projEE',
presynaptic=pE.id,
postsynaptic=pE.id,
synapse='ampa',
delay=2,
weight=0.002,
random_connectivity=RandomConnectivity(probability=.5)))'''
net.projections.append(Projection(id='projEI',
presynaptic=pE.id,
postsynaptic=pI.id,
synapse='ampa',
delay=2,
weight=0.02,
random_connectivity=RandomConnectivity(probability=.5)))
'''
net.projections.append(Projection(id='projIE',
presynaptic=pI.id,
postsynaptic=pE.id,
synapse='gaba',
delay=2,
weight=0.02,
random_connectivity=RandomConnectivity(probability=.5)))
net.inputs.append(Input(id='stim',
input_source=input_source.id,
population=pE.id,
percentage=50))'''
#print(net)
#print(net.to_json())
new_file = net.to_json_file('%s.json'%net.id)
################################################################################
### Build Simulation object & save as JSON
sim = Simulation(id='SimExample7',
network=new_file,
duration='1000',
dt='0.025',
seed= 123,
recordTraces={pE.id:'*',pI.id:'*'},
recordSpikes={'all':'*'})
sim.to_json_file()
return sim, net | python | def generate():
################################################################################
### Build new network
net = Network(id='Example7_Brunel2000')
net.notes = 'Example 7: based on network of Brunel 2000'
net.parameters = { 'g': 4,
'eta': 1,
'order': 5,
'epsilon': 0.1,
'J': 0.1,
'delay': 1.5,
'tauMem': 20.0,
'tauSyn': 0.1,
'tauRef': 2.0,
'U0': 0.0,
'theta': 20.0}
cell = Cell(id='ifcell', pynn_cell='IF_curr_alpha')
cell.parameters = { 'tau_m': 'tauMem',
'tau_refrac': 'tauRef',
'v_rest': 'U0',
'v_reset': 'U0',
'v_thresh': 'theta',
'cm': 0.001,
"i_offset": 0}
#cell = Cell(id='hhcell', neuroml2_source_file='test_files/hhcell.cell.nml')
net.cells.append(cell)
expoisson = Cell(id='expoisson', pynn_cell='SpikeSourcePoisson')
expoisson.parameters = { 'rate': '1000 * (eta*theta/(J*4*order*epsilon*tauMem)) * (4*order*epsilon)',
'start': 0,
'duration': 1e9}
net.cells.append(expoisson)
'''
input_source = InputSource(id='iclamp0',
pynn_input='DCSource',
parameters={'amplitude':0.002, 'start':100., 'stop':900.})
input_source = InputSource(id='poissonFiringSyn',
neuroml2_input='poissonFiringSynapse',
parameters={'average_rate':"eta", 'synapse':"ampa", 'spike_target':"./ampa"})
net.input_sources.append(input_source)'''
pE = Population(id='Epop', size='4*order', component=cell.id, properties={'color':'1 0 0'})
pEpoisson = Population(id='Einput', size='4*order', component=expoisson.id, properties={'color':'.5 0 0'})
pI = Population(id='Ipop', size='1*order', component=cell.id, properties={'color':'0 0 1'})
net.populations.append(pE)
net.populations.append(pEpoisson)
net.populations.append(pI)
net.synapses.append(Synapse(id='ampa',
pynn_receptor_type='excitatory',
pynn_synapse_type='curr_alpha',
parameters={'tau_syn':0.1}))
net.synapses.append(Synapse(id='gaba',
pynn_receptor_type='inhibitory',
pynn_synapse_type='curr_alpha',
parameters={'tau_syn':0.1}))
net.projections.append(Projection(id='projEinput',
presynaptic=pEpoisson.id,
postsynaptic=pE.id,
synapse='ampa',
delay=2,
weight=0.02,
one_to_one_connector=OneToOneConnector()))
'''
net.projections.append(Projection(id='projEE',
presynaptic=pE.id,
postsynaptic=pE.id,
synapse='ampa',
delay=2,
weight=0.002,
random_connectivity=RandomConnectivity(probability=.5)))'''
net.projections.append(Projection(id='projEI',
presynaptic=pE.id,
postsynaptic=pI.id,
synapse='ampa',
delay=2,
weight=0.02,
random_connectivity=RandomConnectivity(probability=.5)))
'''
net.projections.append(Projection(id='projIE',
presynaptic=pI.id,
postsynaptic=pE.id,
synapse='gaba',
delay=2,
weight=0.02,
random_connectivity=RandomConnectivity(probability=.5)))
net.inputs.append(Input(id='stim',
input_source=input_source.id,
population=pE.id,
percentage=50))'''
#print(net)
#print(net.to_json())
new_file = net.to_json_file('%s.json'%net.id)
################################################################################
### Build Simulation object & save as JSON
sim = Simulation(id='SimExample7',
network=new_file,
duration='1000',
dt='0.025',
seed= 123,
recordTraces={pE.id:'*',pI.id:'*'},
recordSpikes={'all':'*'})
sim.to_json_file()
return sim, net | [
"def",
"generate",
"(",
")",
":",
"################################################################################",
"### Build new network",
"net",
"=",
"Network",
"(",
"id",
"=",
"'Example7_Brunel2000'",
")",
"net",
".",
"notes",
"=",
"'Example 7: based on network of Brunel 2000'",
"net",
".",
"parameters",
"=",
"{",
"'g'",
":",
"4",
",",
"'eta'",
":",
"1",
",",
"'order'",
":",
"5",
",",
"'epsilon'",
":",
"0.1",
",",
"'J'",
":",
"0.1",
",",
"'delay'",
":",
"1.5",
",",
"'tauMem'",
":",
"20.0",
",",
"'tauSyn'",
":",
"0.1",
",",
"'tauRef'",
":",
"2.0",
",",
"'U0'",
":",
"0.0",
",",
"'theta'",
":",
"20.0",
"}",
"cell",
"=",
"Cell",
"(",
"id",
"=",
"'ifcell'",
",",
"pynn_cell",
"=",
"'IF_curr_alpha'",
")",
"cell",
".",
"parameters",
"=",
"{",
"'tau_m'",
":",
"'tauMem'",
",",
"'tau_refrac'",
":",
"'tauRef'",
",",
"'v_rest'",
":",
"'U0'",
",",
"'v_reset'",
":",
"'U0'",
",",
"'v_thresh'",
":",
"'theta'",
",",
"'cm'",
":",
"0.001",
",",
"\"i_offset\"",
":",
"0",
"}",
"#cell = Cell(id='hhcell', neuroml2_source_file='test_files/hhcell.cell.nml')",
"net",
".",
"cells",
".",
"append",
"(",
"cell",
")",
"expoisson",
"=",
"Cell",
"(",
"id",
"=",
"'expoisson'",
",",
"pynn_cell",
"=",
"'SpikeSourcePoisson'",
")",
"expoisson",
".",
"parameters",
"=",
"{",
"'rate'",
":",
"'1000 * (eta*theta/(J*4*order*epsilon*tauMem)) * (4*order*epsilon)'",
",",
"'start'",
":",
"0",
",",
"'duration'",
":",
"1e9",
"}",
"net",
".",
"cells",
".",
"append",
"(",
"expoisson",
")",
"pE",
"=",
"Population",
"(",
"id",
"=",
"'Epop'",
",",
"size",
"=",
"'4*order'",
",",
"component",
"=",
"cell",
".",
"id",
",",
"properties",
"=",
"{",
"'color'",
":",
"'1 0 0'",
"}",
")",
"pEpoisson",
"=",
"Population",
"(",
"id",
"=",
"'Einput'",
",",
"size",
"=",
"'4*order'",
",",
"component",
"=",
"expoisson",
".",
"id",
",",
"properties",
"=",
"{",
"'color'",
":",
"'.5 0 0'",
"}",
")",
"pI",
"=",
"Population",
"(",
"id",
"=",
"'Ipop'",
",",
"size",
"=",
"'1*order'",
",",
"component",
"=",
"cell",
".",
"id",
",",
"properties",
"=",
"{",
"'color'",
":",
"'0 0 1'",
"}",
")",
"net",
".",
"populations",
".",
"append",
"(",
"pE",
")",
"net",
".",
"populations",
".",
"append",
"(",
"pEpoisson",
")",
"net",
".",
"populations",
".",
"append",
"(",
"pI",
")",
"net",
".",
"synapses",
".",
"append",
"(",
"Synapse",
"(",
"id",
"=",
"'ampa'",
",",
"pynn_receptor_type",
"=",
"'excitatory'",
",",
"pynn_synapse_type",
"=",
"'curr_alpha'",
",",
"parameters",
"=",
"{",
"'tau_syn'",
":",
"0.1",
"}",
")",
")",
"net",
".",
"synapses",
".",
"append",
"(",
"Synapse",
"(",
"id",
"=",
"'gaba'",
",",
"pynn_receptor_type",
"=",
"'inhibitory'",
",",
"pynn_synapse_type",
"=",
"'curr_alpha'",
",",
"parameters",
"=",
"{",
"'tau_syn'",
":",
"0.1",
"}",
")",
")",
"net",
".",
"projections",
".",
"append",
"(",
"Projection",
"(",
"id",
"=",
"'projEinput'",
",",
"presynaptic",
"=",
"pEpoisson",
".",
"id",
",",
"postsynaptic",
"=",
"pE",
".",
"id",
",",
"synapse",
"=",
"'ampa'",
",",
"delay",
"=",
"2",
",",
"weight",
"=",
"0.02",
",",
"one_to_one_connector",
"=",
"OneToOneConnector",
"(",
")",
")",
")",
"''' \n net.projections.append(Projection(id='projEE',\n presynaptic=pE.id, \n postsynaptic=pE.id,\n synapse='ampa',\n delay=2,\n weight=0.002,\n random_connectivity=RandomConnectivity(probability=.5)))'''",
"net",
".",
"projections",
".",
"append",
"(",
"Projection",
"(",
"id",
"=",
"'projEI'",
",",
"presynaptic",
"=",
"pE",
".",
"id",
",",
"postsynaptic",
"=",
"pI",
".",
"id",
",",
"synapse",
"=",
"'ampa'",
",",
"delay",
"=",
"2",
",",
"weight",
"=",
"0.02",
",",
"random_connectivity",
"=",
"RandomConnectivity",
"(",
"probability",
"=",
".5",
")",
")",
")",
"'''\n net.projections.append(Projection(id='projIE',\n presynaptic=pI.id, \n postsynaptic=pE.id,\n synapse='gaba',\n delay=2,\n weight=0.02,\n random_connectivity=RandomConnectivity(probability=.5)))\n\n net.inputs.append(Input(id='stim',\n input_source=input_source.id,\n population=pE.id,\n percentage=50))'''",
"#print(net)",
"#print(net.to_json())",
"new_file",
"=",
"net",
".",
"to_json_file",
"(",
"'%s.json'",
"%",
"net",
".",
"id",
")",
"################################################################################",
"### Build Simulation object & save as JSON",
"sim",
"=",
"Simulation",
"(",
"id",
"=",
"'SimExample7'",
",",
"network",
"=",
"new_file",
",",
"duration",
"=",
"'1000'",
",",
"dt",
"=",
"'0.025'",
",",
"seed",
"=",
"123",
",",
"recordTraces",
"=",
"{",
"pE",
".",
"id",
":",
"'*'",
",",
"pI",
".",
"id",
":",
"'*'",
"}",
",",
"recordSpikes",
"=",
"{",
"'all'",
":",
"'*'",
"}",
")",
"sim",
".",
"to_json_file",
"(",
")",
"return",
"sim",
",",
"net"
] | input_source = InputSource(id='iclamp0',
pynn_input='DCSource',
parameters={'amplitude':0.002, 'start':100., 'stop':900.})
input_source = InputSource(id='poissonFiringSyn',
neuroml2_input='poissonFiringSynapse',
parameters={'average_rate':"eta", 'synapse':"ampa", 'spike_target':"./ampa"})
net.input_sources.append(input_source) | [
"input_source",
"=",
"InputSource",
"(",
"id",
"=",
"iclamp0",
"pynn_input",
"=",
"DCSource",
"parameters",
"=",
"{",
"amplitude",
":",
"0",
".",
"002",
"start",
":",
"100",
".",
"stop",
":",
"900",
".",
"}",
")"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/examples/Example7.py#L10-L137 |
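
Since generate() both writes the NeuroMLlite JSON descriptions and returns the Simulation and Network objects, a minimal driver for the example is just the call below (assuming the neuromllite imports at the top of Example7.py are in place):

if __name__ == '__main__':
    sim, net = generate()
    print('Generated network %s and simulation %s' % (net.id, sim.id))
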
project-rig/rig | rig/place_and_route/allocate/greedy.py | allocate | def allocate(vertices_resources, nets, machine, constraints, placements):
"""Allocate resources to vertices on cores arbitrarily using a simple greedy
algorithm.
"""
allocation = {}
# Globally reserved resource ranges {resource, [slice, ...], ...}
globally_reserved = defaultdict(list)
# Locally reserved resource ranges {(x, y): {resource, [slice, ...], ...}}
locally_reserved = defaultdict(lambda: defaultdict(list))
# Alignment of each resource
alignments = defaultdict(lambda: 1)
# Collect constraints
for constraint in constraints:
if isinstance(constraint, ReserveResourceConstraint):
if constraint.location is None:
globally_reserved[constraint.resource].append(
constraint.reservation)
else:
locally_reserved[constraint.location][
constraint.resource].append(constraint.reservation)
elif isinstance(constraint, AlignResourceConstraint):
alignments[constraint.resource] = constraint.alignment
# A dictionary {(x, y): [vertex, ...], ...}
chip_contents = defaultdict(list)
for vertex, xy in iteritems(placements):
chip_contents[xy].append(vertex)
for xy, chip_vertices in iteritems(chip_contents):
# Index of the next free resource in the current chip
resource_pointers = {resource: 0
for resource in machine.chip_resources}
for vertex in chip_vertices:
vertex_allocation = {}
# Make allocations, advancing resource pointers
for resource, requirement in iteritems(vertices_resources[vertex]):
proposed_allocation = None
proposal_overlaps = True
while proposal_overlaps:
# Check that the proposed allocation doesn't overlap a
# reserved area.
start = align(resource_pointers[resource],
alignments[resource])
proposed_allocation = slice(start, start + requirement)
proposal_overlaps = False
if proposed_allocation.stop > machine[xy][resource]:
raise InsufficientResourceError(
"{} over-allocated on chip {}".format(resource,
xy))
for reservation in globally_reserved[resource]:
if slices_overlap(proposed_allocation, reservation):
resource_pointers[resource] = reservation.stop
proposal_overlaps = True
local_reservations \
= locally_reserved.get(xy, {}).get(resource, [])
for reservation in local_reservations:
if slices_overlap(proposed_allocation, reservation):
resource_pointers[resource] = reservation.stop
proposal_overlaps = True
# Getting here means the proposed allocation is not blocked
# by any reservations
vertex_allocation[resource] = proposed_allocation
resource_pointers[resource] = proposed_allocation.stop
allocation[vertex] = vertex_allocation
return allocation | python | def allocate(vertices_resources, nets, machine, constraints, placements):
"""Allocate resources to vertices on cores arbitrarily using a simple greedy
algorithm.
"""
allocation = {}
# Globally reserved resource ranges {resource, [slice, ...], ...}
globally_reserved = defaultdict(list)
# Locally reserved resource ranges {(x, y): {resource, [slice, ...], ...}}
locally_reserved = defaultdict(lambda: defaultdict(list))
# Alignment of each resource
alignments = defaultdict(lambda: 1)
# Collect constraints
for constraint in constraints:
if isinstance(constraint, ReserveResourceConstraint):
if constraint.location is None:
globally_reserved[constraint.resource].append(
constraint.reservation)
else:
locally_reserved[constraint.location][
constraint.resource].append(constraint.reservation)
elif isinstance(constraint, AlignResourceConstraint):
alignments[constraint.resource] = constraint.alignment
# A dictionary {(x, y): [vertex, ...], ...}
chip_contents = defaultdict(list)
for vertex, xy in iteritems(placements):
chip_contents[xy].append(vertex)
for xy, chip_vertices in iteritems(chip_contents):
# Index of the next free resource in the current chip
resource_pointers = {resource: 0
for resource in machine.chip_resources}
for vertex in chip_vertices:
vertex_allocation = {}
# Make allocations, advancing resource pointers
for resource, requirement in iteritems(vertices_resources[vertex]):
proposed_allocation = None
proposal_overlaps = True
while proposal_overlaps:
# Check that the proposed allocation doesn't overlap a
# reserved area.
start = align(resource_pointers[resource],
alignments[resource])
proposed_allocation = slice(start, start + requirement)
proposal_overlaps = False
if proposed_allocation.stop > machine[xy][resource]:
raise InsufficientResourceError(
"{} over-allocated on chip {}".format(resource,
xy))
for reservation in globally_reserved[resource]:
if slices_overlap(proposed_allocation, reservation):
resource_pointers[resource] = reservation.stop
proposal_overlaps = True
local_reservations \
= locally_reserved.get(xy, {}).get(resource, [])
for reservation in local_reservations:
if slices_overlap(proposed_allocation, reservation):
resource_pointers[resource] = reservation.stop
proposal_overlaps = True
# Getting here means the proposed allocation is not blocked
# by any reservations
vertex_allocation[resource] = proposed_allocation
resource_pointers[resource] = proposed_allocation.stop
allocation[vertex] = vertex_allocation
return allocation | [
"def",
"allocate",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"placements",
")",
":",
"allocation",
"=",
"{",
"}",
"# Globally reserved resource ranges {resource, [slice, ...], ...}",
"globally_reserved",
"=",
"defaultdict",
"(",
"list",
")",
"# Locally reserved resource ranges {(x, y): {resource, [slice, ...], ...}}",
"locally_reserved",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"list",
")",
")",
"# Alignment of each resource",
"alignments",
"=",
"defaultdict",
"(",
"lambda",
":",
"1",
")",
"# Collect constraints",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"ReserveResourceConstraint",
")",
":",
"if",
"constraint",
".",
"location",
"is",
"None",
":",
"globally_reserved",
"[",
"constraint",
".",
"resource",
"]",
".",
"append",
"(",
"constraint",
".",
"reservation",
")",
"else",
":",
"locally_reserved",
"[",
"constraint",
".",
"location",
"]",
"[",
"constraint",
".",
"resource",
"]",
".",
"append",
"(",
"constraint",
".",
"reservation",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"AlignResourceConstraint",
")",
":",
"alignments",
"[",
"constraint",
".",
"resource",
"]",
"=",
"constraint",
".",
"alignment",
"# A dictionary {(x, y): [vertex, ...], ...}",
"chip_contents",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"vertex",
",",
"xy",
"in",
"iteritems",
"(",
"placements",
")",
":",
"chip_contents",
"[",
"xy",
"]",
".",
"append",
"(",
"vertex",
")",
"for",
"xy",
",",
"chip_vertices",
"in",
"iteritems",
"(",
"chip_contents",
")",
":",
"# Index of the next free resource in the current chip",
"resource_pointers",
"=",
"{",
"resource",
":",
"0",
"for",
"resource",
"in",
"machine",
".",
"chip_resources",
"}",
"for",
"vertex",
"in",
"chip_vertices",
":",
"vertex_allocation",
"=",
"{",
"}",
"# Make allocations, advancing resource pointers",
"for",
"resource",
",",
"requirement",
"in",
"iteritems",
"(",
"vertices_resources",
"[",
"vertex",
"]",
")",
":",
"proposed_allocation",
"=",
"None",
"proposal_overlaps",
"=",
"True",
"while",
"proposal_overlaps",
":",
"# Check that the proposed allocation doesn't overlap a",
"# reserved area.",
"start",
"=",
"align",
"(",
"resource_pointers",
"[",
"resource",
"]",
",",
"alignments",
"[",
"resource",
"]",
")",
"proposed_allocation",
"=",
"slice",
"(",
"start",
",",
"start",
"+",
"requirement",
")",
"proposal_overlaps",
"=",
"False",
"if",
"proposed_allocation",
".",
"stop",
">",
"machine",
"[",
"xy",
"]",
"[",
"resource",
"]",
":",
"raise",
"InsufficientResourceError",
"(",
"\"{} over-allocated on chip {}\"",
".",
"format",
"(",
"resource",
",",
"xy",
")",
")",
"for",
"reservation",
"in",
"globally_reserved",
"[",
"resource",
"]",
":",
"if",
"slices_overlap",
"(",
"proposed_allocation",
",",
"reservation",
")",
":",
"resource_pointers",
"[",
"resource",
"]",
"=",
"reservation",
".",
"stop",
"proposal_overlaps",
"=",
"True",
"local_reservations",
"=",
"locally_reserved",
".",
"get",
"(",
"xy",
",",
"{",
"}",
")",
".",
"get",
"(",
"resource",
",",
"[",
"]",
")",
"for",
"reservation",
"in",
"local_reservations",
":",
"if",
"slices_overlap",
"(",
"proposed_allocation",
",",
"reservation",
")",
":",
"resource_pointers",
"[",
"resource",
"]",
"=",
"reservation",
".",
"stop",
"proposal_overlaps",
"=",
"True",
"# Getting here means the proposed allocation is not blocked",
"# by any reservations",
"vertex_allocation",
"[",
"resource",
"]",
"=",
"proposed_allocation",
"resource_pointers",
"[",
"resource",
"]",
"=",
"proposed_allocation",
".",
"stop",
"allocation",
"[",
"vertex",
"]",
"=",
"vertex_allocation",
"return",
"allocation"
] | Allocate resources to vertices on cores arbitrarily using a simple greedy
algorithm. | [
"Allocate",
"resources",
"to",
"vertices",
"on",
"cores",
"arbitrarily",
"using",
"a",
"simple",
"greedy",
"algorithm",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/allocate/greedy.py#L27-L99 |
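
The allocator leans on two helpers imported elsewhere in rig, align and slices_overlap. Their behaviour, as implied by the usage above, is roughly the following (a hedged re-implementation for illustration, not the library's actual code):

def align(value, alignment):
    # Round value up to the next multiple of alignment.
    return ((value + alignment - 1) // alignment) * alignment

def slices_overlap(a, b):
    # True when two half-open [start, stop) slices share at least one index.
    return a.start < b.stop and b.start < a.stop

assert align(5, 4) == 8
assert slices_overlap(slice(0, 4), slice(3, 6))
assert not slices_overlap(slice(0, 4), slice(4, 6))
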
openstack/networking-hyperv | networking_hyperv/neutron/qos/qos_driver.py | QosHyperVAgentDriver.create | def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
LOG.info("Setting QoS policy %(qos_policy)s on port %(port)s",
dict(qos_policy=qos_policy, port=port))
policy_data = self._get_policy_values(qos_policy)
self._utils.set_port_qos_rule(port["port_id"], policy_data) | python | def create(self, port, qos_policy):
"""Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port.
"""
LOG.info("Setting QoS policy %(qos_policy)s on port %(port)s",
dict(qos_policy=qos_policy, port=port))
policy_data = self._get_policy_values(qos_policy)
self._utils.set_port_qos_rule(port["port_id"], policy_data) | [
"def",
"create",
"(",
"self",
",",
"port",
",",
"qos_policy",
")",
":",
"LOG",
".",
"info",
"(",
"\"Setting QoS policy %(qos_policy)s on port %(port)s\"",
",",
"dict",
"(",
"qos_policy",
"=",
"qos_policy",
",",
"port",
"=",
"port",
")",
")",
"policy_data",
"=",
"self",
".",
"_get_policy_values",
"(",
"qos_policy",
")",
"self",
".",
"_utils",
".",
"set_port_qos_rule",
"(",
"port",
"[",
"\"port_id\"",
"]",
",",
"policy_data",
")"
] | Apply QoS rules on port for the first time.
:param port: port object.
:param qos_policy: the QoS policy to be applied on port. | [
"Apply",
"QoS",
"rules",
"on",
"port",
"for",
"the",
"first",
"time",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/qos/qos_driver.py#L35-L45 |
openstack/networking-hyperv | networking_hyperv/neutron/qos/qos_driver.py | QosHyperVAgentDriver.delete | def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
LOG.info("Deleting QoS policy %(qos_policy)s on port %(port)s",
dict(qos_policy=qos_policy, port=port))
self._utils.remove_port_qos_rule(port["port_id"]) | python | def delete(self, port, qos_policy=None):
"""Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port.
"""
LOG.info("Deleting QoS policy %(qos_policy)s on port %(port)s",
dict(qos_policy=qos_policy, port=port))
self._utils.remove_port_qos_rule(port["port_id"]) | [
"def",
"delete",
"(",
"self",
",",
"port",
",",
"qos_policy",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"\"Deleting QoS policy %(qos_policy)s on port %(port)s\"",
",",
"dict",
"(",
"qos_policy",
"=",
"qos_policy",
",",
"port",
"=",
"port",
")",
")",
"self",
".",
"_utils",
".",
"remove_port_qos_rule",
"(",
"port",
"[",
"\"port_id\"",
"]",
")"
] | Remove QoS rules from port.
:param port: port object.
:param qos_policy: the QoS policy to be removed from port. | [
"Remove",
"QoS",
"rules",
"from",
"port",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/qos/qos_driver.py#L59-L68 |
NicolasLM/spinach | spinach/task.py | Tasks.task | def task(self, func: Optional[Callable]=None, name: Optional[str]=None,
queue: Optional[str]=None, max_retries: Optional[Number]=None,
periodicity: Optional[timedelta]=None):
"""Decorator to register a task function.
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> @tasks.task(name='foo')
>>> def foo():
... pass
"""
if func is None:
return functools.partial(self.task, name=name, queue=queue,
max_retries=max_retries,
periodicity=periodicity)
self.add(func, name=name, queue=queue, max_retries=max_retries,
periodicity=periodicity)
# Add an attribute to the function to be able to conveniently use it as
# spin.schedule(function) instead of spin.schedule('task_name')
func.task_name = name
return func | python | def task(self, func: Optional[Callable]=None, name: Optional[str]=None,
queue: Optional[str]=None, max_retries: Optional[Number]=None,
periodicity: Optional[timedelta]=None):
"""Decorator to register a task function.
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> @tasks.task(name='foo')
>>> def foo():
... pass
"""
if func is None:
return functools.partial(self.task, name=name, queue=queue,
max_retries=max_retries,
periodicity=periodicity)
self.add(func, name=name, queue=queue, max_retries=max_retries,
periodicity=periodicity)
# Add an attribute to the function to be able to conveniently use it as
# spin.schedule(function) instead of spin.schedule('task_name')
func.task_name = name
return func | [
"def",
"task",
"(",
"self",
",",
"func",
":",
"Optional",
"[",
"Callable",
"]",
"=",
"None",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"queue",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"max_retries",
":",
"Optional",
"[",
"Number",
"]",
"=",
"None",
",",
"periodicity",
":",
"Optional",
"[",
"timedelta",
"]",
"=",
"None",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"functools",
".",
"partial",
"(",
"self",
".",
"task",
",",
"name",
"=",
"name",
",",
"queue",
"=",
"queue",
",",
"max_retries",
"=",
"max_retries",
",",
"periodicity",
"=",
"periodicity",
")",
"self",
".",
"add",
"(",
"func",
",",
"name",
"=",
"name",
",",
"queue",
"=",
"queue",
",",
"max_retries",
"=",
"max_retries",
",",
"periodicity",
"=",
"periodicity",
")",
"# Add an attribute to the function to be able to conveniently use it as",
"# spin.schedule(function) instead of spin.schedule('task_name')",
"func",
".",
"task_name",
"=",
"name",
"return",
"func"
] | Decorator to register a task function.
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> @tasks.task(name='foo')
>>> def foo():
... pass | [
"Decorator",
"to",
"register",
"a",
"task",
"function",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L99-L128 |
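
A usage sketch for the decorator form, in the spirit of the doctest above; the task names, queue and retry values are illustrative:

from datetime import timedelta

tasks = Tasks()

@tasks.task(name='send_report', queue='emails', max_retries=5)
def send_report(user_id):
    ...

@tasks.task(name='cleanup', periodicity=timedelta(minutes=30))
def cleanup():
    ...
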
NicolasLM/spinach | spinach/task.py | Tasks.add | def add(self, func: Callable, name: Optional[str]=None,
queue: Optional[str]=None, max_retries: Optional[Number]=None,
periodicity: Optional[timedelta]=None):
"""Register a task function.
:arg func: a callable to be executed
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> tasks.add(lambda x: x, name='do_nothing')
"""
if not name:
raise ValueError('Each Spinach task needs a name')
if name in self._tasks:
raise ValueError('A task named {} already exists'.format(name))
if queue is None:
if self.queue:
queue = self.queue
else:
queue = const.DEFAULT_QUEUE
if max_retries is None:
if self.max_retries:
max_retries = self.max_retries
else:
max_retries = const.DEFAULT_MAX_RETRIES
if periodicity is None:
periodicity = self.periodicity
if queue and queue.startswith('_'):
raise ValueError('Queues starting with "_" are reserved by '
'Spinach for internal use')
self._tasks[name] = Task(func, name, queue, max_retries, periodicity) | python | def add(self, func: Callable, name: Optional[str]=None,
queue: Optional[str]=None, max_retries: Optional[Number]=None,
periodicity: Optional[timedelta]=None):
"""Register a task function.
:arg func: a callable to be executed
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> tasks.add(lambda x: x, name='do_nothing')
"""
if not name:
raise ValueError('Each Spinach task needs a name')
if name in self._tasks:
raise ValueError('A task named {} already exists'.format(name))
if queue is None:
if self.queue:
queue = self.queue
else:
queue = const.DEFAULT_QUEUE
if max_retries is None:
if self.max_retries:
max_retries = self.max_retries
else:
max_retries = const.DEFAULT_MAX_RETRIES
if periodicity is None:
periodicity = self.periodicity
if queue and queue.startswith('_'):
raise ValueError('Queues starting with "_" are reserved by '
'Spinach for internal use')
self._tasks[name] = Task(func, name, queue, max_retries, periodicity) | [
"def",
"add",
"(",
"self",
",",
"func",
":",
"Callable",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"queue",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"max_retries",
":",
"Optional",
"[",
"Number",
"]",
"=",
"None",
",",
"periodicity",
":",
"Optional",
"[",
"timedelta",
"]",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"raise",
"ValueError",
"(",
"'Each Spinach task needs a name'",
")",
"if",
"name",
"in",
"self",
".",
"_tasks",
":",
"raise",
"ValueError",
"(",
"'A task named {} already exists'",
".",
"format",
"(",
"name",
")",
")",
"if",
"queue",
"is",
"None",
":",
"if",
"self",
".",
"queue",
":",
"queue",
"=",
"self",
".",
"queue",
"else",
":",
"queue",
"=",
"const",
".",
"DEFAULT_QUEUE",
"if",
"max_retries",
"is",
"None",
":",
"if",
"self",
".",
"max_retries",
":",
"max_retries",
"=",
"self",
".",
"max_retries",
"else",
":",
"max_retries",
"=",
"const",
".",
"DEFAULT_MAX_RETRIES",
"if",
"periodicity",
"is",
"None",
":",
"periodicity",
"=",
"self",
".",
"periodicity",
"if",
"queue",
"and",
"queue",
".",
"startswith",
"(",
"'_'",
")",
":",
"raise",
"ValueError",
"(",
"'Queues starting with \"_\" are reserved by '",
"'Spinach for internal use'",
")",
"self",
".",
"_tasks",
"[",
"name",
"]",
"=",
"Task",
"(",
"func",
",",
"name",
",",
"queue",
",",
"max_retries",
",",
"periodicity",
")"
] | Register a task function.
:arg func: a callable to be executed
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if
not provided
:arg periodicity: for periodic tasks, delay between executions as a
timedelta
>>> tasks = Tasks()
>>> tasks.add(lambda x: x, name='do_nothing') | [
"Register",
"a",
"task",
"function",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L130-L170 |
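
add() is the non-decorator counterpart; the docstring registers a lambda, and any callable works (the second task name and queue are illustrative):

tasks = Tasks()
tasks.add(lambda x: x, name='do_nothing')            # the docstring example
tasks.add(print, name='log_line', queue='logging')   # illustrative queue name
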
NicolasLM/spinach | spinach/task.py | Tasks.schedule | def schedule(self, task: Schedulable, *args, **kwargs):
"""Schedule a job to be executed as soon as possible.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`.
"""
self._require_attached_tasks()
self._spin.schedule(task, *args, **kwargs) | python | def schedule(self, task: Schedulable, *args, **kwargs):
"""Schedule a job to be executed as soon as possible.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`.
"""
self._require_attached_tasks()
self._spin.schedule(task, *args, **kwargs) | [
"def",
"schedule",
"(",
"self",
",",
"task",
":",
"Schedulable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_require_attached_tasks",
"(",
")",
"self",
".",
"_spin",
".",
"schedule",
"(",
"task",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Schedule a job to be executed as soon as possible.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`. | [
"Schedule",
"a",
"job",
"to",
"be",
"executed",
"as",
"soon",
"as",
"possible",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L179-L190 |
NicolasLM/spinach | spinach/task.py | Tasks.schedule_at | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
                 timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`.
"""
self._require_attached_tasks()
self._spin.schedule_at(task, at, *args, **kwargs) | python | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
                 timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`.
"""
self._require_attached_tasks()
self._spin.schedule_at(task, at, *args, **kwargs) | [
"def",
"schedule_at",
"(",
"self",
",",
"task",
":",
"Schedulable",
",",
"at",
":",
"datetime",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_require_attached_tasks",
"(",
")",
"self",
".",
"_spin",
".",
"schedule_at",
"(",
"task",
",",
"at",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
        timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
This method can only be used once tasks have been attached to a
Spinach :class:`Engine`. | [
"Schedule",
"a",
"job",
"to",
"be",
"executed",
"in",
"the",
"future",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L192-L207 |
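
Both scheduling methods simply forward to the Engine the tasks are attached to, so a hedged usage sketch looks like this (it assumes a `tasks` object already attached to a spinach Engine, and reuses the illustrative 'send_report' task from the earlier sketch):

from datetime import datetime, timedelta, timezone

tasks.schedule('send_report', 42)                      # run as soon as possible
tasks.schedule_at('send_report',
                  datetime.now(timezone.utc) + timedelta(hours=1),
                  42)                                  # run in about one hour
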
NicolasLM/spinach | spinach/task.py | Batch.schedule | def schedule(self, task: Schedulable, *args, **kwargs):
"""Add a job to be executed ASAP to the batch.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
at = datetime.now(timezone.utc)
self.schedule_at(task, at, *args, **kwargs) | python | def schedule(self, task: Schedulable, *args, **kwargs):
"""Add a job to be executed ASAP to the batch.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
at = datetime.now(timezone.utc)
self.schedule_at(task, at, *args, **kwargs) | [
"def",
"schedule",
"(",
"self",
",",
"task",
":",
"Schedulable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"at",
"=",
"datetime",
".",
"now",
"(",
"timezone",
".",
"utc",
")",
"self",
".",
"schedule_at",
"(",
"task",
",",
"at",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Add a job to be executed ASAP to the batch.
:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function | [
"Add",
"a",
"job",
"to",
"be",
"executed",
"ASAP",
"to",
"the",
"batch",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L243-L251 |
NicolasLM/spinach | spinach/task.py | Batch.schedule_at | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Add a job to be executed in the future to the batch.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
                 timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
self.jobs_to_create.append((task, at, args, kwargs)) | python | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Add a job to be executed in the future to the batch.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
                 timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
self.jobs_to_create.append((task, at, args, kwargs)) | [
"def",
"schedule_at",
"(",
"self",
",",
"task",
":",
"Schedulable",
",",
"at",
":",
"datetime",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"jobs_to_create",
".",
"append",
"(",
"(",
"task",
",",
"at",
",",
"args",
",",
"kwargs",
")",
")"
] | Add a job to be executed in the future to the batch.
:arg task: the task or its name to execute in the background
:arg at: Date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However if a
        timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function | [
"Add",
"a",
"job",
"to",
"be",
"executed",
"in",
"the",
"future",
"to",
"the",
"batch",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/task.py#L253-L264 |
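
Batch only records (task, at, args, kwargs) tuples in jobs_to_create; submitting the batch to a broker is outside these snippets. A sketch assuming Batch() takes no constructor arguments and initialises jobs_to_create, as the methods above imply:

from datetime import datetime, timedelta, timezone

batch = Batch()
batch.schedule('send_report', 1)
batch.schedule_at('send_report',
                  datetime.now(timezone.utc) + timedelta(minutes=10),
                  2)
print(len(batch.jobs_to_create))  # 2
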
project-rig/rig | rig/machine_control/common.py | unpack_sver_response_version | def unpack_sver_response_version(packet):
"""For internal use. Unpack the version-related parts of an sver (aka
CMD_VERSION) response.
Parameters
----------
packet : :py:class:`~rig.machine_control.packets.SCPPacket`
        The packet received in response to the version command.
Returns
-------
software_name : string
The name of the software running on the remote machine.
(major, minor, patch) : (int, int, int)
The numerical part of the semantic version number.
labels : string
Any labels in the version number (e.g. '-dev'). May be an empty string.
"""
software_name = packet.data.decode("utf-8")
legacy_version_field = packet.arg2 >> 16
if legacy_version_field != 0xFFFF:
# Legacy version encoding: just encoded in decimal fixed-point in the
# integer.
major = legacy_version_field // 100
minor = legacy_version_field % 100
patch = 0
labels = ""
else:
# Semantic Version encoding: packed after the null-terminator of the
# software name in the version string.
software_name, _, version_number = software_name.partition("\0")
match = VERSION_NUMBER_REGEX.match(version_number.rstrip("\0"))
assert match, "Malformed version number: {}".format(version_number)
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
labels = match.group(4) or ""
return (software_name.rstrip("\0"), (major, minor, patch), labels) | python | def unpack_sver_response_version(packet):
"""For internal use. Unpack the version-related parts of an sver (aka
CMD_VERSION) response.
Parameters
----------
packet : :py:class:`~rig.machine_control.packets.SCPPacket`
        The packet received in response to the version command.
Returns
-------
software_name : string
The name of the software running on the remote machine.
(major, minor, patch) : (int, int, int)
The numerical part of the semantic version number.
labels : string
Any labels in the version number (e.g. '-dev'). May be an empty string.
"""
software_name = packet.data.decode("utf-8")
legacy_version_field = packet.arg2 >> 16
if legacy_version_field != 0xFFFF:
# Legacy version encoding: just encoded in decimal fixed-point in the
# integer.
major = legacy_version_field // 100
minor = legacy_version_field % 100
patch = 0
labels = ""
else:
# Semantic Version encoding: packed after the null-terminator of the
# software name in the version string.
software_name, _, version_number = software_name.partition("\0")
match = VERSION_NUMBER_REGEX.match(version_number.rstrip("\0"))
assert match, "Malformed version number: {}".format(version_number)
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
labels = match.group(4) or ""
return (software_name.rstrip("\0"), (major, minor, patch), labels) | [
"def",
"unpack_sver_response_version",
"(",
"packet",
")",
":",
"software_name",
"=",
"packet",
".",
"data",
".",
"decode",
"(",
"\"utf-8\"",
")",
"legacy_version_field",
"=",
"packet",
".",
"arg2",
">>",
"16",
"if",
"legacy_version_field",
"!=",
"0xFFFF",
":",
"# Legacy version encoding: just encoded in decimal fixed-point in the",
"# integer.",
"major",
"=",
"legacy_version_field",
"//",
"100",
"minor",
"=",
"legacy_version_field",
"%",
"100",
"patch",
"=",
"0",
"labels",
"=",
"\"\"",
"else",
":",
"# Semantic Version encoding: packed after the null-terminator of the",
"# software name in the version string.",
"software_name",
",",
"_",
",",
"version_number",
"=",
"software_name",
".",
"partition",
"(",
"\"\\0\"",
")",
"match",
"=",
"VERSION_NUMBER_REGEX",
".",
"match",
"(",
"version_number",
".",
"rstrip",
"(",
"\"\\0\"",
")",
")",
"assert",
"match",
",",
"\"Malformed version number: {}\"",
".",
"format",
"(",
"version_number",
")",
"major",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"minor",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"patch",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"3",
")",
")",
"labels",
"=",
"match",
".",
"group",
"(",
"4",
")",
"or",
"\"\"",
"return",
"(",
"software_name",
".",
"rstrip",
"(",
"\"\\0\"",
")",
",",
"(",
"major",
",",
"minor",
",",
"patch",
")",
",",
"labels",
")"
] | For internal use. Unpack the version-related parts of an sver (aka
CMD_VERSION) response.
Parameters
----------
packet : :py:class:`~rig.machine_control.packets.SCPPacket`
        The packet received in response to the version command.
Returns
-------
software_name : string
The name of the software running on the remote machine.
(major, minor, patch) : (int, int, int)
The numerical part of the semantic version number.
labels : string
Any labels in the version number (e.g. '-dev'). May be an empty string. | [
"For",
"internal",
"use",
".",
"Unpack",
"the",
"version",
"-",
"related",
"parts",
"of",
"an",
"sver",
"(",
"aka",
"CMD_VERSION",
")",
"response",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/common.py#L20-L62 |
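
A worked example of the two encodings, using a namedtuple as a stand-in for rig's SCPPacket (only the arg2 and data fields are consulted). The semantic case assumes VERSION_NUMBER_REGEX accepts a trailing '-dev' label, which the docstring implies:

from collections import namedtuple

FakePacket = namedtuple('FakePacket', 'arg2 data')  # stand-in, not rig's SCPPacket

# Legacy encoding: the top half-word of arg2 holds major*100 + minor.
legacy = FakePacket(arg2=257 << 16, data=b'SC&MP\0')
print(unpack_sver_response_version(legacy))    # ('SC&MP', (2, 57, 0), '')

# Semantic encoding: top half-word is 0xFFFF and the version string follows
# the null-terminated software name in the data field.
semantic = FakePacket(arg2=0xFFFF << 16, data=b'SC&MP\x002.1.0-dev\0')
print(unpack_sver_response_version(semantic))  # ('SC&MP', (2, 1, 0), '-dev')
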
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | _locate_file | def _locate_file(f, base_dir):
"""
Utility method for finding full path to a filename as string
"""
if base_dir == None:
return f
file_name = os.path.join(base_dir, f)
real = os.path.realpath(file_name)
#print_v('- Located %s at %s'%(f,real))
return real | python | def _locate_file(f, base_dir):
"""
Utility method for finding full path to a filename as string
"""
if base_dir == None:
return f
file_name = os.path.join(base_dir, f)
real = os.path.realpath(file_name)
#print_v('- Located %s at %s'%(f,real))
return real | [
"def",
"_locate_file",
"(",
"f",
",",
"base_dir",
")",
":",
"if",
"base_dir",
"==",
"None",
":",
"return",
"f",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"f",
")",
"real",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"file_name",
")",
"#print_v('- Located %s at %s'%(f,real))",
"return",
"real"
] | Utility method for finding full path to a filename as string | [
"Utility",
"method",
"for",
"finding",
"full",
"path",
"to",
"a",
"filename",
"as",
"string"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L10-L19 |
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | generate_network | def generate_network(nl_model,
handler,
seed=1234,
always_include_props=False,
include_connections=True,
include_inputs=True,
base_dir=None):
"""
Generate the network model as described in NeuroMLlite in a specific handler,
e.g. NeuroMLHandler, PyNNHandler, etc.
"""
pop_locations = {}
cell_objects = {}
synapse_objects = {}
print_v("Starting net generation for %s%s..." % (nl_model.id,
' (base dir: %s)' % base_dir if base_dir else ''))
rng = random.Random(seed)
if nl_model.network_reader:
exec('from neuromllite.%s import %s' % (nl_model.network_reader.type, nl_model.network_reader.type))
exec('network_reader = %s()' % (nl_model.network_reader.type))
network_reader.parameters = nl_model.network_reader.parameters
network_reader.parse(handler)
pop_locations = network_reader.get_locations()
else:
notes = "Generated network: %s" % nl_model.id
notes += "\n Generation seed: %i" % (seed)
if nl_model.parameters:
notes += "\n NeuroMLlite parameters: "
for p in nl_model.parameters:
notes += "\n %s = %s" % (p, nl_model.parameters[p])
handler.handle_document_start(nl_model.id, notes)
temperature = '%sdegC' % nl_model.temperature if nl_model.temperature else None
handler.handle_network(nl_model.id, nl_model.notes, temperature=temperature)
nml2_doc_temp = _extract_pynn_components_to_neuroml(nl_model)
for c in nl_model.cells:
if c.neuroml2_source_file:
from pyneuroml import pynml
nml2_doc = pynml.read_neuroml2_file(_locate_file(c.neuroml2_source_file, base_dir),
include_includes=True)
cell_objects[c.id] = nml2_doc.get_by_id(c.id)
if c.pynn_cell:
cell_objects[c.id] = nml2_doc_temp.get_by_id(c.id)
for s in nl_model.synapses:
if s.neuroml2_source_file:
from pyneuroml import pynml
nml2_doc = pynml.read_neuroml2_file(_locate_file(s.neuroml2_source_file, base_dir),
include_includes=True)
synapse_objects[s.id] = nml2_doc.get_by_id(s.id)
if s.pynn_synapse:
synapse_objects[s.id] = nml2_doc_temp.get_by_id(s.id)
for p in nl_model.populations:
size = evaluate(p.size, nl_model.parameters)
properties = p.properties if p.properties else {}
if p.random_layout:
properties['region'] = p.random_layout.region
if not p.random_layout and not p.single_location and not always_include_props:
# If there are no positions (abstract network), and <property>
# is added to <population>, jLems doesn't like it... (it has difficulty
# interpreting pop0[0]/v, etc.)
# So better not to give properties...
properties = {}
if p.notes:
handler.handle_population(p.id,
p.component,
size,
cell_objects[p.component] if p.component in cell_objects else None,
properties=properties,
notes=p.notes)
else:
handler.handle_population(p.id,
p.component,
size,
cell_objects[p.component] if p.component in cell_objects else None,
properties=properties)
pop_locations[p.id] = np.zeros((size, 3))
for i in range(size):
if p.random_layout:
region = nl_model.get_child(p.random_layout.region, 'regions')
x = region.x + rng.random() * region.width
y = region.y + rng.random() * region.height
z = region.z + rng.random() * region.depth
pop_locations[p.id][i] = (x, y, z)
handler.handle_location(i, p.id, p.component, x, y, z)
if p.single_location:
loc = p.single_location.location
x = loc.x
y = loc.y
z = loc.z
pop_locations[p.id][i] = (x, y, z)
handler.handle_location(i, p.id, p.component, x, y, z)
if hasattr(handler, 'finalise_population'):
handler.finalise_population(p.id)
if include_connections:
for p in nl_model.projections:
type = p.type if p.type else 'projection'
handler.handle_projection(p.id,
p.presynaptic,
p.postsynaptic,
p.synapse,
synapse_obj=synapse_objects[p.synapse] if p.synapse in synapse_objects else None,
pre_synapse_obj=synapse_objects[p.pre_synapse] if p.pre_synapse in synapse_objects else None,
type=type)
delay = p.delay if p.delay else 0
weight = p.weight if p.weight else 1
conn_count = 0
if p.random_connectivity:
for pre_i in range(len(pop_locations[p.presynaptic])):
for post_i in range(len(pop_locations[p.postsynaptic])):
flip = rng.random()
#print("Is cell %i conn to %i, prob %s - %s"%(pre_i, post_i, flip, p.random_connectivity.probability))
if flip < p.random_connectivity.probability:
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
#print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
pre_i, \
post_i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
if p.convergent_connectivity:
for post_i in range(len(pop_locations[p.postsynaptic])):
for count in range(int(p.convergent_connectivity.num_per_post)):
found = False
while not found:
pre_i = int(rng.random()*len(pop_locations[p.presynaptic]))
if p.presynaptic==p.postsynaptic and pre_i==post_i:
found=False
else:
found=True
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
print_v("Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s"%(conn_count, pre_i, post_i, count, p.convergent_connectivity.num_per_post, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
pre_i, \
post_i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
elif p.one_to_one_connector:
for i in range(min(len(pop_locations[p.presynaptic]), len(pop_locations[p.postsynaptic]))):
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
#print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
i, \
i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
handler.finalise_projection(p.id,
p.presynaptic,
p.postsynaptic,
p.synapse)
if include_inputs:
for input in nl_model.inputs:
handler.handle_input_list(input.id,
input.population,
input.input_source,
size=0,
input_comp_obj=None)
input_count = 0
for i in range(len(pop_locations[input.population])):
flip = rng.random()
weight = input.weight if input.weight else 1
if flip * 100. < input.percentage:
number_per_cell = evaluate(input.number_per_cell, nl_model.parameters) if input.number_per_cell else 1
for j in range(number_per_cell):
handler.handle_single_input(input.id,
input_count,
i,
weight=evaluate(weight, nl_model.parameters))
input_count += 1
handler.finalise_input_source(input.id)
if hasattr(handler, 'finalise_document'):
handler.finalise_document() | python | def generate_network(nl_model,
handler,
seed=1234,
always_include_props=False,
include_connections=True,
include_inputs=True,
base_dir=None):
"""
Generate the network model as described in NeuroMLlite in a specific handler,
e.g. NeuroMLHandler, PyNNHandler, etc.
"""
pop_locations = {}
cell_objects = {}
synapse_objects = {}
print_v("Starting net generation for %s%s..." % (nl_model.id,
' (base dir: %s)' % base_dir if base_dir else ''))
rng = random.Random(seed)
if nl_model.network_reader:
exec('from neuromllite.%s import %s' % (nl_model.network_reader.type, nl_model.network_reader.type))
exec('network_reader = %s()' % (nl_model.network_reader.type))
network_reader.parameters = nl_model.network_reader.parameters
network_reader.parse(handler)
pop_locations = network_reader.get_locations()
else:
notes = "Generated network: %s" % nl_model.id
notes += "\n Generation seed: %i" % (seed)
if nl_model.parameters:
notes += "\n NeuroMLlite parameters: "
for p in nl_model.parameters:
notes += "\n %s = %s" % (p, nl_model.parameters[p])
handler.handle_document_start(nl_model.id, notes)
temperature = '%sdegC' % nl_model.temperature if nl_model.temperature else None
handler.handle_network(nl_model.id, nl_model.notes, temperature=temperature)
nml2_doc_temp = _extract_pynn_components_to_neuroml(nl_model)
for c in nl_model.cells:
if c.neuroml2_source_file:
from pyneuroml import pynml
nml2_doc = pynml.read_neuroml2_file(_locate_file(c.neuroml2_source_file, base_dir),
include_includes=True)
cell_objects[c.id] = nml2_doc.get_by_id(c.id)
if c.pynn_cell:
cell_objects[c.id] = nml2_doc_temp.get_by_id(c.id)
for s in nl_model.synapses:
if s.neuroml2_source_file:
from pyneuroml import pynml
nml2_doc = pynml.read_neuroml2_file(_locate_file(s.neuroml2_source_file, base_dir),
include_includes=True)
synapse_objects[s.id] = nml2_doc.get_by_id(s.id)
if s.pynn_synapse:
synapse_objects[s.id] = nml2_doc_temp.get_by_id(s.id)
for p in nl_model.populations:
size = evaluate(p.size, nl_model.parameters)
properties = p.properties if p.properties else {}
if p.random_layout:
properties['region'] = p.random_layout.region
if not p.random_layout and not p.single_location and not always_include_props:
# If there are no positions (abstract network), and <property>
# is added to <population>, jLems doesn't like it... (it has difficulty
# interpreting pop0[0]/v, etc.)
# So better not to give properties...
properties = {}
if p.notes:
handler.handle_population(p.id,
p.component,
size,
cell_objects[p.component] if p.component in cell_objects else None,
properties=properties,
notes=p.notes)
else:
handler.handle_population(p.id,
p.component,
size,
cell_objects[p.component] if p.component in cell_objects else None,
properties=properties)
pop_locations[p.id] = np.zeros((size, 3))
for i in range(size):
if p.random_layout:
region = nl_model.get_child(p.random_layout.region, 'regions')
x = region.x + rng.random() * region.width
y = region.y + rng.random() * region.height
z = region.z + rng.random() * region.depth
pop_locations[p.id][i] = (x, y, z)
handler.handle_location(i, p.id, p.component, x, y, z)
if p.single_location:
loc = p.single_location.location
x = loc.x
y = loc.y
z = loc.z
pop_locations[p.id][i] = (x, y, z)
handler.handle_location(i, p.id, p.component, x, y, z)
if hasattr(handler, 'finalise_population'):
handler.finalise_population(p.id)
if include_connections:
for p in nl_model.projections:
type = p.type if p.type else 'projection'
handler.handle_projection(p.id,
p.presynaptic,
p.postsynaptic,
p.synapse,
synapse_obj=synapse_objects[p.synapse] if p.synapse in synapse_objects else None,
pre_synapse_obj=synapse_objects[p.pre_synapse] if p.pre_synapse in synapse_objects else None,
type=type)
delay = p.delay if p.delay else 0
weight = p.weight if p.weight else 1
conn_count = 0
if p.random_connectivity:
for pre_i in range(len(pop_locations[p.presynaptic])):
for post_i in range(len(pop_locations[p.postsynaptic])):
flip = rng.random()
#print("Is cell %i conn to %i, prob %s - %s"%(pre_i, post_i, flip, p.random_connectivity.probability))
if flip < p.random_connectivity.probability:
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
#print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
pre_i, \
post_i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
if p.convergent_connectivity:
for post_i in range(len(pop_locations[p.postsynaptic])):
for count in range(int(p.convergent_connectivity.num_per_post)):
found = False
while not found:
pre_i = int(rng.random()*len(pop_locations[p.presynaptic]))
if p.presynaptic==p.postsynaptic and pre_i==post_i:
found=False
else:
found=True
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
print_v("Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s"%(conn_count, pre_i, post_i, count, p.convergent_connectivity.num_per_post, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
pre_i, \
post_i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
elif p.one_to_one_connector:
for i in range(min(len(pop_locations[p.presynaptic]), len(pop_locations[p.postsynaptic]))):
weight = evaluate(weight, nl_model.parameters)
delay = evaluate(delay, nl_model.parameters)
#print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay))
handler.handle_connection(p.id,
conn_count,
p.presynaptic,
p.postsynaptic,
p.synapse, \
i, \
i, \
preSegId=0, \
preFract=0.5, \
postSegId=0, \
postFract=0.5, \
delay=delay, \
weight=weight)
conn_count += 1
handler.finalise_projection(p.id,
p.presynaptic,
p.postsynaptic,
p.synapse)
if include_inputs:
for input in nl_model.inputs:
handler.handle_input_list(input.id,
input.population,
input.input_source,
size=0,
input_comp_obj=None)
input_count = 0
for i in range(len(pop_locations[input.population])):
flip = rng.random()
weight = input.weight if input.weight else 1
if flip * 100. < input.percentage:
number_per_cell = evaluate(input.number_per_cell, nl_model.parameters) if input.number_per_cell else 1
for j in range(number_per_cell):
handler.handle_single_input(input.id,
input_count,
i,
weight=evaluate(weight, nl_model.parameters))
input_count += 1
handler.finalise_input_source(input.id)
if hasattr(handler, 'finalise_document'):
handler.finalise_document() | [
"def",
"generate_network",
"(",
"nl_model",
",",
"handler",
",",
"seed",
"=",
"1234",
",",
"always_include_props",
"=",
"False",
",",
"include_connections",
"=",
"True",
",",
"include_inputs",
"=",
"True",
",",
"base_dir",
"=",
"None",
")",
":",
"pop_locations",
"=",
"{",
"}",
"cell_objects",
"=",
"{",
"}",
"synapse_objects",
"=",
"{",
"}",
"print_v",
"(",
"\"Starting net generation for %s%s...\"",
"%",
"(",
"nl_model",
".",
"id",
",",
"' (base dir: %s)'",
"%",
"base_dir",
"if",
"base_dir",
"else",
"''",
")",
")",
"rng",
"=",
"random",
".",
"Random",
"(",
"seed",
")",
"if",
"nl_model",
".",
"network_reader",
":",
"exec",
"(",
"'from neuromllite.%s import %s'",
"%",
"(",
"nl_model",
".",
"network_reader",
".",
"type",
",",
"nl_model",
".",
"network_reader",
".",
"type",
")",
")",
"exec",
"(",
"'network_reader = %s()'",
"%",
"(",
"nl_model",
".",
"network_reader",
".",
"type",
")",
")",
"network_reader",
".",
"parameters",
"=",
"nl_model",
".",
"network_reader",
".",
"parameters",
"network_reader",
".",
"parse",
"(",
"handler",
")",
"pop_locations",
"=",
"network_reader",
".",
"get_locations",
"(",
")",
"else",
":",
"notes",
"=",
"\"Generated network: %s\"",
"%",
"nl_model",
".",
"id",
"notes",
"+=",
"\"\\n Generation seed: %i\"",
"%",
"(",
"seed",
")",
"if",
"nl_model",
".",
"parameters",
":",
"notes",
"+=",
"\"\\n NeuroMLlite parameters: \"",
"for",
"p",
"in",
"nl_model",
".",
"parameters",
":",
"notes",
"+=",
"\"\\n %s = %s\"",
"%",
"(",
"p",
",",
"nl_model",
".",
"parameters",
"[",
"p",
"]",
")",
"handler",
".",
"handle_document_start",
"(",
"nl_model",
".",
"id",
",",
"notes",
")",
"temperature",
"=",
"'%sdegC'",
"%",
"nl_model",
".",
"temperature",
"if",
"nl_model",
".",
"temperature",
"else",
"None",
"handler",
".",
"handle_network",
"(",
"nl_model",
".",
"id",
",",
"nl_model",
".",
"notes",
",",
"temperature",
"=",
"temperature",
")",
"nml2_doc_temp",
"=",
"_extract_pynn_components_to_neuroml",
"(",
"nl_model",
")",
"for",
"c",
"in",
"nl_model",
".",
"cells",
":",
"if",
"c",
".",
"neuroml2_source_file",
":",
"from",
"pyneuroml",
"import",
"pynml",
"nml2_doc",
"=",
"pynml",
".",
"read_neuroml2_file",
"(",
"_locate_file",
"(",
"c",
".",
"neuroml2_source_file",
",",
"base_dir",
")",
",",
"include_includes",
"=",
"True",
")",
"cell_objects",
"[",
"c",
".",
"id",
"]",
"=",
"nml2_doc",
".",
"get_by_id",
"(",
"c",
".",
"id",
")",
"if",
"c",
".",
"pynn_cell",
":",
"cell_objects",
"[",
"c",
".",
"id",
"]",
"=",
"nml2_doc_temp",
".",
"get_by_id",
"(",
"c",
".",
"id",
")",
"for",
"s",
"in",
"nl_model",
".",
"synapses",
":",
"if",
"s",
".",
"neuroml2_source_file",
":",
"from",
"pyneuroml",
"import",
"pynml",
"nml2_doc",
"=",
"pynml",
".",
"read_neuroml2_file",
"(",
"_locate_file",
"(",
"s",
".",
"neuroml2_source_file",
",",
"base_dir",
")",
",",
"include_includes",
"=",
"True",
")",
"synapse_objects",
"[",
"s",
".",
"id",
"]",
"=",
"nml2_doc",
".",
"get_by_id",
"(",
"s",
".",
"id",
")",
"if",
"s",
".",
"pynn_synapse",
":",
"synapse_objects",
"[",
"s",
".",
"id",
"]",
"=",
"nml2_doc_temp",
".",
"get_by_id",
"(",
"s",
".",
"id",
")",
"for",
"p",
"in",
"nl_model",
".",
"populations",
":",
"size",
"=",
"evaluate",
"(",
"p",
".",
"size",
",",
"nl_model",
".",
"parameters",
")",
"properties",
"=",
"p",
".",
"properties",
"if",
"p",
".",
"properties",
"else",
"{",
"}",
"if",
"p",
".",
"random_layout",
":",
"properties",
"[",
"'region'",
"]",
"=",
"p",
".",
"random_layout",
".",
"region",
"if",
"not",
"p",
".",
"random_layout",
"and",
"not",
"p",
".",
"single_location",
"and",
"not",
"always_include_props",
":",
"# If there are no positions (abstract network), and <property>",
"# is added to <population>, jLems doesn't like it... (it has difficulty ",
"# interpreting pop0[0]/v, etc.)",
"# So better not to give properties...",
"properties",
"=",
"{",
"}",
"if",
"p",
".",
"notes",
":",
"handler",
".",
"handle_population",
"(",
"p",
".",
"id",
",",
"p",
".",
"component",
",",
"size",
",",
"cell_objects",
"[",
"p",
".",
"component",
"]",
"if",
"p",
".",
"component",
"in",
"cell_objects",
"else",
"None",
",",
"properties",
"=",
"properties",
",",
"notes",
"=",
"p",
".",
"notes",
")",
"else",
":",
"handler",
".",
"handle_population",
"(",
"p",
".",
"id",
",",
"p",
".",
"component",
",",
"size",
",",
"cell_objects",
"[",
"p",
".",
"component",
"]",
"if",
"p",
".",
"component",
"in",
"cell_objects",
"else",
"None",
",",
"properties",
"=",
"properties",
")",
"pop_locations",
"[",
"p",
".",
"id",
"]",
"=",
"np",
".",
"zeros",
"(",
"(",
"size",
",",
"3",
")",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"if",
"p",
".",
"random_layout",
":",
"region",
"=",
"nl_model",
".",
"get_child",
"(",
"p",
".",
"random_layout",
".",
"region",
",",
"'regions'",
")",
"x",
"=",
"region",
".",
"x",
"+",
"rng",
".",
"random",
"(",
")",
"*",
"region",
".",
"width",
"y",
"=",
"region",
".",
"y",
"+",
"rng",
".",
"random",
"(",
")",
"*",
"region",
".",
"height",
"z",
"=",
"region",
".",
"z",
"+",
"rng",
".",
"random",
"(",
")",
"*",
"region",
".",
"depth",
"pop_locations",
"[",
"p",
".",
"id",
"]",
"[",
"i",
"]",
"=",
"(",
"x",
",",
"y",
",",
"z",
")",
"handler",
".",
"handle_location",
"(",
"i",
",",
"p",
".",
"id",
",",
"p",
".",
"component",
",",
"x",
",",
"y",
",",
"z",
")",
"if",
"p",
".",
"single_location",
":",
"loc",
"=",
"p",
".",
"single_location",
".",
"location",
"x",
"=",
"loc",
".",
"x",
"y",
"=",
"loc",
".",
"y",
"z",
"=",
"loc",
".",
"z",
"pop_locations",
"[",
"p",
".",
"id",
"]",
"[",
"i",
"]",
"=",
"(",
"x",
",",
"y",
",",
"z",
")",
"handler",
".",
"handle_location",
"(",
"i",
",",
"p",
".",
"id",
",",
"p",
".",
"component",
",",
"x",
",",
"y",
",",
"z",
")",
"if",
"hasattr",
"(",
"handler",
",",
"'finalise_population'",
")",
":",
"handler",
".",
"finalise_population",
"(",
"p",
".",
"id",
")",
"if",
"include_connections",
":",
"for",
"p",
"in",
"nl_model",
".",
"projections",
":",
"type",
"=",
"p",
".",
"type",
"if",
"p",
".",
"type",
"else",
"'projection'",
"handler",
".",
"handle_projection",
"(",
"p",
".",
"id",
",",
"p",
".",
"presynaptic",
",",
"p",
".",
"postsynaptic",
",",
"p",
".",
"synapse",
",",
"synapse_obj",
"=",
"synapse_objects",
"[",
"p",
".",
"synapse",
"]",
"if",
"p",
".",
"synapse",
"in",
"synapse_objects",
"else",
"None",
",",
"pre_synapse_obj",
"=",
"synapse_objects",
"[",
"p",
".",
"pre_synapse",
"]",
"if",
"p",
".",
"pre_synapse",
"in",
"synapse_objects",
"else",
"None",
",",
"type",
"=",
"type",
")",
"delay",
"=",
"p",
".",
"delay",
"if",
"p",
".",
"delay",
"else",
"0",
"weight",
"=",
"p",
".",
"weight",
"if",
"p",
".",
"weight",
"else",
"1",
"conn_count",
"=",
"0",
"if",
"p",
".",
"random_connectivity",
":",
"for",
"pre_i",
"in",
"range",
"(",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"presynaptic",
"]",
")",
")",
":",
"for",
"post_i",
"in",
"range",
"(",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"postsynaptic",
"]",
")",
")",
":",
"flip",
"=",
"rng",
".",
"random",
"(",
")",
"#print(\"Is cell %i conn to %i, prob %s - %s\"%(pre_i, post_i, flip, p.random_connectivity.probability))",
"if",
"flip",
"<",
"p",
".",
"random_connectivity",
".",
"probability",
":",
"weight",
"=",
"evaluate",
"(",
"weight",
",",
"nl_model",
".",
"parameters",
")",
"delay",
"=",
"evaluate",
"(",
"delay",
",",
"nl_model",
".",
"parameters",
")",
"#print_v(\"Adding connection %i with weight: %s, delay: %s\"%(conn_count, weight, delay))",
"handler",
".",
"handle_connection",
"(",
"p",
".",
"id",
",",
"conn_count",
",",
"p",
".",
"presynaptic",
",",
"p",
".",
"postsynaptic",
",",
"p",
".",
"synapse",
",",
"pre_i",
",",
"post_i",
",",
"preSegId",
"=",
"0",
",",
"preFract",
"=",
"0.5",
",",
"postSegId",
"=",
"0",
",",
"postFract",
"=",
"0.5",
",",
"delay",
"=",
"delay",
",",
"weight",
"=",
"weight",
")",
"conn_count",
"+=",
"1",
"if",
"p",
".",
"convergent_connectivity",
":",
"for",
"post_i",
"in",
"range",
"(",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"postsynaptic",
"]",
")",
")",
":",
"for",
"count",
"in",
"range",
"(",
"int",
"(",
"p",
".",
"convergent_connectivity",
".",
"num_per_post",
")",
")",
":",
"found",
"=",
"False",
"while",
"not",
"found",
":",
"pre_i",
"=",
"int",
"(",
"rng",
".",
"random",
"(",
")",
"*",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"presynaptic",
"]",
")",
")",
"if",
"p",
".",
"presynaptic",
"==",
"p",
".",
"postsynaptic",
"and",
"pre_i",
"==",
"post_i",
":",
"found",
"=",
"False",
"else",
":",
"found",
"=",
"True",
"weight",
"=",
"evaluate",
"(",
"weight",
",",
"nl_model",
".",
"parameters",
")",
"delay",
"=",
"evaluate",
"(",
"delay",
",",
"nl_model",
".",
"parameters",
")",
"print_v",
"(",
"\"Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s\"",
"%",
"(",
"conn_count",
",",
"pre_i",
",",
"post_i",
",",
"count",
",",
"p",
".",
"convergent_connectivity",
".",
"num_per_post",
",",
"weight",
",",
"delay",
")",
")",
"handler",
".",
"handle_connection",
"(",
"p",
".",
"id",
",",
"conn_count",
",",
"p",
".",
"presynaptic",
",",
"p",
".",
"postsynaptic",
",",
"p",
".",
"synapse",
",",
"pre_i",
",",
"post_i",
",",
"preSegId",
"=",
"0",
",",
"preFract",
"=",
"0.5",
",",
"postSegId",
"=",
"0",
",",
"postFract",
"=",
"0.5",
",",
"delay",
"=",
"delay",
",",
"weight",
"=",
"weight",
")",
"conn_count",
"+=",
"1",
"elif",
"p",
".",
"one_to_one_connector",
":",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"presynaptic",
"]",
")",
",",
"len",
"(",
"pop_locations",
"[",
"p",
".",
"postsynaptic",
"]",
")",
")",
")",
":",
"weight",
"=",
"evaluate",
"(",
"weight",
",",
"nl_model",
".",
"parameters",
")",
"delay",
"=",
"evaluate",
"(",
"delay",
",",
"nl_model",
".",
"parameters",
")",
"#print_v(\"Adding connection %i with weight: %s, delay: %s\"%(conn_count, weight, delay))",
"handler",
".",
"handle_connection",
"(",
"p",
".",
"id",
",",
"conn_count",
",",
"p",
".",
"presynaptic",
",",
"p",
".",
"postsynaptic",
",",
"p",
".",
"synapse",
",",
"i",
",",
"i",
",",
"preSegId",
"=",
"0",
",",
"preFract",
"=",
"0.5",
",",
"postSegId",
"=",
"0",
",",
"postFract",
"=",
"0.5",
",",
"delay",
"=",
"delay",
",",
"weight",
"=",
"weight",
")",
"conn_count",
"+=",
"1",
"handler",
".",
"finalise_projection",
"(",
"p",
".",
"id",
",",
"p",
".",
"presynaptic",
",",
"p",
".",
"postsynaptic",
",",
"p",
".",
"synapse",
")",
"if",
"include_inputs",
":",
"for",
"input",
"in",
"nl_model",
".",
"inputs",
":",
"handler",
".",
"handle_input_list",
"(",
"input",
".",
"id",
",",
"input",
".",
"population",
",",
"input",
".",
"input_source",
",",
"size",
"=",
"0",
",",
"input_comp_obj",
"=",
"None",
")",
"input_count",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pop_locations",
"[",
"input",
".",
"population",
"]",
")",
")",
":",
"flip",
"=",
"rng",
".",
"random",
"(",
")",
"weight",
"=",
"input",
".",
"weight",
"if",
"input",
".",
"weight",
"else",
"1",
"if",
"flip",
"*",
"100.",
"<",
"input",
".",
"percentage",
":",
"number_per_cell",
"=",
"evaluate",
"(",
"input",
".",
"number_per_cell",
",",
"nl_model",
".",
"parameters",
")",
"if",
"input",
".",
"number_per_cell",
"else",
"1",
"for",
"j",
"in",
"range",
"(",
"number_per_cell",
")",
":",
"handler",
".",
"handle_single_input",
"(",
"input",
".",
"id",
",",
"input_count",
",",
"i",
",",
"weight",
"=",
"evaluate",
"(",
"weight",
",",
"nl_model",
".",
"parameters",
")",
")",
"input_count",
"+=",
"1",
"handler",
".",
"finalise_input_source",
"(",
"input",
".",
"id",
")",
"if",
"hasattr",
"(",
"handler",
",",
"'finalise_document'",
")",
":",
"handler",
".",
"finalise_document",
"(",
")"
] | Generate the network model as described in NeuroMLlite in a specific handler,
e.g. NeuroMLHandler, PyNNHandler, etc. | [
"Generate",
"the",
"network",
"model",
"as",
"described",
"in",
"NeuroMLlite",
"in",
"a",
"specific",
"handler",
"e",
".",
"g",
".",
"NeuroMLHandler",
"PyNNHandler",
"etc",
"."
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L22-L267 |
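A minimal usage sketch for the generate_network record above (the import paths and the file name 'Example.json' are assumptions; the NetworkBuilder handler is the same one used internally by generate_neuroml2_from_network in a later record):

from neuromllite.utils import load_network_json             # import path assumed
from neuromllite.NetworkGenerator import generate_network    # import path assumed
from neuroml.hdf5.NetworkBuilder import NetworkBuilder

nl_model = load_network_json('Example.json')                 # hypothetical NeuroMLlite network file
neuroml_handler = NetworkBuilder()                           # handler that collects the generated NeuroML elements
generate_network(nl_model, neuroml_handler, seed=1234)       # populations, projections and inputs are pushed to the handler
nml_doc = neuroml_handler.get_nml_doc()                      # resulting NeuroMLDocument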
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | check_to_generate_or_run | def check_to_generate_or_run(argv, sim):
"""
Useful method for calling in main method after network and simulation are
generated, to handle some standard export options like -jnml, -graph etc.
"""
print_v("Checking arguments: %s to see whether anything should be run in simulation %s (net: %s)..." %
(argv, sim.id, sim.network))
if len(argv)==1:
print_v("No arguments found. Currently supported export formats:")
print_v(" -nml | -nmlh5 | -jnml | -jnmlnrn | -jnmlnetpyne | -netpyne | -pynnnrn "+\
"| -pynnnest | -pynnbrian | -pynnneuroml | -sonata | -matrix[1-2] | -graph[1-6 n/d/f/c]")
if '-pynnnest' in argv:
generate_and_run(sim, simulator='PyNN_NEST')
elif '-pynnnrn' in argv:
generate_and_run(sim, simulator='PyNN_NEURON')
elif '-pynnbrian' in argv:
generate_and_run(sim, simulator='PyNN_Brian')
elif '-jnml' in argv:
generate_and_run(sim, simulator='jNeuroML')
elif '-jnmlnrn' in argv:
generate_and_run(sim, simulator='jNeuroML_NEURON')
elif '-netpyne' in argv:
generate_and_run(sim, simulator='NetPyNE')
elif '-pynnneuroml' in argv:
generate_and_run(sim, simulator='PyNN_NeuroML')
elif '-sonata' in argv:
generate_and_run(sim, simulator='sonata')
elif '-nml' in argv or '-neuroml' in argv:
network = load_network_json(sim.network)
generate_neuroml2_from_network(network, validate=True)
elif '-nmlh5' in argv or '-neuromlh5' in argv:
network = load_network_json(sim.network)
generate_neuroml2_from_network(network, validate=True, format='hdf5')
else:
for a in argv:
if '-jnmlnetpyne' in a:
num_processors = 1
if len(a)>len('-jnmlnetpyne'):
num_processors = int(a[12:])
generate_and_run(sim, simulator='jNeuroML_NetPyNE',num_processors=num_processors)
elif 'graph' in a: # e.g. -graph3c
generate_and_run(sim, simulator=a[1:]) # Will not "run" obviously...
elif 'matrix' in a: # e.g. -matrix2
generate_and_run(sim, simulator=a[1:]) | python | def check_to_generate_or_run(argv, sim):
"""
Useful method for calling in main method after network and simulation are
generated, to handle some standard export options like -jnml, -graph etc.
"""
print_v("Checking arguments: %s to see whether anything should be run in simulation %s (net: %s)..." %
(argv, sim.id, sim.network))
if len(argv)==1:
print_v("No arguments found. Currently supported export formats:")
print_v(" -nml | -nmlh5 | -jnml | -jnmlnrn | -jnmlnetpyne | -netpyne | -pynnnrn "+\
"| -pynnnest | -pynnbrian | -pynnneuroml | -sonata | -matrix[1-2] | -graph[1-6 n/d/f/c]")
if '-pynnnest' in argv:
generate_and_run(sim, simulator='PyNN_NEST')
elif '-pynnnrn' in argv:
generate_and_run(sim, simulator='PyNN_NEURON')
elif '-pynnbrian' in argv:
generate_and_run(sim, simulator='PyNN_Brian')
elif '-jnml' in argv:
generate_and_run(sim, simulator='jNeuroML')
elif '-jnmlnrn' in argv:
generate_and_run(sim, simulator='jNeuroML_NEURON')
elif '-netpyne' in argv:
generate_and_run(sim, simulator='NetPyNE')
elif '-pynnneuroml' in argv:
generate_and_run(sim, simulator='PyNN_NeuroML')
elif '-sonata' in argv:
generate_and_run(sim, simulator='sonata')
elif '-nml' in argv or '-neuroml' in argv:
network = load_network_json(sim.network)
generate_neuroml2_from_network(network, validate=True)
elif '-nmlh5' in argv or '-neuromlh5' in argv:
network = load_network_json(sim.network)
generate_neuroml2_from_network(network, validate=True, format='hdf5')
else:
for a in argv:
if '-jnmlnetpyne' in a:
num_processors = 1
if len(a)>len('-jnmlnetpyne'):
num_processors = int(a[12:])
generate_and_run(sim, simulator='jNeuroML_NetPyNE',num_processors=num_processors)
elif 'graph' in a: # e.g. -graph3c
generate_and_run(sim, simulator=a[1:]) # Will not "run" obviously...
elif 'matrix' in a: # e.g. -matrix2
generate_and_run(sim, simulator=a[1:]) | [
"def",
"check_to_generate_or_run",
"(",
"argv",
",",
"sim",
")",
":",
"print_v",
"(",
"\"Checking arguments: %s to see whether anything should be run in simulation %s (net: %s)...\"",
"%",
"(",
"argv",
",",
"sim",
".",
"id",
",",
"sim",
".",
"network",
")",
")",
"if",
"len",
"(",
"argv",
")",
"==",
"1",
":",
"print_v",
"(",
"\"No arguments found. Currently supported export formats:\"",
")",
"print_v",
"(",
"\" -nml | -nmlh5 | -jnml | -jnmlnrn | -jnmlnetpyne | -netpyne | -pynnnrn \"",
"+",
"\"| -pynnnest | -pynnbrian | -pynnneuroml | -sonata | -matrix[1-2] | -graph[1-6 n/d/f/c]\"",
")",
"if",
"'-pynnnest'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'PyNN_NEST'",
")",
"elif",
"'-pynnnrn'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'PyNN_NEURON'",
")",
"elif",
"'-pynnbrian'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'PyNN_Brian'",
")",
"elif",
"'-jnml'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'jNeuroML'",
")",
"elif",
"'-jnmlnrn'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'jNeuroML_NEURON'",
")",
"elif",
"'-netpyne'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'NetPyNE'",
")",
"elif",
"'-pynnneuroml'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'PyNN_NeuroML'",
")",
"elif",
"'-sonata'",
"in",
"argv",
":",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'sonata'",
")",
"elif",
"'-nml'",
"in",
"argv",
"or",
"'-neuroml'",
"in",
"argv",
":",
"network",
"=",
"load_network_json",
"(",
"sim",
".",
"network",
")",
"generate_neuroml2_from_network",
"(",
"network",
",",
"validate",
"=",
"True",
")",
"elif",
"'-nmlh5'",
"in",
"argv",
"or",
"'-neuromlh5'",
"in",
"argv",
":",
"network",
"=",
"load_network_json",
"(",
"sim",
".",
"network",
")",
"generate_neuroml2_from_network",
"(",
"network",
",",
"validate",
"=",
"True",
",",
"format",
"=",
"'hdf5'",
")",
"else",
":",
"for",
"a",
"in",
"argv",
":",
"if",
"'-jnmlnetpyne'",
"in",
"a",
":",
"num_processors",
"=",
"1",
"if",
"len",
"(",
"a",
")",
">",
"len",
"(",
"'-jnmlnetpyne'",
")",
":",
"num_processors",
"=",
"int",
"(",
"a",
"[",
"12",
":",
"]",
")",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"'jNeuroML_NetPyNE'",
",",
"num_processors",
"=",
"num_processors",
")",
"elif",
"'graph'",
"in",
"a",
":",
"# e.g. -graph3c",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"a",
"[",
"1",
":",
"]",
")",
"# Will not \"run\" obviously...",
"elif",
"'matrix'",
"in",
"a",
":",
"# e.g. -matrix2",
"generate_and_run",
"(",
"sim",
",",
"simulator",
"=",
"a",
"[",
"1",
":",
"]",
")"
] | Useful method for calling in main method after network and simulation are
generated, to handle some standard export options like -jnml, -graph etc. | [
"Useful",
"method",
"for",
"calling",
"in",
"main",
"method",
"after",
"network",
"and",
"simulation",
"are",
"generated",
"to",
"handle",
"some",
"standard",
"export",
"options",
"like",
"-",
"jnml",
"-",
"graph",
"etc",
"."
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L270-L329 |
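A sketch of how check_to_generate_or_run is typically wired into a script's main block; the Simulation fields shown are illustrative assumptions, and only the id/network usage is taken from the record above:

import sys
from neuromllite import Simulation                                    # import path assumed
from neuromllite.NetworkGenerator import check_to_generate_or_run     # import path assumed

if __name__ == '__main__':
    # Hypothetical simulation description pointing at a network JSON file
    sim = Simulation(id='SimExample', network='Example.json', duration='1000', dt='0.025')
    # Command-line flags such as -jnml, -netpyne or -graph3c select the export/run route
    check_to_generate_or_run(sys.argv, sim)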
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | _extract_pynn_components_to_neuroml | def _extract_pynn_components_to_neuroml(nl_model, nml_doc=None):
"""
Parse the NeuroMLlite description for cell, synapses and inputs described as
PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert
these to the equivalent elements in a NeuroMLDocument
"""
if nml_doc == None:
from neuroml import NeuroMLDocument
nml_doc = NeuroMLDocument(id="temp")
for c in nl_model.cells:
if c.pynn_cell:
if nml_doc.get_by_id(c.id) == None:
import pyNN.neuroml
cell_params = c.parameters if c.parameters else {}
#print('------- %s: %s' % (c, cell_params))
for p in cell_params:
cell_params[p] = evaluate(cell_params[p], nl_model.parameters)
#print('====== %s: %s' % (c, cell_params))
for proj in nl_model.projections:
synapse = nl_model.get_child(proj.synapse, 'synapses')
post_pop = nl_model.get_child(proj.postsynaptic, 'populations')
if post_pop.component == c.id:
#print("--------- Cell %s in post pop %s of %s uses %s"%(c.id,post_pop.id, proj.id, synapse))
if synapse.pynn_receptor_type == 'excitatory':
post = '_E'
elif synapse.pynn_receptor_type == 'inhibitory':
post = '_I'
for p in synapse.parameters:
cell_params['%s%s' % (p, post)] = synapse.parameters[p]
temp_cell = eval('pyNN.neuroml.%s(**cell_params)' % c.pynn_cell)
if c.pynn_cell != 'SpikeSourcePoisson':
temp_cell.default_initial_values['v'] = temp_cell.parameter_space['v_rest'].base_value
cell_id = temp_cell.add_to_nml_doc(nml_doc, None)
cell = nml_doc.get_by_id(cell_id)
cell.id = c.id
for s in nl_model.synapses:
if nml_doc.get_by_id(s.id) == None:
if s.pynn_synapse_type and s.pynn_receptor_type:
import neuroml
if s.pynn_synapse_type == 'cond_exp':
syn = neuroml.ExpCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev'])
nml_doc.exp_cond_synapses.append(syn)
elif s.pynn_synapse_type == 'cond_alpha':
syn = neuroml.AlphaCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev'])
nml_doc.alpha_cond_synapses.append(syn)
elif s.pynn_synapse_type == 'curr_exp':
syn = neuroml.ExpCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn'])
nml_doc.exp_curr_synapses.append(syn)
elif s.pynn_synapse_type == 'curr_alpha':
syn = neuroml.AlphaCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn'])
nml_doc.alpha_curr_synapses.append(syn)
for i in nl_model.input_sources:
#if nml_doc.get_by_id(i.id) == None:
if i.pynn_input:
import pyNN.neuroml
input_params = i.parameters if i.parameters else {}
exec('input__%s = pyNN.neuroml.%s(**input_params)' % (i.id, i.pynn_input))
exec('temp_input = input__%s' % i.id)
pg_id = temp_input.add_to_nml_doc(nml_doc, None)
#for pp in nml_doc.pulse_generators:
# print('PG: %s: %s'%(pp,pp.id))
pg = nml_doc.get_by_id(pg_id)
pg.id = i.id
return nml_doc | python | def _extract_pynn_components_to_neuroml(nl_model, nml_doc=None):
"""
Parse the NeuroMLlite description for cell, synapses and inputs described as
PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert
these to the equivalent elements in a NeuroMLDocument
"""
if nml_doc == None:
from neuroml import NeuroMLDocument
nml_doc = NeuroMLDocument(id="temp")
for c in nl_model.cells:
if c.pynn_cell:
if nml_doc.get_by_id(c.id) == None:
import pyNN.neuroml
cell_params = c.parameters if c.parameters else {}
#print('------- %s: %s' % (c, cell_params))
for p in cell_params:
cell_params[p] = evaluate(cell_params[p], nl_model.parameters)
#print('====== %s: %s' % (c, cell_params))
for proj in nl_model.projections:
synapse = nl_model.get_child(proj.synapse, 'synapses')
post_pop = nl_model.get_child(proj.postsynaptic, 'populations')
if post_pop.component == c.id:
#print("--------- Cell %s in post pop %s of %s uses %s"%(c.id,post_pop.id, proj.id, synapse))
if synapse.pynn_receptor_type == 'excitatory':
post = '_E'
elif synapse.pynn_receptor_type == 'inhibitory':
post = '_I'
for p in synapse.parameters:
cell_params['%s%s' % (p, post)] = synapse.parameters[p]
temp_cell = eval('pyNN.neuroml.%s(**cell_params)' % c.pynn_cell)
if c.pynn_cell != 'SpikeSourcePoisson':
temp_cell.default_initial_values['v'] = temp_cell.parameter_space['v_rest'].base_value
cell_id = temp_cell.add_to_nml_doc(nml_doc, None)
cell = nml_doc.get_by_id(cell_id)
cell.id = c.id
for s in nl_model.synapses:
if nml_doc.get_by_id(s.id) == None:
if s.pynn_synapse_type and s.pynn_receptor_type:
import neuroml
if s.pynn_synapse_type == 'cond_exp':
syn = neuroml.ExpCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev'])
nml_doc.exp_cond_synapses.append(syn)
elif s.pynn_synapse_type == 'cond_alpha':
syn = neuroml.AlphaCondSynapse(id=s.id, tau_syn=s.parameters['tau_syn'], e_rev=s.parameters['e_rev'])
nml_doc.alpha_cond_synapses.append(syn)
elif s.pynn_synapse_type == 'curr_exp':
syn = neuroml.ExpCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn'])
nml_doc.exp_curr_synapses.append(syn)
elif s.pynn_synapse_type == 'curr_alpha':
syn = neuroml.AlphaCurrSynapse(id=s.id, tau_syn=s.parameters['tau_syn'])
nml_doc.alpha_curr_synapses.append(syn)
for i in nl_model.input_sources:
#if nml_doc.get_by_id(i.id) == None:
if i.pynn_input:
import pyNN.neuroml
input_params = i.parameters if i.parameters else {}
exec('input__%s = pyNN.neuroml.%s(**input_params)' % (i.id, i.pynn_input))
exec('temp_input = input__%s' % i.id)
pg_id = temp_input.add_to_nml_doc(nml_doc, None)
#for pp in nml_doc.pulse_generators:
# print('PG: %s: %s'%(pp,pp.id))
pg = nml_doc.get_by_id(pg_id)
pg.id = i.id
return nml_doc | [
"def",
"_extract_pynn_components_to_neuroml",
"(",
"nl_model",
",",
"nml_doc",
"=",
"None",
")",
":",
"if",
"nml_doc",
"==",
"None",
":",
"from",
"neuroml",
"import",
"NeuroMLDocument",
"nml_doc",
"=",
"NeuroMLDocument",
"(",
"id",
"=",
"\"temp\"",
")",
"for",
"c",
"in",
"nl_model",
".",
"cells",
":",
"if",
"c",
".",
"pynn_cell",
":",
"if",
"nml_doc",
".",
"get_by_id",
"(",
"c",
".",
"id",
")",
"==",
"None",
":",
"import",
"pyNN",
".",
"neuroml",
"cell_params",
"=",
"c",
".",
"parameters",
"if",
"c",
".",
"parameters",
"else",
"{",
"}",
"#print('------- %s: %s' % (c, cell_params))",
"for",
"p",
"in",
"cell_params",
":",
"cell_params",
"[",
"p",
"]",
"=",
"evaluate",
"(",
"cell_params",
"[",
"p",
"]",
",",
"nl_model",
".",
"parameters",
")",
"#print('====== %s: %s' % (c, cell_params))",
"for",
"proj",
"in",
"nl_model",
".",
"projections",
":",
"synapse",
"=",
"nl_model",
".",
"get_child",
"(",
"proj",
".",
"synapse",
",",
"'synapses'",
")",
"post_pop",
"=",
"nl_model",
".",
"get_child",
"(",
"proj",
".",
"postsynaptic",
",",
"'populations'",
")",
"if",
"post_pop",
".",
"component",
"==",
"c",
".",
"id",
":",
"#print(\"--------- Cell %s in post pop %s of %s uses %s\"%(c.id,post_pop.id, proj.id, synapse))",
"if",
"synapse",
".",
"pynn_receptor_type",
"==",
"'excitatory'",
":",
"post",
"=",
"'_E'",
"elif",
"synapse",
".",
"pynn_receptor_type",
"==",
"'inhibitory'",
":",
"post",
"=",
"'_I'",
"for",
"p",
"in",
"synapse",
".",
"parameters",
":",
"cell_params",
"[",
"'%s%s'",
"%",
"(",
"p",
",",
"post",
")",
"]",
"=",
"synapse",
".",
"parameters",
"[",
"p",
"]",
"temp_cell",
"=",
"eval",
"(",
"'pyNN.neuroml.%s(**cell_params)'",
"%",
"c",
".",
"pynn_cell",
")",
"if",
"c",
".",
"pynn_cell",
"!=",
"'SpikeSourcePoisson'",
":",
"temp_cell",
".",
"default_initial_values",
"[",
"'v'",
"]",
"=",
"temp_cell",
".",
"parameter_space",
"[",
"'v_rest'",
"]",
".",
"base_value",
"cell_id",
"=",
"temp_cell",
".",
"add_to_nml_doc",
"(",
"nml_doc",
",",
"None",
")",
"cell",
"=",
"nml_doc",
".",
"get_by_id",
"(",
"cell_id",
")",
"cell",
".",
"id",
"=",
"c",
".",
"id",
"for",
"s",
"in",
"nl_model",
".",
"synapses",
":",
"if",
"nml_doc",
".",
"get_by_id",
"(",
"s",
".",
"id",
")",
"==",
"None",
":",
"if",
"s",
".",
"pynn_synapse_type",
"and",
"s",
".",
"pynn_receptor_type",
":",
"import",
"neuroml",
"if",
"s",
".",
"pynn_synapse_type",
"==",
"'cond_exp'",
":",
"syn",
"=",
"neuroml",
".",
"ExpCondSynapse",
"(",
"id",
"=",
"s",
".",
"id",
",",
"tau_syn",
"=",
"s",
".",
"parameters",
"[",
"'tau_syn'",
"]",
",",
"e_rev",
"=",
"s",
".",
"parameters",
"[",
"'e_rev'",
"]",
")",
"nml_doc",
".",
"exp_cond_synapses",
".",
"append",
"(",
"syn",
")",
"elif",
"s",
".",
"pynn_synapse_type",
"==",
"'cond_alpha'",
":",
"syn",
"=",
"neuroml",
".",
"AlphaCondSynapse",
"(",
"id",
"=",
"s",
".",
"id",
",",
"tau_syn",
"=",
"s",
".",
"parameters",
"[",
"'tau_syn'",
"]",
",",
"e_rev",
"=",
"s",
".",
"parameters",
"[",
"'e_rev'",
"]",
")",
"nml_doc",
".",
"alpha_cond_synapses",
".",
"append",
"(",
"syn",
")",
"elif",
"s",
".",
"pynn_synapse_type",
"==",
"'curr_exp'",
":",
"syn",
"=",
"neuroml",
".",
"ExpCurrSynapse",
"(",
"id",
"=",
"s",
".",
"id",
",",
"tau_syn",
"=",
"s",
".",
"parameters",
"[",
"'tau_syn'",
"]",
")",
"nml_doc",
".",
"exp_curr_synapses",
".",
"append",
"(",
"syn",
")",
"elif",
"s",
".",
"pynn_synapse_type",
"==",
"'curr_alpha'",
":",
"syn",
"=",
"neuroml",
".",
"AlphaCurrSynapse",
"(",
"id",
"=",
"s",
".",
"id",
",",
"tau_syn",
"=",
"s",
".",
"parameters",
"[",
"'tau_syn'",
"]",
")",
"nml_doc",
".",
"alpha_curr_synapses",
".",
"append",
"(",
"syn",
")",
"for",
"i",
"in",
"nl_model",
".",
"input_sources",
":",
"#if nml_doc.get_by_id(i.id) == None:",
"if",
"i",
".",
"pynn_input",
":",
"import",
"pyNN",
".",
"neuroml",
"input_params",
"=",
"i",
".",
"parameters",
"if",
"i",
".",
"parameters",
"else",
"{",
"}",
"exec",
"(",
"'input__%s = pyNN.neuroml.%s(**input_params)'",
"%",
"(",
"i",
".",
"id",
",",
"i",
".",
"pynn_input",
")",
")",
"exec",
"(",
"'temp_input = input__%s'",
"%",
"i",
".",
"id",
")",
"pg_id",
"=",
"temp_input",
".",
"add_to_nml_doc",
"(",
"nml_doc",
",",
"None",
")",
"#for pp in nml_doc.pulse_generators:",
"# print('PG: %s: %s'%(pp,pp.id))",
"pg",
"=",
"nml_doc",
".",
"get_by_id",
"(",
"pg_id",
")",
"pg",
".",
"id",
"=",
"i",
".",
"id",
"return",
"nml_doc"
] | Parse the NeuroMLlite description for cell, synapses and inputs described as
PyNN elements (e.g. IF_cond_alpha, DCSource) and parameters, and convert
these to the equivalent elements in a NeuroMLDocument | [
"Parse",
"the",
"NeuroMLlite",
"description",
"for",
"cell",
"synapses",
"and",
"inputs",
"described",
"as",
"PyNN",
"elements",
"(",
"e",
".",
"g",
".",
"IF_cond_alpha",
"DCSource",
")",
"and",
"parameters",
"and",
"convert",
"these",
"to",
"the",
"equivalent",
"elements",
"in",
"a",
"NeuroMLDocument"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L332-L410 |
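The helper in the record above is private, but the neighbouring records show its call pattern; a short sketch, with nl_model a placeholder NeuroMLlite network whose cells, synapses and inputs are specified as PyNN components:

from neuroml import NeuroMLDocument
# nl_model: assumed neuromllite Network using pynn_cell / pynn_synapse_type / pynn_input components
nml_doc = NeuroMLDocument(id="net_doc")
_extract_pynn_components_to_neuroml(nl_model, nml_doc)    # adds the equivalent NeuroML2 elements to nml_doc
temp_doc = _extract_pynn_components_to_neuroml(nl_model)  # with no document passed, a temporary one is created and returned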
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | generate_neuroml2_from_network | def generate_neuroml2_from_network(nl_model,
nml_file_name=None,
print_summary=True,
seed=1234,
format='xml',
base_dir=None,
copy_included_elements=False,
target_dir=None,
validate=False):
"""
Generate and save NeuroML2 file (in either XML or HDF5 format) from the
NeuroMLlite description
"""
print_v("Generating NeuroML2 for %s%s..." % (nl_model.id, ' (base dir: %s; target dir: %s)'
% (base_dir, target_dir) if base_dir or target_dir else ''))
import neuroml
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
generate_network(nl_model, neuroml_handler, seed=seed, base_dir=base_dir)
nml_doc = neuroml_handler.get_nml_doc()
for i in nl_model.input_sources:
if nml_doc.get_by_id(i.id) == None:
if i.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(i.neuroml2_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
if i.neuroml2_input:
input_params = i.parameters if i.parameters else {}
# TODO make more generic...
if i.neuroml2_input.lower() == 'pulsegenerator':
input = neuroml.PulseGenerator(id=i.id)
nml_doc.pulse_generators.append(input)
elif i.neuroml2_input.lower() == 'pulsegeneratordl':
input = neuroml.PulseGeneratorDL(id=i.id)
nml_doc.pulse_generator_dls.append(input)
elif i.neuroml2_input.lower() == 'poissonfiringsynapse':
input = neuroml.PoissonFiringSynapse(id=i.id)
nml_doc.poisson_firing_synapses.append(input)
for p in input_params:
exec('input.%s = "%s"' % (p, evaluate(input_params[p], nl_model.parameters)))
for c in nl_model.cells:
if c.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(c.neuroml2_source_file, base_dir))
found_cell = False
for cell in nml_doc.cells:
if cell.id == c.id:
nml_doc.cells.remove(cell) # Better to use imported cell file; will have channels, etc.
nml_doc.includes.append(incl)
found_cell = True
if not found_cell:
for p in nl_model.populations:
if p.component == c.id:
pass
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
''' Needed???
if c.lems_source_file:
incl = neuroml.IncludeType(_locate_file(c.lems_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)'''
if c.neuroml2_cell:
cell_params = c.parameters if c.parameters else {}
# TODO make more generic...
if c.neuroml2_cell.lower() == 'spikegenerator':
cell = neuroml.SpikeGenerator(id=c.id)
nml_doc.spike_generators.append(cell)
elif c.neuroml2_cell.lower() == 'spikegeneratorpoisson':
cell = neuroml.SpikeGeneratorPoisson(id=c.id)
nml_doc.spike_generator_poissons.append(cell)
elif c.neuroml2_cell.lower() == 'spikegeneratorrefpoisson':
cell = neuroml.SpikeGeneratorRefPoisson(id=c.id)
nml_doc.spike_generator_ref_poissons.append(cell)
else:
raise Exception('The neuroml2_cell: %s is not yet supported...'%c.neuroml2_cell)
for p in cell_params:
exec('cell.%s = "%s"' % (p, evaluate(cell_params[p], nl_model.parameters)))
for s in nl_model.synapses:
if nml_doc.get_by_id(s.id) == None:
if s.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(s.neuroml2_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
# Look for and add the PyNN based elements to the NeuroMLDocument
_extract_pynn_components_to_neuroml(nl_model, nml_doc)
if print_summary:
# Print info
print_v(nml_doc.summary())
# Save to file
if target_dir == None:
target_dir = base_dir
if format == 'xml':
if not nml_file_name:
nml_file_name = _locate_file('%s.net.nml' % nml_doc.id, target_dir)
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
if format == 'hdf5':
if not nml_file_name:
nml_file_name = _locate_file('%s.net.nml.h5' % nml_doc.id, target_dir)
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v("Written NeuroML to %s" % nml_file_name)
if validate and format == 'xml':
from pyneuroml import pynml
success = pynml.validate_neuroml2(nml_file_name, verbose_validate=False)
if success:
print_v('Generated file is valid NeuroML2!')
else:
print_v('Generated file is NOT valid NeuroML2!')
return nml_file_name, nml_doc | python | def generate_neuroml2_from_network(nl_model,
nml_file_name=None,
print_summary=True,
seed=1234,
format='xml',
base_dir=None,
copy_included_elements=False,
target_dir=None,
validate=False):
"""
Generate and save NeuroML2 file (in either XML or HDF5 format) from the
NeuroMLlite description
"""
print_v("Generating NeuroML2 for %s%s..." % (nl_model.id, ' (base dir: %s; target dir: %s)'
% (base_dir, target_dir) if base_dir or target_dir else ''))
import neuroml
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
generate_network(nl_model, neuroml_handler, seed=seed, base_dir=base_dir)
nml_doc = neuroml_handler.get_nml_doc()
for i in nl_model.input_sources:
if nml_doc.get_by_id(i.id) == None:
if i.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(i.neuroml2_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
if i.neuroml2_input:
input_params = i.parameters if i.parameters else {}
# TODO make more generic...
if i.neuroml2_input.lower() == 'pulsegenerator':
input = neuroml.PulseGenerator(id=i.id)
nml_doc.pulse_generators.append(input)
elif i.neuroml2_input.lower() == 'pulsegeneratordl':
input = neuroml.PulseGeneratorDL(id=i.id)
nml_doc.pulse_generator_dls.append(input)
elif i.neuroml2_input.lower() == 'poissonfiringsynapse':
input = neuroml.PoissonFiringSynapse(id=i.id)
nml_doc.poisson_firing_synapses.append(input)
for p in input_params:
exec('input.%s = "%s"' % (p, evaluate(input_params[p], nl_model.parameters)))
for c in nl_model.cells:
if c.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(c.neuroml2_source_file, base_dir))
found_cell = False
for cell in nml_doc.cells:
if cell.id == c.id:
nml_doc.cells.remove(cell) # Better to use imported cell file; will have channels, etc.
nml_doc.includes.append(incl)
found_cell = True
if not found_cell:
for p in nl_model.populations:
if p.component == c.id:
pass
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
''' Needed???
if c.lems_source_file:
incl = neuroml.IncludeType(_locate_file(c.lems_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)'''
if c.neuroml2_cell:
cell_params = c.parameters if c.parameters else {}
# TODO make more generic...
if c.neuroml2_cell.lower() == 'spikegenerator':
cell = neuroml.SpikeGenerator(id=c.id)
nml_doc.spike_generators.append(cell)
elif c.neuroml2_cell.lower() == 'spikegeneratorpoisson':
cell = neuroml.SpikeGeneratorPoisson(id=c.id)
nml_doc.spike_generator_poissons.append(cell)
elif c.neuroml2_cell.lower() == 'spikegeneratorrefpoisson':
cell = neuroml.SpikeGeneratorRefPoisson(id=c.id)
nml_doc.spike_generator_ref_poissons.append(cell)
else:
raise Exception('The neuroml2_cell: %s is not yet supported...'%c.neuroml2_cell)
for p in cell_params:
exec('cell.%s = "%s"' % (p, evaluate(cell_params[p], nl_model.parameters)))
for s in nl_model.synapses:
if nml_doc.get_by_id(s.id) == None:
if s.neuroml2_source_file:
incl = neuroml.IncludeType(_locate_file(s.neuroml2_source_file, base_dir))
if not incl in nml_doc.includes:
nml_doc.includes.append(incl)
# Look for and add the PyNN based elements to the NeuroMLDocument
_extract_pynn_components_to_neuroml(nl_model, nml_doc)
if print_summary:
# Print info
print_v(nml_doc.summary())
# Save to file
if target_dir == None:
target_dir = base_dir
if format == 'xml':
if not nml_file_name:
nml_file_name = _locate_file('%s.net.nml' % nml_doc.id, target_dir)
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
if format == 'hdf5':
if not nml_file_name:
nml_file_name = _locate_file('%s.net.nml.h5' % nml_doc.id, target_dir)
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v("Written NeuroML to %s" % nml_file_name)
if validate and format == 'xml':
from pyneuroml import pynml
success = pynml.validate_neuroml2(nml_file_name, verbose_validate=False)
if success:
print_v('Generated file is valid NeuroML2!')
else:
print_v('Generated file is NOT valid NeuroML2!')
return nml_file_name, nml_doc | [
"def",
"generate_neuroml2_from_network",
"(",
"nl_model",
",",
"nml_file_name",
"=",
"None",
",",
"print_summary",
"=",
"True",
",",
"seed",
"=",
"1234",
",",
"format",
"=",
"'xml'",
",",
"base_dir",
"=",
"None",
",",
"copy_included_elements",
"=",
"False",
",",
"target_dir",
"=",
"None",
",",
"validate",
"=",
"False",
")",
":",
"print_v",
"(",
"\"Generating NeuroML2 for %s%s...\"",
"%",
"(",
"nl_model",
".",
"id",
",",
"' (base dir: %s; target dir: %s)'",
"%",
"(",
"base_dir",
",",
"target_dir",
")",
"if",
"base_dir",
"or",
"target_dir",
"else",
"''",
")",
")",
"import",
"neuroml",
"from",
"neuroml",
".",
"hdf5",
".",
"NetworkBuilder",
"import",
"NetworkBuilder",
"neuroml_handler",
"=",
"NetworkBuilder",
"(",
")",
"generate_network",
"(",
"nl_model",
",",
"neuroml_handler",
",",
"seed",
"=",
"seed",
",",
"base_dir",
"=",
"base_dir",
")",
"nml_doc",
"=",
"neuroml_handler",
".",
"get_nml_doc",
"(",
")",
"for",
"i",
"in",
"nl_model",
".",
"input_sources",
":",
"if",
"nml_doc",
".",
"get_by_id",
"(",
"i",
".",
"id",
")",
"==",
"None",
":",
"if",
"i",
".",
"neuroml2_source_file",
":",
"incl",
"=",
"neuroml",
".",
"IncludeType",
"(",
"_locate_file",
"(",
"i",
".",
"neuroml2_source_file",
",",
"base_dir",
")",
")",
"if",
"not",
"incl",
"in",
"nml_doc",
".",
"includes",
":",
"nml_doc",
".",
"includes",
".",
"append",
"(",
"incl",
")",
"if",
"i",
".",
"neuroml2_input",
":",
"input_params",
"=",
"i",
".",
"parameters",
"if",
"i",
".",
"parameters",
"else",
"{",
"}",
"# TODO make more generic...",
"if",
"i",
".",
"neuroml2_input",
".",
"lower",
"(",
")",
"==",
"'pulsegenerator'",
":",
"input",
"=",
"neuroml",
".",
"PulseGenerator",
"(",
"id",
"=",
"i",
".",
"id",
")",
"nml_doc",
".",
"pulse_generators",
".",
"append",
"(",
"input",
")",
"elif",
"i",
".",
"neuroml2_input",
".",
"lower",
"(",
")",
"==",
"'pulsegeneratordl'",
":",
"input",
"=",
"neuroml",
".",
"PulseGeneratorDL",
"(",
"id",
"=",
"i",
".",
"id",
")",
"nml_doc",
".",
"pulse_generator_dls",
".",
"append",
"(",
"input",
")",
"elif",
"i",
".",
"neuroml2_input",
".",
"lower",
"(",
")",
"==",
"'poissonfiringsynapse'",
":",
"input",
"=",
"neuroml",
".",
"PoissonFiringSynapse",
"(",
"id",
"=",
"i",
".",
"id",
")",
"nml_doc",
".",
"poisson_firing_synapses",
".",
"append",
"(",
"input",
")",
"for",
"p",
"in",
"input_params",
":",
"exec",
"(",
"'input.%s = \"%s\"'",
"%",
"(",
"p",
",",
"evaluate",
"(",
"input_params",
"[",
"p",
"]",
",",
"nl_model",
".",
"parameters",
")",
")",
")",
"for",
"c",
"in",
"nl_model",
".",
"cells",
":",
"if",
"c",
".",
"neuroml2_source_file",
":",
"incl",
"=",
"neuroml",
".",
"IncludeType",
"(",
"_locate_file",
"(",
"c",
".",
"neuroml2_source_file",
",",
"base_dir",
")",
")",
"found_cell",
"=",
"False",
"for",
"cell",
"in",
"nml_doc",
".",
"cells",
":",
"if",
"cell",
".",
"id",
"==",
"c",
".",
"id",
":",
"nml_doc",
".",
"cells",
".",
"remove",
"(",
"cell",
")",
"# Better to use imported cell file; will have channels, etc.",
"nml_doc",
".",
"includes",
".",
"append",
"(",
"incl",
")",
"found_cell",
"=",
"True",
"if",
"not",
"found_cell",
":",
"for",
"p",
"in",
"nl_model",
".",
"populations",
":",
"if",
"p",
".",
"component",
"==",
"c",
".",
"id",
":",
"pass",
"if",
"not",
"incl",
"in",
"nml_doc",
".",
"includes",
":",
"nml_doc",
".",
"includes",
".",
"append",
"(",
"incl",
")",
"''' Needed???\n if c.lems_source_file: \n incl = neuroml.IncludeType(_locate_file(c.lems_source_file, base_dir))\n if not incl in nml_doc.includes:\n nml_doc.includes.append(incl)'''",
"if",
"c",
".",
"neuroml2_cell",
":",
"cell_params",
"=",
"c",
".",
"parameters",
"if",
"c",
".",
"parameters",
"else",
"{",
"}",
"# TODO make more generic...",
"if",
"c",
".",
"neuroml2_cell",
".",
"lower",
"(",
")",
"==",
"'spikegenerator'",
":",
"cell",
"=",
"neuroml",
".",
"SpikeGenerator",
"(",
"id",
"=",
"c",
".",
"id",
")",
"nml_doc",
".",
"spike_generators",
".",
"append",
"(",
"cell",
")",
"elif",
"c",
".",
"neuroml2_cell",
".",
"lower",
"(",
")",
"==",
"'spikegeneratorpoisson'",
":",
"cell",
"=",
"neuroml",
".",
"SpikeGeneratorPoisson",
"(",
"id",
"=",
"c",
".",
"id",
")",
"nml_doc",
".",
"spike_generator_poissons",
".",
"append",
"(",
"cell",
")",
"elif",
"c",
".",
"neuroml2_cell",
".",
"lower",
"(",
")",
"==",
"'spikegeneratorrefpoisson'",
":",
"cell",
"=",
"neuroml",
".",
"SpikeGeneratorRefPoisson",
"(",
"id",
"=",
"c",
".",
"id",
")",
"nml_doc",
".",
"spike_generator_ref_poissons",
".",
"append",
"(",
"cell",
")",
"else",
":",
"raise",
"Exception",
"(",
"'The neuroml2_cell: %s is not yet supported...'",
"%",
"c",
".",
"neuroml2_cell",
")",
"for",
"p",
"in",
"cell_params",
":",
"exec",
"(",
"'cell.%s = \"%s\"'",
"%",
"(",
"p",
",",
"evaluate",
"(",
"cell_params",
"[",
"p",
"]",
",",
"nl_model",
".",
"parameters",
")",
")",
")",
"for",
"s",
"in",
"nl_model",
".",
"synapses",
":",
"if",
"nml_doc",
".",
"get_by_id",
"(",
"s",
".",
"id",
")",
"==",
"None",
":",
"if",
"s",
".",
"neuroml2_source_file",
":",
"incl",
"=",
"neuroml",
".",
"IncludeType",
"(",
"_locate_file",
"(",
"s",
".",
"neuroml2_source_file",
",",
"base_dir",
")",
")",
"if",
"not",
"incl",
"in",
"nml_doc",
".",
"includes",
":",
"nml_doc",
".",
"includes",
".",
"append",
"(",
"incl",
")",
"# Look for and add the PyNN based elements to the NeuroMLDocument ",
"_extract_pynn_components_to_neuroml",
"(",
"nl_model",
",",
"nml_doc",
")",
"if",
"print_summary",
":",
"# Print info",
"print_v",
"(",
"nml_doc",
".",
"summary",
"(",
")",
")",
"# Save to file",
"if",
"target_dir",
"==",
"None",
":",
"target_dir",
"=",
"base_dir",
"if",
"format",
"==",
"'xml'",
":",
"if",
"not",
"nml_file_name",
":",
"nml_file_name",
"=",
"_locate_file",
"(",
"'%s.net.nml'",
"%",
"nml_doc",
".",
"id",
",",
"target_dir",
")",
"from",
"neuroml",
".",
"writers",
"import",
"NeuroMLWriter",
"NeuroMLWriter",
".",
"write",
"(",
"nml_doc",
",",
"nml_file_name",
")",
"if",
"format",
"==",
"'hdf5'",
":",
"if",
"not",
"nml_file_name",
":",
"nml_file_name",
"=",
"_locate_file",
"(",
"'%s.net.nml.h5'",
"%",
"nml_doc",
".",
"id",
",",
"target_dir",
")",
"from",
"neuroml",
".",
"writers",
"import",
"NeuroMLHdf5Writer",
"NeuroMLHdf5Writer",
".",
"write",
"(",
"nml_doc",
",",
"nml_file_name",
")",
"print_v",
"(",
"\"Written NeuroML to %s\"",
"%",
"nml_file_name",
")",
"if",
"validate",
"and",
"format",
"==",
"'xml'",
":",
"from",
"pyneuroml",
"import",
"pynml",
"success",
"=",
"pynml",
".",
"validate_neuroml2",
"(",
"nml_file_name",
",",
"verbose_validate",
"=",
"False",
")",
"if",
"success",
":",
"print_v",
"(",
"'Generated file is valid NeuroML2!'",
")",
"else",
":",
"print_v",
"(",
"'Generated file is NOT valid NeuroML2!'",
")",
"return",
"nml_file_name",
",",
"nml_doc"
] | Generate and save NeuroML2 file (in either XML or HDF5 format) from the
NeuroMLlite description | [
"Generate",
"and",
"save",
"NeuroML2",
"file",
"(",
"in",
"either",
"XML",
"or",
"HDF5",
"format",
")",
"from",
"the",
"NeuroMLlite",
"description"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L413-L554 |
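A usage sketch matching the call already made in the -nml branch of check_to_generate_or_run earlier; the file name and import paths are assumptions:

from neuromllite.utils import load_network_json                          # import path assumed
from neuromllite.NetworkGenerator import generate_neuroml2_from_network  # import path assumed

network = load_network_json('Example.json')                              # hypothetical network description
# Writes <network id>.net.nml and validates it with jNeuroML; format='hdf5' gives the HDF5 variant
nml_file_name, nml_doc = generate_neuroml2_from_network(network, validate=True)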
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | _generate_neuron_files_from_neuroml | def _generate_neuron_files_from_neuroml(network, verbose=False, dir_for_mod_files = None):
"""
Generate NEURON hoc/mod files from the NeuroML files which are marked as
included in the NeuroMLlite description; also compiles the mod files
"""
print_v("------------- Generating NEURON files from NeuroML for %s (default dir: %s)..." % (network.id, dir_for_mod_files))
nml_src_files = []
from neuroml import NeuroMLDocument
import neuroml.writers as writers
temp_nml_doc = NeuroMLDocument(id="temp")
dirs_for_mod_files = []
if dir_for_mod_files!=None:
dirs_for_mod_files.append(os.path.abspath(dir_for_mod_files))
for c in network.cells:
if c.neuroml2_source_file:
nml_src_files.append(c.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(c.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
for s in network.synapses:
if s.neuroml2_source_file:
nml_src_files.append(s.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(s.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
for i in network.input_sources:
if i.neuroml2_source_file:
nml_src_files.append(i.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(i.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
temp_nml_doc = _extract_pynn_components_to_neuroml(network)
summary = temp_nml_doc.summary()
if 'IF_' in summary:
import tempfile
temp_nml_file = tempfile.NamedTemporaryFile(delete=False, suffix='.nml', dir=dir_for_mod_files)
print_v("Writing temporary NML file to: %s, summary: "%temp_nml_file.name)
print_v(summary)
writers.NeuroMLWriter.write(temp_nml_doc, temp_nml_file.name)
nml_src_files.append(temp_nml_file.name)
for f in nml_src_files:
from pyneuroml import pynml
print_v("Generating/compiling hoc/mod files for: %s"%f)
pynml.run_lems_with_jneuroml_neuron(f,
nogui=True,
only_generate_scripts=True,
compile_mods=True,
verbose=False)
for dir_for_mod_files in dirs_for_mod_files:
if not dir_for_mod_files in locations_mods_loaded_from:
print_v("Generated NEURON code; loading mechanisms from %s (cwd: %s; already loaded: %s)" % (dir_for_mod_files,os.getcwd(),locations_mods_loaded_from))
try:
from neuron import load_mechanisms
if os.getcwd()==dir_for_mod_files:
print_v("That's current dir => importing neuron module loads mods here...")
else:
load_mechanisms(dir_for_mod_files)
locations_mods_loaded_from.append(dir_for_mod_files)
except:
print_v("Failed to load mod file mechanisms...")
else:
print_v("Already loaded mechanisms from %s (all loaded: %s)" % (dir_for_mod_files,locations_mods_loaded_from)) | python | def _generate_neuron_files_from_neuroml(network, verbose=False, dir_for_mod_files = None):
"""
Generate NEURON hoc/mod files from the NeuroML files which are marked as
included in the NeuroMLlite description; also compiles the mod files
"""
print_v("------------- Generating NEURON files from NeuroML for %s (default dir: %s)..." % (network.id, dir_for_mod_files))
nml_src_files = []
from neuroml import NeuroMLDocument
import neuroml.writers as writers
temp_nml_doc = NeuroMLDocument(id="temp")
dirs_for_mod_files = []
if dir_for_mod_files!=None:
dirs_for_mod_files.append(os.path.abspath(dir_for_mod_files))
for c in network.cells:
if c.neuroml2_source_file:
nml_src_files.append(c.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(c.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
for s in network.synapses:
if s.neuroml2_source_file:
nml_src_files.append(s.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(s.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
for i in network.input_sources:
if i.neuroml2_source_file:
nml_src_files.append(i.neuroml2_source_file)
dir_for_mod = os.path.dirname(os.path.abspath(i.neuroml2_source_file))
if not dir_for_mod in dirs_for_mod_files: dirs_for_mod_files.append(dir_for_mod)
temp_nml_doc = _extract_pynn_components_to_neuroml(network)
summary = temp_nml_doc.summary()
if 'IF_' in summary:
import tempfile
temp_nml_file = tempfile.NamedTemporaryFile(delete=False, suffix='.nml', dir=dir_for_mod_files)
print_v("Writing temporary NML file to: %s, summary: "%temp_nml_file.name)
print_v(summary)
writers.NeuroMLWriter.write(temp_nml_doc, temp_nml_file.name)
nml_src_files.append(temp_nml_file.name)
for f in nml_src_files:
from pyneuroml import pynml
print_v("Generating/compiling hoc/mod files for: %s"%f)
pynml.run_lems_with_jneuroml_neuron(f,
nogui=True,
only_generate_scripts=True,
compile_mods=True,
verbose=False)
for dir_for_mod_files in dirs_for_mod_files:
if not dir_for_mod_files in locations_mods_loaded_from:
print_v("Generated NEURON code; loading mechanisms from %s (cwd: %s; already loaded: %s)" % (dir_for_mod_files,os.getcwd(),locations_mods_loaded_from))
try:
from neuron import load_mechanisms
if os.getcwd()==dir_for_mod_files:
print_v("That's current dir => importing neuron module loads mods here...")
else:
load_mechanisms(dir_for_mod_files)
locations_mods_loaded_from.append(dir_for_mod_files)
except:
print_v("Failed to load mod file mechanisms...")
else:
print_v("Already loaded mechanisms from %s (all loaded: %s)" % (dir_for_mod_files,locations_mods_loaded_from)) | [
"def",
"_generate_neuron_files_from_neuroml",
"(",
"network",
",",
"verbose",
"=",
"False",
",",
"dir_for_mod_files",
"=",
"None",
")",
":",
"print_v",
"(",
"\"------------- Generating NEURON files from NeuroML for %s (default dir: %s)...\"",
"%",
"(",
"network",
".",
"id",
",",
"dir_for_mod_files",
")",
")",
"nml_src_files",
"=",
"[",
"]",
"from",
"neuroml",
"import",
"NeuroMLDocument",
"import",
"neuroml",
".",
"writers",
"as",
"writers",
"temp_nml_doc",
"=",
"NeuroMLDocument",
"(",
"id",
"=",
"\"temp\"",
")",
"dirs_for_mod_files",
"=",
"[",
"]",
"if",
"dir_for_mod_files",
"!=",
"None",
":",
"dirs_for_mod_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"dir_for_mod_files",
")",
")",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"neuroml2_source_file",
":",
"nml_src_files",
".",
"append",
"(",
"c",
".",
"neuroml2_source_file",
")",
"dir_for_mod",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"c",
".",
"neuroml2_source_file",
")",
")",
"if",
"not",
"dir_for_mod",
"in",
"dirs_for_mod_files",
":",
"dirs_for_mod_files",
".",
"append",
"(",
"dir_for_mod",
")",
"for",
"s",
"in",
"network",
".",
"synapses",
":",
"if",
"s",
".",
"neuroml2_source_file",
":",
"nml_src_files",
".",
"append",
"(",
"s",
".",
"neuroml2_source_file",
")",
"dir_for_mod",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"s",
".",
"neuroml2_source_file",
")",
")",
"if",
"not",
"dir_for_mod",
"in",
"dirs_for_mod_files",
":",
"dirs_for_mod_files",
".",
"append",
"(",
"dir_for_mod",
")",
"for",
"i",
"in",
"network",
".",
"input_sources",
":",
"if",
"i",
".",
"neuroml2_source_file",
":",
"nml_src_files",
".",
"append",
"(",
"i",
".",
"neuroml2_source_file",
")",
"dir_for_mod",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"i",
".",
"neuroml2_source_file",
")",
")",
"if",
"not",
"dir_for_mod",
"in",
"dirs_for_mod_files",
":",
"dirs_for_mod_files",
".",
"append",
"(",
"dir_for_mod",
")",
"temp_nml_doc",
"=",
"_extract_pynn_components_to_neuroml",
"(",
"network",
")",
"summary",
"=",
"temp_nml_doc",
".",
"summary",
"(",
")",
"if",
"'IF_'",
"in",
"summary",
":",
"import",
"tempfile",
"temp_nml_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
",",
"suffix",
"=",
"'.nml'",
",",
"dir",
"=",
"dir_for_mod_files",
")",
"print_v",
"(",
"\"Writing temporary NML file to: %s, summary: \"",
"%",
"temp_nml_file",
".",
"name",
")",
"print_v",
"(",
"summary",
")",
"writers",
".",
"NeuroMLWriter",
".",
"write",
"(",
"temp_nml_doc",
",",
"temp_nml_file",
".",
"name",
")",
"nml_src_files",
".",
"append",
"(",
"temp_nml_file",
".",
"name",
")",
"for",
"f",
"in",
"nml_src_files",
":",
"from",
"pyneuroml",
"import",
"pynml",
"print_v",
"(",
"\"Generating/compiling hoc/mod files for: %s\"",
"%",
"f",
")",
"pynml",
".",
"run_lems_with_jneuroml_neuron",
"(",
"f",
",",
"nogui",
"=",
"True",
",",
"only_generate_scripts",
"=",
"True",
",",
"compile_mods",
"=",
"True",
",",
"verbose",
"=",
"False",
")",
"for",
"dir_for_mod_files",
"in",
"dirs_for_mod_files",
":",
"if",
"not",
"dir_for_mod_files",
"in",
"locations_mods_loaded_from",
":",
"print_v",
"(",
"\"Generated NEURON code; loading mechanisms from %s (cwd: %s; already loaded: %s)\"",
"%",
"(",
"dir_for_mod_files",
",",
"os",
".",
"getcwd",
"(",
")",
",",
"locations_mods_loaded_from",
")",
")",
"try",
":",
"from",
"neuron",
"import",
"load_mechanisms",
"if",
"os",
".",
"getcwd",
"(",
")",
"==",
"dir_for_mod_files",
":",
"print_v",
"(",
"\"That's current dir => importing neuron module loads mods here...\"",
")",
"else",
":",
"load_mechanisms",
"(",
"dir_for_mod_files",
")",
"locations_mods_loaded_from",
".",
"append",
"(",
"dir_for_mod_files",
")",
"except",
":",
"print_v",
"(",
"\"Failed to load mod file mechanisms...\"",
")",
"else",
":",
"print_v",
"(",
"\"Already loaded mechanisms from %s (all loaded: %s)\"",
"%",
"(",
"dir_for_mod_files",
",",
"locations_mods_loaded_from",
")",
")"
] | Generate NEURON hoc/mod files from the NeuroML files which are marked as
included in the NeuroMLlite description; also compiles the mod files | [
"Generate",
"NEURON",
"hoc",
"/",
"mod",
"files",
"from",
"the",
"NeuroML",
"files",
"which",
"are",
"marked",
"as",
"included",
"in",
"the",
"NeuroMLlite",
"description",
";",
"also",
"compiles",
"the",
"mod",
"files"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L559-L630 |
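A minimal usage sketch for _generate_neuron_files_from_neuroml, assuming a NeuroMLlite network loaded from a JSON description; the loader import path and the example file name are assumptions for illustration and are not part of the dataset row above.
# Sketch: compile NEURON hoc/mod files for a NeuroMLlite network before running it.
# load_network_json's location and 'Example.json' are assumed, not taken from this row.
from neuromllite.utils import load_network_json
from neuromllite.NetworkGenerator import _generate_neuron_files_from_neuroml

net = load_network_json('Example.json')
_generate_neuron_files_from_neuroml(net, dir_for_mod_files='./mod_files')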
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | generate_and_run | def generate_and_run(simulation,
simulator,
network=None,
return_results=False,
base_dir=None,
target_dir=None,
num_processors=1):
"""
Generates the network in the specified simulator and runs, if appropriate
"""
if network == None:
network = load_network_json(simulation.network)
print_v("Generating network %s and running in simulator: %s..." % (network.id, simulator))
if simulator == 'NEURON':
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from neuromllite.NeuronHandler import NeuronHandler
nrn_handler = NeuronHandler()
for c in network.cells:
if c.neuroml2_source_file:
src_dir = os.path.dirname(os.path.abspath(c.neuroml2_source_file))
nrn_handler.executeHoc('load_file("%s/%s.hoc")' % (src_dir, c.id))
generate_network(network, nrn_handler, generate_network, base_dir)
if return_results:
raise NotImplementedError("Reloading results not supported in Neuron yet...")
elif simulator.lower() == 'sonata': # Will not "run" obviously...
from neuromllite.SonataHandler import SonataHandler
sonata_handler = SonataHandler()
generate_network(network, sonata_handler, always_include_props=True, base_dir=base_dir)
print_v("Done with Sonata...")
elif simulator.lower().startswith('graph'): # Will not "run" obviously...
from neuromllite.GraphVizHandler import GraphVizHandler, engines
try:
if simulator[-1].isalpha():
print(simulator)
print(simulator[5:])
print(simulator[5:-1])
engine = engines[simulator[-1]]
level = int(simulator[5:-1])
else:
engine = 'dot'
level = int(simulator[5:])
except Exception as e:
print(e)
print_v("Error parsing: %s"%simulator)
print_v("Graphs of the network structure can be generated at many levels of detail (1-6, required) and laid out using GraphViz engines (d - dot (default); c - circo; n - neato; f - fdp), so use: -graph3c, -graph2, -graph4f etc.")
return
handler = GraphVizHandler(level, engine=engine, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with GraphViz...")
elif simulator.lower().startswith('matrix'): # Will not "run" obviously...
from neuromllite.MatrixHandler import MatrixHandler
try:
level = int(simulator[6:])
except:
print_v("Error parsing: %s"%simulator)
print_v("Matrices of the network structure can be generated at many levels of detail (1-n, required), so use: -matrix1, -matrix2, etc.")
return
handler = MatrixHandler(level, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with MatrixHandler...")
elif simulator.startswith('PyNN'):
#_generate_neuron_files_from_neuroml(network)
simulator_name = simulator.split('_')[1].lower()
from neuromllite.PyNNHandler import PyNNHandler
pynn_handler = PyNNHandler(simulator_name, simulation.dt, network.id)
syn_cell_params = {}
for proj in network.projections:
synapse = network.get_child(proj.synapse, 'synapses')
post_pop = network.get_child(proj.postsynaptic, 'populations')
if not post_pop.component in syn_cell_params:
syn_cell_params[post_pop.component] = {}
for p in synapse.parameters:
post = ''
if synapse.pynn_receptor_type == "excitatory":
post = '_E'
elif synapse.pynn_receptor_type == "inhibitory":
post = '_I'
syn_cell_params[post_pop.component]['%s%s' % (p, post)] = synapse.parameters[p]
cells = {}
for c in network.cells:
if c.pynn_cell:
cell_params = {}
if c.parameters:
for p in c.parameters:
cell_params[p] = evaluate(c.parameters[p], network.parameters)
dont_set_here = ['tau_syn_E', 'e_rev_E', 'tau_syn_I', 'e_rev_I']
for d in dont_set_here:
if d in c.parameters:
raise Exception(('Synaptic parameters like %s should be set '
'in individual synapses, not in the list of parameters associated with the cell') % d)
if c.id in syn_cell_params:
cell_params.update(syn_cell_params[c.id])
print_v("Creating cell with params: %s" % cell_params)
exec('cells["%s"] = pynn_handler.sim.%s(**cell_params)' % (c.id, c.pynn_cell))
if c.pynn_cell != 'SpikeSourcePoisson':
exec("cells['%s'].default_initial_values['v'] = cells['%s'].parameter_space['v_rest'].base_value" % (c.id, c.id))
pynn_handler.set_cells(cells)
receptor_types = {}
for s in network.synapses:
if s.pynn_receptor_type:
receptor_types[s.id] = s.pynn_receptor_type
pynn_handler.set_receptor_types(receptor_types)
for input_source in network.input_sources:
if input_source.pynn_input:
pynn_handler.add_input_source(input_source)
generate_network(network, pynn_handler, always_include_props=True, base_dir=base_dir)
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
if pop.can_record('v'):
pop.record('v')
pynn_handler.sim.run(simulation.duration)
pynn_handler.sim.end()
traces = {}
events = {}
if not 'NeuroML' in simulator:
from neo.io import PyNNTextIO
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
filename = "%s.%s.v.dat" % (simulation.id, pop.label)
all_columns = []
print_v("Writing data for %s to %s" % (pop.label, filename))
for i in range(len(pop)):
if pop.can_record('v'):
ref = '%s[%i]'%(pop.label,i)
traces[ref] = []
data = pop.get_data('v', gather=False)
for segment in data.segments:
vm = segment.analogsignals[0].transpose()[i]
if len(all_columns) == 0:
tt = np.array([t * simulation.dt / 1000. for t in range(len(vm))])
all_columns.append(tt)
vm_si = [float(v / 1000.) for v in vm]
traces[ref] = vm_si
all_columns.append(vm_si)
times_vm = np.array(all_columns).transpose()
np.savetxt(filename, times_vm, delimiter='\t', fmt='%s')
if return_results:
_print_result_info(traces, events)
return traces, events
elif simulator == 'NetPyNE':
if target_dir==None:
target_dir='./'
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from netpyne import specs
from netpyne import sim
# Note NetPyNE from this branch is required: https://github.com/Neurosim-lab/netpyne/tree/neuroml_updates
from netpyne.conversion.neuromlFormat import NetPyNEBuilder
import pprint; pp = pprint.PrettyPrinter(depth=6)
netParams = specs.NetParams()
simConfig = specs.SimConfig()
netpyne_handler = NetPyNEBuilder(netParams, simConfig=simConfig, verbose=True)
generate_network(network, netpyne_handler, base_dir=base_dir)
netpyne_handler.finalise()
simConfig = specs.SimConfig()
simConfig.tstop = simulation.duration
simConfig.duration = simulation.duration
simConfig.dt = simulation.dt
simConfig.seed = simulation.seed
simConfig.recordStep = simulation.dt
simConfig.recordCells = ['all']
simConfig.recordTraces = {}
for pop in netpyne_handler.popParams.values():
if 'all' in simulation.recordTraces or pop.id in simulation.recordTraces:
for i in pop['cellsList']:
id = pop['pop']
index = i['cellLabel']
simConfig.recordTraces['v_%s_%s' % (id, index)] = {'sec':'soma', 'loc':0.5, 'var':'v', 'conds':{'pop':id, 'cellLabel':index}}
simConfig.saveDat = True
print_v("NetPyNE netParams: ")
pp.pprint(netParams.todict())
#print_v("NetPyNE simConfig: ")
#pp.pprint(simConfig.todict())
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
sim.net.createPops()
cells = sim.net.createCells() # instantiate network cells based on defined populations
for proj_id in netpyne_handler.projection_infos.keys():
projName, prePop, postPop, synapse, ptype = netpyne_handler.projection_infos[proj_id]
print_v("Creating connections for %s (%s): %s->%s via %s" % (projName, ptype, prePop, postPop, synapse))
preComp = netpyne_handler.pop_ids_vs_components[prePop]
for conn in netpyne_handler.connections[projName]:
pre_id, pre_seg, pre_fract, post_id, post_seg, post_fract, delay, weight = conn
#connParam = {'delay':delay,'weight':weight,'synsPerConn':1, 'sec':post_seg, 'loc':post_fract, 'threshold':threshold}
connParam = {'delay':delay, 'weight':weight, 'synsPerConn':1, 'sec':post_seg, 'loc':post_fract}
if ptype == 'electricalProjection':
if weight != 1:
raise Exception('Cannot yet support inputs where weight !=1!')
connParam = {'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract,
'gapJunction': True,
'weight': weight}
else:
connParam = {'delay': delay,
'weight': weight,
'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract}
#'threshold': threshold}
connParam['synMech'] = synapse
if post_id in sim.net.gid2lid: # check if postsyn is in this node's list of gids
sim.net._addCellConn(connParam, pre_id, post_id)
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.runSim() # run parallel Neuron simulation
sim.gatherData() # gather spiking data and cell info from each node
sim.saveData() # save params, cell info and sim output to file (pickle,mat,txt,etc)
if return_results:
raise NotImplementedError("Reloading results not supported in NetPyNE yet...")
elif simulator == 'jNeuroML' or simulator == 'jNeuroML_NEURON' or simulator == 'jNeuroML_NetPyNE':
from pyneuroml.lems import generate_lems_file_for_neuroml
from pyneuroml import pynml
lems_file_name = 'LEMS_%s.xml' % simulation.id
nml_file_name, nml_doc = generate_neuroml2_from_network(network, base_dir=base_dir, target_dir=target_dir)
included_files = ['PyNN.xml']
for c in network.cells:
if c.lems_source_file:
included_files.append(c.lems_source_file)
'''
if network.cells:
for c in network.cells:
included_files.append(c.neuroml2_source_file)
'''
if network.synapses:
for s in network.synapses:
if s.lems_source_file:
included_files.append(s.lems_source_file)
print_v("Generating LEMS file prior to running in %s" % simulator)
pops_plot_save = []
pops_spike_save = []
gen_plots_for_quantities = {}
gen_saves_for_quantities = {}
for p in network.populations:
if simulation.recordTraces and ('all' in simulation.recordTraces or p.id in simulation.recordTraces):
pops_plot_save.append(p.id)
if simulation.recordSpikes and ('all' in simulation.recordSpikes or p.id in simulation.recordSpikes):
pops_spike_save.append(p.id)
if simulation.recordRates and ('all' in simulation.recordRates or p.id in simulation.recordRates):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/r' % (p.id, i, p.component)
gen_plots_for_quantities['%s_%i_r' % (p.id, i)] = [quantity]
gen_saves_for_quantities['%s_%i.r.dat' % (p.id, i)] = [quantity]
if simulation.recordVariables:
for var in simulation.recordVariables:
to_rec = simulation.recordVariables[var]
if ('all' in to_rec or p.id in to_rec):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/%s' % (p.id, i, p.component,var)
gen_plots_for_quantities['%s_%i_%s' % (p.id, i, var)] = [quantity]
gen_saves_for_quantities['%s_%i.%s.dat' % (p.id, i, var)] = [quantity]
generate_lems_file_for_neuroml(simulation.id,
nml_file_name,
network.id,
simulation.duration,
simulation.dt,
lems_file_name,
target_dir=target_dir if target_dir else '.',
nml_doc=nml_doc, # Use this if the nml doc has already been loaded (to avoid delay in reload)
include_extra_files=included_files,
gen_plots_for_all_v=False,
plot_all_segments=False,
gen_plots_for_quantities=gen_plots_for_quantities, # Dict with displays vs lists of quantity paths
gen_plots_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_all_v=False,
save_all_segments=False,
gen_saves_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_quantities=gen_saves_for_quantities, # Dict with file names vs lists of quantity paths
gen_spike_saves_for_all_somas=False,
gen_spike_saves_for_only_populations=pops_spike_save, # List of populations, all pops if = []
gen_spike_saves_for_cells={}, # Dict with file names vs lists of quantity paths
spike_time_format='ID_TIME',
copy_neuroml=True,
lems_file_generate_seed=12345,
report_file_name='report.%s.txt' % simulation.id,
simulation_seed=simulation.seed if simulation.seed else 12345,
verbose=True)
lems_file_name = _locate_file(lems_file_name, target_dir)
if simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NetPyNE':
results = pynml.run_lems_with_jneuroml_netpyne(lems_file_name,
nogui=True,
verbose=True,
load_saved_data=return_results,
reload_events=return_results,
num_processors=num_processors)
print_v("Finished running LEMS file %s in %s (returning results: %s)" % (lems_file_name, simulator, return_results))
if return_results:
traces, events = results
_print_result_info(traces, events)
return results | python | def generate_and_run(simulation,
simulator,
network=None,
return_results=False,
base_dir=None,
target_dir=None,
num_processors=1):
"""
Generates the network in the specified simulator and runs, if appropriate
"""
if network == None:
network = load_network_json(simulation.network)
print_v("Generating network %s and running in simulator: %s..." % (network.id, simulator))
if simulator == 'NEURON':
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from neuromllite.NeuronHandler import NeuronHandler
nrn_handler = NeuronHandler()
for c in network.cells:
if c.neuroml2_source_file:
src_dir = os.path.dirname(os.path.abspath(c.neuroml2_source_file))
nrn_handler.executeHoc('load_file("%s/%s.hoc")' % (src_dir, c.id))
generate_network(network, nrn_handler, generate_network, base_dir)
if return_results:
raise NotImplementedError("Reloading results not supported in Neuron yet...")
elif simulator.lower() == 'sonata': # Will not "run" obviously...
from neuromllite.SonataHandler import SonataHandler
sonata_handler = SonataHandler()
generate_network(network, sonata_handler, always_include_props=True, base_dir=base_dir)
print_v("Done with Sonata...")
elif simulator.lower().startswith('graph'): # Will not "run" obviously...
from neuromllite.GraphVizHandler import GraphVizHandler, engines
try:
if simulator[-1].isalpha():
print(simulator)
print(simulator[5:])
print(simulator[5:-1])
engine = engines[simulator[-1]]
level = int(simulator[5:-1])
else:
engine = 'dot'
level = int(simulator[5:])
except Exception as e:
print(e)
print_v("Error parsing: %s"%simulator)
print_v("Graphs of the network structure can be generated at many levels of detail (1-6, required) and laid out using GraphViz engines (d - dot (default); c - circo; n - neato; f - fdp), so use: -graph3c, -graph2, -graph4f etc.")
return
handler = GraphVizHandler(level, engine=engine, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with GraphViz...")
elif simulator.lower().startswith('matrix'): # Will not "run" obviously...
from neuromllite.MatrixHandler import MatrixHandler
try:
level = int(simulator[6:])
except:
print_v("Error parsing: %s"%simulator)
print_v("Matrices of the network structure can be generated at many levels of detail (1-n, required), so use: -matrix1, -matrix2, etc.")
return
handler = MatrixHandler(level, nl_network=network)
generate_network(network, handler, always_include_props=True, base_dir=base_dir)
print_v("Done with MatrixHandler...")
elif simulator.startswith('PyNN'):
#_generate_neuron_files_from_neuroml(network)
simulator_name = simulator.split('_')[1].lower()
from neuromllite.PyNNHandler import PyNNHandler
pynn_handler = PyNNHandler(simulator_name, simulation.dt, network.id)
syn_cell_params = {}
for proj in network.projections:
synapse = network.get_child(proj.synapse, 'synapses')
post_pop = network.get_child(proj.postsynaptic, 'populations')
if not post_pop.component in syn_cell_params:
syn_cell_params[post_pop.component] = {}
for p in synapse.parameters:
post = ''
if synapse.pynn_receptor_type == "excitatory":
post = '_E'
elif synapse.pynn_receptor_type == "inhibitory":
post = '_I'
syn_cell_params[post_pop.component]['%s%s' % (p, post)] = synapse.parameters[p]
cells = {}
for c in network.cells:
if c.pynn_cell:
cell_params = {}
if c.parameters:
for p in c.parameters:
cell_params[p] = evaluate(c.parameters[p], network.parameters)
dont_set_here = ['tau_syn_E', 'e_rev_E', 'tau_syn_I', 'e_rev_I']
for d in dont_set_here:
if d in c.parameters:
raise Exception(('Synaptic parameters like %s should be set '
'in individual synapses, not in the list of parameters associated with the cell') % d)
if c.id in syn_cell_params:
cell_params.update(syn_cell_params[c.id])
print_v("Creating cell with params: %s" % cell_params)
exec('cells["%s"] = pynn_handler.sim.%s(**cell_params)' % (c.id, c.pynn_cell))
if c.pynn_cell != 'SpikeSourcePoisson':
exec("cells['%s'].default_initial_values['v'] = cells['%s'].parameter_space['v_rest'].base_value" % (c.id, c.id))
pynn_handler.set_cells(cells)
receptor_types = {}
for s in network.synapses:
if s.pynn_receptor_type:
receptor_types[s.id] = s.pynn_receptor_type
pynn_handler.set_receptor_types(receptor_types)
for input_source in network.input_sources:
if input_source.pynn_input:
pynn_handler.add_input_source(input_source)
generate_network(network, pynn_handler, always_include_props=True, base_dir=base_dir)
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
if pop.can_record('v'):
pop.record('v')
pynn_handler.sim.run(simulation.duration)
pynn_handler.sim.end()
traces = {}
events = {}
if not 'NeuroML' in simulator:
from neo.io import PyNNTextIO
for pid in pynn_handler.populations:
pop = pynn_handler.populations[pid]
if 'all' in simulation.recordTraces or pop.label in simulation.recordTraces:
filename = "%s.%s.v.dat" % (simulation.id, pop.label)
all_columns = []
print_v("Writing data for %s to %s" % (pop.label, filename))
for i in range(len(pop)):
if pop.can_record('v'):
ref = '%s[%i]'%(pop.label,i)
traces[ref] = []
data = pop.get_data('v', gather=False)
for segment in data.segments:
vm = segment.analogsignals[0].transpose()[i]
if len(all_columns) == 0:
tt = np.array([t * simulation.dt / 1000. for t in range(len(vm))])
all_columns.append(tt)
vm_si = [float(v / 1000.) for v in vm]
traces[ref] = vm_si
all_columns.append(vm_si)
times_vm = np.array(all_columns).transpose()
np.savetxt(filename, times_vm, delimiter='\t', fmt='%s')
if return_results:
_print_result_info(traces, events)
return traces, events
elif simulator == 'NetPyNE':
if target_dir==None:
target_dir='./'
_generate_neuron_files_from_neuroml(network, dir_for_mod_files=target_dir)
from netpyne import specs
from netpyne import sim
# Note NetPyNE from this branch is required: https://github.com/Neurosim-lab/netpyne/tree/neuroml_updates
from netpyne.conversion.neuromlFormat import NetPyNEBuilder
import pprint; pp = pprint.PrettyPrinter(depth=6)
netParams = specs.NetParams()
simConfig = specs.SimConfig()
netpyne_handler = NetPyNEBuilder(netParams, simConfig=simConfig, verbose=True)
generate_network(network, netpyne_handler, base_dir=base_dir)
netpyne_handler.finalise()
simConfig = specs.SimConfig()
simConfig.tstop = simulation.duration
simConfig.duration = simulation.duration
simConfig.dt = simulation.dt
simConfig.seed = simulation.seed
simConfig.recordStep = simulation.dt
simConfig.recordCells = ['all']
simConfig.recordTraces = {}
for pop in netpyne_handler.popParams.values():
if 'all' in simulation.recordTraces or pop.id in simulation.recordTraces:
for i in pop['cellsList']:
id = pop['pop']
index = i['cellLabel']
simConfig.recordTraces['v_%s_%s' % (id, index)] = {'sec':'soma', 'loc':0.5, 'var':'v', 'conds':{'pop':id, 'cellLabel':index}}
simConfig.saveDat = True
print_v("NetPyNE netParams: ")
pp.pprint(netParams.todict())
#print_v("NetPyNE simConfig: ")
#pp.pprint(simConfig.todict())
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
sim.net.createPops()
cells = sim.net.createCells() # instantiate network cells based on defined populations
for proj_id in netpyne_handler.projection_infos.keys():
projName, prePop, postPop, synapse, ptype = netpyne_handler.projection_infos[proj_id]
print_v("Creating connections for %s (%s): %s->%s via %s" % (projName, ptype, prePop, postPop, synapse))
preComp = netpyne_handler.pop_ids_vs_components[prePop]
for conn in netpyne_handler.connections[projName]:
pre_id, pre_seg, pre_fract, post_id, post_seg, post_fract, delay, weight = conn
#connParam = {'delay':delay,'weight':weight,'synsPerConn':1, 'sec':post_seg, 'loc':post_fract, 'threshold':threshold}
connParam = {'delay':delay, 'weight':weight, 'synsPerConn':1, 'sec':post_seg, 'loc':post_fract}
if ptype == 'electricalProjection':
if weight != 1:
raise Exception('Cannot yet support inputs where weight !=1!')
connParam = {'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract,
'gapJunction': True,
'weight': weight}
else:
connParam = {'delay': delay,
'weight': weight,
'synsPerConn': 1,
'sec': post_seg,
'loc': post_fract}
#'threshold': threshold}
connParam['synMech'] = synapse
if post_id in sim.net.gid2lid: # check if postsyn is in this node's list of gids
sim.net._addCellConn(connParam, pre_id, post_id)
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.runSim() # run parallel Neuron simulation
sim.gatherData() # gather spiking data and cell info from each node
sim.saveData() # save params, cell info and sim output to file (pickle,mat,txt,etc)
if return_results:
raise NotImplementedError("Reloading results not supported in NetPyNE yet...")
elif simulator == 'jNeuroML' or simulator == 'jNeuroML_NEURON' or simulator == 'jNeuroML_NetPyNE':
from pyneuroml.lems import generate_lems_file_for_neuroml
from pyneuroml import pynml
lems_file_name = 'LEMS_%s.xml' % simulation.id
nml_file_name, nml_doc = generate_neuroml2_from_network(network, base_dir=base_dir, target_dir=target_dir)
included_files = ['PyNN.xml']
for c in network.cells:
if c.lems_source_file:
included_files.append(c.lems_source_file)
'''
if network.cells:
for c in network.cells:
included_files.append(c.neuroml2_source_file)
'''
if network.synapses:
for s in network.synapses:
if s.lems_source_file:
included_files.append(s.lems_source_file)
print_v("Generating LEMS file prior to running in %s" % simulator)
pops_plot_save = []
pops_spike_save = []
gen_plots_for_quantities = {}
gen_saves_for_quantities = {}
for p in network.populations:
if simulation.recordTraces and ('all' in simulation.recordTraces or p.id in simulation.recordTraces):
pops_plot_save.append(p.id)
if simulation.recordSpikes and ('all' in simulation.recordSpikes or p.id in simulation.recordSpikes):
pops_spike_save.append(p.id)
if simulation.recordRates and ('all' in simulation.recordRates or p.id in simulation.recordRates):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/r' % (p.id, i, p.component)
gen_plots_for_quantities['%s_%i_r' % (p.id, i)] = [quantity]
gen_saves_for_quantities['%s_%i.r.dat' % (p.id, i)] = [quantity]
if simulation.recordVariables:
for var in simulation.recordVariables:
to_rec = simulation.recordVariables[var]
if ('all' in to_rec or p.id in to_rec):
size = evaluate(p.size, network.parameters)
for i in range(size):
quantity = '%s/%i/%s/%s' % (p.id, i, p.component,var)
gen_plots_for_quantities['%s_%i_%s' % (p.id, i, var)] = [quantity]
gen_saves_for_quantities['%s_%i.%s.dat' % (p.id, i, var)] = [quantity]
generate_lems_file_for_neuroml(simulation.id,
nml_file_name,
network.id,
simulation.duration,
simulation.dt,
lems_file_name,
target_dir=target_dir if target_dir else '.',
nml_doc=nml_doc, # Use this if the nml doc has already been loaded (to avoid delay in reload)
include_extra_files=included_files,
gen_plots_for_all_v=False,
plot_all_segments=False,
gen_plots_for_quantities=gen_plots_for_quantities, # Dict with displays vs lists of quantity paths
gen_plots_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_all_v=False,
save_all_segments=False,
gen_saves_for_only_populations=pops_plot_save, # List of populations, all pops if = []
gen_saves_for_quantities=gen_saves_for_quantities, # Dict with file names vs lists of quantity paths
gen_spike_saves_for_all_somas=False,
gen_spike_saves_for_only_populations=pops_spike_save, # List of populations, all pops if = []
gen_spike_saves_for_cells={}, # Dict with file names vs lists of quantity paths
spike_time_format='ID_TIME',
copy_neuroml=True,
lems_file_generate_seed=12345,
report_file_name='report.%s.txt' % simulation.id,
simulation_seed=simulation.seed if simulation.seed else 12345,
verbose=True)
lems_file_name = _locate_file(lems_file_name, target_dir)
if simulator == 'jNeuroML':
results = pynml.run_lems_with_jneuroml(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NEURON':
results = pynml.run_lems_with_jneuroml_neuron(lems_file_name,
nogui=True,
load_saved_data=return_results,
reload_events=return_results)
elif simulator == 'jNeuroML_NetPyNE':
results = pynml.run_lems_with_jneuroml_netpyne(lems_file_name,
nogui=True,
verbose=True,
load_saved_data=return_results,
reload_events=return_results,
num_processors=num_processors)
print_v("Finished running LEMS file %s in %s (returning results: %s)" % (lems_file_name, simulator, return_results))
if return_results:
traces, events = results
_print_result_info(traces, events)
return results | [
"def",
"generate_and_run",
"(",
"simulation",
",",
"simulator",
",",
"network",
"=",
"None",
",",
"return_results",
"=",
"False",
",",
"base_dir",
"=",
"None",
",",
"target_dir",
"=",
"None",
",",
"num_processors",
"=",
"1",
")",
":",
"if",
"network",
"==",
"None",
":",
"network",
"=",
"load_network_json",
"(",
"simulation",
".",
"network",
")",
"print_v",
"(",
"\"Generating network %s and running in simulator: %s...\"",
"%",
"(",
"network",
".",
"id",
",",
"simulator",
")",
")",
"if",
"simulator",
"==",
"'NEURON'",
":",
"_generate_neuron_files_from_neuroml",
"(",
"network",
",",
"dir_for_mod_files",
"=",
"target_dir",
")",
"from",
"neuromllite",
".",
"NeuronHandler",
"import",
"NeuronHandler",
"nrn_handler",
"=",
"NeuronHandler",
"(",
")",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"neuroml2_source_file",
":",
"src_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"c",
".",
"neuroml2_source_file",
")",
")",
"nrn_handler",
".",
"executeHoc",
"(",
"'load_file(\"%s/%s.hoc\")'",
"%",
"(",
"src_dir",
",",
"c",
".",
"id",
")",
")",
"generate_network",
"(",
"network",
",",
"nrn_handler",
",",
"generate_network",
",",
"base_dir",
")",
"if",
"return_results",
":",
"raise",
"NotImplementedError",
"(",
"\"Reloading results not supported in Neuron yet...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
"==",
"'sonata'",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"SonataHandler",
"import",
"SonataHandler",
"sonata_handler",
"=",
"SonataHandler",
"(",
")",
"generate_network",
"(",
"network",
",",
"sonata_handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with Sonata...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'graph'",
")",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"GraphVizHandler",
"import",
"GraphVizHandler",
",",
"engines",
"try",
":",
"if",
"simulator",
"[",
"-",
"1",
"]",
".",
"isalpha",
"(",
")",
":",
"print",
"simulator",
"print",
"simulator",
"[",
"5",
":",
"]",
"print",
"simulator",
"[",
"5",
":",
"-",
"1",
"]",
"engine",
"=",
"engines",
"[",
"simulator",
"[",
"-",
"1",
"]",
"]",
"level",
"=",
"int",
"(",
"simulator",
"[",
"5",
":",
"-",
"1",
"]",
")",
"else",
":",
"engine",
"=",
"'dot'",
"level",
"=",
"int",
"(",
"simulator",
"[",
"5",
":",
"]",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"e",
"print_v",
"(",
"\"Error parsing: %s\"",
"%",
"simulator",
")",
"print_v",
"(",
"\"Graphs of the network structure can be generated at many levels of detail (1-6, required) and laid out using GraphViz engines (d - dot (default); c - circo; n - neato; f - fdp), so use: -graph3c, -graph2, -graph4f etc.\"",
")",
"return",
"handler",
"=",
"GraphVizHandler",
"(",
"level",
",",
"engine",
"=",
"engine",
",",
"nl_network",
"=",
"network",
")",
"generate_network",
"(",
"network",
",",
"handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with GraphViz...\"",
")",
"elif",
"simulator",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'matrix'",
")",
":",
"# Will not \"run\" obviously...",
"from",
"neuromllite",
".",
"MatrixHandler",
"import",
"MatrixHandler",
"try",
":",
"level",
"=",
"int",
"(",
"simulator",
"[",
"6",
":",
"]",
")",
"except",
":",
"print_v",
"(",
"\"Error parsing: %s\"",
"%",
"simulator",
")",
"print_v",
"(",
"\"Matrices of the network structure can be generated at many levels of detail (1-n, required), so use: -matrix1, -matrix2, etc.\"",
")",
"return",
"handler",
"=",
"MatrixHandler",
"(",
"level",
",",
"nl_network",
"=",
"network",
")",
"generate_network",
"(",
"network",
",",
"handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"print_v",
"(",
"\"Done with MatrixHandler...\"",
")",
"elif",
"simulator",
".",
"startswith",
"(",
"'PyNN'",
")",
":",
"#_generate_neuron_files_from_neuroml(network)",
"simulator_name",
"=",
"simulator",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"from",
"neuromllite",
".",
"PyNNHandler",
"import",
"PyNNHandler",
"pynn_handler",
"=",
"PyNNHandler",
"(",
"simulator_name",
",",
"simulation",
".",
"dt",
",",
"network",
".",
"id",
")",
"syn_cell_params",
"=",
"{",
"}",
"for",
"proj",
"in",
"network",
".",
"projections",
":",
"synapse",
"=",
"network",
".",
"get_child",
"(",
"proj",
".",
"synapse",
",",
"'synapses'",
")",
"post_pop",
"=",
"network",
".",
"get_child",
"(",
"proj",
".",
"postsynaptic",
",",
"'populations'",
")",
"if",
"not",
"post_pop",
".",
"component",
"in",
"syn_cell_params",
":",
"syn_cell_params",
"[",
"post_pop",
".",
"component",
"]",
"=",
"{",
"}",
"for",
"p",
"in",
"synapse",
".",
"parameters",
":",
"post",
"=",
"''",
"if",
"synapse",
".",
"pynn_receptor_type",
"==",
"\"excitatory\"",
":",
"post",
"=",
"'_E'",
"elif",
"synapse",
".",
"pynn_receptor_type",
"==",
"\"inhibitory\"",
":",
"post",
"=",
"'_I'",
"syn_cell_params",
"[",
"post_pop",
".",
"component",
"]",
"[",
"'%s%s'",
"%",
"(",
"p",
",",
"post",
")",
"]",
"=",
"synapse",
".",
"parameters",
"[",
"p",
"]",
"cells",
"=",
"{",
"}",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"pynn_cell",
":",
"cell_params",
"=",
"{",
"}",
"if",
"c",
".",
"parameters",
":",
"for",
"p",
"in",
"c",
".",
"parameters",
":",
"cell_params",
"[",
"p",
"]",
"=",
"evaluate",
"(",
"c",
".",
"parameters",
"[",
"p",
"]",
",",
"network",
".",
"parameters",
")",
"dont_set_here",
"=",
"[",
"'tau_syn_E'",
",",
"'e_rev_E'",
",",
"'tau_syn_I'",
",",
"'e_rev_I'",
"]",
"for",
"d",
"in",
"dont_set_here",
":",
"if",
"d",
"in",
"c",
".",
"parameters",
":",
"raise",
"Exception",
"(",
"'Synaptic parameters like %s should be set '",
"+",
"'in individual synapses, not in the list of parameters associated with the cell'",
"%",
"d",
")",
"if",
"c",
".",
"id",
"in",
"syn_cell_params",
":",
"cell_params",
".",
"update",
"(",
"syn_cell_params",
"[",
"c",
".",
"id",
"]",
")",
"print_v",
"(",
"\"Creating cell with params: %s\"",
"%",
"cell_params",
")",
"exec",
"(",
"'cells[\"%s\"] = pynn_handler.sim.%s(**cell_params)'",
"%",
"(",
"c",
".",
"id",
",",
"c",
".",
"pynn_cell",
")",
")",
"if",
"c",
".",
"pynn_cell",
"!=",
"'SpikeSourcePoisson'",
":",
"exec",
"(",
"\"cells['%s'].default_initial_values['v'] = cells['%s'].parameter_space['v_rest'].base_value\"",
"%",
"(",
"c",
".",
"id",
",",
"c",
".",
"id",
")",
")",
"pynn_handler",
".",
"set_cells",
"(",
"cells",
")",
"receptor_types",
"=",
"{",
"}",
"for",
"s",
"in",
"network",
".",
"synapses",
":",
"if",
"s",
".",
"pynn_receptor_type",
":",
"receptor_types",
"[",
"s",
".",
"id",
"]",
"=",
"s",
".",
"pynn_receptor_type",
"pynn_handler",
".",
"set_receptor_types",
"(",
"receptor_types",
")",
"for",
"input_source",
"in",
"network",
".",
"input_sources",
":",
"if",
"input_source",
".",
"pynn_input",
":",
"pynn_handler",
".",
"add_input_source",
"(",
"input_source",
")",
"generate_network",
"(",
"network",
",",
"pynn_handler",
",",
"always_include_props",
"=",
"True",
",",
"base_dir",
"=",
"base_dir",
")",
"for",
"pid",
"in",
"pynn_handler",
".",
"populations",
":",
"pop",
"=",
"pynn_handler",
".",
"populations",
"[",
"pid",
"]",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"label",
"in",
"simulation",
".",
"recordTraces",
":",
"if",
"pop",
".",
"can_record",
"(",
"'v'",
")",
":",
"pop",
".",
"record",
"(",
"'v'",
")",
"pynn_handler",
".",
"sim",
".",
"run",
"(",
"simulation",
".",
"duration",
")",
"pynn_handler",
".",
"sim",
".",
"end",
"(",
")",
"traces",
"=",
"{",
"}",
"events",
"=",
"{",
"}",
"if",
"not",
"'NeuroML'",
"in",
"simulator",
":",
"from",
"neo",
".",
"io",
"import",
"PyNNTextIO",
"for",
"pid",
"in",
"pynn_handler",
".",
"populations",
":",
"pop",
"=",
"pynn_handler",
".",
"populations",
"[",
"pid",
"]",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"label",
"in",
"simulation",
".",
"recordTraces",
":",
"filename",
"=",
"\"%s.%s.v.dat\"",
"%",
"(",
"simulation",
".",
"id",
",",
"pop",
".",
"label",
")",
"all_columns",
"=",
"[",
"]",
"print_v",
"(",
"\"Writing data for %s to %s\"",
"%",
"(",
"pop",
".",
"label",
",",
"filename",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pop",
")",
")",
":",
"if",
"pop",
".",
"can_record",
"(",
"'v'",
")",
":",
"ref",
"=",
"'%s[%i]'",
"%",
"(",
"pop",
".",
"label",
",",
"i",
")",
"traces",
"[",
"ref",
"]",
"=",
"[",
"]",
"data",
"=",
"pop",
".",
"get_data",
"(",
"'v'",
",",
"gather",
"=",
"False",
")",
"for",
"segment",
"in",
"data",
".",
"segments",
":",
"vm",
"=",
"segment",
".",
"analogsignals",
"[",
"0",
"]",
".",
"transpose",
"(",
")",
"[",
"i",
"]",
"if",
"len",
"(",
"all_columns",
")",
"==",
"0",
":",
"tt",
"=",
"np",
".",
"array",
"(",
"[",
"t",
"*",
"simulation",
".",
"dt",
"/",
"1000.",
"for",
"t",
"in",
"range",
"(",
"len",
"(",
"vm",
")",
")",
"]",
")",
"all_columns",
".",
"append",
"(",
"tt",
")",
"vm_si",
"=",
"[",
"float",
"(",
"v",
"/",
"1000.",
")",
"for",
"v",
"in",
"vm",
"]",
"traces",
"[",
"ref",
"]",
"=",
"vm_si",
"all_columns",
".",
"append",
"(",
"vm_si",
")",
"times_vm",
"=",
"np",
".",
"array",
"(",
"all_columns",
")",
".",
"transpose",
"(",
")",
"np",
".",
"savetxt",
"(",
"filename",
",",
"times_vm",
",",
"delimiter",
"=",
"'\\t'",
",",
"fmt",
"=",
"'%s'",
")",
"if",
"return_results",
":",
"_print_result_info",
"(",
"traces",
",",
"events",
")",
"return",
"traces",
",",
"events",
"elif",
"simulator",
"==",
"'NetPyNE'",
":",
"if",
"target_dir",
"==",
"None",
":",
"target_dir",
"=",
"'./'",
"_generate_neuron_files_from_neuroml",
"(",
"network",
",",
"dir_for_mod_files",
"=",
"target_dir",
")",
"from",
"netpyne",
"import",
"specs",
"from",
"netpyne",
"import",
"sim",
"# Note NetPyNE from this branch is required: https://github.com/Neurosim-lab/netpyne/tree/neuroml_updates",
"from",
"netpyne",
".",
"conversion",
".",
"neuromlFormat",
"import",
"NetPyNEBuilder",
"import",
"pprint",
"pp",
"=",
"pprint",
".",
"PrettyPrinter",
"(",
"depth",
"=",
"6",
")",
"netParams",
"=",
"specs",
".",
"NetParams",
"(",
")",
"simConfig",
"=",
"specs",
".",
"SimConfig",
"(",
")",
"netpyne_handler",
"=",
"NetPyNEBuilder",
"(",
"netParams",
",",
"simConfig",
"=",
"simConfig",
",",
"verbose",
"=",
"True",
")",
"generate_network",
"(",
"network",
",",
"netpyne_handler",
",",
"base_dir",
"=",
"base_dir",
")",
"netpyne_handler",
".",
"finalise",
"(",
")",
"simConfig",
"=",
"specs",
".",
"SimConfig",
"(",
")",
"simConfig",
".",
"tstop",
"=",
"simulation",
".",
"duration",
"simConfig",
".",
"duration",
"=",
"simulation",
".",
"duration",
"simConfig",
".",
"dt",
"=",
"simulation",
".",
"dt",
"simConfig",
".",
"seed",
"=",
"simulation",
".",
"seed",
"simConfig",
".",
"recordStep",
"=",
"simulation",
".",
"dt",
"simConfig",
".",
"recordCells",
"=",
"[",
"'all'",
"]",
"simConfig",
".",
"recordTraces",
"=",
"{",
"}",
"for",
"pop",
"in",
"netpyne_handler",
".",
"popParams",
".",
"values",
"(",
")",
":",
"if",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"pop",
".",
"id",
"in",
"simulation",
".",
"recordTraces",
":",
"for",
"i",
"in",
"pop",
"[",
"'cellsList'",
"]",
":",
"id",
"=",
"pop",
"[",
"'pop'",
"]",
"index",
"=",
"i",
"[",
"'cellLabel'",
"]",
"simConfig",
".",
"recordTraces",
"[",
"'v_%s_%s'",
"%",
"(",
"id",
",",
"index",
")",
"]",
"=",
"{",
"'sec'",
":",
"'soma'",
",",
"'loc'",
":",
"0.5",
",",
"'var'",
":",
"'v'",
",",
"'conds'",
":",
"{",
"'pop'",
":",
"id",
",",
"'cellLabel'",
":",
"index",
"}",
"}",
"simConfig",
".",
"saveDat",
"=",
"True",
"print_v",
"(",
"\"NetPyNE netParams: \"",
")",
"pp",
".",
"pprint",
"(",
"netParams",
".",
"todict",
"(",
")",
")",
"#print_v(\"NetPyNE simConfig: \")",
"#pp.pprint(simConfig.todict())",
"sim",
".",
"initialize",
"(",
"netParams",
",",
"simConfig",
")",
"# create network object and set cfg and net params",
"sim",
".",
"net",
".",
"createPops",
"(",
")",
"cells",
"=",
"sim",
".",
"net",
".",
"createCells",
"(",
")",
"# instantiate network cells based on defined populations ",
"for",
"proj_id",
"in",
"netpyne_handler",
".",
"projection_infos",
".",
"keys",
"(",
")",
":",
"projName",
",",
"prePop",
",",
"postPop",
",",
"synapse",
",",
"ptype",
"=",
"netpyne_handler",
".",
"projection_infos",
"[",
"proj_id",
"]",
"print_v",
"(",
"\"Creating connections for %s (%s): %s->%s via %s\"",
"%",
"(",
"projName",
",",
"ptype",
",",
"prePop",
",",
"postPop",
",",
"synapse",
")",
")",
"preComp",
"=",
"netpyne_handler",
".",
"pop_ids_vs_components",
"[",
"prePop",
"]",
"for",
"conn",
"in",
"netpyne_handler",
".",
"connections",
"[",
"projName",
"]",
":",
"pre_id",
",",
"pre_seg",
",",
"pre_fract",
",",
"post_id",
",",
"post_seg",
",",
"post_fract",
",",
"delay",
",",
"weight",
"=",
"conn",
"#connParam = {'delay':delay,'weight':weight,'synsPerConn':1, 'sec':post_seg, 'loc':post_fract, 'threshold':threshold}",
"connParam",
"=",
"{",
"'delay'",
":",
"delay",
",",
"'weight'",
":",
"weight",
",",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
"}",
"if",
"ptype",
"==",
"'electricalProjection'",
":",
"if",
"weight",
"!=",
"1",
":",
"raise",
"Exception",
"(",
"'Cannot yet support inputs where weight !=1!'",
")",
"connParam",
"=",
"{",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
",",
"'gapJunction'",
":",
"True",
",",
"'weight'",
":",
"weight",
"}",
"else",
":",
"connParam",
"=",
"{",
"'delay'",
":",
"delay",
",",
"'weight'",
":",
"weight",
",",
"'synsPerConn'",
":",
"1",
",",
"'sec'",
":",
"post_seg",
",",
"'loc'",
":",
"post_fract",
"}",
"#'threshold': threshold}",
"connParam",
"[",
"'synMech'",
"]",
"=",
"synapse",
"if",
"post_id",
"in",
"sim",
".",
"net",
".",
"gid2lid",
":",
"# check if postsyn is in this node's list of gids",
"sim",
".",
"net",
".",
"_addCellConn",
"(",
"connParam",
",",
"pre_id",
",",
"post_id",
")",
"stims",
"=",
"sim",
".",
"net",
".",
"addStims",
"(",
")",
"# add external stimulation to cells (IClamps etc)",
"simData",
"=",
"sim",
".",
"setupRecording",
"(",
")",
"# setup variables to record for each cell (spikes, V traces, etc)",
"sim",
".",
"runSim",
"(",
")",
"# run parallel Neuron simulation ",
"sim",
".",
"gatherData",
"(",
")",
"# gather spiking data and cell info from each node",
"sim",
".",
"saveData",
"(",
")",
"# save params, cell info and sim output to file (pickle,mat,txt,etc)",
"if",
"return_results",
":",
"raise",
"NotImplementedError",
"(",
"\"Reloading results not supported in NetPyNE yet...\"",
")",
"elif",
"simulator",
"==",
"'jNeuroML'",
"or",
"simulator",
"==",
"'jNeuroML_NEURON'",
"or",
"simulator",
"==",
"'jNeuroML_NetPyNE'",
":",
"from",
"pyneuroml",
".",
"lems",
"import",
"generate_lems_file_for_neuroml",
"from",
"pyneuroml",
"import",
"pynml",
"lems_file_name",
"=",
"'LEMS_%s.xml'",
"%",
"simulation",
".",
"id",
"nml_file_name",
",",
"nml_doc",
"=",
"generate_neuroml2_from_network",
"(",
"network",
",",
"base_dir",
"=",
"base_dir",
",",
"target_dir",
"=",
"target_dir",
")",
"included_files",
"=",
"[",
"'PyNN.xml'",
"]",
"for",
"c",
"in",
"network",
".",
"cells",
":",
"if",
"c",
".",
"lems_source_file",
":",
"included_files",
".",
"append",
"(",
"c",
".",
"lems_source_file",
")",
"'''\n if network.cells:\n for c in network.cells:\n included_files.append(c.neuroml2_source_file)\n '''",
"if",
"network",
".",
"synapses",
":",
"for",
"s",
"in",
"network",
".",
"synapses",
":",
"if",
"s",
".",
"lems_source_file",
":",
"included_files",
".",
"append",
"(",
"s",
".",
"lems_source_file",
")",
"print_v",
"(",
"\"Generating LEMS file prior to running in %s\"",
"%",
"simulator",
")",
"pops_plot_save",
"=",
"[",
"]",
"pops_spike_save",
"=",
"[",
"]",
"gen_plots_for_quantities",
"=",
"{",
"}",
"gen_saves_for_quantities",
"=",
"{",
"}",
"for",
"p",
"in",
"network",
".",
"populations",
":",
"if",
"simulation",
".",
"recordTraces",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordTraces",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordTraces",
")",
":",
"pops_plot_save",
".",
"append",
"(",
"p",
".",
"id",
")",
"if",
"simulation",
".",
"recordSpikes",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordSpikes",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordSpikes",
")",
":",
"pops_spike_save",
".",
"append",
"(",
"p",
".",
"id",
")",
"if",
"simulation",
".",
"recordRates",
"and",
"(",
"'all'",
"in",
"simulation",
".",
"recordRates",
"or",
"p",
".",
"id",
"in",
"simulation",
".",
"recordRates",
")",
":",
"size",
"=",
"evaluate",
"(",
"p",
".",
"size",
",",
"network",
".",
"parameters",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"quantity",
"=",
"'%s/%i/%s/r'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"p",
".",
"component",
")",
"gen_plots_for_quantities",
"[",
"'%s_%i_r'",
"%",
"(",
"p",
".",
"id",
",",
"i",
")",
"]",
"=",
"[",
"quantity",
"]",
"gen_saves_for_quantities",
"[",
"'%s_%i.r.dat'",
"%",
"(",
"p",
".",
"id",
",",
"i",
")",
"]",
"=",
"[",
"quantity",
"]",
"if",
"simulation",
".",
"recordVariables",
":",
"for",
"var",
"in",
"simulation",
".",
"recordVariables",
":",
"to_rec",
"=",
"simulation",
".",
"recordVariables",
"[",
"var",
"]",
"if",
"(",
"'all'",
"in",
"to_rec",
"or",
"p",
".",
"id",
"in",
"to_rec",
")",
":",
"size",
"=",
"evaluate",
"(",
"p",
".",
"size",
",",
"network",
".",
"parameters",
")",
"for",
"i",
"in",
"range",
"(",
"size",
")",
":",
"quantity",
"=",
"'%s/%i/%s/%s'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"p",
".",
"component",
",",
"var",
")",
"gen_plots_for_quantities",
"[",
"'%s_%i_%s'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"var",
")",
"]",
"=",
"[",
"quantity",
"]",
"gen_saves_for_quantities",
"[",
"'%s_%i.%s.dat'",
"%",
"(",
"p",
".",
"id",
",",
"i",
",",
"var",
")",
"]",
"=",
"[",
"quantity",
"]",
"generate_lems_file_for_neuroml",
"(",
"simulation",
".",
"id",
",",
"nml_file_name",
",",
"network",
".",
"id",
",",
"simulation",
".",
"duration",
",",
"simulation",
".",
"dt",
",",
"lems_file_name",
",",
"target_dir",
"=",
"target_dir",
"if",
"target_dir",
"else",
"'.'",
",",
"nml_doc",
"=",
"nml_doc",
",",
"# Use this if the nml doc has already been loaded (to avoid delay in reload)",
"include_extra_files",
"=",
"included_files",
",",
"gen_plots_for_all_v",
"=",
"False",
",",
"plot_all_segments",
"=",
"False",
",",
"gen_plots_for_quantities",
"=",
"gen_plots_for_quantities",
",",
"# Dict with displays vs lists of quantity paths",
"gen_plots_for_only_populations",
"=",
"pops_plot_save",
",",
"# List of populations, all pops if = []",
"gen_saves_for_all_v",
"=",
"False",
",",
"save_all_segments",
"=",
"False",
",",
"gen_saves_for_only_populations",
"=",
"pops_plot_save",
",",
"# List of populations, all pops if = []",
"gen_saves_for_quantities",
"=",
"gen_saves_for_quantities",
",",
"# Dict with file names vs lists of quantity paths",
"gen_spike_saves_for_all_somas",
"=",
"False",
",",
"gen_spike_saves_for_only_populations",
"=",
"pops_spike_save",
",",
"# List of populations, all pops if = []",
"gen_spike_saves_for_cells",
"=",
"{",
"}",
",",
"# Dict with file names vs lists of quantity paths",
"spike_time_format",
"=",
"'ID_TIME'",
",",
"copy_neuroml",
"=",
"True",
",",
"lems_file_generate_seed",
"=",
"12345",
",",
"report_file_name",
"=",
"'report.%s.txt'",
"%",
"simulation",
".",
"id",
",",
"simulation_seed",
"=",
"simulation",
".",
"seed",
"if",
"simulation",
".",
"seed",
"else",
"12345",
",",
"verbose",
"=",
"True",
")",
"lems_file_name",
"=",
"_locate_file",
"(",
"lems_file_name",
",",
"target_dir",
")",
"if",
"simulator",
"==",
"'jNeuroML'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
")",
"elif",
"simulator",
"==",
"'jNeuroML_NEURON'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml_neuron",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
")",
"elif",
"simulator",
"==",
"'jNeuroML_NetPyNE'",
":",
"results",
"=",
"pynml",
".",
"run_lems_with_jneuroml_netpyne",
"(",
"lems_file_name",
",",
"nogui",
"=",
"True",
",",
"verbose",
"=",
"True",
",",
"load_saved_data",
"=",
"return_results",
",",
"reload_events",
"=",
"return_results",
",",
"num_processors",
"=",
"num_processors",
")",
"print_v",
"(",
"\"Finished running LEMS file %s in %s (returning results: %s)\"",
"%",
"(",
"lems_file_name",
",",
"simulator",
",",
"return_results",
")",
")",
"if",
"return_results",
":",
"traces",
",",
"events",
"=",
"results",
"_print_result_info",
"(",
"traces",
",",
"events",
")",
"return",
"results"
] | Generates the network in the specified simulator and runs, if appropriate | [
"Generates",
"the",
"network",
"in",
"the",
"specified",
"simulator",
"and",
"runs",
"if",
"appropriate"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L633-L1046 |
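A hedged usage sketch for generate_and_run, showing one way the function above might be driven; the simulation/network loaders, the file name, and the simulator choice are assumptions for illustration only — only the call signature comes from the row above.
# Sketch: run a NeuroMLlite simulation with jNeuroML_NEURON and reload the results.
# load_simulation_json / load_network_json locations and 'Sim_Example.json' are assumed.
from neuromllite.utils import load_simulation_json, load_network_json
from neuromllite.NetworkGenerator import generate_and_run

sim = load_simulation_json('Sim_Example.json')
net = load_network_json(sim.network)
traces, events = generate_and_run(sim,
                                  simulator='jNeuroML_NEURON',
                                  network=net,
                                  return_results=True,
                                  target_dir='./generated')
# traces: dict of voltage traces keyed by 'pop[index]'; events: dict of spike-time lists.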
NeuroML/NeuroMLlite | neuromllite/NetworkGenerator.py | _print_result_info | def _print_result_info(traces, events):
"""
Print a summary of the returned (voltage) traces and spike times
"""
print_v('Returning %i traces:'%len(traces))
for r in sorted(traces.keys()):
x = traces[r]
print_v(' %s (%s): %s -> %s (min: %s, max: %s, len: %i)'%(r, type(x), x[0],x[-1],min(x),max(x),len(x)))
print_v('Returning %i events:'%len(events))
for r in sorted(events.keys()):
x = events[r]
print_v(' %s: %s -> %s (len: %i)'%(r, x[0] if len(x)>0 else '-',x[-1] if len(x)>0 else '-',len(x))) | python | def _print_result_info(traces, events):
"""
Print a summary of the returned (voltage) traces and spike times
"""
print_v('Returning %i traces:'%len(traces))
for r in sorted(traces.keys()):
x = traces[r]
print_v(' %s (%s): %s -> %s (min: %s, max: %s, len: %i)'%(r, type(x), x[0],x[-1],min(x),max(x),len(x)))
print_v('Returning %i events:'%len(events))
for r in sorted(events.keys()):
x = events[r]
print_v(' %s: %s -> %s (len: %i)'%(r, x[0] if len(x)>0 else '-',x[-1] if len(x)>0 else '-',len(x))) | [
"def",
"_print_result_info",
"(",
"traces",
",",
"events",
")",
":",
"print_v",
"(",
"'Returning %i traces:'",
"%",
"len",
"(",
"traces",
")",
")",
"for",
"r",
"in",
"sorted",
"(",
"traces",
".",
"keys",
"(",
")",
")",
":",
"x",
"=",
"traces",
"[",
"r",
"]",
"print_v",
"(",
"' %s (%s): %s -> %s (min: %s, max: %s, len: %i)'",
"%",
"(",
"r",
",",
"type",
"(",
"x",
")",
",",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"-",
"1",
"]",
",",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
",",
"len",
"(",
"x",
")",
")",
")",
"print_v",
"(",
"'Returning %i events:'",
"%",
"len",
"(",
"events",
")",
")",
"for",
"r",
"in",
"sorted",
"(",
"events",
".",
"keys",
"(",
")",
")",
":",
"x",
"=",
"events",
"[",
"r",
"]",
"print_v",
"(",
"' %s: %s -> %s (len: %i)'",
"%",
"(",
"r",
",",
"x",
"[",
"0",
"]",
"if",
"len",
"(",
"x",
")",
">",
"0",
"else",
"'-'",
",",
"x",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"x",
")",
">",
"0",
"else",
"'-'",
",",
"len",
"(",
"x",
")",
")",
")"
] | Print a summary of the returned (voltage) traces and spike times | [
"Print",
"a",
"summary",
"of",
"the",
"returned",
"(",
"voltage",
")",
"traces",
"and",
"spike",
"times"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/NetworkGenerator.py#L1049-L1060 |
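A toy illustration of what _print_result_info reports, using made-up values (voltages in volts, spike times in seconds, matching the conventions used elsewhere in this file).
# Toy data only; shows the summary lines produced by _print_result_info via print_v.
traces = {'pop0[0]': [-0.065, -0.0641, -0.0632]}
events = {'pop0[0]': [0.012, 0.034]}
_print_result_info(traces, events)
# Prints, approximately:
#   Returning 1 traces:
#     pop0[0] (<class 'list'>): -0.065 -> -0.0632 (min: -0.065, max: -0.0632, len: 3)
#   Returning 1 events:
#     pop0[0]: 0.012 -> 0.034 (len: 2)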