Dataset columns (train split, Python only):

    repository_name              string    lengths 7 to 55
    func_path_in_repository      string    lengths 4 to 223
    func_name                    string    lengths 1 to 134
    whole_func_string            string    lengths 75 to 104k
    language                     string    1 class (python)
    func_code_string             string    lengths 75 to 104k
    func_code_tokens             sequence  lengths 19 to 28.4k
    func_documentation_string    string    lengths 1 to 46.9k
    func_documentation_tokens    sequence  lengths 1 to 1.97k
    split_name                   string    1 class (train)
    func_code_url                string    lengths 87 to 315
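For orientation, a minimal sketch of iterating over rows with this schema. The file name train.jsonl and the JSON Lines layout are assumptions for illustration; only the field names come from the listing above.

import json

# Assumed layout: one JSON object per line, one object per dataset row.
with open("train.jsonl", encoding="utf8") as f:
    for line in f:
        row = json.loads(line)
        # Each row pairs a Python function with its docstring and source URL.
        print(row["repository_name"], row["func_name"])
        print(row["func_documentation_string"].splitlines()[0])
        print(row["func_code_url"])
        break  # look at the first row only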
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Gate.connect_input
python
def connect_input(self, name, wire):
    """Connect the specified input to a wire."""
    self._inputs[name] = wire
    wire.sinks.append(self)
[ "def", "connect_input", "(", "self", ",", "name", ",", "wire", ")", ":", "self", ".", "_inputs", "[", "name", "]", "=", "wire", "wire", ".", "sinks", ".", "append", "(", "self", ")" ]
Connect the specified input to a wire.
[ "Connect", "the", "specified", "input", "to", "a", "wire", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L80-L83
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Gate._write_config
python
def _write_config(self, memory):
    """Write the configuration for this gate to memory."""
    memory.seek(0)
    memory.write(struct.pack("<5I",
                             # sim_length
                             self._simulator.length,
                             # input_a_key
                             self._inputs["a"].routing_key
                             if self._inputs["a"] is not None else 0xFFFFFFFF,
                             # input_b_key
                             self._inputs["b"].routing_key
                             if self._inputs["b"] is not None else 0xFFFFFFFF,
                             # output_key
                             self.output.routing_key,
                             # lut
                             self._lookup_table))
[ "def", "_write_config", "(", "self", ",", "memory", ")", ":", "memory", ".", "seek", "(", "0", ")", "memory", ".", "write", "(", "struct", ".", "pack", "(", "\"<5I\"", ",", "# sim_length", "self", ".", "_simulator", ".", "length", ",", "# input_a_key", "self", ".", "_inputs", "[", "\"a\"", "]", ".", "routing_key", "if", "self", ".", "_inputs", "[", "\"a\"", "]", "is", "not", "None", "else", "0xFFFFFFFF", ",", "# input_b_key", "self", ".", "_inputs", "[", "\"b\"", "]", ".", "routing_key", "if", "self", ".", "_inputs", "[", "\"b\"", "]", "is", "not", "None", "else", "0xFFFFFFFF", ",", "# output_key", "self", ".", "output", ".", "routing_key", ",", "# lut", "self", ".", "_lookup_table", ")", ")" ]
Write the configuration for this gate to memory.
[ "Write", "the", "configuration", "for", "this", "gate", "to", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L94-L111
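As a cross-check on the layout written by Gate._write_config, a small sketch that decodes the same five little-endian 32-bit words from a raw byte buffer. The helper name and the returned dict are illustrative; only the "<5I" layout and the 0xFFFFFFFF sentinel mirror the writer above.

import struct

def read_gate_config(raw):
    # The writer packs "<5I": sim_length, input_a_key, input_b_key, output_key, lut.
    sim_length, key_a, key_b, key_out, lut = struct.unpack("<5I", raw[:20])
    unconnected = 0xFFFFFFFF  # sentinel used when an input has no wire attached
    return {
        "sim_length": sim_length,
        "input_a_key": None if key_a == unconnected else key_a,
        "input_b_key": None if key_b == unconnected else key_b,
        "output_key": key_out,
        "lut": lut,
    }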
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Probe.connect_input
python
def connect_input(self, wire):
    """Probe the specified wire."""
    self._input = wire
    wire.sinks.append(self)
[ "def", "connect_input", "(", "self", ",", "wire", ")", ":", "self", ".", "_input", "=", "wire", "wire", ".", "sinks", ".", "append", "(", "self", ")" ]
Probe the specified wire.
[ "Probe", "the", "specified", "wire", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L172-L175
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Probe._write_config
python
def _write_config(self, memory):
    """Write the configuration for this probe to memory."""
    memory.seek(0)
    memory.write(struct.pack("<II",
                             # sim_length
                             self._simulator.length,
                             # input_key
                             self._input.routing_key
                             if self._input is not None else 0xFFFFFFFF))
[ "def", "_write_config", "(", "self", ",", "memory", ")", ":", "memory", ".", "seek", "(", "0", ")", "memory", ".", "write", "(", "struct", ".", "pack", "(", "\"<II\"", ",", "# sim_length", "self", ".", "_simulator", ".", "length", ",", "# input_key", "self", ".", "_input", ".", "routing_key", "if", "self", ".", "_input", "is", "not", "None", "else", "0xFFFFFFFF", ")", ")" ]
Write the configuration for this probe to memory.
[ "Write", "the", "configuration", "for", "this", "probe", "to", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L187-L196
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Probe._read_results
python
def _read_results(self, memory):
    """Read back the probed results.

    Returns
    -------
    str
        A string of "0"s and "1"s, one for each millisecond of simulation.
    """
    # Seek to the simulation data and read it all back
    memory.seek(8)
    bits = bitarray(endian="little")
    bits.frombytes(memory.read())
    self.recorded_data = bits.to01()
[ "def", "_read_results", "(", "self", ",", "memory", ")", ":", "# Seek to the simulation data and read it all back", "memory", ".", "seek", "(", "8", ")", "bits", "=", "bitarray", "(", "endian", "=", "\"little\"", ")", "bits", ".", "frombytes", "(", "memory", ".", "read", "(", ")", ")", "self", ".", "recorded_data", "=", "bits", ".", "to01", "(", ")" ]
Read back the probed results. Returns ------- str A string of "0"s and "1"s, one for each millisecond of simulation.
[ "Read", "back", "the", "probed", "results", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L198-L210
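A hedged sketch of the same decoding done off-chip from a plain byte buffer: skip the 8-byte header the probe configuration occupies, interpret the rest as little-endian bits, one sample per simulated millisecond. Truncating to sim_length is an assumption added here; the method above keeps any trailing padding bits.

from bitarray import bitarray

def decode_probe_bits(raw, sim_length):
    # Bytes 0-7 hold the two 32-bit config words; samples start at byte 8.
    bits = bitarray(endian="little")
    bits.frombytes(raw[8:])
    return bits.to01()[:sim_length]  # one "0"/"1" character per millisecond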
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Stimulus._write_config
python
def _write_config(self, memory):
    """Write the configuration for this stimulus to memory."""
    memory.seek(0)
    memory.write(struct.pack("<II",
                             # sim_length
                             self._simulator.length,
                             # output_key
                             self.output.routing_key))

    # NB: memory.write will automatically truncate any excess stimulus
    memory.write(bitarray(
        self.stimulus.ljust(self._simulator.length, "0"),
        endian="little").tobytes())
[ "def", "_write_config", "(", "self", ",", "memory", ")", ":", "memory", ".", "seek", "(", "0", ")", "memory", ".", "write", "(", "struct", ".", "pack", "(", "\"<II\"", ",", "# sim_length", "self", ".", "_simulator", ".", "length", ",", "# output_key", "self", ".", "output", ".", "routing_key", ")", ")", "# NB: memory.write will automatically truncate any excess stimulus", "memory", ".", "write", "(", "bitarray", "(", "self", ".", "stimulus", ".", "ljust", "(", "self", ".", "_simulator", ".", "length", ",", "\"0\"", ")", ",", "endian", "=", "\"little\"", ")", ".", "tobytes", "(", ")", ")" ]
Write the configuration for this stimulus to memory.
[ "Write", "the", "configuration", "for", "this", "stimulus", "to", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L247-L259
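The stimulus padding above can be exercised on its own. A sketch, with an explicit slice to sim_length added here for clarity; on the machine the write is bounded by the allocated region instead.

from bitarray import bitarray

def pack_stimulus(stimulus, sim_length):
    # Pad a short "0"/"1" string with zeros, then encode little-endian, as the writer does.
    padded = stimulus.ljust(sim_length, "0")[:sim_length]
    return bitarray(padded, endian="little").tobytes()

# Example: an 8 ms pattern padded to 16 ms occupies two bytes.
assert len(pack_stimulus("10110000", 16)) == 2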
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Simulator._new_wire
python
def _new_wire(self, source, sinks=None):
    """Create a new :py:class:`._Wire` with a unique routing key."""
    # Assign sequential routing key to new nets.
    wire = _Wire(source, sinks if sinks is not None else [], len(self._wires))
    self._wires.append(wire)
    return wire
[ "def", "_new_wire", "(", "self", ",", "source", ",", "sinks", "=", "None", ")", ":", "# Assign sequential routing key to new nets.", "wire", "=", "_Wire", "(", "source", ",", "sinks", "if", "sinks", "is", "not", "None", "else", "[", "]", ",", "len", "(", "self", ".", "_wires", ")", ")", "self", ".", "_wires", ".", "append", "(", "wire", ")", "return", "wire" ]
Create a new :py:class:`._Wire` with a unique routing key.
[ "Create", "a", "new", ":", "py", ":", "class", ":", ".", "_Wire", "with", "a", "unique", "routing", "key", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L295-L301
project-rig/rig
docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py
Simulator.run
python
def run(self):
    """Run the simulation."""
    # Define the resource requirements of each component in the simulation.
    vertices_resources = {
        # Every component runs on exactly one core and consumes a certain
        # amount of SDRAM to hold configuration data.
        component: {Cores: 1, SDRAM: component._get_config_size()}
        for component in self._components
    }

    # Work out what SpiNNaker application needs to be loaded for each
    # component
    vertices_applications = {component: component._get_kernel()
                             for component in self._components}

    # Convert the Wire objects into Rig Net objects and create a lookup
    # from Net to the (key, mask) to use.
    net_keys = {Net(wire.source, wire.sinks): (wire.routing_key, 0xFFFFFFFF)
                for wire in self._wires}
    nets = list(net_keys)

    # Boot the SpiNNaker machine and interrogate it to determine what
    # resources (e.g. cores, SDRAM etc.) are available.
    mc = MachineController(self._hostname)
    mc.boot()
    system_info = mc.get_system_info()

    # Automatically chose which chips and cores to use for each component
    # and generate routing tables.
    placements, allocations, application_map, routing_tables = \
        place_and_route_wrapper(vertices_resources,
                                vertices_applications,
                                nets, net_keys,
                                system_info)

    with mc.application():
        # Allocate memory for configuration data, tagged by core number.
        memory_allocations = sdram_alloc_for_vertices(mc, placements,
                                                      allocations)

        # Load the configuration data for all components
        for component, memory in memory_allocations.items():
            component._write_config(memory)

        # Load all routing tables
        mc.load_routing_tables(routing_tables)

        # Load all SpiNNaker application kernels
        mc.load_application(application_map)

        # Wait for all six cores to reach the 'sync0' barrier
        mc.wait_for_cores_to_reach_state("sync0", len(self._components))

        # Send the 'sync0' signal to start execution and wait for the
        # simulation to finish.
        mc.send_signal("sync0")
        time.sleep(self.length * 0.001)
        mc.wait_for_cores_to_reach_state("exit", len(self._components))

        # Retrieve result data
        for component, memory in memory_allocations.items():
            component._read_results(memory)
[ "def", "run", "(", "self", ")", ":", "# Define the resource requirements of each component in the simulation.", "vertices_resources", "=", "{", "# Every component runs on exactly one core and consumes a certain", "# amount of SDRAM to hold configuration data.", "component", ":", "{", "Cores", ":", "1", ",", "SDRAM", ":", "component", ".", "_get_config_size", "(", ")", "}", "for", "component", "in", "self", ".", "_components", "}", "# Work out what SpiNNaker application needs to be loaded for each", "# component", "vertices_applications", "=", "{", "component", ":", "component", ".", "_get_kernel", "(", ")", "for", "component", "in", "self", ".", "_components", "}", "# Convert the Wire objects into Rig Net objects and create a lookup", "# from Net to the (key, mask) to use.", "net_keys", "=", "{", "Net", "(", "wire", ".", "source", ",", "wire", ".", "sinks", ")", ":", "(", "wire", ".", "routing_key", ",", "0xFFFFFFFF", ")", "for", "wire", "in", "self", ".", "_wires", "}", "nets", "=", "list", "(", "net_keys", ")", "# Boot the SpiNNaker machine and interrogate it to determine what", "# resources (e.g. cores, SDRAM etc.) are available.", "mc", "=", "MachineController", "(", "self", ".", "_hostname", ")", "mc", ".", "boot", "(", ")", "system_info", "=", "mc", ".", "get_system_info", "(", ")", "# Automatically chose which chips and cores to use for each component", "# and generate routing tables.", "placements", ",", "allocations", ",", "application_map", ",", "routing_tables", "=", "place_and_route_wrapper", "(", "vertices_resources", ",", "vertices_applications", ",", "nets", ",", "net_keys", ",", "system_info", ")", "with", "mc", ".", "application", "(", ")", ":", "# Allocate memory for configuration data, tagged by core number.", "memory_allocations", "=", "sdram_alloc_for_vertices", "(", "mc", ",", "placements", ",", "allocations", ")", "# Load the configuration data for all components", "for", "component", ",", "memory", "in", "memory_allocations", ".", "items", "(", ")", ":", "component", ".", "_write_config", "(", "memory", ")", "# Load all routing tables", "mc", ".", "load_routing_tables", "(", "routing_tables", ")", "# Load all SpiNNaker application kernels", "mc", ".", "load_application", "(", "application_map", ")", "# Wait for all six cores to reach the 'sync0' barrier", "mc", ".", "wait_for_cores_to_reach_state", "(", "\"sync0\"", ",", "len", "(", "self", ".", "_components", ")", ")", "# Send the 'sync0' signal to start execution and wait for the", "# simulation to finish.", "mc", ".", "send_signal", "(", "\"sync0\"", ")", "time", ".", "sleep", "(", "self", ".", "length", "*", "0.001", ")", "mc", ".", "wait_for_cores_to_reach_state", "(", "\"exit\"", ",", "len", "(", "self", ".", "_components", ")", ")", "# Retrieve result data", "for", "component", ",", "memory", "in", "memory_allocations", ".", "items", "(", ")", ":", "component", ".", "_read_results", "(", "memory", ")" ]
Run the simulation.
[ "Run", "the", "simulation", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/circuit_sim_tutorial/05_circuit_simulation/circuit_simulator.py#L303-L365
Metatab/metapack
metapack/jupyter/exec.py
execute_notebook
python
def execute_notebook(nb_path, pkg_dir, dataframes, write_notebook=False, env=None):
    """
    Execute a notebook after adding the prolog and epilog. Can also add
    %mt_materialize magics to write dataframes to files

    :param nb_path: path to a notebook.
    :param pkg_dir: Directory to which dataframes are materialized
    :param dataframes: List of names of dataframes to materialize
    :return: a Notebook object
    """

    import nbformat
    from metapack.jupyter.preprocessors import AddEpilog, AddProlog
    from metapack.jupyter.exporters import ExecutePreprocessor, Config
    from os.path import dirname, join, splitext, basename
    from nbconvert.preprocessors.execute import CellExecutionError

    with open(nb_path, encoding='utf8') as f:
        nb = nbformat.read(f, as_version=4)

    root, ext = splitext(basename(nb_path))

    c = Config()

    nb, resources = AddProlog(config=c, env=env or {}).preprocess(nb, {})

    nb, resources = AddEpilog(config=c,
                              pkg_dir=pkg_dir,
                              dataframes=dataframes,
                              ).preprocess(nb, {})

    def _write_notebook(nb_path, root, ext, write_notebook):
        if write_notebook:
            if write_notebook is True:
                exec_nb_path = join(dirname(nb_path), root + '-executed' + ext)
            else:
                exec_nb_path = write_notebook

            with open(exec_nb_path, 'w', encoding='utf8') as f:
                nbformat.write(nb, f)

    _write_notebook(nb_path, root, ext, write_notebook)

    try:
        ep = ExecutePreprocessor(config=c)
        ep.timeout = 5*60

        nb, _ = ep.preprocess(nb, {'metadata': {'path': dirname(nb_path)}})
    except (CellExecutionError, TimeoutError) as e:
        err_nb_path = join(dirname(nb_path), root + '-errors' + ext)
        with open(err_nb_path, 'w', encoding='utf8') as f:
            nbformat.write(nb, f)

        raise CellExecutionError("Errors executing noteboook. See notebook at {} for details.\n{}"
                                 .format(err_nb_path, ''))
    except ImportError as e:
        raise NotebookError("Failed to import a library required for notebook execution: {}"
                            .format(str(e)))

    _write_notebook(nb_path, root, ext, write_notebook)

    return nb
[ "def", "execute_notebook", "(", "nb_path", ",", "pkg_dir", ",", "dataframes", ",", "write_notebook", "=", "False", ",", "env", "=", "None", ")", ":", "import", "nbformat", "from", "metapack", ".", "jupyter", ".", "preprocessors", "import", "AddEpilog", ",", "AddProlog", "from", "metapack", ".", "jupyter", ".", "exporters", "import", "ExecutePreprocessor", ",", "Config", "from", "os", ".", "path", "import", "dirname", ",", "join", ",", "splitext", ",", "basename", "from", "nbconvert", ".", "preprocessors", ".", "execute", "import", "CellExecutionError", "with", "open", "(", "nb_path", ",", "encoding", "=", "'utf8'", ")", "as", "f", ":", "nb", "=", "nbformat", ".", "read", "(", "f", ",", "as_version", "=", "4", ")", "root", ",", "ext", "=", "splitext", "(", "basename", "(", "nb_path", ")", ")", "c", "=", "Config", "(", ")", "nb", ",", "resources", "=", "AddProlog", "(", "config", "=", "c", ",", "env", "=", "env", "or", "{", "}", ")", ".", "preprocess", "(", "nb", ",", "{", "}", ")", "nb", ",", "resources", "=", "AddEpilog", "(", "config", "=", "c", ",", "pkg_dir", "=", "pkg_dir", ",", "dataframes", "=", "dataframes", ",", ")", ".", "preprocess", "(", "nb", ",", "{", "}", ")", "def", "_write_notebook", "(", "nb_path", ",", "root", ",", "ext", ",", "write_notebook", ")", ":", "if", "write_notebook", ":", "if", "write_notebook", "is", "True", ":", "exec_nb_path", "=", "join", "(", "dirname", "(", "nb_path", ")", ",", "root", "+", "'-executed'", "+", "ext", ")", "else", ":", "exec_nb_path", "=", "write_notebook", "with", "open", "(", "exec_nb_path", ",", "'w'", ",", "encoding", "=", "'utf8'", ")", "as", "f", ":", "nbformat", ".", "write", "(", "nb", ",", "f", ")", "_write_notebook", "(", "nb_path", ",", "root", ",", "ext", ",", "write_notebook", ")", "try", ":", "ep", "=", "ExecutePreprocessor", "(", "config", "=", "c", ")", "ep", ".", "timeout", "=", "5", "*", "60", "nb", ",", "_", "=", "ep", ".", "preprocess", "(", "nb", ",", "{", "'metadata'", ":", "{", "'path'", ":", "dirname", "(", "nb_path", ")", "}", "}", ")", "except", "(", "CellExecutionError", ",", "TimeoutError", ")", "as", "e", ":", "err_nb_path", "=", "join", "(", "dirname", "(", "nb_path", ")", ",", "root", "+", "'-errors'", "+", "ext", ")", "with", "open", "(", "err_nb_path", ",", "'w'", ",", "encoding", "=", "'utf8'", ")", "as", "f", ":", "nbformat", ".", "write", "(", "nb", ",", "f", ")", "raise", "CellExecutionError", "(", "\"Errors executing noteboook. See notebook at {} for details.\\n{}\"", ".", "format", "(", "err_nb_path", ",", "''", ")", ")", "except", "ImportError", "as", "e", ":", "raise", "NotebookError", "(", "\"Failed to import a library required for notebook execution: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "_write_notebook", "(", "nb_path", ",", "root", ",", "ext", ",", "write_notebook", ")", "return", "nb" ]
Execute a notebook after adding the prolog and epilog. Can also add %mt_materialize magics to write dataframes to files :param nb_path: path to a notebook. :param pkg_dir: Directory to which dataframes are materialized :param dataframes: List of names of dataframes to materialize :return: a Notebook object
[ "Execute", "a", "notebook", "after", "adding", "the", "prolog", "and", "epilog", ".", "Can", "also", "add", "%mt_materialize", "magics", "to", "write", "dataframes", "to", "files" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exec.py#L11-L72
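A usage sketch for execute_notebook: the import path is inferred from this row's func_path_in_repository, and the notebook path, package directory and dataframe names are placeholders, not values from the dataset.

from metapack.jupyter.exec import execute_notebook

# Placeholders - point these at a real notebook and package directory.
nb = execute_notebook(
    "analysis.ipynb",             # nb_path
    "example.com-demo_package",   # pkg_dir: where %mt_materialize writes dataframes
    dataframes=["df_people", "df_places"],
    write_notebook=True,          # also saves analysis-executed.ipynb beside the source
)
print(len(nb.cells))              # the executed notebook object is returned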
Metatab/metapack
metapack/jupyter/convert.py
convert_documentation
python
def convert_documentation(nb_path):
    """Run only the document conversion portion of the notebook conversion

    The final document will not be completel
    """

    with open(nb_path) as f:
        nb = nbformat.reads(f.read(), as_version=4)

    doc = ExtractInlineMetatabDoc(package_url="metapack+file:" + dirname(nb_path)).run(nb)

    package_name = doc.as_version(None)

    output_dir = join(getcwd(), package_name)

    de = DocumentationExporter(config=Config(), log=logger, metadata=doc_metadata(doc))

    prt('Converting documentation')
    output, resources = de.from_filename(nb_path)

    fw = FilesWriter()
    fw.build_directory = join(output_dir, 'docs')

    fw.write(output, resources, notebook_name='notebook')

    prt("Wrote documentation to {}".format(fw.build_directory))
[ "def", "convert_documentation", "(", "nb_path", ")", ":", "with", "open", "(", "nb_path", ")", "as", "f", ":", "nb", "=", "nbformat", ".", "reads", "(", "f", ".", "read", "(", ")", ",", "as_version", "=", "4", ")", "doc", "=", "ExtractInlineMetatabDoc", "(", "package_url", "=", "\"metapack+file:\"", "+", "dirname", "(", "nb_path", ")", ")", ".", "run", "(", "nb", ")", "package_name", "=", "doc", ".", "as_version", "(", "None", ")", "output_dir", "=", "join", "(", "getcwd", "(", ")", ",", "package_name", ")", "de", "=", "DocumentationExporter", "(", "config", "=", "Config", "(", ")", ",", "log", "=", "logger", ",", "metadata", "=", "doc_metadata", "(", "doc", ")", ")", "prt", "(", "'Converting documentation'", ")", "output", ",", "resources", "=", "de", ".", "from_filename", "(", "nb_path", ")", "fw", "=", "FilesWriter", "(", ")", "fw", ".", "build_directory", "=", "join", "(", "output_dir", ",", "'docs'", ")", "fw", ".", "write", "(", "output", ",", "resources", ",", "notebook_name", "=", "'notebook'", ")", "prt", "(", "\"Wrote documentation to {}\"", ".", "format", "(", "fw", ".", "build_directory", ")", ")" ]
Run only the document conversion portion of the notebook conversion The final document will not be completel
[ "Run", "only", "the", "document", "conversion", "portion", "of", "the", "notebook", "conversion" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/convert.py#L23-L46
Metatab/metapack
metapack/jupyter/convert.py
doc_metadata
python
def doc_metadata(doc):
    """Create a metadata dict from a MetatabDoc, for Document conversion"""

    r = doc['Root'].as_dict()

    r.update(doc['Contacts'].as_dict())

    r['author'] = r.get('author', r.get('creator', r.get('wrangler')))

    return r
[ "def", "doc_metadata", "(", "doc", ")", ":", "r", "=", "doc", "[", "'Root'", "]", ".", "as_dict", "(", ")", "r", ".", "update", "(", "doc", "[", "'Contacts'", "]", ".", "as_dict", "(", ")", ")", "r", "[", "'author'", "]", "=", "r", ".", "get", "(", "'author'", ",", "r", ".", "get", "(", "'creator'", ",", "r", ".", "get", "(", "'wrangler'", ")", ")", ")", "return", "r" ]
Create a metadata dict from a MetatabDoc, for Document conversion
[ "Create", "a", "metadata", "dict", "from", "a", "MetatabDoc", "for", "Document", "conversion" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/convert.py#L111-L118
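The author fallback that doc_metadata applies, restated standalone; the contact values here are made up for illustration.

contacts = {"creator": "Jane Analyst", "wrangler": "J. Doe"}
author = contacts.get("author", contacts.get("creator", contacts.get("wrangler")))
print(author)  # "Jane Analyst": 'author' wins, then 'creator', then 'wrangler'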
Metatab/metapack
metapack/jupyter/convert.py
extract_notebook_metatab
python
def extract_notebook_metatab(nb_path: Path):
    """Extract the metatab lines from a notebook and return a Metapack doc """

    from metatab.rowgenerators import TextRowGenerator
    import nbformat

    with nb_path.open() as f:
        nb = nbformat.read(f, as_version=4)

    lines = '\n'.join(['Declare: metatab-latest'] +
                      [get_cell_source(nb, tag) for tag in ['metadata', 'resources', 'schema']])

    doc = MetapackDoc(TextRowGenerator(lines))

    doc['Root'].get_or_new_term('Root.Title').value = get_cell_source(nb, 'Title').strip('#').strip()

    doc['Root'].get_or_new_term('Root.Description').value = get_cell_source(nb, 'Description')

    doc['Documentation'].get_or_new_term('Root.Readme').value = get_cell_source(nb, 'readme')

    return doc
[ "def", "extract_notebook_metatab", "(", "nb_path", ":", "Path", ")", ":", "from", "metatab", ".", "rowgenerators", "import", "TextRowGenerator", "import", "nbformat", "with", "nb_path", ".", "open", "(", ")", "as", "f", ":", "nb", "=", "nbformat", ".", "read", "(", "f", ",", "as_version", "=", "4", ")", "lines", "=", "'\\n'", ".", "join", "(", "[", "'Declare: metatab-latest'", "]", "+", "[", "get_cell_source", "(", "nb", ",", "tag", ")", "for", "tag", "in", "[", "'metadata'", ",", "'resources'", ",", "'schema'", "]", "]", ")", "doc", "=", "MetapackDoc", "(", "TextRowGenerator", "(", "lines", ")", ")", "doc", "[", "'Root'", "]", ".", "get_or_new_term", "(", "'Root.Title'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'Title'", ")", ".", "strip", "(", "'#'", ")", ".", "strip", "(", ")", "doc", "[", "'Root'", "]", ".", "get_or_new_term", "(", "'Root.Description'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'Description'", ")", "doc", "[", "'Documentation'", "]", ".", "get_or_new_term", "(", "'Root.Readme'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'readme'", ")", "return", "doc" ]
Extract the metatab lines from a notebook and return a Metapack doc
[ "Extract", "the", "metatab", "lines", "from", "a", "notebook", "and", "return", "a", "Metapack", "doc" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/convert.py#L181-L199
Metatab/metapack
metapack/cli/wp.py
publish_wp
python
def publish_wp(site_name, output_file, resources, args):
    """Publish a notebook to a wordpress post, using Gutenberg blocks.

    Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'

    show_input: hide
    github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
    identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
    featured_image: 171
    authors:
    - email: eric@civicknowledge.com
      name: Eric Busboom
      organization: Civic Knowledge
      type: wrangler
    tags:
    - Tag1
    - Tag2
    categories:
    - Demographics
    - Tutorial

    'Featured_image' is an attachment id
    """

    from wordpress_xmlrpc import Client, WordPressPost
    from wordpress_xmlrpc.methods.media import UploadFile, GetMediaLibrary
    from wordpress_xmlrpc.methods.posts import NewPost, EditPost, GetPost

    # http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png

    url, user, password = get_site_config(site_name)

    meta = {}
    for r in resources:
        if r.endswith('.json'):
            with open(r) as f:
                meta = json.load(f)

    fm = meta.get('frontmatter', {})

    if not 'identifier' in fm or not fm['identifier']:
        err("Can't publish notebook without a unique identifier. Add this to the "
            "Metatab document or frontmatter metadata:\n identifier: {}".format(str(uuid4())))

    wp = Client(url, user, password)

    post = find_post(wp, fm['identifier'])

    if post:
        prt("Updating old post")
    else:
        post = WordPressPost()
        post.id = wp.call(NewPost(post))
        prt("Creating new post")

    post.title = fm.get('title', '')
    post.slug = fm.get('slug')

    with open(output_file) as f:
        content = f.read()

    post.terms_names = {
        'post_tag': fm.get('tags', []),
        'category': fm.get('categories', [])
    }

    if args.header:
        print(yaml.dump(fm, default_flow_style=False))

    set_custom_field(post, 'identifier', fm['identifier'])

    post.excerpt = fm.get('excerpt', fm.get('brief', fm.get('description')))

    def strip_image_name(n):
        """Strip off the version number from the media file"""
        from os.path import splitext
        import re
        return re.sub(r'\-\d+$', '', splitext(n)[0])

    extant_files = list(wp.call(GetMediaLibrary(dict(parent_id=post.id))))

    def find_extant_image(image_name):
        for img in extant_files:
            if strip_image_name(basename(img.metadata['file'])) == strip_image_name(image_name):
                return img

        return None

    for r in resources:
        image_data = prepare_image(fm['identifier'], r, post.id)
        img_from = "/{}/{}".format(fm['slug'], basename(r))

        extant_image = find_extant_image(image_data['name'])

        if extant_image:
            prt("Post already has image:", extant_image.id, extant_image.link)
            img_to = extant_image.link

        elif r.endswith('.png'):  # Foolishly assuming all images are PNGs
            response = wp.call(UploadFile(image_data, overwrite=True))
            prt("Uploaded image {} to id={}, {}".format(basename(r), response['id'], response['link']))
            img_to = response['link']

        content = content.replace(img_from, img_to)

    if fm.get('featured_image') and fm.get('featured_image').strip():
        post.thumbnail = int(fm['featured_image'])
    elif hasattr(post, 'thumbnail') and isinstance(post.thumbnail, dict):
        # The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost
        post.thumbnail = post.thumbnail['attachment_id']

    post.content = content

    r = wp.call(EditPost(post.id, post))

    return r, wp.call(GetPost(post.id))
[ "def", "publish_wp", "(", "site_name", ",", "output_file", ",", "resources", ",", "args", ")", ":", "from", "wordpress_xmlrpc", "import", "Client", ",", "WordPressPost", "from", "wordpress_xmlrpc", ".", "methods", ".", "media", "import", "UploadFile", ",", "GetMediaLibrary", "from", "wordpress_xmlrpc", ".", "methods", ".", "posts", "import", "NewPost", ",", "EditPost", ",", "GetPost", "# http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png", "url", ",", "user", ",", "password", "=", "get_site_config", "(", "site_name", ")", "meta", "=", "{", "}", "for", "r", "in", "resources", ":", "if", "r", ".", "endswith", "(", "'.json'", ")", ":", "with", "open", "(", "r", ")", "as", "f", ":", "meta", "=", "json", ".", "load", "(", "f", ")", "fm", "=", "meta", ".", "get", "(", "'frontmatter'", ",", "{", "}", ")", "if", "not", "'identifier'", "in", "fm", "or", "not", "fm", "[", "'identifier'", "]", ":", "err", "(", "\"Can't publish notebook without a unique identifier. Add this to the \"", "\"Metatab document or frontmatter metadata:\\n identifier: {}\"", ".", "format", "(", "str", "(", "uuid4", "(", ")", ")", ")", ")", "wp", "=", "Client", "(", "url", ",", "user", ",", "password", ")", "post", "=", "find_post", "(", "wp", ",", "fm", "[", "'identifier'", "]", ")", "if", "post", ":", "prt", "(", "\"Updating old post\"", ")", "else", ":", "post", "=", "WordPressPost", "(", ")", "post", ".", "id", "=", "wp", ".", "call", "(", "NewPost", "(", "post", ")", ")", "prt", "(", "\"Creating new post\"", ")", "post", ".", "title", "=", "fm", ".", "get", "(", "'title'", ",", "''", ")", "post", ".", "slug", "=", "fm", ".", "get", "(", "'slug'", ")", "with", "open", "(", "output_file", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "post", ".", "terms_names", "=", "{", "'post_tag'", ":", "fm", ".", "get", "(", "'tags'", ",", "[", "]", ")", ",", "'category'", ":", "fm", ".", "get", "(", "'categories'", ",", "[", "]", ")", "}", "if", "args", ".", "header", ":", "print", "(", "yaml", ".", "dump", "(", "fm", ",", "default_flow_style", "=", "False", ")", ")", "set_custom_field", "(", "post", ",", "'identifier'", ",", "fm", "[", "'identifier'", "]", ")", "post", ".", "excerpt", "=", "fm", ".", "get", "(", "'excerpt'", ",", "fm", ".", "get", "(", "'brief'", ",", "fm", ".", "get", "(", "'description'", ")", ")", ")", "def", "strip_image_name", "(", "n", ")", ":", "\"\"\"Strip off the version number from the media file\"\"\"", "from", "os", ".", "path", "import", "splitext", "import", "re", "return", "re", ".", "sub", "(", "r'\\-\\d+$'", ",", "''", ",", "splitext", "(", "n", ")", "[", "0", "]", ")", "extant_files", "=", "list", "(", "wp", ".", "call", "(", "GetMediaLibrary", "(", "dict", "(", "parent_id", "=", "post", ".", "id", ")", ")", ")", ")", "def", "find_extant_image", "(", "image_name", ")", ":", "for", "img", "in", "extant_files", ":", "if", "strip_image_name", "(", "basename", "(", "img", ".", "metadata", "[", "'file'", "]", ")", ")", "==", "strip_image_name", "(", "image_name", ")", ":", "return", "img", "return", "None", "for", "r", "in", "resources", ":", "image_data", "=", "prepare_image", "(", "fm", "[", "'identifier'", "]", ",", "r", ",", "post", ".", "id", ")", "img_from", "=", "\"/{}/{}\"", ".", "format", "(", "fm", "[", "'slug'", "]", ",", "basename", "(", "r", ")", ")", "extant_image", "=", "find_extant_image", "(", "image_data", "[", "'name'", "]", ")", "if", "extant_image", ":", "prt", "(", "\"Post already has image:\"", ",", "extant_image", ".", "id", 
",", "extant_image", ".", "link", ")", "img_to", "=", "extant_image", ".", "link", "elif", "r", ".", "endswith", "(", "'.png'", ")", ":", "# Foolishly assuming all images are PNGs", "response", "=", "wp", ".", "call", "(", "UploadFile", "(", "image_data", ",", "overwrite", "=", "True", ")", ")", "prt", "(", "\"Uploaded image {} to id={}, {}\"", ".", "format", "(", "basename", "(", "r", ")", ",", "response", "[", "'id'", "]", ",", "response", "[", "'link'", "]", ")", ")", "img_to", "=", "response", "[", "'link'", "]", "content", "=", "content", ".", "replace", "(", "img_from", ",", "img_to", ")", "if", "fm", ".", "get", "(", "'featured_image'", ")", "and", "fm", ".", "get", "(", "'featured_image'", ")", ".", "strip", "(", ")", ":", "post", ".", "thumbnail", "=", "int", "(", "fm", "[", "'featured_image'", "]", ")", "elif", "hasattr", "(", "post", ",", "'thumbnail'", ")", "and", "isinstance", "(", "post", ".", "thumbnail", ",", "dict", ")", ":", "# The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost", "post", ".", "thumbnail", "=", "post", ".", "thumbnail", "[", "'attachment_id'", "]", "post", ".", "content", "=", "content", "r", "=", "wp", ".", "call", "(", "EditPost", "(", "post", ".", "id", ",", "post", ")", ")", "return", "r", ",", "wp", ".", "call", "(", "GetPost", "(", "post", ".", "id", ")", ")" ]
Publish a notebook to a wordpress post, using Gutenberg blocks. Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter' show_input: hide github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb identifier: 5c987397-a954-46ca-8743-bdcd7a71579c featured_image: 171 authors: - email: eric@civicknowledge.com name: Eric Busboom organization: Civic Knowledge type: wrangler tags: - Tag1 - Tag2 categories: - Demographics - Tutorial 'Featured_image' is an attachment id
[ "Publish", "a", "notebook", "to", "a", "wordpress", "post", "using", "Gutenberg", "blocks", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/wp.py#L143-L263
Metatab/metapack
metapack/package/core.py
PackageBuilder.add_single_resource
python
def add_single_resource(self, ref, **properties):
    """ Add a single resource, without trying to enumerate it's contents

    :param ref:
    :return:
    """

    t = self.doc.find_first('Root.Datafile', value=ref)

    if t:
        self.prt("Datafile exists for '{}', deleting".format(ref))
        self.doc.remove_term(t)

    term_name = self.classify_url(ref)

    ref, path, name = self.extract_path_name(ref)

    self.prt("Adding resource for '{}'".format(ref))

    try:
        encoding, ri = self.run_row_intuit(path, self._cache)
    except Exception as e:
        self.warn("Failed to intuit '{}'; {}".format(path, e))
        return None

    if not name:
        name = sha1(slugify(path).encode('ascii')).hexdigest()[:12]

        # xlrd gets grouchy if the name doesn't start with a char
        try:
            int(name[0])
            name = 'a' + name[1:]
        except:
            pass

    if 'name' in properties:
        name = properties['name']
        del properties['name']

    return self.sections.resources.new_term(term_name, ref, name=name,
                                            startline=ri.start_line,
                                            headerlines=','.join(str(e) for e in ri.header_lines),
                                            encoding=encoding,
                                            **properties)
[ "def", "add_single_resource", "(", "self", ",", "ref", ",", "*", "*", "properties", ")", ":", "t", "=", "self", ".", "doc", ".", "find_first", "(", "'Root.Datafile'", ",", "value", "=", "ref", ")", "if", "t", ":", "self", ".", "prt", "(", "\"Datafile exists for '{}', deleting\"", ".", "format", "(", "ref", ")", ")", "self", ".", "doc", ".", "remove_term", "(", "t", ")", "term_name", "=", "self", ".", "classify_url", "(", "ref", ")", "ref", ",", "path", ",", "name", "=", "self", ".", "extract_path_name", "(", "ref", ")", "self", ".", "prt", "(", "\"Adding resource for '{}'\"", ".", "format", "(", "ref", ")", ")", "try", ":", "encoding", ",", "ri", "=", "self", ".", "run_row_intuit", "(", "path", ",", "self", ".", "_cache", ")", "except", "Exception", "as", "e", ":", "self", ".", "warn", "(", "\"Failed to intuit '{}'; {}\"", ".", "format", "(", "path", ",", "e", ")", ")", "return", "None", "if", "not", "name", ":", "name", "=", "sha1", "(", "slugify", "(", "path", ")", ".", "encode", "(", "'ascii'", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "12", "]", "# xlrd gets grouchy if the name doesn't start with a char", "try", ":", "int", "(", "name", "[", "0", "]", ")", "name", "=", "'a'", "+", "name", "[", "1", ":", "]", "except", ":", "pass", "if", "'name'", "in", "properties", ":", "name", "=", "properties", "[", "'name'", "]", "del", "properties", "[", "'name'", "]", "return", "self", ".", "sections", ".", "resources", ".", "new_term", "(", "term_name", ",", "ref", ",", "name", "=", "name", ",", "startline", "=", "ri", ".", "start_line", ",", "headerlines", "=", "','", ".", "join", "(", "str", "(", "e", ")", "for", "e", "in", "ri", ".", "header_lines", ")", ",", "encoding", "=", "encoding", ",", "*", "*", "properties", ")" ]
Add a single resource, without trying to enumerate it's contents :param ref: :return:
[ "Add", "a", "single", "resource", "without", "trying", "to", "enumerate", "it", "s", "contents", ":", "param", "ref", ":", ":", "return", ":" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L253-L296
Metatab/metapack
metapack/package/core.py
PackageBuilder.add_resource
python
def add_resource(self, ref, **properties):
    """Add one or more resources entities, from a url and property values,
    possibly adding multiple entries for an excel spreadsheet or ZIP file"""

    raise NotImplementedError("Still uses decompose_url")

    du = Bunch(decompose_url(ref))

    added = []

    if du.proto == 'file' and isdir(ref):
        for f in self.find_files(ref, ['csv']):

            if f.endswith(DEFAULT_METATAB_FILE):
                continue

            if self._doc.find_first('Root.Datafile', value=f):
                self.prt("Datafile exists for '{}', ignoring".format(f))
            else:
                added.extend(self.add_resource(f, **properties))
    else:
        self.prt("Enumerating '{}'".format(ref))

        for c in enumerate_contents(ref, self._cache):
            added.append(self.add_single_resource(c.rebuild_url(), **properties))

    return added
[ "def", "add_resource", "(", "self", ",", "ref", ",", "*", "*", "properties", ")", ":", "raise", "NotImplementedError", "(", "\"Still uses decompose_url\"", ")", "du", "=", "Bunch", "(", "decompose_url", "(", "ref", ")", ")", "added", "=", "[", "]", "if", "du", ".", "proto", "==", "'file'", "and", "isdir", "(", "ref", ")", ":", "for", "f", "in", "self", ".", "find_files", "(", "ref", ",", "[", "'csv'", "]", ")", ":", "if", "f", ".", "endswith", "(", "DEFAULT_METATAB_FILE", ")", ":", "continue", "if", "self", ".", "_doc", ".", "find_first", "(", "'Root.Datafile'", ",", "value", "=", "f", ")", ":", "self", ".", "prt", "(", "\"Datafile exists for '{}', ignoring\"", ".", "format", "(", "f", ")", ")", "else", ":", "added", ".", "extend", "(", "self", ".", "add_resource", "(", "f", ",", "*", "*", "properties", ")", ")", "else", ":", "self", ".", "prt", "(", "\"Enumerating '{}'\"", ".", "format", "(", "ref", ")", ")", "for", "c", "in", "enumerate_contents", "(", "ref", ",", "self", ".", "_cache", ")", ":", "added", ".", "append", "(", "self", ".", "add_single_resource", "(", "c", ".", "rebuild_url", "(", ")", ",", "*", "*", "properties", ")", ")", "return", "added" ]
Add one or more resources entities, from a url and property values, possibly adding multiple entries for an excel spreadsheet or ZIP file
[ "Add", "one", "or", "more", "resources", "entities", "from", "a", "url", "and", "property", "values", "possibly", "adding", "multiple", "entries", "for", "an", "excel", "spreadsheet", "or", "ZIP", "file" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L298-L323
Metatab/metapack
metapack/package/core.py
PackageBuilder._clean_doc
python
def _clean_doc(self, doc=None):
    """Clean the doc before writing it, removing unnecessary properties and doing other operations."""

    if doc is None:
        doc = self.doc

    resources = doc['Resources']

    # We don't need these anymore because all of the data written into the package is normalized.
    for arg in ['startline', 'headerlines', 'encoding']:
        for e in list(resources.args):
            if e.lower() == arg:
                resources.args.remove(e)

    for term in resources:
        term['startline'] = None
        term['headerlines'] = None
        term['encoding'] = None

    schema = doc['Schema']

    ## FIXME! This is probably dangerous, because the section args are changing, but the children
    ## are not, so when these two are combined in the Term.properties() acessors, the values are off.
    ## Because of this, _clean_doc should be run immediately before writing the doc.

    for arg in ['altname', 'transform']:
        for e in list(schema.args):
            if e.lower() == arg:
                schema.args.remove(e)

    for table in self.doc.find('Root.Table'):
        for col in table.find('Column'):
            try:
                col.value = col['altname'].value
            except:
                pass

            col['altname'] = None
            col['transform'] = None

    # Remove any DSNs
    #for dsn_t in self.doc.find('Root.Dsn'):
    #    self.doc.remove_term(dsn_t)

    return doc
[ "def", "_clean_doc", "(", "self", ",", "doc", "=", "None", ")", ":", "if", "doc", "is", "None", ":", "doc", "=", "self", ".", "doc", "resources", "=", "doc", "[", "'Resources'", "]", "# We don't need these anymore because all of the data written into the package is normalized.", "for", "arg", "in", "[", "'startline'", ",", "'headerlines'", ",", "'encoding'", "]", ":", "for", "e", "in", "list", "(", "resources", ".", "args", ")", ":", "if", "e", ".", "lower", "(", ")", "==", "arg", ":", "resources", ".", "args", ".", "remove", "(", "e", ")", "for", "term", "in", "resources", ":", "term", "[", "'startline'", "]", "=", "None", "term", "[", "'headerlines'", "]", "=", "None", "term", "[", "'encoding'", "]", "=", "None", "schema", "=", "doc", "[", "'Schema'", "]", "## FIXME! This is probably dangerous, because the section args are changing, but the children", "## are not, so when these two are combined in the Term.properties() acessors, the values are off.", "## Because of this, _clean_doc should be run immediately before writing the doc.", "for", "arg", "in", "[", "'altname'", ",", "'transform'", "]", ":", "for", "e", "in", "list", "(", "schema", ".", "args", ")", ":", "if", "e", ".", "lower", "(", ")", "==", "arg", ":", "schema", ".", "args", ".", "remove", "(", "e", ")", "for", "table", "in", "self", ".", "doc", ".", "find", "(", "'Root.Table'", ")", ":", "for", "col", "in", "table", ".", "find", "(", "'Column'", ")", ":", "try", ":", "col", ".", "value", "=", "col", "[", "'altname'", "]", ".", "value", "except", ":", "pass", "col", "[", "'altname'", "]", "=", "None", "col", "[", "'transform'", "]", "=", "None", "# Remove any DSNs", "#for dsn_t in self.doc.find('Root.Dsn'):", "# self.doc.remove_term(dsn_t)", "return", "doc" ]
Clean the doc before writing it, removing unnecessary properties and doing other operations.
[ "Clean", "the", "doc", "before", "writing", "it", "removing", "unnecessary", "properties", "and", "doing", "other", "operations", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L325-L370
Metatab/metapack
metapack/package/core.py
PackageBuilder._load_resources
python
def _load_resources(self, abs_path=False): """Copy all of the Datafile entries into the package""" from metapack.doc import MetapackDoc assert type(self.doc) == MetapackDoc for r in self.datafiles: # Special handling for SQL is probably a really bad idea. It should be handled as # a Rowgenerator. if r.term_is('root.sql'): if not r.value: self.warn("No value for SQL URL for {} ".format(r.term)) continue try: self._load_resource(r, abs_path) except Exception as e: if r.props.get('ignoreerrors'): self.warn(f"Ignoring errors for {r.name}: {str(e)}") pass else: raise e else: if not r.url: self.warn("No value for URL for {} ".format(r.term)) continue try: if self._resource.exists(r): self.prt("Resource '{}' exists, skipping".format(r.name)) continue except AttributeError: pass self.prt("Reading resource {} from {} ".format(r.name, r.resolved_url)) try: if not r.headers: raise PackageError("Resource {} does not have header. Have schemas been generated?" .format(r.name)) except AttributeError: raise PackageError("Resource '{}' of type {} does not have a headers property" .format(r.url, type(r))) try: self._load_resource(r, abs_path) except Exception as e: if r.props.get('ignoreerrors'): self.warn(f"Ignoring errors for {r.name}: {str(e)}") pass else: raise e
[ "def", "_load_resources", "(", "self", ",", "abs_path", "=", "False", ")", ":", "from", "metapack", ".", "doc", "import", "MetapackDoc", "assert", "type", "(", "self", ".", "doc", ")", "==", "MetapackDoc", "for", "r", "in", "self", ".", "datafiles", ":", "# Special handling for SQL is probably a really bad idea. It should be handled as", "# a Rowgenerator.", "if", "r", ".", "term_is", "(", "'root.sql'", ")", ":", "if", "not", "r", ".", "value", ":", "self", ".", "warn", "(", "\"No value for SQL URL for {} \"", ".", "format", "(", "r", ".", "term", ")", ")", "continue", "try", ":", "self", ".", "_load_resource", "(", "r", ",", "abs_path", ")", "except", "Exception", "as", "e", ":", "if", "r", ".", "props", ".", "get", "(", "'ignoreerrors'", ")", ":", "self", ".", "warn", "(", "f\"Ignoring errors for {r.name}: {str(e)}\"", ")", "pass", "else", ":", "raise", "e", "else", ":", "if", "not", "r", ".", "url", ":", "self", ".", "warn", "(", "\"No value for URL for {} \"", ".", "format", "(", "r", ".", "term", ")", ")", "continue", "try", ":", "if", "self", ".", "_resource", ".", "exists", "(", "r", ")", ":", "self", ".", "prt", "(", "\"Resource '{}' exists, skipping\"", ".", "format", "(", "r", ".", "name", ")", ")", "continue", "except", "AttributeError", ":", "pass", "self", ".", "prt", "(", "\"Reading resource {} from {} \"", ".", "format", "(", "r", ".", "name", ",", "r", ".", "resolved_url", ")", ")", "try", ":", "if", "not", "r", ".", "headers", ":", "raise", "PackageError", "(", "\"Resource {} does not have header. Have schemas been generated?\"", ".", "format", "(", "r", ".", "name", ")", ")", "except", "AttributeError", ":", "raise", "PackageError", "(", "\"Resource '{}' of type {} does not have a headers property\"", ".", "format", "(", "r", ".", "url", ",", "type", "(", "r", ")", ")", ")", "try", ":", "self", ".", "_load_resource", "(", "r", ",", "abs_path", ")", "except", "Exception", "as", "e", ":", "if", "r", ".", "props", ".", "get", "(", "'ignoreerrors'", ")", ":", "self", ".", "warn", "(", "f\"Ignoring errors for {r.name}: {str(e)}\"", ")", "pass", "else", ":", "raise", "e" ]
Copy all of the Datafile entries into the package
[ "Copy", "all", "of", "the", "Datafile", "entries", "into", "the", "package" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L372-L427
Metatab/metapack
metapack/package/core.py
PackageBuilder._load_documentation_files
def _load_documentation_files(self): """Copy all of the Datafile """ for t in self.doc.find(['Root.Documentation', 'Root.Image', 'Root.Notebook']): resource = self._get_ref_contents(t) if not resource: continue if t.term_is('Root.Documentation'): # Prefer the slugified title to the base name, because in cases of collections # of many data releases, like annual datasets, documentation files may all have the same name, # but the titles should be different. real_name_base, ext = splitext(resource.resource_file) name = t.get_value('name') if t.get_value('name') else real_name_base real_name = slugify(name) + ext self._load_documentation(t, resource.read(), resource.resource_file) # Root.Readme is a special term added from Jupyter notebooks, so README files # can be generated for packages. t = self.doc.find_first('Root.Readme') if t and (t.value or '').strip(): # Since the text is comming from a notebook, it probably does not have a title t['title'] = 'Readme' readme = '# '+ (self.doc.get_value('Root.Title') or '').strip() if self.doc.description: readme += '\n\n' + (self.doc.description or '').strip() if (t.value or '').strip(): readme += '\n\n' +(t.value or '').strip() self._load_documentation(t, readme.encode('utf8'), 'README.md')
python
def _load_documentation_files(self): """Copy all of the Datafile """ for t in self.doc.find(['Root.Documentation', 'Root.Image', 'Root.Notebook']): resource = self._get_ref_contents(t) if not resource: continue if t.term_is('Root.Documentation'): # Prefer the slugified title to the base name, because in cases of collections # of many data releases, like annual datasets, documentation files may all have the same name, # but the titles should be different. real_name_base, ext = splitext(resource.resource_file) name = t.get_value('name') if t.get_value('name') else real_name_base real_name = slugify(name) + ext self._load_documentation(t, resource.read(), resource.resource_file) # Root.Readme is a special term added from Jupyter notebooks, so README files # can be generated for packages. t = self.doc.find_first('Root.Readme') if t and (t.value or '').strip(): # Since the text is comming from a notebook, it probably does not have a title t['title'] = 'Readme' readme = '# '+ (self.doc.get_value('Root.Title') or '').strip() if self.doc.description: readme += '\n\n' + (self.doc.description or '').strip() if (t.value or '').strip(): readme += '\n\n' +(t.value or '').strip() self._load_documentation(t, readme.encode('utf8'), 'README.md')
[ "def", "_load_documentation_files", "(", "self", ")", ":", "for", "t", "in", "self", ".", "doc", ".", "find", "(", "[", "'Root.Documentation'", ",", "'Root.Image'", ",", "'Root.Notebook'", "]", ")", ":", "resource", "=", "self", ".", "_get_ref_contents", "(", "t", ")", "if", "not", "resource", ":", "continue", "if", "t", ".", "term_is", "(", "'Root.Documentation'", ")", ":", "# Prefer the slugified title to the base name, because in cases of collections", "# of many data releases, like annual datasets, documentation files may all have the same name,", "# but the titles should be different.", "real_name_base", ",", "ext", "=", "splitext", "(", "resource", ".", "resource_file", ")", "name", "=", "t", ".", "get_value", "(", "'name'", ")", "if", "t", ".", "get_value", "(", "'name'", ")", "else", "real_name_base", "real_name", "=", "slugify", "(", "name", ")", "+", "ext", "self", ".", "_load_documentation", "(", "t", ",", "resource", ".", "read", "(", ")", ",", "resource", ".", "resource_file", ")", "# Root.Readme is a special term added from Jupyter notebooks, so README files", "# can be generated for packages.", "t", "=", "self", ".", "doc", ".", "find_first", "(", "'Root.Readme'", ")", "if", "t", "and", "(", "t", ".", "value", "or", "''", ")", ".", "strip", "(", ")", ":", "# Since the text is comming from a notebook, it probably does not have a title", "t", "[", "'title'", "]", "=", "'Readme'", "readme", "=", "'# '", "+", "(", "self", ".", "doc", ".", "get_value", "(", "'Root.Title'", ")", "or", "''", ")", ".", "strip", "(", ")", "if", "self", ".", "doc", ".", "description", ":", "readme", "+=", "'\\n\\n'", "+", "(", "self", ".", "doc", ".", "description", "or", "''", ")", ".", "strip", "(", ")", "if", "(", "t", ".", "value", "or", "''", ")", ".", "strip", "(", ")", ":", "readme", "+=", "'\\n\\n'", "+", "(", "t", ".", "value", "or", "''", ")", ".", "strip", "(", ")", "self", ".", "_load_documentation", "(", "t", ",", "readme", ".", "encode", "(", "'utf8'", ")", ",", "'README.md'", ")" ]
Copy all of the Datafile
[ "Copy", "all", "of", "the", "Datafile" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L491-L528
Metatab/metapack
metapack/package/core.py
PackageBuilder._load_files
def _load_files(self): """Load other files""" def copy_dir(path): for (dr, _, files) in walk(path): for fn in files: if '__pycache__' in fn: continue relpath = dr.replace(self.source_dir, '').strip('/') src = parse_app_url(join(dr, fn)) dest = join(relpath, fn) resource = src.get_resource() self._load_file( dest, resource.read()) for term in self.resources(term = 'Root.Pythonlib'): uv = parse_app_url(term.value) ur = parse_app_url(self.source_dir) # In the case that the input doc is a file, and the ref is to a file, # try interpreting the file as relative. if ur.proto == 'file' and uv.proto == 'file': # Either a file or a directory path = join(self.source_dir, uv.path) if isdir(path): copy_dir(path) else: # Load it as a URL f = self._get_ref_contents(term) try: self._load_file(term.value,f.read() ) except Exception as e: raise PackageError("Failed to load file for '{}': {} ".format(term.value, e)) nb_dir = join(self.source_dir, 'notebooks') if exists(nb_dir) and isdir(nb_dir): copy_dir(nb_dir)
python
def _load_files(self): """Load other files""" def copy_dir(path): for (dr, _, files) in walk(path): for fn in files: if '__pycache__' in fn: continue relpath = dr.replace(self.source_dir, '').strip('/') src = parse_app_url(join(dr, fn)) dest = join(relpath, fn) resource = src.get_resource() self._load_file( dest, resource.read()) for term in self.resources(term = 'Root.Pythonlib'): uv = parse_app_url(term.value) ur = parse_app_url(self.source_dir) # In the case that the input doc is a file, and the ref is to a file, # try interpreting the file as relative. if ur.proto == 'file' and uv.proto == 'file': # Either a file or a directory path = join(self.source_dir, uv.path) if isdir(path): copy_dir(path) else: # Load it as a URL f = self._get_ref_contents(term) try: self._load_file(term.value,f.read() ) except Exception as e: raise PackageError("Failed to load file for '{}': {} ".format(term.value, e)) nb_dir = join(self.source_dir, 'notebooks') if exists(nb_dir) and isdir(nb_dir): copy_dir(nb_dir)
[ "def", "_load_files", "(", "self", ")", ":", "def", "copy_dir", "(", "path", ")", ":", "for", "(", "dr", ",", "_", ",", "files", ")", "in", "walk", "(", "path", ")", ":", "for", "fn", "in", "files", ":", "if", "'__pycache__'", "in", "fn", ":", "continue", "relpath", "=", "dr", ".", "replace", "(", "self", ".", "source_dir", ",", "''", ")", ".", "strip", "(", "'/'", ")", "src", "=", "parse_app_url", "(", "join", "(", "dr", ",", "fn", ")", ")", "dest", "=", "join", "(", "relpath", ",", "fn", ")", "resource", "=", "src", ".", "get_resource", "(", ")", "self", ".", "_load_file", "(", "dest", ",", "resource", ".", "read", "(", ")", ")", "for", "term", "in", "self", ".", "resources", "(", "term", "=", "'Root.Pythonlib'", ")", ":", "uv", "=", "parse_app_url", "(", "term", ".", "value", ")", "ur", "=", "parse_app_url", "(", "self", ".", "source_dir", ")", "# In the case that the input doc is a file, and the ref is to a file,", "# try interpreting the file as relative.", "if", "ur", ".", "proto", "==", "'file'", "and", "uv", ".", "proto", "==", "'file'", ":", "# Either a file or a directory", "path", "=", "join", "(", "self", ".", "source_dir", ",", "uv", ".", "path", ")", "if", "isdir", "(", "path", ")", ":", "copy_dir", "(", "path", ")", "else", ":", "# Load it as a URL", "f", "=", "self", ".", "_get_ref_contents", "(", "term", ")", "try", ":", "self", ".", "_load_file", "(", "term", ".", "value", ",", "f", ".", "read", "(", ")", ")", "except", "Exception", "as", "e", ":", "raise", "PackageError", "(", "\"Failed to load file for '{}': {} \"", ".", "format", "(", "term", ".", "value", ",", "e", ")", ")", "nb_dir", "=", "join", "(", "self", ".", "source_dir", ",", "'notebooks'", ")", "if", "exists", "(", "nb_dir", ")", "and", "isdir", "(", "nb_dir", ")", ":", "copy_dir", "(", "nb_dir", ")" ]
Load other files
[ "Load", "other", "files" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L534-L577
Metatab/metapack
metapack/package/core.py
PackageBuilder.create_nv_link
def create_nv_link(self): """After a save(), write a link to the saved file using a non-versioned name""" from os.path import abspath, islink from os import unlink, symlink nv_name = self.doc.as_version(None) from_path = abspath(self._last_write_path or self.package_path.path) to_path = join(dirname(from_path), nv_name + self.type_suffix) if islink(to_path): unlink(to_path) symlink(from_path, to_path)
python
def create_nv_link(self): """After a save(), write a link to the saved file using a non-versioned name""" from os.path import abspath, islink from os import unlink, symlink nv_name = self.doc.as_version(None) from_path = abspath(self._last_write_path or self.package_path.path) to_path = join(dirname(from_path), nv_name + self.type_suffix) if islink(to_path): unlink(to_path) symlink(from_path, to_path)
[ "def", "create_nv_link", "(", "self", ")", ":", "from", "os", ".", "path", "import", "abspath", ",", "islink", "from", "os", "import", "unlink", ",", "symlink", "nv_name", "=", "self", ".", "doc", ".", "as_version", "(", "None", ")", "from_path", "=", "abspath", "(", "self", ".", "_last_write_path", "or", "self", ".", "package_path", ".", "path", ")", "to_path", "=", "join", "(", "dirname", "(", "from_path", ")", ",", "nv_name", "+", "self", ".", "type_suffix", ")", "if", "islink", "(", "to_path", ")", ":", "unlink", "(", "to_path", ")", "symlink", "(", "from_path", ",", "to_path", ")" ]
After a save(), write a link to the saved file using a non-versioned name
[ "After", "a", "save", "()", "write", "a", "link", "to", "the", "saved", "file", "using", "a", "non", "-", "versioned", "name" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/core.py#L587-L601
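The non-versioned alias written by create_nv_link is a small symlink-refresh pattern that can be sketched on its own. A minimal standalone sketch with invented file names; this is not the metapack API:

from os import symlink, unlink
from os.path import abspath, dirname, islink, join

def link_non_versioned(versioned_path, non_versioned_name):
    """Point a stable, non-versioned name at the most recently written versioned file."""
    src = abspath(versioned_path)                 # e.g. dist/example-1.2.3.zip (hypothetical)
    dst = join(dirname(src), non_versioned_name)  # e.g. dist/example.zip (hypothetical)
    if islink(dst):
        unlink(dst)                               # drop the stale alias before relinking
    symlink(src, dst)

# link_non_versioned('dist/example-1.2.3.zip', 'example.zip')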
NicolasLM/spinach
spinach/brokers/memory.py
MemoryBroker.enqueue_jobs
def enqueue_jobs(self, jobs: Iterable[Job]): """Enqueue a batch of jobs.""" for job in jobs: if job.should_start: job.status = JobStatus.QUEUED queue = self._get_queue(job.queue) queue.put(job.serialize()) else: with self._lock: job.status = JobStatus.WAITING self._future_jobs.append(job.serialize()) self._future_jobs.sort(key=lambda j: Job.deserialize(j).at) self._something_happened.set()
python
def enqueue_jobs(self, jobs: Iterable[Job]): """Enqueue a batch of jobs.""" for job in jobs: if job.should_start: job.status = JobStatus.QUEUED queue = self._get_queue(job.queue) queue.put(job.serialize()) else: with self._lock: job.status = JobStatus.WAITING self._future_jobs.append(job.serialize()) self._future_jobs.sort(key=lambda j: Job.deserialize(j).at) self._something_happened.set()
[ "def", "enqueue_jobs", "(", "self", ",", "jobs", ":", "Iterable", "[", "Job", "]", ")", ":", "for", "job", "in", "jobs", ":", "if", "job", ".", "should_start", ":", "job", ".", "status", "=", "JobStatus", ".", "QUEUED", "queue", "=", "self", ".", "_get_queue", "(", "job", ".", "queue", ")", "queue", ".", "put", "(", "job", ".", "serialize", "(", ")", ")", "else", ":", "with", "self", ".", "_lock", ":", "job", ".", "status", "=", "JobStatus", ".", "WAITING", "self", ".", "_future_jobs", ".", "append", "(", "job", ".", "serialize", "(", ")", ")", "self", ".", "_future_jobs", ".", "sort", "(", "key", "=", "lambda", "j", ":", "Job", ".", "deserialize", "(", "j", ")", ".", "at", ")", "self", ".", "_something_happened", ".", "set", "(", ")" ]
Enqueue a batch of jobs.
[ "Enqueue", "a", "batch", "of", "jobs", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/memory.py#L36-L48
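The split between immediately-runnable and future jobs in enqueue_jobs can be illustrated with a small standalone sketch of the same pattern; it uses only plain queues and tuples, not the spinach Job, JobStatus, or broker API:

import time
from queue import Queue

ready_queues = {}   # queue name -> FIFO of serialized job payloads
future_jobs = []    # jobs waiting for their start time, kept sorted soonest-first

def enqueue(job_payload, queue_name, at=None):
    """Put a job on its queue now, or park it until `at` (a monotonic timestamp)."""
    if at is None or at <= time.monotonic():
        ready_queues.setdefault(queue_name, Queue()).put(job_payload)
    else:
        future_jobs.append((at, job_payload, queue_name))
        future_jobs.sort(key=lambda item: item[0])   # like the broker's sort on Job.at

enqueue({'task': 'send_email'}, 'default')
enqueue({'task': 'send_report'}, 'default', at=time.monotonic() + 60)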
NicolasLM/spinach
spinach/brokers/memory.py
MemoryBroker.register_periodic_tasks
def register_periodic_tasks(self, tasks: Iterable[Task]): """Register tasks that need to be scheduled periodically.""" for task in tasks: self._scheduler.enter( int(task.periodicity.total_seconds()), 0, self._schedule_periodic_task, argument=(task,) )
python
def register_periodic_tasks(self, tasks: Iterable[Task]): """Register tasks that need to be scheduled periodically.""" for task in tasks: self._scheduler.enter( int(task.periodicity.total_seconds()), 0, self._schedule_periodic_task, argument=(task,) )
[ "def", "register_periodic_tasks", "(", "self", ",", "tasks", ":", "Iterable", "[", "Task", "]", ")", ":", "for", "task", "in", "tasks", ":", "self", ".", "_scheduler", ".", "enter", "(", "int", "(", "task", ".", "periodicity", ".", "total_seconds", "(", ")", ")", ",", "0", ",", "self", ".", "_schedule_periodic_task", ",", "argument", "=", "(", "task", ",", ")", ")" ]
Register tasks that need to be scheduled periodically.
[ "Register", "tasks", "that", "need", "to", "be", "scheduled", "periodically", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/memory.py#L67-L75
NicolasLM/spinach
spinach/brokers/memory.py
MemoryBroker.next_future_periodic_delta
def next_future_periodic_delta(self) -> Optional[float]: """Give the amount of seconds before the next periodic task is due.""" try: next_event = self._scheduler.queue[0] except IndexError: return None now = time.monotonic() next_event_time = next_event[0] if next_event_time < now: return 0 return next_event_time - now
python
def next_future_periodic_delta(self) -> Optional[float]: """Give the amount of seconds before the next periodic task is due.""" try: next_event = self._scheduler.queue[0] except IndexError: return None now = time.monotonic() next_event_time = next_event[0] if next_event_time < now: return 0 return next_event_time - now
[ "def", "next_future_periodic_delta", "(", "self", ")", "->", "Optional", "[", "float", "]", ":", "try", ":", "next_event", "=", "self", ".", "_scheduler", ".", "queue", "[", "0", "]", "except", "IndexError", ":", "return", "None", "now", "=", "time", ".", "monotonic", "(", ")", "next_event_time", "=", "next_event", "[", "0", "]", "if", "next_event_time", "<", "now", ":", "return", "0", "return", "next_event_time", "-", "now" ]
Give the number of seconds before the next periodic task is due.
[ "Give", "the", "number", "of", "seconds", "before", "the", "next", "periodic", "task", "is", "due", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/memory.py#L89-L101
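The delta computation in next_future_periodic_delta is consistent with a plain sched.scheduler driven by time.monotonic. A minimal standard-library sketch of the same calculation (the scheduled action here is just a print):

import sched
import time

scheduler = sched.scheduler(timefunc=time.monotonic, delayfunc=time.sleep)
scheduler.enter(30, 0, print, argument=('run periodic task',))

def next_delta(scheduler):
    """Seconds until the earliest scheduled event, clamped at zero; None if nothing is queued."""
    try:
        next_event_time = scheduler.queue[0].time
    except IndexError:
        return None
    return max(0, next_event_time - time.monotonic())

print(next_delta(scheduler))   # roughly 30.0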
NicolasLM/spinach
spinach/brokers/memory.py
MemoryBroker.inspect_periodic_tasks
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]: """Get the next periodic task schedule. Used only for debugging and during tests. """ return [(int(e[0]), e[3][0].name) for e in self._scheduler.queue]
python
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]: """Get the next periodic task schedule. Used only for debugging and during tests. """ return [(int(e[0]), e[3][0].name) for e in self._scheduler.queue]
[ "def", "inspect_periodic_tasks", "(", "self", ")", "->", "List", "[", "Tuple", "[", "int", ",", "str", "]", "]", ":", "return", "[", "(", "int", "(", "e", "[", "0", "]", ")", ",", "e", "[", "3", "]", "[", "0", "]", ".", "name", ")", "for", "e", "in", "self", ".", "_scheduler", ".", "queue", "]" ]
Get the next periodic task schedule. Used only for debugging and during tests.
[ "Get", "the", "next", "periodic", "task", "schedule", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/memory.py#L103-L108
NicolasLM/spinach
spinach/brokers/memory.py
MemoryBroker.get_jobs_from_queue
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]: """Get jobs from a queue.""" rv = list() while len(rv) < max_jobs: try: job_json_string = self._get_queue(queue).get(block=False) except Empty: break job = Job.deserialize(job_json_string) job.status = JobStatus.RUNNING rv.append(job) return rv
python
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]: """Get jobs from a queue.""" rv = list() while len(rv) < max_jobs: try: job_json_string = self._get_queue(queue).get(block=False) except Empty: break job = Job.deserialize(job_json_string) job.status = JobStatus.RUNNING rv.append(job) return rv
[ "def", "get_jobs_from_queue", "(", "self", ",", "queue", ":", "str", ",", "max_jobs", ":", "int", ")", "->", "List", "[", "Job", "]", ":", "rv", "=", "list", "(", ")", "while", "len", "(", "rv", ")", "<", "max_jobs", ":", "try", ":", "job_json_string", "=", "self", ".", "_get_queue", "(", "queue", ")", ".", "get", "(", "block", "=", "False", ")", "except", "Empty", ":", "break", "job", "=", "Job", ".", "deserialize", "(", "job_json_string", ")", "job", ".", "status", "=", "JobStatus", ".", "RUNNING", "rv", ".", "append", "(", "job", ")", "return", "rv" ]
Get jobs from a queue.
[ "Get", "jobs", "from", "a", "queue", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/memory.py#L117-L130
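Draining up to max_jobs without blocking relies on queue.Queue.get(block=False) raising Empty. A small self-contained sketch of that batched, non-blocking read, with no spinach types involved:

from queue import Empty, Queue

def drain(q, max_items):
    """Return up to max_items from q without ever blocking."""
    items = []
    while len(items) < max_items:
        try:
            items.append(q.get(block=False))
        except Empty:
            break            # queue exhausted before the batch filled up
    return items

q = Queue()
for i in range(3):
    q.put(i)
print(drain(q, 10))   # [0, 1, 2]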
Ryuno-Ki/webmention-tools
webmentiontools/urlinfo.py
UrlInfo.snippetWithLink
def snippetWithLink(self, url): """ This method will try to return the first <p> or <div> that contains an <a> tag linking to the given URL. """ link = self.soup.find("a", attrs={'href': url}) if link: for p in link.parents: if p.name in ('p', 'div'): return ' '.join(p.text.split()[0:30]) return None
python
def snippetWithLink(self, url): """ This method will try to return the first <p> or <div> that contains an <a> tag linking to the given URL. """ link = self.soup.find("a", attrs={'href': url}) if link: for p in link.parents: if p.name in ('p', 'div'): return ' '.join(p.text.split()[0:30]) return None
[ "def", "snippetWithLink", "(", "self", ",", "url", ")", ":", "link", "=", "self", ".", "soup", ".", "find", "(", "\"a\"", ",", "attrs", "=", "{", "'href'", ":", "url", "}", ")", "if", "link", ":", "for", "p", "in", "link", ".", "parents", ":", "if", "p", ".", "name", "in", "(", "'p'", ",", "'div'", ")", ":", "return", "' '", ".", "join", "(", "p", ".", "text", ".", "split", "(", ")", "[", "0", ":", "30", "]", ")", "return", "None" ]
This method will try to return the first <p> or <div> that contains an <a> tag linking to the given URL.
[ "This", "method", "will", "try", "to", "return", "the", "first", "<p", ">", "or", "<div", ">", "that", "contains", "an", "<a", ">", "tag", "linking", "to", "the", "given", "URL", "." ]
train
https://github.com/Ryuno-Ki/webmention-tools/blob/69851e34089d925cb856c936d2bcca7b09ecfdfd/webmentiontools/urlinfo.py#L90-L100
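The parent walk in snippetWithLink assumes self.soup is a BeautifulSoup tree. A short sketch of the same lookup against bs4 directly; the HTML string is invented for illustration:

from bs4 import BeautifulSoup

html = '<div><p>Great write-up, see <a href="https://example.com/post">this post</a> for details.</p></div>'
soup = BeautifulSoup(html, 'html.parser')

link = soup.find('a', attrs={'href': 'https://example.com/post'})
snippet = None
if link:
    for parent in link.parents:                  # walk upwards towards <html>
        if parent.name in ('p', 'div'):
            snippet = ' '.join(parent.text.split()[0:30])
            break                                # first enclosing <p>/<div> wins
print(snippet)   # "Great write-up, see this post for details."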
Metatab/metapack
metapack/support/pylib.py
row_generator
def row_generator(resource, doc, env, *args, **kwargs): """ An example row generator function. Reference this function in a Metatab file as the value of a Datafile: Datafile: python:pylib#row_generator The function must yield rows, with the first being headers, and subsequenct rows being data. :param resource: The Datafile term being processed :param doc: The Metatab document that contains the term being processed :param args: Positional arguments passed to the generator :param kwargs: Keyword arguments passed to the generator :return: The env argument is a dict with these environmental keys: * CACHE_DIR * RESOURCE_NAME * RESOLVED_URL * WORKING_DIR * METATAB_DOC * METATAB_WORKING_DIR * METATAB_PACKAGE It also contains key/value pairs for all of the properties of the resource. """ yield 'a b c'.split() for i in range(10): yield [i, i*2, i*3]
python
def row_generator(resource, doc, env, *args, **kwargs): """ An example row generator function. Reference this function in a Metatab file as the value of a Datafile: Datafile: python:pylib#row_generator The function must yield rows, with the first being headers, and subsequenct rows being data. :param resource: The Datafile term being processed :param doc: The Metatab document that contains the term being processed :param args: Positional arguments passed to the generator :param kwargs: Keyword arguments passed to the generator :return: The env argument is a dict with these environmental keys: * CACHE_DIR * RESOURCE_NAME * RESOLVED_URL * WORKING_DIR * METATAB_DOC * METATAB_WORKING_DIR * METATAB_PACKAGE It also contains key/value pairs for all of the properties of the resource. """ yield 'a b c'.split() for i in range(10): yield [i, i*2, i*3]
[ "def", "row_generator", "(", "resource", ",", "doc", ",", "env", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "yield", "'a b c'", ".", "split", "(", ")", "for", "i", "in", "range", "(", "10", ")", ":", "yield", "[", "i", ",", "i", "*", "2", ",", "i", "*", "3", "]" ]
An example row generator function. Reference this function in a Metatab file as the value of a Datafile: Datafile: python:pylib#row_generator The function must yield rows, with the first being headers, and subsequent rows being data. :param resource: The Datafile term being processed :param doc: The Metatab document that contains the term being processed :param args: Positional arguments passed to the generator :param kwargs: Keyword arguments passed to the generator :return: The env argument is a dict with these environmental keys: * CACHE_DIR * RESOURCE_NAME * RESOLVED_URL * WORKING_DIR * METATAB_DOC * METATAB_WORKING_DIR * METATAB_PACKAGE It also contains key/value pairs for all of the properties of the resource.
[ "An", "example", "row", "generator", "function", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/support/pylib.py#L4-L37
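Because a row generator simply yields a header row followed by data rows, the function defined above can be exercised without a Metatab document at all; the None arguments stand in for the resource, doc, and env that the runtime would normally supply:

rows = row_generator(resource=None, doc=None, env=None)

header = next(rows)      # ['a', 'b', 'c']
data = list(rows)        # [[0, 0, 0], [1, 2, 3], ..., [9, 18, 27]]
print(header, data[1], data[-1])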
Metatab/metapack
metapack/support/pylib.py
example_transform
def example_transform(v, row, row_n, i_s, i_d, header_s, header_d,scratch, errors, accumulator): """ An example column transform. This is an example of a column transform with all of the arguments listed. An real transform can omit any ( or all ) of these, and can supply them in any order; the calling code will inspect the signature. When the function is listed as a transform for a column, it is called for every row of data. :param v: The current value of the column :param row: A RowProxy object for the whiole row. :param row_n: The current row number. :param i_s: The numeric index of the source column :param i_d: The numeric index for the destination column :param header_s: The name of the source column :param header_d: The name of the destination column :param scratch: A dict that can be used for storing any values. Persists between rows. :param errors: A dict used to store error messages. Persists for all columns in a row, but not between rows. :param accumulator: A dict for use in accumulating values, such as computing aggregates. :return: The final value to be supplied for the column. """ return str(v)+'-foo'
python
def example_transform(v, row, row_n, i_s, i_d, header_s, header_d,scratch, errors, accumulator): """ An example column transform. This is an example of a column transform with all of the arguments listed. An real transform can omit any ( or all ) of these, and can supply them in any order; the calling code will inspect the signature. When the function is listed as a transform for a column, it is called for every row of data. :param v: The current value of the column :param row: A RowProxy object for the whiole row. :param row_n: The current row number. :param i_s: The numeric index of the source column :param i_d: The numeric index for the destination column :param header_s: The name of the source column :param header_d: The name of the destination column :param scratch: A dict that can be used for storing any values. Persists between rows. :param errors: A dict used to store error messages. Persists for all columns in a row, but not between rows. :param accumulator: A dict for use in accumulating values, such as computing aggregates. :return: The final value to be supplied for the column. """ return str(v)+'-foo'
[ "def", "example_transform", "(", "v", ",", "row", ",", "row_n", ",", "i_s", ",", "i_d", ",", "header_s", ",", "header_d", ",", "scratch", ",", "errors", ",", "accumulator", ")", ":", "return", "str", "(", "v", ")", "+", "'-foo'" ]
An example column transform. This is an example of a column transform with all of the arguments listed. A real transform can omit any (or all) of these, and can supply them in any order; the calling code will inspect the signature. When the function is listed as a transform for a column, it is called for every row of data. :param v: The current value of the column :param row: A RowProxy object for the whole row. :param row_n: The current row number. :param i_s: The numeric index of the source column :param i_d: The numeric index for the destination column :param header_s: The name of the source column :param header_d: The name of the destination column :param scratch: A dict that can be used for storing any values. Persists between rows. :param errors: A dict used to store error messages. Persists for all columns in a row, but not between rows. :param accumulator: A dict for use in accumulating values, such as computing aggregates. :return: The final value to be supplied for the column.
[ "An", "example", "column", "transform", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/support/pylib.py#L40-L62
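Since the docstring says the caller inspects the transform's signature, a sketch of that dispatch with inspect.signature shows why a transform may declare only the arguments it needs. The call_transform helper and the available dict are invented for the example; they are not part of metapack:

import inspect

def call_transform(func, available):
    """Call func with only the keyword arguments its signature actually declares."""
    wanted = inspect.signature(func).parameters
    return func(**{name: value for name, value in available.items() if name in wanted})

def upper_transform(v, header_s):        # declares just two of the possible arguments
    return f'{header_s}:{str(v).upper()}'

available = {'v': 'ca', 'row': None, 'row_n': 7, 'header_s': 'state', 'header_d': 'state_code'}
print(call_transform(upper_transform, available))   # state:CA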
Metatab/metapack
metapack/index.py
search_index_file
def search_index_file(): """Return the default local index file, from the download cache""" from metapack import Downloader from os import environ return environ.get('METAPACK_SEARCH_INDEX', Downloader.get_instance().cache.getsyspath('index.json'))
python
def search_index_file(): """Return the default local index file, from the download cache""" from metapack import Downloader from os import environ return environ.get('METAPACK_SEARCH_INDEX', Downloader.get_instance().cache.getsyspath('index.json'))
[ "def", "search_index_file", "(", ")", ":", "from", "metapack", "import", "Downloader", "from", "os", "import", "environ", "return", "environ", ".", "get", "(", "'METAPACK_SEARCH_INDEX'", ",", "Downloader", ".", "get_instance", "(", ")", ".", "cache", ".", "getsyspath", "(", "'index.json'", ")", ")" ]
Return the default local index file, from the download cache
[ "Return", "the", "default", "local", "index", "file", "from", "the", "download", "cache" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/index.py#L15-L21
Metatab/metapack
metapack/index.py
SearchIndex.write
def write(self): """Safely write the index data to the index file """ index_file = self.path new_index_file = index_file + '.new' bak_index_file = index_file + '.bak' if not self._db: return with open(new_index_file, 'w') as f: json.dump(self._db, f, indent=4) if exists(index_file): copy(index_file, bak_index_file) rename(new_index_file, index_file)
python
def write(self): """Safely write the index data to the index file """ index_file = self.path new_index_file = index_file + '.new' bak_index_file = index_file + '.bak' if not self._db: return with open(new_index_file, 'w') as f: json.dump(self._db, f, indent=4) if exists(index_file): copy(index_file, bak_index_file) rename(new_index_file, index_file)
[ "def", "write", "(", "self", ")", ":", "index_file", "=", "self", ".", "path", "new_index_file", "=", "index_file", "+", "'.new'", "bak_index_file", "=", "index_file", "+", "'.bak'", "if", "not", "self", ".", "_db", ":", "return", "with", "open", "(", "new_index_file", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "self", ".", "_db", ",", "f", ",", "indent", "=", "4", ")", "if", "exists", "(", "index_file", ")", ":", "copy", "(", "index_file", ",", "bak_index_file", ")", "rename", "(", "new_index_file", ",", "index_file", ")" ]
Safely write the index data to the index file
[ "Safely", "write", "the", "index", "data", "to", "the", "index", "file" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/index.py#L55-L70
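SearchIndex.write follows a common safe-save idiom: write a fresh .new file, keep a .bak copy of the previous index, then rename the new file into place. A standalone sketch of the same idiom with made-up data:

import json
from os import rename
from os.path import exists
from shutil import copy

def safe_write_json(path, data):
    """Write data next to path first, back up the old file, then swap the new one in."""
    new_path, bak_path = path + '.new', path + '.bak'
    with open(new_path, 'w') as f:
        json.dump(data, f, indent=4)
    if exists(path):
        copy(path, bak_path)      # keep the previous index in case the swap goes wrong
    rename(new_path, path)        # on POSIX this replaces path in a single step

safe_write_json('index.json', {'example-1.2.3': {'format': 'metapack'}})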
Metatab/metapack
metapack/index.py
SearchIndex.update
def update(self,o): """Update from another index or index dict""" self.open() try: self._db.update(o._db) except AttributeError: self._db.update(o)
python
def update(self,o): """Update from another index or index dict""" self.open() try: self._db.update(o._db) except AttributeError: self._db.update(o)
[ "def", "update", "(", "self", ",", "o", ")", ":", "self", ".", "open", "(", ")", "try", ":", "self", ".", "_db", ".", "update", "(", "o", ".", "_db", ")", "except", "AttributeError", ":", "self", ".", "_db", ".", "update", "(", "o", ")" ]
Update from another index or index dict
[ "Update", "from", "another", "index", "or", "index", "dict" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/index.py#L140-L148
ungarj/tilematrix
tilematrix/tmx/main.py
bounds
def bounds(ctx, tile): """Print Tile bounds.""" click.echo( '%s %s %s %s' % TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile(*tile).bounds(pixelbuffer=ctx.obj['pixelbuffer']) )
python
def bounds(ctx, tile): """Print Tile bounds.""" click.echo( '%s %s %s %s' % TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile(*tile).bounds(pixelbuffer=ctx.obj['pixelbuffer']) )
[ "def", "bounds", "(", "ctx", ",", "tile", ")", ":", "click", ".", "echo", "(", "'%s %s %s %s'", "%", "TilePyramid", "(", "ctx", ".", "obj", "[", "'grid'", "]", ",", "tile_size", "=", "ctx", ".", "obj", "[", "'tile_size'", "]", ",", "metatiling", "=", "ctx", ".", "obj", "[", "'metatiling'", "]", ")", ".", "tile", "(", "*", "tile", ")", ".", "bounds", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ")" ]
Print Tile bounds.
[ "Print", "Tile", "bounds", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L40-L48
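The same call chain the bounds command wires up can be used from Python. A small sketch, assuming the 'geodetic' grid name and the (zoom, row, col) tile index are valid for your tilematrix version; the tile_size, metatiling, and pixelbuffer values mirror the command's defaults rather than anything prescribed here:

from tilematrix import TilePyramid

tp = TilePyramid('geodetic', tile_size=256, metatiling=1)
tile = tp.tile(4, 3, 7)                  # zoom, row, col
print(tile.bounds(pixelbuffer=0))        # (left, bottom, right, top) of the tile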
ungarj/tilematrix
tilematrix/tmx/main.py
bbox
def bbox(ctx, tile): """Print Tile bounding box as geometry.""" geom = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile(*tile).bbox(pixelbuffer=ctx.obj['pixelbuffer']) if ctx.obj['output_format'] in ['WKT', 'Tile']: click.echo(geom) elif ctx.obj['output_format'] == 'GeoJSON': click.echo(geojson.dumps(geom))
python
def bbox(ctx, tile): """Print Tile bounding box as geometry.""" geom = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile(*tile).bbox(pixelbuffer=ctx.obj['pixelbuffer']) if ctx.obj['output_format'] in ['WKT', 'Tile']: click.echo(geom) elif ctx.obj['output_format'] == 'GeoJSON': click.echo(geojson.dumps(geom))
[ "def", "bbox", "(", "ctx", ",", "tile", ")", ":", "geom", "=", "TilePyramid", "(", "ctx", ".", "obj", "[", "'grid'", "]", ",", "tile_size", "=", "ctx", ".", "obj", "[", "'tile_size'", "]", ",", "metatiling", "=", "ctx", ".", "obj", "[", "'metatiling'", "]", ")", ".", "tile", "(", "*", "tile", ")", ".", "bbox", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", "if", "ctx", ".", "obj", "[", "'output_format'", "]", "in", "[", "'WKT'", ",", "'Tile'", "]", ":", "click", ".", "echo", "(", "geom", ")", "elif", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'GeoJSON'", ":", "click", ".", "echo", "(", "geojson", ".", "dumps", "(", "geom", ")", ")" ]
Print Tile bounding box as geometry.
[ "Print", "Tile", "bounding", "box", "as", "geometry", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L54-L64
ungarj/tilematrix
tilematrix/tmx/main.py
tile
def tile(ctx, point, zoom): """Print Tile containing POINT..""" tile = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile_from_xy(*point, zoom=zoom) if ctx.obj['output_format'] == 'Tile': click.echo('%s %s %s' % tile.id) elif ctx.obj['output_format'] == 'WKT': click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer'])) elif ctx.obj['output_format'] == 'GeoJSON': click.echo( geojson.dumps( geojson.FeatureCollection([ geojson.Feature( geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']), properties=dict( zoom=tile.zoom, row=tile.row, col=tile.col ) ) ]) ) )
python
def tile(ctx, point, zoom): """Print Tile containing POINT..""" tile = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tile_from_xy(*point, zoom=zoom) if ctx.obj['output_format'] == 'Tile': click.echo('%s %s %s' % tile.id) elif ctx.obj['output_format'] == 'WKT': click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer'])) elif ctx.obj['output_format'] == 'GeoJSON': click.echo( geojson.dumps( geojson.FeatureCollection([ geojson.Feature( geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']), properties=dict( zoom=tile.zoom, row=tile.row, col=tile.col ) ) ]) ) )
[ "def", "tile", "(", "ctx", ",", "point", ",", "zoom", ")", ":", "tile", "=", "TilePyramid", "(", "ctx", ".", "obj", "[", "'grid'", "]", ",", "tile_size", "=", "ctx", ".", "obj", "[", "'tile_size'", "]", ",", "metatiling", "=", "ctx", ".", "obj", "[", "'metatiling'", "]", ")", ".", "tile_from_xy", "(", "*", "point", ",", "zoom", "=", "zoom", ")", "if", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'Tile'", ":", "click", ".", "echo", "(", "'%s %s %s'", "%", "tile", ".", "id", ")", "elif", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'WKT'", ":", "click", ".", "echo", "(", "tile", ".", "bbox", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ")", "elif", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'GeoJSON'", ":", "click", ".", "echo", "(", "geojson", ".", "dumps", "(", "geojson", ".", "FeatureCollection", "(", "[", "geojson", ".", "Feature", "(", "geometry", "=", "tile", ".", "bbox", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ",", "properties", "=", "dict", "(", "zoom", "=", "tile", ".", "zoom", ",", "row", "=", "tile", ".", "row", ",", "col", "=", "tile", ".", "col", ")", ")", "]", ")", ")", ")" ]
Print Tile containing POINT.
[ "Print", "Tile", "containing", "POINT", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L71-L96
ungarj/tilematrix
tilematrix/tmx/main.py
tiles
def tiles(ctx, bounds, zoom): """Print Tiles from bounds.""" tiles = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tiles_from_bounds(bounds, zoom=zoom) if ctx.obj['output_format'] == 'Tile': for tile in tiles: click.echo('%s %s %s' % tile.id) elif ctx.obj['output_format'] == 'WKT': for tile in tiles: click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer'])) elif ctx.obj['output_format'] == 'GeoJSON': click.echo( '{\n' ' "type": "FeatureCollection",\n' ' "features": [' ) # print tiles as they come and only add comma if there is a next tile try: tile = next(tiles) while True: gj = ' %s' % geojson.Feature( geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']), properties=dict( zoom=tile.zoom, row=tile.row, col=tile.col ) ) try: tile = next(tiles) click.echo(gj + ',') except StopIteration: click.echo(gj) raise except StopIteration: pass click.echo( ' ]\n' '}' )
python
def tiles(ctx, bounds, zoom): """Print Tiles from bounds.""" tiles = TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ).tiles_from_bounds(bounds, zoom=zoom) if ctx.obj['output_format'] == 'Tile': for tile in tiles: click.echo('%s %s %s' % tile.id) elif ctx.obj['output_format'] == 'WKT': for tile in tiles: click.echo(tile.bbox(pixelbuffer=ctx.obj['pixelbuffer'])) elif ctx.obj['output_format'] == 'GeoJSON': click.echo( '{\n' ' "type": "FeatureCollection",\n' ' "features": [' ) # print tiles as they come and only add comma if there is a next tile try: tile = next(tiles) while True: gj = ' %s' % geojson.Feature( geometry=tile.bbox(pixelbuffer=ctx.obj['pixelbuffer']), properties=dict( zoom=tile.zoom, row=tile.row, col=tile.col ) ) try: tile = next(tiles) click.echo(gj + ',') except StopIteration: click.echo(gj) raise except StopIteration: pass click.echo( ' ]\n' '}' )
[ "def", "tiles", "(", "ctx", ",", "bounds", ",", "zoom", ")", ":", "tiles", "=", "TilePyramid", "(", "ctx", ".", "obj", "[", "'grid'", "]", ",", "tile_size", "=", "ctx", ".", "obj", "[", "'tile_size'", "]", ",", "metatiling", "=", "ctx", ".", "obj", "[", "'metatiling'", "]", ")", ".", "tiles_from_bounds", "(", "bounds", ",", "zoom", "=", "zoom", ")", "if", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'Tile'", ":", "for", "tile", "in", "tiles", ":", "click", ".", "echo", "(", "'%s %s %s'", "%", "tile", ".", "id", ")", "elif", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'WKT'", ":", "for", "tile", "in", "tiles", ":", "click", ".", "echo", "(", "tile", ".", "bbox", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ")", "elif", "ctx", ".", "obj", "[", "'output_format'", "]", "==", "'GeoJSON'", ":", "click", ".", "echo", "(", "'{\\n'", "' \"type\": \"FeatureCollection\",\\n'", "' \"features\": ['", ")", "# print tiles as they come and only add comma if there is a next tile", "try", ":", "tile", "=", "next", "(", "tiles", ")", "while", "True", ":", "gj", "=", "' %s'", "%", "geojson", ".", "Feature", "(", "geometry", "=", "tile", ".", "bbox", "(", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ",", "properties", "=", "dict", "(", "zoom", "=", "tile", ".", "zoom", ",", "row", "=", "tile", ".", "row", ",", "col", "=", "tile", ".", "col", ")", ")", "try", ":", "tile", "=", "next", "(", "tiles", ")", "click", ".", "echo", "(", "gj", "+", "','", ")", "except", "StopIteration", ":", "click", ".", "echo", "(", "gj", ")", "raise", "except", "StopIteration", ":", "pass", "click", ".", "echo", "(", "' ]\\n'", "'}'", ")" ]
Print Tiles from bounds.
[ "Print", "Tiles", "from", "bounds", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L103-L145
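Iterating tiles over a bounding box works the same way outside the CLI; tiles_from_bounds and tile.id are taken from the command above, while the 'geodetic' grid and the example bounds are assumptions:

from tilematrix import TilePyramid

tp = TilePyramid('geodetic')
bounds = (0.0, 0.0, 11.25, 11.25)        # left, bottom, right, top in the grid CRS
for tile in tp.tiles_from_bounds(bounds, zoom=3):
    print(tile.id)                       # (zoom, row, col) tuples covering the bounds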
ungarj/tilematrix
tilematrix/tmx/main.py
snap_bbox
def snap_bbox(ctx, bounds, zoom): """Snap bbox to tile grid.""" click.echo(box(*tilematrix.snap_bounds( bounds=bounds, tile_pyramid=TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ), zoom=zoom, pixelbuffer=ctx.obj['pixelbuffer'] )))
python
def snap_bbox(ctx, bounds, zoom): """Snap bbox to tile grid.""" click.echo(box(*tilematrix.snap_bounds( bounds=bounds, tile_pyramid=TilePyramid( ctx.obj['grid'], tile_size=ctx.obj['tile_size'], metatiling=ctx.obj['metatiling'] ), zoom=zoom, pixelbuffer=ctx.obj['pixelbuffer'] )))
[ "def", "snap_bbox", "(", "ctx", ",", "bounds", ",", "zoom", ")", ":", "click", ".", "echo", "(", "box", "(", "*", "tilematrix", ".", "snap_bounds", "(", "bounds", "=", "bounds", ",", "tile_pyramid", "=", "TilePyramid", "(", "ctx", ".", "obj", "[", "'grid'", "]", ",", "tile_size", "=", "ctx", ".", "obj", "[", "'tile_size'", "]", ",", "metatiling", "=", "ctx", ".", "obj", "[", "'metatiling'", "]", ")", ",", "zoom", "=", "zoom", ",", "pixelbuffer", "=", "ctx", ".", "obj", "[", "'pixelbuffer'", "]", ")", ")", ")" ]
Snap bbox to tile grid.
[ "Snap", "bbox", "to", "tile", "grid", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/tmx/main.py#L170-L181
project-rig/rig
rig/place_and_route/place/rand.py
place
def place(vertices_resources, nets, machine, constraints, random=default_random): """A random placer. This algorithm performs uniform-random placement of vertices (completely ignoring connectivty) and thus in the general case is likely to produce very poor quality placements. It exists primarily as a baseline comparison for placement quality and is probably of little value to most users. Parameters ---------- random : :py:class:`random.Random` Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`. """ # Within the algorithm we modify the resource availability values in the # machine to account for the effects of the current placement. As a result, # an internal copy of the structure must be made. machine = machine.copy() # {vertex: (x, y), ...} gives the location of all vertices, updated # throughout the function. placements = {} # Handle constraints vertices_resources, nets, constraints, substitutions = \ apply_same_chip_constraints(vertices_resources, nets, constraints) for constraint in constraints: if isinstance(constraint, LocationConstraint): # Location constraints are handled by recording the set of fixed # vertex locations and subtracting their resources from the chips # they're allocated to. location = constraint.location if location not in machine: raise InvalidConstraintError( "Chip requested by {} unavailable".format(machine)) vertex = constraint.vertex # Record the constrained vertex's location placements[vertex] = location # Make sure the vertex fits at the requested location (updating the # resource availability after placement) resources = vertices_resources[vertex] machine[location] = subtract_resources(machine[location], resources) if overallocated(machine[location]): raise InsufficientResourceError( "Cannot meet {}".format(constraint)) elif isinstance(constraint, # pragma: no branch ReserveResourceConstraint): apply_reserve_resource_constraint(machine, constraint) # The set of vertices which have not been constrained. movable_vertices = [v for v in vertices_resources if v not in placements] locations = set(machine) for vertex in movable_vertices: # Keep choosing random chips until we find one where the vertex fits. while True: if len(locations) == 0: raise InsufficientResourceError( "Ran out of chips while attempting to place vertex " "{}".format(vertex)) location = random.sample(locations, 1)[0] resources_if_placed = subtract_resources( machine[location], vertices_resources[vertex]) if overallocated(resources_if_placed): # The vertex won't fit on this chip, we'll assume it is full # and not try it in the future. locations.remove(location) else: # The vertex fits: record the resources consumed and move on to # the next vertex. placements[vertex] = location machine[location] = resources_if_placed break finalise_same_chip_constraints(substitutions, placements) return placements
python
def place(vertices_resources, nets, machine, constraints, random=default_random): """A random placer. This algorithm performs uniform-random placement of vertices (completely ignoring connectivty) and thus in the general case is likely to produce very poor quality placements. It exists primarily as a baseline comparison for placement quality and is probably of little value to most users. Parameters ---------- random : :py:class:`random.Random` Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`. """ # Within the algorithm we modify the resource availability values in the # machine to account for the effects of the current placement. As a result, # an internal copy of the structure must be made. machine = machine.copy() # {vertex: (x, y), ...} gives the location of all vertices, updated # throughout the function. placements = {} # Handle constraints vertices_resources, nets, constraints, substitutions = \ apply_same_chip_constraints(vertices_resources, nets, constraints) for constraint in constraints: if isinstance(constraint, LocationConstraint): # Location constraints are handled by recording the set of fixed # vertex locations and subtracting their resources from the chips # they're allocated to. location = constraint.location if location not in machine: raise InvalidConstraintError( "Chip requested by {} unavailable".format(machine)) vertex = constraint.vertex # Record the constrained vertex's location placements[vertex] = location # Make sure the vertex fits at the requested location (updating the # resource availability after placement) resources = vertices_resources[vertex] machine[location] = subtract_resources(machine[location], resources) if overallocated(machine[location]): raise InsufficientResourceError( "Cannot meet {}".format(constraint)) elif isinstance(constraint, # pragma: no branch ReserveResourceConstraint): apply_reserve_resource_constraint(machine, constraint) # The set of vertices which have not been constrained. movable_vertices = [v for v in vertices_resources if v not in placements] locations = set(machine) for vertex in movable_vertices: # Keep choosing random chips until we find one where the vertex fits. while True: if len(locations) == 0: raise InsufficientResourceError( "Ran out of chips while attempting to place vertex " "{}".format(vertex)) location = random.sample(locations, 1)[0] resources_if_placed = subtract_resources( machine[location], vertices_resources[vertex]) if overallocated(resources_if_placed): # The vertex won't fit on this chip, we'll assume it is full # and not try it in the future. locations.remove(location) else: # The vertex fits: record the resources consumed and move on to # the next vertex. placements[vertex] = location machine[location] = resources_if_placed break finalise_same_chip_constraints(substitutions, placements) return placements
[ "def", "place", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "random", "=", "default_random", ")", ":", "# Within the algorithm we modify the resource availability values in the", "# machine to account for the effects of the current placement. As a result,", "# an internal copy of the structure must be made.", "machine", "=", "machine", ".", "copy", "(", ")", "# {vertex: (x, y), ...} gives the location of all vertices, updated", "# throughout the function.", "placements", "=", "{", "}", "# Handle constraints", "vertices_resources", ",", "nets", ",", "constraints", ",", "substitutions", "=", "apply_same_chip_constraints", "(", "vertices_resources", ",", "nets", ",", "constraints", ")", "for", "constraint", "in", "constraints", ":", "if", "isinstance", "(", "constraint", ",", "LocationConstraint", ")", ":", "# Location constraints are handled by recording the set of fixed", "# vertex locations and subtracting their resources from the chips", "# they're allocated to.", "location", "=", "constraint", ".", "location", "if", "location", "not", "in", "machine", ":", "raise", "InvalidConstraintError", "(", "\"Chip requested by {} unavailable\"", ".", "format", "(", "machine", ")", ")", "vertex", "=", "constraint", ".", "vertex", "# Record the constrained vertex's location", "placements", "[", "vertex", "]", "=", "location", "# Make sure the vertex fits at the requested location (updating the", "# resource availability after placement)", "resources", "=", "vertices_resources", "[", "vertex", "]", "machine", "[", "location", "]", "=", "subtract_resources", "(", "machine", "[", "location", "]", ",", "resources", ")", "if", "overallocated", "(", "machine", "[", "location", "]", ")", ":", "raise", "InsufficientResourceError", "(", "\"Cannot meet {}\"", ".", "format", "(", "constraint", ")", ")", "elif", "isinstance", "(", "constraint", ",", "# pragma: no branch", "ReserveResourceConstraint", ")", ":", "apply_reserve_resource_constraint", "(", "machine", ",", "constraint", ")", "# The set of vertices which have not been constrained.", "movable_vertices", "=", "[", "v", "for", "v", "in", "vertices_resources", "if", "v", "not", "in", "placements", "]", "locations", "=", "set", "(", "machine", ")", "for", "vertex", "in", "movable_vertices", ":", "# Keep choosing random chips until we find one where the vertex fits.", "while", "True", ":", "if", "len", "(", "locations", ")", "==", "0", ":", "raise", "InsufficientResourceError", "(", "\"Ran out of chips while attempting to place vertex \"", "\"{}\"", ".", "format", "(", "vertex", ")", ")", "location", "=", "random", ".", "sample", "(", "locations", ",", "1", ")", "[", "0", "]", "resources_if_placed", "=", "subtract_resources", "(", "machine", "[", "location", "]", ",", "vertices_resources", "[", "vertex", "]", ")", "if", "overallocated", "(", "resources_if_placed", ")", ":", "# The vertex won't fit on this chip, we'll assume it is full", "# and not try it in the future.", "locations", ".", "remove", "(", "location", ")", "else", ":", "# The vertex fits: record the resources consumed and move on to", "# the next vertex.", "placements", "[", "vertex", "]", "=", "location", "machine", "[", "location", "]", "=", "resources_if_placed", "break", "finalise_same_chip_constraints", "(", "substitutions", ",", "placements", ")", "return", "placements" ]
A random placer. This algorithm performs uniform-random placement of vertices (completely ignoring connectivity) and thus in the general case is likely to produce very poor quality placements. It exists primarily as a baseline comparison for placement quality and is probably of little value to most users. Parameters ---------- random : :py:class:`random.Random` Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`.
[ "A", "random", "placer", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/rand.py#L19-L106
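The greedy-random idea in this placer (pick a random chip, subtract the vertex's resources, drop chips that overflow) can be shown with a toy standalone sketch. Plain dicts stand in for rig's vertices_resources and machine structures; the resource names and sizes are invented and this is not the rig API:

import random

vertices_resources = {'v0': {'cores': 2}, 'v1': {'cores': 3}, 'v2': {'cores': 1}}
machine = {(0, 0): {'cores': 4}, (0, 1): {'cores': 4}}   # chip -> free resources

placements = {}
candidates = set(machine)
for vertex, needed in vertices_resources.items():
    while True:
        if not candidates:
            raise RuntimeError('ran out of chips for {}'.format(vertex))
        chip = random.sample(sorted(candidates), 1)[0]
        remaining = {r: machine[chip][r] - n for r, n in needed.items()}
        if any(v < 0 for v in remaining.values()):
            candidates.remove(chip)      # treat the chip as full, as the placer does
        else:
            placements[vertex] = chip
            machine[chip].update(remaining)
            break

print(placements)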
project-rig/rig
rig/place_and_route/place/sa/algorithm.py
_initial_placement
def _initial_placement(movable_vertices, vertices_resources, machine, random): """For internal use. Produces a random, sequential initial placement, updating the resource availabilities of every core in the supplied machine. Parameters ---------- movable_vertices : {vertex, ...} A set of the vertices to be given a random initial placement. vertices_resources : {vertex: {resource: value, ...}, ...} machine : :py:class:`rig.place_and_route.Machine` A machine object describing the machine into which the vertices should be placed. All chips hosting fixed vertices should have a chip_resource_exceptions entry which accounts for the allocated resources. When this function returns, the machine.chip_resource_exceptions will be updated to account for the resources consumed by the initial placement of movable vertices. random : :py:class`random.Random` The random number generator to use Returns ------- {vertex: (x, y), ...} For all movable_vertices. Raises ------ InsufficientResourceError InvalidConstraintError """ # Initially fill chips in the system in a random order locations = list(machine) random.shuffle(locations) location_iter = iter(locations) # Greedily place the vertices in a random order movable_vertices = list(v for v in vertices_resources if v in movable_vertices) random.shuffle(movable_vertices) vertex_iter = iter(movable_vertices) placement = {} try: location = next(location_iter) except StopIteration: raise InsufficientResourceError("No working chips in system.") while True: # Get a vertex to place try: vertex = next(vertex_iter) except StopIteration: # All vertices have been placed break # Advance through the set of available locations until we find a chip # where the vertex fits while True: resources_if_placed = subtract_resources( machine[location], vertices_resources[vertex]) if overallocated(resources_if_placed): # The vertex won't fit on this chip, move onto the next chip try: location = next(location_iter) continue except StopIteration: raise InsufficientResourceError( "Ran out of chips while attempting to place vertex " "{}".format(vertex)) else: # The vertex fits: record the resources consumed and move on to # the next vertex. placement[vertex] = location machine[location] = resources_if_placed break return placement
python
def _initial_placement(movable_vertices, vertices_resources, machine, random): """For internal use. Produces a random, sequential initial placement, updating the resource availabilities of every core in the supplied machine. Parameters ---------- movable_vertices : {vertex, ...} A set of the vertices to be given a random initial placement. vertices_resources : {vertex: {resource: value, ...}, ...} machine : :py:class:`rig.place_and_route.Machine` A machine object describing the machine into which the vertices should be placed. All chips hosting fixed vertices should have a chip_resource_exceptions entry which accounts for the allocated resources. When this function returns, the machine.chip_resource_exceptions will be updated to account for the resources consumed by the initial placement of movable vertices. random : :py:class`random.Random` The random number generator to use Returns ------- {vertex: (x, y), ...} For all movable_vertices. Raises ------ InsufficientResourceError InvalidConstraintError """ # Initially fill chips in the system in a random order locations = list(machine) random.shuffle(locations) location_iter = iter(locations) # Greedily place the vertices in a random order movable_vertices = list(v for v in vertices_resources if v in movable_vertices) random.shuffle(movable_vertices) vertex_iter = iter(movable_vertices) placement = {} try: location = next(location_iter) except StopIteration: raise InsufficientResourceError("No working chips in system.") while True: # Get a vertex to place try: vertex = next(vertex_iter) except StopIteration: # All vertices have been placed break # Advance through the set of available locations until we find a chip # where the vertex fits while True: resources_if_placed = subtract_resources( machine[location], vertices_resources[vertex]) if overallocated(resources_if_placed): # The vertex won't fit on this chip, move onto the next chip try: location = next(location_iter) continue except StopIteration: raise InsufficientResourceError( "Ran out of chips while attempting to place vertex " "{}".format(vertex)) else: # The vertex fits: record the resources consumed and move on to # the next vertex. placement[vertex] = location machine[location] = resources_if_placed break return placement
[ "def", "_initial_placement", "(", "movable_vertices", ",", "vertices_resources", ",", "machine", ",", "random", ")", ":", "# Initially fill chips in the system in a random order", "locations", "=", "list", "(", "machine", ")", "random", ".", "shuffle", "(", "locations", ")", "location_iter", "=", "iter", "(", "locations", ")", "# Greedily place the vertices in a random order", "movable_vertices", "=", "list", "(", "v", "for", "v", "in", "vertices_resources", "if", "v", "in", "movable_vertices", ")", "random", ".", "shuffle", "(", "movable_vertices", ")", "vertex_iter", "=", "iter", "(", "movable_vertices", ")", "placement", "=", "{", "}", "try", ":", "location", "=", "next", "(", "location_iter", ")", "except", "StopIteration", ":", "raise", "InsufficientResourceError", "(", "\"No working chips in system.\"", ")", "while", "True", ":", "# Get a vertex to place", "try", ":", "vertex", "=", "next", "(", "vertex_iter", ")", "except", "StopIteration", ":", "# All vertices have been placed", "break", "# Advance through the set of available locations until we find a chip", "# where the vertex fits", "while", "True", ":", "resources_if_placed", "=", "subtract_resources", "(", "machine", "[", "location", "]", ",", "vertices_resources", "[", "vertex", "]", ")", "if", "overallocated", "(", "resources_if_placed", ")", ":", "# The vertex won't fit on this chip, move onto the next chip", "try", ":", "location", "=", "next", "(", "location_iter", ")", "continue", "except", "StopIteration", ":", "raise", "InsufficientResourceError", "(", "\"Ran out of chips while attempting to place vertex \"", "\"{}\"", ".", "format", "(", "vertex", ")", ")", "else", ":", "# The vertex fits: record the resources consumed and move on to", "# the next vertex.", "placement", "[", "vertex", "]", "=", "location", "machine", "[", "location", "]", "=", "resources_if_placed", "break", "return", "placement" ]
For internal use. Produces a random, sequential initial placement, updating the resource availabilities of every core in the supplied machine. Parameters ---------- movable_vertices : {vertex, ...} A set of the vertices to be given a random initial placement. vertices_resources : {vertex: {resource: value, ...}, ...} machine : :py:class:`rig.place_and_route.Machine` A machine object describing the machine into which the vertices should be placed. All chips hosting fixed vertices should have a chip_resource_exceptions entry which accounts for the allocated resources. When this function returns, the machine.chip_resource_exceptions will be updated to account for the resources consumed by the initial placement of movable vertices. random : :py:class`random.Random` The random number generator to use Returns ------- {vertex: (x, y), ...} For all movable_vertices. Raises ------ InsufficientResourceError InvalidConstraintError
[ "For", "internal", "use", ".", "Produces", "a", "random", "sequential", "initial", "placement", "updating", "the", "resource", "availabilities", "of", "every", "core", "in", "the", "supplied", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/algorithm.py#L39-L117
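A minimal, self-contained sketch of the greedy random initial placement described by _initial_placement above. The helper names and the dict-based machine model below are invented stand-ins for illustration, not the real Rig data structures; resource maps are plain dicts keyed by resource name.

import random

class InsufficientResourceError(Exception):
    pass

def subtract_resources(available, required):
    # Subtract each required resource from what the chip still offers.
    return {r: available.get(r, 0) - required.get(r, 0)
            for r in set(available) | set(required)}

def overallocated(resources):
    # A chip is over-allocated if any resource has gone negative.
    return any(v < 0 for v in resources.values())

def greedy_initial_placement(vertices_resources, machine, rng):
    # Visit chips in a random order and fill each one greedily.
    locations = list(machine)
    rng.shuffle(locations)
    location_iter = iter(locations)
    location = next(location_iter)

    vertices = list(vertices_resources)
    rng.shuffle(vertices)

    placement = {}
    for vertex in vertices:
        # Advance through chips until one has room for this vertex.
        while True:
            remaining = subtract_resources(machine[location],
                                           vertices_resources[vertex])
            if overallocated(remaining):
                try:
                    location = next(location_iter)
                except StopIteration:
                    raise InsufficientResourceError(
                        "Ran out of chips while placing {}".format(vertex))
            else:
                placement[vertex] = location
                machine[location] = remaining
                break
    return placement

# Three one-core vertices onto a two-chip machine with two cores per chip.
machine = {(0, 0): {"cores": 2}, (1, 0): {"cores": 2}}
vertices = {"a": {"cores": 1}, "b": {"cores": 1}, "c": {"cores": 1}}
print(greedy_initial_placement(vertices, machine, random.Random(42)))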
project-rig/rig
rig/place_and_route/place/sa/algorithm.py
place
def place(vertices_resources, nets, machine, constraints, effort=1.0, random=default_random, on_temperature_change=None, kernel=default_kernel, kernel_kwargs={}): """A flat Simulated Annealing based placement algorithm. This placement algorithm uses simulated annealing directly on the supplied problem graph with the objective of reducing wire lengths (and thus, indirectly, the potential for congestion). Though computationally expensive, this placer produces relatively good placement solutions. The annealing temperature schedule used by this algorithm is taken from "VPR: A New Packing, Placement and Routing Tool for FPGA Research" by Vaughn Betz and Jonathan Rose from the "1997 International Workshop on Field Programmable Logic and Applications". Two implementations of the algorithm's kernel are available: * :py:class:`~rig.place_and_route.place.sa.python_kernel.PythonKernel` A pure Python implementation which is available on all platforms supported by Rig. * :py:class:`~rig.place_and_route.place.sa.c_kernel.CKernel` A C implementation which is typically 50-150x faster than the basic Python kernel. Since this implementation requires a C compiler during installation, it is an optional feature of Rig. See the :py:class:`CKernel's documentation <rig.place_and_route.place.sa.c_kernel.CKernel>` for details. The fastest kernel installed is used by default and can be manually chosen using the ``kernel`` argument. This algorithm produces INFO level logging information describing the progress made by the algorithm. .. warning: This algorithm does not attempt to produce good solutions to the bin-packing problem of optimally fitting vertices into chips and it may fail if a good placement requires good bin packing. Parameters ---------- effort : float A scaling factor for the number of iterations the algorithm should run for. 1.0 is probably about as low as you'll want to go in practice and runtime increases linearly as you increase this parameter. random : :py:class:`random.Random` A Python random number generator. Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`. on_temperature_change : callback_function or None An (optional) callback function which is called every time the temperature is changed. This callback can be used to provide status updates The callback function is passed the following arguments: * ``iteration_count``: the number of iterations the placer has attempted (integer) * ``placements``: The current placement solution. * ``cost``: the weighted sum over all nets of bounding-box size. (float) * ``acceptance_rate``: the proportion of iterations which have resulted in an accepted change since the last callback call. (float between 0.0 and 1.0) * ``temperature``: The current annealing temperature. (float) * ``distance_limit``: The maximum distance any swap may be made over. (integer) If the callback returns False, the anneal is terminated immediately and the current solution is returned. kernel : :py:class:`~rig.place_and_route.place.sa.kernel.Kernel` A simulated annealing placement kernel. A sensible default will be chosen based on the available kernels on this machine. The kernel may not be used if the placement problem has a trivial solution. kernel_kwargs : dict Optional kernel-specific keyword arguments to pass to the kernel constructor. 
""" # Special case: just return immediately when there's nothing to place if len(vertices_resources) == 0: return {} # Within the algorithm we modify the resource availability values in the # machine to account for the effects of the current placement. As a result, # an internal copy of the structure must be made. machine = machine.copy() # {vertex: (x, y), ...} gives the location of all vertices whose position # is fixed by a LocationConstraint. fixed_vertices = {} # Handle constraints vertices_resources, nets, constraints, substitutions = \ apply_same_chip_constraints(vertices_resources, nets, constraints) for constraint in constraints: if isinstance(constraint, LocationConstraint): # Location constraints are handled by recording the set of fixed # vertex locations and subtracting their resources from the chips # they're allocated to. These vertices will then not be added to # the internal placement data structure to prevent annealing from # moving them. They will be re-introduced at the last possible # moment. location = constraint.location if location not in machine: raise InvalidConstraintError( "Chip requested by {} unavailable".format(machine)) vertex = constraint.vertex # Record the constrained vertex's location fixed_vertices[vertex] = location # Make sure the vertex fits at the requested location (updating the # resource availability after placement) resources = vertices_resources[vertex] machine[location] = subtract_resources(machine[location], resources) if overallocated(machine[location]): raise InsufficientResourceError( "Cannot meet {}".format(constraint)) elif isinstance(constraint, # pragma: no branch ReserveResourceConstraint): apply_reserve_resource_constraint(machine, constraint) # Initially randomly place the movable vertices movable_vertices = {v for v in vertices_resources if v not in fixed_vertices} initial_placements = _initial_placement(movable_vertices, vertices_resources, machine, random) # Include the fixed vertices in initial placement initial_placements.update(fixed_vertices) # Filter out empty or singleton nets and those weighted as zero since they # cannot influence placement. nets = [n for n in nets if len(set(n)) > 1 and n.weight > 0.0] # Special cases where no placement effort is required: # * There is only one chip # * There are no resource types to be consumed # * No effort is to be made # * No movable vertices # * There are no nets (and moving things has no effect) trivial = ((machine.width, machine.height) == (1, 1) or len(machine.chip_resources) == 0 or effort == 0.0 or len(movable_vertices) == 0 or len(nets) == 0) if trivial: logger.info("Placement has trivial solution. SA not used.") finalise_same_chip_constraints(substitutions, initial_placements) return initial_placements # Intialise the algorithm kernel k = kernel(vertices_resources, movable_vertices, set(fixed_vertices), initial_placements, nets, machine, random, **kernel_kwargs) logger.info("SA placement kernel: %s", kernel.__name__) # Specifies the maximum distance any swap can span. Initially consider # swaps that span the entire machine. distance_limit = max(machine.width, machine.height) # Determine initial temperature according to the heuristic used by VPR: 20 # times the standard deviation of len(movable_vertices) random swap costs. # The arbitrary very-high temperature is used to cause "all" swaps to be # accepted. 
_0, _1, cost_delta_sd = k.run_steps(len(movable_vertices), distance_limit, 1e100) temperature = 20.0 * cost_delta_sd # The number of swap-attempts between temperature changes is selected by # the heuristic used by VPR. This value is scaled linearly by the effort # parameter. num_steps = max(1, int(effort * len(vertices_resources)**1.33)) logger.info("Initial placement temperature: %0.1f", temperature) # Counter for the number of swap attempts made (used for diagnostic # purposes) iteration_count = 0 # Holds the total cost of the current placement. This default value chosen # to ensure the loop below iterates at least once. current_cost = 0.0 # The annealing algorithm runs until a heuristic termination condition # (taken from VPR) is hit. The heuristic waits until the temperature falls # below a small fraction of the average net cost. while temperature > (0.005 * current_cost) / len(nets): # Run an iteration at the current temperature num_accepted, current_cost, _ = k.run_steps( num_steps, int(math.ceil(distance_limit)), temperature) # The ratio of accepted-to-not-accepted changes r_accept = num_accepted / float(num_steps) # Special case: Can't do better than 0 cost! This is a special case # since the normal termination condition will not terminate if the cost # doesn't drop below 0. if current_cost == 0: break # The temperature is reduced by a factor heuristically based on the # acceptance rate. The schedule below attempts to maximise the time # spent at temperatures where a large portion (but not all) of changes # are being accepted. If lots of changes are being accepted (e.g. # during high-temperature periods) then most of them are likely not to # be beneficial. If few changes are being accepted, we're probably # pretty close to the optimal placement. if r_accept > 0.96: alpha = 0.5 elif r_accept > 0.8: alpha = 0.9 elif r_accept > 0.15: alpha = 0.95 else: alpha = 0.8 temperature = alpha * temperature # According to: # * M. Huang, F. Romeo, and A. Sangiovanni-Vincentelli, "An Efficient # General Cooling Schedule for Simulated Annealing" ICCAD, 1986, pp. # 381 - 384 and J. Lam # * J. Delosme, "Performance of a New Annealing Schedule" DAC, 1988, # pp. 306 - 311. # It is desirable to keep the acceptance ratio as close to 0.44 for as # long as possible. As a result, when r_accept falls below this we can # help increase the acceptance rate by reducing the set of possible # swap candidates based on the observation that near the end of # placement, most things are near their optimal location and thus long # distance swaps are unlikely to be useful. distance_limit *= 1.0 - 0.44 + r_accept distance_limit = min(max(distance_limit, 1.0), max(machine.width, machine.height)) iteration_count += num_steps logger.debug("Iteration: %d, " "Cost: %0.1f, " "Kept: %0.1f%%, " "Temp: %0.3f, " "Dist: %d.", iteration_count, current_cost, r_accept*100, temperature, math.ceil(distance_limit)) # Call the user callback before the next iteration, terminating if # requested. if on_temperature_change is not None: placements = k.get_placements().copy() finalise_same_chip_constraints(substitutions, placements) ret_val = on_temperature_change(iteration_count, placements, current_cost, r_accept, temperature, distance_limit) if ret_val is False: break logger.info("Anneal terminated after %d iterations.", iteration_count) placements = k.get_placements() finalise_same_chip_constraints(substitutions, placements) return placements
python
def place(vertices_resources, nets, machine, constraints, effort=1.0, random=default_random, on_temperature_change=None, kernel=default_kernel, kernel_kwargs={}): """A flat Simulated Annealing based placement algorithm. This placement algorithm uses simulated annealing directly on the supplied problem graph with the objective of reducing wire lengths (and thus, indirectly, the potential for congestion). Though computationally expensive, this placer produces relatively good placement solutions. The annealing temperature schedule used by this algorithm is taken from "VPR: A New Packing, Placement and Routing Tool for FPGA Research" by Vaughn Betz and Jonathan Rose from the "1997 International Workshop on Field Programmable Logic and Applications". Two implementations of the algorithm's kernel are available: * :py:class:`~rig.place_and_route.place.sa.python_kernel.PythonKernel` A pure Python implementation which is available on all platforms supported by Rig. * :py:class:`~rig.place_and_route.place.sa.c_kernel.CKernel` A C implementation which is typically 50-150x faster than the basic Python kernel. Since this implementation requires a C compiler during installation, it is an optional feature of Rig. See the :py:class:`CKernel's documentation <rig.place_and_route.place.sa.c_kernel.CKernel>` for details. The fastest kernel installed is used by default and can be manually chosen using the ``kernel`` argument. This algorithm produces INFO level logging information describing the progress made by the algorithm. .. warning: This algorithm does not attempt to produce good solutions to the bin-packing problem of optimally fitting vertices into chips and it may fail if a good placement requires good bin packing. Parameters ---------- effort : float A scaling factor for the number of iterations the algorithm should run for. 1.0 is probably about as low as you'll want to go in practice and runtime increases linearly as you increase this parameter. random : :py:class:`random.Random` A Python random number generator. Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`. on_temperature_change : callback_function or None An (optional) callback function which is called every time the temperature is changed. This callback can be used to provide status updates The callback function is passed the following arguments: * ``iteration_count``: the number of iterations the placer has attempted (integer) * ``placements``: The current placement solution. * ``cost``: the weighted sum over all nets of bounding-box size. (float) * ``acceptance_rate``: the proportion of iterations which have resulted in an accepted change since the last callback call. (float between 0.0 and 1.0) * ``temperature``: The current annealing temperature. (float) * ``distance_limit``: The maximum distance any swap may be made over. (integer) If the callback returns False, the anneal is terminated immediately and the current solution is returned. kernel : :py:class:`~rig.place_and_route.place.sa.kernel.Kernel` A simulated annealing placement kernel. A sensible default will be chosen based on the available kernels on this machine. The kernel may not be used if the placement problem has a trivial solution. kernel_kwargs : dict Optional kernel-specific keyword arguments to pass to the kernel constructor. 
""" # Special case: just return immediately when there's nothing to place if len(vertices_resources) == 0: return {} # Within the algorithm we modify the resource availability values in the # machine to account for the effects of the current placement. As a result, # an internal copy of the structure must be made. machine = machine.copy() # {vertex: (x, y), ...} gives the location of all vertices whose position # is fixed by a LocationConstraint. fixed_vertices = {} # Handle constraints vertices_resources, nets, constraints, substitutions = \ apply_same_chip_constraints(vertices_resources, nets, constraints) for constraint in constraints: if isinstance(constraint, LocationConstraint): # Location constraints are handled by recording the set of fixed # vertex locations and subtracting their resources from the chips # they're allocated to. These vertices will then not be added to # the internal placement data structure to prevent annealing from # moving them. They will be re-introduced at the last possible # moment. location = constraint.location if location not in machine: raise InvalidConstraintError( "Chip requested by {} unavailable".format(machine)) vertex = constraint.vertex # Record the constrained vertex's location fixed_vertices[vertex] = location # Make sure the vertex fits at the requested location (updating the # resource availability after placement) resources = vertices_resources[vertex] machine[location] = subtract_resources(machine[location], resources) if overallocated(machine[location]): raise InsufficientResourceError( "Cannot meet {}".format(constraint)) elif isinstance(constraint, # pragma: no branch ReserveResourceConstraint): apply_reserve_resource_constraint(machine, constraint) # Initially randomly place the movable vertices movable_vertices = {v for v in vertices_resources if v not in fixed_vertices} initial_placements = _initial_placement(movable_vertices, vertices_resources, machine, random) # Include the fixed vertices in initial placement initial_placements.update(fixed_vertices) # Filter out empty or singleton nets and those weighted as zero since they # cannot influence placement. nets = [n for n in nets if len(set(n)) > 1 and n.weight > 0.0] # Special cases where no placement effort is required: # * There is only one chip # * There are no resource types to be consumed # * No effort is to be made # * No movable vertices # * There are no nets (and moving things has no effect) trivial = ((machine.width, machine.height) == (1, 1) or len(machine.chip_resources) == 0 or effort == 0.0 or len(movable_vertices) == 0 or len(nets) == 0) if trivial: logger.info("Placement has trivial solution. SA not used.") finalise_same_chip_constraints(substitutions, initial_placements) return initial_placements # Intialise the algorithm kernel k = kernel(vertices_resources, movable_vertices, set(fixed_vertices), initial_placements, nets, machine, random, **kernel_kwargs) logger.info("SA placement kernel: %s", kernel.__name__) # Specifies the maximum distance any swap can span. Initially consider # swaps that span the entire machine. distance_limit = max(machine.width, machine.height) # Determine initial temperature according to the heuristic used by VPR: 20 # times the standard deviation of len(movable_vertices) random swap costs. # The arbitrary very-high temperature is used to cause "all" swaps to be # accepted. 
_0, _1, cost_delta_sd = k.run_steps(len(movable_vertices), distance_limit, 1e100) temperature = 20.0 * cost_delta_sd # The number of swap-attempts between temperature changes is selected by # the heuristic used by VPR. This value is scaled linearly by the effort # parameter. num_steps = max(1, int(effort * len(vertices_resources)**1.33)) logger.info("Initial placement temperature: %0.1f", temperature) # Counter for the number of swap attempts made (used for diagnostic # purposes) iteration_count = 0 # Holds the total cost of the current placement. This default value chosen # to ensure the loop below iterates at least once. current_cost = 0.0 # The annealing algorithm runs until a heuristic termination condition # (taken from VPR) is hit. The heuristic waits until the temperature falls # below a small fraction of the average net cost. while temperature > (0.005 * current_cost) / len(nets): # Run an iteration at the current temperature num_accepted, current_cost, _ = k.run_steps( num_steps, int(math.ceil(distance_limit)), temperature) # The ratio of accepted-to-not-accepted changes r_accept = num_accepted / float(num_steps) # Special case: Can't do better than 0 cost! This is a special case # since the normal termination condition will not terminate if the cost # doesn't drop below 0. if current_cost == 0: break # The temperature is reduced by a factor heuristically based on the # acceptance rate. The schedule below attempts to maximise the time # spent at temperatures where a large portion (but not all) of changes # are being accepted. If lots of changes are being accepted (e.g. # during high-temperature periods) then most of them are likely not to # be beneficial. If few changes are being accepted, we're probably # pretty close to the optimal placement. if r_accept > 0.96: alpha = 0.5 elif r_accept > 0.8: alpha = 0.9 elif r_accept > 0.15: alpha = 0.95 else: alpha = 0.8 temperature = alpha * temperature # According to: # * M. Huang, F. Romeo, and A. Sangiovanni-Vincentelli, "An Efficient # General Cooling Schedule for Simulated Annealing" ICCAD, 1986, pp. # 381 - 384 and J. Lam # * J. Delosme, "Performance of a New Annealing Schedule" DAC, 1988, # pp. 306 - 311. # It is desirable to keep the acceptance ratio as close to 0.44 for as # long as possible. As a result, when r_accept falls below this we can # help increase the acceptance rate by reducing the set of possible # swap candidates based on the observation that near the end of # placement, most things are near their optimal location and thus long # distance swaps are unlikely to be useful. distance_limit *= 1.0 - 0.44 + r_accept distance_limit = min(max(distance_limit, 1.0), max(machine.width, machine.height)) iteration_count += num_steps logger.debug("Iteration: %d, " "Cost: %0.1f, " "Kept: %0.1f%%, " "Temp: %0.3f, " "Dist: %d.", iteration_count, current_cost, r_accept*100, temperature, math.ceil(distance_limit)) # Call the user callback before the next iteration, terminating if # requested. if on_temperature_change is not None: placements = k.get_placements().copy() finalise_same_chip_constraints(substitutions, placements) ret_val = on_temperature_change(iteration_count, placements, current_cost, r_accept, temperature, distance_limit) if ret_val is False: break logger.info("Anneal terminated after %d iterations.", iteration_count) placements = k.get_placements() finalise_same_chip_constraints(substitutions, placements) return placements
[ "def", "place", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "effort", "=", "1.0", ",", "random", "=", "default_random", ",", "on_temperature_change", "=", "None", ",", "kernel", "=", "default_kernel", ",", "kernel_kwargs", "=", "{", "}", ")", ":", "# Special case: just return immediately when there's nothing to place", "if", "len", "(", "vertices_resources", ")", "==", "0", ":", "return", "{", "}", "# Within the algorithm we modify the resource availability values in the", "# machine to account for the effects of the current placement. As a result,", "# an internal copy of the structure must be made.", "machine", "=", "machine", ".", "copy", "(", ")", "# {vertex: (x, y), ...} gives the location of all vertices whose position", "# is fixed by a LocationConstraint.", "fixed_vertices", "=", "{", "}", "# Handle constraints", "vertices_resources", ",", "nets", ",", "constraints", ",", "substitutions", "=", "apply_same_chip_constraints", "(", "vertices_resources", ",", "nets", ",", "constraints", ")", "for", "constraint", "in", "constraints", ":", "if", "isinstance", "(", "constraint", ",", "LocationConstraint", ")", ":", "# Location constraints are handled by recording the set of fixed", "# vertex locations and subtracting their resources from the chips", "# they're allocated to. These vertices will then not be added to", "# the internal placement data structure to prevent annealing from", "# moving them. They will be re-introduced at the last possible", "# moment.", "location", "=", "constraint", ".", "location", "if", "location", "not", "in", "machine", ":", "raise", "InvalidConstraintError", "(", "\"Chip requested by {} unavailable\"", ".", "format", "(", "machine", ")", ")", "vertex", "=", "constraint", ".", "vertex", "# Record the constrained vertex's location", "fixed_vertices", "[", "vertex", "]", "=", "location", "# Make sure the vertex fits at the requested location (updating the", "# resource availability after placement)", "resources", "=", "vertices_resources", "[", "vertex", "]", "machine", "[", "location", "]", "=", "subtract_resources", "(", "machine", "[", "location", "]", ",", "resources", ")", "if", "overallocated", "(", "machine", "[", "location", "]", ")", ":", "raise", "InsufficientResourceError", "(", "\"Cannot meet {}\"", ".", "format", "(", "constraint", ")", ")", "elif", "isinstance", "(", "constraint", ",", "# pragma: no branch", "ReserveResourceConstraint", ")", ":", "apply_reserve_resource_constraint", "(", "machine", ",", "constraint", ")", "# Initially randomly place the movable vertices", "movable_vertices", "=", "{", "v", "for", "v", "in", "vertices_resources", "if", "v", "not", "in", "fixed_vertices", "}", "initial_placements", "=", "_initial_placement", "(", "movable_vertices", ",", "vertices_resources", ",", "machine", ",", "random", ")", "# Include the fixed vertices in initial placement", "initial_placements", ".", "update", "(", "fixed_vertices", ")", "# Filter out empty or singleton nets and those weighted as zero since they", "# cannot influence placement.", "nets", "=", "[", "n", "for", "n", "in", "nets", "if", "len", "(", "set", "(", "n", ")", ")", ">", "1", "and", "n", ".", "weight", ">", "0.0", "]", "# Special cases where no placement effort is required:", "# * There is only one chip", "# * There are no resource types to be consumed", "# * No effort is to be made", "# * No movable vertices", "# * There are no nets (and moving things has no effect)", "trivial", "=", "(", "(", "machine", ".", "width", ",", 
"machine", ".", "height", ")", "==", "(", "1", ",", "1", ")", "or", "len", "(", "machine", ".", "chip_resources", ")", "==", "0", "or", "effort", "==", "0.0", "or", "len", "(", "movable_vertices", ")", "==", "0", "or", "len", "(", "nets", ")", "==", "0", ")", "if", "trivial", ":", "logger", ".", "info", "(", "\"Placement has trivial solution. SA not used.\"", ")", "finalise_same_chip_constraints", "(", "substitutions", ",", "initial_placements", ")", "return", "initial_placements", "# Intialise the algorithm kernel", "k", "=", "kernel", "(", "vertices_resources", ",", "movable_vertices", ",", "set", "(", "fixed_vertices", ")", ",", "initial_placements", ",", "nets", ",", "machine", ",", "random", ",", "*", "*", "kernel_kwargs", ")", "logger", ".", "info", "(", "\"SA placement kernel: %s\"", ",", "kernel", ".", "__name__", ")", "# Specifies the maximum distance any swap can span. Initially consider", "# swaps that span the entire machine.", "distance_limit", "=", "max", "(", "machine", ".", "width", ",", "machine", ".", "height", ")", "# Determine initial temperature according to the heuristic used by VPR: 20", "# times the standard deviation of len(movable_vertices) random swap costs.", "# The arbitrary very-high temperature is used to cause \"all\" swaps to be", "# accepted.", "_0", ",", "_1", ",", "cost_delta_sd", "=", "k", ".", "run_steps", "(", "len", "(", "movable_vertices", ")", ",", "distance_limit", ",", "1e100", ")", "temperature", "=", "20.0", "*", "cost_delta_sd", "# The number of swap-attempts between temperature changes is selected by", "# the heuristic used by VPR. This value is scaled linearly by the effort", "# parameter.", "num_steps", "=", "max", "(", "1", ",", "int", "(", "effort", "*", "len", "(", "vertices_resources", ")", "**", "1.33", ")", ")", "logger", ".", "info", "(", "\"Initial placement temperature: %0.1f\"", ",", "temperature", ")", "# Counter for the number of swap attempts made (used for diagnostic", "# purposes)", "iteration_count", "=", "0", "# Holds the total cost of the current placement. This default value chosen", "# to ensure the loop below iterates at least once.", "current_cost", "=", "0.0", "# The annealing algorithm runs until a heuristic termination condition", "# (taken from VPR) is hit. The heuristic waits until the temperature falls", "# below a small fraction of the average net cost.", "while", "temperature", ">", "(", "0.005", "*", "current_cost", ")", "/", "len", "(", "nets", ")", ":", "# Run an iteration at the current temperature", "num_accepted", ",", "current_cost", ",", "_", "=", "k", ".", "run_steps", "(", "num_steps", ",", "int", "(", "math", ".", "ceil", "(", "distance_limit", ")", ")", ",", "temperature", ")", "# The ratio of accepted-to-not-accepted changes", "r_accept", "=", "num_accepted", "/", "float", "(", "num_steps", ")", "# Special case: Can't do better than 0 cost! This is a special case", "# since the normal termination condition will not terminate if the cost", "# doesn't drop below 0.", "if", "current_cost", "==", "0", ":", "break", "# The temperature is reduced by a factor heuristically based on the", "# acceptance rate. The schedule below attempts to maximise the time", "# spent at temperatures where a large portion (but not all) of changes", "# are being accepted. If lots of changes are being accepted (e.g.", "# during high-temperature periods) then most of them are likely not to", "# be beneficial. 
If few changes are being accepted, we're probably", "# pretty close to the optimal placement.", "if", "r_accept", ">", "0.96", ":", "alpha", "=", "0.5", "elif", "r_accept", ">", "0.8", ":", "alpha", "=", "0.9", "elif", "r_accept", ">", "0.15", ":", "alpha", "=", "0.95", "else", ":", "alpha", "=", "0.8", "temperature", "=", "alpha", "*", "temperature", "# According to:", "# * M. Huang, F. Romeo, and A. Sangiovanni-Vincentelli, \"An Efficient", "# General Cooling Schedule for Simulated Annealing\" ICCAD, 1986, pp.", "# 381 - 384 and J. Lam", "# * J. Delosme, \"Performance of a New Annealing Schedule\" DAC, 1988,", "# pp. 306 - 311.", "# It is desirable to keep the acceptance ratio as close to 0.44 for as", "# long as possible. As a result, when r_accept falls below this we can", "# help increase the acceptance rate by reducing the set of possible", "# swap candidates based on the observation that near the end of", "# placement, most things are near their optimal location and thus long", "# distance swaps are unlikely to be useful.", "distance_limit", "*=", "1.0", "-", "0.44", "+", "r_accept", "distance_limit", "=", "min", "(", "max", "(", "distance_limit", ",", "1.0", ")", ",", "max", "(", "machine", ".", "width", ",", "machine", ".", "height", ")", ")", "iteration_count", "+=", "num_steps", "logger", ".", "debug", "(", "\"Iteration: %d, \"", "\"Cost: %0.1f, \"", "\"Kept: %0.1f%%, \"", "\"Temp: %0.3f, \"", "\"Dist: %d.\"", ",", "iteration_count", ",", "current_cost", ",", "r_accept", "*", "100", ",", "temperature", ",", "math", ".", "ceil", "(", "distance_limit", ")", ")", "# Call the user callback before the next iteration, terminating if", "# requested.", "if", "on_temperature_change", "is", "not", "None", ":", "placements", "=", "k", ".", "get_placements", "(", ")", ".", "copy", "(", ")", "finalise_same_chip_constraints", "(", "substitutions", ",", "placements", ")", "ret_val", "=", "on_temperature_change", "(", "iteration_count", ",", "placements", ",", "current_cost", ",", "r_accept", ",", "temperature", ",", "distance_limit", ")", "if", "ret_val", "is", "False", ":", "break", "logger", ".", "info", "(", "\"Anneal terminated after %d iterations.\"", ",", "iteration_count", ")", "placements", "=", "k", ".", "get_placements", "(", ")", "finalise_same_chip_constraints", "(", "substitutions", ",", "placements", ")", "return", "placements" ]
A flat Simulated Annealing based placement algorithm. This placement algorithm uses simulated annealing directly on the supplied problem graph with the objective of reducing wire lengths (and thus, indirectly, the potential for congestion). Though computationally expensive, this placer produces relatively good placement solutions. The annealing temperature schedule used by this algorithm is taken from "VPR: A New Packing, Placement and Routing Tool for FPGA Research" by Vaughn Betz and Jonathan Rose from the "1997 International Workshop on Field Programmable Logic and Applications". Two implementations of the algorithm's kernel are available: * :py:class:`~rig.place_and_route.place.sa.python_kernel.PythonKernel` A pure Python implementation which is available on all platforms supported by Rig. * :py:class:`~rig.place_and_route.place.sa.c_kernel.CKernel` A C implementation which is typically 50-150x faster than the basic Python kernel. Since this implementation requires a C compiler during installation, it is an optional feature of Rig. See the :py:class:`CKernel's documentation <rig.place_and_route.place.sa.c_kernel.CKernel>` for details. The fastest kernel installed is used by default and can be manually chosen using the ``kernel`` argument. This algorithm produces INFO level logging information describing the progress made by the algorithm. .. warning: This algorithm does not attempt to produce good solutions to the bin-packing problem of optimally fitting vertices into chips and it may fail if a good placement requires good bin packing. Parameters ---------- effort : float A scaling factor for the number of iterations the algorithm should run for. 1.0 is probably about as low as you'll want to go in practice and runtime increases linearly as you increase this parameter. random : :py:class:`random.Random` A Python random number generator. Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`. on_temperature_change : callback_function or None An (optional) callback function which is called every time the temperature is changed. This callback can be used to provide status updates The callback function is passed the following arguments: * ``iteration_count``: the number of iterations the placer has attempted (integer) * ``placements``: The current placement solution. * ``cost``: the weighted sum over all nets of bounding-box size. (float) * ``acceptance_rate``: the proportion of iterations which have resulted in an accepted change since the last callback call. (float between 0.0 and 1.0) * ``temperature``: The current annealing temperature. (float) * ``distance_limit``: The maximum distance any swap may be made over. (integer) If the callback returns False, the anneal is terminated immediately and the current solution is returned. kernel : :py:class:`~rig.place_and_route.place.sa.kernel.Kernel` A simulated annealing placement kernel. A sensible default will be chosen based on the available kernels on this machine. The kernel may not be used if the placement problem has a trivial solution. kernel_kwargs : dict Optional kernel-specific keyword arguments to pass to the kernel constructor.
[ "A", "flat", "Simulated", "Annealing", "based", "placement", "algorithm", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/algorithm.py#L120-L386
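Two small sketches related to the place() record above. First, a progress callback matching the documented on_temperature_change argument list; the surrounding placement problem (vertices_resources, nets, machine, constraints) is assumed to exist already, so the actual call is left commented.

def report_progress(iteration_count, placements, cost, acceptance_rate,
                    temperature, distance_limit):
    # Log one line per temperature step.
    print("iter={} cost={:.1f} accepted={:.0%} temp={:.3f} dist={}".format(
        iteration_count, cost, acceptance_rate, temperature,
        int(distance_limit)))
    # Returning False here would abort the anneal and keep the current
    # solution; any other return value lets it run to its normal end.

# placements = place(vertices_resources, nets, machine, constraints,
#                    effort=1.0, on_temperature_change=report_progress)

Second, the VPR-style cooling heuristics from the function body, pulled out into stand-alone helpers. The helper names are invented; the constants and formulas follow the code above.

def next_temperature(temperature, r_accept):
    # Spend most of the anneal where a large, but not overwhelming,
    # fraction of swaps is being accepted.
    if r_accept > 0.96:
        alpha = 0.5
    elif r_accept > 0.8:
        alpha = 0.9
    elif r_accept > 0.15:
        alpha = 0.95
    else:
        alpha = 0.8
    return alpha * temperature

def next_distance_limit(distance_limit, r_accept, width, height):
    # Shrink the swap-distance limit when fewer than ~44% of swaps are
    # accepted and grow it when more are, clamped to the machine size.
    distance_limit *= 1.0 - 0.44 + r_accept
    return min(max(distance_limit, 1.0), max(width, height))

print(next_temperature(100.0, 0.9))           # -> 90.0
print(next_distance_limit(8.0, 0.2, 12, 12))  # -> about 6.08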
Metatab/metapack
metapack/html.py
make_citation_dict
def make_citation_dict(td): """ Update a citation dictionary by editing the Author field :param td: A BixTex format citation dict or a term :return: """ from datetime import datetime if isinstance(td, dict): d = td name = d['name_link'] else: d = td.as_dict() d['_term'] = td try: d['name_link'] = td.name except AttributeError: d['name_link'] = td['name_link'].value if 'author' in d and isinstance(d['author'], str): authors = [] for e in d['author'].split(';'): author_d = HumanName(e).as_dict(include_empty=False) if 'suffix' in author_d: author_d['lineage'] = author_d['suffix'] del author_d['suffix'] authors.append(author_d) d['author'] = authors if not 'type' in d: if '_term' in d: t = d['_term'] if t.term_is('Root.Reference') or t.term_is('Root.Resource'): d['type'] = 'dataset' elif t.term_is('Root.Citation'): d['type'] = 'article' else: d['type'] = 'article' if d['type'] == 'dataset': if not 'editor' in d: d['editor'] = [HumanName('Missing Editor').as_dict(include_empty=False)] if not 'accessdate' in d: d['accessdate'] = datetime.now().strftime('%Y-%m-%d') if not 'author' in d: d['author'] = [HumanName('Missing Author').as_dict(include_empty=False)] if not 'title' in d: d['title'] = d.get('description', '<Missing Title>') if not 'journal' in d: d['journal'] = '<Missing Journal>' if not 'year' in d: d['year'] = '<Missing Year>' if '_term' in d: del d['_term'] return d
python
def make_citation_dict(td): """ Update a citation dictionary by editing the Author field :param td: A BixTex format citation dict or a term :return: """ from datetime import datetime if isinstance(td, dict): d = td name = d['name_link'] else: d = td.as_dict() d['_term'] = td try: d['name_link'] = td.name except AttributeError: d['name_link'] = td['name_link'].value if 'author' in d and isinstance(d['author'], str): authors = [] for e in d['author'].split(';'): author_d = HumanName(e).as_dict(include_empty=False) if 'suffix' in author_d: author_d['lineage'] = author_d['suffix'] del author_d['suffix'] authors.append(author_d) d['author'] = authors if not 'type' in d: if '_term' in d: t = d['_term'] if t.term_is('Root.Reference') or t.term_is('Root.Resource'): d['type'] = 'dataset' elif t.term_is('Root.Citation'): d['type'] = 'article' else: d['type'] = 'article' if d['type'] == 'dataset': if not 'editor' in d: d['editor'] = [HumanName('Missing Editor').as_dict(include_empty=False)] if not 'accessdate' in d: d['accessdate'] = datetime.now().strftime('%Y-%m-%d') if not 'author' in d: d['author'] = [HumanName('Missing Author').as_dict(include_empty=False)] if not 'title' in d: d['title'] = d.get('description', '<Missing Title>') if not 'journal' in d: d['journal'] = '<Missing Journal>' if not 'year' in d: d['year'] = '<Missing Year>' if '_term' in d: del d['_term'] return d
[ "def", "make_citation_dict", "(", "td", ")", ":", "from", "datetime", "import", "datetime", "if", "isinstance", "(", "td", ",", "dict", ")", ":", "d", "=", "td", "name", "=", "d", "[", "'name_link'", "]", "else", ":", "d", "=", "td", ".", "as_dict", "(", ")", "d", "[", "'_term'", "]", "=", "td", "try", ":", "d", "[", "'name_link'", "]", "=", "td", ".", "name", "except", "AttributeError", ":", "d", "[", "'name_link'", "]", "=", "td", "[", "'name_link'", "]", ".", "value", "if", "'author'", "in", "d", "and", "isinstance", "(", "d", "[", "'author'", "]", ",", "str", ")", ":", "authors", "=", "[", "]", "for", "e", "in", "d", "[", "'author'", "]", ".", "split", "(", "';'", ")", ":", "author_d", "=", "HumanName", "(", "e", ")", ".", "as_dict", "(", "include_empty", "=", "False", ")", "if", "'suffix'", "in", "author_d", ":", "author_d", "[", "'lineage'", "]", "=", "author_d", "[", "'suffix'", "]", "del", "author_d", "[", "'suffix'", "]", "authors", ".", "append", "(", "author_d", ")", "d", "[", "'author'", "]", "=", "authors", "if", "not", "'type'", "in", "d", ":", "if", "'_term'", "in", "d", ":", "t", "=", "d", "[", "'_term'", "]", "if", "t", ".", "term_is", "(", "'Root.Reference'", ")", "or", "t", ".", "term_is", "(", "'Root.Resource'", ")", ":", "d", "[", "'type'", "]", "=", "'dataset'", "elif", "t", ".", "term_is", "(", "'Root.Citation'", ")", ":", "d", "[", "'type'", "]", "=", "'article'", "else", ":", "d", "[", "'type'", "]", "=", "'article'", "if", "d", "[", "'type'", "]", "==", "'dataset'", ":", "if", "not", "'editor'", "in", "d", ":", "d", "[", "'editor'", "]", "=", "[", "HumanName", "(", "'Missing Editor'", ")", ".", "as_dict", "(", "include_empty", "=", "False", ")", "]", "if", "not", "'accessdate'", "in", "d", ":", "d", "[", "'accessdate'", "]", "=", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "if", "not", "'author'", "in", "d", ":", "d", "[", "'author'", "]", "=", "[", "HumanName", "(", "'Missing Author'", ")", ".", "as_dict", "(", "include_empty", "=", "False", ")", "]", "if", "not", "'title'", "in", "d", ":", "d", "[", "'title'", "]", "=", "d", ".", "get", "(", "'description'", ",", "'<Missing Title>'", ")", "if", "not", "'journal'", "in", "d", ":", "d", "[", "'journal'", "]", "=", "'<Missing Journal>'", "if", "not", "'year'", "in", "d", ":", "d", "[", "'year'", "]", "=", "'<Missing Year>'", "if", "'_term'", "in", "d", ":", "del", "d", "[", "'_term'", "]", "return", "d" ]
Update a citation dictionary by editing the Author field :param td: A BibTeX format citation dict or a term :return:
[ "Update", "a", "citation", "dictionary", "by", "editing", "the", "Author", "field", ":", "param", "td", ":", "A", "BixTex", "format", "citation", "dict", "or", "a", "term", ":", "return", ":" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L174-L242
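A toy illustration of the author-field handling in make_citation_dict above: a single "Last, First; Last, First" string is split on ';' into a list of per-author dicts. The real code delegates name parsing to nameparser.HumanName; the tiny parser below is an invented stand-in that only shows the shape of the result.

def split_authors(author_field):
    authors = []
    for entry in author_field.split(';'):
        entry = entry.strip()
        if ',' in entry:
            last, first = [p.strip() for p in entry.split(',', 1)]
        else:
            parts = entry.split()
            first, last = ' '.join(parts[:-1]), parts[-1]
        authors.append({'first': first, 'last': last})
    return authors

citation = {'name_link': 'example', 'author': 'Doe, Jane; Smith, John'}
citation['author'] = split_authors(citation['author'])
print(citation['author'])
# [{'first': 'Jane', 'last': 'Doe'}, {'first': 'John', 'last': 'Smith'}]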
Metatab/metapack
metapack/html.py
make_metatab_citation_dict
def make_metatab_citation_dict(t): """ Return a dict with BibText key/values for metatab data :param t: :return: """ try: if parse_app_url(t.url).proto == 'metatab': try: url = parse_app_url(str(t.resolved_url)).resource_url doc = t.row_generator.generator.package except AttributeError as e: return False # It isn't a resource or reference creator = doc.find_first('Root.Creator') author_key='author' if not creator: creator = doc.find_first('Root.Wrangler') author_key = 'editor' if not creator: creator = doc.find_first('Root.Origin') author_key = 'editor' try: origin = doc['Contacts'].get('Root.Origin').get('organization').value except AttributeError: try: origin = doc.get_value('Root.Origin', doc.get_value('Name.Origin')).title() except: origin = None d = { 'type': 'dataset', 'name_link': t.name, author_key: [HumanName(creator.value).as_dict(include_empty=False)], 'publisher': creator.properties.get('organization'), 'origin': origin, 'journal': '2010 - 2015 American Community Survey', 'title': doc['Root'].find_first_value('Root.Title') + '; ' + t.name, 'year': doc.get_value('Root.Year'), 'accessDate': '{}'.format(datetime.datetime.now().strftime('%Y-%m-%d')), 'url': url, 'version': doc.get_value('Root.Version', doc.get_value('Name.Version') ), } d = { k:v for k, v in d.items() if v is not None} return d else: return False except (AttributeError, KeyError) as e: raise return False
python
def make_metatab_citation_dict(t): """ Return a dict with BibText key/values for metatab data :param t: :return: """ try: if parse_app_url(t.url).proto == 'metatab': try: url = parse_app_url(str(t.resolved_url)).resource_url doc = t.row_generator.generator.package except AttributeError as e: return False # It isn't a resource or reference creator = doc.find_first('Root.Creator') author_key='author' if not creator: creator = doc.find_first('Root.Wrangler') author_key = 'editor' if not creator: creator = doc.find_first('Root.Origin') author_key = 'editor' try: origin = doc['Contacts'].get('Root.Origin').get('organization').value except AttributeError: try: origin = doc.get_value('Root.Origin', doc.get_value('Name.Origin')).title() except: origin = None d = { 'type': 'dataset', 'name_link': t.name, author_key: [HumanName(creator.value).as_dict(include_empty=False)], 'publisher': creator.properties.get('organization'), 'origin': origin, 'journal': '2010 - 2015 American Community Survey', 'title': doc['Root'].find_first_value('Root.Title') + '; ' + t.name, 'year': doc.get_value('Root.Year'), 'accessDate': '{}'.format(datetime.datetime.now().strftime('%Y-%m-%d')), 'url': url, 'version': doc.get_value('Root.Version', doc.get_value('Name.Version') ), } d = { k:v for k, v in d.items() if v is not None} return d else: return False except (AttributeError, KeyError) as e: raise return False
[ "def", "make_metatab_citation_dict", "(", "t", ")", ":", "try", ":", "if", "parse_app_url", "(", "t", ".", "url", ")", ".", "proto", "==", "'metatab'", ":", "try", ":", "url", "=", "parse_app_url", "(", "str", "(", "t", ".", "resolved_url", ")", ")", ".", "resource_url", "doc", "=", "t", ".", "row_generator", ".", "generator", ".", "package", "except", "AttributeError", "as", "e", ":", "return", "False", "# It isn't a resource or reference", "creator", "=", "doc", ".", "find_first", "(", "'Root.Creator'", ")", "author_key", "=", "'author'", "if", "not", "creator", ":", "creator", "=", "doc", ".", "find_first", "(", "'Root.Wrangler'", ")", "author_key", "=", "'editor'", "if", "not", "creator", ":", "creator", "=", "doc", ".", "find_first", "(", "'Root.Origin'", ")", "author_key", "=", "'editor'", "try", ":", "origin", "=", "doc", "[", "'Contacts'", "]", ".", "get", "(", "'Root.Origin'", ")", ".", "get", "(", "'organization'", ")", ".", "value", "except", "AttributeError", ":", "try", ":", "origin", "=", "doc", ".", "get_value", "(", "'Root.Origin'", ",", "doc", ".", "get_value", "(", "'Name.Origin'", ")", ")", ".", "title", "(", ")", "except", ":", "origin", "=", "None", "d", "=", "{", "'type'", ":", "'dataset'", ",", "'name_link'", ":", "t", ".", "name", ",", "author_key", ":", "[", "HumanName", "(", "creator", ".", "value", ")", ".", "as_dict", "(", "include_empty", "=", "False", ")", "]", ",", "'publisher'", ":", "creator", ".", "properties", ".", "get", "(", "'organization'", ")", ",", "'origin'", ":", "origin", ",", "'journal'", ":", "'2010 - 2015 American Community Survey'", ",", "'title'", ":", "doc", "[", "'Root'", "]", ".", "find_first_value", "(", "'Root.Title'", ")", "+", "'; '", "+", "t", ".", "name", ",", "'year'", ":", "doc", ".", "get_value", "(", "'Root.Year'", ")", ",", "'accessDate'", ":", "'{}'", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", ",", "'url'", ":", "url", ",", "'version'", ":", "doc", ".", "get_value", "(", "'Root.Version'", ",", "doc", ".", "get_value", "(", "'Name.Version'", ")", ")", ",", "}", "d", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "return", "d", "else", ":", "return", "False", "except", "(", "AttributeError", ",", "KeyError", ")", "as", "e", ":", "raise", "return", "False" ]
Return a dict with BibTeX key/values for metatab data :param t: :return:
[ "Return", "a", "dict", "with", "BibText", "key", "/", "values", "for", "metatab", "data", ":", "param", "t", ":", ":", "return", ":" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L245-L307
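For reference, an illustrative shape of the dict that make_metatab_citation_dict builds for a reference resolving to a metatab package. Only the keys follow the code above; every value here is invented, the author key becomes 'editor' when the package only names a Wrangler or Origin, and keys whose value would be None are dropped by the final comprehension.

example_citation = {
    'type': 'dataset',
    'name_link': 'example_resource',
    'author': [{'first': 'Jane', 'last': 'Doe'}],
    'publisher': 'Example Organization',
    'origin': 'Example Origin',
    'journal': '2010 - 2015 American Community Survey',
    'title': 'Example Package Title; example_resource',
    'year': '2017',
    'accessDate': '2017-06-01',
    'url': 'http://example.com/package/data.csv',
    'version': '1',
}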
Metatab/metapack
metapack/html.py
_bibliography
def _bibliography(doc, terms, converters=[], format='html'): """ Render citations, from a document or a doct of dicts If the input is a dict, each key is the name of the citation, and the value is a BibTex formatted dict :param doc: A MetatabDoc, or a dict of BibTex dicts :return: """ output_backend = 'latex' if format == 'latex' else MetatabHtmlBackend def mk_cite(v): for c in converters: r = c(v) if r is not False: return r return make_citation_dict(v) if isinstance(doc, MetatabDoc): # This doesn't work for LaTex, b/c the formatter adds the prologue and epilogue to eery entry d = [mk_cite(t) for t in terms] cd = {e['name_link']: e for e in d} else: cd = {k: mk_cite(v, i) for i, (k, v) in enumerate(doc.items())} # for k, v in cd.items(): # print (k, v) return PybtexEngine().format_from_string(safe_dump({'entries': cd}), style=MetatabStyle, output_backend=output_backend, bib_format='yaml')
python
def _bibliography(doc, terms, converters=[], format='html'): """ Render citations, from a document or a doct of dicts If the input is a dict, each key is the name of the citation, and the value is a BibTex formatted dict :param doc: A MetatabDoc, or a dict of BibTex dicts :return: """ output_backend = 'latex' if format == 'latex' else MetatabHtmlBackend def mk_cite(v): for c in converters: r = c(v) if r is not False: return r return make_citation_dict(v) if isinstance(doc, MetatabDoc): # This doesn't work for LaTex, b/c the formatter adds the prologue and epilogue to eery entry d = [mk_cite(t) for t in terms] cd = {e['name_link']: e for e in d} else: cd = {k: mk_cite(v, i) for i, (k, v) in enumerate(doc.items())} # for k, v in cd.items(): # print (k, v) return PybtexEngine().format_from_string(safe_dump({'entries': cd}), style=MetatabStyle, output_backend=output_backend, bib_format='yaml')
[ "def", "_bibliography", "(", "doc", ",", "terms", ",", "converters", "=", "[", "]", ",", "format", "=", "'html'", ")", ":", "output_backend", "=", "'latex'", "if", "format", "==", "'latex'", "else", "MetatabHtmlBackend", "def", "mk_cite", "(", "v", ")", ":", "for", "c", "in", "converters", ":", "r", "=", "c", "(", "v", ")", "if", "r", "is", "not", "False", ":", "return", "r", "return", "make_citation_dict", "(", "v", ")", "if", "isinstance", "(", "doc", ",", "MetatabDoc", ")", ":", "# This doesn't work for LaTex, b/c the formatter adds the prologue and epilogue to eery entry", "d", "=", "[", "mk_cite", "(", "t", ")", "for", "t", "in", "terms", "]", "cd", "=", "{", "e", "[", "'name_link'", "]", ":", "e", "for", "e", "in", "d", "}", "else", ":", "cd", "=", "{", "k", ":", "mk_cite", "(", "v", ",", "i", ")", "for", "i", ",", "(", "k", ",", "v", ")", "in", "enumerate", "(", "doc", ".", "items", "(", ")", ")", "}", "# for k, v in cd.items():", "# print (k, v)", "return", "PybtexEngine", "(", ")", ".", "format_from_string", "(", "safe_dump", "(", "{", "'entries'", ":", "cd", "}", ")", ",", "style", "=", "MetatabStyle", ",", "output_backend", "=", "output_backend", ",", "bib_format", "=", "'yaml'", ")" ]
Render citations from a document or a dict of dicts. If the input is a dict, each key is the name of the citation, and the value is a BibTeX-formatted dict. :param doc: A MetatabDoc, or a dict of BibTeX dicts :return:
[ "Render", "citations", "from", "a", "document", "or", "a", "doct", "of", "dicts" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L310-L349
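A hedged sketch of the non-MetatabDoc input accepted by _bibliography: a plain dict mapping citation keys to BibTeX-style entry dicts. The entry below is invented. Only the intermediate YAML handed to pybtex is printed; the rendering call itself is left commented since it needs pybtex and the Metatab style classes to be importable.

from yaml import safe_dump

entries = {
    'doe2017': {
        'type': 'article',
        'name_link': 'doe2017',
        'author': [{'first': 'Jane', 'last': 'Doe'}],
        'title': 'An Example Article',
        'journal': 'Journal of Examples',
        'year': '2017',
    },
}

# This mirrors what _bibliography feeds to PybtexEngine().format_from_string
# with bib_format='yaml'.
print(safe_dump({'entries': entries}))

# html = _bibliography(entries, terms=[], format='html')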
Metatab/metapack
metapack/html.py
display_context
def display_context(doc): """Create a Jinja context for display""" from rowgenerators.exceptions import DownloadError context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'} #import json #print(json.dumps(context, indent=4)) mandatory_sections = ['documentation', 'contacts'] # Remove section names deletes = [] for k,v in context.items(): try: del v['@value'] except KeyError: pass # Doesn't have the value except TypeError: # Is actually completely empty, and has a scalar value. Delete and re-create deletes.append(k) if isinstance(v, str): # Shouldn't ever happen, but who knows ? deletes.append(k) for d in deletes: try: del context[d] except KeyError: # Fails in TravisCI, no idea why. pass for ms in mandatory_sections: if not ms in context: context[ms] = {} # Load inline documentation inline = '' for d in context.get('documentation',{}).get('documentation',[]): u = parse_app_url(d['url']) if u.target_format == 'md': # The README.md file inline = '' if u.proto == 'file': # File really ought to be relative t = doc.package_url.join_target(u).get_resource().get_target() else: try: t = u.get_resource().get_target() except DownloadError as e: raise e try: with open(t.fspath) as f: inline += f.read() except FileNotFoundError: pass del d['title'] # Will cause it to be ignored in next section # Strip off the leading title, if it exists, because it will be re-applied # by the templates import re lines = inline.strip().splitlines() if lines and lines[0].startswith('# '): lines = lines[1:] context['inline_doc'] = '\n'.join(lines) # Convert doc section doc_links = {} images = {} for term_name, terms in context['documentation'].items(): if term_name == 'note': context['notes'] = terms else: for i, term in enumerate(terms): try: if term_name == 'image': images[term['title']] = term else: doc_links[term['title']] = term except AttributeError: # A scalar pass # There should not be any scalars in the documentation section except KeyError: pass # ignore entries without titles except TypeError: pass # Also probably a ascalar context['doc_links'] = doc_links context['images'] = images del context['documentation'] # # Update contacts origin = None for term_name, terms in context['contacts'].items(): if isinstance(terms, dict): origin = terms # Origin is a scalar in roort, must be converted to sequence here else: for t in terms: try: t.update(process_contact(t)) except AttributeError: pass # Probably got a scalar if origin: origin.update(process_contact(origin)) context['contacts']['origin'] = [origin] # For resources and references, convert scalars into lists of dicts, which are the # default for Datafiles and References. for section in ('references', 'resources'): for term_key, term_vals in context.get(section,{}).items(): if isinstance(term_vals, dict): if '@value' in term_vals: term_vals['url'] = term_vals['@value'] del term_vals['@value'] new_term_vals = [term_vals] elif isinstance(term_vals, list): new_term_vals = None else: new_term_vals = [ {'url': term_vals, 'name': term_vals}] if new_term_vals: context[section][term_key] = new_term_vals context['distributions'] = {} for dist in doc.find('Root.Distribution'): context['distributions'][dist.type] = dist.value if doc.find('Root.Giturl'): context['distributions']['source'] = doc.get_value('Root.Giturl') return context
python
def display_context(doc): """Create a Jinja context for display""" from rowgenerators.exceptions import DownloadError context = {s.name.lower(): s.as_dict() for s in doc if s.name.lower() != 'schema'} #import json #print(json.dumps(context, indent=4)) mandatory_sections = ['documentation', 'contacts'] # Remove section names deletes = [] for k,v in context.items(): try: del v['@value'] except KeyError: pass # Doesn't have the value except TypeError: # Is actually completely empty, and has a scalar value. Delete and re-create deletes.append(k) if isinstance(v, str): # Shouldn't ever happen, but who knows ? deletes.append(k) for d in deletes: try: del context[d] except KeyError: # Fails in TravisCI, no idea why. pass for ms in mandatory_sections: if not ms in context: context[ms] = {} # Load inline documentation inline = '' for d in context.get('documentation',{}).get('documentation',[]): u = parse_app_url(d['url']) if u.target_format == 'md': # The README.md file inline = '' if u.proto == 'file': # File really ought to be relative t = doc.package_url.join_target(u).get_resource().get_target() else: try: t = u.get_resource().get_target() except DownloadError as e: raise e try: with open(t.fspath) as f: inline += f.read() except FileNotFoundError: pass del d['title'] # Will cause it to be ignored in next section # Strip off the leading title, if it exists, because it will be re-applied # by the templates import re lines = inline.strip().splitlines() if lines and lines[0].startswith('# '): lines = lines[1:] context['inline_doc'] = '\n'.join(lines) # Convert doc section doc_links = {} images = {} for term_name, terms in context['documentation'].items(): if term_name == 'note': context['notes'] = terms else: for i, term in enumerate(terms): try: if term_name == 'image': images[term['title']] = term else: doc_links[term['title']] = term except AttributeError: # A scalar pass # There should not be any scalars in the documentation section except KeyError: pass # ignore entries without titles except TypeError: pass # Also probably a ascalar context['doc_links'] = doc_links context['images'] = images del context['documentation'] # # Update contacts origin = None for term_name, terms in context['contacts'].items(): if isinstance(terms, dict): origin = terms # Origin is a scalar in roort, must be converted to sequence here else: for t in terms: try: t.update(process_contact(t)) except AttributeError: pass # Probably got a scalar if origin: origin.update(process_contact(origin)) context['contacts']['origin'] = [origin] # For resources and references, convert scalars into lists of dicts, which are the # default for Datafiles and References. for section in ('references', 'resources'): for term_key, term_vals in context.get(section,{}).items(): if isinstance(term_vals, dict): if '@value' in term_vals: term_vals['url'] = term_vals['@value'] del term_vals['@value'] new_term_vals = [term_vals] elif isinstance(term_vals, list): new_term_vals = None else: new_term_vals = [ {'url': term_vals, 'name': term_vals}] if new_term_vals: context[section][term_key] = new_term_vals context['distributions'] = {} for dist in doc.find('Root.Distribution'): context['distributions'][dist.type] = dist.value if doc.find('Root.Giturl'): context['distributions']['source'] = doc.get_value('Root.Giturl') return context
[ "def", "display_context", "(", "doc", ")", ":", "from", "rowgenerators", ".", "exceptions", "import", "DownloadError", "context", "=", "{", "s", ".", "name", ".", "lower", "(", ")", ":", "s", ".", "as_dict", "(", ")", "for", "s", "in", "doc", "if", "s", ".", "name", ".", "lower", "(", ")", "!=", "'schema'", "}", "#import json", "#print(json.dumps(context, indent=4))", "mandatory_sections", "=", "[", "'documentation'", ",", "'contacts'", "]", "# Remove section names", "deletes", "=", "[", "]", "for", "k", ",", "v", "in", "context", ".", "items", "(", ")", ":", "try", ":", "del", "v", "[", "'@value'", "]", "except", "KeyError", ":", "pass", "# Doesn't have the value", "except", "TypeError", ":", "# Is actually completely empty, and has a scalar value. Delete and re-create", "deletes", ".", "append", "(", "k", ")", "if", "isinstance", "(", "v", ",", "str", ")", ":", "# Shouldn't ever happen, but who knows ?", "deletes", ".", "append", "(", "k", ")", "for", "d", "in", "deletes", ":", "try", ":", "del", "context", "[", "d", "]", "except", "KeyError", ":", "# Fails in TravisCI, no idea why.", "pass", "for", "ms", "in", "mandatory_sections", ":", "if", "not", "ms", "in", "context", ":", "context", "[", "ms", "]", "=", "{", "}", "# Load inline documentation", "inline", "=", "''", "for", "d", "in", "context", ".", "get", "(", "'documentation'", ",", "{", "}", ")", ".", "get", "(", "'documentation'", ",", "[", "]", ")", ":", "u", "=", "parse_app_url", "(", "d", "[", "'url'", "]", ")", "if", "u", ".", "target_format", "==", "'md'", ":", "# The README.md file", "inline", "=", "''", "if", "u", ".", "proto", "==", "'file'", ":", "# File really ought to be relative", "t", "=", "doc", ".", "package_url", ".", "join_target", "(", "u", ")", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "else", ":", "try", ":", "t", "=", "u", ".", "get_resource", "(", ")", ".", "get_target", "(", ")", "except", "DownloadError", "as", "e", ":", "raise", "e", "try", ":", "with", "open", "(", "t", ".", "fspath", ")", "as", "f", ":", "inline", "+=", "f", ".", "read", "(", ")", "except", "FileNotFoundError", ":", "pass", "del", "d", "[", "'title'", "]", "# Will cause it to be ignored in next section", "# Strip off the leading title, if it exists, because it will be re-applied", "# by the templates", "import", "re", "lines", "=", "inline", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "if", "lines", "and", "lines", "[", "0", "]", ".", "startswith", "(", "'# '", ")", ":", "lines", "=", "lines", "[", "1", ":", "]", "context", "[", "'inline_doc'", "]", "=", "'\\n'", ".", "join", "(", "lines", ")", "# Convert doc section", "doc_links", "=", "{", "}", "images", "=", "{", "}", "for", "term_name", ",", "terms", "in", "context", "[", "'documentation'", "]", ".", "items", "(", ")", ":", "if", "term_name", "==", "'note'", ":", "context", "[", "'notes'", "]", "=", "terms", "else", ":", "for", "i", ",", "term", "in", "enumerate", "(", "terms", ")", ":", "try", ":", "if", "term_name", "==", "'image'", ":", "images", "[", "term", "[", "'title'", "]", "]", "=", "term", "else", ":", "doc_links", "[", "term", "[", "'title'", "]", "]", "=", "term", "except", "AttributeError", ":", "# A scalar", "pass", "# There should not be any scalars in the documentation section", "except", "KeyError", ":", "pass", "# ignore entries without titles", "except", "TypeError", ":", "pass", "# Also probably a ascalar", "context", "[", "'doc_links'", "]", "=", "doc_links", "context", "[", "'images'", "]", "=", "images", "del", "context", "[", 
"'documentation'", "]", "#", "# Update contacts", "origin", "=", "None", "for", "term_name", ",", "terms", "in", "context", "[", "'contacts'", "]", ".", "items", "(", ")", ":", "if", "isinstance", "(", "terms", ",", "dict", ")", ":", "origin", "=", "terms", "# Origin is a scalar in roort, must be converted to sequence here", "else", ":", "for", "t", "in", "terms", ":", "try", ":", "t", ".", "update", "(", "process_contact", "(", "t", ")", ")", "except", "AttributeError", ":", "pass", "# Probably got a scalar", "if", "origin", ":", "origin", ".", "update", "(", "process_contact", "(", "origin", ")", ")", "context", "[", "'contacts'", "]", "[", "'origin'", "]", "=", "[", "origin", "]", "# For resources and references, convert scalars into lists of dicts, which are the", "# default for Datafiles and References.", "for", "section", "in", "(", "'references'", ",", "'resources'", ")", ":", "for", "term_key", ",", "term_vals", "in", "context", ".", "get", "(", "section", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "isinstance", "(", "term_vals", ",", "dict", ")", ":", "if", "'@value'", "in", "term_vals", ":", "term_vals", "[", "'url'", "]", "=", "term_vals", "[", "'@value'", "]", "del", "term_vals", "[", "'@value'", "]", "new_term_vals", "=", "[", "term_vals", "]", "elif", "isinstance", "(", "term_vals", ",", "list", ")", ":", "new_term_vals", "=", "None", "else", ":", "new_term_vals", "=", "[", "{", "'url'", ":", "term_vals", ",", "'name'", ":", "term_vals", "}", "]", "if", "new_term_vals", ":", "context", "[", "section", "]", "[", "term_key", "]", "=", "new_term_vals", "context", "[", "'distributions'", "]", "=", "{", "}", "for", "dist", "in", "doc", ".", "find", "(", "'Root.Distribution'", ")", ":", "context", "[", "'distributions'", "]", "[", "dist", ".", "type", "]", "=", "dist", ".", "value", "if", "doc", ".", "find", "(", "'Root.Giturl'", ")", ":", "context", "[", "'distributions'", "]", "[", "'source'", "]", "=", "doc", ".", "get_value", "(", "'Root.Giturl'", ")", "return", "context" ]
Create a Jinja context for display
[ "Create", "a", "Jinja", "context", "for", "display" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L477-L619
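A minimal usage sketch for display_context, assuming a package document opened elsewhere; the package URL below is a placeholder and metapack.open_package is assumed to be the usual entry point for loading one.

# Sketch only: build the Jinja display context for an already-opened package.
# The URL is a hypothetical placeholder.
import metapack as mp
from metapack.html import display_context

doc = mp.open_package('http://example.com/example-package/metadata.csv')
context = display_context(doc)

# The context maps lower-cased section names to dicts, plus the derived keys
# assembled above: inline_doc, doc_links, images, notes (when present) and distributions.
print(sorted(context.keys()))
print(context.get('distributions'))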
Metatab/metapack
metapack/html.py
markdown
def markdown(doc, title=True, template='short_documentation.md'): """Markdown, specifically for the Notes field in a CKAN dataset""" from jinja2 import Environment, PackageLoader, select_autoescape env = Environment( loader=PackageLoader('metapack', 'support/templates') #autoescape=select_autoescape(['html', 'xml']) ) context = display_context(doc) return env.get_template(template).render(**context)
python
def markdown(doc, title=True, template='short_documentation.md'): """Markdown, specifically for the Notes field in a CKAN dataset""" from jinja2 import Environment, PackageLoader, select_autoescape env = Environment( loader=PackageLoader('metapack', 'support/templates') #autoescape=select_autoescape(['html', 'xml']) ) context = display_context(doc) return env.get_template(template).render(**context)
[ "def", "markdown", "(", "doc", ",", "title", "=", "True", ",", "template", "=", "'short_documentation.md'", ")", ":", "from", "jinja2", "import", "Environment", ",", "PackageLoader", ",", "select_autoescape", "env", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'metapack'", ",", "'support/templates'", ")", "#autoescape=select_autoescape(['html', 'xml'])", ")", "context", "=", "display_context", "(", "doc", ")", "return", "env", ".", "get_template", "(", "template", ")", ".", "render", "(", "*", "*", "context", ")" ]
Markdown, specifically for the Notes field in a CKAN dataset
[ "Markdown", "specifically", "for", "the", "Notes", "field", "in", "a", "CKAN", "dataset" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/html.py#L621-L632
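A companion sketch for markdown(), which feeds the display context above into a bundled Jinja template; the same placeholder URL and open_package assumption apply.

# Sketch only: render the short-documentation Markdown used for CKAN notes.
import metapack as mp
from metapack.html import markdown

doc = mp.open_package('http://example.com/example-package/metadata.csv')  # placeholder URL
notes = markdown(doc)  # defaults to the 'short_documentation.md' template
print(notes[:200])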
project-rig/rig
rig/place_and_route/place/breadth_first.py
breadth_first_vertex_order
def breadth_first_vertex_order(vertices_resources, nets): """A generator which iterates over a set of vertices in a breadth-first order in terms of connectivity. For use as a vertex ordering for the sequential placer. """ # Special case: no vertices, just stop immediately if len(vertices_resources) == 0: return # Enumerate the set of nets attached to each vertex vertex_neighbours = defaultdict(set) for net in nets: # Note: Iterating over a Net object produces the set of vertices # involved in the net. vertex_neighbours[net.source].update(net) for sink in net.sinks: vertex_neighbours[sink].update(net) # Perform a breadth-first iteration over the vertices. unplaced_vertices = set(vertices_resources) vertex_queue = deque() while vertex_queue or unplaced_vertices: if not vertex_queue: vertex_queue.append(unplaced_vertices.pop()) vertex = vertex_queue.popleft() yield vertex vertex_queue.extend(v for v in vertex_neighbours[vertex] if v in unplaced_vertices) unplaced_vertices.difference_update(vertex_neighbours[vertex])
python
def breadth_first_vertex_order(vertices_resources, nets): """A generator which iterates over a set of vertices in a breadth-first order in terms of connectivity. For use as a vertex ordering for the sequential placer. """ # Special case: no vertices, just stop immediately if len(vertices_resources) == 0: return # Enumerate the set of nets attached to each vertex vertex_neighbours = defaultdict(set) for net in nets: # Note: Iterating over a Net object produces the set of vertices # involved in the net. vertex_neighbours[net.source].update(net) for sink in net.sinks: vertex_neighbours[sink].update(net) # Perform a breadth-first iteration over the vertices. unplaced_vertices = set(vertices_resources) vertex_queue = deque() while vertex_queue or unplaced_vertices: if not vertex_queue: vertex_queue.append(unplaced_vertices.pop()) vertex = vertex_queue.popleft() yield vertex vertex_queue.extend(v for v in vertex_neighbours[vertex] if v in unplaced_vertices) unplaced_vertices.difference_update(vertex_neighbours[vertex])
[ "def", "breadth_first_vertex_order", "(", "vertices_resources", ",", "nets", ")", ":", "# Special case: no vertices, just stop immediately", "if", "len", "(", "vertices_resources", ")", "==", "0", ":", "return", "# Enumerate the set of nets attached to each vertex", "vertex_neighbours", "=", "defaultdict", "(", "set", ")", "for", "net", "in", "nets", ":", "# Note: Iterating over a Net object produces the set of vertices", "# involved in the net.", "vertex_neighbours", "[", "net", ".", "source", "]", ".", "update", "(", "net", ")", "for", "sink", "in", "net", ".", "sinks", ":", "vertex_neighbours", "[", "sink", "]", ".", "update", "(", "net", ")", "# Perform a breadth-first iteration over the vertices.", "unplaced_vertices", "=", "set", "(", "vertices_resources", ")", "vertex_queue", "=", "deque", "(", ")", "while", "vertex_queue", "or", "unplaced_vertices", ":", "if", "not", "vertex_queue", ":", "vertex_queue", ".", "append", "(", "unplaced_vertices", ".", "pop", "(", ")", ")", "vertex", "=", "vertex_queue", ".", "popleft", "(", ")", "yield", "vertex", "vertex_queue", ".", "extend", "(", "v", "for", "v", "in", "vertex_neighbours", "[", "vertex", "]", "if", "v", "in", "unplaced_vertices", ")", "unplaced_vertices", ".", "difference_update", "(", "vertex_neighbours", "[", "vertex", "]", ")" ]
A generator which iterates over a set of vertices in a breadth-first order in terms of connectivity. For use as a vertex ordering for the sequential placer.
[ "A", "generator", "which", "iterates", "over", "a", "set", "of", "vertices", "in", "a", "breadth", "-", "first", "order", "in", "terms", "of", "connectivity", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/breadth_first.py#L8-L39
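A self-contained sketch of the ordering this generator produces. Rig's real Net class is not imported here; a minimal stand-in supplies the source/sinks attributes and vertex iteration the generator expects, so the example only illustrates traversal order under those assumptions.

from rig.place_and_route.place.breadth_first import breadth_first_vertex_order

# Minimal stand-in for a Rig Net: iterating over it yields every vertex
# involved in the net, which is what the generator above relies on.
class FakeNet(object):
    def __init__(self, source, sinks):
        self.source = source
        self.sinks = sinks

    def __iter__(self):
        return iter([self.source] + self.sinks)

vertices_resources = {"a": {}, "b": {}, "c": {}, "d": {}}
nets = [FakeNet("a", ["b", "c"]), FakeNet("c", ["d"])]

# Neighbours of already-yielded vertices come out before a fresh,
# unconnected vertex is picked arbitrarily from the remainder.
print(list(breadth_first_vertex_order(vertices_resources, nets)))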
project-rig/rig
rig/place_and_route/place/breadth_first.py
place
def place(vertices_resources, nets, machine, constraints, chip_order=None): """Places vertices in breadth-first order onto chips in the machine. This is a thin wrapper around the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placement algorithm which uses the :py:func:`breadth_first_vertex_order` vertex ordering. Parameters ---------- chip_order : None or iterable The order in which chips should be tried as a candidate location for a vertex. See the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placer's argument of the same name. """ return sequential_place(vertices_resources, nets, machine, constraints, breadth_first_vertex_order(vertices_resources, nets), chip_order)
python
def place(vertices_resources, nets, machine, constraints, chip_order=None): """Places vertices in breadth-first order onto chips in the machine. This is a thin wrapper around the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placement algorithm which uses the :py:func:`breadth_first_vertex_order` vertex ordering. Parameters ---------- chip_order : None or iterable The order in which chips should be tried as a candidate location for a vertex. See the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placer's argument of the same name. """ return sequential_place(vertices_resources, nets, machine, constraints, breadth_first_vertex_order(vertices_resources, nets), chip_order)
[ "def", "place", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "chip_order", "=", "None", ")", ":", "return", "sequential_place", "(", "vertices_resources", ",", "nets", ",", "machine", ",", "constraints", ",", "breadth_first_vertex_order", "(", "vertices_resources", ",", "nets", ")", ",", "chip_order", ")" ]
Places vertices in breadth-first order onto chips in the machine. This is a thin wrapper around the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placement algorithm which uses the :py:func:`breadth_first_vertex_order` vertex ordering. Parameters ---------- chip_order : None or iterable The order in which chips should be tried as a candidate location for a vertex. See the :py:func:`sequential <rig.place_and_route.place.sequential.place>` placer's argument of the same name.
[ "Places", "vertices", "in", "breadth", "-", "first", "order", "onto", "chips", "in", "the", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/breadth_first.py#L42-L61
TC01/python-xkcd
xkcd.py
getRandomComic
def getRandomComic(): """ Produces a :class:`Comic` object for a random xkcd comic. Uses the Python standard library random number generator in order to select a comic. Returns the resulting comic object.""" random.seed() numComics = getLatestComicNum() number = random.randint(1, numComics) return Comic(number)
python
def getRandomComic(): """ Produces a :class:`Comic` object for a random xkcd comic. Uses the Python standard library random number generator in order to select a comic. Returns the resulting comic object.""" random.seed() numComics = getLatestComicNum() number = random.randint(1, numComics) return Comic(number)
[ "def", "getRandomComic", "(", ")", ":", "random", ".", "seed", "(", ")", "numComics", "=", "getLatestComicNum", "(", ")", "number", "=", "random", ".", "randint", "(", "1", ",", "numComics", ")", "return", "Comic", "(", "number", ")" ]
Produces a :class:`Comic` object for a random xkcd comic. Uses the Python standard library random number generator in order to select a comic. Returns the resulting comic object.
[ "Produces", "a", ":", "class", ":", "Comic", "object", "for", "a", "random", "xkcd", "comic", ".", "Uses", "the", "Python", "standard", "library", "random", "number", "generator", "in", "order", "to", "select", "a", "comic", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L339-L348
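A short usage sketch for getRandomComic; only attributes that the download() method elsewhere in this module relies on (number, imageLink, imageName) are read here.

import xkcd

# Sketch: fetch a random comic and look at a few of its attributes.
comic = xkcd.getRandomComic()
print(comic.number)
print(comic.imageLink)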
TC01/python-xkcd
xkcd.py
getComic
def getComic(number, silent=True): """ Produces a :class:`Comic` object with index equal to the provided argument. Prints an error in the event of a failure (i.e. the number is less than zero or greater than the latest comic number) and returns an empty Comic object. Arguments: an integer or string that represents a number, "number", that is the index of the comic in question. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the resulting Comic object for the provided index if successful, or a Comic object with -1 as the index if not.""" numComics = getLatestComicNum() if type(number) is str and number.isdigit(): number = int(number) if number > numComics or number <= 0: if not silent: print("Error: You have requested an invalid comic.") return Comic(-1) return Comic(number)
python
def getComic(number, silent=True): """ Produces a :class:`Comic` object with index equal to the provided argument. Prints an error in the event of a failure (i.e. the number is less than zero or greater than the latest comic number) and returns an empty Comic object. Arguments: an integer or string that represents a number, "number", that is the index of the comic in question. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the resulting Comic object for the provided index if successful, or a Comic object with -1 as the index if not.""" numComics = getLatestComicNum() if type(number) is str and number.isdigit(): number = int(number) if number > numComics or number <= 0: if not silent: print("Error: You have requested an invalid comic.") return Comic(-1) return Comic(number)
[ "def", "getComic", "(", "number", ",", "silent", "=", "True", ")", ":", "numComics", "=", "getLatestComicNum", "(", ")", "if", "type", "(", "number", ")", "is", "str", "and", "number", ".", "isdigit", "(", ")", ":", "number", "=", "int", "(", "number", ")", "if", "number", ">", "numComics", "or", "number", "<=", "0", ":", "if", "not", "silent", ":", "print", "(", "\"Error: You have requested an invalid comic.\"", ")", "return", "Comic", "(", "-", "1", ")", "return", "Comic", "(", "number", ")" ]
Produces a :class:`Comic` object with index equal to the provided argument. Prints an error in the event of a failure (i.e. the number is less than zero or greater than the latest comic number) and returns an empty Comic object. Arguments: an integer or string that represents a number, "number", that is the index of the comic in question. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the resulting Comic object for the provided index if successful, or a Comic object with -1 as the index if not.
[ "Produces", "a", ":", "class", ":", "Comic", "object", "with", "index", "equal", "to", "the", "provided", "argument", ".", "Prints", "an", "error", "in", "the", "event", "of", "a", "failure", "(", "i", ".", "e", ".", "the", "number", "is", "less", "than", "zero", "or", "greater", "than", "the", "latest", "comic", "number", ")", "and", "returns", "an", "empty", "Comic", "object", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L350-L371
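A usage sketch for getComic showing both the normal path and the invalid-index behaviour described in the docstring.

import xkcd

# Sketch: request a specific comic by index.
comic = xkcd.getComic(353)
if comic.number != -1:
    print(comic.imageLink)

# An out-of-range index returns a Comic built with index -1; silent=False also
# prints "Error: You have requested an invalid comic."
bad = xkcd.getComic(0, silent=False)
print(bad.number)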
TC01/python-xkcd
xkcd.py
getWhatIfArchive
def getWhatIfArchive(): """ Parses the xkcd What If archive. getWhatIfArchive passes the HTML text of the archive page into a :class:`WhatIfArchiveParser` and then calls the parser's :func:`WhatIfArchiveParser.getWhatIfs` method and returns the dictionary produced. This function returns a dictionary mapping article numbers to :class:`WhatIf` objects for every What If article published thus far. If the parsing fails, for whatever reason, the dictionary will be empty.""" archive = urllib.urlopen(archiveUrl) text = archive.read() if sys.version_info[0] >= 3: text = text.decode('utf-8') archive.close() parser = WhatIfArchiveParser() parser.feed(text) return parser.getWhatIfs()
python
def getWhatIfArchive(): """ Parses the xkcd What If archive. getWhatIfArchive passes the HTML text of the archive page into a :class:`WhatIfArchiveParser` and then calls the parser's :func:`WhatIfArchiveParser.getWhatIfs` method and returns the dictionary produced. This function returns a dictionary mapping article numbers to :class:`WhatIf` objects for every What If article published thus far. If the parsing fails, for whatever reason, the dictionary will be empty.""" archive = urllib.urlopen(archiveUrl) text = archive.read() if sys.version_info[0] >= 3: text = text.decode('utf-8') archive.close() parser = WhatIfArchiveParser() parser.feed(text) return parser.getWhatIfs()
[ "def", "getWhatIfArchive", "(", ")", ":", "archive", "=", "urllib", ".", "urlopen", "(", "archiveUrl", ")", "text", "=", "archive", ".", "read", "(", ")", "if", "sys", ".", "version_info", "[", "0", "]", ">=", "3", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "archive", ".", "close", "(", ")", "parser", "=", "WhatIfArchiveParser", "(", ")", "parser", ".", "feed", "(", "text", ")", "return", "parser", ".", "getWhatIfs", "(", ")" ]
Parses the xkcd What If archive. getWhatIfArchive passes the HTML text of the archive page into a :class:`WhatIfArchiveParser` and then calls the parser's :func:`WhatIfArchiveParser.getWhatIfs` method and returns the dictionary produced. This function returns a dictionary mapping article numbers to :class:`WhatIf` objects for every What If article published thus far. If the parsing fails, for whatever reason, the dictionary will be empty.
[ "Parses", "the", "xkcd", "What", "If", "archive", ".", "getWhatIfArchive", "passes", "the", "HTML", "text", "of", "the", "archive", "page", "into", "a", ":", "class", ":", "WhatIfArchiveParser", "and", "then", "calls", "the", "parser", "s", ":", "func", ":", "WhatIfArchiveParser", ".", "getWhatIfs", "method", "and", "returns", "the", "dictionary", "produced", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L375-L391
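A brief sketch for getWhatIfArchive; the return value is a dict keyed by article number, and an empty dict signals that parsing failed.

import xkcd

# Sketch: fetch the What If archive and count the published articles.
archive = xkcd.getWhatIfArchive()
print(len(archive))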
TC01/python-xkcd
xkcd.py
getRandomWhatIf
def getRandomWhatIf(): """ Returns a randomly generated :class:`WhatIf` object, using the Python standard library random number generator to select the object. The object is returned from the dictionary produced by :func:`getWhatIfArchive`; like the other What If routines, this function is called first in order to get a list of all previously published What Ifs.""" random.seed() archive = getWhatIfArchive() latest = getLatestWhatIfNum(archive) number = random.randint(1, latest) return archive[number]
python
def getRandomWhatIf(): """ Returns a randomly generated :class:`WhatIf` object, using the Python standard library random number generator to select the object. The object is returned from the dictionary produced by :func:`getWhatIfArchive`; like the other What If routines, this function is called first in order to get a list of all previously published What Ifs.""" random.seed() archive = getWhatIfArchive() latest = getLatestWhatIfNum(archive) number = random.randint(1, latest) return archive[number]
[ "def", "getRandomWhatIf", "(", ")", ":", "random", ".", "seed", "(", ")", "archive", "=", "getWhatIfArchive", "(", ")", "latest", "=", "getLatestWhatIfNum", "(", "archive", ")", "number", "=", "random", ".", "randint", "(", "1", ",", "latest", ")", "return", "archive", "[", "number", "]" ]
Returns a randomly generated :class:`WhatIf` object, using the Python standard library random number generator to select the object. The object is returned from the dictionary produced by :func:`getWhatIfArchive`; like the other What If routines, this function is called first in order to get a list of all previously published What Ifs.
[ "Returns", "a", "randomly", "generated", ":", "class", ":", "WhatIf", "object", "using", "the", "Python", "standard", "library", "random", "number", "generator", "to", "select", "the", "object", ".", "The", "object", "is", "returned", "from", "the", "dictionary", "produced", "by", ":", "func", ":", "getWhatIfArchive", ";", "like", "the", "other", "What", "If", "routines", "this", "function", "is", "called", "first", "in", "order", "to", "get", "a", "list", "of", "all", "previously", "published", "What", "Ifs", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L425-L436
TC01/python-xkcd
xkcd.py
getWhatIf
def getWhatIf(number): """ Returns a :class:`WhatIf` object corresponding to the What If article of index passed to the function. If the index is less than zero or greater than the maximum number of articles published thus far, None is returned instead. Like all the routines for handling What If articles, :func:`getWhatIfArchive` is called first in order to establish a list of all previously published What Ifs. Arguments: number: an integer or string that represents a number, this is the index of article to retrieve. Returns the resulting :class:`WhatIf` object.""" archive = getWhatIfArchive() latest = getLatestWhatIfNum(archive) if type(number) is str and number.isdigit(): number = int(number) if number > latest or latest <= 0: return None return archive[number]
python
def getWhatIf(number): """ Returns a :class:`WhatIf` object corresponding to the What If article of index passed to the function. If the index is less than zero or greater than the maximum number of articles published thus far, None is returned instead. Like all the routines for handling What If articles, :func:`getWhatIfArchive` is called first in order to establish a list of all previously published What Ifs. Arguments: number: an integer or string that represents a number, this is the index of article to retrieve. Returns the resulting :class:`WhatIf` object.""" archive = getWhatIfArchive() latest = getLatestWhatIfNum(archive) if type(number) is str and number.isdigit(): number = int(number) if number > latest or latest <= 0: return None return archive[number]
[ "def", "getWhatIf", "(", "number", ")", ":", "archive", "=", "getWhatIfArchive", "(", ")", "latest", "=", "getLatestWhatIfNum", "(", "archive", ")", "if", "type", "(", "number", ")", "is", "str", "and", "number", ".", "isdigit", "(", ")", ":", "number", "=", "int", "(", "number", ")", "if", "number", ">", "latest", "or", "latest", "<=", "0", ":", "return", "None", "return", "archive", "[", "number", "]" ]
Returns a :class:`WhatIf` object corresponding to the What If article of index passed to the function. If the index is less than zero or greater than the maximum number of articles published thus far, None is returned instead. Like all the routines for handling What If articles, :func:`getWhatIfArchive` is called first in order to establish a list of all previously published What Ifs. Arguments: number: an integer or string that represents a number, this is the index of article to retrieve. Returns the resulting :class:`WhatIf` object.
[ "Returns", "a", ":", "class", ":", "WhatIf", "object", "corresponding", "to", "the", "What", "If", "article", "of", "index", "passed", "to", "the", "function", ".", "If", "the", "index", "is", "less", "than", "zero", "or", "greater", "than", "the", "maximum", "number", "of", "articles", "published", "thus", "far", "None", "is", "returned", "instead", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L438-L460
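A usage sketch for getWhatIf; because an invalid index yields None rather than an empty object, the result is checked before use.

import xkcd

# Sketch: fetch one What If article by index; None means the index was invalid.
what_if = xkcd.getWhatIf(3)
if what_if is not None:
    print(what_if)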
TC01/python-xkcd
xkcd.py
convertToAscii
def convertToAscii(string, error="?"): """ Utility function that converts a unicode string to ASCII. This exists so the :class:`Comic` class can be compatible with Python 2 libraries that expect ASCII strings, such as Twisted (as of this writing, anyway). It is unlikely something you will need directly, and its use is discouraged. Arguments: string: the string to attempt to convert. error: a string that will be substituted into 'string' wherever Python is unable to automatically do the conversion. convertToAscii returns the converted string.""" running = True asciiString = string while running: try: asciiString = asciiString.encode('ascii') except UnicodeError as unicode: start = unicode.start end = unicode.end asciiString = asciiString[:start] + "?" + asciiString[end:] else: running = False return asciiString
python
def convertToAscii(string, error="?"): """ Utility function that converts a unicode string to ASCII. This exists so the :class:`Comic` class can be compatible with Python 2 libraries that expect ASCII strings, such as Twisted (as of this writing, anyway). It is unlikely something you will need directly, and its use is discouraged. Arguments: string: the string to attempt to convert. error: a string that will be substituted into 'string' wherever Python is unable to automatically do the conversion. convertToAscii returns the converted string.""" running = True asciiString = string while running: try: asciiString = asciiString.encode('ascii') except UnicodeError as unicode: start = unicode.start end = unicode.end asciiString = asciiString[:start] + "?" + asciiString[end:] else: running = False return asciiString
[ "def", "convertToAscii", "(", "string", ",", "error", "=", "\"?\"", ")", ":", "running", "=", "True", "asciiString", "=", "string", "while", "running", ":", "try", ":", "asciiString", "=", "asciiString", ".", "encode", "(", "'ascii'", ")", "except", "UnicodeError", "as", "unicode", ":", "start", "=", "unicode", ".", "start", "end", "=", "unicode", ".", "end", "asciiString", "=", "asciiString", "[", ":", "start", "]", "+", "\"?\"", "+", "asciiString", "[", "end", ":", "]", "else", ":", "running", "=", "False", "return", "asciiString" ]
Utility function that converts a unicode string to ASCII. This exists so the :class:`Comic` class can be compatible with Python 2 libraries that expect ASCII strings, such as Twisted (as of this writing, anyway). It is unlikely something you will need directly, and its use is discouraged. Arguments: string: the string to attempt to convert. error: a string that will be substituted into 'string' wherever Python is unable to automatically do the conversion. convertToAscii returns the converted string.
[ "Utility", "function", "that", "converts", "a", "unicode", "string", "to", "ASCII", ".", "This", "exists", "so", "the", ":", "class", ":", "Comic", "class", "can", "be", "compatible", "with", "Python", "2", "libraries", "that", "expect", "ASCII", "strings", "such", "as", "Twisted", "(", "as", "of", "this", "writing", "anyway", ")", ".", "It", "is", "unlikely", "something", "you", "will", "need", "directly", "and", "its", "use", "is", "discouraged", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L464-L491
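A tiny sketch for convertToAscii; note that under Python 3 the loop ends with a successful .encode('ascii') call, so a bytes object comes back.

import xkcd

# Sketch: every character that cannot be encoded as ASCII is replaced by "?".
print(xkcd.convertToAscii(u"Caf\u00e9"))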
TC01/python-xkcd
xkcd.py
Comic.download
def download(self, output="", outputFile="", silent=True): """ Downloads the image of the comic onto your computer. Arguments: output: the output directory where comics will be downloaded to. The default argument for 'output is the empty string; if the empty string is passed, it defaults to a "Downloads" directory in your home folder (this directory will be created if it does not exist). outputFile: the filename that will be written. If the empty string is passed, outputFile will default to a string of the form xkcd-(comic number)-(image filename), so for example, xkcd-1691-optimization.png. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the path to the downloaded file, or an empty string in the event of failure.""" image = urllib.urlopen(self.imageLink).read() #Process optional input to work out where the dowload will go and what it'll be called if output != "": output = os.path.abspath(os.path.expanduser(output)) if output == "" or not os.path.exists(output): output = os.path.expanduser(os.path.join("~", "Downloads")) # Create ~/Downloads if it doesn't exist, since this is the default path. if not os.path.exists(output): os.mkdir(output) if outputFile == "": outputFile = "xkcd-" + str(self.number) + "-" + self.imageName output = os.path.join(output, outputFile) try: download = open(output, 'wb') except: if not silent: print("Unable to make file " + output) return "" download.write(image) download.close() return output
python
def download(self, output="", outputFile="", silent=True): """ Downloads the image of the comic onto your computer. Arguments: output: the output directory where comics will be downloaded to. The default argument for 'output is the empty string; if the empty string is passed, it defaults to a "Downloads" directory in your home folder (this directory will be created if it does not exist). outputFile: the filename that will be written. If the empty string is passed, outputFile will default to a string of the form xkcd-(comic number)-(image filename), so for example, xkcd-1691-optimization.png. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the path to the downloaded file, or an empty string in the event of failure.""" image = urllib.urlopen(self.imageLink).read() #Process optional input to work out where the dowload will go and what it'll be called if output != "": output = os.path.abspath(os.path.expanduser(output)) if output == "" or not os.path.exists(output): output = os.path.expanduser(os.path.join("~", "Downloads")) # Create ~/Downloads if it doesn't exist, since this is the default path. if not os.path.exists(output): os.mkdir(output) if outputFile == "": outputFile = "xkcd-" + str(self.number) + "-" + self.imageName output = os.path.join(output, outputFile) try: download = open(output, 'wb') except: if not silent: print("Unable to make file " + output) return "" download.write(image) download.close() return output
[ "def", "download", "(", "self", ",", "output", "=", "\"\"", ",", "outputFile", "=", "\"\"", ",", "silent", "=", "True", ")", ":", "image", "=", "urllib", ".", "urlopen", "(", "self", ".", "imageLink", ")", ".", "read", "(", ")", "#Process optional input to work out where the dowload will go and what it'll be called", "if", "output", "!=", "\"\"", ":", "output", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "output", ")", ")", "if", "output", "==", "\"\"", "or", "not", "os", ".", "path", ".", "exists", "(", "output", ")", ":", "output", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "\"Downloads\"", ")", ")", "# Create ~/Downloads if it doesn't exist, since this is the default path.", "if", "not", "os", ".", "path", ".", "exists", "(", "output", ")", ":", "os", ".", "mkdir", "(", "output", ")", "if", "outputFile", "==", "\"\"", ":", "outputFile", "=", "\"xkcd-\"", "+", "str", "(", "self", ".", "number", ")", "+", "\"-\"", "+", "self", ".", "imageName", "output", "=", "os", ".", "path", ".", "join", "(", "output", ",", "outputFile", ")", "try", ":", "download", "=", "open", "(", "output", ",", "'wb'", ")", "except", ":", "if", "not", "silent", ":", "print", "(", "\"Unable to make file \"", "+", "output", ")", "return", "\"\"", "download", ".", "write", "(", "image", ")", "download", ".", "close", "(", ")", "return", "output" ]
Downloads the image of the comic onto your computer. Arguments: output: the output directory where comics will be downloaded to. The default argument for 'output is the empty string; if the empty string is passed, it defaults to a "Downloads" directory in your home folder (this directory will be created if it does not exist). outputFile: the filename that will be written. If the empty string is passed, outputFile will default to a string of the form xkcd-(comic number)-(image filename), so for example, xkcd-1691-optimization.png. silent: boolean, defaults to True. If set to False, an error will be printed to standard output should the provided integer argument not be valid. Returns the path to the downloaded file, or an empty string in the event of failure.
[ "Downloads", "the", "image", "of", "the", "comic", "onto", "your", "computer", "." ]
train
https://github.com/TC01/python-xkcd/blob/6998d4073507eea228185e02ad1d9071c77fa955/xkcd.py#L277-L317
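A download sketch using an explicit directory and filename; leaving both arguments empty would instead save into a Downloads directory in the home folder, as the docstring explains.

import xkcd

# Sketch: save one comic image to /tmp under a chosen name.
comic = xkcd.getComic(353)
path = comic.download(output="/tmp", outputFile="xkcd-353.png")
print(path)  # empty string if the file could not be written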
project-rig/rig
setup.py
replace_local_hyperlinks
def replace_local_hyperlinks( text, base_url="https://github.com/project-rig/rig/blob/master/"): """Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used. """ def get_new_url(url): return base_url + url[2:] # Deal with anonymous URLS for match in re.finditer(r"^__ (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub("^__ {}".format(orig_url), "__ {}".format(url), text, flags=re.MULTILINE) # Deal with named URLS for match in re.finditer(r"^\.\. _(?P<identifier>[^:]*): (?P<url>\./.*)", text, re.MULTILINE): identifier = match.groupdict()["identifier"] orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub( "^\.\. _{}: {}".format(identifier, orig_url), ".. _{}: {}".format(identifier, url), text, flags=re.MULTILINE) # Deal with image URLS for match in re.finditer(r"^\.\. image:: (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = text.replace(".. image:: {}".format(orig_url), ".. image:: {}".format(url)) return text
python
def replace_local_hyperlinks( text, base_url="https://github.com/project-rig/rig/blob/master/"): """Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used. """ def get_new_url(url): return base_url + url[2:] # Deal with anonymous URLS for match in re.finditer(r"^__ (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub("^__ {}".format(orig_url), "__ {}".format(url), text, flags=re.MULTILINE) # Deal with named URLS for match in re.finditer(r"^\.\. _(?P<identifier>[^:]*): (?P<url>\./.*)", text, re.MULTILINE): identifier = match.groupdict()["identifier"] orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub( "^\.\. _{}: {}".format(identifier, orig_url), ".. _{}: {}".format(identifier, url), text, flags=re.MULTILINE) # Deal with image URLS for match in re.finditer(r"^\.\. image:: (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = text.replace(".. image:: {}".format(orig_url), ".. image:: {}".format(url)) return text
[ "def", "replace_local_hyperlinks", "(", "text", ",", "base_url", "=", "\"https://github.com/project-rig/rig/blob/master/\"", ")", ":", "def", "get_new_url", "(", "url", ")", ":", "return", "base_url", "+", "url", "[", "2", ":", "]", "# Deal with anonymous URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^__ (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "re", ".", "sub", "(", "\"^__ {}\"", ".", "format", "(", "orig_url", ")", ",", "\"__ {}\"", ".", "format", "(", "url", ")", ",", "text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "# Deal with named URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "identifier", "=", "match", ".", "groupdict", "(", ")", "[", "\"identifier\"", "]", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "re", ".", "sub", "(", "\"^\\.\\. _{}: {}\"", ".", "format", "(", "identifier", ",", "orig_url", ")", ",", "\".. _{}: {}\"", ".", "format", "(", "identifier", ",", "url", ")", ",", "text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "# Deal with image URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^\\.\\. image:: (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "text", ".", "replace", "(", "\".. image:: {}\"", ".", "format", "(", "orig_url", ")", ",", "\".. image:: {}\"", ".", "format", "(", "url", ")", ")", "return", "text" ]
Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used.
[ "Replace", "local", "hyperlinks", "in", "RST", "with", "absolute", "addresses", "using", "the", "given", "base", "URL", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/setup.py#L13-L55
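A small sketch of the rewriting this helper performs. Since the function lives in Rig's setup.py, importing it as shown assumes the current directory is the Rig source tree; otherwise the function would have to be copied out first.

# Sketch: rewrite repository-relative RST links to absolute GitHub URLs.
from setup import replace_local_hyperlinks  # assumes CWD is the Rig checkout

rst = (
    ".. _docs: ./docs/index.rst\n"
    ".. image:: ./logo.png\n"
)
print(replace_local_hyperlinks(rst))
# .. _docs: https://github.com/project-rig/rig/blob/master/docs/index.rst
# .. image:: https://github.com/project-rig/rig/blob/master/logo.png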
Metatab/metapack
metapack/cli/index.py
dump_index
def dump_index(args, idx): """Create a metatab file for the index""" import csv import sys from metatab import MetatabDoc doc = MetatabDoc() pack_section = doc.new_section('Packages', ['Identifier', 'Name', 'Nvname', 'Version', 'Format']) r = doc['Root'] r.new_term('Root.Title', 'Package Index') for p in idx.list(): pack_section.new_term('Package', p['url'], identifier=p['ident'], name=p['name'], nvname=p['nvname'], version=p['version'], format=p['format']) doc.write_csv(args.dump)
python
def dump_index(args, idx): """Create a metatab file for the index""" import csv import sys from metatab import MetatabDoc doc = MetatabDoc() pack_section = doc.new_section('Packages', ['Identifier', 'Name', 'Nvname', 'Version', 'Format']) r = doc['Root'] r.new_term('Root.Title', 'Package Index') for p in idx.list(): pack_section.new_term('Package', p['url'], identifier=p['ident'], name=p['name'], nvname=p['nvname'], version=p['version'], format=p['format']) doc.write_csv(args.dump)
[ "def", "dump_index", "(", "args", ",", "idx", ")", ":", "import", "csv", "import", "sys", "from", "metatab", "import", "MetatabDoc", "doc", "=", "MetatabDoc", "(", ")", "pack_section", "=", "doc", ".", "new_section", "(", "'Packages'", ",", "[", "'Identifier'", ",", "'Name'", ",", "'Nvname'", ",", "'Version'", ",", "'Format'", "]", ")", "r", "=", "doc", "[", "'Root'", "]", "r", ".", "new_term", "(", "'Root.Title'", ",", "'Package Index'", ")", "for", "p", "in", "idx", ".", "list", "(", ")", ":", "pack_section", ".", "new_term", "(", "'Package'", ",", "p", "[", "'url'", "]", ",", "identifier", "=", "p", "[", "'ident'", "]", ",", "name", "=", "p", "[", "'name'", "]", ",", "nvname", "=", "p", "[", "'nvname'", "]", ",", "version", "=", "p", "[", "'version'", "]", ",", "format", "=", "p", "[", "'format'", "]", ")", "doc", ".", "write_csv", "(", "args", ".", "dump", ")" ]
Create a metatab file for the index
[ "Create", "a", "metatab", "file", "for", "the", "index" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/index.py#L223-L248
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._setup
def _setup(self): """Setup the layer two agent.""" agent_config = CONF.get("AGENT", {}) self._worker_count = agent_config.get('worker_count') self._phys_net_map = agent_config.get( 'physical_network_vswitch_mappings', []) self._local_network_vswitch = agent_config.get( 'local_network_vswitch') self._load_physical_network_mappings(self._phys_net_map) self._validate_vswitches() self._endpoints.append(self) self._event_callback_pairs.extend([ (self._utils.EVENT_TYPE_CREATE, self._process_added_port_event), (self._utils.EVENT_TYPE_DELETE, self._process_removed_port_event) ]) tpool.set_num_threads(self._worker_count)
python
def _setup(self): """Setup the layer two agent.""" agent_config = CONF.get("AGENT", {}) self._worker_count = agent_config.get('worker_count') self._phys_net_map = agent_config.get( 'physical_network_vswitch_mappings', []) self._local_network_vswitch = agent_config.get( 'local_network_vswitch') self._load_physical_network_mappings(self._phys_net_map) self._validate_vswitches() self._endpoints.append(self) self._event_callback_pairs.extend([ (self._utils.EVENT_TYPE_CREATE, self._process_added_port_event), (self._utils.EVENT_TYPE_DELETE, self._process_removed_port_event) ]) tpool.set_num_threads(self._worker_count)
[ "def", "_setup", "(", "self", ")", ":", "agent_config", "=", "CONF", ".", "get", "(", "\"AGENT\"", ",", "{", "}", ")", "self", ".", "_worker_count", "=", "agent_config", ".", "get", "(", "'worker_count'", ")", "self", ".", "_phys_net_map", "=", "agent_config", ".", "get", "(", "'physical_network_vswitch_mappings'", ",", "[", "]", ")", "self", ".", "_local_network_vswitch", "=", "agent_config", ".", "get", "(", "'local_network_vswitch'", ")", "self", ".", "_load_physical_network_mappings", "(", "self", ".", "_phys_net_map", ")", "self", ".", "_validate_vswitches", "(", ")", "self", ".", "_endpoints", ".", "append", "(", "self", ")", "self", ".", "_event_callback_pairs", ".", "extend", "(", "[", "(", "self", ".", "_utils", ".", "EVENT_TYPE_CREATE", ",", "self", ".", "_process_added_port_event", ")", ",", "(", "self", ".", "_utils", ".", "EVENT_TYPE_DELETE", ",", "self", ".", "_process_removed_port_event", ")", "]", ")", "tpool", ".", "set_num_threads", "(", "self", ".", "_worker_count", ")" ]
Setup the layer two agent.
[ "Setup", "the", "layer", "two", "agent", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L78-L95
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._setup_rpc
def _setup_rpc(self): """Setup the RPC client for the current agent.""" self._plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self._state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) self._client = n_rpc.get_client(self.target) self._consumers.extend([ [topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.PORT, topics.DELETE] ]) self._connection = agent_rpc.create_consumers( self._endpoints, self._topic, self._consumers, start_listening=False ) self._setup_qos_extension() self._connection.consume_in_threads() report_interval = CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval)
python
def _setup_rpc(self): """Setup the RPC client for the current agent.""" self._plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self._state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) self._client = n_rpc.get_client(self.target) self._consumers.extend([ [topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.PORT, topics.DELETE] ]) self._connection = agent_rpc.create_consumers( self._endpoints, self._topic, self._consumers, start_listening=False ) self._setup_qos_extension() self._connection.consume_in_threads() report_interval = CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval)
[ "def", "_setup_rpc", "(", "self", ")", ":", "self", ".", "_plugin_rpc", "=", "agent_rpc", ".", "PluginApi", "(", "topics", ".", "PLUGIN", ")", "self", ".", "_state_rpc", "=", "agent_rpc", ".", "PluginReportStateAPI", "(", "topics", ".", "PLUGIN", ")", "self", ".", "_client", "=", "n_rpc", ".", "get_client", "(", "self", ".", "target", ")", "self", ".", "_consumers", ".", "extend", "(", "[", "[", "topics", ".", "PORT", ",", "topics", ".", "UPDATE", "]", ",", "[", "topics", ".", "NETWORK", ",", "topics", ".", "DELETE", "]", ",", "[", "topics", ".", "PORT", ",", "topics", ".", "DELETE", "]", "]", ")", "self", ".", "_connection", "=", "agent_rpc", ".", "create_consumers", "(", "self", ".", "_endpoints", ",", "self", ".", "_topic", ",", "self", ".", "_consumers", ",", "start_listening", "=", "False", ")", "self", ".", "_setup_qos_extension", "(", ")", "self", ".", "_connection", ".", "consume_in_threads", "(", ")", "report_interval", "=", "CONF", ".", "AGENT", ".", "report_interval", "if", "report_interval", ":", "heartbeat", "=", "loopingcall", ".", "FixedIntervalLoopingCall", "(", "self", ".", "_report_state", ")", "heartbeat", ".", "start", "(", "interval", "=", "report_interval", ")" ]
Setup the RPC client for the current agent.
[ "Setup", "the", "RPC", "client", "for", "the", "current", "agent", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L101-L123
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._process_added_port_event
def _process_added_port_event(self, port_name): """Callback for added ports.""" LOG.info("Hyper-V VM vNIC added: %s", port_name) self._added_ports.add(port_name)
python
def _process_added_port_event(self, port_name): """Callback for added ports.""" LOG.info("Hyper-V VM vNIC added: %s", port_name) self._added_ports.add(port_name)
[ "def", "_process_added_port_event", "(", "self", ",", "port_name", ")", ":", "LOG", ".", "info", "(", "\"Hyper-V VM vNIC added: %s\"", ",", "port_name", ")", "self", ".", "_added_ports", ".", "add", "(", "port_name", ")" ]
Callback for added ports.
[ "Callback", "for", "added", "ports", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L125-L128
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._load_physical_network_mappings
def _load_physical_network_mappings(self, phys_net_vswitch_mappings): """Load all the information regarding the physical network.""" for mapping in phys_net_vswitch_mappings: parts = mapping.split(':') if len(parts) != 2: LOG.debug('Invalid physical network mapping: %s', mapping) else: pattern = re.escape(parts[0].strip()).replace('\\*', '.*') pattern = pattern + '$' vswitch = parts[1].strip() self._physical_network_mappings[pattern] = vswitch
python
def _load_physical_network_mappings(self, phys_net_vswitch_mappings): """Load all the information regarding the physical network.""" for mapping in phys_net_vswitch_mappings: parts = mapping.split(':') if len(parts) != 2: LOG.debug('Invalid physical network mapping: %s', mapping) else: pattern = re.escape(parts[0].strip()).replace('\\*', '.*') pattern = pattern + '$' vswitch = parts[1].strip() self._physical_network_mappings[pattern] = vswitch
[ "def", "_load_physical_network_mappings", "(", "self", ",", "phys_net_vswitch_mappings", ")", ":", "for", "mapping", "in", "phys_net_vswitch_mappings", ":", "parts", "=", "mapping", ".", "split", "(", "':'", ")", "if", "len", "(", "parts", ")", "!=", "2", ":", "LOG", ".", "debug", "(", "'Invalid physical network mapping: %s'", ",", "mapping", ")", "else", ":", "pattern", "=", "re", ".", "escape", "(", "parts", "[", "0", "]", ".", "strip", "(", ")", ")", ".", "replace", "(", "'\\\\*'", ",", "'.*'", ")", "pattern", "=", "pattern", "+", "'$'", "vswitch", "=", "parts", "[", "1", "]", ".", "strip", "(", ")", "self", ".", "_physical_network_mappings", "[", "pattern", "]", "=", "vswitch" ]
Load all the information regarding the physical network.
[ "Load", "all", "the", "information", "regarding", "the", "physical", "network", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L134-L144
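The mapping entries have the form "physical-network-pattern:vswitch-name", with "*" acting as a wildcard. A standalone sketch of the same translation, kept outside the agent class so it runs on its own:

import re

# Standalone sketch: turn "physnet-pattern:vswitch" entries into
# anchored-regex -> vswitch pairs, mirroring the method above.
def load_mappings(entries):
    mappings = {}
    for entry in entries:
        parts = entry.split(':')
        if len(parts) != 2:
            continue  # the agent logs and skips malformed entries
        pattern = re.escape(parts[0].strip()).replace('\\*', '.*') + '$'
        mappings[pattern] = parts[1].strip()
    return mappings

for pattern, vswitch in load_mappings(['physnet1:external', 'physnet*:default']).items():
    print(pattern, '->', vswitch)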
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._get_vswitch_name
def _get_vswitch_name(self, network_type, physical_network): """Get the vswitch name for the received network information.""" if network_type != constants.TYPE_LOCAL: vswitch_name = self._get_vswitch_for_physical_network( physical_network) else: vswitch_name = self._local_network_vswitch if vswitch_name: return vswitch_name err_msg = _("No vSwitch configured for physical network " "'%(physical_network)s'. Neutron network type: " "'%(network_type)s'.") raise exception.NetworkingHyperVException( err_msg % dict(physical_network=physical_network, network_type=network_type))
python
def _get_vswitch_name(self, network_type, physical_network): """Get the vswitch name for the received network information.""" if network_type != constants.TYPE_LOCAL: vswitch_name = self._get_vswitch_for_physical_network( physical_network) else: vswitch_name = self._local_network_vswitch if vswitch_name: return vswitch_name err_msg = _("No vSwitch configured for physical network " "'%(physical_network)s'. Neutron network type: " "'%(network_type)s'.") raise exception.NetworkingHyperVException( err_msg % dict(physical_network=physical_network, network_type=network_type))
[ "def", "_get_vswitch_name", "(", "self", ",", "network_type", ",", "physical_network", ")", ":", "if", "network_type", "!=", "constants", ".", "TYPE_LOCAL", ":", "vswitch_name", "=", "self", ".", "_get_vswitch_for_physical_network", "(", "physical_network", ")", "else", ":", "vswitch_name", "=", "self", ".", "_local_network_vswitch", "if", "vswitch_name", ":", "return", "vswitch_name", "err_msg", "=", "_", "(", "\"No vSwitch configured for physical network \"", "\"'%(physical_network)s'. Neutron network type: \"", "\"'%(network_type)s'.\"", ")", "raise", "exception", ".", "NetworkingHyperVException", "(", "err_msg", "%", "dict", "(", "physical_network", "=", "physical_network", ",", "network_type", "=", "network_type", ")", ")" ]
Get the vswitch name for the received network information.
[ "Get", "the", "vswitch", "name", "for", "the", "received", "network", "information", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L189-L205
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._get_vswitch_for_physical_network
def _get_vswitch_for_physical_network(self, phys_network_name): """Get the vswitch name for the received network name.""" for pattern in self._physical_network_mappings: if phys_network_name is None: phys_network_name = '' if re.match(pattern, phys_network_name): return self._physical_network_mappings[pattern]
python
def _get_vswitch_for_physical_network(self, phys_network_name): """Get the vswitch name for the received network name.""" for pattern in self._physical_network_mappings: if phys_network_name is None: phys_network_name = '' if re.match(pattern, phys_network_name): return self._physical_network_mappings[pattern]
[ "def", "_get_vswitch_for_physical_network", "(", "self", ",", "phys_network_name", ")", ":", "for", "pattern", "in", "self", ".", "_physical_network_mappings", ":", "if", "phys_network_name", "is", "None", ":", "phys_network_name", "=", "''", "if", "re", ".", "match", "(", "pattern", ",", "phys_network_name", ")", ":", "return", "self", ".", "_physical_network_mappings", "[", "pattern", "]" ]
Get the vswitch name for the received network name.
[ "Get", "the", "vswitch", "name", "for", "the", "received", "network", "name", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L207-L213
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._get_network_vswitch_map_by_port_id
def _get_network_vswitch_map_by_port_id(self, port_id): """Get the vswitch name for the received port id.""" for network_id, vswitch in six.iteritems(self._network_vswitch_map): if port_id in vswitch['ports']: return (network_id, vswitch) # If the port was not found, just return (None, None) return (None, None)
python
def _get_network_vswitch_map_by_port_id(self, port_id): """Get the vswitch name for the received port id.""" for network_id, vswitch in six.iteritems(self._network_vswitch_map): if port_id in vswitch['ports']: return (network_id, vswitch) # If the port was not found, just return (None, None) return (None, None)
[ "def", "_get_network_vswitch_map_by_port_id", "(", "self", ",", "port_id", ")", ":", "for", "network_id", ",", "vswitch", "in", "six", ".", "iteritems", "(", "self", ".", "_network_vswitch_map", ")", ":", "if", "port_id", "in", "vswitch", "[", "'ports'", "]", ":", "return", "(", "network_id", ",", "vswitch", ")", "# If the port was not found, just return (None, None)", "return", "(", "None", ",", "None", ")" ]
Get the vswitch name for the received port id.
[ "Get", "the", "vswitch", "name", "for", "the", "received", "port", "id", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L215-L222
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._update_port_status_cache
def _update_port_status_cache(self, device, device_bound=True): """Update the ports status cache.""" with self._cache_lock: if device_bound: self._bound_ports.add(device) self._unbound_ports.discard(device) else: self._bound_ports.discard(device) self._unbound_ports.add(device)
python
def _update_port_status_cache(self, device, device_bound=True): """Update the ports status cache.""" with self._cache_lock: if device_bound: self._bound_ports.add(device) self._unbound_ports.discard(device) else: self._bound_ports.discard(device) self._unbound_ports.add(device)
[ "def", "_update_port_status_cache", "(", "self", ",", "device", ",", "device_bound", "=", "True", ")", ":", "with", "self", ".", "_cache_lock", ":", "if", "device_bound", ":", "self", ".", "_bound_ports", ".", "add", "(", "device", ")", "self", ".", "_unbound_ports", ".", "discard", "(", "device", ")", "else", ":", "self", ".", "_bound_ports", ".", "discard", "(", "device", ")", "self", ".", "_unbound_ports", ".", "add", "(", "device", ")" ]
Update the ports status cache.
[ "Update", "the", "ports", "status", "cache", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L224-L232
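A rough sketch of the lock-guarded bound/unbound bookkeeping described in this record, with a plain threading.Lock standing in for the agent's _cache_lock.

    import threading

    cache_lock = threading.Lock()          # stand-in for the agent's _cache_lock
    bound_ports, unbound_ports = set(), set()

    def update_port_status_cache(device, device_bound=True):
        # Keep the two sets disjoint while holding the lock.
        with cache_lock:
            if device_bound:
                bound_ports.add(device)
                unbound_ports.discard(device)
            else:
                bound_ports.discard(device)
                unbound_ports.add(device)

    update_port_status_cache("port-1", device_bound=True)
    update_port_status_cache("port-1", device_bound=False)
    print(bound_ports, unbound_ports)      # set() {'port-1'}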
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._create_event_listeners
def _create_event_listeners(self): """Create and bind the event listeners.""" LOG.debug("Create the event listeners.") for event_type, callback in self._event_callback_pairs: LOG.debug("Create listener for %r event", event_type) listener = self._utils.get_vnic_event_listener(event_type) eventlet.spawn_n(listener, callback)
python
def _create_event_listeners(self): """Create and bind the event listeners.""" LOG.debug("Create the event listeners.") for event_type, callback in self._event_callback_pairs: LOG.debug("Create listener for %r event", event_type) listener = self._utils.get_vnic_event_listener(event_type) eventlet.spawn_n(listener, callback)
[ "def", "_create_event_listeners", "(", "self", ")", ":", "LOG", ".", "debug", "(", "\"Create the event listeners.\"", ")", "for", "event_type", ",", "callback", "in", "self", ".", "_event_callback_pairs", ":", "LOG", ".", "debug", "(", "\"Create listener for %r event\"", ",", "event_type", ")", "listener", "=", "self", ".", "_utils", ".", "get_vnic_event_listener", "(", "event_type", ")", "eventlet", ".", "spawn_n", "(", "listener", ",", "callback", ")" ]
Create and bind the event listeners.
[ "Create", "and", "bind", "the", "event", "listeners", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L234-L240
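The record above spawns one green thread per (event type, callback) pair. A dependency-free approximation of that dispatch pattern, using plain threads and a fake listener in place of eventlet.spawn_n and the os_win vNIC event listeners:

    import threading

    def fake_listener(callback):
        # Stand-in for utils.get_vnic_event_listener(event_type); a real
        # listener blocks and invokes the callback for every matching event.
        for port_id in ("port-a", "port-b"):
            callback(port_id)

    event_callback_pairs = [
        ("created", lambda port: print("process created", port)),
        ("deleted", lambda port: print("process deleted", port)),
    ]

    threads = [threading.Thread(target=fake_listener, args=(callback,))
               for _event_type, callback in event_callback_pairs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()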
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._port_bound
def _port_bound(self, port_id, network_id, network_type, physical_network, segmentation_id, port_security_enabled, set_port_sriov): """Bind the port to the recived network.""" LOG.debug("Binding port %s", port_id) if network_id not in self._network_vswitch_map: self._provision_network( port_id, network_id, network_type, physical_network, segmentation_id) vswitch_map = self._network_vswitch_map[network_id] vswitch_map['ports'].append(port_id) LOG.debug("Trying to connect the current port to vswitch %r.", vswitch_map['vswitch_name']) self._utils.connect_vnic_to_vswitch( vswitch_name=vswitch_map['vswitch_name'], switch_port_name=port_id, ) if set_port_sriov: LOG.debug("Enabling SR-IOV for port: %s", port_id) self._utils.set_vswitch_port_sriov(port_id, True)
python
def _port_bound(self, port_id, network_id, network_type, physical_network, segmentation_id, port_security_enabled, set_port_sriov): """Bind the port to the recived network.""" LOG.debug("Binding port %s", port_id) if network_id not in self._network_vswitch_map: self._provision_network( port_id, network_id, network_type, physical_network, segmentation_id) vswitch_map = self._network_vswitch_map[network_id] vswitch_map['ports'].append(port_id) LOG.debug("Trying to connect the current port to vswitch %r.", vswitch_map['vswitch_name']) self._utils.connect_vnic_to_vswitch( vswitch_name=vswitch_map['vswitch_name'], switch_port_name=port_id, ) if set_port_sriov: LOG.debug("Enabling SR-IOV for port: %s", port_id) self._utils.set_vswitch_port_sriov(port_id, True)
[ "def", "_port_bound", "(", "self", ",", "port_id", ",", "network_id", ",", "network_type", ",", "physical_network", ",", "segmentation_id", ",", "port_security_enabled", ",", "set_port_sriov", ")", ":", "LOG", ".", "debug", "(", "\"Binding port %s\"", ",", "port_id", ")", "if", "network_id", "not", "in", "self", ".", "_network_vswitch_map", ":", "self", ".", "_provision_network", "(", "port_id", ",", "network_id", ",", "network_type", ",", "physical_network", ",", "segmentation_id", ")", "vswitch_map", "=", "self", ".", "_network_vswitch_map", "[", "network_id", "]", "vswitch_map", "[", "'ports'", "]", ".", "append", "(", "port_id", ")", "LOG", ".", "debug", "(", "\"Trying to connect the current port to vswitch %r.\"", ",", "vswitch_map", "[", "'vswitch_name'", "]", ")", "self", ".", "_utils", ".", "connect_vnic_to_vswitch", "(", "vswitch_name", "=", "vswitch_map", "[", "'vswitch_name'", "]", ",", "switch_port_name", "=", "port_id", ",", ")", "if", "set_port_sriov", ":", "LOG", ".", "debug", "(", "\"Enabling SR-IOV for port: %s\"", ",", "port_id", ")", "self", ".", "_utils", ".", "set_vswitch_port_sriov", "(", "port_id", ",", "True", ")" ]
Bind the port to the recived network.
[ "Bind", "the", "port", "to", "the", "recived", "network", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L251-L272
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent.process_added_port
def process_added_port(self, device_details): """Process the new ports. Wraps _process_added_port, and treats the sucessful and exception cases. """ device = device_details['device'] port_id = device_details['port_id'] reprocess = True try: self._process_added_port(device_details) LOG.debug("Updating cached port %s status as UP.", port_id) self._update_port_status_cache(device, device_bound=True) LOG.info("Port %s processed.", port_id) except os_win_exc.HyperVvNicNotFound: LOG.debug('vNIC %s not found. This can happen if the VM was ' 'destroyed.', port_id) reprocess = False except os_win_exc.HyperVPortNotFoundException: LOG.debug('vSwitch port %s not found. This can happen if the VM ' 'was destroyed.', port_id) # NOTE(claudiub): just to be on the safe side, in case Hyper-V said # that the port was added, but it hasn't really, we're leaving # reprocess = True. If the VM / vNIC was removed, on the next # reprocess, a HyperVvNicNotFound will be raised. except Exception as ex: # NOTE(claudiub): in case of a non-transient error, the port will # be processed over and over again, and will not be reported as # bound (e.g.: InvalidParameterValue when setting QoS), until the # port is deleted. These issues have to be investigated and solved LOG.exception("Exception encountered while processing " "port %(port_id)s. Exception: %(ex)s", dict(port_id=port_id, ex=ex)) else: # no exception encountered, no need to reprocess. reprocess = False if reprocess: # Readd the port as "added", so it can be reprocessed. self._added_ports.add(device) # Force cache refresh. self._refresh_cache = True return False return True
python
def process_added_port(self, device_details): """Process the new ports. Wraps _process_added_port, and treats the sucessful and exception cases. """ device = device_details['device'] port_id = device_details['port_id'] reprocess = True try: self._process_added_port(device_details) LOG.debug("Updating cached port %s status as UP.", port_id) self._update_port_status_cache(device, device_bound=True) LOG.info("Port %s processed.", port_id) except os_win_exc.HyperVvNicNotFound: LOG.debug('vNIC %s not found. This can happen if the VM was ' 'destroyed.', port_id) reprocess = False except os_win_exc.HyperVPortNotFoundException: LOG.debug('vSwitch port %s not found. This can happen if the VM ' 'was destroyed.', port_id) # NOTE(claudiub): just to be on the safe side, in case Hyper-V said # that the port was added, but it hasn't really, we're leaving # reprocess = True. If the VM / vNIC was removed, on the next # reprocess, a HyperVvNicNotFound will be raised. except Exception as ex: # NOTE(claudiub): in case of a non-transient error, the port will # be processed over and over again, and will not be reported as # bound (e.g.: InvalidParameterValue when setting QoS), until the # port is deleted. These issues have to be investigated and solved LOG.exception("Exception encountered while processing " "port %(port_id)s. Exception: %(ex)s", dict(port_id=port_id, ex=ex)) else: # no exception encountered, no need to reprocess. reprocess = False if reprocess: # Readd the port as "added", so it can be reprocessed. self._added_ports.add(device) # Force cache refresh. self._refresh_cache = True return False return True
[ "def", "process_added_port", "(", "self", ",", "device_details", ")", ":", "device", "=", "device_details", "[", "'device'", "]", "port_id", "=", "device_details", "[", "'port_id'", "]", "reprocess", "=", "True", "try", ":", "self", ".", "_process_added_port", "(", "device_details", ")", "LOG", ".", "debug", "(", "\"Updating cached port %s status as UP.\"", ",", "port_id", ")", "self", ".", "_update_port_status_cache", "(", "device", ",", "device_bound", "=", "True", ")", "LOG", ".", "info", "(", "\"Port %s processed.\"", ",", "port_id", ")", "except", "os_win_exc", ".", "HyperVvNicNotFound", ":", "LOG", ".", "debug", "(", "'vNIC %s not found. This can happen if the VM was '", "'destroyed.'", ",", "port_id", ")", "reprocess", "=", "False", "except", "os_win_exc", ".", "HyperVPortNotFoundException", ":", "LOG", ".", "debug", "(", "'vSwitch port %s not found. This can happen if the VM '", "'was destroyed.'", ",", "port_id", ")", "# NOTE(claudiub): just to be on the safe side, in case Hyper-V said", "# that the port was added, but it hasn't really, we're leaving", "# reprocess = True. If the VM / vNIC was removed, on the next", "# reprocess, a HyperVvNicNotFound will be raised.", "except", "Exception", "as", "ex", ":", "# NOTE(claudiub): in case of a non-transient error, the port will", "# be processed over and over again, and will not be reported as", "# bound (e.g.: InvalidParameterValue when setting QoS), until the", "# port is deleted. These issues have to be investigated and solved", "LOG", ".", "exception", "(", "\"Exception encountered while processing \"", "\"port %(port_id)s. Exception: %(ex)s\"", ",", "dict", "(", "port_id", "=", "port_id", ",", "ex", "=", "ex", ")", ")", "else", ":", "# no exception encountered, no need to reprocess.", "reprocess", "=", "False", "if", "reprocess", ":", "# Readd the port as \"added\", so it can be reprocessed.", "self", ".", "_added_ports", ".", "add", "(", "device", ")", "# Force cache refresh.", "self", ".", "_refresh_cache", "=", "True", "return", "False", "return", "True" ]
Process the new ports. Wraps _process_added_port, and treats the sucessful and exception cases.
[ "Process", "the", "new", "ports", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L304-L350
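A stripped-down sketch of the retry bookkeeping process_added_port implements: on any failure the device goes back into the "added" set so the next agent loop reprocesses it. The bind callable and port names are illustrative.

    added_ports = set()
    refresh_cache = False

    def process_added_port(device_details, bind):
        # Returns True when the port was processed, False when it was re-queued.
        global refresh_cache
        device = device_details['device']
        reprocess = True
        try:
            bind(device_details)
        except Exception as exc:
            print("failed to process %s: %s" % (device, exc))
        else:
            reprocess = False
        if reprocess:
            added_ports.add(device)    # re-add so the next loop retries it
            refresh_cache = True
            return False
        return True

    print(process_added_port({'device': 'p1'}, lambda d: None))   # True
    print(process_added_port({'device': 'p2'}, lambda d: 1 / 0))  # False; p2 re-queued
    print(added_ports)                                            # {'p2'}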
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._treat_devices_added
def _treat_devices_added(self): """Process the new devices.""" try: devices_details_list = self._plugin_rpc.get_devices_details_list( self._context, self._added_ports, self._agent_id) except Exception as exc: LOG.debug("Unable to get ports details for " "devices %(devices)s: %(exc)s", {'devices': self._added_ports, 'exc': exc}) return for device_details in devices_details_list: device = device_details['device'] LOG.info("Adding port %s", device) if 'port_id' in device_details: LOG.info("Port %(device)s updated. " "Details: %(device_details)s", {'device': device, 'device_details': device_details}) eventlet.spawn_n(self.process_added_port, device_details) else: LOG.debug("Missing port_id from device details: " "%(device)s. Details: %(device_details)s", {'device': device, 'device_details': device_details}) LOG.debug("Remove the port from added ports set, so it " "doesn't get reprocessed.") self._added_ports.discard(device)
python
def _treat_devices_added(self): """Process the new devices.""" try: devices_details_list = self._plugin_rpc.get_devices_details_list( self._context, self._added_ports, self._agent_id) except Exception as exc: LOG.debug("Unable to get ports details for " "devices %(devices)s: %(exc)s", {'devices': self._added_ports, 'exc': exc}) return for device_details in devices_details_list: device = device_details['device'] LOG.info("Adding port %s", device) if 'port_id' in device_details: LOG.info("Port %(device)s updated. " "Details: %(device_details)s", {'device': device, 'device_details': device_details}) eventlet.spawn_n(self.process_added_port, device_details) else: LOG.debug("Missing port_id from device details: " "%(device)s. Details: %(device_details)s", {'device': device, 'device_details': device_details}) LOG.debug("Remove the port from added ports set, so it " "doesn't get reprocessed.") self._added_ports.discard(device)
[ "def", "_treat_devices_added", "(", "self", ")", ":", "try", ":", "devices_details_list", "=", "self", ".", "_plugin_rpc", ".", "get_devices_details_list", "(", "self", ".", "_context", ",", "self", ".", "_added_ports", ",", "self", ".", "_agent_id", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "debug", "(", "\"Unable to get ports details for \"", "\"devices %(devices)s: %(exc)s\"", ",", "{", "'devices'", ":", "self", ".", "_added_ports", ",", "'exc'", ":", "exc", "}", ")", "return", "for", "device_details", "in", "devices_details_list", ":", "device", "=", "device_details", "[", "'device'", "]", "LOG", ".", "info", "(", "\"Adding port %s\"", ",", "device", ")", "if", "'port_id'", "in", "device_details", ":", "LOG", ".", "info", "(", "\"Port %(device)s updated. \"", "\"Details: %(device_details)s\"", ",", "{", "'device'", ":", "device", ",", "'device_details'", ":", "device_details", "}", ")", "eventlet", ".", "spawn_n", "(", "self", ".", "process_added_port", ",", "device_details", ")", "else", ":", "LOG", ".", "debug", "(", "\"Missing port_id from device details: \"", "\"%(device)s. Details: %(device_details)s\"", ",", "{", "'device'", ":", "device", ",", "'device_details'", ":", "device_details", "}", ")", "LOG", ".", "debug", "(", "\"Remove the port from added ports set, so it \"", "\"doesn't get reprocessed.\"", ")", "self", ".", "_added_ports", ".", "discard", "(", "device", ")" ]
Process the new devices.
[ "Process", "the", "new", "devices", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L352-L378
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._process_removed_port
def _process_removed_port(self, device): """Process the removed ports.""" LOG.debug("Trying to remove the port %r", device) self._update_port_status_cache(device, device_bound=False) self._port_unbound(device, vnic_deleted=True) LOG.debug("The port was successfully removed.") self._removed_ports.discard(device)
python
def _process_removed_port(self, device): """Process the removed ports.""" LOG.debug("Trying to remove the port %r", device) self._update_port_status_cache(device, device_bound=False) self._port_unbound(device, vnic_deleted=True) LOG.debug("The port was successfully removed.") self._removed_ports.discard(device)
[ "def", "_process_removed_port", "(", "self", ",", "device", ")", ":", "LOG", ".", "debug", "(", "\"Trying to remove the port %r\"", ",", "device", ")", "self", ".", "_update_port_status_cache", "(", "device", ",", "device_bound", "=", "False", ")", "self", ".", "_port_unbound", "(", "device", ",", "vnic_deleted", "=", "True", ")", "LOG", ".", "debug", "(", "\"The port was successfully removed.\"", ")", "self", ".", "_removed_ports", ".", "discard", "(", "device", ")" ]
Process the removed ports.
[ "Process", "the", "removed", "ports", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L380-L387
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._treat_devices_removed
def _treat_devices_removed(self): """Process the removed devices.""" for device in self._removed_ports.copy(): eventlet.spawn_n(self._process_removed_port, device)
python
def _treat_devices_removed(self): """Process the removed devices.""" for device in self._removed_ports.copy(): eventlet.spawn_n(self._process_removed_port, device)
[ "def", "_treat_devices_removed", "(", "self", ")", ":", "for", "device", "in", "self", ".", "_removed_ports", ".", "copy", "(", ")", ":", "eventlet", ".", "spawn_n", "(", "self", ".", "_process_removed_port", ",", "device", ")" ]
Process the removed devices.
[ "Process", "the", "removed", "devices", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L389-L392
openstack/networking-hyperv
networking_hyperv/neutron/agent/layer2.py
Layer2Agent._work
def _work(self): """Process the information regarding the available ports.""" if self._refresh_cache: # Inconsistent cache might cause exceptions. For example, # if a port has been removed, it will be known in the next # loop. Using the old switch port can cause exceptions. LOG.debug("Refreshing os_win caches...") self._utils.update_cache() self._refresh_cache = False if self._bound_ports or self._unbound_ports: eventlet.spawn_n(self._notify_plugin_on_port_updates) # notify plugin about port deltas if self._added_ports: LOG.debug("Agent loop has new devices!") self._treat_devices_added() if self._removed_ports: LOG.debug("Agent loop has lost devices...") self._treat_devices_removed()
python
def _work(self): """Process the information regarding the available ports.""" if self._refresh_cache: # Inconsistent cache might cause exceptions. For example, # if a port has been removed, it will be known in the next # loop. Using the old switch port can cause exceptions. LOG.debug("Refreshing os_win caches...") self._utils.update_cache() self._refresh_cache = False if self._bound_ports or self._unbound_ports: eventlet.spawn_n(self._notify_plugin_on_port_updates) # notify plugin about port deltas if self._added_ports: LOG.debug("Agent loop has new devices!") self._treat_devices_added() if self._removed_ports: LOG.debug("Agent loop has lost devices...") self._treat_devices_removed()
[ "def", "_work", "(", "self", ")", ":", "if", "self", ".", "_refresh_cache", ":", "# Inconsistent cache might cause exceptions. For example,", "# if a port has been removed, it will be known in the next", "# loop. Using the old switch port can cause exceptions.", "LOG", ".", "debug", "(", "\"Refreshing os_win caches...\"", ")", "self", ".", "_utils", ".", "update_cache", "(", ")", "self", ".", "_refresh_cache", "=", "False", "if", "self", ".", "_bound_ports", "or", "self", ".", "_unbound_ports", ":", "eventlet", ".", "spawn_n", "(", "self", ".", "_notify_plugin_on_port_updates", ")", "# notify plugin about port deltas", "if", "self", ".", "_added_ports", ":", "LOG", ".", "debug", "(", "\"Agent loop has new devices!\"", ")", "self", ".", "_treat_devices_added", "(", ")", "if", "self", ".", "_removed_ports", ":", "LOG", ".", "debug", "(", "\"Agent loop has lost devices...\"", ")", "self", ".", "_treat_devices_removed", "(", ")" ]
Process the information regarding the available ports.
[ "Process", "the", "information", "regarding", "the", "available", "ports", "." ]
train
https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/layer2.py#L412-L432
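A schematic of one pass of the agent loop documented above, with print statements standing in for the cache refresh, plugin notification and device handlers.

    refresh_cache = True
    bound_ports, unbound_ports = {"p1"}, set()
    added_ports, removed_ports = {"p2"}, set()

    def work(refresh, notify, treat_added, treat_removed):
        # Refresh caches first, then push status updates, then handle deltas.
        global refresh_cache
        if refresh_cache:
            refresh()
            refresh_cache = False
        if bound_ports or unbound_ports:
            notify()
        if added_ports:
            treat_added()
        if removed_ports:
            treat_removed()

    work(lambda: print("refresh os_win cache"),
         lambda: print("notify plugin of port updates"),
         lambda: print("treat added devices"),
         lambda: print("treat removed devices"))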
Metatab/metapack
metapack/cli/build.py
build
def build(subparsers): """ Build source packages. The mp build program runs all of the resources listed in a Metatab file and produces one or more Metapack packages with those resources localized. It will always try to produce a Filesystem package, and may optionally produce Excel, Zip and CSV packages. Typical usage is to be run inside a source package directory with .. code-block:: bash $ mp build To build all of the package types: .. code-block:: bash $ mp build -fezc By default, packages are built with versioned names. The :option:`--nonversion-name` option will create file packages with non-versioned name, and the :option:`--nonversioned-link` option will produce a non-versioned soft link pointing to the versioned file. """ parser = subparsers.add_parser( 'build', help='Build derived packages', description=build.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog='') parser.set_defaults(run_command=run_metapack) parser.add_argument('metatabfile', nargs='?', help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. " ) parser.add_argument('-p', '--profile', help="Name of a BOTO or AWS credentails profile", required=False) parser.add_argument('-D', '--package-directory', help="Write Zip, Excel and CSV packages to an alternate directory", required=False) parser.add_argument('-F', '--force', action='store_true', default=False, help='Force some operations, like updating the name and building packages') parser.add_argument('-R', '--reuse-resources', action='store_true', default=False, help='When building Filesystem package, try to reuse resources built in prior build') group = parser.add_mutually_exclusive_group() group.add_argument('-n', '--nonversion-name', action='store_true', default=False, help='Write file packages with non-versioned names') group.add_argument('-N', '--nonversion-link', action='store_true', default=False, help='Create links with nonversioned names to file packages') parser.set_defaults(handler=None) ## ## Derived Package Group derived_group = parser.add_argument_group('Derived Packages', 'Generate other types of packages') derived_group.add_argument('-e', '--excel', action='store_true', default=False, help='Create an excel archive from a metatab file') derived_group.add_argument('-z', '--zip', action='store_true', default=False, help='Create a zip archive from a metatab file') derived_group.add_argument('-f', '--filesystem', action='store_true', default=False, help='Create a filesystem archive from a metatab file') derived_group.add_argument('-c', '--csv', action='store_true', default=False, help='Create a CSV archive from a metatab file') ## ## Administration Group admin_group = parser.add_argument_group('Administration', 'Information and administration') admin_group.add_argument('--clean-cache', default=False, action='store_true', help="Clean the download cache") admin_group.add_argument('-C', '--clean', default=False, action='store_true', help="For some operations, like updating schemas, clear the section of existing terms first")
python
def build(subparsers): """ Build source packages. The mp build program runs all of the resources listed in a Metatab file and produces one or more Metapack packages with those resources localized. It will always try to produce a Filesystem package, and may optionally produce Excel, Zip and CSV packages. Typical usage is to be run inside a source package directory with .. code-block:: bash $ mp build To build all of the package types: .. code-block:: bash $ mp build -fezc By default, packages are built with versioned names. The :option:`--nonversion-name` option will create file packages with non-versioned name, and the :option:`--nonversioned-link` option will produce a non-versioned soft link pointing to the versioned file. """ parser = subparsers.add_parser( 'build', help='Build derived packages', description=build.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog='') parser.set_defaults(run_command=run_metapack) parser.add_argument('metatabfile', nargs='?', help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. " ) parser.add_argument('-p', '--profile', help="Name of a BOTO or AWS credentails profile", required=False) parser.add_argument('-D', '--package-directory', help="Write Zip, Excel and CSV packages to an alternate directory", required=False) parser.add_argument('-F', '--force', action='store_true', default=False, help='Force some operations, like updating the name and building packages') parser.add_argument('-R', '--reuse-resources', action='store_true', default=False, help='When building Filesystem package, try to reuse resources built in prior build') group = parser.add_mutually_exclusive_group() group.add_argument('-n', '--nonversion-name', action='store_true', default=False, help='Write file packages with non-versioned names') group.add_argument('-N', '--nonversion-link', action='store_true', default=False, help='Create links with nonversioned names to file packages') parser.set_defaults(handler=None) ## ## Derived Package Group derived_group = parser.add_argument_group('Derived Packages', 'Generate other types of packages') derived_group.add_argument('-e', '--excel', action='store_true', default=False, help='Create an excel archive from a metatab file') derived_group.add_argument('-z', '--zip', action='store_true', default=False, help='Create a zip archive from a metatab file') derived_group.add_argument('-f', '--filesystem', action='store_true', default=False, help='Create a filesystem archive from a metatab file') derived_group.add_argument('-c', '--csv', action='store_true', default=False, help='Create a CSV archive from a metatab file') ## ## Administration Group admin_group = parser.add_argument_group('Administration', 'Information and administration') admin_group.add_argument('--clean-cache', default=False, action='store_true', help="Clean the download cache") admin_group.add_argument('-C', '--clean', default=False, action='store_true', help="For some operations, like updating schemas, clear the section of existing terms first")
[ "def", "build", "(", "subparsers", ")", ":", "parser", "=", "subparsers", ".", "add_parser", "(", "'build'", ",", "help", "=", "'Build derived packages'", ",", "description", "=", "build", ".", "__doc__", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ",", "epilog", "=", "''", ")", "parser", ".", "set_defaults", "(", "run_command", "=", "run_metapack", ")", "parser", ".", "add_argument", "(", "'metatabfile'", ",", "nargs", "=", "'?'", ",", "help", "=", "\"Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. \"", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--profile'", ",", "help", "=", "\"Name of a BOTO or AWS credentails profile\"", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'-D'", ",", "'--package-directory'", ",", "help", "=", "\"Write Zip, Excel and CSV packages to an alternate directory\"", ",", "required", "=", "False", ")", "parser", ".", "add_argument", "(", "'-F'", ",", "'--force'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Force some operations, like updating the name and building packages'", ")", "parser", ".", "add_argument", "(", "'-R'", ",", "'--reuse-resources'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'When building Filesystem package, try to reuse resources built in prior build'", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "group", ".", "add_argument", "(", "'-n'", ",", "'--nonversion-name'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Write file packages with non-versioned names'", ")", "group", ".", "add_argument", "(", "'-N'", ",", "'--nonversion-link'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create links with nonversioned names to file packages'", ")", "parser", ".", "set_defaults", "(", "handler", "=", "None", ")", "##", "## Derived Package Group", "derived_group", "=", "parser", ".", "add_argument_group", "(", "'Derived Packages'", ",", "'Generate other types of packages'", ")", "derived_group", ".", "add_argument", "(", "'-e'", ",", "'--excel'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create an excel archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-z'", ",", "'--zip'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a zip archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-f'", ",", "'--filesystem'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a filesystem archive from a metatab file'", ")", "derived_group", ".", "add_argument", "(", "'-c'", ",", "'--csv'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Create a CSV archive from a metatab file'", ")", "##", "## Administration Group", "admin_group", "=", "parser", ".", "add_argument_group", "(", "'Administration'", ",", "'Information and administration'", ")", "admin_group", ".", "add_argument", "(", "'--clean-cache'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Clean the download cache\"", ")", "admin_group", ".", "add_argument", "(", "'-C'", ",", "'--clean'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "\"For some operations, like updating schemas, clear the section of existing terms first\"", 
")" ]
Build source packages. The mp build program runs all of the resources listed in a Metatab file and produces one or more Metapack packages with those resources localized. It will always try to produce a Filesystem package, and may optionally produce Excel, Zip and CSV packages. Typical usage is to be run inside a source package directory with .. code-block:: bash $ mp build To build all of the package types: .. code-block:: bash $ mp build -fezc By default, packages are built with versioned names. The :option:`--nonversion-name` option will create file packages with non-versioned name, and the :option:`--nonversioned-link` option will produce a non-versioned soft link pointing to the versioned file.
[ "Build", "source", "packages", "." ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/build.py#L34-L126
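A minimal sketch of the argparse pattern the build command uses: a subparser with store_true flags and a mutually exclusive group for the versioned/non-versioned naming options. Only a few of the real options are reproduced here.

    import argparse

    parser = argparse.ArgumentParser(prog='mp')
    subparsers = parser.add_subparsers(dest='command')

    build = subparsers.add_parser('build', help='Build derived packages')
    build.add_argument('-f', '--filesystem', action='store_true', default=False)
    build.add_argument('-e', '--excel', action='store_true', default=False)

    group = build.add_mutually_exclusive_group()
    group.add_argument('-n', '--nonversion-name', action='store_true', default=False)
    group.add_argument('-N', '--nonversion-link', action='store_true', default=False)

    args = parser.parse_args(['build', '-f', '-n'])
    print(args.filesystem, args.excel, args.nonversion_name)   # True False True

Passing both -n and -N would make parse_args exit with a "not allowed with" error, which is what the mutually exclusive group is for.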
Metatab/metapack
metapack/cli/build.py
metatab_derived_handler
def metatab_derived_handler(m): """Create local Zip, Excel and Filesystem packages :param m: :param skip_if_exists: :return: """ from metapack.exc import PackageError from metapack.util import get_materialized_data_cache from shutil import rmtree create_list = [] url = None doc = MetapackDoc(m.mt_file) env = get_lib_module_dict(doc) package_dir = m.package_root if m.args.package_directory: # If this is set, the FS package will be built to m.package_root, but the # file packages will be built to package_dir package_dir = parse_app_url(m.args.package_directory) update_name(m.mt_file, fail_on_missing=False, report_unchanged=False) process_schemas(m.mt_file, cache=m.cache, clean=m.args.clean, report_found=False) nv_name = m.args.nonversion_name nv_link = m.args.nonversion_link # Remove any data that may have been cached , for instance, from Jupyter notebooks rmtree(get_materialized_data_cache(doc), ignore_errors=True) reuse_resources=m.args.reuse_resources try: # Always create a filesystem package before ZIP or Excel, so we can use it as a source for # data for the other packages. This means that Transform processes and programs only need # to be run once. _, url, created = make_filesystem_package(m.mt_file, m.package_root, m.cache, env, m.args.force, False, nv_link, reuse_resources=reuse_resources) create_list.append(('fs', url, created)) lb_path = Path( m.package_root.fspath,'last_build') if created or not lb_path.exists(): Path( m.package_root.fspath,'last_build').touch() m.mt_file = url env = {} # Don't need it anymore, since no more programs will be run. if m.args.excel is not False: _, url, created = make_excel_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('xlsx', url, created)) if m.args.zip is not False: _, url, created = make_zip_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('zip', url, created)) if m.args.csv is not False: _, url, created = make_csv_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('csv', url, created)) except PackageError as e: err("Failed to generate package: {}".format(e)) index_packages(m) return create_list
python
def metatab_derived_handler(m): """Create local Zip, Excel and Filesystem packages :param m: :param skip_if_exists: :return: """ from metapack.exc import PackageError from metapack.util import get_materialized_data_cache from shutil import rmtree create_list = [] url = None doc = MetapackDoc(m.mt_file) env = get_lib_module_dict(doc) package_dir = m.package_root if m.args.package_directory: # If this is set, the FS package will be built to m.package_root, but the # file packages will be built to package_dir package_dir = parse_app_url(m.args.package_directory) update_name(m.mt_file, fail_on_missing=False, report_unchanged=False) process_schemas(m.mt_file, cache=m.cache, clean=m.args.clean, report_found=False) nv_name = m.args.nonversion_name nv_link = m.args.nonversion_link # Remove any data that may have been cached , for instance, from Jupyter notebooks rmtree(get_materialized_data_cache(doc), ignore_errors=True) reuse_resources=m.args.reuse_resources try: # Always create a filesystem package before ZIP or Excel, so we can use it as a source for # data for the other packages. This means that Transform processes and programs only need # to be run once. _, url, created = make_filesystem_package(m.mt_file, m.package_root, m.cache, env, m.args.force, False, nv_link, reuse_resources=reuse_resources) create_list.append(('fs', url, created)) lb_path = Path( m.package_root.fspath,'last_build') if created or not lb_path.exists(): Path( m.package_root.fspath,'last_build').touch() m.mt_file = url env = {} # Don't need it anymore, since no more programs will be run. if m.args.excel is not False: _, url, created = make_excel_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('xlsx', url, created)) if m.args.zip is not False: _, url, created = make_zip_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('zip', url, created)) if m.args.csv is not False: _, url, created = make_csv_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link) create_list.append(('csv', url, created)) except PackageError as e: err("Failed to generate package: {}".format(e)) index_packages(m) return create_list
[ "def", "metatab_derived_handler", "(", "m", ")", ":", "from", "metapack", ".", "exc", "import", "PackageError", "from", "metapack", ".", "util", "import", "get_materialized_data_cache", "from", "shutil", "import", "rmtree", "create_list", "=", "[", "]", "url", "=", "None", "doc", "=", "MetapackDoc", "(", "m", ".", "mt_file", ")", "env", "=", "get_lib_module_dict", "(", "doc", ")", "package_dir", "=", "m", ".", "package_root", "if", "m", ".", "args", ".", "package_directory", ":", "# If this is set, the FS package will be built to m.package_root, but the", "# file packages will be built to package_dir", "package_dir", "=", "parse_app_url", "(", "m", ".", "args", ".", "package_directory", ")", "update_name", "(", "m", ".", "mt_file", ",", "fail_on_missing", "=", "False", ",", "report_unchanged", "=", "False", ")", "process_schemas", "(", "m", ".", "mt_file", ",", "cache", "=", "m", ".", "cache", ",", "clean", "=", "m", ".", "args", ".", "clean", ",", "report_found", "=", "False", ")", "nv_name", "=", "m", ".", "args", ".", "nonversion_name", "nv_link", "=", "m", ".", "args", ".", "nonversion_link", "# Remove any data that may have been cached , for instance, from Jupyter notebooks", "rmtree", "(", "get_materialized_data_cache", "(", "doc", ")", ",", "ignore_errors", "=", "True", ")", "reuse_resources", "=", "m", ".", "args", ".", "reuse_resources", "try", ":", "# Always create a filesystem package before ZIP or Excel, so we can use it as a source for", "# data for the other packages. This means that Transform processes and programs only need", "# to be run once.", "_", ",", "url", ",", "created", "=", "make_filesystem_package", "(", "m", ".", "mt_file", ",", "m", ".", "package_root", ",", "m", ".", "cache", ",", "env", ",", "m", ".", "args", ".", "force", ",", "False", ",", "nv_link", ",", "reuse_resources", "=", "reuse_resources", ")", "create_list", ".", "append", "(", "(", "'fs'", ",", "url", ",", "created", ")", ")", "lb_path", "=", "Path", "(", "m", ".", "package_root", ".", "fspath", ",", "'last_build'", ")", "if", "created", "or", "not", "lb_path", ".", "exists", "(", ")", ":", "Path", "(", "m", ".", "package_root", ".", "fspath", ",", "'last_build'", ")", ".", "touch", "(", ")", "m", ".", "mt_file", "=", "url", "env", "=", "{", "}", "# Don't need it anymore, since no more programs will be run.", "if", "m", ".", "args", ".", "excel", "is", "not", "False", ":", "_", ",", "url", ",", "created", "=", "make_excel_package", "(", "m", ".", "mt_file", ",", "package_dir", ",", "m", ".", "cache", ",", "env", ",", "m", ".", "args", ".", "force", ",", "nv_name", ",", "nv_link", ")", "create_list", ".", "append", "(", "(", "'xlsx'", ",", "url", ",", "created", ")", ")", "if", "m", ".", "args", ".", "zip", "is", "not", "False", ":", "_", ",", "url", ",", "created", "=", "make_zip_package", "(", "m", ".", "mt_file", ",", "package_dir", ",", "m", ".", "cache", ",", "env", ",", "m", ".", "args", ".", "force", ",", "nv_name", ",", "nv_link", ")", "create_list", ".", "append", "(", "(", "'zip'", ",", "url", ",", "created", ")", ")", "if", "m", ".", "args", ".", "csv", "is", "not", "False", ":", "_", ",", "url", ",", "created", "=", "make_csv_package", "(", "m", ".", "mt_file", ",", "package_dir", ",", "m", ".", "cache", ",", "env", ",", "m", ".", "args", ".", "force", ",", "nv_name", ",", "nv_link", ")", "create_list", ".", "append", "(", "(", "'csv'", ",", "url", ",", "created", ")", ")", "except", "PackageError", "as", "e", ":", "err", "(", "\"Failed to generate package: {}\"", ".", "format", "(", 
"e", ")", ")", "index_packages", "(", "m", ")", "return", "create_list" ]
Create local Zip, Excel and Filesystem packages :param m: :param skip_if_exists: :return:
[ "Create", "local", "Zip", "Excel", "and", "Filesystem", "packages" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/build.py#L161-L234
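A rough sketch of the build ordering this handler enforces: the filesystem package is produced first and its URL is then fed to the derived builders, so resource transforms only run once. The builder callables below are stand-ins, not Metapack APIs.

    def build_all(make_fs, derived_makers, force=False):
        create_list = []
        _, url, created = make_fs(force)            # filesystem package first
        create_list.append(('fs', url, created))
        for kind, make in derived_makers:
            _, d_url, d_created = make(url, force)  # derived packages read from it
            create_list.append((kind, d_url, d_created))
        return create_list

    fake_fs = lambda force: (None, 'file:example-pkg/', True)
    fake_zip = lambda source, force: (None, source + 'example-pkg.zip', True)
    print(build_all(fake_fs, [('zip', fake_zip)]))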
Metatab/metapack
metapack/jupyter/__init__.py
init
def init(): """Initialize features that are normally initialized in the CLI""" from metapack.appurl import SearchUrl import metapack as mp from os import environ SearchUrl.initialize() # This makes the 'index:" urls work mp.Downloader.context.update(environ)
python
def init(): """Initialize features that are normally initialized in the CLI""" from metapack.appurl import SearchUrl import metapack as mp from os import environ SearchUrl.initialize() # This makes the 'index:" urls work mp.Downloader.context.update(environ)
[ "def", "init", "(", ")", ":", "from", "metapack", ".", "appurl", "import", "SearchUrl", "import", "metapack", "as", "mp", "from", "os", "import", "environ", "SearchUrl", ".", "initialize", "(", ")", "# This makes the 'index:\" urls work", "mp", ".", "Downloader", ".", "context", ".", "update", "(", "environ", ")" ]
Initialize features that are normally initialized in the CLI
[ "Initialize", "features", "that", "are", "normally", "initialized", "in", "the", "CLI" ]
train
https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/__init__.py#L10-L18
project-rig/rig
rig/machine_control/struct_file.py
read_struct_file
def read_struct_file(struct_data): """Interpret a struct file defining the location of variables in memory. Parameters ---------- struct_data : :py:class:`bytes` String of :py:class:`bytes` containing data to interpret as the struct definition. Returns ------- {struct_name: :py:class:`~.Struct`} A dictionary mapping the struct name to a :py:class:`~.Struct` instance. **Note:** the struct name will be a string of bytes, e.g., `b"vcpu"`. """ # Holders for all structs structs = dict() # Holders for the current struct name = None # Iterate over every line in the file for i, l in enumerate(struct_data.splitlines()): # Empty the line of comments, if the line is empty then skip to the # next line. Split on whitespace to get the tokens. tokens = re_comment.sub(b"", l).strip().split() if len(tokens) == 0: continue elif len(tokens) == 3: # 3 tokens implies header data (key, _, value) = tokens if key == b"name": if name is not None: if structs[name].size is None: raise ValueError( "size value missing for struct '{}'".format(name)) if structs[name].base is None: raise ValueError( "base value missing for struct '{}'".format(name)) name = value structs[name] = Struct(name) elif key == b"size": structs[name].size = num(value) elif key == b"base": structs[name].base = num(value) else: raise ValueError(key) elif len(tokens) == 5: # 5 tokens implies entry in struct. (field, pack, offset, printf, default) = tokens # Convert the packing character from Perl to Python standard num_pack = re_numbered_pack.match(pack) if num_pack is not None: pack = (num_pack.group("num") + perl_to_python_packs[num_pack.group("char")]) else: pack = perl_to_python_packs[pack] # If the field is an array then extract the length length = 1 field_exp = re_array_field.match(field) if field_exp is not None: field = field_exp.group("field") length = num(field_exp.group("length")) structs[name][field] = StructField(pack, num(offset), printf, num(default), length) else: raise ValueError( "line {}: Invalid syntax in struct file".format(i)) # Final check for setting size and base if structs[name].size is None: raise ValueError( "size value missing for struct '{}'".format(name)) if structs[name].base is None: raise ValueError( "base value missing for struct '{}'".format(name)) return structs
python
def read_struct_file(struct_data): """Interpret a struct file defining the location of variables in memory. Parameters ---------- struct_data : :py:class:`bytes` String of :py:class:`bytes` containing data to interpret as the struct definition. Returns ------- {struct_name: :py:class:`~.Struct`} A dictionary mapping the struct name to a :py:class:`~.Struct` instance. **Note:** the struct name will be a string of bytes, e.g., `b"vcpu"`. """ # Holders for all structs structs = dict() # Holders for the current struct name = None # Iterate over every line in the file for i, l in enumerate(struct_data.splitlines()): # Empty the line of comments, if the line is empty then skip to the # next line. Split on whitespace to get the tokens. tokens = re_comment.sub(b"", l).strip().split() if len(tokens) == 0: continue elif len(tokens) == 3: # 3 tokens implies header data (key, _, value) = tokens if key == b"name": if name is not None: if structs[name].size is None: raise ValueError( "size value missing for struct '{}'".format(name)) if structs[name].base is None: raise ValueError( "base value missing for struct '{}'".format(name)) name = value structs[name] = Struct(name) elif key == b"size": structs[name].size = num(value) elif key == b"base": structs[name].base = num(value) else: raise ValueError(key) elif len(tokens) == 5: # 5 tokens implies entry in struct. (field, pack, offset, printf, default) = tokens # Convert the packing character from Perl to Python standard num_pack = re_numbered_pack.match(pack) if num_pack is not None: pack = (num_pack.group("num") + perl_to_python_packs[num_pack.group("char")]) else: pack = perl_to_python_packs[pack] # If the field is an array then extract the length length = 1 field_exp = re_array_field.match(field) if field_exp is not None: field = field_exp.group("field") length = num(field_exp.group("length")) structs[name][field] = StructField(pack, num(offset), printf, num(default), length) else: raise ValueError( "line {}: Invalid syntax in struct file".format(i)) # Final check for setting size and base if structs[name].size is None: raise ValueError( "size value missing for struct '{}'".format(name)) if structs[name].base is None: raise ValueError( "base value missing for struct '{}'".format(name)) return structs
[ "def", "read_struct_file", "(", "struct_data", ")", ":", "# Holders for all structs", "structs", "=", "dict", "(", ")", "# Holders for the current struct", "name", "=", "None", "# Iterate over every line in the file", "for", "i", ",", "l", "in", "enumerate", "(", "struct_data", ".", "splitlines", "(", ")", ")", ":", "# Empty the line of comments, if the line is empty then skip to the", "# next line. Split on whitespace to get the tokens.", "tokens", "=", "re_comment", ".", "sub", "(", "b\"\"", ",", "l", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "==", "0", ":", "continue", "elif", "len", "(", "tokens", ")", "==", "3", ":", "# 3 tokens implies header data", "(", "key", ",", "_", ",", "value", ")", "=", "tokens", "if", "key", "==", "b\"name\"", ":", "if", "name", "is", "not", "None", ":", "if", "structs", "[", "name", "]", ".", "size", "is", "None", ":", "raise", "ValueError", "(", "\"size value missing for struct '{}'\"", ".", "format", "(", "name", ")", ")", "if", "structs", "[", "name", "]", ".", "base", "is", "None", ":", "raise", "ValueError", "(", "\"base value missing for struct '{}'\"", ".", "format", "(", "name", ")", ")", "name", "=", "value", "structs", "[", "name", "]", "=", "Struct", "(", "name", ")", "elif", "key", "==", "b\"size\"", ":", "structs", "[", "name", "]", ".", "size", "=", "num", "(", "value", ")", "elif", "key", "==", "b\"base\"", ":", "structs", "[", "name", "]", ".", "base", "=", "num", "(", "value", ")", "else", ":", "raise", "ValueError", "(", "key", ")", "elif", "len", "(", "tokens", ")", "==", "5", ":", "# 5 tokens implies entry in struct.", "(", "field", ",", "pack", ",", "offset", ",", "printf", ",", "default", ")", "=", "tokens", "# Convert the packing character from Perl to Python standard", "num_pack", "=", "re_numbered_pack", ".", "match", "(", "pack", ")", "if", "num_pack", "is", "not", "None", ":", "pack", "=", "(", "num_pack", ".", "group", "(", "\"num\"", ")", "+", "perl_to_python_packs", "[", "num_pack", ".", "group", "(", "\"char\"", ")", "]", ")", "else", ":", "pack", "=", "perl_to_python_packs", "[", "pack", "]", "# If the field is an array then extract the length", "length", "=", "1", "field_exp", "=", "re_array_field", ".", "match", "(", "field", ")", "if", "field_exp", "is", "not", "None", ":", "field", "=", "field_exp", ".", "group", "(", "\"field\"", ")", "length", "=", "num", "(", "field_exp", ".", "group", "(", "\"length\"", ")", ")", "structs", "[", "name", "]", "[", "field", "]", "=", "StructField", "(", "pack", ",", "num", "(", "offset", ")", ",", "printf", ",", "num", "(", "default", ")", ",", "length", ")", "else", ":", "raise", "ValueError", "(", "\"line {}: Invalid syntax in struct file\"", ".", "format", "(", "i", ")", ")", "# Final check for setting size and base", "if", "structs", "[", "name", "]", ".", "size", "is", "None", ":", "raise", "ValueError", "(", "\"size value missing for struct '{}'\"", ".", "format", "(", "name", ")", ")", "if", "structs", "[", "name", "]", ".", "base", "is", "None", ":", "raise", "ValueError", "(", "\"base value missing for struct '{}'\"", ".", "format", "(", "name", ")", ")", "return", "structs" ]
Interpret a struct file defining the location of variables in memory. Parameters ---------- struct_data : :py:class:`bytes` String of :py:class:`bytes` containing data to interpret as the struct definition. Returns ------- {struct_name: :py:class:`~.Struct`} A dictionary mapping the struct name to a :py:class:`~.Struct` instance. **Note:** the struct name will be a string of bytes, e.g., `b"vcpu"`.
[ "Interpret", "a", "struct", "file", "defining", "the", "location", "of", "variables", "in", "memory", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/struct_file.py#L9-L91
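The parser above classifies each line by token count after stripping comments: 3 tokens form a header entry (name, size, base) and 5 tokens form a field definition. A small, self-contained illustration of that classification; the comment regex and the miniature struct text are assumptions, not the module's actual data.

    import re

    re_comment = re.compile(rb"#.*$")   # assumed shape of the module's comment pattern

    struct_data = b"""
    # a miniature, illustrative struct definition
    name = sv
    size = 0x100
    base = 0x70000000
    """

    for line in struct_data.splitlines():
        tokens = re_comment.sub(b"", line).strip().split()
        if len(tokens) == 3:
            key, _, value = tokens
            print(key, value)   # b'name' b'sv', b'size' b'0x100', b'base' b'0x70000000'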
project-rig/rig
rig/machine_control/struct_file.py
num
def num(value): """Convert a value from one of several bases to an int.""" if re_hex_num.match(value): return int(value, base=16) else: return int(value)
python
def num(value): """Convert a value from one of several bases to an int.""" if re_hex_num.match(value): return int(value, base=16) else: return int(value)
[ "def", "num", "(", "value", ")", ":", "if", "re_hex_num", ".", "match", "(", "value", ")", ":", "return", "int", "(", "value", ",", "base", "=", "16", ")", "else", ":", "return", "int", "(", "value", ")" ]
Convert a value from one of several bases to an int.
[ "Convert", "a", "value", "from", "one", "of", "several", "bases", "to", "an", "int", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/struct_file.py#L101-L106
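The helper accepts either decimal or 0x-prefixed hexadecimal byte strings. A self-contained equivalent for illustration; the exact hex-detection regex is an assumption.

    import re

    re_hex_num = re.compile(rb"0[xX][0-9a-fA-F]+")   # assumed detection pattern

    def num(value):
        if re_hex_num.match(value):
            return int(value, base=16)
        return int(value)

    print(num(b"0x100"), num(b"42"))   # 256 42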
project-rig/rig
rig/machine_control/struct_file.py
Struct.update_default_values
def update_default_values(self, **updates): """Replace the default values of specified fields. Parameters ---------- Parameters are taken as keyword-arguments of `field=new_value`. Raises ------ KeyError If a field doesn't exist in the struct. """ for (field, value) in six.iteritems(updates): fname = six.b(field) self[fname] = self[fname]._replace(default=value)
python
def update_default_values(self, **updates): """Replace the default values of specified fields. Parameters ---------- Parameters are taken as keyword-arguments of `field=new_value`. Raises ------ KeyError If a field doesn't exist in the struct. """ for (field, value) in six.iteritems(updates): fname = six.b(field) self[fname] = self[fname]._replace(default=value)
[ "def", "update_default_values", "(", "self", ",", "*", "*", "updates", ")", ":", "for", "(", "field", ",", "value", ")", "in", "six", ".", "iteritems", "(", "updates", ")", ":", "fname", "=", "six", ".", "b", "(", "field", ")", "self", "[", "fname", "]", "=", "self", "[", "fname", "]", ".", "_replace", "(", "default", "=", "value", ")" ]
Replace the default values of specified fields. Parameters ---------- Parameters are taken as keyword-arguments of `field=new_value`. Raises ------ KeyError If a field doesn't exist in the struct.
[ "Replace", "the", "default", "values", "of", "specified", "fields", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/struct_file.py#L132-L146
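The fields are stored as namedtuples, so replacing a default means building a new tuple with _replace and writing it back. A small sketch under the assumption that StructField is a namedtuple with (at least) pack_chars, offset, default and length members; any field name beyond those used in Struct.pack is a guess.

    from collections import namedtuple

    StructField = namedtuple("StructField",
                             "pack_chars offset printf_string default length")

    fields = {b"p2p_addr": StructField(b"2B", 0x00, b"%04x", 0, 1)}

    # Equivalent effect of update_default_values(p2p_addr=0x0102)
    fields[b"p2p_addr"] = fields[b"p2p_addr"]._replace(default=0x0102)
    print(fields[b"p2p_addr"].default)   # 258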
project-rig/rig
rig/machine_control/struct_file.py
Struct.pack
def pack(self): """Pack the struct (and its default values) into a string of bytes. Returns ------- :py:class:`bytes` Byte-string representation of struct containing default values. """ # Generate a buffer big enough to hold the packed values data = bytearray(b"\x00" * self.size) # Iterate over the fields, pack each value in little-endian format and # insert into the buffered data. for field in six.itervalues(self.fields): packed_data = struct.pack(b"<" + field.pack_chars, field.default) data[field.offset:len(packed_data)+field.offset] = packed_data return bytes(data)
python
def pack(self): """Pack the struct (and its default values) into a string of bytes. Returns ------- :py:class:`bytes` Byte-string representation of struct containing default values. """ # Generate a buffer big enough to hold the packed values data = bytearray(b"\x00" * self.size) # Iterate over the fields, pack each value in little-endian format and # insert into the buffered data. for field in six.itervalues(self.fields): packed_data = struct.pack(b"<" + field.pack_chars, field.default) data[field.offset:len(packed_data)+field.offset] = packed_data return bytes(data)
[ "def", "pack", "(", "self", ")", ":", "# Generate a buffer big enough to hold the packed values", "data", "=", "bytearray", "(", "b\"\\x00\"", "*", "self", ".", "size", ")", "# Iterate over the fields, pack each value in little-endian format and", "# insert into the buffered data.", "for", "field", "in", "six", ".", "itervalues", "(", "self", ".", "fields", ")", ":", "packed_data", "=", "struct", ".", "pack", "(", "b\"<\"", "+", "field", ".", "pack_chars", ",", "field", ".", "default", ")", "data", "[", "field", ".", "offset", ":", "len", "(", "packed_data", ")", "+", "field", ".", "offset", "]", "=", "packed_data", "return", "bytes", "(", "data", ")" ]
Pack the struct (and its default values) into a string of bytes. Returns ------- :py:class:`bytes` Byte-string representation of struct containing default values.
[ "Pack", "the", "struct", "(", "and", "its", "default", "values", ")", "into", "a", "string", "of", "bytes", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/struct_file.py#L159-L176
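The packing loop above writes each field's default, little-endian, at its byte offset inside a zeroed buffer. A tiny standalone demonstration with two made-up fields:

    import struct

    size = 8
    data = bytearray(b"\x00" * size)

    for pack_chars, offset, default in ((b"I", 0, 0xAABBCCDD), (b"H", 4, 0x1234)):
        packed = struct.pack(b"<" + pack_chars, default)
        data[offset:offset + len(packed)] = packed

    print(bytes(data).hex())   # ddccbbaa34120000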
NicolasLM/spinach
spinach/brokers/base.py
Broker.next_future_job_delta
def next_future_job_delta(self) -> Optional[float]: """Give the amount of seconds before the next future job is due.""" job = self._get_next_future_job() if not job: return None return (job.at - datetime.now(timezone.utc)).total_seconds()
python
def next_future_job_delta(self) -> Optional[float]: """Give the amount of seconds before the next future job is due.""" job = self._get_next_future_job() if not job: return None return (job.at - datetime.now(timezone.utc)).total_seconds()
[ "def", "next_future_job_delta", "(", "self", ")", "->", "Optional", "[", "float", "]", ":", "job", "=", "self", ".", "_get_next_future_job", "(", ")", "if", "not", "job", ":", "return", "None", "return", "(", "job", ".", "at", "-", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", ")", ".", "total_seconds", "(", ")" ]
Give the amount of seconds before the next future job is due.
[ "Give", "the", "amount", "of", "seconds", "before", "the", "next", "future", "job", "is", "due", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/brokers/base.py#L102-L107
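The delta is simply the timezone-aware difference between the job's due time and now, in seconds. A quick standalone check with a made-up job time:

    from datetime import datetime, timedelta, timezone

    job_at = datetime.now(timezone.utc) + timedelta(seconds=30)

    # Positive means the job is still in the future; the broker returns None
    # when there is no future job at all.
    delta = (job_at - datetime.now(timezone.utc)).total_seconds()
    print(round(delta))   # ~30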
project-rig/rig
rig/machine_control/utils.py
sdram_alloc_for_vertices
def sdram_alloc_for_vertices(controller, placements, allocations, core_as_tag=True, sdram_resource=SDRAM, cores_resource=Cores, clear=False): """Allocate and return a file-like view of a region of SDRAM for each vertex which uses SDRAM as a resource. The tag assigned to each region of assigned SDRAM is the index of the first core that each vertex is assigned. For example:: placements = {vertex: (0, 5)} allocations = {vertex: {Cores: slice(3, 6), SDRAM: slice(204, 304)}} sdram_allocations = sdram_alloc_for_vertices( controller, placements, allocations ) Will allocate a 100-byte block of SDRAM for the vertex which is allocated cores 3-5 on chip (0, 5). The region of SDRAM will be tagged `3` (because this is the index of the first core). Parameters ---------- controller : :py:class:`rig.machine_control.MachineController` Controller to use to allocate the SDRAM. placements : {vertex: (x, y), ...} Mapping of vertices to the chips they have been placed on. Same as produced by placers. allocations : {vertex: {resource: allocation, ...}, ...} Mapping of vertices to the resources they have been allocated. A block of memory of the size specified by the `sdram_resource` (default: :py:class:`~rig.place_and_route.SDRAM`) resource will be allocated for each vertex. Note that location of the supplied allocation is *not* used. When `core_as_tag=True`, the tag allocated will be the ID of the first core used by the vertex (indicated by the `cores_resource`, default :py:class:`~rig.place_and_route.Cores`), otherwise the tag will be set to 0. clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Other Parameters ---------------- core_as_tag : bool Use the index of the first allocated core as the tag for the region of memory, otherwise 0 will be used. sdram_resource : resource (default :py:class:`~rig.place_and_route.SDRAM`) Key used to indicate SDRAM usage in the resources dictionary. cores_resource : resource (default :py:class:`~rig.place_and_route.Cores`) Key used to indicate cores which have been allocated in the allocations dictionary. Returns ------- {vertex: :py:class:`.MemoryIO`, ...} A file-like object for each vertex which can be used to read and write to the region of SDRAM allocated to the vertex. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or a tag is already taken or invalid. """ # For each vertex we perform an SDRAM alloc to get a file-like for # the vertex. vertex_memory = dict() for vertex, allocs in six.iteritems(allocations): if sdram_resource in allocs: sdram_slice = allocs[sdram_resource] assert sdram_slice.step is None size = sdram_slice.stop - sdram_slice.start x, y = placements[vertex] if core_as_tag: tag = allocs[cores_resource].start else: tag = 0 # Get the memory vertex_memory[vertex] = controller.sdram_alloc_as_filelike( size, tag, x=x, y=y, clear=clear ) return vertex_memory
python
def sdram_alloc_for_vertices(controller, placements, allocations, core_as_tag=True, sdram_resource=SDRAM, cores_resource=Cores, clear=False): """Allocate and return a file-like view of a region of SDRAM for each vertex which uses SDRAM as a resource. The tag assigned to each region of assigned SDRAM is the index of the first core that each vertex is assigned. For example:: placements = {vertex: (0, 5)} allocations = {vertex: {Cores: slice(3, 6), SDRAM: slice(204, 304)}} sdram_allocations = sdram_alloc_for_vertices( controller, placements, allocations ) Will allocate a 100-byte block of SDRAM for the vertex which is allocated cores 3-5 on chip (0, 5). The region of SDRAM will be tagged `3` (because this is the index of the first core). Parameters ---------- controller : :py:class:`rig.machine_control.MachineController` Controller to use to allocate the SDRAM. placements : {vertex: (x, y), ...} Mapping of vertices to the chips they have been placed on. Same as produced by placers. allocations : {vertex: {resource: allocation, ...}, ...} Mapping of vertices to the resources they have been allocated. A block of memory of the size specified by the `sdram_resource` (default: :py:class:`~rig.place_and_route.SDRAM`) resource will be allocated for each vertex. Note that location of the supplied allocation is *not* used. When `core_as_tag=True`, the tag allocated will be the ID of the first core used by the vertex (indicated by the `cores_resource`, default :py:class:`~rig.place_and_route.Cores`), otherwise the tag will be set to 0. clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Other Parameters ---------------- core_as_tag : bool Use the index of the first allocated core as the tag for the region of memory, otherwise 0 will be used. sdram_resource : resource (default :py:class:`~rig.place_and_route.SDRAM`) Key used to indicate SDRAM usage in the resources dictionary. cores_resource : resource (default :py:class:`~rig.place_and_route.Cores`) Key used to indicate cores which have been allocated in the allocations dictionary. Returns ------- {vertex: :py:class:`.MemoryIO`, ...} A file-like object for each vertex which can be used to read and write to the region of SDRAM allocated to the vertex. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or a tag is already taken or invalid. """ # For each vertex we perform an SDRAM alloc to get a file-like for # the vertex. vertex_memory = dict() for vertex, allocs in six.iteritems(allocations): if sdram_resource in allocs: sdram_slice = allocs[sdram_resource] assert sdram_slice.step is None size = sdram_slice.stop - sdram_slice.start x, y = placements[vertex] if core_as_tag: tag = allocs[cores_resource].start else: tag = 0 # Get the memory vertex_memory[vertex] = controller.sdram_alloc_as_filelike( size, tag, x=x, y=y, clear=clear ) return vertex_memory
[ "def", "sdram_alloc_for_vertices", "(", "controller", ",", "placements", ",", "allocations", ",", "core_as_tag", "=", "True", ",", "sdram_resource", "=", "SDRAM", ",", "cores_resource", "=", "Cores", ",", "clear", "=", "False", ")", ":", "# For each vertex we perform an SDRAM alloc to get a file-like for", "# the vertex.", "vertex_memory", "=", "dict", "(", ")", "for", "vertex", ",", "allocs", "in", "six", ".", "iteritems", "(", "allocations", ")", ":", "if", "sdram_resource", "in", "allocs", ":", "sdram_slice", "=", "allocs", "[", "sdram_resource", "]", "assert", "sdram_slice", ".", "step", "is", "None", "size", "=", "sdram_slice", ".", "stop", "-", "sdram_slice", ".", "start", "x", ",", "y", "=", "placements", "[", "vertex", "]", "if", "core_as_tag", ":", "tag", "=", "allocs", "[", "cores_resource", "]", ".", "start", "else", ":", "tag", "=", "0", "# Get the memory", "vertex_memory", "[", "vertex", "]", "=", "controller", ".", "sdram_alloc_as_filelike", "(", "size", ",", "tag", ",", "x", "=", "x", ",", "y", "=", "y", ",", "clear", "=", "clear", ")", "return", "vertex_memory" ]
Allocate and return a file-like view of a region of SDRAM for each vertex which uses SDRAM as a resource. The tag assigned to each region of assigned SDRAM is the index of the first core that each vertex is assigned. For example:: placements = {vertex: (0, 5)} allocations = {vertex: {Cores: slice(3, 6), SDRAM: slice(204, 304)}} sdram_allocations = sdram_alloc_for_vertices( controller, placements, allocations ) Will allocate a 100-byte block of SDRAM for the vertex which is allocated cores 3-5 on chip (0, 5). The region of SDRAM will be tagged `3` (because this is the index of the first core). Parameters ---------- controller : :py:class:`rig.machine_control.MachineController` Controller to use to allocate the SDRAM. placements : {vertex: (x, y), ...} Mapping of vertices to the chips they have been placed on. Same as produced by placers. allocations : {vertex: {resource: allocation, ...}, ...} Mapping of vertices to the resources they have been allocated. A block of memory of the size specified by the `sdram_resource` (default: :py:class:`~rig.place_and_route.SDRAM`) resource will be allocated for each vertex. Note that location of the supplied allocation is *not* used. When `core_as_tag=True`, the tag allocated will be the ID of the first core used by the vertex (indicated by the `cores_resource`, default :py:class:`~rig.place_and_route.Cores`), otherwise the tag will be set to 0. clear : bool If True the requested memory will be filled with zeros before the pointer is returned. If False (the default) the memory will be left as-is. Other Parameters ---------------- core_as_tag : bool Use the index of the first allocated core as the tag for the region of memory, otherwise 0 will be used. sdram_resource : resource (default :py:class:`~rig.place_and_route.SDRAM`) Key used to indicate SDRAM usage in the resources dictionary. cores_resource : resource (default :py:class:`~rig.place_and_route.Cores`) Key used to indicate cores which have been allocated in the allocations dictionary. Returns ------- {vertex: :py:class:`.MemoryIO`, ...} A file-like object for each vertex which can be used to read and write to the region of SDRAM allocated to the vertex. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or a tag is already taken or invalid.
[ "Allocate", "and", "return", "a", "file", "-", "like", "view", "of", "a", "region", "of", "SDRAM", "for", "each", "vertex", "which", "uses", "SDRAM", "as", "a", "resource", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/utils.py#L6-L94
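A hedged follow-up sketch showing how the returned file-likes might be used; the two-word header written here is purely illustrative::

    import struct

    vertex_memory = sdram_alloc_for_vertices(controller, placements, allocations)
    for vertex, mem in vertex_memory.items():
        mem.seek(0)
        mem.write(struct.pack("<2I", 0, 0))   # e.g. zero an assumed two-word header per vertex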
NicolasLM/spinach
spinach/job.py
advance_job_status
def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): """Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals. """ duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info('Finished execution of %s in %s', job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info('Retry requested during execution %d/%d of %s ' 'after %s, retry in %s', *log_args) else: logger.warning('Error during execution %d/%d of %s after %s, ' 'retry in %s', *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( 'Error during execution %d/%d of %s after %s', job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
python
def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]): """Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals. """ duration = human_duration(duration) if not err: job.status = JobStatus.SUCCEEDED logger.info('Finished execution of %s in %s', job, duration) return if job.should_retry: job.status = JobStatus.NOT_SET job.retries += 1 if isinstance(err, RetryException) and err.at is not None: job.at = err.at else: job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries)) signals.job_schedule_retry.send(namespace, job=job, err=err) log_args = ( job.retries, job.max_retries + 1, job, duration, human_duration( (job.at - datetime.now(tz=timezone.utc)).total_seconds() ) ) if isinstance(err, RetryException): logger.info('Retry requested during execution %d/%d of %s ' 'after %s, retry in %s', *log_args) else: logger.warning('Error during execution %d/%d of %s after %s, ' 'retry in %s', *log_args) return job.status = JobStatus.FAILED signals.job_failed.send(namespace, job=job, err=err) logger.error( 'Error during execution %d/%d of %s after %s', job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err )
[ "def", "advance_job_status", "(", "namespace", ":", "str", ",", "job", ":", "Job", ",", "duration", ":", "float", ",", "err", ":", "Optional", "[", "Exception", "]", ")", ":", "duration", "=", "human_duration", "(", "duration", ")", "if", "not", "err", ":", "job", ".", "status", "=", "JobStatus", ".", "SUCCEEDED", "logger", ".", "info", "(", "'Finished execution of %s in %s'", ",", "job", ",", "duration", ")", "return", "if", "job", ".", "should_retry", ":", "job", ".", "status", "=", "JobStatus", ".", "NOT_SET", "job", ".", "retries", "+=", "1", "if", "isinstance", "(", "err", ",", "RetryException", ")", "and", "err", ".", "at", "is", "not", "None", ":", "job", ".", "at", "=", "err", ".", "at", "else", ":", "job", ".", "at", "=", "(", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", "+", "exponential_backoff", "(", "job", ".", "retries", ")", ")", "signals", ".", "job_schedule_retry", ".", "send", "(", "namespace", ",", "job", "=", "job", ",", "err", "=", "err", ")", "log_args", "=", "(", "job", ".", "retries", ",", "job", ".", "max_retries", "+", "1", ",", "job", ",", "duration", ",", "human_duration", "(", "(", "job", ".", "at", "-", "datetime", ".", "now", "(", "tz", "=", "timezone", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", ")", "if", "isinstance", "(", "err", ",", "RetryException", ")", ":", "logger", ".", "info", "(", "'Retry requested during execution %d/%d of %s '", "'after %s, retry in %s'", ",", "*", "log_args", ")", "else", ":", "logger", ".", "warning", "(", "'Error during execution %d/%d of %s after %s, '", "'retry in %s'", ",", "*", "log_args", ")", "return", "job", ".", "status", "=", "JobStatus", ".", "FAILED", "signals", ".", "job_failed", ".", "send", "(", "namespace", ",", "job", "=", "job", ",", "err", "=", "err", ")", "logger", ".", "error", "(", "'Error during execution %d/%d of %s after %s'", ",", "job", ".", "max_retries", "+", "1", ",", "job", ".", "max_retries", "+", "1", ",", "job", ",", "duration", ",", "exc_info", "=", "err", ")" ]
Advance the status of a job depending on its execution. This function is called after a job has been executed. It calculates its next status and calls the appropriate signals.
[ "Advance", "the", "status", "of", "a", "job", "depending", "on", "its", "execution", "." ]
train
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/job.py#L152-L197
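The three possible outcomes, sketched for a hypothetical job that still has retries left (the call sites below are illustrative, not from the source)::

    advance_job_status('ns', job, 0.2, err=None)                  # -> JobStatus.SUCCEEDED
    advance_job_status('ns', job, 0.2, err=RetryException('x'))   # -> retry scheduled, job.at pushed into the future
    advance_job_status('ns', job, 0.2, err=ValueError('boom'))    # -> retry with exponential backoff, or FAILED once retries run out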
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile
def tile(self, zoom, row, col): """ Return Tile object of this TilePyramid. - zoom: zoom level - row: tile matrix row - col: tile matrix column """ return Tile(self, zoom, row, col)
python
def tile(self, zoom, row, col): """ Return Tile object of this TilePyramid. - zoom: zoom level - row: tile matrix row - col: tile matrix column """ return Tile(self, zoom, row, col)
[ "def", "tile", "(", "self", ",", "zoom", ",", "row", ",", "col", ")", ":", "return", "Tile", "(", "self", ",", "zoom", ",", "row", ",", "col", ")" ]
Return Tile object of this TilePyramid. - zoom: zoom level - row: tile matrix row - col: tile matrix column
[ "Return", "Tile", "object", "of", "this", "TilePyramid", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L62-L70
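A quick usage sketch, assuming the package's standard "geodetic" grid::

    from tilematrix import TilePyramid

    tp = TilePyramid("geodetic")
    tile = tp.tile(zoom=5, row=3, col=7)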
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.matrix_width
def matrix_width(self, zoom): """ Tile matrix width (number of columns) at zoom level. - zoom: zoom level """ validate_zoom(zoom) width = int(math.ceil(self.grid.shape.width * 2**(zoom) / self.metatiling)) return 1 if width < 1 else width
python
def matrix_width(self, zoom): """ Tile matrix width (number of columns) at zoom level. - zoom: zoom level """ validate_zoom(zoom) width = int(math.ceil(self.grid.shape.width * 2**(zoom) / self.metatiling)) return 1 if width < 1 else width
[ "def", "matrix_width", "(", "self", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "width", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "grid", ".", "shape", ".", "width", "*", "2", "**", "(", "zoom", ")", "/", "self", ".", "metatiling", ")", ")", "return", "1", "if", "width", "<", "1", "else", "width" ]
Tile matrix width (number of columns) at zoom level. - zoom: zoom level
[ "Tile", "matrix", "width", "(", "number", "of", "columns", ")", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L72-L80
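Worked example, assuming the "geodetic" grid (2 x 1 tiles at zoom 0)::

    TilePyramid("geodetic").matrix_width(3)                 # ceil(2 * 2**3 / 1) = 16
    TilePyramid("geodetic", metatiling=4).matrix_width(3)   # ceil(2 * 2**3 / 4) = 4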
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.matrix_height
def matrix_height(self, zoom): """ Tile matrix height (number of rows) at zoom level. - zoom: zoom level """ validate_zoom(zoom) height = int(math.ceil(self.grid.shape.height * 2**(zoom) / self.metatiling)) return 1 if height < 1 else height
python
def matrix_height(self, zoom): """ Tile matrix height (number of rows) at zoom level. - zoom: zoom level """ validate_zoom(zoom) height = int(math.ceil(self.grid.shape.height * 2**(zoom) / self.metatiling)) return 1 if height < 1 else height
[ "def", "matrix_height", "(", "self", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "height", "=", "int", "(", "math", ".", "ceil", "(", "self", ".", "grid", ".", "shape", ".", "height", "*", "2", "**", "(", "zoom", ")", "/", "self", ".", "metatiling", ")", ")", "return", "1", "if", "height", "<", "1", "else", "height" ]
Tile matrix height (number of rows) at zoom level. - zoom: zoom level
[ "Tile", "matrix", "height", "(", "number", "of", "rows", ")", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L82-L90
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile_x_size
def tile_x_size(self, zoom): """ Width of a tile in SRID units at zoom level. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_x_size is deprecated")) validate_zoom(zoom) return round(self.x_size / self.matrix_width(zoom), ROUND)
python
def tile_x_size(self, zoom): """ Width of a tile in SRID units at zoom level. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_x_size is deprecated")) validate_zoom(zoom) return round(self.x_size / self.matrix_width(zoom), ROUND)
[ "def", "tile_x_size", "(", "self", ",", "zoom", ")", ":", "warnings", ".", "warn", "(", "DeprecationWarning", "(", "\"tile_x_size is deprecated\"", ")", ")", "validate_zoom", "(", "zoom", ")", "return", "round", "(", "self", ".", "x_size", "/", "self", ".", "matrix_width", "(", "zoom", ")", ",", "ROUND", ")" ]
Width of a tile in SRID units at zoom level. - zoom: zoom level
[ "Width", "of", "a", "tile", "in", "SRID", "units", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L92-L100
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile_y_size
def tile_y_size(self, zoom): """ Height of a tile in SRID units at zoom level. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_y_size is deprecated")) validate_zoom(zoom) return round(self.y_size / self.matrix_height(zoom), ROUND)
python
def tile_y_size(self, zoom): """ Height of a tile in SRID units at zoom level. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_y_size is deprecated")) validate_zoom(zoom) return round(self.y_size / self.matrix_height(zoom), ROUND)
[ "def", "tile_y_size", "(", "self", ",", "zoom", ")", ":", "warnings", ".", "warn", "(", "DeprecationWarning", "(", "\"tile_y_size is deprecated\"", ")", ")", "validate_zoom", "(", "zoom", ")", "return", "round", "(", "self", ".", "y_size", "/", "self", ".", "matrix_height", "(", "zoom", ")", ",", "ROUND", ")" ]
Height of a tile in SRID units at zoom level. - zoom: zoom level
[ "Height", "of", "a", "tile", "in", "SRID", "units", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L102-L110
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile_width
def tile_width(self, zoom): """ Tile width in pixel. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_width is deprecated")) validate_zoom(zoom) matrix_pixel = 2**(zoom) * self.tile_size * self.grid.shape.width tile_pixel = self.tile_size * self.metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel
python
def tile_width(self, zoom): """ Tile width in pixel. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_width is deprecated")) validate_zoom(zoom) matrix_pixel = 2**(zoom) * self.tile_size * self.grid.shape.width tile_pixel = self.tile_size * self.metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel
[ "def", "tile_width", "(", "self", ",", "zoom", ")", ":", "warnings", ".", "warn", "(", "DeprecationWarning", "(", "\"tile_width is deprecated\"", ")", ")", "validate_zoom", "(", "zoom", ")", "matrix_pixel", "=", "2", "**", "(", "zoom", ")", "*", "self", ".", "tile_size", "*", "self", ".", "grid", ".", "shape", ".", "width", "tile_pixel", "=", "self", ".", "tile_size", "*", "self", ".", "metatiling", "return", "matrix_pixel", "if", "tile_pixel", ">", "matrix_pixel", "else", "tile_pixel" ]
Tile width in pixel. - zoom: zoom level
[ "Tile", "width", "in", "pixel", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L112-L122
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile_height
def tile_height(self, zoom): """ Tile height in pixel. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_height is deprecated")) validate_zoom(zoom) matrix_pixel = 2**(zoom) * self.tile_size * self.grid.shape.height tile_pixel = self.tile_size * self.metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel
python
def tile_height(self, zoom): """ Tile height in pixel. - zoom: zoom level """ warnings.warn(DeprecationWarning("tile_height is deprecated")) validate_zoom(zoom) matrix_pixel = 2**(zoom) * self.tile_size * self.grid.shape.height tile_pixel = self.tile_size * self.metatiling return matrix_pixel if tile_pixel > matrix_pixel else tile_pixel
[ "def", "tile_height", "(", "self", ",", "zoom", ")", ":", "warnings", ".", "warn", "(", "DeprecationWarning", "(", "\"tile_height is deprecated\"", ")", ")", "validate_zoom", "(", "zoom", ")", "matrix_pixel", "=", "2", "**", "(", "zoom", ")", "*", "self", ".", "tile_size", "*", "self", ".", "grid", ".", "shape", ".", "height", "tile_pixel", "=", "self", ".", "tile_size", "*", "self", ".", "metatiling", "return", "matrix_pixel", "if", "tile_pixel", ">", "matrix_pixel", "else", "tile_pixel" ]
Tile height in pixel. - zoom: zoom level
[ "Tile", "height", "in", "pixel", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L124-L134
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.pixel_x_size
def pixel_x_size(self, zoom): """ Width of a pixel in SRID units at zoom level. - zoom: zoom level """ validate_zoom(zoom) return round( (self.grid.right - self.grid.left) / (self.grid.shape.width * 2**zoom * self.tile_size), ROUND )
python
def pixel_x_size(self, zoom): """ Width of a pixel in SRID units at zoom level. - zoom: zoom level """ validate_zoom(zoom) return round( (self.grid.right - self.grid.left) / (self.grid.shape.width * 2**zoom * self.tile_size), ROUND )
[ "def", "pixel_x_size", "(", "self", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "return", "round", "(", "(", "self", ".", "grid", ".", "right", "-", "self", ".", "grid", ".", "left", ")", "/", "(", "self", ".", "grid", ".", "shape", ".", "width", "*", "2", "**", "zoom", "*", "self", ".", "tile_size", ")", ",", "ROUND", ")" ]
Width of a pixel in SRID units at zoom level. - zoom: zoom level
[ "Width", "of", "a", "pixel", "in", "SRID", "units", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L136-L147
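Worked example for the "geodetic" grid (left -180, right 180, two 256 px tiles wide at zoom 0)::

    tp = TilePyramid("geodetic")
    tp.pixel_x_size(0)   # 360 / (2 * 2**0 * 256) = 0.703125
    tp.pixel_x_size(1)   # 360 / (2 * 2**1 * 256) = 0.3515625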
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.pixel_y_size
def pixel_y_size(self, zoom): """ Height of a pixel in SRID units at zoom level. - zoom: zoom level """ validate_zoom(zoom) return round( (self.grid.top - self.grid.bottom) / (self.grid.shape.height * 2**zoom * self.tile_size), ROUND )
python
def pixel_y_size(self, zoom): """ Height of a pixel in SRID units at zoom level. - zoom: zoom level """ validate_zoom(zoom) return round( (self.grid.top - self.grid.bottom) / (self.grid.shape.height * 2**zoom * self.tile_size), ROUND )
[ "def", "pixel_y_size", "(", "self", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "return", "round", "(", "(", "self", ".", "grid", ".", "top", "-", "self", ".", "grid", ".", "bottom", ")", "/", "(", "self", ".", "grid", ".", "shape", ".", "height", "*", "2", "**", "zoom", "*", "self", ".", "tile_size", ")", ",", "ROUND", ")" ]
Height of a pixel in SRID units at zoom level. - zoom: zoom level
[ "Height", "of", "a", "pixel", "in", "SRID", "units", "at", "zoom", "level", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L149-L160
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tiles_from_bounds
def tiles_from_bounds(self, bounds, zoom): """ Return all tiles intersecting with bounds. Bounds values will be cleaned if they cross the antimeridian or are outside of the Northern or Southern tile pyramid bounds. - bounds: tuple of (left, bottom, right, top) bounding values in tile pyramid CRS - zoom: zoom level """ validate_zoom(zoom) if not isinstance(bounds, tuple) or len(bounds) != 4: raise ValueError("bounds must be a tuple of left, bottom, right, top values") if not isinstance(bounds, Bounds): bounds = Bounds(*bounds) if self.is_global: for tile in _global_tiles_from_bounds(self, bounds, zoom): yield tile else: for tile in _tiles_from_cleaned_bounds(self, bounds, zoom): yield tile
python
def tiles_from_bounds(self, bounds, zoom): """ Return all tiles intersecting with bounds. Bounds values will be cleaned if they cross the antimeridian or are outside of the Northern or Southern tile pyramid bounds. - bounds: tuple of (left, bottom, right, top) bounding values in tile pyramid CRS - zoom: zoom level """ validate_zoom(zoom) if not isinstance(bounds, tuple) or len(bounds) != 4: raise ValueError("bounds must be a tuple of left, bottom, right, top values") if not isinstance(bounds, Bounds): bounds = Bounds(*bounds) if self.is_global: for tile in _global_tiles_from_bounds(self, bounds, zoom): yield tile else: for tile in _tiles_from_cleaned_bounds(self, bounds, zoom): yield tile
[ "def", "tiles_from_bounds", "(", "self", ",", "bounds", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "if", "not", "isinstance", "(", "bounds", ",", "tuple", ")", "or", "len", "(", "bounds", ")", "!=", "4", ":", "raise", "ValueError", "(", "\"bounds must be a tuple of left, bottom, right, top values\"", ")", "if", "not", "isinstance", "(", "bounds", ",", "Bounds", ")", ":", "bounds", "=", "Bounds", "(", "*", "bounds", ")", "if", "self", ".", "is_global", ":", "for", "tile", "in", "_global_tiles_from_bounds", "(", "self", ",", "bounds", ",", "zoom", ")", ":", "yield", "tile", "else", ":", "for", "tile", "in", "_tiles_from_cleaned_bounds", "(", "self", ",", "bounds", ",", "zoom", ")", ":", "yield", "tile" ]
Return all tiles intersecting with bounds. Bounds values will be cleaned if they cross the antimeridian or are outside of the Northern or Southern tile pyramid bounds. - bounds: tuple of (left, bottom, right, top) bounding values in tile pyramid CRS - zoom: zoom level
[ "Return", "all", "tiles", "intersecting", "with", "bounds", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L173-L194
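Usage sketch (the `tile.id` attribute is assumed here to expose the (zoom, row, col) triple)::

    tp = TilePyramid("geodetic")
    for tile in tp.tiles_from_bounds((0.0, 0.0, 10.0, 10.0), zoom=5):
        print(tile.id)   # tiles covering the 10 x 10 degree box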
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tiles_from_bbox
def tiles_from_bbox(self, geometry, zoom): """ All metatiles intersecting with given bounding box. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) return self.tiles_from_bounds(geometry.bounds, zoom)
python
def tiles_from_bbox(self, geometry, zoom): """ All metatiles intersecting with given bounding box. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) return self.tiles_from_bounds(geometry.bounds, zoom)
[ "def", "tiles_from_bbox", "(", "self", ",", "geometry", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "return", "self", ".", "tiles_from_bounds", "(", "geometry", ".", "bounds", ",", "zoom", ")" ]
All metatiles intersecting with given bounding box. - geometry: shapely geometry - zoom: zoom level
[ "All", "metatiles", "intersecting", "with", "given", "bounding", "box", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L196-L204
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tiles_from_geom
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) if geometry.is_empty: return if not geometry.is_valid: raise ValueError("no valid geometry: %s" % geometry.type) if geometry.geom_type == "Point": yield self.tile_from_xy(geometry.x, geometry.y, zoom) elif geometry.geom_type == "MultiPoint": for point in geometry: yield self.tile_from_xy(point.x, point.y, zoom) elif geometry.geom_type in ( "LineString", "MultiLineString", "Polygon", "MultiPolygon", "GeometryCollection" ): prepared_geometry = prep(clip_geometry_to_srs_bounds(geometry, self)) for tile in self.tiles_from_bbox(geometry, zoom): if prepared_geometry.intersects(tile.bbox()): yield tile
python
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) if geometry.is_empty: return if not geometry.is_valid: raise ValueError("no valid geometry: %s" % geometry.type) if geometry.geom_type == "Point": yield self.tile_from_xy(geometry.x, geometry.y, zoom) elif geometry.geom_type == "MultiPoint": for point in geometry: yield self.tile_from_xy(point.x, point.y, zoom) elif geometry.geom_type in ( "LineString", "MultiLineString", "Polygon", "MultiPolygon", "GeometryCollection" ): prepared_geometry = prep(clip_geometry_to_srs_bounds(geometry, self)) for tile in self.tiles_from_bbox(geometry, zoom): if prepared_geometry.intersects(tile.bbox()): yield tile
[ "def", "tiles_from_geom", "(", "self", ",", "geometry", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "if", "geometry", ".", "is_empty", ":", "return", "if", "not", "geometry", ".", "is_valid", ":", "raise", "ValueError", "(", "\"no valid geometry: %s\"", "%", "geometry", ".", "type", ")", "if", "geometry", ".", "geom_type", "==", "\"Point\"", ":", "yield", "self", ".", "tile_from_xy", "(", "geometry", ".", "x", ",", "geometry", ".", "y", ",", "zoom", ")", "elif", "geometry", ".", "geom_type", "==", "\"MultiPoint\"", ":", "for", "point", "in", "geometry", ":", "yield", "self", ".", "tile_from_xy", "(", "point", ".", "x", ",", "point", ".", "y", ",", "zoom", ")", "elif", "geometry", ".", "geom_type", "in", "(", "\"LineString\"", ",", "\"MultiLineString\"", ",", "\"Polygon\"", ",", "\"MultiPolygon\"", ",", "\"GeometryCollection\"", ")", ":", "prepared_geometry", "=", "prep", "(", "clip_geometry_to_srs_bounds", "(", "geometry", ",", "self", ")", ")", "for", "tile", "in", "self", ".", "tiles_from_bbox", "(", "geometry", ",", "zoom", ")", ":", "if", "prepared_geometry", ".", "intersects", "(", "tile", ".", "bbox", "(", ")", ")", ":", "yield", "tile" ]
Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level
[ "Return", "all", "tiles", "intersecting", "with", "input", "geometry", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L206-L230
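Usage sketch with a shapely geometry (the `box` helper is shapely's, not part of this package)::

    from shapely.geometry import box

    tp = TilePyramid("geodetic")
    tiles = list(tp.tiles_from_geom(box(0, 0, 5, 5), zoom=6))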
ungarj/tilematrix
tilematrix/_tilepyramid.py
TilePyramid.tile_from_xy
def tile_from_xy(self, x, y, zoom, on_edge_use="rb"): """ Return tile covering a point defined by x and y values. - x: x coordinate - y: y coordinate - zoom: zoom level - on_edge_use: determine which Tile to pick if Point hits a grid edge - rb: right bottom (default) - rt: right top - lt: left top - lb: left bottom """ validate_zoom(zoom) if x < self.left or x > self.right or y < self.bottom or y > self.top: raise ValueError("x or y are outside of grid bounds") if on_edge_use not in ["lb", "rb", "rt", "lt"]: raise ValueError("on_edge_use must be one of lb, rb, rt or lt") return _tile_from_xy(self, x, y, zoom, on_edge_use=on_edge_use)
python
def tile_from_xy(self, x, y, zoom, on_edge_use="rb"): """ Return tile covering a point defined by x and y values. - x: x coordinate - y: y coordinate - zoom: zoom level - on_edge_use: determine which Tile to pick if Point hits a grid edge - rb: right bottom (default) - rt: right top - lt: left top - lb: left bottom """ validate_zoom(zoom) if x < self.left or x > self.right or y < self.bottom or y > self.top: raise ValueError("x or y are outside of grid bounds") if on_edge_use not in ["lb", "rb", "rt", "lt"]: raise ValueError("on_edge_use must be one of lb, rb, rt or lt") return _tile_from_xy(self, x, y, zoom, on_edge_use=on_edge_use)
[ "def", "tile_from_xy", "(", "self", ",", "x", ",", "y", ",", "zoom", ",", "on_edge_use", "=", "\"rb\"", ")", ":", "validate_zoom", "(", "zoom", ")", "if", "x", "<", "self", ".", "left", "or", "x", ">", "self", ".", "right", "or", "y", "<", "self", ".", "bottom", "or", "y", ">", "self", ".", "top", ":", "raise", "ValueError", "(", "\"x or y are outside of grid bounds\"", ")", "if", "on_edge_use", "not", "in", "[", "\"lb\"", ",", "\"rb\"", ",", "\"rt\"", ",", "\"lt\"", "]", ":", "raise", "ValueError", "(", "\"on_edge_use must be one of lb, rb, rt or lt\"", ")", "return", "_tile_from_xy", "(", "self", ",", "x", ",", "y", ",", "zoom", ",", "on_edge_use", "=", "on_edge_use", ")" ]
Return tile covering a point defined by x and y values. - x: x coordinate - y: y coordinate - zoom: zoom level - on_edge_use: determine which Tile to pick if Point hits a grid edge - rb: right bottom (default) - rt: right top - lt: left top - lb: left bottom
[ "Return", "tile", "covering", "a", "point", "defined", "by", "x", "and", "y", "values", "." ]
train
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L232-L250
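Sketch of the edge handling for a point sitting exactly on a grid corner::

    tp = TilePyramid("geodetic")
    tp.tile_from_xy(0.0, 0.0, zoom=1)                     # default "rb": the tile to the right and below
    tp.tile_from_xy(0.0, 0.0, zoom=1, on_edge_use="lt")   # pick the left-top neighbour instead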
happyleavesaoc/python-voobly
utils/update_metadata.py
get_ladder_metadata
def get_ladder_metadata(session, url): """Get ladder metadata.""" parsed = make_scrape_request(session, url) tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX)) return { 'id': int(tag['href'].split('/')[-1]), 'slug': url.split('/')[-1], 'url': url }
python
def get_ladder_metadata(session, url): """Get ladder metadata.""" parsed = make_scrape_request(session, url) tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX)) return { 'id': int(tag['href'].split('/')[-1]), 'slug': url.split('/')[-1], 'url': url }
[ "def", "get_ladder_metadata", "(", "session", ",", "url", ")", ":", "parsed", "=", "make_scrape_request", "(", "session", ",", "url", ")", "tag", "=", "parsed", ".", "find", "(", "'a'", ",", "href", "=", "re", ".", "compile", "(", "LADDER_ID_REGEX", ")", ")", "return", "{", "'id'", ":", "int", "(", "tag", "[", "'href'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", ",", "'slug'", ":", "url", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ",", "'url'", ":", "url", "}" ]
Get ladder metadata.
[ "Get", "ladder", "metadata", "." ]
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/utils/update_metadata.py#L14-L22
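The shape of the returned mapping, sketched with placeholders (the concrete id and slug depend entirely on the scraped page)::

    # get_ladder_metadata(session, 'https://www.voobly.com/ladder/view/<slug>')
    # -> {'id': <int parsed from the ladder link>, 'slug': '<slug>', 'url': '<the input url>'}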
happyleavesaoc/python-voobly
utils/update_metadata.py
get_ladders_metadata
def get_ladders_metadata(session, parsed): """Get metadata for all ladders.""" ladders = {} for ladder in parsed.find_all('a', href=re.compile(LADDER_URL_REGEX)): ladders[ladder.text] = get_ladder_metadata(session, ladder['href']) return ladders
python
def get_ladders_metadata(session, parsed): """Get metadata for all ladders.""" ladders = {} for ladder in parsed.find_all('a', href=re.compile(LADDER_URL_REGEX)): ladders[ladder.text] = get_ladder_metadata(session, ladder['href']) return ladders
[ "def", "get_ladders_metadata", "(", "session", ",", "parsed", ")", ":", "ladders", "=", "{", "}", "for", "ladder", "in", "parsed", ".", "find_all", "(", "'a'", ",", "href", "=", "re", ".", "compile", "(", "LADDER_URL_REGEX", ")", ")", ":", "ladders", "[", "ladder", ".", "text", "]", "=", "get_ladder_metadata", "(", "session", ",", "ladder", "[", "'href'", "]", ")", "return", "ladders" ]
Get metadata for all ladders.
[ "Get", "metadata", "for", "all", "ladders", "." ]
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/utils/update_metadata.py#L25-L30
happyleavesaoc/python-voobly
utils/update_metadata.py
get_metadata
def get_metadata(session, games): """Get metadata for games (only ladder data right now).""" for data in games.values(): parsed = make_scrape_request(session, data['url']) return { 'ladders': get_ladders_metadata(session, parsed) }
python
def get_metadata(session, games): """Get metadata for games (only ladder data right now).""" for data in games.values(): parsed = make_scrape_request(session, data['url']) return { 'ladders': get_ladders_metadata(session, parsed) }
[ "def", "get_metadata", "(", "session", ",", "games", ")", ":", "for", "data", "in", "games", ".", "values", "(", ")", ":", "parsed", "=", "make_scrape_request", "(", "session", ",", "data", "[", "'url'", "]", ")", "return", "{", "'ladders'", ":", "get_ladders_metadata", "(", "session", ",", "parsed", ")", "}" ]
Get metadata for games (only ladder data right now).
[ "Get", "metadata", "for", "games", "(", "only", "ladder", "data", "right", "now", ")", "." ]
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/utils/update_metadata.py#L33-L39
happyleavesaoc/python-voobly
utils/update_metadata.py
update_metadata
def update_metadata(session, path=DATA_PATH): """Update metadata files (only ladders right now).""" with open(os.path.join(path, 'games.json')) as handle: games = json.loads(handle.read()) for key, data in get_metadata(session, games).items(): with open(os.path.join(path, '{}.json'.format(key)), 'w') as handle: handle.write(json.dumps(data, indent=2))
python
def update_metadata(session, path=DATA_PATH): """Update metadata files (only ladders right now).""" with open(os.path.join(path, 'games.json')) as handle: games = json.loads(handle.read()) for key, data in get_metadata(session, games).items(): with open(os.path.join(path, '{}.json'.format(key)), 'w') as handle: handle.write(json.dumps(data, indent=2))
[ "def", "update_metadata", "(", "session", ",", "path", "=", "DATA_PATH", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'games.json'", ")", ")", "as", "handle", ":", "games", "=", "json", ".", "loads", "(", "handle", ".", "read", "(", ")", ")", "for", "key", ",", "data", "in", "get_metadata", "(", "session", ",", "games", ")", ".", "items", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'{}.json'", ".", "format", "(", "key", ")", ")", ",", "'w'", ")", "as", "handle", ":", "handle", ".", "write", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "2", ")", ")" ]
Update metadata files (only ladders right now).
[ "Update", "metadata", "files", "(", "only", "ladders", "right", "now", ")", "." ]
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/utils/update_metadata.py#L42-L49
project-rig/rig
rig/scripts/rig_counters.py
sample_counters
def sample_counters(mc, system_info): """Sample every router counter in the machine.""" return { (x, y): mc.get_router_diagnostics(x, y) for (x, y) in system_info }
python
def sample_counters(mc, system_info): """Sample every router counter in the machine.""" return { (x, y): mc.get_router_diagnostics(x, y) for (x, y) in system_info }
[ "def", "sample_counters", "(", "mc", ",", "system_info", ")", ":", "return", "{", "(", "x", ",", "y", ")", ":", "mc", ".", "get_router_diagnostics", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", "in", "system_info", "}" ]
Sample every router counter in the machine.
[ "Sample", "every", "router", "counter", "in", "the", "machine", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_counters.py#L24-L28
project-rig/rig
rig/scripts/rig_counters.py
deltas
def deltas(last, now): """Return the change in counter values (accounting for wrap-around).""" return { xy: RouterDiagnostics(*((n - l) & 0xFFFFFFFF for l, n in zip(last[xy], now[xy]))) for xy in last }
python
def deltas(last, now): """Return the change in counter values (accounting for wrap-around).""" return { xy: RouterDiagnostics(*((n - l) & 0xFFFFFFFF for l, n in zip(last[xy], now[xy]))) for xy in last }
[ "def", "deltas", "(", "last", ",", "now", ")", ":", "return", "{", "xy", ":", "RouterDiagnostics", "(", "*", "(", "(", "n", "-", "l", ")", "&", "0xFFFFFFFF", "for", "l", ",", "n", "in", "zip", "(", "last", "[", "xy", "]", ",", "now", "[", "xy", "]", ")", ")", ")", "for", "xy", "in", "last", "}" ]
Return the change in counter values (accounting for wrap-around).
[ "Return", "the", "change", "in", "counter", "values", "(", "accounting", "for", "wrap", "-", "around", ")", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_counters.py#L31-L37
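Worked example of the wrap-around arithmetic on a single 32-bit counter::

    last, now = 0xFFFFFFFE, 3
    (now - last) & 0xFFFFFFFF   # = 5: the counter wrapped past 2**32 but the delta is still correct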
project-rig/rig
rig/scripts/rig_counters.py
monitor_counters
def monitor_counters(mc, output, counters, detailed, f): """Monitor the counters on a specified machine, taking a snap-shot every time the generator 'f' yields.""" # Print CSV header output.write("time,{}{}\n".format("x,y," if detailed else "", ",".join(counters))) system_info = mc.get_system_info() # Make an initial sample of the counters last_counter_values = sample_counters(mc, system_info) start_time = time.time() for _ in f(): # Snapshot the change in counter values counter_values = sample_counters(mc, system_info) delta = deltas(last_counter_values, counter_values) last_counter_values = counter_values now = time.time() - start_time # Output the changes if detailed: for x, y in sorted(system_info): output.write("{:0.1f},{},{},{}\n".format( now, x, y, ",".join(str(getattr(delta[(x, y)], c)) for c in counters))) else: totals = [0 for _ in counters] for xy in sorted(system_info): for i, counter in enumerate(counters): totals[i] += getattr(delta[xy], counter) output.write("{:0.1f},{}\n".format( now, ",".join(map(str, totals))))
python
def monitor_counters(mc, output, counters, detailed, f): """Monitor the counters on a specified machine, taking a snap-shot every time the generator 'f' yields.""" # Print CSV header output.write("time,{}{}\n".format("x,y," if detailed else "", ",".join(counters))) system_info = mc.get_system_info() # Make an initial sample of the counters last_counter_values = sample_counters(mc, system_info) start_time = time.time() for _ in f(): # Snapshot the change in counter values counter_values = sample_counters(mc, system_info) delta = deltas(last_counter_values, counter_values) last_counter_values = counter_values now = time.time() - start_time # Output the changes if detailed: for x, y in sorted(system_info): output.write("{:0.1f},{},{},{}\n".format( now, x, y, ",".join(str(getattr(delta[(x, y)], c)) for c in counters))) else: totals = [0 for _ in counters] for xy in sorted(system_info): for i, counter in enumerate(counters): totals[i] += getattr(delta[xy], counter) output.write("{:0.1f},{}\n".format( now, ",".join(map(str, totals))))
[ "def", "monitor_counters", "(", "mc", ",", "output", ",", "counters", ",", "detailed", ",", "f", ")", ":", "# Print CSV header", "output", ".", "write", "(", "\"time,{}{}\\n\"", ".", "format", "(", "\"x,y,\"", "if", "detailed", "else", "\"\"", ",", "\",\"", ".", "join", "(", "counters", ")", ")", ")", "system_info", "=", "mc", ".", "get_system_info", "(", ")", "# Make an initial sample of the counters", "last_counter_values", "=", "sample_counters", "(", "mc", ",", "system_info", ")", "start_time", "=", "time", ".", "time", "(", ")", "for", "_", "in", "f", "(", ")", ":", "# Snapshot the change in counter values", "counter_values", "=", "sample_counters", "(", "mc", ",", "system_info", ")", "delta", "=", "deltas", "(", "last_counter_values", ",", "counter_values", ")", "last_counter_values", "=", "counter_values", "now", "=", "time", ".", "time", "(", ")", "-", "start_time", "# Output the changes", "if", "detailed", ":", "for", "x", ",", "y", "in", "sorted", "(", "system_info", ")", ":", "output", ".", "write", "(", "\"{:0.1f},{},{},{}\\n\"", ".", "format", "(", "now", ",", "x", ",", "y", ",", "\",\"", ".", "join", "(", "str", "(", "getattr", "(", "delta", "[", "(", "x", ",", "y", ")", "]", ",", "c", ")", ")", "for", "c", "in", "counters", ")", ")", ")", "else", ":", "totals", "=", "[", "0", "for", "_", "in", "counters", "]", "for", "xy", "in", "sorted", "(", "system_info", ")", ":", "for", "i", ",", "counter", "in", "enumerate", "(", "counters", ")", ":", "totals", "[", "i", "]", "+=", "getattr", "(", "delta", "[", "xy", "]", ",", "counter", ")", "output", ".", "write", "(", "\"{:0.1f},{}\\n\"", ".", "format", "(", "now", ",", "\",\"", ".", "join", "(", "map", "(", "str", ",", "totals", ")", ")", ")", ")" ]
Monitor the counters on a specified machine, taking a snap-shot every time the generator 'f' yields.
[ "Monitor", "the", "counters", "on", "a", "specified", "machine", "taking", "a", "snap", "-", "shot", "every", "time", "the", "generator", "f", "yields", "." ]
train
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_counters.py#L40-L75
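A minimal driver sketch — `mc` is assumed to be an already-connected MachineController and the counter names are assumed to be valid RouterDiagnostics fields::

    import sys
    import time

    def every_second(samples=10):
        for _ in range(samples):
            time.sleep(1.0)
            yield

    monitor_counters(mc, sys.stdout, ["local_multicast", "dropped_multicast"],
                     False, every_second)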