Dataset columns:

| column | type | length range |
|---|---|---|
| repository_name | string | 7-55 |
| func_path_in_repository | string | 4-223 |
| func_name | string | 1-134 |
| whole_func_string | string | 75-104k |
| language | string (1 class) | - |
| func_code_string | string | 75-104k |
| func_code_tokens | sequence | 19-28.4k |
| func_documentation_string | string | 1-46.9k |
| func_documentation_tokens | sequence | 1-1.97k |
| split_name | string (1 class) | - |
| func_code_url | string | 87-315 |

project-rig/rig | rig/scripts/rig_counters.py | press_enter

```python
def press_enter(multiple=False, silent=False):
    """Return a generator function which yields every time the user presses
    return."""
    def f():
        try:
            while True:
                if silent:
                    yield input()
                else:
                    sys.stderr.write("<press enter> ")
                    sys.stderr.flush()
                    yield input()
                if not multiple:
                    break
        except (EOFError, KeyboardInterrupt):
            # User Ctrl+D or Ctrl+C'd
            if not silent:
                # Prevents the user's terminal getting clobbered
                sys.stderr.write("\n")
                sys.stderr.flush()
            return
    return f
```
"""Return a generator function which yields every time the user presses
return."""
def f():
try:
while True:
if silent:
yield input()
else:
sys.stderr.write("<press enter> ")
sys.stderr.flush()
yield input()
if not multiple:
break
except (EOFError, KeyboardInterrupt):
# User Ctrl+D or Ctrl+C'd
if not silent:
# Prevents the user's terminal getting clobbered
sys.stderr.write("\n")
sys.stderr.flush()
return
return f | [
"def",
"press_enter",
"(",
"multiple",
"=",
"False",
",",
"silent",
"=",
"False",
")",
":",
"def",
"f",
"(",
")",
":",
"try",
":",
"while",
"True",
":",
"if",
"silent",
":",
"yield",
"input",
"(",
")",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"<press enter> \"",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"yield",
"input",
"(",
")",
"if",
"not",
"multiple",
":",
"break",
"except",
"(",
"EOFError",
",",
"KeyboardInterrupt",
")",
":",
"# User Ctrl+D or Ctrl+C'd",
"if",
"not",
"silent",
":",
"# Prevents the user's terminal getting clobbered",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\n\"",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"return",
"return",
"f"
] | Return a generator function which yields every time the user presses
return. | [
"Return",
"a",
"generator",
"function",
"which",
"yields",
"every",
"time",
"the",
"user",
"presses",
"return",
"."

split: train | source: https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_counters.py#L78-L101
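
A minimal usage sketch (not from the rig repository) of how the returned generator factory might drive a read loop; `read_counter` is a hypothetical callable standing in for whatever gets sampled between key presses:

```python
def sample_on_enter(read_counter, multiple=True):
    # press_enter() returns a factory; calling it gives the generator that
    # blocks on stdin and yields once per press of the return key.
    trigger = press_enter(multiple=multiple)
    samples = []
    for _ in trigger():
        samples.append(read_counter())
    return samples

# e.g. sample_on_enter(lambda: 42, multiple=False) -> [42] after one press
```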

project-rig/rig | rig/machine_control/unbooted_ping.py | listen

```python
def listen(timeout=6.0, port=BOOT_PORT):
    """Listen for a 'ping' broadcast message from an unbooted SpiNNaker board.

    Unbooted SpiNNaker boards send out a UDP broadcast message every 4-ish
    seconds on port 54321. This function listens for such messages and reports
    the IP address that it came from.

    Parameters
    ----------
    timeout : float
        Number of seconds to wait for a message to arrive.
    port : int
        The port number to listen on.

    Returns
    -------
    str or None
        The IP address of the SpiNNaker board from which a ping was received or
        None if no ping was observed.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # Don't take control of this socket in the system (i.e. allow other
    # processes to bind to it) since we're listening for broadcasts.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # Listen for the broadcasts
    s.bind(('0.0.0.0', port))
    s.settimeout(timeout)

    try:
        message, (ipaddr, port) = s.recvfrom(512)
        return ipaddr
    except socket.timeout:
        return None
```
"""Listen for a 'ping' broadcast message from an unbooted SpiNNaker board.
Unbooted SpiNNaker boards send out a UDP broadcast message every 4-ish
seconds on port 54321. This function listens for such messages and reports
the IP address that it came from.
Parameters
----------
timeout : float
Number of seconds to wait for a message to arrive.
port : int
The port number to listen on.
Returns
-------
str or None
The IP address of the SpiNNaker board from which a ping was received or
None if no ping was observed.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Don't take control of this socket in the system (i.e. allow other
# processes to bind to it) since we're listening for broadcasts.
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Listen for the broadcasts
s.bind(('0.0.0.0', port))
s.settimeout(timeout)
try:
message, (ipaddr, port) = s.recvfrom(512)
return ipaddr
except socket.timeout:
return None | [
"def",
"listen",
"(",
"timeout",
"=",
"6.0",
",",
"port",
"=",
"BOOT_PORT",
")",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"# Don't take control of this socket in the system (i.e. allow other",
"# processes to bind to it) since we're listening for broadcasts.",
"s",
".",
"setsockopt",
"(",
"socket",
".",
"SOL_SOCKET",
",",
"socket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"# Listen for the broadcasts",
"s",
".",
"bind",
"(",
"(",
"'0.0.0.0'",
",",
"port",
")",
")",
"s",
".",
"settimeout",
"(",
"timeout",
")",
"try",
":",
"message",
",",
"(",
"ipaddr",
",",
"port",
")",
"=",
"s",
".",
"recvfrom",
"(",
"512",
")",
"return",
"ipaddr",
"except",
"socket",
".",
"timeout",
":",
"return",
"None"
] | Listen for a 'ping' broadcast message from an unbooted SpiNNaker board.
Unbooted SpiNNaker boards send out a UDP broadcast message every 4-ish
seconds on port 54321. This function listens for such messages and reports
the IP address that it came from.
Parameters
----------
timeout : float
Number of seconds to wait for a message to arrive.
port : int
The port number to listen on.
Returns
-------
str or None
The IP address of the SpiNNaker board from which a ping was received or
None if no ping was observed. | [
"Listen",
"for",
"a",
"ping",
"broadcast",
"message",
"from",
"an",
"unbooted",
"SpiNNaker",
"board",
"."

split: train | source: https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/unbooted_ping.py#L8-L42
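
For local testing, a sketch of the sending side: broadcast a dummy datagram on the same port so that `listen()` has something to pick up. The payload here is an assumption; real boards send their own message format, but `listen()` only reports the sender address:

```python
import socket

def send_fake_ping(port=54321):
    # listen() ignores the payload and returns the source IP, so any bytes do.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.sendto(b'ping', ('<broadcast>', port))
    s.close()

# send_fake_ping(); listen(timeout=1.0, port=54321) should return this host's IP.
```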

Metatab/metapack | metapack/cli/s3.py | clear_cache

```python
def clear_cache(m, files_processed):
    """Remove any files we may have uploaded from the cache. """

    for what, reason, url, path in files_processed:
        cp = m.doc.downloader.cache_path(url)

        if m.cache.exists(cp):
            m.cache.remove(cp)
```
"""Remove any files we may have uploaded from the cache. """
for what, reason, url, path in files_processed:
cp = m.doc.downloader.cache_path(url)
if m.cache.exists(cp):
m.cache.remove(cp) | [
"def",
"clear_cache",
"(",
"m",
",",
"files_processed",
")",
":",
"for",
"what",
",",
"reason",
",",
"url",
",",
"path",
"in",
"files_processed",
":",
"cp",
"=",
"m",
".",
"doc",
".",
"downloader",
".",
"cache_path",
"(",
"url",
")",
"if",
"m",
".",
"cache",
".",
"exists",
"(",
"cp",
")",
":",
"m",
".",
"cache",
".",
"remove",
"(",
"cp",
")"
] | Remove any files we may have uploaded from the cache. | [
"Remove",
"any",
"files",
"we",
"may",
"have",
"uploaded",
"from",
"the",
"cache",
"."

split: train | source: https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/s3.py#L135-L142
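
A short illustration of the expected shape of `files_processed`; the exact tuple contents are assumptions (only the third element, the URL, is used by `clear_cache`), and `m` stands for the CLI's Metapack context object:

```python
# Hypothetical bookkeeping produced by an earlier upload step:
files_processed = [
    ('write', 'updated', 'http://example.com/data.csv', 'data/data.csv'),
]
# clear_cache(m, files_processed) then evicts the cached copy of each URL.
```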

Metatab/metapack | metapack/jupyter/magic.py | fill_categorical_na

```python
def fill_categorical_na(df, nan_cat='NA'):
    """Fill categoricals with 'NA', possibly creating a new category,
    and fill other NaNs with blanks """

    for col in df.columns[df.isna().any()].tolist():

        if df[col].dtype.name != 'category':
            # If not categorical, fill with a blank, which creates an
            # empty cell in CSV.
            df[col] = df[col].fillna('')
        else:
            try:
                df[col].cat.add_categories([nan_cat], inplace=True)
            except ValueError:
                pass

            df[col] = df[col].fillna(nan_cat)

    return df
```
"""Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks """
for col in df.columns[df.isna().any()].tolist():
if df[col].dtype.name != 'category':
# If not categorical, fill with a blank, which creates and
# empty cell in CSV.
df[col] = df[col].fillna('')
else:
try:
df[col].cat.add_categories([nan_cat], inplace=True)
except ValueError:
pass
df[col] = df[col].fillna(nan_cat)
return df | [
"def",
"fill_categorical_na",
"(",
"df",
",",
"nan_cat",
"=",
"'NA'",
")",
":",
"for",
"col",
"in",
"df",
".",
"columns",
"[",
"df",
".",
"isna",
"(",
")",
".",
"any",
"(",
")",
"]",
".",
"tolist",
"(",
")",
":",
"if",
"df",
"[",
"col",
"]",
".",
"dtype",
".",
"name",
"!=",
"'category'",
":",
"# If not categorical, fill with a blank, which creates and",
"# empty cell in CSV.",
"df",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"fillna",
"(",
"''",
")",
"else",
":",
"try",
":",
"df",
"[",
"col",
"]",
".",
"cat",
".",
"add_categories",
"(",
"[",
"nan_cat",
"]",
",",
"inplace",
"=",
"True",
")",
"except",
"ValueError",
":",
"pass",
"df",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"fillna",
"(",
"nan_cat",
")",
"return",
"df"
] | Fill categoricals with 'NA', possibly creating a new category,
and fill other NaNa with blanks | [
"Fill",
"categoricals",
"with",
"NA",
"possibly",
"creating",
"a",
"new",
"category",
"and",
"fill",
"other",
"NaNa",
"with",
"blanks"

split: train | source: https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/magic.py#L41-L59
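
A small usage sketch with pandas; the column names are illustrative:

```python
import pandas as pd

df = pd.DataFrame({
    'grade': pd.Categorical(['a', 'b', None]),  # categorical with a missing value
    'score': [1.0, None, 3.0],                  # non-categorical column
})
clean = fill_categorical_na(df)
# clean['grade'] ends as ['a', 'b', 'NA'], with 'NA' added as a category;
# clean['score'] ends as [1.0, '', 3.0], so the CSV cell comes out empty.
```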

boazmohar/pySparkUtils | pySparkUtils/utils.py | executor_ips

```python
def executor_ips(sc):
    """ gets the unique ip addresses of the executors of the current application

    This uses the REST API of the status web UI on the driver
    (http://spark.apache.org/docs/latest/monitoring.html)

    :param sc: Spark context
    :return: set of ip addresses
    """
    try:
        app_id = sc.applicationId
    except AttributeError:
        app_id = sc.getConf().get('spark.app.id')
    # for getting the url (see: https://github.com/apache/spark/pull/15000)
    try:
        base_url = sc.uiWebUrl
    except AttributeError:
        base_url = sc._jsc.sc().uiWebUrl().get()
    url = base_url + '/api/v1/applications/' + app_id + '/executors'
    try:
        data = json.load(urlopen(url))
    except TypeError:
        response = urlopen(url)
        str_response = response.read().decode('utf-8')
        data = json.loads(str_response)
    ips = set(map(lambda x: x[u'hostPort'].split(':')[0], data))
    return ips
```
""" gets the unique ip addresses of the executors of the current application
This uses the REST API of the status web UI on the driver (http://spark.apache.org/docs/latest/monitoring.html)
:param sc: Spark context
:return: set of ip addresses
"""
try:
app_id = sc.applicationId
except AttributeError:
app_id = sc.getConf().get('spark.app.id')
# for getting the url (see: https://github.com/apache/spark/pull/15000)
try:
base_url = sc.uiWebUrl
except AttributeError:
base_url = sc._jsc.sc().uiWebUrl().get()
url = base_url + '/api/v1/applications/' + app_id + '/executors'
try:
data = json.load(urlopen(url))
except TypeError:
response = urlopen(url)
str_response = response.read().decode('utf-8')
data = json.loads(str_response)
ips = set(map(lambda x: x[u'hostPort'].split(':')[0], data))
return ips | [
"def",
"executor_ips",
"(",
"sc",
")",
":",
"try",
":",
"app_id",
"=",
"sc",
".",
"applicationId",
"except",
"AttributeError",
":",
"app_id",
"=",
"sc",
".",
"getConf",
"(",
")",
".",
"get",
"(",
"'spark.app.id'",
")",
"# for getting the url (see: https://github.com/apache/spark/pull/15000)\r",
"try",
":",
"base_url",
"=",
"sc",
".",
"uiWebUrl",
"except",
"AttributeError",
":",
"base_url",
"=",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")",
".",
"uiWebUrl",
"(",
")",
".",
"get",
"(",
")",
"url",
"=",
"base_url",
"+",
"'/api/v1/applications/'",
"+",
"app_id",
"+",
"'/executors'",
"try",
":",
"data",
"=",
"json",
".",
"load",
"(",
"urlopen",
"(",
"url",
")",
")",
"except",
"TypeError",
":",
"response",
"=",
"urlopen",
"(",
"url",
")",
"str_response",
"=",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"str_response",
")",
"ips",
"=",
"set",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"u'hostPort'",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
",",
"data",
")",
")",
"return",
"ips"
] | gets the unique ip addresses of the executors of the current application
This uses the REST API of the status web UI on the driver (http://spark.apache.org/docs/latest/monitoring.html)
:param sc: Spark context
:return: set of ip addresses | [
"gets",
"the",
"unique",
"ip",
"addresses",
"of",
"the",
"executors",
"of",
"the",
"current",
"application",
"This",
"uses",
"the",
"REST",
"API",
"of",
"the",
"status",
"web",
"UI",
"on",
"the",
"driver",
"(",
"http",
":",
"//",
"spark",
".",
"apache",
".",
"org",
"/",
"docs",
"/",
"latest",
"/",
"monitoring",
".",
"html",
")",
":",
"param",
"sc",
":",
"Spark",
"context",
":",
"return",
":",
"set",
"of",
"ip",
"addresses"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L21-L45
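
For reference, roughly what one element of the executors endpoint returns (fields abridged, values made up); the function keeps only the host part of `hostPort`:

```python
executor_record = {'id': '1', 'hostPort': '10.0.0.12:35467', 'isActive': True}
ip = executor_record['hostPort'].split(':')[0]   # -> '10.0.0.12'
```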

boazmohar/pySparkUtils | pySparkUtils/utils.py | change

```python
def change(sc=None, app_name='customSpark', master=None, wait='ips', min_cores=None, min_ips=None, timeout=30,
           refresh_rate=0.5, fail_on_timeout=False, **kwargs):
    """ Returns a new Spark Context (sc) object with added properties set

    :param sc: current SparkContext if None will create a new one
    :param app_name: name of new spark app
    :param master: url to master, if None will get from current sc
    :param wait: when to return after asking for a new sc (or max of timeout seconds):
        'ips': wait for all the previous ips that were connected to return (needs sc to not be None)
        'cores': wait for min_cores
        None: return immediately
    :param min_cores: when wait is 'cores' will wait until defaultParallelism is back to at least this value.
        if None will be set to defaultParallelism.
    :param min_ips: when wait is 'ips' will wait until number of unique executor ips is back to at least this value.
        if None will be set to what the original sc had.
    :param timeout: max time in seconds to wait for new sc if wait is 'ips' or 'cores'
    :param fail_on_timeout: whether to fail if the timeout has been reached
    :param refresh_rate: how long to wait in seconds between each check of defaultParallelism
    :param kwargs: added properties to set. In the form of key value pairs (replaces '.' with '_' in key)
        examples: spark_task_cores='1', spark_python_worker_memory='8g'
        see: http://spark.apache.org/docs/latest/configuration.html
    :return: a new SparkContext
    """
    # checking input
    if master is None and sc is None:
        raise ValueError('Both master and sc are None')
    if master is None:
        master = sc.getConf().get(u'spark.master')
    if wait == 'ips':
        if sc is None:
            if min_ips is None:
                min_ips = 1
        elif min_ips is None:
            min_ips = len(executor_ips(sc))
    elif wait == 'cores':
        if min_cores is None:
            if sc is None:
                logging.getLogger('pySparkUtils').info('Both sc and min_cores are None: setting target_cores to 2')
                min_cores = 2
            else:
                min_cores = sc.defaultParallelism
                logging.getLogger('pySparkUtils').info('min_cores is None: setting target_cores to: %d' % min_cores)
    elif wait is not None:
        raise ValueError("wait should be: ['ips','cores',None] got: %s" % wait)
    if sc is not None:
        logging.getLogger('pySparkUtils').info('Stopping original sc with %d cores and %d executors' %
                                               (sc.defaultParallelism, len(executor_ips(sc))))
        sc.stop()
    # building a new configuration with added arguments
    conf = SparkConf().setMaster(master).setAppName(app_name)
    for key in kwargs.keys():
        name = key.replace('_', '.', 100)
        value = kwargs[key]
        conf = conf.set(name, value)
        logging.getLogger('pySparkUtils').info('Setting %s to: %s' % (name, value))
    # starting the new context and waiting for defaultParallelism to get back to original value
    sc = SparkContext(conf=conf)
    if wait == 'cores':
        total_time = 0
        while sc.defaultParallelism < min_cores and total_time < timeout:
            time.sleep(refresh_rate)
            total_time += refresh_rate
        if fail_on_timeout and total_time >= timeout:
            sc.stop()
            raise RuntimeError('Time out reached when changing sc')
    elif wait == 'ips':
        total_time = 0
        while len(executor_ips(sc)) < min_ips and total_time < timeout:
            time.sleep(refresh_rate)
            total_time += refresh_rate
        if fail_on_timeout and total_time >= timeout:
            sc.stop()
            raise RuntimeError('Time out reached when changing sc')
    logging.getLogger('pySparkUtils').info('Returning new sc with %d cores and %d executors' %
                                           (sc.defaultParallelism, len(executor_ips(sc))))
    return sc
```

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L48-L126
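
A usage sketch: restart the context with the per-task and worker-memory properties from the docstring example, waiting until the same set of executor machines has re-registered:

```python
sc = change(sc, app_name='resized-app', wait='ips',
            spark_task_cores='1', spark_python_worker_memory='8g')
```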

boazmohar/pySparkUtils | pySparkUtils/utils.py | fallback

```python
def fallback(func):
    """ Decorator function for functions that handle spark context.
    If a function changes sc we might lose it if an error occurs in the function.
    In the event of an error this decorator will log the error but return sc.

    :param func: function to decorate
    :return: decorated function
    """
    @wraps(func)
    def dec(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logging.getLogger('pySparkUtils').error('Decorator handled exception %s' % e, exc_info=True)
            _, _, tb = sys.exc_info()
            while tb.tb_next:
                tb = tb.tb_next
            frame = tb.tb_frame
            for key, value in iteritems(frame.f_locals):
                if isinstance(value, SparkContext) and value._jsc is not None:
                    return frame.f_locals[key]
            logging.getLogger('pySparkUtils').error('Could not find SparkContext', exc_info=True)
            return None
    return dec
```
""" Decorator function for functions that handle spark context.
If a function changes sc we might lose it if an error occurs in the function.
In the event of an error this decorator will log the error but return sc.
:param func: function to decorate
:return: decorated function
"""
@wraps(func)
def dec(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
logging.getLogger('pySparkUtils').error('Decorator handled exception %s' % e, exc_info=True)
_, _, tb = sys.exc_info()
while tb.tb_next:
tb = tb.tb_next
frame = tb.tb_frame
for key, value in iteritems(frame.f_locals):
if isinstance(value, SparkContext) and value._jsc is not None:
return frame.f_locals[key]
logging.getLogger('pySparkUtils').error('Could not find SparkContext', exc_info=True)
return None
return dec | [
"def",
"fallback",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"dec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"error",
"(",
"'Decorator handled exception %s'",
"%",
"e",
",",
"exc_info",
"=",
"True",
")",
"_",
",",
"_",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"while",
"tb",
".",
"tb_next",
":",
"tb",
"=",
"tb",
".",
"tb_next",
"frame",
"=",
"tb",
".",
"tb_frame",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"frame",
".",
"f_locals",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"SparkContext",
")",
"and",
"value",
".",
"_jsc",
"is",
"not",
"None",
":",
"return",
"frame",
".",
"f_locals",
"[",
"key",
"]",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"error",
"(",
"'Could not find SparkContext'",
",",
"exc_info",
"=",
"True",
")",
"return",
"None",
"return",
"dec"
] | Decorator function for functions that handle spark context.
If a function changes sc we might lose it if an error occurs in the function.
In the event of an error this decorator will log the error but return sc.
:param func: function to decorate
:return: decorated function | [
"Decorator",
"function",
"for",
"functions",
"that",
"handle",
"spark",
"context",
".",
"If",
"a",
"function",
"changes",
"sc",
"we",
"might",
"lose",
"it",
"if",
"an",
"error",
"occurs",
"in",
"the",
"function",
".",
"In",
"the",
"event",
"of",
"an",
"error",
"this",
"decorator",
"will",
"log",
"the",
"error",
"but",
"return",
"sc",
".",
":",
"param",
"func",
":",
"function",
"to",
"decorate",
":",
"return",
":",
"decorated",
"function"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L129-L153
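
A sketch of the decorator in use; the body is contrived, but it shows the recovery path: when the wrapped function raises after having created a live context, the decorator digs that context out of the failing frame instead of losing it:

```python
@fallback
def risky_restart(sc):
    sc = change(sc, app_name='retry')   # context replaced successfully ...
    raise RuntimeError('boom')          # ... then something else goes wrong

sc = risky_restart(sc)   # returns the live SparkContext found in the frame
```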

boazmohar/pySparkUtils | pySparkUtils/utils.py | thunder_decorator

```python
def thunder_decorator(func):
    """ Decorator for functions so they could get as input a thunder.Images / thunder.Series object,
    while they are expecting an rdd. Also will return the data from rdd to the appropriate type
    Assumes only one input object of type Images/Series, and up to one output object of type RDD

    :param func: function to decorate
    :return: decorated function
    """
    @wraps(func)
    def dec(*args, **kwargs):
        # find Images / Series object in args
        result = None
        args = list(args)
        image_args = list(map(lambda x: isinstance(x, td.images.Images), args))
        series_args = list(map(lambda x: isinstance(x, td.series.Series), args))
        rdd_args = list(map(lambda x: isinstance(x, RDD), args))

        # find Images / Series object in kwargs
        image_kwargs = []
        series_kwargs = []
        rdd_kwargs = []
        for key, value in iteritems(kwargs):
            if isinstance(value, td.images.Images):
                image_kwargs.append(key)
            if isinstance(value, td.series.Series):
                series_kwargs.append(key)
            if isinstance(value, RDD):
                rdd_kwargs.append(key)

        # make sure there is only one
        count = sum(image_args) + sum(series_args) + sum(rdd_args) + len(image_kwargs) + len(series_kwargs) + \
            len(rdd_kwargs)
        if count == 0:
            raise ValueError('Wrong data type, expected [RDD, Images, Series] got None')
        if count > 1:
            raise ValueError('Expecting one input argument of type Series / Images, got: %d' % count)

        # bypass for RDD
        if sum(rdd_args) or len(rdd_kwargs):
            return func(*args, **kwargs)

        image_flag = None
        # convert to rdd and send
        if sum(image_args) > 0:
            image_flag = True
            index = np.where(image_args)[0][0]
            args[index] = args[index].tordd()
            result = func(*args, **kwargs)
        if sum(series_args) > 0:
            image_flag = False
            index = np.where(series_args)[0][0]
            args[index] = args[index].tordd()
            result = func(*args, **kwargs)
        if len(image_kwargs) > 0:
            image_flag = True
            kwargs[image_kwargs[0]] = kwargs[image_kwargs[0]].tordd()
            result = func(*args, **kwargs)
        if len(series_kwargs) > 0:
            image_flag = False
            kwargs[series_kwargs[0]] = kwargs[series_kwargs[0]].tordd()
            result = func(*args, **kwargs)
        if image_flag is None:
            raise RuntimeError('Target function did not run')

        # handle output
        if not isinstance(result, tuple):
            result = (result,)
        result_len = len(result)
        rdd_index = np.where(list(map(lambda x: isinstance(x, RDD), result)))[0]

        # no RDD as output
        if len(rdd_index) == 0:
            logging.getLogger('pySparkUtils').debug('No RDDs found in output')
            if result_len == 1:
                return result[0]
            else:
                return result
        if len(rdd_index) > 1:
            raise ValueError('Expecting one RDD as output got: %d' % len(rdd_index))
        result = list(result)
        rdd_index = rdd_index[0]

        # handle type of output
        if image_flag:
            result[rdd_index] = td.images.fromrdd(result[rdd_index])
        else:
            result[rdd_index] = td.series.fromrdd(result[rdd_index])
        if result_len == 1:
            return result[0]
        else:
            return result
    return dec
```
""" Decorator for functions so they could get as input a thunder.Images / thunder.Series object,
while they are expecting an rdd. Also will return the data from rdd to the appropriate type
Assumes only one input object of type Images/Series, and up to one output object of type RDD
:param func: function to decorate
:return: decorated function
"""
@wraps(func)
def dec(*args, **kwargs):
# find Images / Series object in args
result = None
args = list(args)
image_args = list(map(lambda x: isinstance(x, td.images.Images), args))
series_args = list(map(lambda x: isinstance(x, td.series.Series), args))
rdd_args = list(map(lambda x: isinstance(x, RDD), args))
# find Images / Series object in kwargs
image_kwargs = []
series_kwargs = []
rdd_kwargs = []
for key, value in iteritems(kwargs):
if isinstance(value, td.images.Images):
image_kwargs.append(key)
if isinstance(value, td.series.Series):
series_kwargs.append(key)
if isinstance(value, RDD):
rdd_kwargs.append(key)
# make sure there is only one
count = sum(image_args) + sum(series_args) + sum(rdd_args) + len(image_kwargs) + len(series_kwargs) + \
len(rdd_kwargs)
if count == 0:
raise ValueError('Wrong data type, expected [RDD, Images, Series] got None')
if count > 1:
raise ValueError('Expecting on input argument of type Series / Images, got: %d' % count)
# bypass for RDD
if sum(rdd_args) or len(rdd_kwargs):
return func(*args, **kwargs)
image_flag = None
# convert to rdd and send
if sum(image_args) > 0:
image_flag = True
index = np.where(image_args)[0][0]
args[index] = args[index].tordd()
result = func(*args, **kwargs)
if sum(series_args) > 0:
image_flag = False
index = np.where(series_args)[0][0]
args[index] = args[index].tordd()
result = func(*args, **kwargs)
if len(image_kwargs) > 0:
image_flag = True
kwargs[image_kwargs[0]] = kwargs[image_kwargs[0]].tordd()
result = func(*args, **kwargs)
if len(series_kwargs) > 0:
image_flag = False
kwargs[series_kwargs[0]] = kwargs[series_kwargs[0]].tordd()
result = func(*args, **kwargs)
if image_flag is None:
raise RuntimeError('Target function did not run')
# handle output
if not isinstance(result, tuple):
result = (result,)
result_len = len(result)
rdd_index = np.where(list(map(lambda x: isinstance(x, RDD), result)))[0]
# no RDD as output
if len(rdd_index) == 0:
logging.getLogger('pySparkUtils').debug('No RDDs found in output')
if result_len == 1:
return result[0]
else:
return result
if len(rdd_index) > 1:
raise ValueError('Expecting one RDD as output got: %d' % len(rdd_index))
result = list(result)
rdd_index = rdd_index[0]
# handle type of output
if image_flag:
result[rdd_index] = td.images.fromrdd(result[rdd_index])
else:
result[rdd_index] = td.series.fromrdd(result[rdd_index])
if result_len == 1:
return result[0]
else:
return result
return dec | [
"def",
"thunder_decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"dec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# find Images / Series object in args\r",
"result",
"=",
"None",
"args",
"=",
"list",
"(",
"args",
")",
"image_args",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"td",
".",
"images",
".",
"Images",
")",
",",
"args",
")",
")",
"series_args",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"td",
".",
"series",
".",
"Series",
")",
",",
"args",
")",
")",
"rdd_args",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"RDD",
")",
",",
"args",
")",
")",
"# find Images / Series object in kwargs\r",
"image_kwargs",
"=",
"[",
"]",
"series_kwargs",
"=",
"[",
"]",
"rdd_kwargs",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"td",
".",
"images",
".",
"Images",
")",
":",
"image_kwargs",
".",
"append",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"td",
".",
"series",
".",
"Series",
")",
":",
"series_kwargs",
".",
"append",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"RDD",
")",
":",
"rdd_kwargs",
".",
"append",
"(",
"key",
")",
"# make sure there is only one\r",
"count",
"=",
"sum",
"(",
"image_args",
")",
"+",
"sum",
"(",
"series_args",
")",
"+",
"sum",
"(",
"rdd_args",
")",
"+",
"len",
"(",
"image_kwargs",
")",
"+",
"len",
"(",
"series_kwargs",
")",
"+",
"len",
"(",
"rdd_kwargs",
")",
"if",
"count",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'Wrong data type, expected [RDD, Images, Series] got None'",
")",
"if",
"count",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Expecting on input argument of type Series / Images, got: %d'",
"%",
"count",
")",
"# bypass for RDD\r",
"if",
"sum",
"(",
"rdd_args",
")",
"or",
"len",
"(",
"rdd_kwargs",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"image_flag",
"=",
"None",
"# convert to rdd and send\r",
"if",
"sum",
"(",
"image_args",
")",
">",
"0",
":",
"image_flag",
"=",
"True",
"index",
"=",
"np",
".",
"where",
"(",
"image_args",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"args",
"[",
"index",
"]",
"=",
"args",
"[",
"index",
"]",
".",
"tordd",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"sum",
"(",
"series_args",
")",
">",
"0",
":",
"image_flag",
"=",
"False",
"index",
"=",
"np",
".",
"where",
"(",
"series_args",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"args",
"[",
"index",
"]",
"=",
"args",
"[",
"index",
"]",
".",
"tordd",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"image_kwargs",
")",
">",
"0",
":",
"image_flag",
"=",
"True",
"kwargs",
"[",
"image_kwargs",
"[",
"0",
"]",
"]",
"=",
"kwargs",
"[",
"image_kwargs",
"[",
"0",
"]",
"]",
".",
"tordd",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"len",
"(",
"series_kwargs",
")",
">",
"0",
":",
"image_flag",
"=",
"False",
"kwargs",
"[",
"series_kwargs",
"[",
"0",
"]",
"]",
"=",
"kwargs",
"[",
"series_kwargs",
"[",
"0",
"]",
"]",
".",
"tordd",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"image_flag",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Target function did not run'",
")",
"# handle output\r",
"if",
"not",
"isinstance",
"(",
"result",
",",
"tuple",
")",
":",
"result",
"=",
"(",
"result",
",",
")",
"result_len",
"=",
"len",
"(",
"result",
")",
"rdd_index",
"=",
"np",
".",
"where",
"(",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"RDD",
")",
",",
"result",
")",
")",
")",
"[",
"0",
"]",
"# no RDD as output\r",
"if",
"len",
"(",
"rdd_index",
")",
"==",
"0",
":",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"debug",
"(",
"'No RDDs found in output'",
")",
"if",
"result_len",
"==",
"1",
":",
"return",
"result",
"[",
"0",
"]",
"else",
":",
"return",
"result",
"if",
"len",
"(",
"rdd_index",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Expecting one RDD as output got: %d'",
"%",
"len",
"(",
"rdd_index",
")",
")",
"result",
"=",
"list",
"(",
"result",
")",
"rdd_index",
"=",
"rdd_index",
"[",
"0",
"]",
"# handle type of output\r",
"if",
"image_flag",
":",
"result",
"[",
"rdd_index",
"]",
"=",
"td",
".",
"images",
".",
"fromrdd",
"(",
"result",
"[",
"rdd_index",
"]",
")",
"else",
":",
"result",
"[",
"rdd_index",
"]",
"=",
"td",
".",
"series",
".",
"fromrdd",
"(",
"result",
"[",
"rdd_index",
"]",
")",
"if",
"result_len",
"==",
"1",
":",
"return",
"result",
"[",
"0",
"]",
"else",
":",
"return",
"result",
"return",
"dec"
] | Decorator for functions so they could get as input a thunder.Images / thunder.Series object,
while they are expecting an rdd. Also will return the data from rdd to the appropriate type
Assumes only one input object of type Images/Series, and up to one output object of type RDD
:param func: function to decorate
:return: decorated function | [
"Decorator",
"for",
"functions",
"so",
"they",
"could",
"get",
"as",
"input",
"a",
"thunder",
".",
"Images",
"/",
"thunder",
".",
"Series",
"object",
"while",
"they",
"are",
"expecting",
"an",
"rdd",
".",
"Also",
"will",
"return",
"the",
"data",
"from",
"rdd",
"to",
"the",
"appropriate",
"type",
"Assumes",
"only",
"one",
"input",
"object",
"of",
"type",
"Images",
"/",
"Series",
"and",
"up",
"to",
"one",
"output",
"object",
"of",
"type",
"RDD",
":",
"param",
"func",
":",
"function",
"to",
"decorate",
":",
"return",
":",
"decorated",
"function"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L156-L251
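
A sketch of what the decorator buys you: a function written against a plain key/value RDD becomes directly callable on a thunder `Images` (or `Series`) object, and its RDD result is wrapped back into the same type:

```python
@thunder_decorator
def double_values(rdd):
    return rdd.mapValues(lambda v: v * 2)

# images = td.images.fromrandom(engine=sc)
# doubled = double_values(images)   # still a thunder Images object
```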

boazmohar/pySparkUtils | pySparkUtils/utils.py | balanced_repartition

```python
def balanced_repartition(data, partitions):
    """ balanced_repartition(data, partitions)

    Repartitions an RDD making sure data is evenly distributed across partitions
    for Spark version < 2.1 (see: https://issues.apache.org/jira/browse/SPARK-17817)
    or < 2.3 when #partitions is power of 2 (see: https://issues.apache.org/jira/browse/SPARK-21782)

    :param data: RDD
    :param partitions: number of partitions to use
    :return: repartitioned data
    """
    def repartition(data_inner, partitions_inner):
        # repartition by zipping an index to the data, repartition by % on it and removing it
        data_inner = data_inner.zipWithIndex().map(lambda x: (x[1], x[0]))
        data_inner = data_inner.partitionBy(partitions_inner, lambda x: x % partitions_inner)
        return data_inner.map(lambda x: x[1])

    if isinstance(data, RDD):
        return repartition(data, partitions)
    else:
        raise ValueError('Wrong data type, expected [RDD, Images, Series] got: %s' % type(data))
```
""" balanced_repartition(data, partitions)
Reparations an RDD making sure data is evenly distributed across partitions
for Spark version < 2.1 (see: https://issues.apache.org/jira/browse/SPARK-17817)
or < 2.3 when #partitions is power of 2 (see: https://issues.apache.org/jira/browse/SPARK-21782)
:param data: RDD
:param partitions: number of partition to use
:return: repartitioned data
"""
def repartition(data_inner, partitions_inner):
# repartition by zipping an index to the data, repartition by % on it and removing it
data_inner = data_inner.zipWithIndex().map(lambda x: (x[1], x[0]))
data_inner = data_inner.partitionBy(partitions_inner, lambda x: x % partitions_inner)
return data_inner.map(lambda x: x[1])
if isinstance(data, RDD):
return repartition(data, partitions)
else:
raise ValueError('Wrong data type, expected [RDD, Images, Series] got: %s' % type(data)) | [
"def",
"balanced_repartition",
"(",
"data",
",",
"partitions",
")",
":",
"def",
"repartition",
"(",
"data_inner",
",",
"partitions_inner",
")",
":",
"# repartition by zipping an index to the data, repartition by % on it and removing it\r",
"data_inner",
"=",
"data_inner",
".",
"zipWithIndex",
"(",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"0",
"]",
")",
")",
"data_inner",
"=",
"data_inner",
".",
"partitionBy",
"(",
"partitions_inner",
",",
"lambda",
"x",
":",
"x",
"%",
"partitions_inner",
")",
"return",
"data_inner",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
"if",
"isinstance",
"(",
"data",
",",
"RDD",
")",
":",
"return",
"repartition",
"(",
"data",
",",
"partitions",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Wrong data type, expected [RDD, Images, Series] got: %s'",
"%",
"type",
"(",
"data",
")",
")"
] | balanced_repartition(data, partitions)
Reparations an RDD making sure data is evenly distributed across partitions
for Spark version < 2.1 (see: https://issues.apache.org/jira/browse/SPARK-17817)
or < 2.3 when #partitions is power of 2 (see: https://issues.apache.org/jira/browse/SPARK-21782)
:param data: RDD
:param partitions: number of partition to use
:return: repartitioned data | [
"balanced_repartition",
"(",
"data",
"partitions",
")",
"Reparations",
"an",
"RDD",
"making",
"sure",
"data",
"is",
"evenly",
"distributed",
"across",
"partitions",
"for",
"Spark",
"version",
"<",
"2",
".",
"1",
"(",
"see",
":",
"https",
":",
"//",
"issues",
".",
"apache",
".",
"org",
"/",
"jira",
"/",
"browse",
"/",
"SPARK",
"-",
"17817",
")",
"or",
"<",
"2",
".",
"3",
"when",
"#partitions",
"is",
"power",
"of",
"2",
"(",
"see",
":",
"https",
":",
"//",
"issues",
".",
"apache",
".",
"org",
"/",
"jira",
"/",
"browse",
"/",
"SPARK",
"-",
"21782",
")",
":",
"param",
"data",
":",
"RDD",
":",
"param",
"partitions",
":",
"number",
"of",
"partition",
"to",
"use",
":",
"return",
":",
"repartitioned",
"data"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L255-L275
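
A quick sketch of the effect, spreading an RDD evenly over more partitions:

```python
data = sc.parallelize(range(100), 2)
balanced = balanced_repartition(data, 4)
# balanced.glom().map(len).collect() -> [25, 25, 25, 25]
```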

boazmohar/pySparkUtils | pySparkUtils/utils.py | regroup

```python
def regroup(rdd, groups=10, check_first=False):
    """ Regroup an rdd using a new key added that is 0 ... number of groups - 1

    :param rdd: input rdd as a (k,v) pairs
    :param groups: number of groups to concatenate to
    :param check_first: check if first value is a key value pair.
    :return: a new rdd in the form of (groupNum, list of (k, v) in that group) pairs

    Example:

    >>> data = sc.parallelize(zip(range(4), range(4)))
    >>> data.collect()
    [(0, 0), (1, 1), (2, 2), (3, 3)]
    >>> data2 = regroup(data, 2)
    >>> data2.collect()
    [(0, [(0, 0), (2, 2)]), (1, [(1, 1), (3, 3)])]
    """
    if check_first:
        first = rdd.first()
        if isinstance(first, (list, tuple, collections.Iterable)):
            if len(first) != 2:
                raise ValueError('first item was not of length 2: %d' % len(first))
        else:
            raise ValueError('first item was wrong type: %s' % type(first))
    rdd = rdd.map(lambda kv: (kv[0] % groups, (kv[0], kv[1])), preservesPartitioning=True)
    return rdd.groupByKey().mapValues(list)
```
""" Regroup an rdd using a new key added that is 0 ... number of groups - 1
:param rdd: input rdd as a (k,v) pairs
:param groups: number of groups to concatenate to
:param check_first: check if first value is a key value pair.
:return: a new rdd in the form of (groupNum, list of (k, v) in that group) pairs
Example:
>>> data = sc.parallelize(zip(range(4), range(4)))
>>> data.collect()
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> data2 = regroup(data, 2)
>>> data2.collect()
[(0, [(0, 0), (2, 2)]), (1, [(1, 1), (3, 3)])]
"""
if check_first:
first = rdd.first()
if isinstance(first, (list, tuple, collections.Iterable)):
if len(first) != 2:
raise ValueError('first item was not not length 2: %d' % len(first))
else:
raise ValueError('first item was wrong type: %s' % type(first))
rdd = rdd.map(lambda kv: (kv[0] % groups, (kv[0], kv[1])), preservesPartitioning=True)
return rdd.groupByKey().mapValues(list) | [
"def",
"regroup",
"(",
"rdd",
",",
"groups",
"=",
"10",
",",
"check_first",
"=",
"False",
")",
":",
"if",
"check_first",
":",
"first",
"=",
"rdd",
".",
"first",
"(",
")",
"if",
"isinstance",
"(",
"first",
",",
"(",
"list",
",",
"tuple",
",",
"collections",
".",
"Iterable",
")",
")",
":",
"if",
"len",
"(",
"first",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'first item was not not length 2: %d'",
"%",
"len",
"(",
"first",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'first item was wrong type: %s'",
"%",
"type",
"(",
"first",
")",
")",
"rdd",
"=",
"rdd",
".",
"map",
"(",
"lambda",
"kv",
":",
"(",
"kv",
"[",
"0",
"]",
"%",
"groups",
",",
"(",
"kv",
"[",
"0",
"]",
",",
"kv",
"[",
"1",
"]",
")",
")",
",",
"preservesPartitioning",
"=",
"True",
")",
"return",
"rdd",
".",
"groupByKey",
"(",
")",
".",
"mapValues",
"(",
"list",
")"
] | Regroup an rdd using a new key added that is 0 ... number of groups - 1
:param rdd: input rdd as a (k,v) pairs
:param groups: number of groups to concatenate to
:param check_first: check if first value is a key value pair.
:return: a new rdd in the form of (groupNum, list of (k, v) in that group) pairs
Example:
>>> data = sc.parallelize(zip(range(4), range(4)))
>>> data.collect()
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> data2 = regroup(data, 2)
>>> data2.collect()
[(0, [(0, 0), (2, 2)]), (1, [(1, 1), (3, 3)])] | [
"Regroup",
"an",
"rdd",
"using",
"a",
"new",
"key",
"added",
"that",
"is",
"0",
"...",
"number",
"of",
"groups",
"-",
"1",
":",
"param",
"rdd",
":",
"input",
"rdd",
"as",
"a",
"(",
"k",
"v",
")",
"pairs",
":",
"param",
"groups",
":",
"number",
"of",
"groups",
"to",
"concatenate",
"to",
":",
"param",
"check_first",
":",
"check",
"if",
"first",
"value",
"is",
"a",
"key",
"value",
"pair",
".",
":",
"return",
":",
"a",
"new",
"rdd",
"in",
"the",
"form",
"of",
"(",
"groupNum",
"list",
"of",
"(",
"k",
"v",
")",
"in",
"that",
"group",
")",
"pairs",
"Example",
":",
">>>",
"data",
"=",
"sc",
".",
"parallelize",
"(",
"zip",
"(",
"range",
"(",
"4",
")",
"range",
"(",
"4",
")))",
">>>",
"data",
".",
"collect",
"()",
"[",
"(",
"0",
"0",
")",
"(",
"1",
"1",
")",
"(",
"2",
"2",
")",
"(",
"3",
"3",
")",
"]",
">>>",
"data2",
"=",
"regroup",
"(",
"data",
"2",
")",
">>>",
"data2",
".",
"collect",
"()",
"[",
"(",
"0",
"[",
"(",
"0",
"0",
")",
"(",
"2",
"2",
")",
"]",
")",
"(",
"1",
"[",
"(",
"1",
"1",
")",
"(",
"3",
"3",
")",
"]",
")",
"]"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L279-L305
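
Continuing the docstring example, a sketch of consuming the groups, here summing the values collected under each group key (collect order may vary):

```python
grouped = regroup(sc.parallelize(list(zip(range(6), range(6)))), groups=3)
sums = grouped.mapValues(lambda kvs: sum(v for _, v in kvs))
# sums.collect() -> [(0, 3), (1, 5), (2, 7)] in some order
```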

boazmohar/pySparkUtils | pySparkUtils/utils.py | save_rdd_as_pickle

```python
def save_rdd_as_pickle(rdd, path, batch_size=10, overwrite=False):
    """ Saves an rdd by grouping all the records of each partition as one pickle file

    :param rdd: rdd to save
    :param path: where to save
    :param batch_size: batch size to pass to spark saveAsPickleFile
    :param overwrite: if the directory exists, whether to overwrite
    """
    if os.path.isdir(path):
        if overwrite:
            logging.getLogger('pySparkUtils').info('Deleting files from: %s' % path)
            shutil.rmtree(path)
            logging.getLogger('pySparkUtils').info('Done deleting files from: %s' % path)
        else:
            logging.getLogger('pySparkUtils').error('Directory %s already exists '
                                                    'and overwrite is false' % path)
            raise IOError('Directory %s already exists and overwrite is false'
                          % path)
    rdd.glom().saveAsPickleFile(path, batchSize=batch_size)
    logging.getLogger('pySparkUtils').info('Saved rdd as pickle to: %s' % path)
```
""" Saves an rdd by grouping all the records of each partition as one pickle file
:param rdd: rdd to save
:param path: where to save
:param batch_size: batch size to pass to spark saveAsPickleFile
:param overwrite: if directory exist whether to overwrite
"""
if os.path.isdir(path):
if overwrite:
logging.getLogger('pySparkUtils').info('Deleting files from: %s' % path)
shutil.rmtree(path)
logging.getLogger('pySparkUtils').info('Done deleting files from: %s' % path)
else:
logging.getLogger('pySparkUtils').error('Directory %s already exists '
'and overwrite is false' % path)
raise IOError('Directory %s already exists and overwrite is false'
% path)
rdd.glom().saveAsPickleFile(path, batchSize=batch_size)
logging.getLogger('pySparkUtils').info('Saved rdd as pickle to: %s' % path) | [
"def",
"save_rdd_as_pickle",
"(",
"rdd",
",",
"path",
",",
"batch_size",
"=",
"10",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"if",
"overwrite",
":",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"info",
"(",
"'Deleting files from: %s'",
"%",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"info",
"(",
"'Done deleting files from: %s'",
"%",
"path",
")",
"else",
":",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"error",
"(",
"'Directory %s already exists '",
"'and overwrite is false'",
"%",
"path",
")",
"raise",
"IOError",
"(",
"'Directory %s already exists and overwrite is false'",
"%",
"path",
")",
"rdd",
".",
"glom",
"(",
")",
".",
"saveAsPickleFile",
"(",
"path",
",",
"batchSize",
"=",
"batch_size",
")",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"info",
"(",
"'Saved rdd as pickle to: %s'",
"%",
"path",
")"
] | Saves an rdd by grouping all the records of each partition as one pickle file
:param rdd: rdd to save
:param path: where to save
:param batch_size: batch size to pass to spark saveAsPickleFile
:param overwrite: if directory exist whether to overwrite | [
"Saves",
"an",
"rdd",
"by",
"grouping",
"all",
"the",
"records",
"of",
"each",
"partition",
"as",
"one",
"pickle",
"file",
":",
"param",
"rdd",
":",
"rdd",
"to",
"save",
":",
"param",
"path",
":",
"where",
"to",
"save",
":",
"param",
"batch_size",
":",
"batch",
"size",
"to",
"pass",
"to",
"spark",
"saveAsPickleFile",
":",
"param",
"overwrite",
":",
"if",
"directory",
"exist",
"whether",
"to",
"overwrite"

split: train | source: https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L309-L328
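
A save sketch; the path is illustrative:

```python
rdd = sc.parallelize(list(enumerate(range(100))), 4)   # (key, value) records
save_rdd_as_pickle(rdd, '/tmp/demo_pickle', overwrite=True)
```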

boazmohar/pySparkUtils | pySparkUtils/utils.py | load_rdd_from_pickle

```python
def load_rdd_from_pickle(sc, path, min_partitions=None, return_type='images'):
    """ Loads an rdd that was saved as one pickle file per partition

    :param sc: Spark Context
    :param path: directory to load from
    :param min_partitions: minimum number of partitions. If None will be sc.defaultParallelism
    :param return_type: what to return:
        'rdd' - RDD
        'images' - Thunder Images object
        'series' - Thunder Series object
    :return: based on return type.
    """
    if min_partitions is None:
        min_partitions = sc.defaultParallelism
    rdd = sc.pickleFile(path, minPartitions=min_partitions)
    rdd = rdd.flatMap(lambda x: x)
    if return_type == 'images':
        result = td.images.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'series':
        result = td.series.fromrdd(rdd).repartition(min_partitions)
    elif return_type == 'rdd':
        result = rdd.repartition(min_partitions)
    else:
        raise ValueError('return_type not supported: %s' % return_type)
    logging.getLogger('pySparkUtils').info('Loaded rdd from: %s as type: %s'
                                           % (path, return_type))
    return result
```
""" Loads an rdd that was saved as one pickle file per partition
:param sc: Spark Context
:param path: directory to load from
:param min_partitions: minimum number of partitions. If None will be sc.defaultParallelism
:param return_type: what to return:
'rdd' - RDD
'images' - Thunder Images object
'series' - Thunder Series object
:return: based on return type.
"""
if min_partitions is None:
min_partitions = sc.defaultParallelism
rdd = sc.pickleFile(path, minPartitions=min_partitions)
rdd = rdd.flatMap(lambda x: x)
if return_type == 'images':
result = td.images.fromrdd(rdd).repartition(min_partitions)
elif return_type == 'series':
result = td.series.fromrdd(rdd).repartition(min_partitions)
elif return_type == 'rdd':
result = rdd.repartition(min_partitions)
else:
raise ValueError('return_type not supported: %s' % return_type)
logging.getLogger('pySparkUtils').info('Loaded rdd from: %s as type: %s'
% (path, return_type))
return result | [
"def",
"load_rdd_from_pickle",
"(",
"sc",
",",
"path",
",",
"min_partitions",
"=",
"None",
",",
"return_type",
"=",
"'images'",
")",
":",
"if",
"min_partitions",
"is",
"None",
":",
"min_partitions",
"=",
"sc",
".",
"defaultParallelism",
"rdd",
"=",
"sc",
".",
"pickleFile",
"(",
"path",
",",
"minPartitions",
"=",
"min_partitions",
")",
"rdd",
"=",
"rdd",
".",
"flatMap",
"(",
"lambda",
"x",
":",
"x",
")",
"if",
"return_type",
"==",
"'images'",
":",
"result",
"=",
"td",
".",
"images",
".",
"fromrdd",
"(",
"rdd",
")",
".",
"repartition",
"(",
"min_partitions",
")",
"elif",
"return_type",
"==",
"'series'",
":",
"result",
"=",
"td",
".",
"series",
".",
"fromrdd",
"(",
"rdd",
")",
".",
"repartition",
"(",
"min_partitions",
")",
"elif",
"return_type",
"==",
"'rdd'",
":",
"result",
"=",
"rdd",
".",
"repartition",
"(",
"min_partitions",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'return_type not supported: %s'",
"%",
"return_type",
")",
"logging",
".",
"getLogger",
"(",
"'pySparkUtils'",
")",
".",
"info",
"(",
"'Loaded rdd from: %s as type: %s'",
"%",
"(",
"path",
",",
"return_type",
")",
")",
"return",
"result"
] | Loads an rdd that was saved as one pickle file per partition
:param sc: Spark Context
:param path: directory to load from
:param min_partitions: minimum number of partitions. If None will be sc.defaultParallelism
:param return_type: what to return:
'rdd' - RDD
'images' - Thunder Images object
'series' - Thunder Series object
:return: based on return type. | [
"Loads",
"an",
"rdd",
"that",
"was",
"saved",
"as",
"one",
"pickle",
"file",
"per",
"partition",
":",
"param",
"sc",
":",
"Spark",
"Context",
":",
"param",
"path",
":",
"directory",
"to",
"load",
"from",
":",
"param",
"min_partitions",
":",
"minimum",
"number",
"of",
"partitions",
".",
"If",
"None",
"will",
"be",
"sc",
".",
"defaultParallelism",
":",
"param",
"return_type",
":",
"what",
"to",
"return",
":",
"rdd",
"-",
"RDD",
"images",
"-",
"Thunder",
"Images",
"object",
"series",
"-",
"Thunder",
"Series",
"object",
":",
"return",
":",
"based",
"on",
"return",
"type",
"."
] | train | https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/utils.py#L331-L357 |
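An illustrative call of load_rdd_from_pickle, not part of the dataset row above; the SparkContext and directory are assumptions, the import path follows the row's func_code_url, and the directory is expected to hold per-partition pickle files written by the matching save routine.

from pyspark import SparkContext
from pySparkUtils.utils import load_rdd_from_pickle  # module path taken from the row's func_code_url

sc = SparkContext.getOrCreate()

# flatMap undoes the per-partition grouping, so 'rdd' has one element per original record
rdd = load_rdd_from_pickle(sc, '/tmp/example_pickles', return_type='rdd')

# 'images' / 'series' wrap the same records as Thunder objects and assume the records
# are in the (key, array) layout those constructors expect
images = load_rdd_from_pickle(sc, '/tmp/example_pickles', return_type='images')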
Metatab/metapack | metapack/jupyter/pandas.py | MetatabDataFrame.geo | def geo(self):
"""Return a geopandas dataframe"""
import geopandas as gpd
from shapely.geometry.polygon import BaseGeometry
from shapely.wkt import loads
gdf = gpd.GeoDataFrame(self)
first = next(gdf.iterrows())[1].geometry
if isinstance(first, str):
# We have a GeoDataframe, but the geometry column is still strings, so
# it must be converted
shapes = [ loads(row['geometry']) for i, row in gdf.iterrows()]
elif not isinstance(first, BaseGeometry):
# If we are reading a metatab package, the geometry column's type should be
# 'geometry' which will give the geometry values class type of
# rowpipe.valuetype.geo.ShapeValue. However, there are other
# types of objects that have a 'shape' property.
shapes = [row['geometry'].shape for i, row in gdf.iterrows()]
else:
shapes = gdf['geometry']
gdf['geometry'] = gpd.GeoSeries(shapes)
gdf.set_geometry('geometry')
return gdf | python | def geo(self):
"""Return a geopandas dataframe"""
import geopandas as gpd
from shapely.geometry.polygon import BaseGeometry
from shapely.wkt import loads
gdf = gpd.GeoDataFrame(self)
first = next(gdf.iterrows())[1].geometry
if isinstance(first, str):
# We have a GeoDataframe, but the geometry column is still strings, so
# it must be converted
shapes = [ loads(row['geometry']) for i, row in gdf.iterrows()]
elif not isinstance(first, BaseGeometry):
# If we are reading a metatab package, the geometry column's type should be
# 'geometry' which will give the geometry values class type of
# rowpipe.valuetype.geo.ShapeValue. However, there are other
# types of objects that have a 'shape' property.
shapes = [row['geometry'].shape for i, row in gdf.iterrows()]
else:
shapes = gdf['geometry']
gdf['geometry'] = gpd.GeoSeries(shapes)
gdf.set_geometry('geometry')
return gdf | [
"def",
"geo",
"(",
"self",
")",
":",
"import",
"geopandas",
"as",
"gpd",
"from",
"shapely",
".",
"geometry",
".",
"polygon",
"import",
"BaseGeometry",
"from",
"shapely",
".",
"wkt",
"import",
"loads",
"gdf",
"=",
"gpd",
".",
"GeoDataFrame",
"(",
"self",
")",
"first",
"=",
"next",
"(",
"gdf",
".",
"iterrows",
"(",
")",
")",
"[",
"1",
"]",
".",
"geometry",
"if",
"isinstance",
"(",
"first",
",",
"str",
")",
":",
"# We have a GeoDataframe, but the geometry column is still strings, so",
"# it must be converted",
"shapes",
"=",
"[",
"loads",
"(",
"row",
"[",
"'geometry'",
"]",
")",
"for",
"i",
",",
"row",
"in",
"gdf",
".",
"iterrows",
"(",
")",
"]",
"elif",
"not",
"isinstance",
"(",
"first",
",",
"BaseGeometry",
")",
":",
"# If we are reading a metatab package, the geometry column's type should be",
"# 'geometry' which will give the geometry values class type of",
"# rowpipe.valuetype.geo.ShapeValue. However, there are other",
"# types of objects that have a 'shape' property.",
"shapes",
"=",
"[",
"row",
"[",
"'geometry'",
"]",
".",
"shape",
"for",
"i",
",",
"row",
"in",
"gdf",
".",
"iterrows",
"(",
")",
"]",
"else",
":",
"shapes",
"=",
"gdf",
"[",
"'geometry'",
"]",
"gdf",
"[",
"'geometry'",
"]",
"=",
"gpd",
".",
"GeoSeries",
"(",
"shapes",
")",
"gdf",
".",
"set_geometry",
"(",
"'geometry'",
")",
"return",
"gdf"
] | Return a geopandas dataframe | [
"Return",
"a",
"geopandas",
"dataframe"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/pandas.py#L51-L79 |
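A standalone sketch of the WKT-string branch handled by the geo property above, not part of the dataset row; the sample frame is an assumption. It also makes explicit that geopandas' set_geometry returns a new frame unless inplace=True is used (the row above discards the return value, which usually still works because the default geometry column name is 'geometry').

import geopandas as gpd
import pandas as pd
from shapely.wkt import loads

df = pd.DataFrame({'name': ['a', 'b'],
                   'geometry': ['POINT (1 2)', 'POINT (3 4)']})  # WKT strings, as in the str branch above

gdf = gpd.GeoDataFrame(df)
gdf['geometry'] = gpd.GeoSeries([loads(wkt) for wkt in gdf['geometry']])
gdf = gdf.set_geometry('geometry')  # returns a new frame unless inplace=True is passed

print(gdf.geometry.iloc[0].x)  # 1.0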
Metatab/metapack | metapack/jupyter/pandas.py | MetatabDataFrame.rows | def rows(self):
"""Yield rows like a partition does, with a header first, then rows. """
yield [self.index.name] + list(self.columns)
for t in self.itertuples():
yield list(t) | python | def rows(self):
"""Yield rows like a partition does, with a header first, then rows. """
yield [self.index.name] + list(self.columns)
for t in self.itertuples():
yield list(t) | [
"def",
"rows",
"(",
"self",
")",
":",
"yield",
"[",
"self",
".",
"index",
".",
"name",
"]",
"+",
"list",
"(",
"self",
".",
"columns",
")",
"for",
"t",
"in",
"self",
".",
"itertuples",
"(",
")",
":",
"yield",
"list",
"(",
"t",
")"
] | Yield rows like a partition does, with a header first, then rows. | [
"Yield",
"rows",
"like",
"a",
"partition",
"does",
"with",
"a",
"header",
"first",
"then",
"rows",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/pandas.py#L128-L134 |
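A small sketch of the header-then-rows iteration that the rows property implements, not part of the dataset row; plain pandas stands in for MetatabDataFrame (which appears to subclass it), and the sample frame is an assumption.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
df.index.name = 'idx'

def rows(frame):
    # header first: index name plus column names ...
    yield [frame.index.name] + list(frame.columns)
    # ... then each row as a list; itertuples() includes the index as the first element
    for t in frame.itertuples():
        yield list(t)

for r in rows(df):
    print(r)
# ['idx', 'a', 'b']
# [0, 1, 3]
# [1, 2, 4]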
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | _matches_node_set_props | def _matches_node_set_props(type_info, node_set_props):
"""
Check whether the node_set properties match the given model type definition
"""
matches = None
for key in node_set_props:
ns_val = node_set_props[key]
if key in type_info:
if ns_val==type_info[key]:
if matches:
matches = matches and True
else:
matches = True
else:
matches = False
return matches | python | def _matches_node_set_props(type_info, node_set_props):
"""
Check whether the node_set properties match the given model type definition
"""
matches = None
for key in node_set_props:
ns_val = node_set_props[key]
if key in type_info:
if ns_val==type_info[key]:
if matches:
matches = matches and True
else:
matches = True
else:
matches = False
return matches | [
"def",
"_matches_node_set_props",
"(",
"type_info",
",",
"node_set_props",
")",
":",
"matches",
"=",
"None",
"for",
"key",
"in",
"node_set_props",
":",
"ns_val",
"=",
"node_set_props",
"[",
"key",
"]",
"if",
"key",
"in",
"type_info",
":",
"if",
"ns_val",
"==",
"type_info",
"[",
"key",
"]",
":",
"if",
"matches",
":",
"matches",
"=",
"matches",
"and",
"True",
"else",
":",
"matches",
"=",
"True",
"else",
":",
"matches",
"=",
"False",
"return",
"matches"
] | Check whether the node_set properties match the given model type definition | [
"Check",
"whether",
"the",
"node_set",
"properties",
"match",
"the",
"given",
"model",
"type",
"definition"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L62-L77 |
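An illustrative sketch of the tri-state result of _matches_node_set_props, not part of the dataset row; the dictionaries are assumptions. It returns None when no node_set key appears in the type info, True when the overlapping keys agree, and False on a disagreement; note that because a falsy result is overwritten by a later agreeing key, the outcome can depend on the order of keys in node_set_props.

from neuromllite.SonataReader import _matches_node_set_props  # module path taken from the row's func_code_url

type_info = {'model_type': 'point_process', 'ei': 'e', 'location': 'VisL4'}

print(_matches_node_set_props(type_info, {'ei': 'e'}))                # True
print(_matches_node_set_props(type_info, {'ei': 'i'}))                # False
print(_matches_node_set_props(type_info, {'population': 'internal'})) # None - no shared key
print(_matches_node_set_props(type_info, {'ei': 'e', 'location': 'VisL5'}))  # False - one key disagrees
print(_matches_node_set_props(type_info, {'location': 'VisL5', 'ei': 'e'}))  # True - order-dependent quirk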
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | get_neuroml_from_sonata | def get_neuroml_from_sonata(sonata_filename, id, generate_lems = True, format='xml'):
"""
Return a NeuroMLDocument with (most of) the contents of the Sonata model
"""
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
sr = SonataReader(filename=sonata_filename, id=id)
sr.parse(neuroml_handler)
nml_doc = neuroml_handler.get_nml_doc()
sr.add_neuroml_components(nml_doc)
if format == 'xml':
nml_file_name = '%s.net.nml'%id
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
elif format == 'hdf5':
nml_file_name = '%s.net.nml.h5'%id
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v('Written to: %s'%nml_file_name)
if generate_lems:
lems_file_name = sr.generate_lems_file(nml_file_name, nml_doc)
return sr, lems_file_name, nml_file_name, nml_doc
return nml_doc | python | def get_neuroml_from_sonata(sonata_filename, id, generate_lems = True, format='xml'):
"""
Return a NeuroMLDocument with (most of) the contents of the Sonata model
"""
from neuroml.hdf5.NetworkBuilder import NetworkBuilder
neuroml_handler = NetworkBuilder()
sr = SonataReader(filename=sonata_filename, id=id)
sr.parse(neuroml_handler)
nml_doc = neuroml_handler.get_nml_doc()
sr.add_neuroml_components(nml_doc)
if format == 'xml':
nml_file_name = '%s.net.nml'%id
from neuroml.writers import NeuroMLWriter
NeuroMLWriter.write(nml_doc, nml_file_name)
elif format == 'hdf5':
nml_file_name = '%s.net.nml.h5'%id
from neuroml.writers import NeuroMLHdf5Writer
NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
print_v('Written to: %s'%nml_file_name)
if generate_lems:
lems_file_name = sr.generate_lems_file(nml_file_name, nml_doc)
return sr, lems_file_name, nml_file_name, nml_doc
return nml_doc | [
"def",
"get_neuroml_from_sonata",
"(",
"sonata_filename",
",",
"id",
",",
"generate_lems",
"=",
"True",
",",
"format",
"=",
"'xml'",
")",
":",
"from",
"neuroml",
".",
"hdf5",
".",
"NetworkBuilder",
"import",
"NetworkBuilder",
"neuroml_handler",
"=",
"NetworkBuilder",
"(",
")",
"sr",
"=",
"SonataReader",
"(",
"filename",
"=",
"sonata_filename",
",",
"id",
"=",
"id",
")",
"sr",
".",
"parse",
"(",
"neuroml_handler",
")",
"nml_doc",
"=",
"neuroml_handler",
".",
"get_nml_doc",
"(",
")",
"sr",
".",
"add_neuroml_components",
"(",
"nml_doc",
")",
"if",
"format",
"==",
"'xml'",
":",
"nml_file_name",
"=",
"'%s.net.nml'",
"%",
"id",
"from",
"neuroml",
".",
"writers",
"import",
"NeuroMLWriter",
"NeuroMLWriter",
".",
"write",
"(",
"nml_doc",
",",
"nml_file_name",
")",
"elif",
"format",
"==",
"'hdf5'",
":",
"nml_file_name",
"=",
"'%s.net.nml.h5'",
"%",
"id",
"from",
"neuroml",
".",
"writers",
"import",
"NeuroMLHdf5Writer",
"NeuroMLHdf5Writer",
".",
"write",
"(",
"nml_doc",
",",
"nml_file_name",
")",
"print_v",
"(",
"'Written to: %s'",
"%",
"nml_file_name",
")",
"if",
"generate_lems",
":",
"lems_file_name",
"=",
"sr",
".",
"generate_lems_file",
"(",
"nml_file_name",
",",
"nml_doc",
")",
"return",
"sr",
",",
"lems_file_name",
",",
"nml_file_name",
",",
"nml_doc",
"return",
"nml_doc"
] | Return a NeuroMLDocument with (most of) the contents of the Sonata model | [
"Return",
"a",
"NeuroMLDocument",
"with",
"(",
"most",
"of",
")",
"the",
"contents",
"of",
"the",
"Sonata",
"model"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L901-L934 |
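An illustrative call of get_neuroml_from_sonata, not part of the dataset row; the config file name and network id are assumptions. With generate_lems=True it returns the reader plus the LEMS and NeuroML file names and the document; otherwise only the NeuroMLDocument is returned.

from neuromllite.SonataReader import get_neuroml_from_sonata

# hypothetical Sonata config
sr, lems_file, nml_file, nml_doc = get_neuroml_from_sonata('config.json',
                                                           id='example_net',
                                                           format='xml')
print(nml_file)  # example_net.net.nml

# HDF5 output and no LEMS file: only the NeuroMLDocument comes back
nml_doc_only = get_neuroml_from_sonata('config.json', id='example_net',
                                       generate_lems=False, format='hdf5')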
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | SonataReader.subs | def subs(self, path):
"""
Search the strings in a config file for a substitutable value, e.g.
"morphologies_dir": "$COMPONENT_DIR/morphologies",
"""
#print_v('Checking for: \n %s, \n %s \n in %s'%(self.substitutes,self.init_substitutes,path))
if type(path) == int or type(path) == float:
return path
for s in self.init_substitutes:
if path.startswith(s):
path = path.replace(s,self.init_substitutes[s], 1)
#print_v(' So far: %s'%path)
for s in self.substitutes:
if s in path:
path = path.replace(s,self.substitutes[s])
#print_v(' Returning: %s'%path)
return path | python | def subs(self, path):
"""
Search the strings in a config file for a substitutable value, e.g.
"morphologies_dir": "$COMPONENT_DIR/morphologies",
"""
#print_v('Checking for: \n %s, \n %s \n in %s'%(self.substitutes,self.init_substitutes,path))
if type(path) == int or type(path) == float:
return path
for s in self.init_substitutes:
if path.startswith(s):
path = path.replace(s,self.init_substitutes[s], 1)
#print_v(' So far: %s'%path)
for s in self.substitutes:
if s in path:
path = path.replace(s,self.substitutes[s])
#print_v(' Returning: %s'%path)
return path | [
"def",
"subs",
"(",
"self",
",",
"path",
")",
":",
"#print_v('Checking for: \\n %s, \\n %s \\n in %s'%(self.substitutes,self.init_substitutes,path))",
"if",
"type",
"(",
"path",
")",
"==",
"int",
"or",
"type",
"(",
"path",
")",
"==",
"float",
":",
"return",
"path",
"for",
"s",
"in",
"self",
".",
"init_substitutes",
":",
"if",
"path",
".",
"startswith",
"(",
"s",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"s",
",",
"self",
".",
"init_substitutes",
"[",
"s",
"]",
",",
"1",
")",
"#print_v(' So far: %s'%path)",
"for",
"s",
"in",
"self",
".",
"substitutes",
":",
"if",
"s",
"in",
"path",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"s",
",",
"self",
".",
"substitutes",
"[",
"s",
"]",
")",
"#print_v(' Returning: %s'%path)",
"return",
"path"
] | Search the strings in a config file for a substitutable value, e.g.
"morphologies_dir": "$COMPONENT_DIR/morphologies", | [
"Search",
"the",
"strings",
"in",
"a",
"config",
"file",
"for",
"a",
"substitutable",
"value",
"e",
".",
"g",
".",
"morphologies_dir",
":",
"$COMPONENT_DIR",
"/",
"morphologies"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L117-L133 |
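A standalone sketch of the two-stage substitution that subs performs, not part of the dataset row; the dictionaries mimic the init_substitutes/substitutes built in parse below and their values are assumptions. A leading '.' or '../' anchor is replaced once at the start of the path, then manifest variables are replaced wherever they occur.

init_substitutes = {'.': '/home/user/model/'}                       # i.e. '%s/' % dirname(config file)
substitutes = {'$COMPONENT_DIR': '/home/user/model/components',
               '${configdir}': '/home/user/model'}

def subs(path):
    if isinstance(path, (int, float)):
        return path
    for s in init_substitutes:      # prefix match, replaced a single time
        if path.startswith(s):
            path = path.replace(s, init_substitutes[s], 1)
    for s in substitutes:           # substring match, replaced wherever it appears
        if s in path:
            path = path.replace(s, substitutes[s])
    return path

print(subs('$COMPONENT_DIR/morphologies'))  # /home/user/model/components/morphologies
print(subs('./network/nodes.h5'))           # /home/user/model//network/nodes.h5 (doubled slash is harmless)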
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | SonataReader.parse | def parse(self, handler):
"""
Main method to parse the Sonata files and call the appropriate methods
in the handler
"""
########################################################################
# load the main configuration scripts
main_config_filename = os.path.abspath(self.parameters['filename'])
config = load_json(main_config_filename)
self.init_substitutes = {'.':'%s/'%os.path.dirname(main_config_filename),
'../':'%s/'%os.path.dirname(os.path.dirname(main_config_filename))}
self.substitutes = {'${configdir}':'%s'%os.path.dirname(main_config_filename)}
if 'network' in config:
self.network_config = load_json(self.subs(config['network']))
else:
self.network_config = config
if 'simulation' in config:
self.simulation_config = load_json(self.subs(config['simulation']))
else:
self.simulation_config = None
for m in self.network_config['manifest']:
path = self.subs(self.network_config['manifest'][m])
self.substitutes[m] = path
if 'id' in self.parameters:
id = self.parameters['id']
else:
id = 'SonataNetwork'
if id[0].isdigit(): # id like 9_cells is not a valid id for NeuroML
id='NML2_%s'%id
########################################################################
# Feed the handler the info on the network
self.handler = handler
notes = "Network read in from Sonata: %s"%main_config_filename
handler.handle_document_start(id, notes)
handler.handle_network(id, notes)
self.node_types = {}
########################################################################
# Get info from nodes files
for n in self.network_config['networks']['nodes']:
nodes_file = self.subs(n['nodes_file'])
node_types_file = self.subs(n['node_types_file'])
print_v("\nLoading nodes from %s and %s"%(nodes_file,node_types_file))
h5file=tables.open_file(nodes_file,mode='r')
print_v("Opened HDF5 file: %s"%(h5file.filename))
self.parse_group(h5file.root.nodes)
h5file.close()
self.node_types[self.current_sonata_pop] = load_csv_props(node_types_file)
self.current_sonata_pop = None
########################################################################
# Get info from edges files
self.edges_info = {}
self.conn_info = {}
if 'edges' in self.network_config['networks']:
for e in self.network_config['networks']['edges']:
edges_file = self.subs(e['edges_file'])
edge_types_file = self.subs(e['edge_types_file'])
print_v("\nLoading edges from %s and %s"%(edges_file,edge_types_file))
h5file=tables.open_file(edges_file,mode='r')
print_v("Opened HDF5 file: %s"%(h5file.filename))
self.parse_group(h5file.root.edges)
h5file.close()
self.edges_info[self.current_edge] = load_csv_props(edge_types_file)
self.current_edge = None
########################################################################
# Use extracted node/cell info to create populations
for sonata_pop in self.cell_info:
types_vs_pops = {}
for type in self.cell_info[sonata_pop]['type_count']:
node_type_info = self.node_types[sonata_pop][type]
model_name_type = node_type_info['model_name'] if 'model_name' in node_type_info \
else (node_type_info['pop_name'] if 'pop_name' in node_type_info else node_type_info['model_type'])
model_type = node_type_info['model_type']
model_template = node_type_info['model_template'] if 'model_template' in node_type_info else '- None -'
nml_pop_id = '%s_%s_%s'%(sonata_pop,model_name_type,type)
print_v(" - Adding population: %s which has model info: %s"%(nml_pop_id, node_type_info))
size = self.cell_info[sonata_pop]['type_count'][type]
if model_type=='point_process' and model_template=='nrn:IntFire1':
raise Exception('Point process model not currently supported: %s\nTry expressing the I&F cell in NEST format with nest:iaf_psc_alpha'%model_template)
pop_comp = 'cell_%s'%nml_pop_id #model_template.replace(':','_')
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = model_type
dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params']
self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file)
elif model_type=='point_process' and model_template=='nest:iaf_psc_alpha':
pop_comp = 'cell_%s'%nml_pop_id # = model_template.replace(':','_')
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = model_type
self.pop_comp_info[pop_comp]['model_template'] = model_template
dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params']
self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file)
else:
pop_comp = DUMMY_CELL
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = pop_comp
self.nml_pop_vs_comps[nml_pop_id] = pop_comp
properties = {}
properties['type_id']=type
properties['sonata_population']=sonata_pop
properties['region']=sonata_pop
for i in node_type_info:
properties[i]=node_type_info[i]
if i=='ei':
properties['type']=node_type_info[i].upper()
color = '%s %s %s'%(self.myrandom.random(),self.myrandom.random(),self.myrandom.random())
try:
import opencortex.utils.color as occ
interneuron = 'SOM' in nml_pop_id or 'PV' in nml_pop_id
if 'L23' in nml_pop_id:
color = occ.L23_INTERNEURON if interneuron else occ.L23_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L23'))
if 'L4' in nml_pop_id:
color = occ.L4_INTERNEURON if interneuron else occ.L4_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L4'))
if 'L5' in nml_pop_id:
color = occ.L5_INTERNEURON if interneuron else occ.L5_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L5'))
if 'L6' in nml_pop_id:
color = occ.L6_INTERNEURON if interneuron else occ.L6_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L6'))
except:
pass # Don't specify a particular color, use random, not a problem...
properties['color']=color
if True or not 'locations' in self.cell_info[sonata_pop]['0']:
properties={} ############# temp for LEMS...
if model_type != 'virtual':
self.handler.handle_population(nml_pop_id,
pop_comp,
size,
component_obj=None,
properties=properties)
types_vs_pops[type] = nml_pop_id
self.cell_info[sonata_pop]['pop_count'] = {}
self.cell_info[sonata_pop]['pop_map'] = {}
for i in self.cell_info[sonata_pop]['types']:
pop = types_vs_pops[self.cell_info[sonata_pop]['types'][i]]
if not pop in self.cell_info[sonata_pop]['pop_count']:
self.cell_info[sonata_pop]['pop_count'][pop] = 0
index = self.cell_info[sonata_pop]['pop_count'][pop]
self.cell_info[sonata_pop]['pop_map'][i] = (pop, index)
if not pop in self.nml_ids_vs_gids:
self.nml_ids_vs_gids[pop] = {}
self.nml_ids_vs_gids[pop][index] = (sonata_pop, i)
if i in self.cell_info[sonata_pop]['0']['locations']:
if not pop in self.nml_pops_having_locations:
self.nml_pops_having_locations.append(pop)
pos = self.cell_info[sonata_pop]['0']['locations'][i]
#print('Adding pos %i: %s'%(i,pos))
self.handler.handle_location(index,
pop,
pop_comp,
pos['x'] if 'x' in pos and pos['x'] is not None else 0,
pos['y'] if 'y' in pos and pos['y'] is not None else 0,
pos['z'] if 'z' in pos and pos['z'] is not None else 0)
self.cell_info[sonata_pop]['pop_count'][pop]+=1
########################################################################
# Load simulation info into self.simulation_config
if self.simulation_config:
if self.simulation_config:
for m in self.simulation_config['manifest']:
path = self.subs(self.simulation_config['manifest'][m])
self.substitutes[m] = path
for s1 in ['output']:
for k in self.simulation_config[s1]:
self.simulation_config[s1][k] = self.subs(self.simulation_config[s1][k])
for s1 in ['inputs']:
for s2 in self.simulation_config[s1]:
for k in self.simulation_config[s1][s2]:
self.simulation_config[s1][s2][k] = self.subs(self.simulation_config[s1][s2][k])
if 'node_sets_file' in self.simulation_config:
node_sets = load_json(self.subs(self.simulation_config['node_sets_file']))
self.simulation_config['node_sets'] = node_sets
if not 'node_sets' in self.simulation_config:
self.simulation_config['node_sets'] = {}
for sonata_pop in self.cell_info:
self.node_set_mappings[sonata_pop] = {}
for sindex in self.cell_info[sonata_pop]['pop_map']:
nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0]
nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1]
# Add all in this sonata_pop to a 'node_set' named after the sonata_pop
if not nml_pop in self.node_set_mappings[sonata_pop]:
self.node_set_mappings[sonata_pop][nml_pop] = []
self.node_set_mappings[sonata_pop][nml_pop].append(nml_index)
#pp.pprint(self.simulation_config)
#pp.pprint(self.pop_comp_info)
for node_set in self.simulation_config['node_sets']:
self.node_set_mappings[node_set] = {}
node_set_props = self.simulation_config['node_sets'][node_set]
#print_v('===========Checking which cells in pops match node_set: %s = %s'%(node_set,node_set_props))
for sonata_pop in self.cell_info:
for sindex in self.cell_info[sonata_pop]['pop_map']:
#print('Does %s %s match %s?'%(sonata_pop, sindex, node_set_props))
type = self.cell_info[sonata_pop]['types'][sindex]
type_info = self.node_types[sonata_pop][type]
nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0]
nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1]
if 'population' in node_set_props and node_set_props['population'] == sonata_pop:
if 'node_id' in node_set_props and sindex in node_set_props['node_id']:
if not nml_pop in self.node_set_mappings[node_set]:
self.node_set_mappings[node_set][nml_pop] = []
self.node_set_mappings[node_set][nml_pop].append(nml_index)
matches = _matches_node_set_props(type_info, node_set_props)
#print_v('Node %i in %s (NML: %s[%i]) has type %s (%s); matches: %s'%(sindex, sonata_pop, nml_pop, nml_index, type, type_info, matches))
if matches:
if not nml_pop in self.node_set_mappings[node_set]:
self.node_set_mappings[node_set][nml_pop] = []
self.node_set_mappings[node_set][nml_pop].append(nml_index)
##pp.pprint(self.node_set_mappings)
########################################################################
# Extract info from inputs in simulation_config
#pp.pprint(self.simulation_config)
for input in self.simulation_config['inputs']:
info = self.simulation_config['inputs'][input]
#print_v(" - Adding input: %s which has info: %s"%(input, info))
self.input_comp_info[input] = {}
self.input_comp_info[input][info['input_type']] = {}
node_set = info['node_set']
if info['input_type'] == 'current_clamp':
comp = 'PG_%s'%input
self.input_comp_info[input][info['input_type']][comp] = {'amp':info['amp'],'delay':info['delay'],'duration':info['duration']}
for nml_pop_id in self.node_set_mappings[node_set]:
input_list_id = 'il_%s_%s'%(input,nml_pop_id)
indices = self.node_set_mappings[node_set][nml_pop_id]
self.handler.handle_input_list(input_list_id,
nml_pop_id,
comp,
len(indices))
count = 0
for index in indices:
self.handler.handle_single_input(input_list_id,
count,
cellId = index,
segId = 0,
fract = 0.5)
count+=1
elif info['input_type'] == 'spikes':
node_info = self.cell_info[node_set]
from pyneuroml.plot.PlotSpikes import read_sonata_spikes_hdf5_file
ids_times = read_sonata_spikes_hdf5_file(self.subs(info['input_file']))
for id in ids_times:
times = ids_times[id]
if id in node_info['pop_map']:
nml_pop_id, cell_id = node_info['pop_map'][id]
print_v("Cell %i in Sonata node set %s (cell %s in nml pop %s) has %i spikes"%(id, node_set, nml_pop_id, cell_id, len(times)))
component = '%s__%i'%(nml_pop_id,cell_id)
self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times}
'''
input_list_id = 'il_%s_%i'%(input,cell_id)
self.handler.handle_input_list(input_list_id,
nml_pop_id,
component,
1)
self.handler.handle_single_input(input_list_id,
0,
cellId = cell_id,
segId = 0,
fract = 0.5)
'''
else:
print_v("Cell %i in Sonata node set %s NOT FOUND!"%(id, node_set))
else:
raise Exception("Sonata input type not yet supported: %s"%(info['input_type']))
########################################################################
# Use extracted edge info to create connections
projections_created = []
for conn in self.conn_info:
pre_node = self.conn_info[conn]['pre_node']
post_node = self.conn_info[conn]['post_node']
for i in range(len(self.conn_info[conn]['pre_id'])):
pre_id = self.conn_info[conn]['pre_id'][i]
post_id = self.conn_info[conn]['post_id'][i]
nsyns = self.conn_info[conn]['nsyns'][i] if 'nsyns' in self.conn_info[conn] else 1
type = self.conn_info[conn]['edge_type_id'][i]
#print_v(' Conn with %i syns, type %s: %s(%s) -> %s(%s)'%(nsyns,type,pre_node,pre_id,post_node,post_id))
pre_pop,pre_i = self.cell_info[pre_node]['pop_map'][pre_id]
post_pop,post_i = self.cell_info[post_node]['pop_map'][post_id]
#print_v(' Mapped: Conn %s(%s) -> %s(%s)'%(pre_pop,pre_i,post_pop,post_i))
# print self.edges_info[conn][type]
#print self.cell_info[pre_node]
#print 11
#print self.node_types[pre_node]
#print 22
cell_type_pre = self.cell_info[pre_node]['types'][pre_id]
#print cell_type_pre
#print 444
pop_type_pre = self.node_types[pre_node][cell_type_pre]['model_type']
#print pop_type_pre
#print 333
synapse = self.edges_info[conn][type]['dynamics_params'].split('.')[0]
self.syn_comp_info[synapse] = {}
#print self.edges_info[conn][type]
#pp.pprint(self.init_substitutes)
#pp.pprint(self.substitutes)
dynamics_params_file = self.subs(self.network_config['components']['synaptic_models_dir']) +'/'+self.edges_info[conn][type]['dynamics_params']
#print_v('Adding syn %s (at %s)'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file))
#TODO: don't load this file every connection!!!
self.syn_comp_info[synapse]['dynamics_params'] = load_json(dynamics_params_file)
proj_id = '%s_%s_%s'%(pre_pop,post_pop,synapse)
sign = self.syn_comp_info[synapse]['dynamics_params']['sign'] if 'sign' in self.syn_comp_info[synapse]['dynamics_params'] else 1
weight = self.edges_info[conn][type]['syn_weight'] if 'syn_weight' in self.edges_info[conn][type] else 1.0
syn_weight_edge_group_0 = self.conn_info[conn]['syn_weight_edge_group_0'][i] if 'syn_weight_edge_group_0' in self.conn_info[conn] else None
# Assume this overrides value from csv file...
if syn_weight_edge_group_0:
weight = syn_weight_edge_group_0
#print_v('Adding syn %s (at %s), weight: %s, sign: %s, nsyns: %s'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file, weight, sign, nsyns))
weight_scale = 0.001
if 'level_of_detail' in self.syn_comp_info[synapse]['dynamics_params']:
weight_scale = 1
weight=weight_scale * sign * weight * nsyns
delay = self.edges_info[conn][type]['delay'] if 'delay' in self.edges_info[conn][type] else 0
if not pop_type_pre == 'virtual':
if not proj_id in projections_created:
self.handler.handle_projection(proj_id,
pre_pop,
post_pop,
synapse)
projections_created.append(proj_id)
self.handler.handle_connection(proj_id,
i,
pre_pop,
post_pop,
synapse, \
pre_i, \
post_i, \
weight=weight, \
delay=delay)
else:
component = '%s__%i'%(pre_pop,pre_i)
#print_v(' --- Connecting %s to %s[%s]'%(component, post_pop, post_i))
#self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times}
input_list_id = 'il_%s_%s_%i_%i'%(component,post_pop,post_i,i)
self.handler.handle_input_list(input_list_id,
post_pop,
component,
1)
self.handler.handle_single_input(input_list_id,
0,
cellId = post_i,
segId = 0,
fract = 0.5,
weight=weight)
"""
print('~~~~~~~~~~~~~~~')
print('node_types:')
pp.pprint(self.node_types)
print('~~~~~~~~~~~~~~~')
print('cell_info:')
pp.pprint(self.cell_info)
print('================')""" | python | def parse(self, handler):
"""
Main method to parse the Sonata files and call the appropriate methods
in the handler
"""
########################################################################
# load the main configuration scripts
main_config_filename = os.path.abspath(self.parameters['filename'])
config = load_json(main_config_filename)
self.init_substitutes = {'.':'%s/'%os.path.dirname(main_config_filename),
'../':'%s/'%os.path.dirname(os.path.dirname(main_config_filename))}
self.substitutes = {'${configdir}':'%s'%os.path.dirname(main_config_filename)}
if 'network' in config:
self.network_config = load_json(self.subs(config['network']))
else:
self.network_config = config
if 'simulation' in config:
self.simulation_config = load_json(self.subs(config['simulation']))
else:
self.simulation_config = None
for m in self.network_config['manifest']:
path = self.subs(self.network_config['manifest'][m])
self.substitutes[m] = path
if 'id' in self.parameters:
id = self.parameters['id']
else:
id = 'SonataNetwork'
if id[0].isdigit(): # id like 9_cells is not a valid id for NeuroML
id='NML2_%s'%id
########################################################################
# Feed the handler the info on the network
self.handler = handler
notes = "Network read in from Sonata: %s"%main_config_filename
handler.handle_document_start(id, notes)
handler.handle_network(id, notes)
self.node_types = {}
########################################################################
# Get info from nodes files
for n in self.network_config['networks']['nodes']:
nodes_file = self.subs(n['nodes_file'])
node_types_file = self.subs(n['node_types_file'])
print_v("\nLoading nodes from %s and %s"%(nodes_file,node_types_file))
h5file=tables.open_file(nodes_file,mode='r')
print_v("Opened HDF5 file: %s"%(h5file.filename))
self.parse_group(h5file.root.nodes)
h5file.close()
self.node_types[self.current_sonata_pop] = load_csv_props(node_types_file)
self.current_sonata_pop = None
########################################################################
# Get info from edges files
self.edges_info = {}
self.conn_info = {}
if 'edges' in self.network_config['networks']:
for e in self.network_config['networks']['edges']:
edges_file = self.subs(e['edges_file'])
edge_types_file = self.subs(e['edge_types_file'])
print_v("\nLoading edges from %s and %s"%(edges_file,edge_types_file))
h5file=tables.open_file(edges_file,mode='r')
print_v("Opened HDF5 file: %s"%(h5file.filename))
self.parse_group(h5file.root.edges)
h5file.close()
self.edges_info[self.current_edge] = load_csv_props(edge_types_file)
self.current_edge = None
########################################################################
# Use extracted node/cell info to create populations
for sonata_pop in self.cell_info:
types_vs_pops = {}
for type in self.cell_info[sonata_pop]['type_count']:
node_type_info = self.node_types[sonata_pop][type]
model_name_type = node_type_info['model_name'] if 'model_name' in node_type_info \
else (node_type_info['pop_name'] if 'pop_name' in node_type_info else node_type_info['model_type'])
model_type = node_type_info['model_type']
model_template = node_type_info['model_template'] if 'model_template' in node_type_info else '- None -'
nml_pop_id = '%s_%s_%s'%(sonata_pop,model_name_type,type)
print_v(" - Adding population: %s which has model info: %s"%(nml_pop_id, node_type_info))
size = self.cell_info[sonata_pop]['type_count'][type]
if model_type=='point_process' and model_template=='nrn:IntFire1':
raise Exception('Point process model not currently supported: %s\nTry expressing the I&F cell in NEST format with nest:iaf_psc_alpha'%model_template)
pop_comp = 'cell_%s'%nml_pop_id #model_template.replace(':','_')
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = model_type
dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params']
self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file)
elif model_type=='point_process' and model_template=='nest:iaf_psc_alpha':
pop_comp = 'cell_%s'%nml_pop_id # = model_template.replace(':','_')
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = model_type
self.pop_comp_info[pop_comp]['model_template'] = model_template
dynamics_params_file = self.subs(self.network_config['components']['point_neuron_models_dir']) +'/'+node_type_info['dynamics_params']
self.pop_comp_info[pop_comp]['dynamics_params'] = load_json(dynamics_params_file)
else:
pop_comp = DUMMY_CELL
self.pop_comp_info[pop_comp] = {}
self.pop_comp_info[pop_comp]['model_type'] = pop_comp
self.nml_pop_vs_comps[nml_pop_id] = pop_comp
properties = {}
properties['type_id']=type
properties['sonata_population']=sonata_pop
properties['region']=sonata_pop
for i in node_type_info:
properties[i]=node_type_info[i]
if i=='ei':
properties['type']=node_type_info[i].upper()
color = '%s %s %s'%(self.myrandom.random(),self.myrandom.random(),self.myrandom.random())
try:
import opencortex.utils.color as occ
interneuron = 'SOM' in nml_pop_id or 'PV' in nml_pop_id
if 'L23' in nml_pop_id:
color = occ.L23_INTERNEURON if interneuron else occ.L23_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L23'))
if 'L4' in nml_pop_id:
color = occ.L4_INTERNEURON if interneuron else occ.L4_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L4'))
if 'L5' in nml_pop_id:
color = occ.L5_INTERNEURON if interneuron else occ.L5_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L5'))
if 'L6' in nml_pop_id:
color = occ.L6_INTERNEURON if interneuron else occ.L6_PRINCIPAL_CELL
pop.properties.append(neuroml.Property('region','L6'))
except:
pass # Don't specify a particular color, use random, not a problem...
properties['color']=color
if True or not 'locations' in self.cell_info[sonata_pop]['0']:
properties={} ############# temp for LEMS...
if model_type != 'virtual':
self.handler.handle_population(nml_pop_id,
pop_comp,
size,
component_obj=None,
properties=properties)
types_vs_pops[type] = nml_pop_id
self.cell_info[sonata_pop]['pop_count'] = {}
self.cell_info[sonata_pop]['pop_map'] = {}
for i in self.cell_info[sonata_pop]['types']:
pop = types_vs_pops[self.cell_info[sonata_pop]['types'][i]]
if not pop in self.cell_info[sonata_pop]['pop_count']:
self.cell_info[sonata_pop]['pop_count'][pop] = 0
index = self.cell_info[sonata_pop]['pop_count'][pop]
self.cell_info[sonata_pop]['pop_map'][i] = (pop, index)
if not pop in self.nml_ids_vs_gids:
self.nml_ids_vs_gids[pop] = {}
self.nml_ids_vs_gids[pop][index] = (sonata_pop, i)
if i in self.cell_info[sonata_pop]['0']['locations']:
if not pop in self.nml_pops_having_locations:
self.nml_pops_having_locations.append(pop)
pos = self.cell_info[sonata_pop]['0']['locations'][i]
#print('Adding pos %i: %s'%(i,pos))
self.handler.handle_location(index,
pop,
pop_comp,
pos['x'] if 'x' in pos and pos['x'] is not None else 0,
pos['y'] if 'y' in pos and pos['y'] is not None else 0,
pos['z'] if 'z' in pos and pos['z'] is not None else 0)
self.cell_info[sonata_pop]['pop_count'][pop]+=1
########################################################################
# Load simulation info into self.simulation_config
if self.simulation_config:
if self.simulation_config:
for m in self.simulation_config['manifest']:
path = self.subs(self.simulation_config['manifest'][m])
self.substitutes[m] = path
for s1 in ['output']:
for k in self.simulation_config[s1]:
self.simulation_config[s1][k] = self.subs(self.simulation_config[s1][k])
for s1 in ['inputs']:
for s2 in self.simulation_config[s1]:
for k in self.simulation_config[s1][s2]:
self.simulation_config[s1][s2][k] = self.subs(self.simulation_config[s1][s2][k])
if 'node_sets_file' in self.simulation_config:
node_sets = load_json(self.subs(self.simulation_config['node_sets_file']))
self.simulation_config['node_sets'] = node_sets
if not 'node_sets' in self.simulation_config:
self.simulation_config['node_sets'] = {}
for sonata_pop in self.cell_info:
self.node_set_mappings[sonata_pop] = {}
for sindex in self.cell_info[sonata_pop]['pop_map']:
nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0]
nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1]
# Add all in this sonata_pop to a 'node_set' named after the sonata_pop
if not nml_pop in self.node_set_mappings[sonata_pop]:
self.node_set_mappings[sonata_pop][nml_pop] = []
self.node_set_mappings[sonata_pop][nml_pop].append(nml_index)
#pp.pprint(self.simulation_config)
#pp.pprint(self.pop_comp_info)
for node_set in self.simulation_config['node_sets']:
self.node_set_mappings[node_set] = {}
node_set_props = self.simulation_config['node_sets'][node_set]
#print_v('===========Checking which cells in pops match node_set: %s = %s'%(node_set,node_set_props))
for sonata_pop in self.cell_info:
for sindex in self.cell_info[sonata_pop]['pop_map']:
#print('Does %s %s match %s?'%(sonata_pop, sindex, node_set_props))
type = self.cell_info[sonata_pop]['types'][sindex]
type_info = self.node_types[sonata_pop][type]
nml_pop = self.cell_info[sonata_pop]['pop_map'][sindex][0]
nml_index = self.cell_info[sonata_pop]['pop_map'][sindex][1]
if 'population' in node_set_props and node_set_props['population'] == sonata_pop:
if 'node_id' in node_set_props and sindex in node_set_props['node_id']:
if not nml_pop in self.node_set_mappings[node_set]:
self.node_set_mappings[node_set][nml_pop] = []
self.node_set_mappings[node_set][nml_pop].append(nml_index)
matches = _matches_node_set_props(type_info, node_set_props)
#print_v('Node %i in %s (NML: %s[%i]) has type %s (%s); matches: %s'%(sindex, sonata_pop, nml_pop, nml_index, type, type_info, matches))
if matches:
if not nml_pop in self.node_set_mappings[node_set]:
self.node_set_mappings[node_set][nml_pop] = []
self.node_set_mappings[node_set][nml_pop].append(nml_index)
##pp.pprint(self.node_set_mappings)
########################################################################
# Extract info from inputs in simulation_config
#pp.pprint(self.simulation_config)
for input in self.simulation_config['inputs']:
info = self.simulation_config['inputs'][input]
#print_v(" - Adding input: %s which has info: %s"%(input, info))
self.input_comp_info[input] = {}
self.input_comp_info[input][info['input_type']] = {}
node_set = info['node_set']
if info['input_type'] == 'current_clamp':
comp = 'PG_%s'%input
self.input_comp_info[input][info['input_type']][comp] = {'amp':info['amp'],'delay':info['delay'],'duration':info['duration']}
for nml_pop_id in self.node_set_mappings[node_set]:
input_list_id = 'il_%s_%s'%(input,nml_pop_id)
indices = self.node_set_mappings[node_set][nml_pop_id]
self.handler.handle_input_list(input_list_id,
nml_pop_id,
comp,
len(indices))
count = 0
for index in indices:
self.handler.handle_single_input(input_list_id,
count,
cellId = index,
segId = 0,
fract = 0.5)
count+=1
elif info['input_type'] == 'spikes':
node_info = self.cell_info[node_set]
from pyneuroml.plot.PlotSpikes import read_sonata_spikes_hdf5_file
ids_times = read_sonata_spikes_hdf5_file(self.subs(info['input_file']))
for id in ids_times:
times = ids_times[id]
if id in node_info['pop_map']:
nml_pop_id, cell_id = node_info['pop_map'][id]
print_v("Cell %i in Sonata node set %s (cell %s in nml pop %s) has %i spikes"%(id, node_set, nml_pop_id, cell_id, len(times)))
component = '%s__%i'%(nml_pop_id,cell_id)
self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times}
'''
input_list_id = 'il_%s_%i'%(input,cell_id)
self.handler.handle_input_list(input_list_id,
nml_pop_id,
component,
1)
self.handler.handle_single_input(input_list_id,
0,
cellId = cell_id,
segId = 0,
fract = 0.5)
'''
else:
print_v("Cell %i in Sonata node set %s NOT FOUND!"%(id, node_set))
else:
raise Exception("Sonata input type not yet supported: %s"%(info['input_type']))
########################################################################
# Use extracted edge info to create connections
projections_created = []
for conn in self.conn_info:
pre_node = self.conn_info[conn]['pre_node']
post_node = self.conn_info[conn]['post_node']
for i in range(len(self.conn_info[conn]['pre_id'])):
pre_id = self.conn_info[conn]['pre_id'][i]
post_id = self.conn_info[conn]['post_id'][i]
nsyns = self.conn_info[conn]['nsyns'][i] if 'nsyns' in self.conn_info[conn] else 1
type = self.conn_info[conn]['edge_type_id'][i]
#print_v(' Conn with %i syns, type %s: %s(%s) -> %s(%s)'%(nsyns,type,pre_node,pre_id,post_node,post_id))
pre_pop,pre_i = self.cell_info[pre_node]['pop_map'][pre_id]
post_pop,post_i = self.cell_info[post_node]['pop_map'][post_id]
#print_v(' Mapped: Conn %s(%s) -> %s(%s)'%(pre_pop,pre_i,post_pop,post_i))
# print self.edges_info[conn][type]
#print self.cell_info[pre_node]
#print 11
#print self.node_types[pre_node]
#print 22
cell_type_pre = self.cell_info[pre_node]['types'][pre_id]
#print cell_type_pre
#print 444
pop_type_pre = self.node_types[pre_node][cell_type_pre]['model_type']
#print pop_type_pre
#print 333
synapse = self.edges_info[conn][type]['dynamics_params'].split('.')[0]
self.syn_comp_info[synapse] = {}
#print self.edges_info[conn][type]
#pp.pprint(self.init_substitutes)
#pp.pprint(self.substitutes)
dynamics_params_file = self.subs(self.network_config['components']['synaptic_models_dir']) +'/'+self.edges_info[conn][type]['dynamics_params']
#print_v('Adding syn %s (at %s)'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file))
#TODO: don't load this file every connection!!!
self.syn_comp_info[synapse]['dynamics_params'] = load_json(dynamics_params_file)
proj_id = '%s_%s_%s'%(pre_pop,post_pop,synapse)
sign = self.syn_comp_info[synapse]['dynamics_params']['sign'] if 'sign' in self.syn_comp_info[synapse]['dynamics_params'] else 1
weight = self.edges_info[conn][type]['syn_weight'] if 'syn_weight' in self.edges_info[conn][type] else 1.0
syn_weight_edge_group_0 = self.conn_info[conn]['syn_weight_edge_group_0'][i] if 'syn_weight_edge_group_0' in self.conn_info[conn] else None
# Assume this overrides value from csv file...
if syn_weight_edge_group_0:
weight = syn_weight_edge_group_0
#print_v('Adding syn %s (at %s), weight: %s, sign: %s, nsyns: %s'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file, weight, sign, nsyns))
weight_scale = 0.001
if 'level_of_detail' in self.syn_comp_info[synapse]['dynamics_params']:
weight_scale = 1
weight=weight_scale * sign * weight * nsyns
delay = self.edges_info[conn][type]['delay'] if 'delay' in self.edges_info[conn][type] else 0
if not pop_type_pre == 'virtual':
if not proj_id in projections_created:
self.handler.handle_projection(proj_id,
pre_pop,
post_pop,
synapse)
projections_created.append(proj_id)
self.handler.handle_connection(proj_id,
i,
pre_pop,
post_pop,
synapse, \
pre_i, \
post_i, \
weight=weight, \
delay=delay)
else:
component = '%s__%i'%(pre_pop,pre_i)
#print_v(' --- Connecting %s to %s[%s]'%(component, post_pop, post_i))
#self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times}
input_list_id = 'il_%s_%s_%i_%i'%(component,post_pop,post_i,i)
self.handler.handle_input_list(input_list_id,
post_pop,
component,
1)
self.handler.handle_single_input(input_list_id,
0,
cellId = post_i,
segId = 0,
fract = 0.5,
weight=weight)
"""
print('~~~~~~~~~~~~~~~')
print('node_types:')
pp.pprint(self.node_types)
print('~~~~~~~~~~~~~~~')
print('cell_info:')
pp.pprint(self.cell_info)
print('================')""" | [
"def",
"parse",
"(",
"self",
",",
"handler",
")",
":",
"########################################################################",
"# load the main configuration scripts ",
"main_config_filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"parameters",
"[",
"'filename'",
"]",
")",
"config",
"=",
"load_json",
"(",
"main_config_filename",
")",
"self",
".",
"init_substitutes",
"=",
"{",
"'.'",
":",
"'%s/'",
"%",
"os",
".",
"path",
".",
"dirname",
"(",
"main_config_filename",
")",
",",
"'../'",
":",
"'%s/'",
"%",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"main_config_filename",
")",
")",
"}",
"self",
".",
"substitutes",
"=",
"{",
"'${configdir}'",
":",
"'%s'",
"%",
"os",
".",
"path",
".",
"dirname",
"(",
"main_config_filename",
")",
"}",
"if",
"'network'",
"in",
"config",
":",
"self",
".",
"network_config",
"=",
"load_json",
"(",
"self",
".",
"subs",
"(",
"config",
"[",
"'network'",
"]",
")",
")",
"else",
":",
"self",
".",
"network_config",
"=",
"config",
"if",
"'simulation'",
"in",
"config",
":",
"self",
".",
"simulation_config",
"=",
"load_json",
"(",
"self",
".",
"subs",
"(",
"config",
"[",
"'simulation'",
"]",
")",
")",
"else",
":",
"self",
".",
"simulation_config",
"=",
"None",
"for",
"m",
"in",
"self",
".",
"network_config",
"[",
"'manifest'",
"]",
":",
"path",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"network_config",
"[",
"'manifest'",
"]",
"[",
"m",
"]",
")",
"self",
".",
"substitutes",
"[",
"m",
"]",
"=",
"path",
"if",
"'id'",
"in",
"self",
".",
"parameters",
":",
"id",
"=",
"self",
".",
"parameters",
"[",
"'id'",
"]",
"else",
":",
"id",
"=",
"'SonataNetwork'",
"if",
"id",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"# id like 9_cells is not a valid id for NeuroML",
"id",
"=",
"'NML2_%s'",
"%",
"id",
"########################################################################",
"# Feed the handler the info on the network ",
"self",
".",
"handler",
"=",
"handler",
"notes",
"=",
"\"Network read in from Sonata: %s\"",
"%",
"main_config_filename",
"handler",
".",
"handle_document_start",
"(",
"id",
",",
"notes",
")",
"handler",
".",
"handle_network",
"(",
"id",
",",
"notes",
")",
"self",
".",
"node_types",
"=",
"{",
"}",
"########################################################################",
"# Get info from nodes files ",
"for",
"n",
"in",
"self",
".",
"network_config",
"[",
"'networks'",
"]",
"[",
"'nodes'",
"]",
":",
"nodes_file",
"=",
"self",
".",
"subs",
"(",
"n",
"[",
"'nodes_file'",
"]",
")",
"node_types_file",
"=",
"self",
".",
"subs",
"(",
"n",
"[",
"'node_types_file'",
"]",
")",
"print_v",
"(",
"\"\\nLoading nodes from %s and %s\"",
"%",
"(",
"nodes_file",
",",
"node_types_file",
")",
")",
"h5file",
"=",
"tables",
".",
"open_file",
"(",
"nodes_file",
",",
"mode",
"=",
"'r'",
")",
"print_v",
"(",
"\"Opened HDF5 file: %s\"",
"%",
"(",
"h5file",
".",
"filename",
")",
")",
"self",
".",
"parse_group",
"(",
"h5file",
".",
"root",
".",
"nodes",
")",
"h5file",
".",
"close",
"(",
")",
"self",
".",
"node_types",
"[",
"self",
".",
"current_sonata_pop",
"]",
"=",
"load_csv_props",
"(",
"node_types_file",
")",
"self",
".",
"current_sonata_pop",
"=",
"None",
"########################################################################",
"# Get info from edges files ",
"self",
".",
"edges_info",
"=",
"{",
"}",
"self",
".",
"conn_info",
"=",
"{",
"}",
"if",
"'edges'",
"in",
"self",
".",
"network_config",
"[",
"'networks'",
"]",
":",
"for",
"e",
"in",
"self",
".",
"network_config",
"[",
"'networks'",
"]",
"[",
"'edges'",
"]",
":",
"edges_file",
"=",
"self",
".",
"subs",
"(",
"e",
"[",
"'edges_file'",
"]",
")",
"edge_types_file",
"=",
"self",
".",
"subs",
"(",
"e",
"[",
"'edge_types_file'",
"]",
")",
"print_v",
"(",
"\"\\nLoading edges from %s and %s\"",
"%",
"(",
"edges_file",
",",
"edge_types_file",
")",
")",
"h5file",
"=",
"tables",
".",
"open_file",
"(",
"edges_file",
",",
"mode",
"=",
"'r'",
")",
"print_v",
"(",
"\"Opened HDF5 file: %s\"",
"%",
"(",
"h5file",
".",
"filename",
")",
")",
"self",
".",
"parse_group",
"(",
"h5file",
".",
"root",
".",
"edges",
")",
"h5file",
".",
"close",
"(",
")",
"self",
".",
"edges_info",
"[",
"self",
".",
"current_edge",
"]",
"=",
"load_csv_props",
"(",
"edge_types_file",
")",
"self",
".",
"current_edge",
"=",
"None",
"########################################################################",
"# Use extracted node/cell info to create populations",
"for",
"sonata_pop",
"in",
"self",
".",
"cell_info",
":",
"types_vs_pops",
"=",
"{",
"}",
"for",
"type",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'type_count'",
"]",
":",
"node_type_info",
"=",
"self",
".",
"node_types",
"[",
"sonata_pop",
"]",
"[",
"type",
"]",
"model_name_type",
"=",
"node_type_info",
"[",
"'model_name'",
"]",
"if",
"'model_name'",
"in",
"node_type_info",
"else",
"(",
"node_type_info",
"[",
"'pop_name'",
"]",
"if",
"'pop_name'",
"in",
"node_type_info",
"else",
"node_type_info",
"[",
"'model_type'",
"]",
")",
"model_type",
"=",
"node_type_info",
"[",
"'model_type'",
"]",
"model_template",
"=",
"node_type_info",
"[",
"'model_template'",
"]",
"if",
"'model_template'",
"in",
"node_type_info",
"else",
"'- None -'",
"nml_pop_id",
"=",
"'%s_%s_%s'",
"%",
"(",
"sonata_pop",
",",
"model_name_type",
",",
"type",
")",
"print_v",
"(",
"\" - Adding population: %s which has model info: %s\"",
"%",
"(",
"nml_pop_id",
",",
"node_type_info",
")",
")",
"size",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'type_count'",
"]",
"[",
"type",
"]",
"if",
"model_type",
"==",
"'point_process'",
"and",
"model_template",
"==",
"'nrn:IntFire1'",
":",
"raise",
"Exception",
"(",
"'Point process model not currently supported: %s\\nTry expressing the I&F cell in NEST format with nest:iaf_psc_alpha'",
"%",
"model_template",
")",
"pop_comp",
"=",
"'cell_%s'",
"%",
"nml_pop_id",
"#model_template.replace(':','_')",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"=",
"{",
"}",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'model_type'",
"]",
"=",
"model_type",
"dynamics_params_file",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"network_config",
"[",
"'components'",
"]",
"[",
"'point_neuron_models_dir'",
"]",
")",
"+",
"'/'",
"+",
"node_type_info",
"[",
"'dynamics_params'",
"]",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'dynamics_params'",
"]",
"=",
"load_json",
"(",
"dynamics_params_file",
")",
"elif",
"model_type",
"==",
"'point_process'",
"and",
"model_template",
"==",
"'nest:iaf_psc_alpha'",
":",
"pop_comp",
"=",
"'cell_%s'",
"%",
"nml_pop_id",
"# = model_template.replace(':','_')",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"=",
"{",
"}",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'model_type'",
"]",
"=",
"model_type",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'model_template'",
"]",
"=",
"model_template",
"dynamics_params_file",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"network_config",
"[",
"'components'",
"]",
"[",
"'point_neuron_models_dir'",
"]",
")",
"+",
"'/'",
"+",
"node_type_info",
"[",
"'dynamics_params'",
"]",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'dynamics_params'",
"]",
"=",
"load_json",
"(",
"dynamics_params_file",
")",
"else",
":",
"pop_comp",
"=",
"DUMMY_CELL",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"=",
"{",
"}",
"self",
".",
"pop_comp_info",
"[",
"pop_comp",
"]",
"[",
"'model_type'",
"]",
"=",
"pop_comp",
"self",
".",
"nml_pop_vs_comps",
"[",
"nml_pop_id",
"]",
"=",
"pop_comp",
"properties",
"=",
"{",
"}",
"properties",
"[",
"'type_id'",
"]",
"=",
"type",
"properties",
"[",
"'sonata_population'",
"]",
"=",
"sonata_pop",
"properties",
"[",
"'region'",
"]",
"=",
"sonata_pop",
"for",
"i",
"in",
"node_type_info",
":",
"properties",
"[",
"i",
"]",
"=",
"node_type_info",
"[",
"i",
"]",
"if",
"i",
"==",
"'ei'",
":",
"properties",
"[",
"'type'",
"]",
"=",
"node_type_info",
"[",
"i",
"]",
".",
"upper",
"(",
")",
"color",
"=",
"'%s %s %s'",
"%",
"(",
"self",
".",
"myrandom",
".",
"random",
"(",
")",
",",
"self",
".",
"myrandom",
".",
"random",
"(",
")",
",",
"self",
".",
"myrandom",
".",
"random",
"(",
")",
")",
"try",
":",
"import",
"opencortex",
".",
"utils",
".",
"color",
"as",
"occ",
"interneuron",
"=",
"'SOM'",
"in",
"nml_pop_id",
"or",
"'PV'",
"in",
"nml_pop_id",
"if",
"'L23'",
"in",
"nml_pop_id",
":",
"color",
"=",
"occ",
".",
"L23_INTERNEURON",
"if",
"interneuron",
"else",
"occ",
".",
"L23_PRINCIPAL_CELL",
"pop",
".",
"properties",
".",
"append",
"(",
"neuroml",
".",
"Property",
"(",
"'region'",
",",
"'L23'",
")",
")",
"if",
"'L4'",
"in",
"nml_pop_id",
":",
"color",
"=",
"occ",
".",
"L4_INTERNEURON",
"if",
"interneuron",
"else",
"occ",
".",
"L4_PRINCIPAL_CELL",
"pop",
".",
"properties",
".",
"append",
"(",
"neuroml",
".",
"Property",
"(",
"'region'",
",",
"'L4'",
")",
")",
"if",
"'L5'",
"in",
"nml_pop_id",
":",
"color",
"=",
"occ",
".",
"L5_INTERNEURON",
"if",
"interneuron",
"else",
"occ",
".",
"L5_PRINCIPAL_CELL",
"pop",
".",
"properties",
".",
"append",
"(",
"neuroml",
".",
"Property",
"(",
"'region'",
",",
"'L5'",
")",
")",
"if",
"'L6'",
"in",
"nml_pop_id",
":",
"color",
"=",
"occ",
".",
"L6_INTERNEURON",
"if",
"interneuron",
"else",
"occ",
".",
"L6_PRINCIPAL_CELL",
"pop",
".",
"properties",
".",
"append",
"(",
"neuroml",
".",
"Property",
"(",
"'region'",
",",
"'L6'",
")",
")",
"except",
":",
"pass",
"# Don't specify a particular color, use random, not a problem...",
"properties",
"[",
"'color'",
"]",
"=",
"color",
"if",
"True",
"or",
"not",
"'locations'",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'0'",
"]",
":",
"properties",
"=",
"{",
"}",
"############# temp for LEMS...",
"if",
"model_type",
"!=",
"'virtual'",
":",
"self",
".",
"handler",
".",
"handle_population",
"(",
"nml_pop_id",
",",
"pop_comp",
",",
"size",
",",
"component_obj",
"=",
"None",
",",
"properties",
"=",
"properties",
")",
"types_vs_pops",
"[",
"type",
"]",
"=",
"nml_pop_id",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_count'",
"]",
"=",
"{",
"}",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"=",
"{",
"}",
"for",
"i",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'types'",
"]",
":",
"pop",
"=",
"types_vs_pops",
"[",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'types'",
"]",
"[",
"i",
"]",
"]",
"if",
"not",
"pop",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_count'",
"]",
":",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_count'",
"]",
"[",
"pop",
"]",
"=",
"0",
"index",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_count'",
"]",
"[",
"pop",
"]",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"[",
"i",
"]",
"=",
"(",
"pop",
",",
"index",
")",
"if",
"not",
"pop",
"in",
"self",
".",
"nml_ids_vs_gids",
":",
"self",
".",
"nml_ids_vs_gids",
"[",
"pop",
"]",
"=",
"{",
"}",
"self",
".",
"nml_ids_vs_gids",
"[",
"pop",
"]",
"[",
"index",
"]",
"=",
"(",
"sonata_pop",
",",
"i",
")",
"if",
"i",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'0'",
"]",
"[",
"'locations'",
"]",
":",
"if",
"not",
"pop",
"in",
"self",
".",
"nml_pops_having_locations",
":",
"self",
".",
"nml_pops_having_locations",
".",
"append",
"(",
"pop",
")",
"pos",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'0'",
"]",
"[",
"'locations'",
"]",
"[",
"i",
"]",
"#print('Adding pos %i: %s'%(i,pos))",
"self",
".",
"handler",
".",
"handle_location",
"(",
"index",
",",
"pop",
",",
"pop_comp",
",",
"pos",
"[",
"'x'",
"]",
"if",
"'x'",
"in",
"pos",
"and",
"pos",
"[",
"'x'",
"]",
"is",
"not",
"None",
"else",
"0",
",",
"pos",
"[",
"'y'",
"]",
"if",
"'y'",
"in",
"pos",
"and",
"pos",
"[",
"'y'",
"]",
"is",
"not",
"None",
"else",
"0",
",",
"pos",
"[",
"'z'",
"]",
"if",
"'z'",
"in",
"pos",
"and",
"pos",
"[",
"'z'",
"]",
"is",
"not",
"None",
"else",
"0",
")",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_count'",
"]",
"[",
"pop",
"]",
"+=",
"1",
"########################################################################",
"# Load simulation info into self.simulation_config",
"if",
"self",
".",
"simulation_config",
":",
"if",
"self",
".",
"simulation_config",
":",
"for",
"m",
"in",
"self",
".",
"simulation_config",
"[",
"'manifest'",
"]",
":",
"path",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"simulation_config",
"[",
"'manifest'",
"]",
"[",
"m",
"]",
")",
"self",
".",
"substitutes",
"[",
"m",
"]",
"=",
"path",
"for",
"s1",
"in",
"[",
"'output'",
"]",
":",
"for",
"k",
"in",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
":",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
"[",
"k",
"]",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
"[",
"k",
"]",
")",
"for",
"s1",
"in",
"[",
"'inputs'",
"]",
":",
"for",
"s2",
"in",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
":",
"for",
"k",
"in",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
"[",
"s2",
"]",
":",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
"[",
"s2",
"]",
"[",
"k",
"]",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"simulation_config",
"[",
"s1",
"]",
"[",
"s2",
"]",
"[",
"k",
"]",
")",
"if",
"'node_sets_file'",
"in",
"self",
".",
"simulation_config",
":",
"node_sets",
"=",
"load_json",
"(",
"self",
".",
"subs",
"(",
"self",
".",
"simulation_config",
"[",
"'node_sets_file'",
"]",
")",
")",
"self",
".",
"simulation_config",
"[",
"'node_sets'",
"]",
"=",
"node_sets",
"if",
"not",
"'node_sets'",
"in",
"self",
".",
"simulation_config",
":",
"self",
".",
"simulation_config",
"[",
"'node_sets'",
"]",
"=",
"{",
"}",
"for",
"sonata_pop",
"in",
"self",
".",
"cell_info",
":",
"self",
".",
"node_set_mappings",
"[",
"sonata_pop",
"]",
"=",
"{",
"}",
"for",
"sindex",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
":",
"nml_pop",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"[",
"sindex",
"]",
"[",
"0",
"]",
"nml_index",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"[",
"sindex",
"]",
"[",
"1",
"]",
"# Add all in this sonata_pop to a 'node_set' named after the sonata_pop",
"if",
"not",
"nml_pop",
"in",
"self",
".",
"node_set_mappings",
"[",
"sonata_pop",
"]",
":",
"self",
".",
"node_set_mappings",
"[",
"sonata_pop",
"]",
"[",
"nml_pop",
"]",
"=",
"[",
"]",
"self",
".",
"node_set_mappings",
"[",
"sonata_pop",
"]",
"[",
"nml_pop",
"]",
".",
"append",
"(",
"nml_index",
")",
"#pp.pprint(self.simulation_config)",
"#pp.pprint(self.pop_comp_info)",
"for",
"node_set",
"in",
"self",
".",
"simulation_config",
"[",
"'node_sets'",
"]",
":",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"=",
"{",
"}",
"node_set_props",
"=",
"self",
".",
"simulation_config",
"[",
"'node_sets'",
"]",
"[",
"node_set",
"]",
"#print_v('===========Checking which cells in pops match node_set: %s = %s'%(node_set,node_set_props))",
"for",
"sonata_pop",
"in",
"self",
".",
"cell_info",
":",
"for",
"sindex",
"in",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
":",
"#print('Does %s %s match %s?'%(sonata_pop, sindex, node_set_props))",
"type",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'types'",
"]",
"[",
"sindex",
"]",
"type_info",
"=",
"self",
".",
"node_types",
"[",
"sonata_pop",
"]",
"[",
"type",
"]",
"nml_pop",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"[",
"sindex",
"]",
"[",
"0",
"]",
"nml_index",
"=",
"self",
".",
"cell_info",
"[",
"sonata_pop",
"]",
"[",
"'pop_map'",
"]",
"[",
"sindex",
"]",
"[",
"1",
"]",
"if",
"'population'",
"in",
"node_set_props",
"and",
"node_set_props",
"[",
"'population'",
"]",
"==",
"sonata_pop",
":",
"if",
"'node_id'",
"in",
"node_set_props",
"and",
"sindex",
"in",
"node_set_props",
"[",
"'node_id'",
"]",
":",
"if",
"not",
"nml_pop",
"in",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
":",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"[",
"nml_pop",
"]",
"=",
"[",
"]",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"[",
"nml_pop",
"]",
".",
"append",
"(",
"nml_index",
")",
"matches",
"=",
"_matches_node_set_props",
"(",
"type_info",
",",
"node_set_props",
")",
"#print_v('Node %i in %s (NML: %s[%i]) has type %s (%s); matches: %s'%(sindex, sonata_pop, nml_pop, nml_index, type, type_info, matches))",
"if",
"matches",
":",
"if",
"not",
"nml_pop",
"in",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
":",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"[",
"nml_pop",
"]",
"=",
"[",
"]",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"[",
"nml_pop",
"]",
".",
"append",
"(",
"nml_index",
")",
"##pp.pprint(self.node_set_mappings)",
"########################################################################",
"# Extract info from inputs in simulation_config",
"#pp.pprint(self.simulation_config)",
"for",
"input",
"in",
"self",
".",
"simulation_config",
"[",
"'inputs'",
"]",
":",
"info",
"=",
"self",
".",
"simulation_config",
"[",
"'inputs'",
"]",
"[",
"input",
"]",
"#print_v(\" - Adding input: %s which has info: %s\"%(input, info)) ",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"=",
"{",
"}",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"info",
"[",
"'input_type'",
"]",
"]",
"=",
"{",
"}",
"node_set",
"=",
"info",
"[",
"'node_set'",
"]",
"if",
"info",
"[",
"'input_type'",
"]",
"==",
"'current_clamp'",
":",
"comp",
"=",
"'PG_%s'",
"%",
"input",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"info",
"[",
"'input_type'",
"]",
"]",
"[",
"comp",
"]",
"=",
"{",
"'amp'",
":",
"info",
"[",
"'amp'",
"]",
",",
"'delay'",
":",
"info",
"[",
"'delay'",
"]",
",",
"'duration'",
":",
"info",
"[",
"'duration'",
"]",
"}",
"for",
"nml_pop_id",
"in",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
":",
"input_list_id",
"=",
"'il_%s_%s'",
"%",
"(",
"input",
",",
"nml_pop_id",
")",
"indices",
"=",
"self",
".",
"node_set_mappings",
"[",
"node_set",
"]",
"[",
"nml_pop_id",
"]",
"self",
".",
"handler",
".",
"handle_input_list",
"(",
"input_list_id",
",",
"nml_pop_id",
",",
"comp",
",",
"len",
"(",
"indices",
")",
")",
"count",
"=",
"0",
"for",
"index",
"in",
"indices",
":",
"self",
".",
"handler",
".",
"handle_single_input",
"(",
"input_list_id",
",",
"count",
",",
"cellId",
"=",
"index",
",",
"segId",
"=",
"0",
",",
"fract",
"=",
"0.5",
")",
"count",
"+=",
"1",
"elif",
"info",
"[",
"'input_type'",
"]",
"==",
"'spikes'",
":",
"node_info",
"=",
"self",
".",
"cell_info",
"[",
"node_set",
"]",
"from",
"pyneuroml",
".",
"plot",
".",
"PlotSpikes",
"import",
"read_sonata_spikes_hdf5_file",
"ids_times",
"=",
"read_sonata_spikes_hdf5_file",
"(",
"self",
".",
"subs",
"(",
"info",
"[",
"'input_file'",
"]",
")",
")",
"for",
"id",
"in",
"ids_times",
":",
"times",
"=",
"ids_times",
"[",
"id",
"]",
"if",
"id",
"in",
"node_info",
"[",
"'pop_map'",
"]",
":",
"nml_pop_id",
",",
"cell_id",
"=",
"node_info",
"[",
"'pop_map'",
"]",
"[",
"id",
"]",
"print_v",
"(",
"\"Cell %i in Sonata node set %s (cell %s in nml pop %s) has %i spikes\"",
"%",
"(",
"id",
",",
"node_set",
",",
"nml_pop_id",
",",
"cell_id",
",",
"len",
"(",
"times",
")",
")",
")",
"component",
"=",
"'%s__%i'",
"%",
"(",
"nml_pop_id",
",",
"cell_id",
")",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"info",
"[",
"'input_type'",
"]",
"]",
"[",
"component",
"]",
"=",
"{",
"'id'",
":",
"cell_id",
",",
"'times'",
":",
"times",
"}",
"'''\n input_list_id = 'il_%s_%i'%(input,cell_id)\n self.handler.handle_input_list(input_list_id, \n nml_pop_id, \n component, \n 1)\n\n self.handler.handle_single_input(input_list_id, \n 0, \n cellId = cell_id, \n segId = 0, \n fract = 0.5)\n '''",
"else",
":",
"print_v",
"(",
"\"Cell %i in Sonata node set %s NOT FOUND!\"",
"%",
"(",
"id",
",",
"node_set",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Sonata input type not yet supported: %s\"",
"%",
"(",
"info",
"[",
"'input_type'",
"]",
")",
")",
"########################################################################",
"# Use extracted edge info to create connections",
"projections_created",
"=",
"[",
"]",
"for",
"conn",
"in",
"self",
".",
"conn_info",
":",
"pre_node",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'pre_node'",
"]",
"post_node",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'post_node'",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'pre_id'",
"]",
")",
")",
":",
"pre_id",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'pre_id'",
"]",
"[",
"i",
"]",
"post_id",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'post_id'",
"]",
"[",
"i",
"]",
"nsyns",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'nsyns'",
"]",
"[",
"i",
"]",
"if",
"'nsyns'",
"in",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"else",
"1",
"type",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'edge_type_id'",
"]",
"[",
"i",
"]",
"#print_v(' Conn with %i syns, type %s: %s(%s) -> %s(%s)'%(nsyns,type,pre_node,pre_id,post_node,post_id))",
"pre_pop",
",",
"pre_i",
"=",
"self",
".",
"cell_info",
"[",
"pre_node",
"]",
"[",
"'pop_map'",
"]",
"[",
"pre_id",
"]",
"post_pop",
",",
"post_i",
"=",
"self",
".",
"cell_info",
"[",
"post_node",
"]",
"[",
"'pop_map'",
"]",
"[",
"post_id",
"]",
"#print_v(' Mapped: Conn %s(%s) -> %s(%s)'%(pre_pop,pre_i,post_pop,post_i))",
"# print self.edges_info[conn][type]",
"#print self.cell_info[pre_node]",
"#print 11",
"#print self.node_types[pre_node]",
"#print 22",
"cell_type_pre",
"=",
"self",
".",
"cell_info",
"[",
"pre_node",
"]",
"[",
"'types'",
"]",
"[",
"pre_id",
"]",
"#print cell_type_pre",
"#print 444",
"pop_type_pre",
"=",
"self",
".",
"node_types",
"[",
"pre_node",
"]",
"[",
"cell_type_pre",
"]",
"[",
"'model_type'",
"]",
"#print pop_type_pre",
"#print 333",
"synapse",
"=",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"[",
"'dynamics_params'",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"self",
".",
"syn_comp_info",
"[",
"synapse",
"]",
"=",
"{",
"}",
"#print self.edges_info[conn][type]",
"#pp.pprint(self.init_substitutes)",
"#pp.pprint(self.substitutes)",
"dynamics_params_file",
"=",
"self",
".",
"subs",
"(",
"self",
".",
"network_config",
"[",
"'components'",
"]",
"[",
"'synaptic_models_dir'",
"]",
")",
"+",
"'/'",
"+",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"[",
"'dynamics_params'",
"]",
"#print_v('Adding syn %s (at %s)'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file))",
"#TODO: don't load this file every connection!!!",
"self",
".",
"syn_comp_info",
"[",
"synapse",
"]",
"[",
"'dynamics_params'",
"]",
"=",
"load_json",
"(",
"dynamics_params_file",
")",
"proj_id",
"=",
"'%s_%s_%s'",
"%",
"(",
"pre_pop",
",",
"post_pop",
",",
"synapse",
")",
"sign",
"=",
"self",
".",
"syn_comp_info",
"[",
"synapse",
"]",
"[",
"'dynamics_params'",
"]",
"[",
"'sign'",
"]",
"if",
"'sign'",
"in",
"self",
".",
"syn_comp_info",
"[",
"synapse",
"]",
"[",
"'dynamics_params'",
"]",
"else",
"1",
"weight",
"=",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"[",
"'syn_weight'",
"]",
"if",
"'syn_weight'",
"in",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"else",
"1.0",
"syn_weight_edge_group_0",
"=",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"[",
"'syn_weight_edge_group_0'",
"]",
"[",
"i",
"]",
"if",
"'syn_weight_edge_group_0'",
"in",
"self",
".",
"conn_info",
"[",
"conn",
"]",
"else",
"None",
"# Assume this overrides value from csv file...",
"if",
"syn_weight_edge_group_0",
":",
"weight",
"=",
"syn_weight_edge_group_0",
"#print_v('Adding syn %s (at %s), weight: %s, sign: %s, nsyns: %s'%(self.edges_info[conn][type]['dynamics_params'], dynamics_params_file, weight, sign, nsyns))",
"weight_scale",
"=",
"0.001",
"if",
"'level_of_detail'",
"in",
"self",
".",
"syn_comp_info",
"[",
"synapse",
"]",
"[",
"'dynamics_params'",
"]",
":",
"weight_scale",
"=",
"1",
"weight",
"=",
"weight_scale",
"*",
"sign",
"*",
"weight",
"*",
"nsyns",
"delay",
"=",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"[",
"'delay'",
"]",
"if",
"'delay'",
"in",
"self",
".",
"edges_info",
"[",
"conn",
"]",
"[",
"type",
"]",
"else",
"0",
"if",
"not",
"pop_type_pre",
"==",
"'virtual'",
":",
"if",
"not",
"proj_id",
"in",
"projections_created",
":",
"self",
".",
"handler",
".",
"handle_projection",
"(",
"proj_id",
",",
"pre_pop",
",",
"post_pop",
",",
"synapse",
")",
"projections_created",
".",
"append",
"(",
"proj_id",
")",
"self",
".",
"handler",
".",
"handle_connection",
"(",
"proj_id",
",",
"i",
",",
"pre_pop",
",",
"post_pop",
",",
"synapse",
",",
"pre_i",
",",
"post_i",
",",
"weight",
"=",
"weight",
",",
"delay",
"=",
"delay",
")",
"else",
":",
"component",
"=",
"'%s__%i'",
"%",
"(",
"pre_pop",
",",
"pre_i",
")",
"#print_v(' --- Connecting %s to %s[%s]'%(component, post_pop, post_i))",
"#self.input_comp_info[input][info['input_type']][component] ={'id': cell_id, 'times': times}",
"input_list_id",
"=",
"'il_%s_%s_%i_%i'",
"%",
"(",
"component",
",",
"post_pop",
",",
"post_i",
",",
"i",
")",
"self",
".",
"handler",
".",
"handle_input_list",
"(",
"input_list_id",
",",
"post_pop",
",",
"component",
",",
"1",
")",
"self",
".",
"handler",
".",
"handle_single_input",
"(",
"input_list_id",
",",
"0",
",",
"cellId",
"=",
"post_i",
",",
"segId",
"=",
"0",
",",
"fract",
"=",
"0.5",
",",
"weight",
"=",
"weight",
")",
"\"\"\"\n print('~~~~~~~~~~~~~~~')\n print('node_types:')\n pp.pprint(self.node_types)\n print('~~~~~~~~~~~~~~~')\n print('cell_info:')\n pp.pprint(self.cell_info)\n print('================')\"\"\""
] | Main method to parse the Sonata files and call the appropriate methods
in the handler | [
"Main",
"method",
"to",
"parse",
"the",
"Sonata",
"files",
"and",
"call",
"the",
"appropriate",
"methods",
"in",
"the",
"handler"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L136-L606 |
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | SonataReader.add_neuroml_components | def add_neuroml_components(self, nml_doc):
"""
Based on cell & synapse properties found, create the corresponding NeuroML components
"""
is_nest = False
print_v("Adding NeuroML cells to: %s"%nml_doc.id)
#pp.pprint(self.pop_comp_info)
for c in self.pop_comp_info:
info = self.pop_comp_info[c]
model_template = info['model_template'] if 'model_template' in info else \
(info['dynamics_params']['type'] if 'dynamics_params' in info else
info['model_type'])
print_v(" - Adding %s: %s"%(model_template, info))
if info['model_type'] == 'point_process' and model_template == 'nest:iaf_psc_alpha':
is_nest = True
from neuroml import IF_curr_alpha
pynn0 = IF_curr_alpha(id=c,
cm=info['dynamics_params']['C_m']/1000.0,
i_offset="0",
tau_m=info['dynamics_params']['tau_m'],
tau_refrac=info['dynamics_params']['t_ref'],
tau_syn_E="1",
tau_syn_I="1",
v_init='-70',
v_reset=info['dynamics_params']['V_reset'],
v_rest=info['dynamics_params']['E_L'],
v_thresh=info['dynamics_params']['V_th'])
nml_doc.IF_curr_alpha.append(pynn0)
elif info['model_type'] == 'point_process' and model_template == 'NEURON_IntFire1':
contents = '''<Lems>
<intFire1Cell id="%s" thresh="1mV" reset="0mV" tau="%sms" refract="%sms"/>
</Lems>'''%(c, info['dynamics_params']['tau']*1000, info['dynamics_params']['refrac']*1000)
cell_file_name = '%s.xml'%c
cell_file = open(cell_file_name,'w')
cell_file.write(contents)
cell_file.close()
self.nml_includes.append(cell_file_name)
self.nml_includes.append('../../../examples/sonatatest/IntFireCells.xml')
else:
from neuroml import IafRefCell
IafRefCell0 = IafRefCell(id=DUMMY_CELL,
C=".2 nF",
thresh = "1mV",
reset="0mV",
refract="3ms",
leak_conductance="1.2 nS",
leak_reversal="0mV")
print_v(" - Adding: %s"%IafRefCell0)
nml_doc.iaf_ref_cells.append(IafRefCell0)
print_v("Adding NeuroML synapses to: %s"%nml_doc.id)
#pp.pprint(self.syn_comp_info)
for s in self.syn_comp_info:
dyn_params = self.syn_comp_info[s]['dynamics_params']
print_v(" - Syn: %s: %s"%(s, dyn_params))
if 'level_of_detail' in dyn_params and dyn_params['level_of_detail'] == 'exp2syn':
from neuroml import ExpTwoSynapse
syn = ExpTwoSynapse(id=s,
gbase="1nS",
erev="%smV"%dyn_params['erev'],
tau_rise="%sms"%dyn_params['tau1'],
tau_decay="%sms"%dyn_params['tau2'])
#print("Adding syn: %s"%syn)
nml_doc.exp_two_synapses.append(syn)
elif 'level_of_detail' in dyn_params and dyn_params['level_of_detail'] == 'instanteneous':
contents = '''<Lems>
<impulseSynapse id="%s"/>
</Lems>'''%(s)
syn_file_name = '%s.xml'%s
syn_file = open(syn_file_name,'w')
syn_file.write(contents)
syn_file.close()
self.nml_includes.append(syn_file_name)
#self.nml_includes.append('../examples/sonatatest/IntFireCells.xml')
else:
from neuroml import AlphaCurrSynapse
pynnSynn0 = AlphaCurrSynapse(id=s, tau_syn="2")
#print("Adding syn: %s"%pynnSynn0)
nml_doc.alpha_curr_synapses.append(pynnSynn0)
print_v("Adding NeuroML inputs to: %s"%nml_doc.id)
#pp.pprint(self.input_comp_info)
for input in self.input_comp_info:
for input_type in self.input_comp_info[input]:
if input_type == 'spikes':
for comp_id in self.input_comp_info[input][input_type]:
info = self.input_comp_info[input][input_type][comp_id]
print_v("Adding input %s: %s"%(comp_id, info.keys()))
nest_syn = _get_default_nest_syn(nml_doc)
from neuroml import TimedSynapticInput, Spike
tsi = TimedSynapticInput(id=comp_id, synapse=nest_syn.id, spike_target="./%s"%nest_syn.id)
nml_doc.timed_synaptic_inputs.append(tsi)
for ti in range(len(info['times'])):
tsi.spikes.append(Spike(id=ti, time='%sms'%info['times'][ti]))
elif input_type == 'current_clamp':
from neuroml import PulseGenerator
for comp_id in self.input_comp_info[input][input_type]:
info = self.input_comp_info[input][input_type][comp_id]
#TODO remove when https://github.com/AllenInstitute/sonata/issues/42 is fixed!
amp_template = '%spA' if is_nest else '%snA' #
pg = PulseGenerator(id=comp_id,delay='%sms'%info['delay'],duration='%sms'%info['duration'],amplitude=amp_template%info['amp'])
nml_doc.pulse_generators.append(pg) | python | def add_neuroml_components(self, nml_doc):
"""
Based on cell & synapse properties found, create the corresponding NeuroML components
"""
is_nest = False
print_v("Adding NeuroML cells to: %s"%nml_doc.id)
#pp.pprint(self.pop_comp_info)
for c in self.pop_comp_info:
info = self.pop_comp_info[c]
model_template = info['model_template'] if 'model_template' in info else \
(info['dynamics_params']['type'] if 'dynamics_params' in info else
info['model_type'])
print_v(" - Adding %s: %s"%(model_template, info))
if info['model_type'] == 'point_process' and model_template == 'nest:iaf_psc_alpha':
is_nest = True
from neuroml import IF_curr_alpha
pynn0 = IF_curr_alpha(id=c,
cm=info['dynamics_params']['C_m']/1000.0,
i_offset="0",
tau_m=info['dynamics_params']['tau_m'],
tau_refrac=info['dynamics_params']['t_ref'],
tau_syn_E="1",
tau_syn_I="1",
v_init='-70',
v_reset=info['dynamics_params']['V_reset'],
v_rest=info['dynamics_params']['E_L'],
v_thresh=info['dynamics_params']['V_th'])
nml_doc.IF_curr_alpha.append(pynn0)
elif info['model_type'] == 'point_process' and model_template == 'NEURON_IntFire1':
contents = '''<Lems>
<intFire1Cell id="%s" thresh="1mV" reset="0mV" tau="%sms" refract="%sms"/>
</Lems>'''%(c, info['dynamics_params']['tau']*1000, info['dynamics_params']['refrac']*1000)
cell_file_name = '%s.xml'%c
cell_file = open(cell_file_name,'w')
cell_file.write(contents)
cell_file.close()
self.nml_includes.append(cell_file_name)
self.nml_includes.append('../../../examples/sonatatest/IntFireCells.xml')
else:
from neuroml import IafRefCell
IafRefCell0 = IafRefCell(id=DUMMY_CELL,
C=".2 nF",
thresh = "1mV",
reset="0mV",
refract="3ms",
leak_conductance="1.2 nS",
leak_reversal="0mV")
print_v(" - Adding: %s"%IafRefCell0)
nml_doc.iaf_ref_cells.append(IafRefCell0)
print_v("Adding NeuroML synapses to: %s"%nml_doc.id)
#pp.pprint(self.syn_comp_info)
for s in self.syn_comp_info:
dyn_params = self.syn_comp_info[s]['dynamics_params']
print_v(" - Syn: %s: %s"%(s, dyn_params))
if 'level_of_detail' in dyn_params and dyn_params['level_of_detail'] == 'exp2syn':
from neuroml import ExpTwoSynapse
syn = ExpTwoSynapse(id=s,
gbase="1nS",
erev="%smV"%dyn_params['erev'],
tau_rise="%sms"%dyn_params['tau1'],
tau_decay="%sms"%dyn_params['tau2'])
#print("Adding syn: %s"%syn)
nml_doc.exp_two_synapses.append(syn)
elif 'level_of_detail' in dyn_params and dyn_params['level_of_detail'] == 'instanteneous':
contents = '''<Lems>
<impulseSynapse id="%s"/>
</Lems>'''%(s)
syn_file_name = '%s.xml'%s
syn_file = open(syn_file_name,'w')
syn_file.write(contents)
syn_file.close()
self.nml_includes.append(syn_file_name)
#self.nml_includes.append('../examples/sonatatest/IntFireCells.xml')
else:
from neuroml import AlphaCurrSynapse
pynnSynn0 = AlphaCurrSynapse(id=s, tau_syn="2")
#print("Adding syn: %s"%pynnSynn0)
nml_doc.alpha_curr_synapses.append(pynnSynn0)
print_v("Adding NeuroML inputs to: %s"%nml_doc.id)
#pp.pprint(self.input_comp_info)
for input in self.input_comp_info:
for input_type in self.input_comp_info[input]:
if input_type == 'spikes':
for comp_id in self.input_comp_info[input][input_type]:
info = self.input_comp_info[input][input_type][comp_id]
print_v("Adding input %s: %s"%(comp_id, info.keys()))
nest_syn = _get_default_nest_syn(nml_doc)
from neuroml import TimedSynapticInput, Spike
tsi = TimedSynapticInput(id=comp_id, synapse=nest_syn.id, spike_target="./%s"%nest_syn.id)
nml_doc.timed_synaptic_inputs.append(tsi)
for ti in range(len(info['times'])):
tsi.spikes.append(Spike(id=ti, time='%sms'%info['times'][ti]))
elif input_type == 'current_clamp':
from neuroml import PulseGenerator
for comp_id in self.input_comp_info[input][input_type]:
info = self.input_comp_info[input][input_type][comp_id]
#TODO remove when https://github.com/AllenInstitute/sonata/issues/42 is fixed!
amp_template = '%spA' if is_nest else '%snA' #
pg = PulseGenerator(id=comp_id,delay='%sms'%info['delay'],duration='%sms'%info['duration'],amplitude=amp_template%info['amp'])
nml_doc.pulse_generators.append(pg) | [
"def",
"add_neuroml_components",
"(",
"self",
",",
"nml_doc",
")",
":",
"is_nest",
"=",
"False",
"print_v",
"(",
"\"Adding NeuroML cells to: %s\"",
"%",
"nml_doc",
".",
"id",
")",
"#pp.pprint(self.pop_comp_info)",
"for",
"c",
"in",
"self",
".",
"pop_comp_info",
":",
"info",
"=",
"self",
".",
"pop_comp_info",
"[",
"c",
"]",
"model_template",
"=",
"info",
"[",
"'model_template'",
"]",
"if",
"'model_template'",
"in",
"info",
"else",
"(",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'type'",
"]",
"if",
"'dynamics_params'",
"in",
"info",
"else",
"info",
"[",
"'model_type'",
"]",
")",
"print_v",
"(",
"\" - Adding %s: %s\"",
"%",
"(",
"model_template",
",",
"info",
")",
")",
"if",
"info",
"[",
"'model_type'",
"]",
"==",
"'point_process'",
"and",
"model_template",
"==",
"'nest:iaf_psc_alpha'",
":",
"is_nest",
"=",
"True",
"from",
"neuroml",
"import",
"IF_curr_alpha",
"pynn0",
"=",
"IF_curr_alpha",
"(",
"id",
"=",
"c",
",",
"cm",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'C_m'",
"]",
"/",
"1000.0",
",",
"i_offset",
"=",
"\"0\"",
",",
"tau_m",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'tau_m'",
"]",
",",
"tau_refrac",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'t_ref'",
"]",
",",
"tau_syn_E",
"=",
"\"1\"",
",",
"tau_syn_I",
"=",
"\"1\"",
",",
"v_init",
"=",
"'-70'",
",",
"v_reset",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'V_reset'",
"]",
",",
"v_rest",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'E_L'",
"]",
",",
"v_thresh",
"=",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'V_th'",
"]",
")",
"nml_doc",
".",
"IF_curr_alpha",
".",
"append",
"(",
"pynn0",
")",
"elif",
"info",
"[",
"'model_type'",
"]",
"==",
"'point_process'",
"and",
"model_template",
"==",
"'NEURON_IntFire1'",
":",
"contents",
"=",
"'''<Lems>\n <intFire1Cell id=\"%s\" thresh=\"1mV\" reset=\"0mV\" tau=\"%sms\" refract=\"%sms\"/>\n</Lems>'''",
"%",
"(",
"c",
",",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'tau'",
"]",
"*",
"1000",
",",
"info",
"[",
"'dynamics_params'",
"]",
"[",
"'refrac'",
"]",
"*",
"1000",
")",
"cell_file_name",
"=",
"'%s.xml'",
"%",
"c",
"cell_file",
"=",
"open",
"(",
"cell_file_name",
",",
"'w'",
")",
"cell_file",
".",
"write",
"(",
"contents",
")",
"cell_file",
".",
"close",
"(",
")",
"self",
".",
"nml_includes",
".",
"append",
"(",
"cell_file_name",
")",
"self",
".",
"nml_includes",
".",
"append",
"(",
"'../../../examples/sonatatest/IntFireCells.xml'",
")",
"else",
":",
"from",
"neuroml",
"import",
"IafRefCell",
"IafRefCell0",
"=",
"IafRefCell",
"(",
"id",
"=",
"DUMMY_CELL",
",",
"C",
"=",
"\".2 nF\"",
",",
"thresh",
"=",
"\"1mV\"",
",",
"reset",
"=",
"\"0mV\"",
",",
"refract",
"=",
"\"3ms\"",
",",
"leak_conductance",
"=",
"\"1.2 nS\"",
",",
"leak_reversal",
"=",
"\"0mV\"",
")",
"print_v",
"(",
"\" - Adding: %s\"",
"%",
"IafRefCell0",
")",
"nml_doc",
".",
"iaf_ref_cells",
".",
"append",
"(",
"IafRefCell0",
")",
"print_v",
"(",
"\"Adding NeuroML synapses to: %s\"",
"%",
"nml_doc",
".",
"id",
")",
"#pp.pprint(self.syn_comp_info)",
"for",
"s",
"in",
"self",
".",
"syn_comp_info",
":",
"dyn_params",
"=",
"self",
".",
"syn_comp_info",
"[",
"s",
"]",
"[",
"'dynamics_params'",
"]",
"print_v",
"(",
"\" - Syn: %s: %s\"",
"%",
"(",
"s",
",",
"dyn_params",
")",
")",
"if",
"'level_of_detail'",
"in",
"dyn_params",
"and",
"dyn_params",
"[",
"'level_of_detail'",
"]",
"==",
"'exp2syn'",
":",
"from",
"neuroml",
"import",
"ExpTwoSynapse",
"syn",
"=",
"ExpTwoSynapse",
"(",
"id",
"=",
"s",
",",
"gbase",
"=",
"\"1nS\"",
",",
"erev",
"=",
"\"%smV\"",
"%",
"dyn_params",
"[",
"'erev'",
"]",
",",
"tau_rise",
"=",
"\"%sms\"",
"%",
"dyn_params",
"[",
"'tau1'",
"]",
",",
"tau_decay",
"=",
"\"%sms\"",
"%",
"dyn_params",
"[",
"'tau2'",
"]",
")",
"#print(\"Adding syn: %s\"%syn)",
"nml_doc",
".",
"exp_two_synapses",
".",
"append",
"(",
"syn",
")",
"elif",
"'level_of_detail'",
"in",
"dyn_params",
"and",
"dyn_params",
"[",
"'level_of_detail'",
"]",
"==",
"'instanteneous'",
":",
"contents",
"=",
"'''<Lems>\n <impulseSynapse id=\"%s\"/>\n</Lems>'''",
"%",
"(",
"s",
")",
"syn_file_name",
"=",
"'%s.xml'",
"%",
"s",
"syn_file",
"=",
"open",
"(",
"syn_file_name",
",",
"'w'",
")",
"syn_file",
".",
"write",
"(",
"contents",
")",
"syn_file",
".",
"close",
"(",
")",
"self",
".",
"nml_includes",
".",
"append",
"(",
"syn_file_name",
")",
"#self.nml_includes.append('../examples/sonatatest/IntFireCells.xml')",
"else",
":",
"from",
"neuroml",
"import",
"AlphaCurrSynapse",
"pynnSynn0",
"=",
"AlphaCurrSynapse",
"(",
"id",
"=",
"s",
",",
"tau_syn",
"=",
"\"2\"",
")",
"#print(\"Adding syn: %s\"%pynnSynn0)",
"nml_doc",
".",
"alpha_curr_synapses",
".",
"append",
"(",
"pynnSynn0",
")",
"print_v",
"(",
"\"Adding NeuroML inputs to: %s\"",
"%",
"nml_doc",
".",
"id",
")",
"#pp.pprint(self.input_comp_info)",
"for",
"input",
"in",
"self",
".",
"input_comp_info",
":",
"for",
"input_type",
"in",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
":",
"if",
"input_type",
"==",
"'spikes'",
":",
"for",
"comp_id",
"in",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"input_type",
"]",
":",
"info",
"=",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"input_type",
"]",
"[",
"comp_id",
"]",
"print_v",
"(",
"\"Adding input %s: %s\"",
"%",
"(",
"comp_id",
",",
"info",
".",
"keys",
"(",
")",
")",
")",
"nest_syn",
"=",
"_get_default_nest_syn",
"(",
"nml_doc",
")",
"from",
"neuroml",
"import",
"TimedSynapticInput",
",",
"Spike",
"tsi",
"=",
"TimedSynapticInput",
"(",
"id",
"=",
"comp_id",
",",
"synapse",
"=",
"nest_syn",
".",
"id",
",",
"spike_target",
"=",
"\"./%s\"",
"%",
"nest_syn",
".",
"id",
")",
"nml_doc",
".",
"timed_synaptic_inputs",
".",
"append",
"(",
"tsi",
")",
"for",
"ti",
"in",
"range",
"(",
"len",
"(",
"info",
"[",
"'times'",
"]",
")",
")",
":",
"tsi",
".",
"spikes",
".",
"append",
"(",
"Spike",
"(",
"id",
"=",
"ti",
",",
"time",
"=",
"'%sms'",
"%",
"info",
"[",
"'times'",
"]",
"[",
"ti",
"]",
")",
")",
"elif",
"input_type",
"==",
"'current_clamp'",
":",
"from",
"neuroml",
"import",
"PulseGenerator",
"for",
"comp_id",
"in",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"input_type",
"]",
":",
"info",
"=",
"self",
".",
"input_comp_info",
"[",
"input",
"]",
"[",
"input_type",
"]",
"[",
"comp_id",
"]",
"#TODO remove when https://github.com/AllenInstitute/sonata/issues/42 is fixed!",
"amp_template",
"=",
"'%spA'",
"if",
"is_nest",
"else",
"'%snA'",
"# ",
"pg",
"=",
"PulseGenerator",
"(",
"id",
"=",
"comp_id",
",",
"delay",
"=",
"'%sms'",
"%",
"info",
"[",
"'delay'",
"]",
",",
"duration",
"=",
"'%sms'",
"%",
"info",
"[",
"'duration'",
"]",
",",
"amplitude",
"=",
"amp_template",
"%",
"info",
"[",
"'amp'",
"]",
")",
"nml_doc",
".",
"pulse_generators",
".",
"append",
"(",
"pg",
")"
] | Based on cell & synapse properties found, create the corresponding NeuroML components | [
"Based",
"on",
"cell",
"&",
"synapse",
"properties",
"found",
"create",
"the",
"corresponding",
"NeuroML",
"components"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L699-L831 |
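A minimal usage sketch for the SonataReader.add_neuroml_components method documented in the record above. It assumes a SonataReader instance (called `reader` here) has already parsed the Sonata files so that its pop_comp_info, syn_comp_info and input_comp_info dictionaries are populated — the constructor and parse step are not part of this record — and that libNeuroML (the `neuroml` package) is installed.

from neuroml import NeuroMLDocument

# `reader` is assumed to be an already-parsed SonataReader instance;
# how it is constructed is not shown in this record.
nml_doc = NeuroMLDocument(id="SonataExport")

# Translate the cell, synapse and input info collected from the Sonata
# files into NeuroML component objects attached to the document.
reader.add_neuroml_components(nml_doc)

print("pulse generators:", len(nml_doc.pulse_generators))
print("synapses:", len(nml_doc.exp_two_synapses) + len(nml_doc.alpha_curr_synapses))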
NeuroML/NeuroMLlite | neuromllite/SonataReader.py | SonataReader.generate_lems_file | def generate_lems_file(self, nml_file_name, nml_doc):
"""
Generate a LEMS file to use in simulations of the NeuroML file
"""
#pp.pprint(self.simulation_config)
#pp.pprint(self.pop_comp_info)
#pp.pprint(self.node_set_mappings)
if 'output' in self.simulation_config:
gen_spike_saves_for_all_somas = True
target = nml_doc.networks[0].id
sim_id = 'Sim_%s'%target
duration = self.simulation_config['run']['tstop']
dt = self.simulation_config['run']['dt']
lems_file_name = 'LEMS_%s.xml'%sim_id
target_dir = "./"
gen_saves_for_quantities = {}
gen_plots_for_quantities = {}
if 'reports' in self.simulation_config:
if 'membrane_potential' in self.simulation_config['reports']:
mp = self.simulation_config['reports']['membrane_potential']
node_set = self.node_set_mappings[mp['cells']]
for nml_pop in node_set:
comp = self.nml_pop_vs_comps[nml_pop]
ids = node_set[nml_pop]
display = 'Voltages_%s'%nml_pop
file_name = '%s.v.dat'%nml_pop
for id in ids:
quantity = '%s/%i/%s/%s'%(nml_pop,id,comp,'v')
if not nml_pop in self.nml_pops_having_locations:
quantity = '%s[%i]/%s'%(nml_pop,id,'v')
if not display in gen_plots_for_quantities:
gen_plots_for_quantities[display] = []
gen_plots_for_quantities[display].append(quantity)
if not file_name in gen_saves_for_quantities:
gen_saves_for_quantities[file_name] = []
gen_saves_for_quantities[file_name].append(quantity)
generate_lems_file_for_neuroml(sim_id,
nml_file_name,
target,
duration,
dt,
lems_file_name,
target_dir,
include_extra_files = self.nml_includes,
gen_plots_for_all_v = False,
plot_all_segments = False,
gen_plots_for_quantities = gen_plots_for_quantities, # Dict with displays vs lists of quantity paths
gen_saves_for_all_v = False,
save_all_segments = False,
gen_saves_for_quantities = gen_saves_for_quantities, # List of populations, all pops if = []
gen_spike_saves_for_all_somas = gen_spike_saves_for_all_somas,
report_file_name = REPORT_FILE,
copy_neuroml = True,
verbose=True)
return lems_file_name | python | def generate_lems_file(self, nml_file_name, nml_doc):
"""
Generate a LEMS file to use in simulations of the NeuroML file
"""
#pp.pprint(self.simulation_config)
#pp.pprint(self.pop_comp_info)
#pp.pprint(self.node_set_mappings)
if 'output' in self.simulation_config:
gen_spike_saves_for_all_somas = True
target = nml_doc.networks[0].id
sim_id = 'Sim_%s'%target
duration = self.simulation_config['run']['tstop']
dt = self.simulation_config['run']['dt']
lems_file_name = 'LEMS_%s.xml'%sim_id
target_dir = "./"
gen_saves_for_quantities = {}
gen_plots_for_quantities = {}
if 'reports' in self.simulation_config:
if 'membrane_potential' in self.simulation_config['reports']:
mp = self.simulation_config['reports']['membrane_potential']
node_set = self.node_set_mappings[mp['cells']]
for nml_pop in node_set:
comp = self.nml_pop_vs_comps[nml_pop]
ids = node_set[nml_pop]
display = 'Voltages_%s'%nml_pop
file_name = '%s.v.dat'%nml_pop
for id in ids:
quantity = '%s/%i/%s/%s'%(nml_pop,id,comp,'v')
if not nml_pop in self.nml_pops_having_locations:
quantity = '%s[%i]/%s'%(nml_pop,id,'v')
if not display in gen_plots_for_quantities:
gen_plots_for_quantities[display] = []
gen_plots_for_quantities[display].append(quantity)
if not file_name in gen_saves_for_quantities:
gen_saves_for_quantities[file_name] = []
gen_saves_for_quantities[file_name].append(quantity)
generate_lems_file_for_neuroml(sim_id,
nml_file_name,
target,
duration,
dt,
lems_file_name,
target_dir,
include_extra_files = self.nml_includes,
gen_plots_for_all_v = False,
plot_all_segments = False,
gen_plots_for_quantities = gen_plots_for_quantities, # Dict with displays vs lists of quantity paths
gen_saves_for_all_v = False,
save_all_segments = False,
gen_saves_for_quantities = gen_saves_for_quantities, # List of populations, all pops if = []
gen_spike_saves_for_all_somas = gen_spike_saves_for_all_somas,
report_file_name = REPORT_FILE,
copy_neuroml = True,
verbose=True)
return lems_file_name | [
"def",
"generate_lems_file",
"(",
"self",
",",
"nml_file_name",
",",
"nml_doc",
")",
":",
"#pp.pprint(self.simulation_config)",
"#pp.pprint(self.pop_comp_info)",
"#pp.pprint(self.node_set_mappings)",
"if",
"'output'",
"in",
"self",
".",
"simulation_config",
":",
"gen_spike_saves_for_all_somas",
"=",
"True",
"target",
"=",
"nml_doc",
".",
"networks",
"[",
"0",
"]",
".",
"id",
"sim_id",
"=",
"'Sim_%s'",
"%",
"target",
"duration",
"=",
"self",
".",
"simulation_config",
"[",
"'run'",
"]",
"[",
"'tstop'",
"]",
"dt",
"=",
"self",
".",
"simulation_config",
"[",
"'run'",
"]",
"[",
"'dt'",
"]",
"lems_file_name",
"=",
"'LEMS_%s.xml'",
"%",
"sim_id",
"target_dir",
"=",
"\"./\"",
"gen_saves_for_quantities",
"=",
"{",
"}",
"gen_plots_for_quantities",
"=",
"{",
"}",
"if",
"'reports'",
"in",
"self",
".",
"simulation_config",
":",
"if",
"'membrane_potential'",
"in",
"self",
".",
"simulation_config",
"[",
"'reports'",
"]",
":",
"mp",
"=",
"self",
".",
"simulation_config",
"[",
"'reports'",
"]",
"[",
"'membrane_potential'",
"]",
"node_set",
"=",
"self",
".",
"node_set_mappings",
"[",
"mp",
"[",
"'cells'",
"]",
"]",
"for",
"nml_pop",
"in",
"node_set",
":",
"comp",
"=",
"self",
".",
"nml_pop_vs_comps",
"[",
"nml_pop",
"]",
"ids",
"=",
"node_set",
"[",
"nml_pop",
"]",
"display",
"=",
"'Voltages_%s'",
"%",
"nml_pop",
"file_name",
"=",
"'%s.v.dat'",
"%",
"nml_pop",
"for",
"id",
"in",
"ids",
":",
"quantity",
"=",
"'%s/%i/%s/%s'",
"%",
"(",
"nml_pop",
",",
"id",
",",
"comp",
",",
"'v'",
")",
"if",
"not",
"nml_pop",
"in",
"self",
".",
"nml_pops_having_locations",
":",
"quantity",
"=",
"'%s[%i]/%s'",
"%",
"(",
"nml_pop",
",",
"id",
",",
"'v'",
")",
"if",
"not",
"display",
"in",
"gen_plots_for_quantities",
":",
"gen_plots_for_quantities",
"[",
"display",
"]",
"=",
"[",
"]",
"gen_plots_for_quantities",
"[",
"display",
"]",
".",
"append",
"(",
"quantity",
")",
"if",
"not",
"file_name",
"in",
"gen_saves_for_quantities",
":",
"gen_saves_for_quantities",
"[",
"file_name",
"]",
"=",
"[",
"]",
"gen_saves_for_quantities",
"[",
"file_name",
"]",
".",
"append",
"(",
"quantity",
")",
"generate_lems_file_for_neuroml",
"(",
"sim_id",
",",
"nml_file_name",
",",
"target",
",",
"duration",
",",
"dt",
",",
"lems_file_name",
",",
"target_dir",
",",
"include_extra_files",
"=",
"self",
".",
"nml_includes",
",",
"gen_plots_for_all_v",
"=",
"False",
",",
"plot_all_segments",
"=",
"False",
",",
"gen_plots_for_quantities",
"=",
"gen_plots_for_quantities",
",",
"# Dict with displays vs lists of quantity paths",
"gen_saves_for_all_v",
"=",
"False",
",",
"save_all_segments",
"=",
"False",
",",
"gen_saves_for_quantities",
"=",
"gen_saves_for_quantities",
",",
"# List of populations, all pops if = []",
"gen_spike_saves_for_all_somas",
"=",
"gen_spike_saves_for_all_somas",
",",
"report_file_name",
"=",
"REPORT_FILE",
",",
"copy_neuroml",
"=",
"True",
",",
"verbose",
"=",
"True",
")",
"return",
"lems_file_name"
] | Generate a LEMS file to use in simulations of the NeuroML file | [
"Generate",
"a",
"LEMS",
"file",
"to",
"use",
"in",
"simulations",
"of",
"the",
"NeuroML",
"file"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/SonataReader.py#L834-L898 |
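Continuing the sketch above: once the NeuroML document is written to disk, generate_lems_file builds the matching LEMS simulation file. This again assumes the hypothetical `reader` and `nml_doc` objects from the previous sketch; the serialisation uses libNeuroML's standard writer, and the exact output names depend on the Sonata simulation_config.

import neuroml.writers as writers

nml_file_name = "%s.net.nml" % nml_doc.id
writers.NeuroMLWriter.write(nml_doc, nml_file_name)  # serialise the network description

# Builds LEMS_Sim_<network id>.xml, wiring up the voltage traces and spike
# saves requested in the Sonata simulation_config, and returns its file name.
lems_file = reader.generate_lems_file(nml_file_name, nml_doc)
print("LEMS simulation file:", lems_file)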
ungarj/tilematrix | tilematrix/_funcs.py | clip_geometry_to_srs_bounds | def clip_geometry_to_srs_bounds(geometry, pyramid, multipart=False):
"""
Clip input geometry to SRS bounds of given TilePyramid.
If geometry passes the antimeridian, it will be split up in a multipart
geometry and shifted to within the SRS boundaries.
Note: geometry SRS must be the TilePyramid SRS!
- geometry: any shapely geometry
- pyramid: a TilePyramid object
- multipart: return list of geometries instead of a GeometryCollection
"""
if not geometry.is_valid:
raise ValueError("invalid geometry given")
pyramid_bbox = box(*pyramid.bounds)
# Special case for global tile pyramids if geometry extends over tile
# pyramid boundaries (such as the antimeridian).
if pyramid.is_global and not geometry.within(pyramid_bbox):
inside_geom = geometry.intersection(pyramid_bbox)
outside_geom = geometry.difference(pyramid_bbox)
# shift outside geometry so it lies within SRS bounds
if isinstance(outside_geom, Polygon):
outside_geom = [outside_geom]
all_geoms = [inside_geom]
for geom in outside_geom:
geom_bounds = Bounds(*geom.bounds)
if geom_bounds.left < pyramid.left:
geom = translate(geom, xoff=2*pyramid.right)
elif geom_bounds.right > pyramid.right:
geom = translate(geom, xoff=-2*pyramid.right)
all_geoms.append(geom)
if multipart:
return all_geoms
else:
return GeometryCollection(all_geoms)
else:
if multipart:
return [geometry]
else:
return geometry | python | def clip_geometry_to_srs_bounds(geometry, pyramid, multipart=False):
"""
Clip input geometry to SRS bounds of given TilePyramid.
If geometry passes the antimeridian, it will be split up in a multipart
geometry and shifted to within the SRS boundaries.
Note: geometry SRS must be the TilePyramid SRS!
- geometry: any shapely geometry
- pyramid: a TilePyramid object
- multipart: return list of geometries instead of a GeometryCollection
"""
if not geometry.is_valid:
raise ValueError("invalid geometry given")
pyramid_bbox = box(*pyramid.bounds)
# Special case for global tile pyramids if geometry extends over tile
# pyramid boundaries (such as the antimeridian).
if pyramid.is_global and not geometry.within(pyramid_bbox):
inside_geom = geometry.intersection(pyramid_bbox)
outside_geom = geometry.difference(pyramid_bbox)
# shift outside geometry so it lies within SRS bounds
if isinstance(outside_geom, Polygon):
outside_geom = [outside_geom]
all_geoms = [inside_geom]
for geom in outside_geom:
geom_bounds = Bounds(*geom.bounds)
if geom_bounds.left < pyramid.left:
geom = translate(geom, xoff=2*pyramid.right)
elif geom_bounds.right > pyramid.right:
geom = translate(geom, xoff=-2*pyramid.right)
all_geoms.append(geom)
if multipart:
return all_geoms
else:
return GeometryCollection(all_geoms)
else:
if multipart:
return [geometry]
else:
return geometry | [
"def",
"clip_geometry_to_srs_bounds",
"(",
"geometry",
",",
"pyramid",
",",
"multipart",
"=",
"False",
")",
":",
"if",
"not",
"geometry",
".",
"is_valid",
":",
"raise",
"ValueError",
"(",
"\"invalid geometry given\"",
")",
"pyramid_bbox",
"=",
"box",
"(",
"*",
"pyramid",
".",
"bounds",
")",
"# Special case for global tile pyramids if geometry extends over tile",
"# pyramid boundaries (such as the antimeridian).",
"if",
"pyramid",
".",
"is_global",
"and",
"not",
"geometry",
".",
"within",
"(",
"pyramid_bbox",
")",
":",
"inside_geom",
"=",
"geometry",
".",
"intersection",
"(",
"pyramid_bbox",
")",
"outside_geom",
"=",
"geometry",
".",
"difference",
"(",
"pyramid_bbox",
")",
"# shift outside geometry so it lies within SRS bounds",
"if",
"isinstance",
"(",
"outside_geom",
",",
"Polygon",
")",
":",
"outside_geom",
"=",
"[",
"outside_geom",
"]",
"all_geoms",
"=",
"[",
"inside_geom",
"]",
"for",
"geom",
"in",
"outside_geom",
":",
"geom_bounds",
"=",
"Bounds",
"(",
"*",
"geom",
".",
"bounds",
")",
"if",
"geom_bounds",
".",
"left",
"<",
"pyramid",
".",
"left",
":",
"geom",
"=",
"translate",
"(",
"geom",
",",
"xoff",
"=",
"2",
"*",
"pyramid",
".",
"right",
")",
"elif",
"geom_bounds",
".",
"right",
">",
"pyramid",
".",
"right",
":",
"geom",
"=",
"translate",
"(",
"geom",
",",
"xoff",
"=",
"-",
"2",
"*",
"pyramid",
".",
"right",
")",
"all_geoms",
".",
"append",
"(",
"geom",
")",
"if",
"multipart",
":",
"return",
"all_geoms",
"else",
":",
"return",
"GeometryCollection",
"(",
"all_geoms",
")",
"else",
":",
"if",
"multipart",
":",
"return",
"[",
"geometry",
"]",
"else",
":",
"return",
"geometry"
] | Clip input geometry to SRS bounds of given TilePyramid.
If geometry passes the antimeridian, it will be split up in a multipart
geometry and shifted to within the SRS boundaries.
Note: geometry SRS must be the TilePyramid SRS!
- geometry: any shapely geometry
- pyramid: a TilePyramid object
- multipart: return list of geometries instead of a GeometryCollection | [
"Clip",
"input",
"geometry",
"to",
"SRS",
"bounds",
"of",
"given",
"TilePyramid",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L19-L60 |
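A small, hedged example of the antimeridian handling described above. It assumes Shapely is installed and imports the helper from tilematrix._funcs, the module path given in this record (it may also be re-exported at package level in some releases).

from shapely.geometry import box
from tilematrix import TilePyramid
from tilematrix._funcs import clip_geometry_to_srs_bounds

tp = TilePyramid("geodetic")          # global pyramid, bounds (-180, -90, 180, 90)
geom = box(170, -10, 190, 10)         # extends 10 degrees past the antimeridian

parts = clip_geometry_to_srs_bounds(geom, tp, multipart=True)
for part in parts:
    print(part.bounds)
# -> (170, -10, 180, 10) plus the overflowing part shifted to (-180, -10, -170, 10)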
ungarj/tilematrix | tilematrix/_funcs.py | snap_bounds | def snap_bounds(bounds=None, tile_pyramid=None, zoom=None, pixelbuffer=0):
"""
Extend bounds to be aligned with union of tile bboxes.
- bounds: (left, bottom, right, top)
- tile_pyramid: a TilePyramid object
- zoom: target zoom level
- pixelbuffer: apply pixelbuffer
"""
bounds = Bounds(*bounds)
validate_zoom(zoom)
lb = _tile_from_xy(tile_pyramid, bounds.left, bounds.bottom, zoom, on_edge_use="rt")
rt = _tile_from_xy(tile_pyramid, bounds.right, bounds.top, zoom, on_edge_use="lb")
left, bottom, _, _ = lb.bounds(pixelbuffer)
_, _, right, top = rt.bounds(pixelbuffer)
return Bounds(left, bottom, right, top) | python | def snap_bounds(bounds=None, tile_pyramid=None, zoom=None, pixelbuffer=0):
"""
Extend bounds to be aligned with union of tile bboxes.
- bounds: (left, bottom, right, top)
- tile_pyramid: a TilePyramid object
- zoom: target zoom level
- pixelbuffer: apply pixelbuffer
"""
bounds = Bounds(*bounds)
validate_zoom(zoom)
lb = _tile_from_xy(tile_pyramid, bounds.left, bounds.bottom, zoom, on_edge_use="rt")
rt = _tile_from_xy(tile_pyramid, bounds.right, bounds.top, zoom, on_edge_use="lb")
left, bottom, _, _ = lb.bounds(pixelbuffer)
_, _, right, top = rt.bounds(pixelbuffer)
return Bounds(left, bottom, right, top) | [
"def",
"snap_bounds",
"(",
"bounds",
"=",
"None",
",",
"tile_pyramid",
"=",
"None",
",",
"zoom",
"=",
"None",
",",
"pixelbuffer",
"=",
"0",
")",
":",
"bounds",
"=",
"Bounds",
"(",
"*",
"bounds",
")",
"validate_zoom",
"(",
"zoom",
")",
"lb",
"=",
"_tile_from_xy",
"(",
"tile_pyramid",
",",
"bounds",
".",
"left",
",",
"bounds",
".",
"bottom",
",",
"zoom",
",",
"on_edge_use",
"=",
"\"rt\"",
")",
"rt",
"=",
"_tile_from_xy",
"(",
"tile_pyramid",
",",
"bounds",
".",
"right",
",",
"bounds",
".",
"top",
",",
"zoom",
",",
"on_edge_use",
"=",
"\"lb\"",
")",
"left",
",",
"bottom",
",",
"_",
",",
"_",
"=",
"lb",
".",
"bounds",
"(",
"pixelbuffer",
")",
"_",
",",
"_",
",",
"right",
",",
"top",
"=",
"rt",
".",
"bounds",
"(",
"pixelbuffer",
")",
"return",
"Bounds",
"(",
"left",
",",
"bottom",
",",
"right",
",",
"top",
")"
] | Extend bounds to be aligned with union of tile bboxes.
- bounds: (left, bottom, right, top)
- tile_pyramid: a TilePyramid object
- zoom: target zoom level
- pixelbuffer: apply pixelbuffer | [
"Extend",
"bounds",
"to",
"be",
"aligned",
"with",
"union",
"of",
"tile",
"bboxes",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L63-L78 |
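A brief usage sketch for snap_bounds, again importing from tilematrix._funcs as given in this record and assuming the standard geodetic pyramid.

from tilematrix import TilePyramid
from tilematrix._funcs import snap_bounds

tp = TilePyramid("geodetic")

# Arbitrary bounds that do not line up with the zoom-4 tile grid get widened
# to the union of the tile bounding boxes they touch.
print(snap_bounds(bounds=(12.3, 45.6, 23.4, 56.7), tile_pyramid=tp, zoom=4))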
ungarj/tilematrix | tilematrix/_funcs.py | _verify_shape_bounds | def _verify_shape_bounds(shape, bounds):
"""Verify that shape corresponds to bounds apect ratio."""
if not isinstance(shape, (tuple, list)) or len(shape) != 2:
raise TypeError(
"shape must be a tuple or list with two elements: %s" % str(shape)
)
if not isinstance(bounds, (tuple, list)) or len(bounds) != 4:
raise TypeError(
"bounds must be a tuple or list with four elements: %s" % str(bounds)
)
shape = Shape(*shape)
bounds = Bounds(*bounds)
shape_ratio = shape.width / shape.height
bounds_ratio = (bounds.right - bounds.left) / (bounds.top - bounds.bottom)
if abs(shape_ratio - bounds_ratio) > DELTA:
min_length = min([
(bounds.right - bounds.left) / shape.width,
(bounds.top - bounds.bottom) / shape.height
])
proposed_bounds = Bounds(
bounds.left,
bounds.bottom,
bounds.left + shape.width * min_length,
bounds.bottom + shape.height * min_length
)
raise ValueError(
"shape ratio (%s) must equal bounds ratio (%s); try %s" % (
shape_ratio, bounds_ratio, proposed_bounds
)
) | python | def _verify_shape_bounds(shape, bounds):
"""Verify that shape corresponds to bounds apect ratio."""
if not isinstance(shape, (tuple, list)) or len(shape) != 2:
raise TypeError(
"shape must be a tuple or list with two elements: %s" % str(shape)
)
if not isinstance(bounds, (tuple, list)) or len(bounds) != 4:
raise TypeError(
"bounds must be a tuple or list with four elements: %s" % str(bounds)
)
shape = Shape(*shape)
bounds = Bounds(*bounds)
shape_ratio = shape.width / shape.height
bounds_ratio = (bounds.right - bounds.left) / (bounds.top - bounds.bottom)
if abs(shape_ratio - bounds_ratio) > DELTA:
min_length = min([
(bounds.right - bounds.left) / shape.width,
(bounds.top - bounds.bottom) / shape.height
])
proposed_bounds = Bounds(
bounds.left,
bounds.bottom,
bounds.left + shape.width * min_length,
bounds.bottom + shape.height * min_length
)
raise ValueError(
"shape ratio (%s) must equal bounds ratio (%s); try %s" % (
shape_ratio, bounds_ratio, proposed_bounds
)
) | [
"def",
"_verify_shape_bounds",
"(",
"shape",
",",
"bounds",
")",
":",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"(",
"tuple",
",",
"list",
")",
")",
"or",
"len",
"(",
"shape",
")",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"\"shape must be a tuple or list with two elements: %s\"",
"%",
"str",
"(",
"shape",
")",
")",
"if",
"not",
"isinstance",
"(",
"bounds",
",",
"(",
"tuple",
",",
"list",
")",
")",
"or",
"len",
"(",
"bounds",
")",
"!=",
"4",
":",
"raise",
"TypeError",
"(",
"\"bounds must be a tuple or list with four elements: %s\"",
"%",
"str",
"(",
"bounds",
")",
")",
"shape",
"=",
"Shape",
"(",
"*",
"shape",
")",
"bounds",
"=",
"Bounds",
"(",
"*",
"bounds",
")",
"shape_ratio",
"=",
"shape",
".",
"width",
"/",
"shape",
".",
"height",
"bounds_ratio",
"=",
"(",
"bounds",
".",
"right",
"-",
"bounds",
".",
"left",
")",
"/",
"(",
"bounds",
".",
"top",
"-",
"bounds",
".",
"bottom",
")",
"if",
"abs",
"(",
"shape_ratio",
"-",
"bounds_ratio",
")",
">",
"DELTA",
":",
"min_length",
"=",
"min",
"(",
"[",
"(",
"bounds",
".",
"right",
"-",
"bounds",
".",
"left",
")",
"/",
"shape",
".",
"width",
",",
"(",
"bounds",
".",
"top",
"-",
"bounds",
".",
"bottom",
")",
"/",
"shape",
".",
"height",
"]",
")",
"proposed_bounds",
"=",
"Bounds",
"(",
"bounds",
".",
"left",
",",
"bounds",
".",
"bottom",
",",
"bounds",
".",
"left",
"+",
"shape",
".",
"width",
"*",
"min_length",
",",
"bounds",
".",
"bottom",
"+",
"shape",
".",
"height",
"*",
"min_length",
")",
"raise",
"ValueError",
"(",
"\"shape ratio (%s) must equal bounds ratio (%s); try %s\"",
"%",
"(",
"shape_ratio",
",",
"bounds_ratio",
",",
"proposed_bounds",
")",
")"
] | Verify that shape corresponds to bounds apect ratio. | [
"Verify",
"that",
"shape",
"corresponds",
"to",
"bounds",
"apect",
"ratio",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L81-L110 |
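_verify_shape_bounds is a private validator, but its behaviour is easy to demonstrate from the code above; square inputs are used below so the example does not depend on the field order of the Shape namedtuple.

from tilematrix._funcs import _verify_shape_bounds

# Matching aspect ratios: returns silently.
_verify_shape_bounds(shape=(256, 256), bounds=(0, 0, 10, 10))

# Mismatched aspect ratios: raises and the message proposes corrected bounds.
try:
    _verify_shape_bounds(shape=(256, 256), bounds=(0, 0, 20, 10))
except ValueError as err:
    print(err)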
ungarj/tilematrix | tilematrix/_funcs.py | _tile_intersecting_tilepyramid | def _tile_intersecting_tilepyramid(tile, tp):
"""Return all tiles from tilepyramid intersecting with tile."""
if tile.tp.grid != tp.grid:
raise ValueError("Tile and TilePyramid source grids must be the same.")
tile_metatiling = tile.tile_pyramid.metatiling
pyramid_metatiling = tp.metatiling
multiplier = tile_metatiling / pyramid_metatiling
if tile_metatiling > pyramid_metatiling:
return [
tp.tile(
tile.zoom,
int(multiplier) * tile.row + row_offset,
int(multiplier) * tile.col + col_offset
)
for row_offset, col_offset in product(
range(int(multiplier)), range(int(multiplier))
)
]
elif tile_metatiling < pyramid_metatiling:
return [tp.tile(
tile.zoom, int(multiplier * tile.row), int(multiplier * tile.col)
)]
else:
return [tp.tile(*tile.id)] | python | def _tile_intersecting_tilepyramid(tile, tp):
"""Return all tiles from tilepyramid intersecting with tile."""
if tile.tp.grid != tp.grid:
raise ValueError("Tile and TilePyramid source grids must be the same.")
tile_metatiling = tile.tile_pyramid.metatiling
pyramid_metatiling = tp.metatiling
multiplier = tile_metatiling / pyramid_metatiling
if tile_metatiling > pyramid_metatiling:
return [
tp.tile(
tile.zoom,
int(multiplier) * tile.row + row_offset,
int(multiplier) * tile.col + col_offset
)
for row_offset, col_offset in product(
range(int(multiplier)), range(int(multiplier))
)
]
elif tile_metatiling < pyramid_metatiling:
return [tp.tile(
tile.zoom, int(multiplier * tile.row), int(multiplier * tile.col)
)]
else:
return [tp.tile(*tile.id)] | [
"def",
"_tile_intersecting_tilepyramid",
"(",
"tile",
",",
"tp",
")",
":",
"if",
"tile",
".",
"tp",
".",
"grid",
"!=",
"tp",
".",
"grid",
":",
"raise",
"ValueError",
"(",
"\"Tile and TilePyramid source grids must be the same.\"",
")",
"tile_metatiling",
"=",
"tile",
".",
"tile_pyramid",
".",
"metatiling",
"pyramid_metatiling",
"=",
"tp",
".",
"metatiling",
"multiplier",
"=",
"tile_metatiling",
"/",
"pyramid_metatiling",
"if",
"tile_metatiling",
">",
"pyramid_metatiling",
":",
"return",
"[",
"tp",
".",
"tile",
"(",
"tile",
".",
"zoom",
",",
"int",
"(",
"multiplier",
")",
"*",
"tile",
".",
"row",
"+",
"row_offset",
",",
"int",
"(",
"multiplier",
")",
"*",
"tile",
".",
"col",
"+",
"col_offset",
")",
"for",
"row_offset",
",",
"col_offset",
"in",
"product",
"(",
"range",
"(",
"int",
"(",
"multiplier",
")",
")",
",",
"range",
"(",
"int",
"(",
"multiplier",
")",
")",
")",
"]",
"elif",
"tile_metatiling",
"<",
"pyramid_metatiling",
":",
"return",
"[",
"tp",
".",
"tile",
"(",
"tile",
".",
"zoom",
",",
"int",
"(",
"multiplier",
"*",
"tile",
".",
"row",
")",
",",
"int",
"(",
"multiplier",
"*",
"tile",
".",
"col",
")",
")",
"]",
"else",
":",
"return",
"[",
"tp",
".",
"tile",
"(",
"*",
"tile",
".",
"id",
")",
"]"
] | Return all tiles from tilepyramid intersecting with tile. | [
"Return",
"all",
"tiles",
"from",
"tilepyramid",
"intersecting",
"with",
"tile",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L126-L149 |
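A sketch of the metatiling relationship the helper above encodes: one metatile with metatiling 4 maps onto a 4 x 4 block of plain tiles in a pyramid sharing the same grid.

from tilematrix import TilePyramid
from tilematrix._funcs import _tile_intersecting_tilepyramid

meta_pyramid = TilePyramid("geodetic", metatiling=4)
plain_pyramid = TilePyramid("geodetic")

metatile = meta_pyramid.tile(5, 1, 1)
print(len(_tile_intersecting_tilepyramid(metatile, plain_pyramid)))  # 16 plain tiles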
ungarj/tilematrix | tilematrix/_funcs.py | _global_tiles_from_bounds | def _global_tiles_from_bounds(tp, bounds, zoom):
"""Return also Tiles if bounds cross the antimeridian."""
seen = set()
# clip to tilepyramid top and bottom bounds
left, right = bounds.left, bounds.right
top = tp.top if bounds.top > tp.top else bounds.top
bottom = tp.bottom if bounds.bottom < tp.bottom else bounds.bottom
if left >= tp.left and right <= tp.right:
for tile in _tiles_from_cleaned_bounds(tp, bounds, zoom):
yield tile
# bounds overlap on the Western side with antimeridian
if left < tp.left:
for tile in chain(
# tiles west of antimeridian
_tiles_from_cleaned_bounds(
tp,
Bounds(left + (tp.right - tp.left), bottom, tp.right, top),
zoom
),
# tiles east of antimeridian
_tiles_from_cleaned_bounds(
tp, Bounds(tp.left, bottom, right, top), zoom
)
):
# make output tiles unique
if tile.id not in seen:
seen.add(tile.id)
yield tile
# bounds overlap on the Eastern side with antimeridian
if right > tp.right:
for tile in chain(
# tiles west of antimeridian
_tiles_from_cleaned_bounds(
tp, Bounds(left, bottom, tp.right, top), zoom
),
# tiles east of antimeridian
_tiles_from_cleaned_bounds(
tp,
Bounds(tp.left, bottom, right - (tp.right - tp.left), top),
zoom
)
):
# make output tiles unique
if tile.id not in seen:
seen.add(tile.id)
yield tile | python | def _global_tiles_from_bounds(tp, bounds, zoom):
"""Return also Tiles if bounds cross the antimeridian."""
seen = set()
# clip to tilepyramid top and bottom bounds
left, right = bounds.left, bounds.right
top = tp.top if bounds.top > tp.top else bounds.top
bottom = tp.bottom if bounds.bottom < tp.bottom else bounds.bottom
if left >= tp.left and right <= tp.right:
for tile in _tiles_from_cleaned_bounds(tp, bounds, zoom):
yield tile
# bounds overlap on the Western side with antimeridian
if left < tp.left:
for tile in chain(
# tiles west of antimeridian
_tiles_from_cleaned_bounds(
tp,
Bounds(left + (tp.right - tp.left), bottom, tp.right, top),
zoom
),
# tiles east of antimeridian
_tiles_from_cleaned_bounds(
tp, Bounds(tp.left, bottom, right, top), zoom
)
):
# make output tiles unique
if tile.id not in seen:
seen.add(tile.id)
yield tile
# bounds overlap on the Eastern side with antimeridian
if right > tp.right:
for tile in chain(
# tiles west of antimeridian
_tiles_from_cleaned_bounds(
tp, Bounds(left, bottom, tp.right, top), zoom
),
# tiles east of antimeridian
_tiles_from_cleaned_bounds(
tp,
Bounds(tp.left, bottom, right - (tp.right - tp.left), top),
zoom
)
):
# make output tiles unique
if tile.id not in seen:
seen.add(tile.id)
yield tile | [
"def",
"_global_tiles_from_bounds",
"(",
"tp",
",",
"bounds",
",",
"zoom",
")",
":",
"seen",
"=",
"set",
"(",
")",
"# clip to tilepyramid top and bottom bounds",
"left",
",",
"right",
"=",
"bounds",
".",
"left",
",",
"bounds",
".",
"right",
"top",
"=",
"tp",
".",
"top",
"if",
"bounds",
".",
"top",
">",
"tp",
".",
"top",
"else",
"bounds",
".",
"top",
"bottom",
"=",
"tp",
".",
"bottom",
"if",
"bounds",
".",
"bottom",
"<",
"tp",
".",
"bottom",
"else",
"bounds",
".",
"bottom",
"if",
"left",
">=",
"tp",
".",
"left",
"and",
"right",
"<=",
"tp",
".",
"right",
":",
"for",
"tile",
"in",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"bounds",
",",
"zoom",
")",
":",
"yield",
"tile",
"# bounds overlap on the Western side with antimeridian",
"if",
"left",
"<",
"tp",
".",
"left",
":",
"for",
"tile",
"in",
"chain",
"(",
"# tiles west of antimeridian",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"Bounds",
"(",
"left",
"+",
"(",
"tp",
".",
"right",
"-",
"tp",
".",
"left",
")",
",",
"bottom",
",",
"tp",
".",
"right",
",",
"top",
")",
",",
"zoom",
")",
",",
"# tiles east of antimeridian",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"Bounds",
"(",
"tp",
".",
"left",
",",
"bottom",
",",
"right",
",",
"top",
")",
",",
"zoom",
")",
")",
":",
"# make output tiles unique",
"if",
"tile",
".",
"id",
"not",
"in",
"seen",
":",
"seen",
".",
"add",
"(",
"tile",
".",
"id",
")",
"yield",
"tile",
"# bounds overlap on the Eastern side with antimeridian",
"if",
"right",
">",
"tp",
".",
"right",
":",
"for",
"tile",
"in",
"chain",
"(",
"# tiles west of antimeridian",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"Bounds",
"(",
"left",
",",
"bottom",
",",
"tp",
".",
"right",
",",
"top",
")",
",",
"zoom",
")",
",",
"# tiles east of antimeridian",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"Bounds",
"(",
"tp",
".",
"left",
",",
"bottom",
",",
"right",
"-",
"(",
"tp",
".",
"right",
"-",
"tp",
".",
"left",
")",
",",
"top",
")",
",",
"zoom",
")",
")",
":",
"# make output tiles unique",
"if",
"tile",
".",
"id",
"not",
"in",
"seen",
":",
"seen",
".",
"add",
"(",
"tile",
".",
"id",
")",
"yield",
"tile"
] | Return also Tiles if bounds cross the antimeridian. | [
"Return",
"also",
"Tiles",
"if",
"bounds",
"cross",
"the",
"antimeridian",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L152-L201 |
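The generator above expects the same Bounds namedtuple used throughout the module; the import path below (tilematrix._types) is an assumption and may need adjusting if Bounds lives elsewhere in your version.

from tilematrix import TilePyramid
from tilematrix._funcs import _global_tiles_from_bounds
from tilematrix._types import Bounds   # assumed location of the Bounds namedtuple

tp = TilePyramid("geodetic")

# Bounds reaching 5 degrees past the antimeridian on the Eastern side.
bounds = Bounds(175, -5, 185, 5)
for tile in _global_tiles_from_bounds(tp, bounds, 3):
    print(tile.id)   # yields tiles on both sides of the antimeridian, without duplicates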
ungarj/tilematrix | tilematrix/_funcs.py | _tiles_from_cleaned_bounds | def _tiles_from_cleaned_bounds(tp, bounds, zoom):
"""Return all tiles intersecting with bounds."""
lb = _tile_from_xy(tp, bounds.left, bounds.bottom, zoom, on_edge_use="rt")
rt = _tile_from_xy(tp, bounds.right, bounds.top, zoom, on_edge_use="lb")
for tile_id in product([zoom], range(rt.row, lb.row + 1), range(lb.col, rt.col + 1)):
yield tp.tile(*tile_id) | python | def _tiles_from_cleaned_bounds(tp, bounds, zoom):
"""Return all tiles intersecting with bounds."""
lb = _tile_from_xy(tp, bounds.left, bounds.bottom, zoom, on_edge_use="rt")
rt = _tile_from_xy(tp, bounds.right, bounds.top, zoom, on_edge_use="lb")
for tile_id in product([zoom], range(rt.row, lb.row + 1), range(lb.col, rt.col + 1)):
yield tp.tile(*tile_id) | [
"def",
"_tiles_from_cleaned_bounds",
"(",
"tp",
",",
"bounds",
",",
"zoom",
")",
":",
"lb",
"=",
"_tile_from_xy",
"(",
"tp",
",",
"bounds",
".",
"left",
",",
"bounds",
".",
"bottom",
",",
"zoom",
",",
"on_edge_use",
"=",
"\"rt\"",
")",
"rt",
"=",
"_tile_from_xy",
"(",
"tp",
",",
"bounds",
".",
"right",
",",
"bounds",
".",
"top",
",",
"zoom",
",",
"on_edge_use",
"=",
"\"lb\"",
")",
"for",
"tile_id",
"in",
"product",
"(",
"[",
"zoom",
"]",
",",
"range",
"(",
"rt",
".",
"row",
",",
"lb",
".",
"row",
"+",
"1",
")",
",",
"range",
"(",
"lb",
".",
"col",
",",
"rt",
".",
"col",
"+",
"1",
")",
")",
":",
"yield",
"tp",
".",
"tile",
"(",
"*",
"tile_id",
")"
] | Return all tiles intersecting with bounds. | [
"Return",
"all",
"tiles",
"intersecting",
"with",
"bounds",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_funcs.py#L204-L209 |
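In normal use these helpers sit behind the public TilePyramid API; the call below assumes TilePyramid.tiles_from_bounds() is the public wrapper that, for bounds inside the pyramid, reduces to the helper above.

from tilematrix import TilePyramid

tp = TilePyramid("geodetic")

# Public entry point; for in-range bounds this delegates to _tiles_from_cleaned_bounds.
for tile in tp.tiles_from_bounds((0, 0, 10, 10), 4):
    print(tile.id)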
Metatab/metapack | metapack/cli/open.py | open_args | def open_args(subparsers):
"""
The `mp open` command will open a resource with the system application, such as Excel or OpenOffice
"""
parser = subparsers.add_parser(
'open',
help='open a CSV resource with a system application',
description=open_args.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(run_command=open_cmd)
parser.add_argument('metatabfile', nargs='?',
help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' ")
return parser | python | def open_args(subparsers):
"""
The `mp open` command will open a resource with the system application, such as Excel or OpenOffice
"""
parser = subparsers.add_parser(
'open',
help='open a CSV resource with a system application',
description=open_args.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(run_command=open_cmd)
parser.add_argument('metatabfile', nargs='?',
help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' ")
return parser | [
"def",
"open_args",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'open'",
",",
"help",
"=",
"'open a CSV resoruce with a system application'",
",",
"description",
"=",
"open_args",
".",
"__doc__",
",",
"formatter_class",
"=",
"argparse",
".",
"RawDescriptionHelpFormatter",
",",
")",
"parser",
".",
"set_defaults",
"(",
"run_command",
"=",
"open_cmd",
")",
"parser",
".",
"add_argument",
"(",
"'metatabfile'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"\"Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' \"",
")",
"return",
"parser"
] | The `mp open` command will open a resource with the system application, such as Excel or OpenOffice | [
"The",
"mp",
"open",
"command",
"will",
"open",
"a",
"resource",
"with",
"the",
"system",
"application",
"such",
"as",
"Excel",
"or",
"OpenOffice"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/open.py#L23-L40 |
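A sketch of how this registration function might be wired into a CLI; the program name and the sample argument are illustrative, and the dispatch call is left as a comment because its exact signature is not shown in the row above.

# Hypothetical wiring of the 'open' subcommand into an argparse-based CLI.
import argparse
from metapack.cli.open import open_args   # module path taken from the row above

parser = argparse.ArgumentParser(prog='mp')
subparsers = parser.add_subparsers()
open_args(subparsers)                     # registers the 'open' subcommand

args = parser.parse_args(['open', 'metadata.csv'])
# args.run_command is now bound to open_cmd via set_defaults; the surrounding
# CLI invokes it to perform the actual open.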
project-rig/rig | rig/place_and_route/route/ner.py | memoized_concentric_hexagons | def memoized_concentric_hexagons(radius):
"""A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons`
which memoizes the coordinates and stores them as a tuple. Note that the
caller must manually offset the coordinates as required.
This wrapper is used to avoid the need to repeatedly call
:py:func:`rig.geometry.concentric_hexagons` for every sink in a network.
This results in a relatively minor speedup (but at equally minor cost) in
large networks.
"""
out = _concentric_hexagons.get(radius)
if out is None:
out = tuple(concentric_hexagons(radius))
_concentric_hexagons[radius] = out
return out | python | def memoized_concentric_hexagons(radius):
"""A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons`
which memoizes the coordinates and stores them as a tuple. Note that the
caller must manually offset the coordinates as required.
This wrapper is used to avoid the need to repeatedly call
:py:func:`rig.geometry.concentric_hexagons` for every sink in a network.
This results in a relatively minor speedup (but at equally minor cost) in
large networks.
"""
out = _concentric_hexagons.get(radius)
if out is None:
out = tuple(concentric_hexagons(radius))
_concentric_hexagons[radius] = out
return out | [
"def",
"memoized_concentric_hexagons",
"(",
"radius",
")",
":",
"out",
"=",
"_concentric_hexagons",
".",
"get",
"(",
"radius",
")",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"tuple",
"(",
"concentric_hexagons",
"(",
"radius",
")",
")",
"_concentric_hexagons",
"[",
"radius",
"]",
"=",
"out",
"return",
"out"
] | A memoized wrapper around :py:func:`rig.geometry.concentric_hexagons`
which memoizes the coordinates and stores them as a tuple. Note that the
caller must manually offset the coordinates as required.
This wrapper is used to avoid the need to repeatedly call
:py:func:`rig.geometry.concentric_hexagons` for every sink in a network.
This results in a relatively minor speedup (but at equally minor cost) in
large networks. | [
"A",
"memoized",
"wrapper",
"around",
":",
"py",
":",
"func",
":",
"rig",
".",
"geometry",
".",
"concentric_hexagons",
"which",
"memoizes",
"the",
"coordinates",
"and",
"stores",
"them",
"as",
"a",
"tuple",
".",
"Note",
"that",
"the",
"caller",
"must",
"manually",
"offset",
"the",
"coordinates",
"as",
"required",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L38-L52 |
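A minimal sketch of the caller-side offsetting the docstring mentions; the radius and centre values are arbitrary examples.

# Hypothetical use of the memoized hexagon offsets.
from rig.place_and_route.route.ner import memoized_concentric_hexagons

offsets = memoized_concentric_hexagons(2)    # cached tuple of (dx, dy) offsets
centre = (4, 5)
cells = [(centre[0] + dx, centre[1] + dy) for dx, dy in offsets]
# Repeated calls with the same radius return the very same tuple object.
assert memoized_concentric_hexagons(2) is offsets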
project-rig/rig | rig/place_and_route/route/ner.py | ner_net | def ner_net(source, destinations, width, height, wrap_around=False, radius=10):
"""Produce a shortest path tree for a given net using NER.
This is the kernel of the NER algorithm.
Parameters
----------
source : (x, y)
The coordinate of the source vertex.
destinations : iterable([(x, y), ...])
The coordinates of destination vertices.
width : int
Width of the system (nodes)
height : int
Height of the system (nodes)
wrap_around : bool
True if wrap-around links should be used, false if they should be
avoided.
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A RoutingTree is produced rooted at the source and visiting all
destinations but which does not contain any vertices etc. For
convenience, a dictionary mapping from destination (x, y) coordinates
to the associated RoutingTree is provided to allow the caller to insert
these items.
"""
# Map from (x, y) to RoutingTree objects
route = {source: RoutingTree(source)}
# Handle each destination, sorted by distance from the source, closest
# first.
for destination in sorted(destinations,
key=(lambda destination:
shortest_mesh_path_length(
to_xyz(source), to_xyz(destination))
if not wrap_around else
shortest_torus_path_length(
to_xyz(source), to_xyz(destination),
width, height))):
# We shall attempt to find our nearest neighbouring placed node.
neighbour = None
# Try to find a nearby (within radius hops) node in the routing tree
# that we can route to (falling back on just routing to the source).
#
# An implementation according to the algorithm's original
# specification looks for nodes at each point in a growing set of rings
# of concentric hexagons. If it doesn't find any destinations this
# means an awful lot of checks: 1261 for the default radius of 20.
#
# An alternative (but behaviourally identical) implementation scans the
# list of all route nodes created so far and finds the closest node
# which is < radius hops (falling back on the origin if no node is
# closer than radius hops). This implementation requires one check per
# existing route node. In most routes this is probably a lot less than
# 1261 since most routes will probably have at most a few hundred route
# nodes by the time the last destination is being routed.
#
# Which implementation is best is a difficult question to answer:
# * In principle nets with quite localised connections (e.g.
# nearest-neighbour or centroids traffic) may route slightly more
# quickly with the original algorithm since it may very quickly find
# a neighbour.
# * In nets which connect very spaced-out destinations the second
# implementation may be quicker since in such a scenario it is
# unlikely that a neighbour will be found.
# * In extremely high-fan-out nets (e.g. broadcasts), the original
# method is very likely to perform *far* better than the alternative
# method since most iterations will complete immediately while the
# alternative method must scan *all* the route vertices.
# As such, it should be clear that neither method alone is 'best' and
# both have degenerate performance in certain completely reasonable
# styles of net. As a result, a simple heuristic is used to decide
# which technique to use.
#
# The following micro-benchmarks are crude estimates of the
# runtime-per-iteration of each approach (at least in the case of a
# torus topology)::
#
# $ # Original approach
# $ python -m timeit --setup 'x, y, w, h, r = 1, 2, 5, 10, \
# {x:None for x in range(10)}' \
# 'x += 1; y += 1; x %= w; y %= h; (x, y) in r'
# 1000000 loops, best of 3: 0.207 usec per loop
# $ # Alternative approach
# $ python -m timeit --setup 'from rig.geometry import \
# shortest_torus_path_length' \
# 'shortest_torus_path_length( \
# (0, 1, 2), (3, 2, 1), 10, 10)'
# 1000000 loops, best of 3: 0.666 usec per loop
#
# From this we can approximately suggest that the alternative approach
# is 3x more expensive per iteration. A very crude heuristic is to use
# the original approach when the number of route nodes is more than
# 1/3rd of the number of routes checked by the original method.
concentric_hexagons = memoized_concentric_hexagons(radius)
if len(concentric_hexagons) < len(route) / 3:
# Original approach: Start looking for route nodes in a concentric
# spiral pattern out from the destination node.
for x, y in concentric_hexagons:
x += destination[0]
y += destination[1]
if wrap_around:
x %= width
y %= height
if (x, y) in route:
neighbour = (x, y)
break
else:
# Alternative approach: Scan over every route node and check to see
# if any are < radius, picking the closest one if so.
neighbour = None
neighbour_distance = None
for candidate_neighbour in route:
if wrap_around:
distance = shortest_torus_path_length(
to_xyz(candidate_neighbour), to_xyz(destination),
width, height)
else:
distance = shortest_mesh_path_length(
to_xyz(candidate_neighbour), to_xyz(destination))
if distance <= radius and (neighbour is None or
distance < neighbour_distance):
neighbour = candidate_neighbour
neighbour_distance = distance
# Fall back on routing directly to the source if no node within radius
# hops of the destination was found.
if neighbour is None:
neighbour = source
# Find the shortest vector from the neighbour to this destination
if wrap_around:
vector = shortest_torus_path(to_xyz(neighbour),
to_xyz(destination),
width, height)
else:
vector = shortest_mesh_path(to_xyz(neighbour), to_xyz(destination))
# The longest-dimension-first route may inadvertently pass through an
# already connected node. If the route is allowed to pass through that
# node it would create a cycle in the route which would be VeryBad(TM).
# As a result, we work backward through the route and truncate it at
# the first point where the route intersects with a connected node.
ldf = longest_dimension_first(vector, neighbour, width, height)
i = len(ldf)
for direction, (x, y) in reversed(ldf):
i -= 1
if (x, y) in route:
# We've just bumped into a node which is already part of the
# route, this becomes our new neighbour and we truncate the LDF
# route. (Note ldf list is truncated just after the current
# position since it gives (direction, destination) pairs).
neighbour = (x, y)
ldf = ldf[i + 1:]
break
# Take the longest dimension first route.
last_node = route[neighbour]
for direction, (x, y) in ldf:
this_node = RoutingTree((x, y))
route[(x, y)] = this_node
last_node.children.append((Routes(direction), this_node))
last_node = this_node
return (route[source], route) | python | def ner_net(source, destinations, width, height, wrap_around=False, radius=10):
"""Produce a shortest path tree for a given net using NER.
This is the kernel of the NER algorithm.
Parameters
----------
source : (x, y)
The coordinate of the source vertex.
destinations : iterable([(x, y), ...])
The coordinates of destination vertices.
width : int
Width of the system (nodes)
height : int
Height of the system (nodes)
wrap_around : bool
True if wrap-around links should be used, false if they should be
avoided.
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A RoutingTree is produced rooted at the source and visiting all
destinations but which does not contain any vertices etc. For
convenience, a dictionary mapping from destination (x, y) coordinates
to the associated RoutingTree is provided to allow the caller to insert
these items.
"""
# Map from (x, y) to RoutingTree objects
route = {source: RoutingTree(source)}
# Handle each destination, sorted by distance from the source, closest
# first.
for destination in sorted(destinations,
key=(lambda destination:
shortest_mesh_path_length(
to_xyz(source), to_xyz(destination))
if not wrap_around else
shortest_torus_path_length(
to_xyz(source), to_xyz(destination),
width, height))):
# We shall attempt to find our nearest neighbouring placed node.
neighbour = None
# Try to find a nearby (within radius hops) node in the routing tree
# that we can route to (falling back on just routing to the source).
#
# An implementation according to the algorithm's original
# specification looks for nodes at each point in a growing set of rings
# of concentric hexagons. If it doesn't find any destinations this
# means an awful lot of checks: 1261 for the default radius of 20.
#
# An alternative (but behaviourally identical) implementation scans the
# list of all route nodes created so far and finds the closest node
# which is < radius hops (falling back on the origin if no node is
# closer than radius hops). This implementation requires one check per
# existing route node. In most routes this is probably a lot less than
# 1261 since most routes will probably have at most a few hundred route
# nodes by the time the last destination is being routed.
#
# Which implementation is best is a difficult question to answer:
# * In principle nets with quite localised connections (e.g.
# nearest-neighbour or centroids traffic) may route slightly more
# quickly with the original algorithm since it may very quickly find
# a neighbour.
# * In nets which connect very spaced-out destinations the second
# implementation may be quicker since in such a scenario it is
# unlikely that a neighbour will be found.
# * In extremely high-fan-out nets (e.g. broadcasts), the original
# method is very likely to perform *far* better than the alternative
# method since most iterations will complete immediately while the
# alternative method must scan *all* the route vertices.
# As such, it should be clear that neither method alone is 'best' and
# both have degenerate performance in certain completely reasonable
# styles of net. As a result, a simple heuristic is used to decide
# which technique to use.
#
# The following micro-benchmarks are crude estimates of the
# runtime-per-iteration of each approach (at least in the case of a
# torus topology)::
#
# $ # Original approach
# $ python -m timeit --setup 'x, y, w, h, r = 1, 2, 5, 10, \
# {x:None for x in range(10)}' \
# 'x += 1; y += 1; x %= w; y %= h; (x, y) in r'
# 1000000 loops, best of 3: 0.207 usec per loop
# $ # Alternative approach
# $ python -m timeit --setup 'from rig.geometry import \
# shortest_torus_path_length' \
# 'shortest_torus_path_length( \
# (0, 1, 2), (3, 2, 1), 10, 10)'
# 1000000 loops, best of 3: 0.666 usec per loop
#
# From this we can approximately suggest that the alternative approach
# is 3x more expensive per iteration. A very crude heuristic is to use
# the original approach when the number of route nodes is more than
# 1/3rd of the number of routes checked by the original method.
concentric_hexagons = memoized_concentric_hexagons(radius)
if len(concentric_hexagons) < len(route) / 3:
# Original approach: Start looking for route nodes in a concentric
# spiral pattern out from the destination node.
for x, y in concentric_hexagons:
x += destination[0]
y += destination[1]
if wrap_around:
x %= width
y %= height
if (x, y) in route:
neighbour = (x, y)
break
else:
# Alternative approach: Scan over every route node and check to see
# if any are < radius, picking the closest one if so.
neighbour = None
neighbour_distance = None
for candidate_neighbour in route:
if wrap_around:
distance = shortest_torus_path_length(
to_xyz(candidate_neighbour), to_xyz(destination),
width, height)
else:
distance = shortest_mesh_path_length(
to_xyz(candidate_neighbour), to_xyz(destination))
if distance <= radius and (neighbour is None or
distance < neighbour_distance):
neighbour = candidate_neighbour
neighbour_distance = distance
# Fall back on routing directly to the source if no node within radius
# hops of the destination was found.
if neighbour is None:
neighbour = source
# Find the shortest vector from the neighbour to this destination
if wrap_around:
vector = shortest_torus_path(to_xyz(neighbour),
to_xyz(destination),
width, height)
else:
vector = shortest_mesh_path(to_xyz(neighbour), to_xyz(destination))
# The longest-dimension-first route may inadvertently pass through an
# already connected node. If the route is allowed to pass through that
# node it would create a cycle in the route which would be VeryBad(TM).
# As a result, we work backward through the route and truncate it at
# the first point where the route intersects with a connected node.
ldf = longest_dimension_first(vector, neighbour, width, height)
i = len(ldf)
for direction, (x, y) in reversed(ldf):
i -= 1
if (x, y) in route:
# We've just bumped into a node which is already part of the
# route, this becomes our new neighbour and we truncate the LDF
# route. (Note ldf list is truncated just after the current
# position since it gives (direction, destination) pairs).
neighbour = (x, y)
ldf = ldf[i + 1:]
break
# Take the longest dimension first route.
last_node = route[neighbour]
for direction, (x, y) in ldf:
this_node = RoutingTree((x, y))
route[(x, y)] = this_node
last_node.children.append((Routes(direction), this_node))
last_node = this_node
return (route[source], route) | [
"def",
"ner_net",
"(",
"source",
",",
"destinations",
",",
"width",
",",
"height",
",",
"wrap_around",
"=",
"False",
",",
"radius",
"=",
"10",
")",
":",
"# Map from (x, y) to RoutingTree objects",
"route",
"=",
"{",
"source",
":",
"RoutingTree",
"(",
"source",
")",
"}",
"# Handle each destination, sorted by distance from the source, closest",
"# first.",
"for",
"destination",
"in",
"sorted",
"(",
"destinations",
",",
"key",
"=",
"(",
"lambda",
"destination",
":",
"shortest_mesh_path_length",
"(",
"to_xyz",
"(",
"source",
")",
",",
"to_xyz",
"(",
"destination",
")",
")",
"if",
"not",
"wrap_around",
"else",
"shortest_torus_path_length",
"(",
"to_xyz",
"(",
"source",
")",
",",
"to_xyz",
"(",
"destination",
")",
",",
"width",
",",
"height",
")",
")",
")",
":",
"# We shall attempt to find our nearest neighbouring placed node.",
"neighbour",
"=",
"None",
"# Try to find a nearby (within radius hops) node in the routing tree",
"# that we can route to (falling back on just routing to the source).",
"#",
"# In an implementation according to the algorithm's original",
"# specification looks for nodes at each point in a growing set of rings",
"# of concentric hexagons. If it doesn't find any destinations this",
"# means an awful lot of checks: 1261 for the default radius of 20.",
"#",
"# An alternative (but behaviourally identical) implementation scans the",
"# list of all route nodes created so far and finds the closest node",
"# which is < radius hops (falling back on the origin if no node is",
"# closer than radius hops). This implementation requires one check per",
"# existing route node. In most routes this is probably a lot less than",
"# 1261 since most routes will probably have at most a few hundred route",
"# nodes by the time the last destination is being routed.",
"#",
"# Which implementation is best is a difficult question to answer:",
"# * In principle nets with quite localised connections (e.g.",
"# nearest-neighbour or centroids traffic) may route slightly more",
"# quickly with the original algorithm since it may very quickly find",
"# a neighbour.",
"# * In nets which connect very spaced-out destinations the second",
"# implementation may be quicker since in such a scenario it is",
"# unlikely that a neighbour will be found.",
"# * In extremely high-fan-out nets (e.g. broadcasts), the original",
"# method is very likely to perform *far* better than the alternative",
"# method since most iterations will complete immediately while the",
"# alternative method must scan *all* the route vertices.",
"# As such, it should be clear that neither method alone is 'best' and",
"# both have degenerate performance in certain completely reasonable",
"# styles of net. As a result, a simple heuristic is used to decide",
"# which technique to use.",
"#",
"# The following micro-benchmarks are crude estimate of the",
"# runtime-per-iteration of each approach (at least in the case of a",
"# torus topology)::",
"#",
"# $ # Original approach",
"# $ python -m timeit --setup 'x, y, w, h, r = 1, 2, 5, 10, \\",
"# {x:None for x in range(10)}' \\",
"# 'x += 1; y += 1; x %= w; y %= h; (x, y) in r'",
"# 1000000 loops, best of 3: 0.207 usec per loop",
"# $ # Alternative approach",
"# $ python -m timeit --setup 'from rig.geometry import \\",
"# shortest_torus_path_length' \\",
"# 'shortest_torus_path_length( \\",
"# (0, 1, 2), (3, 2, 1), 10, 10)'",
"# 1000000 loops, best of 3: 0.666 usec per loop",
"#",
"# From this we can approximately suggest that the alternative approach",
"# is 3x more expensive per iteration. A very crude heuristic is to use",
"# the original approach when the number of route nodes is more than",
"# 1/3rd of the number of routes checked by the original method.",
"concentric_hexagons",
"=",
"memoized_concentric_hexagons",
"(",
"radius",
")",
"if",
"len",
"(",
"concentric_hexagons",
")",
"<",
"len",
"(",
"route",
")",
"/",
"3",
":",
"# Original approach: Start looking for route nodes in a concentric",
"# spiral pattern out from the destination node.",
"for",
"x",
",",
"y",
"in",
"concentric_hexagons",
":",
"x",
"+=",
"destination",
"[",
"0",
"]",
"y",
"+=",
"destination",
"[",
"1",
"]",
"if",
"wrap_around",
":",
"x",
"%=",
"width",
"y",
"%=",
"height",
"if",
"(",
"x",
",",
"y",
")",
"in",
"route",
":",
"neighbour",
"=",
"(",
"x",
",",
"y",
")",
"break",
"else",
":",
"# Alternative approach: Scan over every route node and check to see",
"# if any are < radius, picking the closest one if so.",
"neighbour",
"=",
"None",
"neighbour_distance",
"=",
"None",
"for",
"candidate_neighbour",
"in",
"route",
":",
"if",
"wrap_around",
":",
"distance",
"=",
"shortest_torus_path_length",
"(",
"to_xyz",
"(",
"candidate_neighbour",
")",
",",
"to_xyz",
"(",
"destination",
")",
",",
"width",
",",
"height",
")",
"else",
":",
"distance",
"=",
"shortest_mesh_path_length",
"(",
"to_xyz",
"(",
"candidate_neighbour",
")",
",",
"to_xyz",
"(",
"destination",
")",
")",
"if",
"distance",
"<=",
"radius",
"and",
"(",
"neighbour",
"is",
"None",
"or",
"distance",
"<",
"neighbour_distance",
")",
":",
"neighbour",
"=",
"candidate_neighbour",
"neighbour_distance",
"=",
"distance",
"# Fall back on routing directly to the source if no nodes within radius",
"# hops of the destination was found.",
"if",
"neighbour",
"is",
"None",
":",
"neighbour",
"=",
"source",
"# Find the shortest vector from the neighbour to this destination",
"if",
"wrap_around",
":",
"vector",
"=",
"shortest_torus_path",
"(",
"to_xyz",
"(",
"neighbour",
")",
",",
"to_xyz",
"(",
"destination",
")",
",",
"width",
",",
"height",
")",
"else",
":",
"vector",
"=",
"shortest_mesh_path",
"(",
"to_xyz",
"(",
"neighbour",
")",
",",
"to_xyz",
"(",
"destination",
")",
")",
"# The longest-dimension-first route may inadvertently pass through an",
"# already connected node. If the route is allowed to pass through that",
"# node it would create a cycle in the route which would be VeryBad(TM).",
"# As a result, we work backward through the route and truncate it at",
"# the first point where the route intersects with a connected node.",
"ldf",
"=",
"longest_dimension_first",
"(",
"vector",
",",
"neighbour",
",",
"width",
",",
"height",
")",
"i",
"=",
"len",
"(",
"ldf",
")",
"for",
"direction",
",",
"(",
"x",
",",
"y",
")",
"in",
"reversed",
"(",
"ldf",
")",
":",
"i",
"-=",
"1",
"if",
"(",
"x",
",",
"y",
")",
"in",
"route",
":",
"# We've just bumped into a node which is already part of the",
"# route, this becomes our new neighbour and we truncate the LDF",
"# route. (Note ldf list is truncated just after the current",
"# position since it gives (direction, destination) pairs).",
"neighbour",
"=",
"(",
"x",
",",
"y",
")",
"ldf",
"=",
"ldf",
"[",
"i",
"+",
"1",
":",
"]",
"break",
"# Take the longest dimension first route.",
"last_node",
"=",
"route",
"[",
"neighbour",
"]",
"for",
"direction",
",",
"(",
"x",
",",
"y",
")",
"in",
"ldf",
":",
"this_node",
"=",
"RoutingTree",
"(",
"(",
"x",
",",
"y",
")",
")",
"route",
"[",
"(",
"x",
",",
"y",
")",
"]",
"=",
"this_node",
"last_node",
".",
"children",
".",
"append",
"(",
"(",
"Routes",
"(",
"direction",
")",
",",
"this_node",
")",
")",
"last_node",
"=",
"this_node",
"return",
"(",
"route",
"[",
"source",
"]",
",",
"route",
")"
] | Produce a shortest path tree for a given net using NER.
This is the kernel of the NER algorithm.
Parameters
----------
source : (x, y)
The coordinate of the source vertex.
destinations : iterable([(x, y), ...])
The coordinates of destination vertices.
width : int
Width of the system (nodes)
height : int
Height of the system (nodes)
wrap_around : bool
True if wrap-around links should be used, false if they should be
avoided.
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A RoutingTree is produced rooted at the source and visiting all
destinations but which does not contain any vertices etc. For
convenience, a dictionary mapping from destination (x, y) coordinates
to the associated RoutingTree is provided to allow the caller to insert
these items. | [
"Produce",
"a",
"shortest",
"path",
"tree",
"for",
"a",
"given",
"net",
"using",
"NER",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L55-L228 |
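A usage sketch for ner_net as documented in the row above; the 10x10 torus and the chosen coordinates are illustrative assumptions.

# Hypothetical call: build an NER tree for one net on a 10x10 torus.
from rig.place_and_route.route.ner import ner_net

root, lookup = ner_net(source=(0, 0),
                       destinations=[(3, 4), (7, 1), (9, 9)],
                       width=10, height=10,
                       wrap_around=True,
                       radius=10)
# root is the RoutingTree rooted at (0, 0); lookup maps every visited (x, y)
# to its tree node so the caller can attach sink vertices afterwards.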
project-rig/rig | rig/place_and_route/route/ner.py | copy_and_disconnect_tree | def copy_and_disconnect_tree(root, machine):
"""Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links.
"""
new_root = None
# Lookup for copied routing tree {(x, y): RoutingTree, ...}
new_lookup = {}
# List of missing connections in the copied routing tree [(new_parent,
# new_child), ...]
broken_links = set()
# A queue [(new_parent, direction, old_node), ...]
to_visit = deque([(None, None, root)])
while to_visit:
new_parent, direction, old_node = to_visit.popleft()
if old_node.chip in machine:
# Create a copy of the node
new_node = RoutingTree(old_node.chip)
new_lookup[new_node.chip] = new_node
else:
# This chip is dead, move all its children into the parent node
assert new_parent is not None, \
"Net cannot be sourced from a dead chip."
new_node = new_parent
if new_parent is None:
# This is the root node
new_root = new_node
elif new_node is not new_parent:
# If this node is not dead, check connectivity to parent node (no
# reason to check connectivity between a dead node and its parent).
if direction in links_between(new_parent.chip,
new_node.chip,
machine):
# Is connected via working link
new_parent.children.append((direction, new_node))
else:
# Link to parent is dead (or original parent was dead and the
# new parent is not adjacent)
broken_links.add((new_parent.chip, new_node.chip))
# Copy children
for child_direction, child in old_node.children:
to_visit.append((new_node, child_direction, child))
return (new_root, new_lookup, broken_links) | python | def copy_and_disconnect_tree(root, machine):
"""Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links.
"""
new_root = None
# Lookup for copied routing tree {(x, y): RoutingTree, ...}
new_lookup = {}
# List of missing connections in the copied routing tree [(new_parent,
# new_child), ...]
broken_links = set()
# A queue [(new_parent, direction, old_node), ...]
to_visit = deque([(None, None, root)])
while to_visit:
new_parent, direction, old_node = to_visit.popleft()
if old_node.chip in machine:
# Create a copy of the node
new_node = RoutingTree(old_node.chip)
new_lookup[new_node.chip] = new_node
else:
# This chip is dead, move all its children into the parent node
assert new_parent is not None, \
"Net cannot be sourced from a dead chip."
new_node = new_parent
if new_parent is None:
# This is the root node
new_root = new_node
elif new_node is not new_parent:
# If this node is not dead, check connectivity to parent node (no
# reason to check connectivity between a dead node and its parent).
if direction in links_between(new_parent.chip,
new_node.chip,
machine):
# Is connected via working link
new_parent.children.append((direction, new_node))
else:
# Link to parent is dead (or original parent was dead and the
# new parent is not adjacent)
broken_links.add((new_parent.chip, new_node.chip))
# Copy children
for child_direction, child in old_node.children:
to_visit.append((new_node, child_direction, child))
return (new_root, new_lookup, broken_links) | [
"def",
"copy_and_disconnect_tree",
"(",
"root",
",",
"machine",
")",
":",
"new_root",
"=",
"None",
"# Lookup for copied routing tree {(x, y): RoutingTree, ...}",
"new_lookup",
"=",
"{",
"}",
"# List of missing connections in the copied routing tree [(new_parent,",
"# new_child), ...]",
"broken_links",
"=",
"set",
"(",
")",
"# A queue [(new_parent, direction, old_node), ...]",
"to_visit",
"=",
"deque",
"(",
"[",
"(",
"None",
",",
"None",
",",
"root",
")",
"]",
")",
"while",
"to_visit",
":",
"new_parent",
",",
"direction",
",",
"old_node",
"=",
"to_visit",
".",
"popleft",
"(",
")",
"if",
"old_node",
".",
"chip",
"in",
"machine",
":",
"# Create a copy of the node",
"new_node",
"=",
"RoutingTree",
"(",
"old_node",
".",
"chip",
")",
"new_lookup",
"[",
"new_node",
".",
"chip",
"]",
"=",
"new_node",
"else",
":",
"# This chip is dead, move all its children into the parent node",
"assert",
"new_parent",
"is",
"not",
"None",
",",
"\"Net cannot be sourced from a dead chip.\"",
"new_node",
"=",
"new_parent",
"if",
"new_parent",
"is",
"None",
":",
"# This is the root node",
"new_root",
"=",
"new_node",
"elif",
"new_node",
"is",
"not",
"new_parent",
":",
"# If this node is not dead, check connectivity to parent node (no",
"# reason to check connectivity between a dead node and its parent).",
"if",
"direction",
"in",
"links_between",
"(",
"new_parent",
".",
"chip",
",",
"new_node",
".",
"chip",
",",
"machine",
")",
":",
"# Is connected via working link",
"new_parent",
".",
"children",
".",
"append",
"(",
"(",
"direction",
",",
"new_node",
")",
")",
"else",
":",
"# Link to parent is dead (or original parent was dead and the",
"# new parent is not adjacent)",
"broken_links",
".",
"add",
"(",
"(",
"new_parent",
".",
"chip",
",",
"new_node",
".",
"chip",
")",
")",
"# Copy children",
"for",
"child_direction",
",",
"child",
"in",
"old_node",
".",
"children",
":",
"to_visit",
".",
"append",
"(",
"(",
"new_node",
",",
"child_direction",
",",
"child",
")",
")",
"return",
"(",
"new_root",
",",
"new_lookup",
",",
"broken_links",
")"
] | Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links. | [
"Copy",
"a",
"RoutingTree",
"(",
"containing",
"nothing",
"but",
"RoutingTrees",
")",
"disconnecting",
"nodes",
"which",
"are",
"not",
"connected",
"in",
"the",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L231-L306 |
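A sketch of a direct call; the 2x2 machine, the dead_links constructor argument, the Links import location and the single east hop are assumptions made purely for illustration.

# Hypothetical example: copy a one-hop tree over a machine with one dead link.
from rig.links import Links
from rig.place_and_route import Machine
from rig.place_and_route.routing_tree import RoutingTree
from rig.routing_table import Routes
from rig.place_and_route.route.ner import copy_and_disconnect_tree

machine = Machine(2, 2, dead_links={(0, 0, Links.east)})
root = RoutingTree((0, 0))
root.children.append((Routes.east, RoutingTree((1, 0))))

new_root, lookup, broken = copy_and_disconnect_tree(root, machine)
# With the east link dead, broken should contain ((0, 0), (1, 0)), ready to be
# reconnected by a_star / avoid_dead_links.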
project-rig/rig | rig/place_and_route/route/ner.py | a_star | def a_star(sink, heuristic_source, sources, machine, wrap_around):
"""Use A* to find a path from any of the sources to the sink.
Note that the heuristic means that the search will proceed towards
heuristic_source without any concern for any other sources. This means that
the algorithm may miss a very close neighbour in order to pursue its goal
of reaching heuristic_source. This is not considered a problem since 1) the
heuristic source will typically be in the direction of the rest of the tree
and nearby and often the closest entity 2) it prevents us accidentally
forming loops in the rest of the tree since we'll stop as soon as we touch
any part of it.
Parameters
----------
sink : (x, y)
heuristic_source : (x, y)
An element from `sources` which is used as a guiding heuristic for the
A* algorithm.
sources : set([(x, y), ...])
machine : :py:class:`~rig.place_and_route.Machine`
wrap_around : bool
Consider wrap-around links in heuristic distance calculations.
Returns
-------
[(:py:class:`~rig.routing_table.Routes`, (x, y)), ...]
A path starting with a coordinate in `sources` and terminating at
a connected neighbour of `sink` (i.e. the path does not include `sink`).
The direction given is the link down which to proceed from the given
(x, y) to arrive at the next point in the path.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path cannot be found.
"""
# Select the heuristic function to use for distances
if wrap_around:
heuristic = (lambda node:
shortest_torus_path_length(to_xyz(node),
to_xyz(heuristic_source),
machine.width, machine.height))
else:
heuristic = (lambda node:
shortest_mesh_path_length(to_xyz(node),
to_xyz(heuristic_source)))
# A dictionary {node: (direction, previous_node)}. An entry indicates that
# 1) the node has been visited and 2) which node we hopped from (and the
# direction used) to reach previous_node. This may be None if the node is
# the sink.
visited = {sink: None}
# The node which the tree will be reconnected to
selected_source = None
# A heap (accessed via heapq) of (distance, (x, y)) where distance is the
# distance between (x, y) and heuristic_source and (x, y) is a node to
# explore.
to_visit = [(heuristic(sink), sink)]
while to_visit:
_, node = heapq.heappop(to_visit)
# Terminate if we've found the destination
if node in sources:
selected_source = node
break
# Try all neighbouring locations. Note: link identifiers are from the
# perspective of the neighbour, not the current node!
for neighbour_link in Links:
vector = neighbour_link.opposite.to_vector()
neighbour = ((node[0] + vector[0]) % machine.width,
(node[1] + vector[1]) % machine.height)
# Skip links which are broken
if (neighbour[0], neighbour[1], neighbour_link) not in machine:
continue
# Skip neighbours who have already been visited
if neighbour in visited:
continue
# Explore all other neighbours
visited[neighbour] = (neighbour_link, node)
heapq.heappush(to_visit, (heuristic(neighbour), neighbour))
# Fail if no paths exist
if selected_source is None:
raise MachineHasDisconnectedSubregion(
"Could not find path from {} to {}".format(
sink, heuristic_source))
# Reconstruct the discovered path, starting from the source we found and
# working back until the sink.
path = [(Routes(visited[selected_source][0]), selected_source)]
while visited[path[-1][1]][1] != sink:
node = visited[path[-1][1]][1]
direction = Routes(visited[node][0])
path.append((direction, node))
return path | python | def a_star(sink, heuristic_source, sources, machine, wrap_around):
"""Use A* to find a path from any of the sources to the sink.
Note that the heuristic means that the search will proceed towards
heuristic_source without any concern for any other sources. This means that
the algorithm may miss a very close neighbour in order to pursue its goal
of reaching heuristic_source. This is not considered a problem since 1) the
heuristic source will typically be in the direction of the rest of the tree
and nearby and often the closest entity 2) it prevents us accidentally
forming loops in the rest of the tree since we'll stop as soon as we touch
any part of it.
Parameters
----------
sink : (x, y)
heuristic_source : (x, y)
An element from `sources` which is used as a guiding heuristic for the
A* algorithm.
sources : set([(x, y), ...])
machine : :py:class:`~rig.place_and_route.Machine`
wrap_around : bool
Consider wrap-around links in heuristic distance calculations.
Returns
-------
[(:py:class:`~rig.routing_table.Routes`, (x, y)), ...]
A path starting with a coordinate in `sources` and terminating at
a connected neighbour of `sink` (i.e. the path does not include `sink`).
The direction given is the link down which to proceed from the given
(x, y) to arrive at the next point in the path.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path cannot be found.
"""
# Select the heuristic function to use for distances
if wrap_around:
heuristic = (lambda node:
shortest_torus_path_length(to_xyz(node),
to_xyz(heuristic_source),
machine.width, machine.height))
else:
heuristic = (lambda node:
shortest_mesh_path_length(to_xyz(node),
to_xyz(heuristic_source)))
# A dictionary {node: (direction, previous_node)}. An entry indicates that
# 1) the node has been visited and 2) which node we hopped from (and the
# direction used) to reach previous_node. This may be None if the node is
# the sink.
visited = {sink: None}
# The node which the tree will be reconnected to
selected_source = None
# A heap (accessed via heapq) of (distance, (x, y)) where distance is the
# distance between (x, y) and heuristic_source and (x, y) is a node to
# explore.
to_visit = [(heuristic(sink), sink)]
while to_visit:
_, node = heapq.heappop(to_visit)
# Terminate if we've found the destination
if node in sources:
selected_source = node
break
# Try all neighbouring locations. Note: link identifiers are from the
# perspective of the neighbour, not the current node!
for neighbour_link in Links:
vector = neighbour_link.opposite.to_vector()
neighbour = ((node[0] + vector[0]) % machine.width,
(node[1] + vector[1]) % machine.height)
# Skip links which are broken
if (neighbour[0], neighbour[1], neighbour_link) not in machine:
continue
# Skip neighbours who have already been visited
if neighbour in visited:
continue
# Explore all other neighbours
visited[neighbour] = (neighbour_link, node)
heapq.heappush(to_visit, (heuristic(neighbour), neighbour))
# Fail if no paths exist
if selected_source is None:
raise MachineHasDisconnectedSubregion(
"Could not find path from {} to {}".format(
sink, heuristic_source))
# Reconstruct the discovered path, starting from the source we found and
# working back until the sink.
path = [(Routes(visited[selected_source][0]), selected_source)]
while visited[path[-1][1]][1] != sink:
node = visited[path[-1][1]][1]
direction = Routes(visited[node][0])
path.append((direction, node))
return path | [
"def",
"a_star",
"(",
"sink",
",",
"heuristic_source",
",",
"sources",
",",
"machine",
",",
"wrap_around",
")",
":",
"# Select the heuristic function to use for distances",
"if",
"wrap_around",
":",
"heuristic",
"=",
"(",
"lambda",
"node",
":",
"shortest_torus_path_length",
"(",
"to_xyz",
"(",
"node",
")",
",",
"to_xyz",
"(",
"heuristic_source",
")",
",",
"machine",
".",
"width",
",",
"machine",
".",
"height",
")",
")",
"else",
":",
"heuristic",
"=",
"(",
"lambda",
"node",
":",
"shortest_mesh_path_length",
"(",
"to_xyz",
"(",
"node",
")",
",",
"to_xyz",
"(",
"heuristic_source",
")",
")",
")",
"# A dictionary {node: (direction, previous_node}. An entry indicates that",
"# 1) the node has been visited and 2) which node we hopped from (and the",
"# direction used) to reach previous_node. This may be None if the node is",
"# the sink.",
"visited",
"=",
"{",
"sink",
":",
"None",
"}",
"# The node which the tree will be reconnected to",
"selected_source",
"=",
"None",
"# A heap (accessed via heapq) of (distance, (x, y)) where distance is the",
"# distance between (x, y) and heuristic_source and (x, y) is a node to",
"# explore.",
"to_visit",
"=",
"[",
"(",
"heuristic",
"(",
"sink",
")",
",",
"sink",
")",
"]",
"while",
"to_visit",
":",
"_",
",",
"node",
"=",
"heapq",
".",
"heappop",
"(",
"to_visit",
")",
"# Terminate if we've found the destination",
"if",
"node",
"in",
"sources",
":",
"selected_source",
"=",
"node",
"break",
"# Try all neighbouring locations. Note: link identifiers are from the",
"# perspective of the neighbour, not the current node!",
"for",
"neighbour_link",
"in",
"Links",
":",
"vector",
"=",
"neighbour_link",
".",
"opposite",
".",
"to_vector",
"(",
")",
"neighbour",
"=",
"(",
"(",
"node",
"[",
"0",
"]",
"+",
"vector",
"[",
"0",
"]",
")",
"%",
"machine",
".",
"width",
",",
"(",
"node",
"[",
"1",
"]",
"+",
"vector",
"[",
"1",
"]",
")",
"%",
"machine",
".",
"height",
")",
"# Skip links which are broken",
"if",
"(",
"neighbour",
"[",
"0",
"]",
",",
"neighbour",
"[",
"1",
"]",
",",
"neighbour_link",
")",
"not",
"in",
"machine",
":",
"continue",
"# Skip neighbours who have already been visited",
"if",
"neighbour",
"in",
"visited",
":",
"continue",
"# Explore all other neighbours",
"visited",
"[",
"neighbour",
"]",
"=",
"(",
"neighbour_link",
",",
"node",
")",
"heapq",
".",
"heappush",
"(",
"to_visit",
",",
"(",
"heuristic",
"(",
"neighbour",
")",
",",
"neighbour",
")",
")",
"# Fail of no paths exist",
"if",
"selected_source",
"is",
"None",
":",
"raise",
"MachineHasDisconnectedSubregion",
"(",
"\"Could not find path from {} to {}\"",
".",
"format",
"(",
"sink",
",",
"heuristic_source",
")",
")",
"# Reconstruct the discovered path, starting from the source we found and",
"# working back until the sink.",
"path",
"=",
"[",
"(",
"Routes",
"(",
"visited",
"[",
"selected_source",
"]",
"[",
"0",
"]",
")",
",",
"selected_source",
")",
"]",
"while",
"visited",
"[",
"path",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
"!=",
"sink",
":",
"node",
"=",
"visited",
"[",
"path",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"]",
"[",
"1",
"]",
"direction",
"=",
"Routes",
"(",
"visited",
"[",
"node",
"]",
"[",
"0",
"]",
")",
"path",
".",
"append",
"(",
"(",
"direction",
",",
"node",
")",
")",
"return",
"path"
] | Use A* to find a path from any of the sources to the sink.
Note that the heuristic means that the search will proceed towards
heuristic_source without any concern for any other sources. This means that
the algorithm may miss a very close neighbour in order to pursue its goal
of reaching heuristic_source. This is not considered a problem since 1) the
heuristic source will typically be in the direction of the rest of the tree
and nearby and often the closest entity 2) it prevents us accidentally
forming loops in the rest of the tree since we'll stop as soon as we touch
any part of it.
Parameters
----------
sink : (x, y)
heuristic_source : (x, y)
An element from `sources` which is used as a guiding heuristic for the
A* algorithm.
sources : set([(x, y), ...])
machine : :py:class:`~rig.place_and_route.Machine`
wrap_around : bool
Consider wrap-around links in heuristic distance calculations.
Returns
-------
[(:py:class:`~rig.routing_table.Routes`, (x, y)), ...]
A path starting with a coordinate in `sources` and terminating at
a connected neighbour of `sink` (i.e. the path does not include `sink`).
The direction given is the link down which to proceed from the given
(x, y) to arrive at the next point in the path.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path cannot be found. | [
"Use",
"A",
"*",
"to",
"find",
"a",
"path",
"from",
"any",
"of",
"the",
"sources",
"to",
"the",
"sink",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L309-L410 |
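A sketch of invoking the search directly; the 4x4 machine and the coordinates are illustrative, and in normal use avoid_dead_links drives this function rather than user code.

# Hypothetical direct call: path from the tree fragment {(0, 0)} to the sink (2, 2).
from rig.place_and_route import Machine
from rig.place_and_route.route.ner import a_star

machine = Machine(4, 4)
path = a_star(sink=(2, 2),
              heuristic_source=(0, 0),
              sources={(0, 0)},
              machine=machine,
              wrap_around=False)
# path is a list of (Routes, (x, y)) hops beginning at a member of sources;
# the sink itself is not included.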
project-rig/rig | rig/place_and_route/route/ner.py | route_has_dead_links | def route_has_dead_links(root, machine):
"""Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
"""
for direction, (x, y), routes in root.traverse():
for route in routes:
if (x, y, route) not in machine:
return True
return False | python | def route_has_dead_links(root, machine):
"""Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
"""
for direction, (x, y), routes in root.traverse():
for route in routes:
if (x, y, route) not in machine:
return True
return False | [
"def",
"route_has_dead_links",
"(",
"root",
",",
"machine",
")",
":",
"for",
"direction",
",",
"(",
"x",
",",
"y",
")",
",",
"routes",
"in",
"root",
".",
"traverse",
"(",
")",
":",
"for",
"route",
"in",
"routes",
":",
"if",
"(",
"x",
",",
"y",
",",
"route",
")",
"not",
"in",
"machine",
":",
"return",
"True",
"return",
"False"
] | Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise. | [
"Quickly",
"determine",
"if",
"a",
"route",
"uses",
"any",
"dead",
"links",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L413-L433 |
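A small sketch; the 2x2 machine and the single east hop are illustrative.

# Hypothetical check of a one-hop tree against a fully working machine.
from rig.place_and_route import Machine
from rig.place_and_route.routing_tree import RoutingTree
from rig.routing_table import Routes
from rig.place_and_route.route.ner import route_has_dead_links

machine = Machine(2, 2)
root = RoutingTree((0, 0))
root.children.append((Routes.east, RoutingTree((1, 0))))
print(route_has_dead_links(root, machine))   # False while every used link is alive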
project-rig/rig | rig/place_and_route/route/ner.py | avoid_dead_links | def avoid_dead_links(root, machine, wrap_around=False):
"""Modify a RoutingTree to route-around dead links in a Machine.
Uses A* to reconnect disconnected branches of the tree (due to dead links
in the machine).
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
wrap_around : bool
Consider wrap-around links in pathfinding heuristics.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A new RoutingTree is produced rooted as before. A dictionary mapping
from (x, y) to the associated RoutingTree is provided for convenience.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path to reconnect the tree cannot be found.
"""
# Make a copy of the RoutingTree with all broken parts disconnected
root, lookup, broken_links = copy_and_disconnect_tree(root, machine)
# For each disconnected subtree, use A* to connect the tree to *any* other
# disconnected subtree. Note that this process will eventually result in
# all disconnected subtrees being connected, the result is a fully
# connected tree.
for parent, child in broken_links:
child_chips = set(c.chip for c in lookup[child])
# Try to reconnect broken links to any other part of the tree
# (excluding this broken subtree itself since that would create a
# cycle).
path = a_star(child, parent,
set(lookup).difference(child_chips),
machine, wrap_around)
# Add new RoutingTree nodes to reconnect the child to the tree.
last_node = lookup[path[0][1]]
last_direction = path[0][0]
for direction, (x, y) in path[1:]:
if (x, y) not in child_chips:
# This path segment traverses new ground so we must create a
# new RoutingTree for the segment.
new_node = RoutingTree((x, y))
# A* will not traverse anything but chips in this tree so this
# assert is merely a sanity check that this occurred correctly.
assert (x, y) not in lookup, "Cycle created."
lookup[(x, y)] = new_node
else:
# This path segment overlaps part of the disconnected tree
# (A* doesn't know where the disconnected tree is and thus
# doesn't avoid it). To prevent cycles being introduced, this
# overlapped node is severed from its parent and merged as part
# of the A* path.
new_node = lookup[(x, y)]
# Find the node's current parent and disconnect it.
for node in lookup[child]: # pragma: no branch
dn = [(d, n) for d, n in node.children if n == new_node]
assert len(dn) <= 1
if dn:
node.children.remove(dn[0])
# A node can only have one parent so we can stop now.
break
last_node.children.append((Routes(last_direction), new_node))
last_node = new_node
last_direction = direction
last_node.children.append((last_direction, lookup[child]))
return (root, lookup) | python | def avoid_dead_links(root, machine, wrap_around=False):
"""Modify a RoutingTree to route-around dead links in a Machine.
Uses A* to reconnect disconnected branches of the tree (due to dead links
in the machine).
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
wrap_around : bool
Consider wrap-around links in pathfinding heuristics.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A new RoutingTree is produced rooted as before. A dictionary mapping
from (x, y) to the associated RoutingTree is provided for convenience.
Raises
------
:py:class:`~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path to reconnect the tree cannot be found.
"""
# Make a copy of the RoutingTree with all broken parts disconnected
root, lookup, broken_links = copy_and_disconnect_tree(root, machine)
# For each disconnected subtree, use A* to connect the tree to *any* other
# disconnected subtree. Note that this process will eventually result in
# all disconnected subtrees being connected, the result is a fully
# connected tree.
for parent, child in broken_links:
child_chips = set(c.chip for c in lookup[child])
# Try to reconnect broken links to any other part of the tree
# (excluding this broken subtree itself since that would create a
# cycle).
path = a_star(child, parent,
set(lookup).difference(child_chips),
machine, wrap_around)
# Add new RoutingTree nodes to reconnect the child to the tree.
last_node = lookup[path[0][1]]
last_direction = path[0][0]
for direction, (x, y) in path[1:]:
if (x, y) not in child_chips:
# This path segment traverses new ground so we must create a
# new RoutingTree for the segment.
new_node = RoutingTree((x, y))
# A* will not traverse anything but chips in this tree so this
# assert is merely a sanity check that this occurred correctly.
assert (x, y) not in lookup, "Cycle created."
lookup[(x, y)] = new_node
else:
# This path segment overlaps part of the disconnected tree
# (A* doesn't know where the disconnected tree is and thus
# doesn't avoid it). To prevent cycles being introduced, this
# overlapped node is severed from its parent and merged as part
# of the A* path.
new_node = lookup[(x, y)]
# Find the node's current parent and disconnect it.
for node in lookup[child]: # pragma: no branch
dn = [(d, n) for d, n in node.children if n == new_node]
assert len(dn) <= 1
if dn:
node.children.remove(dn[0])
# A node can only have one parent so we can stop now.
break
last_node.children.append((Routes(last_direction), new_node))
last_node = new_node
last_direction = direction
last_node.children.append((last_direction, lookup[child]))
return (root, lookup) | [
"def",
"avoid_dead_links",
"(",
"root",
",",
"machine",
",",
"wrap_around",
"=",
"False",
")",
":",
"# Make a copy of the RoutingTree with all broken parts disconnected",
"root",
",",
"lookup",
",",
"broken_links",
"=",
"copy_and_disconnect_tree",
"(",
"root",
",",
"machine",
")",
"# For each disconnected subtree, use A* to connect the tree to *any* other",
"# disconnected subtree. Note that this process will eventually result in",
"# all disconnected subtrees being connected, the result is a fully",
"# connected tree.",
"for",
"parent",
",",
"child",
"in",
"broken_links",
":",
"child_chips",
"=",
"set",
"(",
"c",
".",
"chip",
"for",
"c",
"in",
"lookup",
"[",
"child",
"]",
")",
"# Try to reconnect broken links to any other part of the tree",
"# (excluding this broken subtree itself since that would create a",
"# cycle).",
"path",
"=",
"a_star",
"(",
"child",
",",
"parent",
",",
"set",
"(",
"lookup",
")",
".",
"difference",
"(",
"child_chips",
")",
",",
"machine",
",",
"wrap_around",
")",
"# Add new RoutingTree nodes to reconnect the child to the tree.",
"last_node",
"=",
"lookup",
"[",
"path",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
"last_direction",
"=",
"path",
"[",
"0",
"]",
"[",
"0",
"]",
"for",
"direction",
",",
"(",
"x",
",",
"y",
")",
"in",
"path",
"[",
"1",
":",
"]",
":",
"if",
"(",
"x",
",",
"y",
")",
"not",
"in",
"child_chips",
":",
"# This path segment traverses new ground so we must create a",
"# new RoutingTree for the segment.",
"new_node",
"=",
"RoutingTree",
"(",
"(",
"x",
",",
"y",
")",
")",
"# A* will not traverse anything but chips in this tree so this",
"# assert is meerly a sanity check that this ocurred correctly.",
"assert",
"(",
"x",
",",
"y",
")",
"not",
"in",
"lookup",
",",
"\"Cycle created.\"",
"lookup",
"[",
"(",
"x",
",",
"y",
")",
"]",
"=",
"new_node",
"else",
":",
"# This path segment overlaps part of the disconnected tree",
"# (A* doesn't know where the disconnected tree is and thus",
"# doesn't avoid it). To prevent cycles being introduced, this",
"# overlapped node is severed from its parent and merged as part",
"# of the A* path.",
"new_node",
"=",
"lookup",
"[",
"(",
"x",
",",
"y",
")",
"]",
"# Find the node's current parent and disconnect it.",
"for",
"node",
"in",
"lookup",
"[",
"child",
"]",
":",
"# pragma: no branch",
"dn",
"=",
"[",
"(",
"d",
",",
"n",
")",
"for",
"d",
",",
"n",
"in",
"node",
".",
"children",
"if",
"n",
"==",
"new_node",
"]",
"assert",
"len",
"(",
"dn",
")",
"<=",
"1",
"if",
"dn",
":",
"node",
".",
"children",
".",
"remove",
"(",
"dn",
"[",
"0",
"]",
")",
"# A node can only have one parent so we can stop now.",
"break",
"last_node",
".",
"children",
".",
"append",
"(",
"(",
"Routes",
"(",
"last_direction",
")",
",",
"new_node",
")",
")",
"last_node",
"=",
"new_node",
"last_direction",
"=",
"direction",
"last_node",
".",
"children",
".",
"append",
"(",
"(",
"last_direction",
",",
"lookup",
"[",
"child",
"]",
")",
")",
"return",
"(",
"root",
",",
"lookup",
")"
] | Modify a RoutingTree to route-around dead links in a Machine.
Uses A* to reconnect disconnected branches of the tree (due to dead links
in the machine).
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
wrap_around : bool
Consider wrap-around links in pathfinding heuristics.
Returns
-------
(:py:class:`~.rig.place_and_route.routing_tree.RoutingTree`,
{(x,y): :py:class:`~.rig.place_and_route.routing_tree.RoutingTree`, ...})
A new RoutingTree is produced rooted as before. A dictionarry mapping
from (x, y) to the associated RoutingTree is provided for convenience.
Raises
------
:py:class:~rig.place_and_route.exceptions.MachineHasDisconnectedSubregion`
If a path to reconnect the tree cannot be found. | [
"Modify",
"a",
"RoutingTree",
"to",
"route",
"-",
"around",
"dead",
"links",
"in",
"a",
"Machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L436-L514 |
project-rig/rig | rig/place_and_route/route/ner.py | route | def route(vertices_resources, nets, machine, constraints, placements,
allocations={}, core_resource=Cores, radius=20):
"""Routing algorithm based on Neighbour Exploring Routing (NER).
Algorithm refrence: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
method is becomes longest dimension first routing.
"""
wrap_around = machine.has_wrap_around_links()
# Vertices constrained to route to a specific link. {vertex: route}
route_to_endpoint = {}
for constraint in constraints:
if isinstance(constraint, RouteEndpointConstraint):
route_to_endpoint[constraint.vertex] = constraint.route
routes = {}
for net in nets:
# Generate routing tree (assuming a perfect machine)
root, lookup = ner_net(placements[net.source],
set(placements[sink] for sink in net.sinks),
machine.width, machine.height,
wrap_around, radius)
# Fix routes to avoid dead chips/links
if route_has_dead_links(root, machine):
root, lookup = avoid_dead_links(root, machine, wrap_around)
# Add the sinks in the net to the RoutingTree
for sink in net.sinks:
tree_node = lookup[placements[sink]]
if sink in route_to_endpoint:
# Sinks with route-to-endpoint constraints must be routed
# in the according directions.
tree_node.children.append((route_to_endpoint[sink], sink))
else:
cores = allocations.get(sink, {}).get(core_resource, None)
if cores is not None:
# Sinks with the core_resource resource specified must be
# routed to that set of cores.
for core in range(cores.start, cores.stop):
tree_node.children.append((Routes.core(core), sink))
else:
# Sinks without that resource are simply included without
# an associated route
tree_node.children.append((None, sink))
routes[net] = root
return routes | python | def route(vertices_resources, nets, machine, constraints, placements,
allocations={}, core_resource=Cores, radius=20):
"""Routing algorithm based on Neighbour Exploring Routing (NER).
Algorithm refrence: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
method is becomes longest dimension first routing.
"""
wrap_around = machine.has_wrap_around_links()
# Vertices constrained to route to a specific link. {vertex: route}
route_to_endpoint = {}
for constraint in constraints:
if isinstance(constraint, RouteEndpointConstraint):
route_to_endpoint[constraint.vertex] = constraint.route
routes = {}
for net in nets:
# Generate routing tree (assuming a perfect machine)
root, lookup = ner_net(placements[net.source],
set(placements[sink] for sink in net.sinks),
machine.width, machine.height,
wrap_around, radius)
# Fix routes to avoid dead chips/links
if route_has_dead_links(root, machine):
root, lookup = avoid_dead_links(root, machine, wrap_around)
# Add the sinks in the net to the RoutingTree
for sink in net.sinks:
tree_node = lookup[placements[sink]]
if sink in route_to_endpoint:
# Sinks with route-to-endpoint constraints must be routed
# in the according directions.
tree_node.children.append((route_to_endpoint[sink], sink))
else:
cores = allocations.get(sink, {}).get(core_resource, None)
if cores is not None:
# Sinks with the core_resource resource specified must be
# routed to that set of cores.
for core in range(cores.start, cores.stop):
tree_node.children.append((Routes.core(core), sink))
else:
# Sinks without that resource are simply included without
# an associated route
tree_node.children.append((None, sink))
routes[net] = root
return routes | [
"def",
"route",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"placements",
",",
"allocations",
"=",
"{",
"}",
",",
"core_resource",
"=",
"Cores",
",",
"radius",
"=",
"20",
")",
":",
"wrap_around",
"=",
"machine",
".",
"has_wrap_around_links",
"(",
")",
"# Vertices constrained to route to a specific link. {vertex: route}",
"route_to_endpoint",
"=",
"{",
"}",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"RouteEndpointConstraint",
")",
":",
"route_to_endpoint",
"[",
"constraint",
".",
"vertex",
"]",
"=",
"constraint",
".",
"route",
"routes",
"=",
"{",
"}",
"for",
"net",
"in",
"nets",
":",
"# Generate routing tree (assuming a perfect machine)",
"root",
",",
"lookup",
"=",
"ner_net",
"(",
"placements",
"[",
"net",
".",
"source",
"]",
",",
"set",
"(",
"placements",
"[",
"sink",
"]",
"for",
"sink",
"in",
"net",
".",
"sinks",
")",
",",
"machine",
".",
"width",
",",
"machine",
".",
"height",
",",
"wrap_around",
",",
"radius",
")",
"# Fix routes to avoid dead chips/links",
"if",
"route_has_dead_links",
"(",
"root",
",",
"machine",
")",
":",
"root",
",",
"lookup",
"=",
"avoid_dead_links",
"(",
"root",
",",
"machine",
",",
"wrap_around",
")",
"# Add the sinks in the net to the RoutingTree",
"for",
"sink",
"in",
"net",
".",
"sinks",
":",
"tree_node",
"=",
"lookup",
"[",
"placements",
"[",
"sink",
"]",
"]",
"if",
"sink",
"in",
"route_to_endpoint",
":",
"# Sinks with route-to-endpoint constraints must be routed",
"# in the according directions.",
"tree_node",
".",
"children",
".",
"append",
"(",
"(",
"route_to_endpoint",
"[",
"sink",
"]",
",",
"sink",
")",
")",
"else",
":",
"cores",
"=",
"allocations",
".",
"get",
"(",
"sink",
",",
"{",
"}",
")",
".",
"get",
"(",
"core_resource",
",",
"None",
")",
"if",
"cores",
"is",
"not",
"None",
":",
"# Sinks with the core_resource resource specified must be",
"# routed to that set of cores.",
"for",
"core",
"in",
"range",
"(",
"cores",
".",
"start",
",",
"cores",
".",
"stop",
")",
":",
"tree_node",
".",
"children",
".",
"append",
"(",
"(",
"Routes",
".",
"core",
"(",
"core",
")",
",",
"sink",
")",
")",
"else",
":",
"# Sinks without that resource are simply included without",
"# an associated route",
"tree_node",
".",
"children",
".",
"append",
"(",
"(",
"None",
",",
"sink",
")",
")",
"routes",
"[",
"net",
"]",
"=",
"root",
"return",
"routes"
] | Routing algorithm based on Neighbour Exploring Routing (NER).
Algorithm refrence: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
method is becomes longest dimension first routing. | [
"Routing",
"algorithm",
"based",
"on",
"Neighbour",
"Exploring",
"Routing",
"(",
"NER",
")",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/ner.py#L517-L577 |
project-rig/rig | rig/scripts/rig_ps.py | match | def match(string, patterns):
"""Given a string return true if it matches the supplied list of
patterns.
Parameters
----------
string : str
The string to be matched.
patterns : None or [pattern, ...]
The series of regular expressions to attempt to match.
"""
if patterns is None:
return True
else:
return any(re.match(pattern, string)
for pattern in patterns) | python | def match(string, patterns):
"""Given a string return true if it matches the supplied list of
patterns.
Parameters
----------
string : str
The string to be matched.
patterns : None or [pattern, ...]
The series of regular expressions to attempt to match.
"""
if patterns is None:
return True
else:
return any(re.match(pattern, string)
for pattern in patterns) | [
"def",
"match",
"(",
"string",
",",
"patterns",
")",
":",
"if",
"patterns",
"is",
"None",
":",
"return",
"True",
"else",
":",
"return",
"any",
"(",
"re",
".",
"match",
"(",
"pattern",
",",
"string",
")",
"for",
"pattern",
"in",
"patterns",
")"
] | Given a string return true if it matches the supplied list of
patterns.
Parameters
----------
string : str
The string to be matched.
patterns : None or [pattern, ...]
The series of regular expressions to attempt to match. | [
"Given",
"a",
"string",
"return",
"true",
"if",
"it",
"matches",
"the",
"supplied",
"list",
"of",
"patterns",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_ps.py#L21-L36 |
project-rig/rig | rig/scripts/rig_ps.py | get_process_list | def get_process_list(mc, x_=None, y_=None, p_=None,
app_ids=None, applications=None, states=None):
"""Scan a SpiNNaker system's cores filtering by the specified features.
Generates
-------
(x, y, core, state, runtime_exception, application, app_id)
"""
system_info = mc.get_system_info()
for (x, y), chip_info in sorted(iteritems(system_info)):
if x_ is not None and x_ != x:
continue
if y_ is not None and y_ != y:
continue
for p in range(chip_info.num_cores):
if p_ is not None and p_ != p:
continue
try:
status = mc.get_processor_status(x=x, y=y, p=p)
keep = (match(str(status.app_id), app_ids) and
match(status.app_name, applications) and
match(status.cpu_state.name, states))
if keep:
yield (x, y, p,
status.cpu_state,
status.rt_code,
status.app_name,
status.app_id)
except SCPError as e:
# If an error occurs while communicating with a chip, we bodge
# it into the "cpu_status" field and continue (note that it
# will never get filtered out).
class DeadStatus(object):
name = "{}: {}".format(e.__class__.__name__, str(e))
yield (x, y, p, DeadStatus(), None, "", -1) | python | def get_process_list(mc, x_=None, y_=None, p_=None,
app_ids=None, applications=None, states=None):
"""Scan a SpiNNaker system's cores filtering by the specified features.
Generates
-------
(x, y, core, state, runtime_exception, application, app_id)
"""
system_info = mc.get_system_info()
for (x, y), chip_info in sorted(iteritems(system_info)):
if x_ is not None and x_ != x:
continue
if y_ is not None and y_ != y:
continue
for p in range(chip_info.num_cores):
if p_ is not None and p_ != p:
continue
try:
status = mc.get_processor_status(x=x, y=y, p=p)
keep = (match(str(status.app_id), app_ids) and
match(status.app_name, applications) and
match(status.cpu_state.name, states))
if keep:
yield (x, y, p,
status.cpu_state,
status.rt_code,
status.app_name,
status.app_id)
except SCPError as e:
# If an error occurs while communicating with a chip, we bodge
# it into the "cpu_status" field and continue (note that it
# will never get filtered out).
class DeadStatus(object):
name = "{}: {}".format(e.__class__.__name__, str(e))
yield (x, y, p, DeadStatus(), None, "", -1) | [
"def",
"get_process_list",
"(",
"mc",
",",
"x_",
"=",
"None",
",",
"y_",
"=",
"None",
",",
"p_",
"=",
"None",
",",
"app_ids",
"=",
"None",
",",
"applications",
"=",
"None",
",",
"states",
"=",
"None",
")",
":",
"system_info",
"=",
"mc",
".",
"get_system_info",
"(",
")",
"for",
"(",
"x",
",",
"y",
")",
",",
"chip_info",
"in",
"sorted",
"(",
"iteritems",
"(",
"system_info",
")",
")",
":",
"if",
"x_",
"is",
"not",
"None",
"and",
"x_",
"!=",
"x",
":",
"continue",
"if",
"y_",
"is",
"not",
"None",
"and",
"y_",
"!=",
"y",
":",
"continue",
"for",
"p",
"in",
"range",
"(",
"chip_info",
".",
"num_cores",
")",
":",
"if",
"p_",
"is",
"not",
"None",
"and",
"p_",
"!=",
"p",
":",
"continue",
"try",
":",
"status",
"=",
"mc",
".",
"get_processor_status",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"p",
"=",
"p",
")",
"keep",
"=",
"(",
"match",
"(",
"str",
"(",
"status",
".",
"app_id",
")",
",",
"app_ids",
")",
"and",
"match",
"(",
"status",
".",
"app_name",
",",
"applications",
")",
"and",
"match",
"(",
"status",
".",
"cpu_state",
".",
"name",
",",
"states",
")",
")",
"if",
"keep",
":",
"yield",
"(",
"x",
",",
"y",
",",
"p",
",",
"status",
".",
"cpu_state",
",",
"status",
".",
"rt_code",
",",
"status",
".",
"app_name",
",",
"status",
".",
"app_id",
")",
"except",
"SCPError",
"as",
"e",
":",
"# If an error occurs while communicating with a chip, we bodge",
"# it into the \"cpu_status\" field and continue (note that it",
"# will never get filtered out).",
"class",
"DeadStatus",
"(",
"object",
")",
":",
"name",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"e",
".",
"__class__",
".",
"__name__",
",",
"str",
"(",
"e",
")",
")",
"yield",
"(",
"x",
",",
"y",
",",
"p",
",",
"DeadStatus",
"(",
")",
",",
"None",
",",
"\"\"",
",",
"-",
"1",
")"
] | Scan a SpiNNaker system's cores filtering by the specified features.
Generates
-------
(x, y, core, state, runtime_exception, application, app_id) | [
"Scan",
"a",
"SpiNNaker",
"system",
"s",
"cores",
"filtering",
"by",
"the",
"specified",
"features",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/scripts/rig_ps.py#L39-L77 |
project-rig/rig | rig/place_and_route/utils.py | build_machine | def build_machine(system_info,
core_resource=Cores,
sdram_resource=SDRAM,
sram_resource=SRAM):
"""Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
>> sys_info = mc.get_system_info()
>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure.
"""
try:
max_cores = max(c.num_cores for c in itervalues(system_info))
except ValueError:
max_cores = 0
try:
max_sdram = max(c.largest_free_sdram_block
for c in itervalues(system_info))
except ValueError:
max_sdram = 0
try:
max_sram = max(c.largest_free_sram_block
for c in itervalues(system_info))
except ValueError:
max_sram = 0
return Machine(width=system_info.width,
height=system_info.height,
chip_resources={
core_resource: max_cores,
sdram_resource: max_sdram,
sram_resource: max_sram,
},
chip_resource_exceptions={
chip: {
core_resource: info.num_cores,
sdram_resource: info.largest_free_sdram_block,
sram_resource: info.largest_free_sram_block,
}
for chip, info in iteritems(system_info)
if (info.num_cores != max_cores or
info.largest_free_sdram_block != max_sdram or
info.largest_free_sram_block != max_sram)
},
dead_chips=set(system_info.dead_chips()),
dead_links=set(system_info.dead_links())) | python | def build_machine(system_info,
core_resource=Cores,
sdram_resource=SDRAM,
sram_resource=SRAM):
"""Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
>> sys_info = mc.get_system_info()
>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure.
"""
try:
max_cores = max(c.num_cores for c in itervalues(system_info))
except ValueError:
max_cores = 0
try:
max_sdram = max(c.largest_free_sdram_block
for c in itervalues(system_info))
except ValueError:
max_sdram = 0
try:
max_sram = max(c.largest_free_sram_block
for c in itervalues(system_info))
except ValueError:
max_sram = 0
return Machine(width=system_info.width,
height=system_info.height,
chip_resources={
core_resource: max_cores,
sdram_resource: max_sdram,
sram_resource: max_sram,
},
chip_resource_exceptions={
chip: {
core_resource: info.num_cores,
sdram_resource: info.largest_free_sdram_block,
sram_resource: info.largest_free_sram_block,
}
for chip, info in iteritems(system_info)
if (info.num_cores != max_cores or
info.largest_free_sdram_block != max_sdram or
info.largest_free_sram_block != max_sram)
},
dead_chips=set(system_info.dead_chips()),
dead_links=set(system_info.dead_links())) | [
"def",
"build_machine",
"(",
"system_info",
",",
"core_resource",
"=",
"Cores",
",",
"sdram_resource",
"=",
"SDRAM",
",",
"sram_resource",
"=",
"SRAM",
")",
":",
"try",
":",
"max_cores",
"=",
"max",
"(",
"c",
".",
"num_cores",
"for",
"c",
"in",
"itervalues",
"(",
"system_info",
")",
")",
"except",
"ValueError",
":",
"max_cores",
"=",
"0",
"try",
":",
"max_sdram",
"=",
"max",
"(",
"c",
".",
"largest_free_sdram_block",
"for",
"c",
"in",
"itervalues",
"(",
"system_info",
")",
")",
"except",
"ValueError",
":",
"max_sdram",
"=",
"0",
"try",
":",
"max_sram",
"=",
"max",
"(",
"c",
".",
"largest_free_sram_block",
"for",
"c",
"in",
"itervalues",
"(",
"system_info",
")",
")",
"except",
"ValueError",
":",
"max_sram",
"=",
"0",
"return",
"Machine",
"(",
"width",
"=",
"system_info",
".",
"width",
",",
"height",
"=",
"system_info",
".",
"height",
",",
"chip_resources",
"=",
"{",
"core_resource",
":",
"max_cores",
",",
"sdram_resource",
":",
"max_sdram",
",",
"sram_resource",
":",
"max_sram",
",",
"}",
",",
"chip_resource_exceptions",
"=",
"{",
"chip",
":",
"{",
"core_resource",
":",
"info",
".",
"num_cores",
",",
"sdram_resource",
":",
"info",
".",
"largest_free_sdram_block",
",",
"sram_resource",
":",
"info",
".",
"largest_free_sram_block",
",",
"}",
"for",
"chip",
",",
"info",
"in",
"iteritems",
"(",
"system_info",
")",
"if",
"(",
"info",
".",
"num_cores",
"!=",
"max_cores",
"or",
"info",
".",
"largest_free_sdram_block",
"!=",
"max_sdram",
"or",
"info",
".",
"largest_free_sram_block",
"!=",
"max_sram",
")",
"}",
",",
"dead_chips",
"=",
"set",
"(",
"system_info",
".",
"dead_chips",
"(",
")",
")",
",",
"dead_links",
"=",
"set",
"(",
"system_info",
".",
"dead_links",
"(",
")",
")",
")"
] | Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
>> sys_info = mc.get_system_info()
>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure. | [
"Build",
"a",
":",
"py",
":",
"class",
":",
"~rig",
".",
"place_and_route",
".",
"Machine",
"object",
"from",
"a",
":",
"py",
":",
"class",
":",
"~rig",
".",
"machine_control",
".",
"machine_controller",
".",
"SystemInfo",
"object",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/utils.py#L18-L116 |
project-rig/rig | rig/place_and_route/utils.py | _get_minimal_core_reservations | def _get_minimal_core_reservations(core_resource, cores, chip=None):
"""Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
"""
reservation = None
# Cores is in ascending order
for core in cores:
if reservation is None:
reservation = slice(core, core + 1)
elif reservation.stop == core:
reservation = slice(reservation.start, core + 1)
else:
yield ReserveResourceConstraint(
core_resource, reservation, chip)
reservation = slice(core, core + 1)
if reservation is not None:
yield ReserveResourceConstraint(core_resource, reservation, chip) | python | def _get_minimal_core_reservations(core_resource, cores, chip=None):
"""Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
"""
reservation = None
# Cores is in ascending order
for core in cores:
if reservation is None:
reservation = slice(core, core + 1)
elif reservation.stop == core:
reservation = slice(reservation.start, core + 1)
else:
yield ReserveResourceConstraint(
core_resource, reservation, chip)
reservation = slice(core, core + 1)
if reservation is not None:
yield ReserveResourceConstraint(core_resource, reservation, chip) | [
"def",
"_get_minimal_core_reservations",
"(",
"core_resource",
",",
"cores",
",",
"chip",
"=",
"None",
")",
":",
"reservation",
"=",
"None",
"# Cores is in ascending order",
"for",
"core",
"in",
"cores",
":",
"if",
"reservation",
"is",
"None",
":",
"reservation",
"=",
"slice",
"(",
"core",
",",
"core",
"+",
"1",
")",
"elif",
"reservation",
".",
"stop",
"==",
"core",
":",
"reservation",
"=",
"slice",
"(",
"reservation",
".",
"start",
",",
"core",
"+",
"1",
")",
"else",
":",
"yield",
"ReserveResourceConstraint",
"(",
"core_resource",
",",
"reservation",
",",
"chip",
")",
"reservation",
"=",
"slice",
"(",
"core",
",",
"core",
"+",
"1",
")",
"if",
"reservation",
"is",
"not",
"None",
":",
"yield",
"ReserveResourceConstraint",
"(",
"core_resource",
",",
"reservation",
",",
"chip",
")"
] | Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint` | [
"Yield",
"a",
"minimal",
"set",
"of",
":",
"py",
":",
"class",
":",
"~rig",
".",
"place_and_route",
".",
"constraints",
".",
"ReserveResourceConstraint",
"objects",
"which",
"reserve",
"the",
"specified",
"set",
"of",
"cores",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/utils.py#L119-L152 |
project-rig/rig | rig/place_and_route/utils.py | build_core_constraints | def build_core_constraints(system_info, core_resource=Cores):
"""Return a set of place-and-route
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
which reserve any cores that that are already in use.
The returned list of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`\ s
reserves all cores not in an Idle state (i.e. not a monitor and not already
running an application).
.. note::
Historically, every application was required to add a
:py:class:~rig.place_and_route.constraints.ReserveResourceConstraint to
reserve the monitor processor on each chip. This method improves upon
this approach by automatically generating constraints which reserve not
just the monitor core but also any other cores which are already in
use.
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
The resource identifier used for cores.
Returns
-------
[:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`, \
...]
A set of place-and-route constraints which reserves all non-idle cores.
The resource type given in the ``core_resource`` argument will be
reserved accordingly.
"""
constraints = []
# Find the set of cores which are universally reserved
globally_reserved = None
for chip_info in itervalues(system_info):
reserved = sum(1 << c for c, state in enumerate(chip_info.core_states)
if state != AppState.idle)
if globally_reserved is None:
globally_reserved = reserved
else:
globally_reserved &= reserved
if globally_reserved is None:
globally_reserved = 0
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core in range(18) if (1 << core) & globally_reserved]))
# Create chip-specific resource reservations for any special cases
for chip, chip_info in iteritems(system_info):
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core, state in enumerate(chip_info.core_states)
if state != AppState.idle and
not globally_reserved & (1 << core)],
chip))
return constraints | python | def build_core_constraints(system_info, core_resource=Cores):
"""Return a set of place-and-route
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
which reserve any cores that that are already in use.
The returned list of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`\ s
reserves all cores not in an Idle state (i.e. not a monitor and not already
running an application).
.. note::
Historically, every application was required to add a
:py:class:~rig.place_and_route.constraints.ReserveResourceConstraint to
reserve the monitor processor on each chip. This method improves upon
this approach by automatically generating constraints which reserve not
just the monitor core but also any other cores which are already in
use.
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
The resource identifier used for cores.
Returns
-------
[:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`, \
...]
A set of place-and-route constraints which reserves all non-idle cores.
The resource type given in the ``core_resource`` argument will be
reserved accordingly.
"""
constraints = []
# Find the set of cores which are universally reserved
globally_reserved = None
for chip_info in itervalues(system_info):
reserved = sum(1 << c for c, state in enumerate(chip_info.core_states)
if state != AppState.idle)
if globally_reserved is None:
globally_reserved = reserved
else:
globally_reserved &= reserved
if globally_reserved is None:
globally_reserved = 0
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core in range(18) if (1 << core) & globally_reserved]))
# Create chip-specific resource reservations for any special cases
for chip, chip_info in iteritems(system_info):
constraints.extend(_get_minimal_core_reservations(
core_resource,
[core for core, state in enumerate(chip_info.core_states)
if state != AppState.idle and
not globally_reserved & (1 << core)],
chip))
return constraints | [
"def",
"build_core_constraints",
"(",
"system_info",
",",
"core_resource",
"=",
"Cores",
")",
":",
"constraints",
"=",
"[",
"]",
"# Find the set of cores which are universally reserved",
"globally_reserved",
"=",
"None",
"for",
"chip_info",
"in",
"itervalues",
"(",
"system_info",
")",
":",
"reserved",
"=",
"sum",
"(",
"1",
"<<",
"c",
"for",
"c",
",",
"state",
"in",
"enumerate",
"(",
"chip_info",
".",
"core_states",
")",
"if",
"state",
"!=",
"AppState",
".",
"idle",
")",
"if",
"globally_reserved",
"is",
"None",
":",
"globally_reserved",
"=",
"reserved",
"else",
":",
"globally_reserved",
"&=",
"reserved",
"if",
"globally_reserved",
"is",
"None",
":",
"globally_reserved",
"=",
"0",
"constraints",
".",
"extend",
"(",
"_get_minimal_core_reservations",
"(",
"core_resource",
",",
"[",
"core",
"for",
"core",
"in",
"range",
"(",
"18",
")",
"if",
"(",
"1",
"<<",
"core",
")",
"&",
"globally_reserved",
"]",
")",
")",
"# Create chip-specific resource reservations for any special cases",
"for",
"chip",
",",
"chip_info",
"in",
"iteritems",
"(",
"system_info",
")",
":",
"constraints",
".",
"extend",
"(",
"_get_minimal_core_reservations",
"(",
"core_resource",
",",
"[",
"core",
"for",
"core",
",",
"state",
"in",
"enumerate",
"(",
"chip_info",
".",
"core_states",
")",
"if",
"state",
"!=",
"AppState",
".",
"idle",
"and",
"not",
"globally_reserved",
"&",
"(",
"1",
"<<",
"core",
")",
"]",
",",
"chip",
")",
")",
"return",
"constraints"
] | Return a set of place-and-route
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
which reserve any cores that that are already in use.
The returned list of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`\ s
reserves all cores not in an Idle state (i.e. not a monitor and not already
running an application).
.. note::
Historically, every application was required to add a
:py:class:~rig.place_and_route.constraints.ReserveResourceConstraint to
reserve the monitor processor on each chip. This method improves upon
this approach by automatically generating constraints which reserve not
just the monitor core but also any other cores which are already in
use.
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (Default: :py:data:`~rig.place_and_route.Cores`)
The resource identifier used for cores.
Returns
-------
[:py:class:`rig.place_and_route.constraints.ReserveResourceConstraint`, \
...]
A set of place-and-route constraints which reserves all non-idle cores.
The resource type given in the ``core_resource`` argument will be
reserved accordingly. | [
"Return",
"a",
"set",
"of",
"place",
"-",
"and",
"-",
"route",
":",
"py",
":",
"class",
":",
"~rig",
".",
"place_and_route",
".",
"constraints",
".",
"ReserveResourceConstraint",
"which",
"reserve",
"any",
"cores",
"that",
"that",
"are",
"already",
"in",
"use",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/utils.py#L155-L219 |
project-rig/rig | rig/place_and_route/utils.py | build_application_map | def build_application_map(vertices_applications, placements, allocations,
core_resource=Cores):
"""Build a mapping from application to a list of cores where the
application is used.
This utility function assumes that each vertex is associated with a
specific application.
Parameters
----------
vertices_applications : {vertex: application, ...}
Applications are represented by the path of their APLX file.
placements : {vertex: (x, y), ...}
allocations : {vertex: {resource: slice, ...}, ...}
One of these resources should match the `core_resource` argument.
core_resource : object
The resource identifier which represents cores.
Returns
-------
{application: {(x, y) : set([c, ...]), ...}, ...}
For each application, for each used chip a set of core numbers onto
which the application should be loaded.
"""
application_map = defaultdict(lambda: defaultdict(set))
for vertex, application in iteritems(vertices_applications):
chip_cores = application_map[application][placements[vertex]]
core_slice = allocations[vertex].get(core_resource, slice(0, 0))
chip_cores.update(range(core_slice.start, core_slice.stop))
return application_map | python | def build_application_map(vertices_applications, placements, allocations,
core_resource=Cores):
"""Build a mapping from application to a list of cores where the
application is used.
This utility function assumes that each vertex is associated with a
specific application.
Parameters
----------
vertices_applications : {vertex: application, ...}
Applications are represented by the path of their APLX file.
placements : {vertex: (x, y), ...}
allocations : {vertex: {resource: slice, ...}, ...}
One of these resources should match the `core_resource` argument.
core_resource : object
The resource identifier which represents cores.
Returns
-------
{application: {(x, y) : set([c, ...]), ...}, ...}
For each application, for each used chip a set of core numbers onto
which the application should be loaded.
"""
application_map = defaultdict(lambda: defaultdict(set))
for vertex, application in iteritems(vertices_applications):
chip_cores = application_map[application][placements[vertex]]
core_slice = allocations[vertex].get(core_resource, slice(0, 0))
chip_cores.update(range(core_slice.start, core_slice.stop))
return application_map | [
"def",
"build_application_map",
"(",
"vertices_applications",
",",
"placements",
",",
"allocations",
",",
"core_resource",
"=",
"Cores",
")",
":",
"application_map",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"set",
")",
")",
"for",
"vertex",
",",
"application",
"in",
"iteritems",
"(",
"vertices_applications",
")",
":",
"chip_cores",
"=",
"application_map",
"[",
"application",
"]",
"[",
"placements",
"[",
"vertex",
"]",
"]",
"core_slice",
"=",
"allocations",
"[",
"vertex",
"]",
".",
"get",
"(",
"core_resource",
",",
"slice",
"(",
"0",
",",
"0",
")",
")",
"chip_cores",
".",
"update",
"(",
"range",
"(",
"core_slice",
".",
"start",
",",
"core_slice",
".",
"stop",
")",
")",
"return",
"application_map"
] | Build a mapping from application to a list of cores where the
application is used.
This utility function assumes that each vertex is associated with a
specific application.
Parameters
----------
vertices_applications : {vertex: application, ...}
Applications are represented by the path of their APLX file.
placements : {vertex: (x, y), ...}
allocations : {vertex: {resource: slice, ...}, ...}
One of these resources should match the `core_resource` argument.
core_resource : object
The resource identifier which represents cores.
Returns
-------
{application: {(x, y) : set([c, ...]), ...}, ...}
For each application, for each used chip a set of core numbers onto
which the application should be loaded. | [
"Build",
"a",
"mapping",
"from",
"application",
"to",
"a",
"list",
"of",
"cores",
"where",
"the",
"application",
"is",
"used",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/utils.py#L222-L253 |
project-rig/rig | rig/place_and_route/utils.py | build_routing_tables | def build_routing_tables(routes, net_keys, omit_default_routes=True):
"""**DEPRECATED** Convert a set of RoutingTrees into a per-chip set of
routing tables.
.. warning::
This method has been deprecated in favour of
:py:meth:`rig.routing_table.routing_tree_to_tables` and
:py:meth:`rig.routing_table.minimise`.
E.g. most applications should use something like::
from rig.routing_table import routing_tree_to_tables, minimise
tables = minimise(routing_tree_to_tables(routes, net_keys),
target_lengths)
Where target_length gives the number of available routing entries on
the chips in your SpiNNaker system (see
:py:func:~rig.routing_table.utils.build_routing_table_target_lengths)
This command produces routing tables with entries optionally omitted when
the route does not change direction (i.e. when default routing can be
used).
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same datastructure produced by routers in the
`place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
omit_default_routes : bool
Do not create routing entries for routes which do not change direction
(i.e. use default routing).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
from rig.routing_table import routing_tree_to_tables, remove_default_routes
warnings.warn(
"build_routing_tables() is deprecated, see "
"rig.routing_table.routing_tree_to_tables()"
"and rig.routing_table.minimise()", DeprecationWarning
)
# Build full routing tables and then remove default entries from them
tables = dict()
for chip, table in iteritems(routing_tree_to_tables(routes, net_keys)):
if omit_default_routes:
table = remove_default_routes.minimise(table, target_length=None)
# If the table is empty don't add it to the dictionary of tables.
if table:
tables[chip] = table
return tables | python | def build_routing_tables(routes, net_keys, omit_default_routes=True):
"""**DEPRECATED** Convert a set of RoutingTrees into a per-chip set of
routing tables.
.. warning::
This method has been deprecated in favour of
:py:meth:`rig.routing_table.routing_tree_to_tables` and
:py:meth:`rig.routing_table.minimise`.
E.g. most applications should use something like::
from rig.routing_table import routing_tree_to_tables, minimise
tables = minimise(routing_tree_to_tables(routes, net_keys),
target_lengths)
Where target_length gives the number of available routing entries on
the chips in your SpiNNaker system (see
:py:func:~rig.routing_table.utils.build_routing_table_target_lengths)
This command produces routing tables with entries optionally omitted when
the route does not change direction (i.e. when default routing can be
used).
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same datastructure produced by routers in the
`place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
omit_default_routes : bool
Do not create routing entries for routes which do not change direction
(i.e. use default routing).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
from rig.routing_table import routing_tree_to_tables, remove_default_routes
warnings.warn(
"build_routing_tables() is deprecated, see "
"rig.routing_table.routing_tree_to_tables()"
"and rig.routing_table.minimise()", DeprecationWarning
)
# Build full routing tables and then remove default entries from them
tables = dict()
for chip, table in iteritems(routing_tree_to_tables(routes, net_keys)):
if omit_default_routes:
table = remove_default_routes.minimise(table, target_length=None)
# If the table is empty don't add it to the dictionary of tables.
if table:
tables[chip] = table
return tables | [
"def",
"build_routing_tables",
"(",
"routes",
",",
"net_keys",
",",
"omit_default_routes",
"=",
"True",
")",
":",
"from",
"rig",
".",
"routing_table",
"import",
"routing_tree_to_tables",
",",
"remove_default_routes",
"warnings",
".",
"warn",
"(",
"\"build_routing_tables() is deprecated, see \"",
"\"rig.routing_table.routing_tree_to_tables()\"",
"\"and rig.routing_table.minimise()\"",
",",
"DeprecationWarning",
")",
"# Build full routing tables and then remove default entries from them",
"tables",
"=",
"dict",
"(",
")",
"for",
"chip",
",",
"table",
"in",
"iteritems",
"(",
"routing_tree_to_tables",
"(",
"routes",
",",
"net_keys",
")",
")",
":",
"if",
"omit_default_routes",
":",
"table",
"=",
"remove_default_routes",
".",
"minimise",
"(",
"table",
",",
"target_length",
"=",
"None",
")",
"# If the table is empty don't add it to the dictionary of tables.",
"if",
"table",
":",
"tables",
"[",
"chip",
"]",
"=",
"table",
"return",
"tables"
] | **DEPRECATED** Convert a set of RoutingTrees into a per-chip set of
routing tables.
.. warning::
This method has been deprecated in favour of
:py:meth:`rig.routing_table.routing_tree_to_tables` and
:py:meth:`rig.routing_table.minimise`.
E.g. most applications should use something like::
from rig.routing_table import routing_tree_to_tables, minimise
tables = minimise(routing_tree_to_tables(routes, net_keys),
target_lengths)
Where target_length gives the number of available routing entries on
the chips in your SpiNNaker system (see
:py:func:~rig.routing_table.utils.build_routing_table_target_lengths)
This command produces routing tables with entries optionally omitted when
the route does not change direction (i.e. when default routing can be
used).
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same datastructure produced by routers in the
`place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
omit_default_routes : bool
Do not create routing entries for routes which do not change direction
(i.e. use default routing).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] | [
"**",
"DEPRECATED",
"**",
"Convert",
"a",
"set",
"of",
"RoutingTrees",
"into",
"a",
"per",
"-",
"chip",
"set",
"of",
"routing",
"tables",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/utils.py#L256-L330 |
NicolasLM/spinach | spinach/contrib/sentry.py | register_sentry | def register_sentry(raven_client, namespace: Optional[str]=None,
send_retries: bool=False):
"""Register the Sentry integration.
Exceptions making jobs fail are sent to Sentry.
:param raven_client: configured Raven client used to sent errors to Sentry
:param namespace: optionally only register the Sentry integration for a
particular Spinach :class:`Engine`
:param send_retries: whether to also send to Sentry exceptions resulting
in a job being retried
"""
@signals.job_started.connect_via(namespace)
def job_started(namespace, job, **kwargs):
raven_client.context.activate()
raven_client.transaction.push(job.task_name)
@signals.job_finished.connect_via(namespace)
def job_finished(namespace, job, **kwargs):
raven_client.transaction.pop(job.task_name)
raven_client.context.clear()
@signals.job_failed.connect_via(namespace)
def job_failed(namespace, job, **kwargs):
raven_client.captureException(
extra={attr: getattr(job, attr) for attr in job.__slots__}
)
if send_retries:
@signals.job_schedule_retry.connect_via(namespace)
def job_schedule_retry(namespace, job, **kwargs):
raven_client.captureException(
extra={attr: getattr(job, attr) for attr in job.__slots__}
) | python | def register_sentry(raven_client, namespace: Optional[str]=None,
send_retries: bool=False):
"""Register the Sentry integration.
Exceptions making jobs fail are sent to Sentry.
:param raven_client: configured Raven client used to sent errors to Sentry
:param namespace: optionally only register the Sentry integration for a
particular Spinach :class:`Engine`
:param send_retries: whether to also send to Sentry exceptions resulting
in a job being retried
"""
@signals.job_started.connect_via(namespace)
def job_started(namespace, job, **kwargs):
raven_client.context.activate()
raven_client.transaction.push(job.task_name)
@signals.job_finished.connect_via(namespace)
def job_finished(namespace, job, **kwargs):
raven_client.transaction.pop(job.task_name)
raven_client.context.clear()
@signals.job_failed.connect_via(namespace)
def job_failed(namespace, job, **kwargs):
raven_client.captureException(
extra={attr: getattr(job, attr) for attr in job.__slots__}
)
if send_retries:
@signals.job_schedule_retry.connect_via(namespace)
def job_schedule_retry(namespace, job, **kwargs):
raven_client.captureException(
extra={attr: getattr(job, attr) for attr in job.__slots__}
) | [
"def",
"register_sentry",
"(",
"raven_client",
",",
"namespace",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"send_retries",
":",
"bool",
"=",
"False",
")",
":",
"@",
"signals",
".",
"job_started",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_started",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"raven_client",
".",
"context",
".",
"activate",
"(",
")",
"raven_client",
".",
"transaction",
".",
"push",
"(",
"job",
".",
"task_name",
")",
"@",
"signals",
".",
"job_finished",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_finished",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"raven_client",
".",
"transaction",
".",
"pop",
"(",
"job",
".",
"task_name",
")",
"raven_client",
".",
"context",
".",
"clear",
"(",
")",
"@",
"signals",
".",
"job_failed",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_failed",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"raven_client",
".",
"captureException",
"(",
"extra",
"=",
"{",
"attr",
":",
"getattr",
"(",
"job",
",",
"attr",
")",
"for",
"attr",
"in",
"job",
".",
"__slots__",
"}",
")",
"if",
"send_retries",
":",
"@",
"signals",
".",
"job_schedule_retry",
".",
"connect_via",
"(",
"namespace",
")",
"def",
"job_schedule_retry",
"(",
"namespace",
",",
"job",
",",
"*",
"*",
"kwargs",
")",
":",
"raven_client",
".",
"captureException",
"(",
"extra",
"=",
"{",
"attr",
":",
"getattr",
"(",
"job",
",",
"attr",
")",
"for",
"attr",
"in",
"job",
".",
"__slots__",
"}",
")"
] | Register the Sentry integration.
Exceptions making jobs fail are sent to Sentry.
:param raven_client: configured Raven client used to sent errors to Sentry
:param namespace: optionally only register the Sentry integration for a
particular Spinach :class:`Engine`
:param send_retries: whether to also send to Sentry exceptions resulting
in a job being retried | [
"Register",
"the",
"Sentry",
"integration",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/sentry.py#L6-L40 |
Metatab/metapack | metapack/cli/url.py | add_resource | def add_resource(mt_file, ref, cache):
"""Add a resources entry, downloading the intuiting the file, replacing entries with
the same reference"""
if isinstance(mt_file, MetapackDoc):
doc = mt_file
else:
doc = MetapackDoc(mt_file)
if not 'Resources' in doc:
doc.new_section('Resources')
doc['Resources'].args = [e for e in set(doc['Resources'].args + ['Name', 'StartLine', 'HeaderLines', 'Encoding']) if
e]
seen_names = set()
u = parse_app_url(ref)
# The web and file URLs don't list the same.
if u.proto == 'file':
entries = u.list()
else:
entries = [ssu for su in u.list() for ssu in su.list()]
errors = []
for e in entries:
if not add_single_resource(doc, e, cache=cache, seen_names=seen_names):
errors.append(e)
if errors:
prt()
warn("Found, but failed to add these urls:")
for e in errors:
print(' ', e)
write_doc(doc, mt_file) | python | def add_resource(mt_file, ref, cache):
"""Add a resources entry, downloading the intuiting the file, replacing entries with
the same reference"""
if isinstance(mt_file, MetapackDoc):
doc = mt_file
else:
doc = MetapackDoc(mt_file)
if not 'Resources' in doc:
doc.new_section('Resources')
doc['Resources'].args = [e for e in set(doc['Resources'].args + ['Name', 'StartLine', 'HeaderLines', 'Encoding']) if
e]
seen_names = set()
u = parse_app_url(ref)
# The web and file URLs don't list the same.
if u.proto == 'file':
entries = u.list()
else:
entries = [ssu for su in u.list() for ssu in su.list()]
errors = []
for e in entries:
if not add_single_resource(doc, e, cache=cache, seen_names=seen_names):
errors.append(e)
if errors:
prt()
warn("Found, but failed to add these urls:")
for e in errors:
print(' ', e)
write_doc(doc, mt_file) | [
"def",
"add_resource",
"(",
"mt_file",
",",
"ref",
",",
"cache",
")",
":",
"if",
"isinstance",
"(",
"mt_file",
",",
"MetapackDoc",
")",
":",
"doc",
"=",
"mt_file",
"else",
":",
"doc",
"=",
"MetapackDoc",
"(",
"mt_file",
")",
"if",
"not",
"'Resources'",
"in",
"doc",
":",
"doc",
".",
"new_section",
"(",
"'Resources'",
")",
"doc",
"[",
"'Resources'",
"]",
".",
"args",
"=",
"[",
"e",
"for",
"e",
"in",
"set",
"(",
"doc",
"[",
"'Resources'",
"]",
".",
"args",
"+",
"[",
"'Name'",
",",
"'StartLine'",
",",
"'HeaderLines'",
",",
"'Encoding'",
"]",
")",
"if",
"e",
"]",
"seen_names",
"=",
"set",
"(",
")",
"u",
"=",
"parse_app_url",
"(",
"ref",
")",
"# The web and file URLs don't list the same.",
"if",
"u",
".",
"proto",
"==",
"'file'",
":",
"entries",
"=",
"u",
".",
"list",
"(",
")",
"else",
":",
"entries",
"=",
"[",
"ssu",
"for",
"su",
"in",
"u",
".",
"list",
"(",
")",
"for",
"ssu",
"in",
"su",
".",
"list",
"(",
")",
"]",
"errors",
"=",
"[",
"]",
"for",
"e",
"in",
"entries",
":",
"if",
"not",
"add_single_resource",
"(",
"doc",
",",
"e",
",",
"cache",
"=",
"cache",
",",
"seen_names",
"=",
"seen_names",
")",
":",
"errors",
".",
"append",
"(",
"e",
")",
"if",
"errors",
":",
"prt",
"(",
")",
"warn",
"(",
"\"Found, but failed to add these urls:\"",
")",
"for",
"e",
"in",
"errors",
":",
"print",
"(",
"' '",
",",
"e",
")",
"write_doc",
"(",
"doc",
",",
"mt_file",
")"
] | Add a resources entry, downloading the intuiting the file, replacing entries with
the same reference | [
"Add",
"a",
"resources",
"entry",
"downloading",
"the",
"intuiting",
"the",
"file",
"replacing",
"entries",
"with",
"the",
"same",
"reference"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/url.py#L92-L130 |
ungarj/tilematrix | tilematrix/_tile.py | Tile.bounds | def bounds(self, pixelbuffer=0):
"""
Return Tile boundaries.
- pixelbuffer: tile buffer in pixels
"""
left = self._left
bottom = self._bottom
right = self._right
top = self._top
if pixelbuffer:
offset = self.pixel_x_size * float(pixelbuffer)
left -= offset
bottom -= offset
right += offset
top += offset
# on global grids clip at northern and southern TilePyramid bound
if self.tp.grid.is_global:
top = min([top, self.tile_pyramid.top])
bottom = max([bottom, self.tile_pyramid.bottom])
return Bounds(left, bottom, right, top) | python | def bounds(self, pixelbuffer=0):
"""
Return Tile boundaries.
- pixelbuffer: tile buffer in pixels
"""
left = self._left
bottom = self._bottom
right = self._right
top = self._top
if pixelbuffer:
offset = self.pixel_x_size * float(pixelbuffer)
left -= offset
bottom -= offset
right += offset
top += offset
# on global grids clip at northern and southern TilePyramid bound
if self.tp.grid.is_global:
top = min([top, self.tile_pyramid.top])
bottom = max([bottom, self.tile_pyramid.bottom])
return Bounds(left, bottom, right, top) | [
"def",
"bounds",
"(",
"self",
",",
"pixelbuffer",
"=",
"0",
")",
":",
"left",
"=",
"self",
".",
"_left",
"bottom",
"=",
"self",
".",
"_bottom",
"right",
"=",
"self",
".",
"_right",
"top",
"=",
"self",
".",
"_top",
"if",
"pixelbuffer",
":",
"offset",
"=",
"self",
".",
"pixel_x_size",
"*",
"float",
"(",
"pixelbuffer",
")",
"left",
"-=",
"offset",
"bottom",
"-=",
"offset",
"right",
"+=",
"offset",
"top",
"+=",
"offset",
"# on global grids clip at northern and southern TilePyramid bound",
"if",
"self",
".",
"tp",
".",
"grid",
".",
"is_global",
":",
"top",
"=",
"min",
"(",
"[",
"top",
",",
"self",
".",
"tile_pyramid",
".",
"top",
"]",
")",
"bottom",
"=",
"max",
"(",
"[",
"bottom",
",",
"self",
".",
"tile_pyramid",
".",
"bottom",
"]",
")",
"return",
"Bounds",
"(",
"left",
",",
"bottom",
",",
"right",
",",
"top",
")"
] | Return Tile boundaries.
- pixelbuffer: tile buffer in pixels | [
"Return",
"Tile",
"boundaries",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L93-L113 |
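A minimal sketch of bounds() above, assuming the tilematrix package with its "geodetic" grid; the zoom/row/col values are arbitrary examples:

from tilematrix import TilePyramid

tile = TilePyramid("geodetic").tile(5, 10, 20)
print(tile.bounds())               # exact tile boundaries (left, bottom, right, top)
print(tile.bounds(pixelbuffer=2))  # each edge pushed outward by 2 pixels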
ungarj/tilematrix | tilematrix/_tile.py | Tile.affine | def affine(self, pixelbuffer=0):
"""
Return an Affine object of tile.
- pixelbuffer: tile buffer in pixels
"""
return Affine(
self.pixel_x_size,
0,
self.bounds(pixelbuffer).left,
0,
-self.pixel_y_size,
self.bounds(pixelbuffer).top
) | python | def affine(self, pixelbuffer=0):
"""
Return an Affine object of tile.
- pixelbuffer: tile buffer in pixels
"""
return Affine(
self.pixel_x_size,
0,
self.bounds(pixelbuffer).left,
0,
-self.pixel_y_size,
self.bounds(pixelbuffer).top
) | [
"def",
"affine",
"(",
"self",
",",
"pixelbuffer",
"=",
"0",
")",
":",
"return",
"Affine",
"(",
"self",
".",
"pixel_x_size",
",",
"0",
",",
"self",
".",
"bounds",
"(",
"pixelbuffer",
")",
".",
"left",
",",
"0",
",",
"-",
"self",
".",
"pixel_y_size",
",",
"self",
".",
"bounds",
"(",
"pixelbuffer",
")",
".",
"top",
")"
] | Return an Affine object of tile.
- pixelbuffer: tile buffer in pixels | [
"Return",
"an",
"Affine",
"object",
"of",
"tile",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L123-L136 |
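A brief sketch of the transform returned by affine() above; Affine objects (from the affine package that tilematrix builds on) map (col, row) pixel indices to map coordinates:

from tilematrix import TilePyramid

tile = TilePyramid("geodetic").tile(5, 10, 20)
transform = tile.affine()
print(transform * (0, 0))  # (left, top) corner of the tile in map coordinates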
ungarj/tilematrix | tilematrix/_tile.py | Tile.shape | def shape(self, pixelbuffer=0):
"""
Return a tuple of tile height and width.
- pixelbuffer: tile buffer in pixels
"""
# apply pixelbuffers
height = self._base_shape.height + 2 * pixelbuffer
width = self._base_shape.width + 2 * pixelbuffer
if pixelbuffer and self.tp.grid.is_global:
# on first and last row, remove pixelbuffer on top or bottom
matrix_height = self.tile_pyramid.matrix_height(self.zoom)
if matrix_height == 1:
height = self._base_shape.height
elif self.row in [0, matrix_height - 1]:
height = self._base_shape.height + pixelbuffer
return Shape(height=height, width=width) | python | def shape(self, pixelbuffer=0):
"""
Return a tuple of tile height and width.
- pixelbuffer: tile buffer in pixels
"""
# apply pixelbuffers
height = self._base_shape.height + 2 * pixelbuffer
width = self._base_shape.width + 2 * pixelbuffer
if pixelbuffer and self.tp.grid.is_global:
# on first and last row, remove pixelbuffer on top or bottom
matrix_height = self.tile_pyramid.matrix_height(self.zoom)
if matrix_height == 1:
height = self._base_shape.height
elif self.row in [0, matrix_height - 1]:
height = self._base_shape.height + pixelbuffer
return Shape(height=height, width=width) | [
"def",
"shape",
"(",
"self",
",",
"pixelbuffer",
"=",
"0",
")",
":",
"# apply pixelbuffers",
"height",
"=",
"self",
".",
"_base_shape",
".",
"height",
"+",
"2",
"*",
"pixelbuffer",
"width",
"=",
"self",
".",
"_base_shape",
".",
"width",
"+",
"2",
"*",
"pixelbuffer",
"if",
"pixelbuffer",
"and",
"self",
".",
"tp",
".",
"grid",
".",
"is_global",
":",
"# on first and last row, remove pixelbuffer on top or bottom",
"matrix_height",
"=",
"self",
".",
"tile_pyramid",
".",
"matrix_height",
"(",
"self",
".",
"zoom",
")",
"if",
"matrix_height",
"==",
"1",
":",
"height",
"=",
"self",
".",
"_base_shape",
".",
"height",
"elif",
"self",
".",
"row",
"in",
"[",
"0",
",",
"matrix_height",
"-",
"1",
"]",
":",
"height",
"=",
"self",
".",
"_base_shape",
".",
"height",
"+",
"pixelbuffer",
"return",
"Shape",
"(",
"height",
"=",
"height",
",",
"width",
"=",
"width",
")"
] | Return a tuple of tile height and width.
- pixelbuffer: tile buffer in pixels | [
"Return",
"a",
"tuple",
"of",
"tile",
"height",
"and",
"width",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L138-L154 |
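A small sketch of shape() above, assuming the default tile_size of 256 on the geodetic grid; a pixelbuffer adds twice the buffer per axis except where clipped at the grid's first or last row:

from tilematrix import TilePyramid

tile = TilePyramid("geodetic").tile(5, 10, 20)
print(tile.shape())               # Shape(height=256, width=256)
print(tile.shape(pixelbuffer=8))  # Shape(height=272, width=272)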
ungarj/tilematrix | tilematrix/_tile.py | Tile.is_valid | def is_valid(self):
"""Return True if tile is available in tile pyramid."""
if not all([
isinstance(self.zoom, int),
self.zoom >= 0,
isinstance(self.row, int),
self.row >= 0,
isinstance(self.col, int),
self.col >= 0
]):
raise TypeError("zoom, col and row must be integers >= 0")
cols = self.tile_pyramid.matrix_width(self.zoom)
rows = self.tile_pyramid.matrix_height(self.zoom)
if self.col >= cols:
raise ValueError("col (%s) exceeds matrix width (%s)" % (self.col, cols))
if self.row >= rows:
raise ValueError("row (%s) exceeds matrix height (%s)" % (self.row, rows))
return True | python | def is_valid(self):
"""Return True if tile is available in tile pyramid."""
if not all([
isinstance(self.zoom, int),
self.zoom >= 0,
isinstance(self.row, int),
self.row >= 0,
isinstance(self.col, int),
self.col >= 0
]):
raise TypeError("zoom, col and row must be integers >= 0")
cols = self.tile_pyramid.matrix_width(self.zoom)
rows = self.tile_pyramid.matrix_height(self.zoom)
if self.col >= cols:
raise ValueError("col (%s) exceeds matrix width (%s)" % (self.col, cols))
if self.row >= rows:
raise ValueError("row (%s) exceeds matrix height (%s)" % (self.row, rows))
return True | [
"def",
"is_valid",
"(",
"self",
")",
":",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"self",
".",
"zoom",
",",
"int",
")",
",",
"self",
".",
"zoom",
">=",
"0",
",",
"isinstance",
"(",
"self",
".",
"row",
",",
"int",
")",
",",
"self",
".",
"row",
">=",
"0",
",",
"isinstance",
"(",
"self",
".",
"col",
",",
"int",
")",
",",
"self",
".",
"col",
">=",
"0",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"zoom, col and row must be integers >= 0\"",
")",
"cols",
"=",
"self",
".",
"tile_pyramid",
".",
"matrix_width",
"(",
"self",
".",
"zoom",
")",
"rows",
"=",
"self",
".",
"tile_pyramid",
".",
"matrix_height",
"(",
"self",
".",
"zoom",
")",
"if",
"self",
".",
"col",
">=",
"cols",
":",
"raise",
"ValueError",
"(",
"\"col (%s) exceeds matrix width (%s)\"",
"%",
"(",
"self",
".",
"col",
",",
"cols",
")",
")",
"if",
"self",
".",
"row",
">=",
"rows",
":",
"raise",
"ValueError",
"(",
"\"row (%s) exceeds matrix height (%s)\"",
"%",
"(",
"self",
".",
"row",
",",
"rows",
")",
")",
"return",
"True"
] | Return True if tile is available in tile pyramid. | [
"Return",
"True",
"if",
"tile",
"is",
"available",
"in",
"tile",
"pyramid",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L156-L173 |

ungarj/tilematrix | tilematrix/_tile.py | Tile.get_parent | def get_parent(self):
"""Return tile from previous zoom level."""
return None if self.zoom == 0 else self.tile_pyramid.tile(
self.zoom - 1, self.row // 2, self.col // 2
) | python | def get_parent(self):
"""Return tile from previous zoom level."""
return None if self.zoom == 0 else self.tile_pyramid.tile(
self.zoom - 1, self.row // 2, self.col // 2
) | [
"def",
"get_parent",
"(",
"self",
")",
":",
"return",
"None",
"if",
"self",
".",
"zoom",
"==",
"0",
"else",
"self",
".",
"tile_pyramid",
".",
"tile",
"(",
"self",
".",
"zoom",
"-",
"1",
",",
"self",
".",
"row",
"//",
"2",
",",
"self",
".",
"col",
"//",
"2",
")"
] | Return tile from previous zoom level. | [
"Return",
"tile",
"from",
"previous",
"zoom",
"level",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L175-L179 |
ungarj/tilematrix | tilematrix/_tile.py | Tile.get_children | def get_children(self):
"""Return tiles from next zoom level."""
next_zoom = self.zoom + 1
return [
self.tile_pyramid.tile(
next_zoom,
self.row * 2 + row_offset,
self.col * 2 + col_offset
)
for row_offset, col_offset in [
(0, 0), # top left
(0, 1), # top right
(1, 1), # bottom right
(1, 0), # bottom left
]
if all([
self.row * 2 + row_offset < self.tp.matrix_height(next_zoom),
self.col * 2 + col_offset < self.tp.matrix_width(next_zoom)
])
] | python | def get_children(self):
"""Return tiles from next zoom level."""
next_zoom = self.zoom + 1
return [
self.tile_pyramid.tile(
next_zoom,
self.row * 2 + row_offset,
self.col * 2 + col_offset
)
for row_offset, col_offset in [
(0, 0), # top left
(0, 1), # top right
(1, 1), # bottom right
(1, 0), # bottom left
]
if all([
self.row * 2 + row_offset < self.tp.matrix_height(next_zoom),
self.col * 2 + col_offset < self.tp.matrix_width(next_zoom)
])
] | [
"def",
"get_children",
"(",
"self",
")",
":",
"next_zoom",
"=",
"self",
".",
"zoom",
"+",
"1",
"return",
"[",
"self",
".",
"tile_pyramid",
".",
"tile",
"(",
"next_zoom",
",",
"self",
".",
"row",
"*",
"2",
"+",
"row_offset",
",",
"self",
".",
"col",
"*",
"2",
"+",
"col_offset",
")",
"for",
"row_offset",
",",
"col_offset",
"in",
"[",
"(",
"0",
",",
"0",
")",
",",
"# top left",
"(",
"0",
",",
"1",
")",
",",
"# top right",
"(",
"1",
",",
"1",
")",
",",
"# bottom right",
"(",
"1",
",",
"0",
")",
",",
"# bottom left",
"]",
"if",
"all",
"(",
"[",
"self",
".",
"row",
"*",
"2",
"+",
"row_offset",
"<",
"self",
".",
"tp",
".",
"matrix_height",
"(",
"next_zoom",
")",
",",
"self",
".",
"col",
"*",
"2",
"+",
"col_offset",
"<",
"self",
".",
"tp",
".",
"matrix_width",
"(",
"next_zoom",
")",
"]",
")",
"]"
] | Return tiles from next zoom level. | [
"Return",
"tiles",
"from",
"next",
"zoom",
"level",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L181-L200 |
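An illustrative sketch of the parent/child relationship described by get_children() and get_parent() above; the grid and indices are arbitrary examples:

from tilematrix import TilePyramid

tile = TilePyramid("geodetic").tile(3, 2, 5)
for child in tile.get_children():
    print(child.zoom, child.row, child.col)  # zoom 4, rows 4-5, cols 10-11
    parent = child.get_parent()
    assert (parent.zoom, parent.row, parent.col) == (3, 2, 5)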
ungarj/tilematrix | tilematrix/_tile.py | Tile.get_neighbors | def get_neighbors(self, connectedness=8):
"""
Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors outside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight.
"""
if connectedness not in [4, 8]:
raise ValueError("only connectedness values 8 or 4 are allowed")
unique_neighbors = {}
# 4-connected neighborsfor pyramid
matrix_offsets = [
(-1, 0), # 1: above
(0, 1), # 2: right
(1, 0), # 3: below
(0, -1) # 4: left
]
if connectedness == 8:
matrix_offsets.extend([
(-1, 1), # 5: above right
(1, 1), # 6: below right
(1, -1), # 7: below left
(-1, -1) # 8: above left
])
for row_offset, col_offset in matrix_offsets:
new_row = self.row + row_offset
new_col = self.col + col_offset
# omit if row is outside of tile matrix
if new_row < 0 or new_row >= self.tp.matrix_height(self.zoom):
continue
# wrap around antimeridian if new column is outside of tile matrix
if new_col < 0:
if not self.tp.is_global:
continue
new_col = self.tp.matrix_width(self.zoom) + new_col
elif new_col >= self.tp.matrix_width(self.zoom):
if not self.tp.is_global:
continue
new_col -= self.tp.matrix_width(self.zoom)
# omit if new tile is current tile
if new_row == self.row and new_col == self.col:
continue
# create new tile
unique_neighbors[(new_row, new_col)] = self.tp.tile(
self.zoom, new_row, new_col
)
return unique_neighbors.values() | python | def get_neighbors(self, connectedness=8):
"""
Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors outside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight.
"""
if connectedness not in [4, 8]:
raise ValueError("only connectedness values 8 or 4 are allowed")
unique_neighbors = {}
# 4-connected neighborsfor pyramid
matrix_offsets = [
(-1, 0), # 1: above
(0, 1), # 2: right
(1, 0), # 3: below
(0, -1) # 4: left
]
if connectedness == 8:
matrix_offsets.extend([
(-1, 1), # 5: above right
(1, 1), # 6: below right
(1, -1), # 7: below left
(-1, -1) # 8: above left
])
for row_offset, col_offset in matrix_offsets:
new_row = self.row + row_offset
new_col = self.col + col_offset
# omit if row is outside of tile matrix
if new_row < 0 or new_row >= self.tp.matrix_height(self.zoom):
continue
# wrap around antimeridian if new column is outside of tile matrix
if new_col < 0:
if not self.tp.is_global:
continue
new_col = self.tp.matrix_width(self.zoom) + new_col
elif new_col >= self.tp.matrix_width(self.zoom):
if not self.tp.is_global:
continue
new_col -= self.tp.matrix_width(self.zoom)
# omit if new tile is current tile
if new_row == self.row and new_col == self.col:
continue
# create new tile
unique_neighbors[(new_row, new_col)] = self.tp.tile(
self.zoom, new_row, new_col
)
return unique_neighbors.values() | [
"def",
"get_neighbors",
"(",
"self",
",",
"connectedness",
"=",
"8",
")",
":",
"if",
"connectedness",
"not",
"in",
"[",
"4",
",",
"8",
"]",
":",
"raise",
"ValueError",
"(",
"\"only connectedness values 8 or 4 are allowed\"",
")",
"unique_neighbors",
"=",
"{",
"}",
"# 4-connected neighborsfor pyramid",
"matrix_offsets",
"=",
"[",
"(",
"-",
"1",
",",
"0",
")",
",",
"# 1: above",
"(",
"0",
",",
"1",
")",
",",
"# 2: right",
"(",
"1",
",",
"0",
")",
",",
"# 3: below",
"(",
"0",
",",
"-",
"1",
")",
"# 4: left",
"]",
"if",
"connectedness",
"==",
"8",
":",
"matrix_offsets",
".",
"extend",
"(",
"[",
"(",
"-",
"1",
",",
"1",
")",
",",
"# 5: above right",
"(",
"1",
",",
"1",
")",
",",
"# 6: below right",
"(",
"1",
",",
"-",
"1",
")",
",",
"# 7: below left",
"(",
"-",
"1",
",",
"-",
"1",
")",
"# 8: above left",
"]",
")",
"for",
"row_offset",
",",
"col_offset",
"in",
"matrix_offsets",
":",
"new_row",
"=",
"self",
".",
"row",
"+",
"row_offset",
"new_col",
"=",
"self",
".",
"col",
"+",
"col_offset",
"# omit if row is outside of tile matrix",
"if",
"new_row",
"<",
"0",
"or",
"new_row",
">=",
"self",
".",
"tp",
".",
"matrix_height",
"(",
"self",
".",
"zoom",
")",
":",
"continue",
"# wrap around antimeridian if new column is outside of tile matrix",
"if",
"new_col",
"<",
"0",
":",
"if",
"not",
"self",
".",
"tp",
".",
"is_global",
":",
"continue",
"new_col",
"=",
"self",
".",
"tp",
".",
"matrix_width",
"(",
"self",
".",
"zoom",
")",
"+",
"new_col",
"elif",
"new_col",
">=",
"self",
".",
"tp",
".",
"matrix_width",
"(",
"self",
".",
"zoom",
")",
":",
"if",
"not",
"self",
".",
"tp",
".",
"is_global",
":",
"continue",
"new_col",
"-=",
"self",
".",
"tp",
".",
"matrix_width",
"(",
"self",
".",
"zoom",
")",
"# omit if new tile is current tile",
"if",
"new_row",
"==",
"self",
".",
"row",
"and",
"new_col",
"==",
"self",
".",
"col",
":",
"continue",
"# create new tile",
"unique_neighbors",
"[",
"(",
"new_row",
",",
"new_col",
")",
"]",
"=",
"self",
".",
"tp",
".",
"tile",
"(",
"self",
".",
"zoom",
",",
"new_row",
",",
"new_col",
")",
"return",
"unique_neighbors",
".",
"values",
"(",
")"
] | Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors outside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight. | [
"Return",
"tile",
"neighbors",
"."
] | train | https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tile.py#L202-L263 |
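A short sketch of the connectedness parameter above, using an interior tile so no edge clipping or antimeridian wrapping applies:

from tilematrix import TilePyramid

tile = TilePyramid("geodetic").tile(4, 7, 9)
print(len(list(tile.get_neighbors(connectedness=4))))  # 4 direct neighbors
print(len(list(tile.get_neighbors())))                 # 8 neighbors (default connectedness)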
project-rig/rig | rig/bitfield.py | BitField.add_field | def add_field(self, identifier, length=None, start_at=None, tags=None):
"""Add a new field to the BitField.
If any existing fields' values are set, the newly created field will
become a child of those fields. This means that this field will exist
only when the parent fields' values are set as they are currently.
Parameters
----------
identifier : str
An identifier for the field. Must be a valid python identifier.
Field names must be unique within the scope in which they exist and
are only valid within that scope. For example::
>>> bf = BitField(32)
>>> bf.add_field("a")
>>> # Can add multiple fields with the same name if they exist
>>> # in different scopes
>>> bf0 = bf(a=0)
>>> bf0.add_field("b", length=4)
>>> bf1 = bf(a=1)
>>> bf1.add_field("b", length=8)
>>> # Can't add multiple fields with the same name which exist
>>> # within the same or nested scopes.
>>> bf.add_field("a")
Traceback (most recent call last):
ValueError: Field with identifier 'a' already exists
>>> bf.add_field("b")
Traceback (most recent call last):
ValueError: Field with identifier 'b' already exists
Here *three* fields are defined, one called "a" and the other two
called "b". The two fields called "b" are completely unrelated
(they may differ in size, position and associated set of tags) and
are distinguished by the fact that one exists when a=0 and the
other when a=1.
length : int or None
The number of bits in the field. If None the field will be
automatically assigned a length long enough for the largest value
assigned.
start_at : int or None
0-based index of least significant bit of the field within the
bit field. If None the field will be automatically located in free
space in the bit field.
tags : string or collection of strings or None
A (possibly empty) set of tags used to classify the field. Tags
should be valid Python identifiers. If a string, the string must be
a single tag or a space-separated list of tags. If *None*, an empty
set of tags is assumed. These tags are applied recursively to all
fields of which this field is a child.
Raises
------
ValueError
If any the field overlaps with another one or does not fit within
the bit field. Note that fields with unspecified lengths and
positions do not undergo such checks until their length and
position become known when :py:meth:`.assign_fields` is called.
"""
# Check for zero-length fields
if length is not None and length <= 0:
raise ValueError("Fields must be at least one bit in length.")
# Check for fields which don't fit in the bit field
if (start_at is not None and
(0 <= start_at >= self.length or
start_at + (length or 1) > self.length)):
raise ValueError(
"Field doesn't fit within {}-bit bit field.".format(
self.length))
# Check for fields which occupy the same bits
if start_at is not None:
end_at = start_at + (length or 1)
for other_identifier, other_field in \
self.fields.potential_fields(self.field_values):
if other_field.start_at is not None:
other_start_at = other_field.start_at
other_end_at = other_start_at + (other_field.length or 1)
if end_at > other_start_at and other_end_at > start_at:
raise ValueError(
"Field '{}' (range {}-{}) "
"overlaps field '{}' (range {}-{})".format(
identifier,
start_at, end_at,
other_identifier,
other_start_at, other_end_at))
# Normalise tags type
if type(tags) is str:
tags = set(tags.split())
elif tags is None:
tags = set()
else:
tags = set(tags)
# Add the field (checking that the identifier is unique in the process)
field = type(self)._Field(length, start_at, tags)
self.fields.add_field(field, identifier, self.field_values)
# Add tags to all parents of this field
for parent_identifier in self.fields.get_field_requirements(
identifier, self.field_values):
parent = self.fields.get_field(parent_identifier,
self.field_values)
parent.tags.update(tags) | python | def add_field(self, identifier, length=None, start_at=None, tags=None):
"""Add a new field to the BitField.
If any existing fields' values are set, the newly created field will
become a child of those fields. This means that this field will exist
only when the parent fields' values are set as they are currently.
Parameters
----------
identifier : str
An identifier for the field. Must be a valid python identifier.
Field names must be unique within the scope in which they exist and
are only valid within that scope. For example::
>>> bf = BitField(32)
>>> bf.add_field("a")
>>> # Can add multiple fields with the same name if they exist
>>> # in different scopes
>>> bf0 = bf(a=0)
>>> bf0.add_field("b", length=4)
>>> bf1 = bf(a=1)
>>> bf1.add_field("b", length=8)
>>> # Can't add multiple fields with the same name which exist
>>> # within the same or nested scopes.
>>> bf.add_field("a")
Traceback (most recent call last):
ValueError: Field with identifier 'a' already exists
>>> bf.add_field("b")
Traceback (most recent call last):
ValueError: Field with identifier 'b' already exists
Here *three* fields are defined, one called "a" and the other two
called "b". The two fields called "b" are completely unrelated
(they may differ in size, position and associated set of tags) and
are distinguished by the fact that one exists when a=0 and the
other when a=1.
length : int or None
The number of bits in the field. If None the field will be
automatically assigned a length long enough for the largest value
assigned.
start_at : int or None
0-based index of least significant bit of the field within the
bit field. If None the field will be automatically located in free
space in the bit field.
tags : string or collection of strings or None
A (possibly empty) set of tags used to classify the field. Tags
should be valid Python identifiers. If a string, the string must be
a single tag or a space-separated list of tags. If *None*, an empty
set of tags is assumed. These tags are applied recursively to all
fields of which this field is a child.
Raises
------
ValueError
If any the field overlaps with another one or does not fit within
the bit field. Note that fields with unspecified lengths and
positions do not undergo such checks until their length and
position become known when :py:meth:`.assign_fields` is called.
"""
# Check for zero-length fields
if length is not None and length <= 0:
raise ValueError("Fields must be at least one bit in length.")
# Check for fields which don't fit in the bit field
if (start_at is not None and
(0 <= start_at >= self.length or
start_at + (length or 1) > self.length)):
raise ValueError(
"Field doesn't fit within {}-bit bit field.".format(
self.length))
# Check for fields which occupy the same bits
if start_at is not None:
end_at = start_at + (length or 1)
for other_identifier, other_field in \
self.fields.potential_fields(self.field_values):
if other_field.start_at is not None:
other_start_at = other_field.start_at
other_end_at = other_start_at + (other_field.length or 1)
if end_at > other_start_at and other_end_at > start_at:
raise ValueError(
"Field '{}' (range {}-{}) "
"overlaps field '{}' (range {}-{})".format(
identifier,
start_at, end_at,
other_identifier,
other_start_at, other_end_at))
# Normalise tags type
if type(tags) is str:
tags = set(tags.split())
elif tags is None:
tags = set()
else:
tags = set(tags)
# Add the field (checking that the identifier is unique in the process)
field = type(self)._Field(length, start_at, tags)
self.fields.add_field(field, identifier, self.field_values)
# Add tags to all parents of this field
for parent_identifier in self.fields.get_field_requirements(
identifier, self.field_values):
parent = self.fields.get_field(parent_identifier,
self.field_values)
parent.tags.update(tags) | [
"def",
"add_field",
"(",
"self",
",",
"identifier",
",",
"length",
"=",
"None",
",",
"start_at",
"=",
"None",
",",
"tags",
"=",
"None",
")",
":",
"# Check for zero-length fields",
"if",
"length",
"is",
"not",
"None",
"and",
"length",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Fields must be at least one bit in length.\"",
")",
"# Check for fields which don't fit in the bit field",
"if",
"(",
"start_at",
"is",
"not",
"None",
"and",
"(",
"0",
"<=",
"start_at",
">=",
"self",
".",
"length",
"or",
"start_at",
"+",
"(",
"length",
"or",
"1",
")",
">",
"self",
".",
"length",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Field doesn't fit within {}-bit bit field.\"",
".",
"format",
"(",
"self",
".",
"length",
")",
")",
"# Check for fields which occupy the same bits",
"if",
"start_at",
"is",
"not",
"None",
":",
"end_at",
"=",
"start_at",
"+",
"(",
"length",
"or",
"1",
")",
"for",
"other_identifier",
",",
"other_field",
"in",
"self",
".",
"fields",
".",
"potential_fields",
"(",
"self",
".",
"field_values",
")",
":",
"if",
"other_field",
".",
"start_at",
"is",
"not",
"None",
":",
"other_start_at",
"=",
"other_field",
".",
"start_at",
"other_end_at",
"=",
"other_start_at",
"+",
"(",
"other_field",
".",
"length",
"or",
"1",
")",
"if",
"end_at",
">",
"other_start_at",
"and",
"other_end_at",
">",
"start_at",
":",
"raise",
"ValueError",
"(",
"\"Field '{}' (range {}-{}) \"",
"\"overlaps field '{}' (range {}-{})\"",
".",
"format",
"(",
"identifier",
",",
"start_at",
",",
"end_at",
",",
"other_identifier",
",",
"other_start_at",
",",
"other_end_at",
")",
")",
"# Normalise tags type",
"if",
"type",
"(",
"tags",
")",
"is",
"str",
":",
"tags",
"=",
"set",
"(",
"tags",
".",
"split",
"(",
")",
")",
"elif",
"tags",
"is",
"None",
":",
"tags",
"=",
"set",
"(",
")",
"else",
":",
"tags",
"=",
"set",
"(",
"tags",
")",
"# Add the field (checking that the identifier is unique in the process)",
"field",
"=",
"type",
"(",
"self",
")",
".",
"_Field",
"(",
"length",
",",
"start_at",
",",
"tags",
")",
"self",
".",
"fields",
".",
"add_field",
"(",
"field",
",",
"identifier",
",",
"self",
".",
"field_values",
")",
"# Add tags to all parents of this field",
"for",
"parent_identifier",
"in",
"self",
".",
"fields",
".",
"get_field_requirements",
"(",
"identifier",
",",
"self",
".",
"field_values",
")",
":",
"parent",
"=",
"self",
".",
"fields",
".",
"get_field",
"(",
"parent_identifier",
",",
"self",
".",
"field_values",
")",
"parent",
".",
"tags",
".",
"update",
"(",
"tags",
")"
] | Add a new field to the BitField.
If any existing fields' values are set, the newly created field will
become a child of those fields. This means that this field will exist
only when the parent fields' values are set as they are currently.
Parameters
----------
identifier : str
An identifier for the field. Must be a valid python identifier.
Field names must be unique within the scope in which they exist and
are only valid within that scope. For example::
>>> bf = BitField(32)
>>> bf.add_field("a")
>>> # Can add multiple fields with the same name if they exist
>>> # in different scopes
>>> bf0 = bf(a=0)
>>> bf0.add_field("b", length=4)
>>> bf1 = bf(a=1)
>>> bf1.add_field("b", length=8)
>>> # Can't add multiple fields with the same name which exist
>>> # within the same or nested scopes.
>>> bf.add_field("a")
Traceback (most recent call last):
ValueError: Field with identifier 'a' already exists
>>> bf.add_field("b")
Traceback (most recent call last):
ValueError: Field with identifier 'b' already exists
Here *three* fields are defined, one called "a" and the other two
called "b". The two fields called "b" are completely unrelated
(they may differ in size, position and associated set of tags) and
are distinguished by the fact that one exists when a=0 and the
other when a=1.
length : int or None
The number of bits in the field. If None the field will be
automatically assigned a length long enough for the largest value
assigned.
start_at : int or None
0-based index of least significant bit of the field within the
bit field. If None the field will be automatically located in free
space in the bit field.
tags : string or collection of strings or None
A (possibly empty) set of tags used to classify the field. Tags
should be valid Python identifiers. If a string, the string must be
a single tag or a space-separated list of tags. If *None*, an empty
set of tags is assumed. These tags are applied recursively to all
fields of which this field is a child.
Raises
------
ValueError
If any the field overlaps with another one or does not fit within
the bit field. Note that fields with unspecified lengths and
positions do not undergo such checks until their length and
position become known when :py:meth:`.assign_fields` is called. | [
"Add",
"a",
"new",
"field",
"to",
"the",
"BitField",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L81-L189 |
project-rig/rig | rig/bitfield.py | BitField.get_value | def get_value(self, tag=None, field=None):
"""Generate an integer whose bits are set according to the values of
fields in this bit field. All other bits are set to zero.
Parameters
----------
tag : str
Optionally specifies that the value should only include fields with
the specified tag.
field : str
Optionally specifies that the value should only include the
specified field.
Raises
------
ValueError
If a field's value, length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
assert not (tag is not None and field is not None), \
"Cannot filter by tag and field simultaneously."
selected_fields = self._select_by_field_or_tag(tag, field)
# Check all selected fields have values defined
missing_fields_idents = set(selected_fields) - set(self.field_values)
if missing_fields_idents:
raise ValueError(
"Cannot generate value with undefined fields {}.".format(
", ".join("'{}'".format(f)
for f in missing_fields_idents)))
# Build the value
value = 0
for identifier, field in iteritems(selected_fields):
if field.length is None or field.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
identifier))
value |= (self.field_values[identifier] <<
field.start_at)
return value | python | def get_value(self, tag=None, field=None):
"""Generate an integer whose bits are set according to the values of
fields in this bit field. All other bits are set to zero.
Parameters
----------
tag : str
Optionally specifies that the value should only include fields with
the specified tag.
field : str
Optionally specifies that the value should only include the
specified field.
Raises
------
ValueError
If a field's value, length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
assert not (tag is not None and field is not None), \
"Cannot filter by tag and field simultaneously."
selected_fields = self._select_by_field_or_tag(tag, field)
# Check all selected fields have values defined
missing_fields_idents = set(selected_fields) - set(self.field_values)
if missing_fields_idents:
raise ValueError(
"Cannot generate value with undefined fields {}.".format(
", ".join("'{}'".format(f)
for f in missing_fields_idents)))
# Build the value
value = 0
for identifier, field in iteritems(selected_fields):
if field.length is None or field.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
identifier))
value |= (self.field_values[identifier] <<
field.start_at)
return value | [
"def",
"get_value",
"(",
"self",
",",
"tag",
"=",
"None",
",",
"field",
"=",
"None",
")",
":",
"assert",
"not",
"(",
"tag",
"is",
"not",
"None",
"and",
"field",
"is",
"not",
"None",
")",
",",
"\"Cannot filter by tag and field simultaneously.\"",
"selected_fields",
"=",
"self",
".",
"_select_by_field_or_tag",
"(",
"tag",
",",
"field",
")",
"# Check all selected fields have values defined",
"missing_fields_idents",
"=",
"set",
"(",
"selected_fields",
")",
"-",
"set",
"(",
"self",
".",
"field_values",
")",
"if",
"missing_fields_idents",
":",
"raise",
"ValueError",
"(",
"\"Cannot generate value with undefined fields {}.\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"\"'{}'\"",
".",
"format",
"(",
"f",
")",
"for",
"f",
"in",
"missing_fields_idents",
")",
")",
")",
"# Build the value",
"value",
"=",
"0",
"for",
"identifier",
",",
"field",
"in",
"iteritems",
"(",
"selected_fields",
")",
":",
"if",
"field",
".",
"length",
"is",
"None",
"or",
"field",
".",
"start_at",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Field '{}' does not have a fixed size/position.\"",
".",
"format",
"(",
"identifier",
")",
")",
"value",
"|=",
"(",
"self",
".",
"field_values",
"[",
"identifier",
"]",
"<<",
"field",
".",
"start_at",
")",
"return",
"value"
] | Generate an integer whose bits are set according to the values of
fields in this bit field. All other bits are set to zero.
Parameters
----------
tag : str
Optionally specifies that the value should only include fields with
the specified tag.
field : str
Optionally specifies that the value should only include the
specified field.
Raises
------
ValueError
If a field's value, length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available. | [
"Generate",
"an",
"integer",
"whose",
"bits",
"are",
"set",
"according",
"to",
"the",
"values",
"of",
"fields",
"in",
"this",
"bit",
"field",
".",
"All",
"other",
"bits",
"are",
"set",
"to",
"zero",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L260-L307 |
project-rig/rig | rig/bitfield.py | BitField.get_mask | def get_mask(self, tag=None, field=None):
"""Get the mask for all fields which exist in the current bit field.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
if tag is not None and field is not None:
raise TypeError("get_mask() takes exactly one keyword argument, "
"either 'field' or 'tag' (both given)")
selected_fields = self._select_by_field_or_tag(tag, field)
# Build the mask (and throw an exception if we encounter a field
# without a fixed size/length.
mask = 0
for identifier, field in iteritems(selected_fields):
if field.length is None or field.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
identifier))
mask |= ((1 << field.length) - 1) << field.start_at
return mask | python | def get_mask(self, tag=None, field=None):
"""Get the mask for all fields which exist in the current bit field.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
if tag is not None and field is not None:
raise TypeError("get_mask() takes exactly one keyword argument, "
"either 'field' or 'tag' (both given)")
selected_fields = self._select_by_field_or_tag(tag, field)
# Build the mask (and throw an exception if we encounter a field
# without a fixed size/length.
mask = 0
for identifier, field in iteritems(selected_fields):
if field.length is None or field.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
identifier))
mask |= ((1 << field.length) - 1) << field.start_at
return mask | [
"def",
"get_mask",
"(",
"self",
",",
"tag",
"=",
"None",
",",
"field",
"=",
"None",
")",
":",
"if",
"tag",
"is",
"not",
"None",
"and",
"field",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"get_mask() takes exactly one keyword argument, \"",
"\"either 'field' or 'tag' (both given)\"",
")",
"selected_fields",
"=",
"self",
".",
"_select_by_field_or_tag",
"(",
"tag",
",",
"field",
")",
"# Build the mask (and throw an exception if we encounter a field",
"# without a fixed size/length.",
"mask",
"=",
"0",
"for",
"identifier",
",",
"field",
"in",
"iteritems",
"(",
"selected_fields",
")",
":",
"if",
"field",
".",
"length",
"is",
"None",
"or",
"field",
".",
"start_at",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Field '{}' does not have a fixed size/position.\"",
".",
"format",
"(",
"identifier",
")",
")",
"mask",
"|=",
"(",
"(",
"1",
"<<",
"field",
".",
"length",
")",
"-",
"1",
")",
"<<",
"field",
".",
"start_at",
"return",
"mask"
] | Get the mask for all fields which exist in the current bit field.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available. | [
"Get",
"the",
"mask",
"for",
"all",
"fields",
"which",
"exist",
"in",
"the",
"current",
"bit",
"field",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L309-L348 |
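A minimal end-to-end sketch tying together add_field(), get_value() and get_mask() from the rows above; the field names and layout are arbitrary examples:

from rig.bitfield import BitField

bf = BitField(32)
bf.add_field("x", length=8, start_at=0)
bf.add_field("y", length=8, start_at=8)

keys = bf(x=3, y=5)
print(hex(keys.get_value()))          # 0x503: y=5 in bits 8-15, x=3 in bits 0-7
print(hex(keys.get_mask(field="y")))  # 0xff00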
project-rig/rig | rig/bitfield.py | BitField._select_by_field_or_tag | def _select_by_field_or_tag(self, tag=None, field=None):
"""For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
# Get the set of fields whose values will be included in the value
if field is not None:
# Select just the specified field (checking the field exists)
field_obj = self.fields.get_field(field, self.field_values)
selected_fields = OrderedDict([(field, field_obj)])
elif tag is not None:
# Select just fields with the specified tag
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values)
if tag in f.tags)
# Fail if no fields match the supplied tag. Because tags are
# applied to parent fields in the hierarchy, it is guaranteed that
# if a tag exists, at least one top-level (i.e. always present)
# field will have the tag.
if not selected_fields:
raise UnknownTagError(tag)
else:
# No specific field/tag supplied, select all enabled fields.
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values))
return selected_fields | python | def _select_by_field_or_tag(self, tag=None, field=None):
"""For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available.
"""
# Get the set of fields whose values will be included in the value
if field is not None:
# Select just the specified field (checking the field exists)
field_obj = self.fields.get_field(field, self.field_values)
selected_fields = OrderedDict([(field, field_obj)])
elif tag is not None:
# Select just fields with the specified tag
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values)
if tag in f.tags)
# Fail if no fields match the supplied tag. Because tags are
# applied to parent fields in the hierarchy, it is guaranteed that
# if a tag exists, at least one top-level (i.e. always present)
# field will have the tag.
if not selected_fields:
raise UnknownTagError(tag)
else:
# No specific field/tag supplied, select all enabled fields.
selected_fields = OrderedDict(
(i, f)
for (i, f) in self.fields.enabled_fields(self.field_values))
return selected_fields | [
"def",
"_select_by_field_or_tag",
"(",
"self",
",",
"tag",
"=",
"None",
",",
"field",
"=",
"None",
")",
":",
"# Get the set of fields whose values will be included in the value",
"if",
"field",
"is",
"not",
"None",
":",
"# Select just the specified field (checking the field exists)",
"field_obj",
"=",
"self",
".",
"fields",
".",
"get_field",
"(",
"field",
",",
"self",
".",
"field_values",
")",
"selected_fields",
"=",
"OrderedDict",
"(",
"[",
"(",
"field",
",",
"field_obj",
")",
"]",
")",
"elif",
"tag",
"is",
"not",
"None",
":",
"# Select just fields with the specified tag",
"selected_fields",
"=",
"OrderedDict",
"(",
"(",
"i",
",",
"f",
")",
"for",
"(",
"i",
",",
"f",
")",
"in",
"self",
".",
"fields",
".",
"enabled_fields",
"(",
"self",
".",
"field_values",
")",
"if",
"tag",
"in",
"f",
".",
"tags",
")",
"# Fail if no fields match the supplied tag. Because tags are",
"# applied to parent fields in the hierarchy, it is guaranteed that",
"# if a tag exists, at least one top-level (i.e. always present)",
"# field will have the tag.",
"if",
"not",
"selected_fields",
":",
"raise",
"UnknownTagError",
"(",
"tag",
")",
"else",
":",
"# No specific field/tag supplied, select all enabled fields.",
"selected_fields",
"=",
"OrderedDict",
"(",
"(",
"i",
",",
"f",
")",
"for",
"(",
"i",
",",
"f",
")",
"in",
"self",
".",
"fields",
".",
"enabled_fields",
"(",
"self",
".",
"field_values",
")",
")",
"return",
"selected_fields"
] | For internal use only. Returns an OrderedDict of {identifier: field}
representing fields which match the supplied field/tag.
Parameters
----------
tag : str
Optionally specifies that the mask should only include fields with
the specified tag.
field : str
Optionally specifies that the mask should only include the
specified field.
Raises
------
UnknownTagError
If the tag specified using the `tag` argument does not exist.
UnavailableFieldError
If the field specified using the `field` argument does not exist or
is not available. | [
"For",
"internal",
"use",
"only",
".",
"Returns",
"an",
"OrderedDict",
"of",
"{",
"identifier",
":",
"field",
"}",
"representing",
"fields",
"which",
"match",
"the",
"supplied",
"field",
"/",
"tag",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L350-L394 |
project-rig/rig | rig/bitfield.py | BitField.get_tags | def get_tags(self, field):
"""Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available.
"""
return self.fields.get_field(field, self.field_values).tags.copy() | python | def get_tags(self, field):
"""Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available.
"""
return self.fields.get_field(field, self.field_values).tags.copy() | [
"def",
"get_tags",
"(",
"self",
",",
"field",
")",
":",
"return",
"self",
".",
"fields",
".",
"get_field",
"(",
"field",
",",
"self",
".",
"field_values",
")",
".",
"tags",
".",
"copy",
"(",
")"
] | Get the set of tags for a given field.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field whose tag should be read.
Returns
-------
set([tag, ...])
Raises
------
UnavailableFieldError
If the field does not exist or is not available. | [
"Get",
"the",
"set",
"of",
"tags",
"for",
"a",
"given",
"field",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L396-L417 |
project-rig/rig | rig/bitfield.py | BitField.get_location_and_length | def get_location_and_length(self, field):
"""Get the location and length of a field within the bitfield.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field of interest.
Returns
-------
location, length
A pair of integers defining the bit-number of the least-significant
bit in the field and the total number of bits in the field
respectively.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnavailableFieldError
If the field does not exist or is not available.
"""
field_obj = self.fields.get_field(field, self.field_values)
if field_obj.length is None or field_obj.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
field))
return (field_obj.start_at, field_obj.length) | python | def get_location_and_length(self, field):
"""Get the location and length of a field within the bitfield.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field of interest.
Returns
-------
location, length
A pair of integers defining the bit-number of the least-significant
bit in the field and the total number of bits in the field
respectively.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnavailableFieldError
If the field does not exist or is not available.
"""
field_obj = self.fields.get_field(field, self.field_values)
if field_obj.length is None or field_obj.start_at is None:
raise ValueError(
"Field '{}' does not have a fixed size/position.".format(
field))
return (field_obj.start_at, field_obj.length) | [
"def",
"get_location_and_length",
"(",
"self",
",",
"field",
")",
":",
"field_obj",
"=",
"self",
".",
"fields",
".",
"get_field",
"(",
"field",
",",
"self",
".",
"field_values",
")",
"if",
"field_obj",
".",
"length",
"is",
"None",
"or",
"field_obj",
".",
"start_at",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Field '{}' does not have a fixed size/position.\"",
".",
"format",
"(",
"field",
")",
")",
"return",
"(",
"field_obj",
".",
"start_at",
",",
"field_obj",
".",
"length",
")"
] | Get the location and length of a field within the bitfield.
.. note::
The named field must be accessible given the current set of values
defined.
Parameters
----------
field : str
The field of interest.
Returns
-------
location, length
A pair of integers defining the bit-number of the least-significant
bit in the field and the total number of bits in the field
respectively.
Raises
------
ValueError
If a field's length or position has not been defined. (e.g.
:py:meth:`.assign_fields` has not been called).
UnavailableFieldError
If the field does not exist or is not available. | [
"Get",
"the",
"location",
"and",
"length",
"of",
"a",
"field",
"within",
"the",
"bitfield",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L419-L453 |
project-rig/rig | rig/bitfield.py | BitField.assign_fields | def assign_fields(self):
"""Assign a position & length to any fields which do not have one.
Users should typically call this method after all field values have
been assigned, otherwise fields may be fixed at an inadequate size.
"""
# We must fix fields at every level of the hierarchy separately
# (otherwise fields of children won't be allowed to overlap). Here we
# do a breadth-first iteration over the hierarchy to fix fields with
# given starting positions; then we do depth-first to fix other fields.
# Assign all fields with a fixed starting position in breadth first,
# top-down order. The breadth-first ensures that children's fixed
# position fields must fit around the fixed position fields of their
# parents.
queue = [(self.fields, {})]
while queue:
node, field_values = queue.pop(0)
# Assign all fields at this level whose position is fixed
self._assign_fields(node.fields, field_values,
assign_positions=False)
# Breadth-first search through children
for requirements, child in iteritems(node.children):
requirements = dict(requirements)
requirements.update(field_values)
queue.append((child, requirements))
# Assign all fields with movable starting positions in leaf-first,
# depth-first order. The depth first ordering for variable position
# fields ensures that parents don't allocate fields in positions which
# would collide with fixed and variable position fields their children
# have already allocated.
def recurse_assign_fields(node=self.fields, field_values={}):
# Assign fields of child nodes first (allowing them to allocate
# bits independently)
for requirements, child in iteritems(node.children):
child_field_values = dict(requirements)
child_field_values.update(field_values)
recurse_assign_fields(child, child_field_values)
# Finally, assign all remaining fields at this level in the tree
self._assign_fields(node.fields, field_values,
assign_positions=True)
recurse_assign_fields() | python | def assign_fields(self):
"""Assign a position & length to any fields which do not have one.
Users should typically call this method after all field values have
been assigned, otherwise fields may be fixed at an inadequate size.
"""
# We must fix fields at every level of the hierarchy separately
# (otherwise fields of children won't be allowed to overlap). Here we
# do a breadth-first iteration over the hierarchy to fix fields with
# given starting positions; then we do depth-first to fix other fields.
# Assign all fields with a fixed starting position in breadth first,
# top-down order. The breadth-first ensures that children's fixed
# position fields must fit around the fixed position fields of their
# parents.
queue = [(self.fields, {})]
while queue:
node, field_values = queue.pop(0)
# Assign all fields at this level whose position is fixed
self._assign_fields(node.fields, field_values,
assign_positions=False)
# Breadth-first search through children
for requirements, child in iteritems(node.children):
requirements = dict(requirements)
requirements.update(field_values)
queue.append((child, requirements))
# Assign all fields with movable starting positions in leaf-first,
# depth-first order. The depth first ordering for variable position
# fields ensures that parents don't allocate fields in positions which
# would collide with fixed and variable position fields their children
# have already allocated.
def recurse_assign_fields(node=self.fields, field_values={}):
# Assign fields of child nodes first (allowing them to allocate
# bits independently)
for requirements, child in iteritems(node.children):
child_field_values = dict(requirements)
child_field_values.update(field_values)
recurse_assign_fields(child, child_field_values)
# Finally, assign all remaining fields at this level in the tree
self._assign_fields(node.fields, field_values,
assign_positions=True)
recurse_assign_fields() | [
"def",
"assign_fields",
"(",
"self",
")",
":",
"# We must fix fields at every level of the hierarchy separately",
"# (otherwise fields of children won't be allowed to overlap). Here we",
"# do a breadth-first iteration over the hierarchy to fix fields with",
"# given starting positions; then we do depth-first to fix other fields.",
"# Assign all fields with a fixed starting position in breadth first,",
"# top-down order. The breadth-first ensures that children's fixed",
"# position fields must fit around the fixed position fields of their",
"# parents.",
"queue",
"=",
"[",
"(",
"self",
".",
"fields",
",",
"{",
"}",
")",
"]",
"while",
"queue",
":",
"node",
",",
"field_values",
"=",
"queue",
".",
"pop",
"(",
"0",
")",
"# Assign all fields at this level whose position is fixed",
"self",
".",
"_assign_fields",
"(",
"node",
".",
"fields",
",",
"field_values",
",",
"assign_positions",
"=",
"False",
")",
"# Breadth-first search through children",
"for",
"requirements",
",",
"child",
"in",
"iteritems",
"(",
"node",
".",
"children",
")",
":",
"requirements",
"=",
"dict",
"(",
"requirements",
")",
"requirements",
".",
"update",
"(",
"field_values",
")",
"queue",
".",
"append",
"(",
"(",
"child",
",",
"requirements",
")",
")",
"# Assign all fields with movable starting positions in leaf-first,",
"# depth-first order. The depth first ordering for variable position",
"# fields ensures that parents don't allocate fields in positions which",
"# would collide with fixed and variable position fields their children",
"# have already allocated.",
"def",
"recurse_assign_fields",
"(",
"node",
"=",
"self",
".",
"fields",
",",
"field_values",
"=",
"{",
"}",
")",
":",
"# Assign fields of child nodes first (allowing them to allocate",
"# bits independently)",
"for",
"requirements",
",",
"child",
"in",
"iteritems",
"(",
"node",
".",
"children",
")",
":",
"child_field_values",
"=",
"dict",
"(",
"requirements",
")",
"child_field_values",
".",
"update",
"(",
"field_values",
")",
"recurse_assign_fields",
"(",
"child",
",",
"child_field_values",
")",
"# Finally, assign all remaining fields at this level in the tree",
"self",
".",
"_assign_fields",
"(",
"node",
".",
"fields",
",",
"field_values",
",",
"assign_positions",
"=",
"True",
")",
"recurse_assign_fields",
"(",
")"
] | Assign a position & length to any fields which do not have one.
Users should typically call this method after all field values have
been assigned, otherwise fields may be fixed at an inadequate size. | [
"Assign",
"a",
"position",
"&",
"length",
"to",
"any",
"fields",
"which",
"do",
"not",
"have",
"one",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L455-L501 |
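For context, a minimal usage sketch of the two-pass assignment performed by assign_fields. It assumes the public BitField interface suggested by this file (a word length passed to the constructor, add_field for declaring fields, and calling the instance with field values to derive a child namespace); the field names, widths and the 32-bit word are invented for illustration and may not match the library exactly.

# Minimal usage sketch (assumed API; names and sizes are illustrative).
from rig.bitfield import BitField

b = BitField(32)
b.add_field("fixed", length=8, start_at=24)  # fixed position: placed in the breadth-first pass
b.add_field("free", length=4)                # movable position: placed in the depth-first pass
b(fixed=3).add_field("child")                # hierarchical field, only defined when fixed == 3

b.assign_fields()                            # lengths and positions are now pinned down

After this call the fields keep the positions chosen here even if further values are assigned, which is why the docstring recommends assigning all field values first.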
project-rig/rig | rig/bitfield.py | BitField._assign_fields | def _assign_fields(self, identifiers, field_values,
assign_positions, assigned_bits=0):
"""For internal use only. Assign lengths & positions to a subset of all
potential fields with the supplied field_values.
This method will check for any assigned bits of all potential fields
but will only assign those fields whose identifiers are provided.
Parameters
----------
identifiers : iterable of identifiers
The identifiers of the fields to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifiers)
assign_positions : bool
If False, will only assign lengths to fields whose positions are
already known. Otherwise lengths and positions will be assigned to
all fields as necessary.
assigned_bits : int
A bit mask of bits which are already allocated. (Note that this
will automatically be extended with any already-assigned potential
fields' bits.)
Returns
-------
int
        Mask of which bits are assigned to fields after fields have
been assigned.
"""
# Calculate a mask of already allocated fields' bits
for i, f in self.fields.potential_fields(field_values):
if f.length is not None and f.start_at is not None:
assigned_bits |= ((1 << f.length) - 1) << f.start_at
# Allocate all specified fields
for identifier in identifiers:
field = self.fields.get_field(identifier, field_values)
if field.length is not None and field.start_at is not None:
# Already allocated, do nothing!
pass
elif assign_positions or field.start_at is not None:
assigned_bits |= self._assign_field(assigned_bits,
identifier,
field_values)
return assigned_bits | python | def _assign_fields(self, identifiers, field_values,
assign_positions, assigned_bits=0):
"""For internal use only. Assign lengths & positions to a subset of all
potential fields with the supplied field_values.
This method will check for any assigned bits of all potential fields
but will only assign those fields whose identifiers are provided.
Parameters
----------
identifiers : iterable of identifiers
The identifiers of the fields to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifiers)
assign_positions : bool
If False, will only assign lengths to fields whose positions are
already known. Otherwise lengths and positions will be assigned to
all fields as necessary.
assigned_bits : int
A bit mask of bits which are already allocated. (Note that this
will automatically be extended with any already-assigned potential
fields' bits.)
Returns
-------
int
        Mask of which bits are assigned to fields after fields have
been assigned.
"""
# Calculate a mask of already allocated fields' bits
for i, f in self.fields.potential_fields(field_values):
if f.length is not None and f.start_at is not None:
assigned_bits |= ((1 << f.length) - 1) << f.start_at
# Allocate all specified fields
for identifier in identifiers:
field = self.fields.get_field(identifier, field_values)
if field.length is not None and field.start_at is not None:
# Already allocated, do nothing!
pass
elif assign_positions or field.start_at is not None:
assigned_bits |= self._assign_field(assigned_bits,
identifier,
field_values)
return assigned_bits | [
"def",
"_assign_fields",
"(",
"self",
",",
"identifiers",
",",
"field_values",
",",
"assign_positions",
",",
"assigned_bits",
"=",
"0",
")",
":",
"# Calculate a mask of already allocated fields' bits",
"for",
"i",
",",
"f",
"in",
"self",
".",
"fields",
".",
"potential_fields",
"(",
"field_values",
")",
":",
"if",
"f",
".",
"length",
"is",
"not",
"None",
"and",
"f",
".",
"start_at",
"is",
"not",
"None",
":",
"assigned_bits",
"|=",
"(",
"(",
"1",
"<<",
"f",
".",
"length",
")",
"-",
"1",
")",
"<<",
"f",
".",
"start_at",
"# Allocate all specified fields",
"for",
"identifier",
"in",
"identifiers",
":",
"field",
"=",
"self",
".",
"fields",
".",
"get_field",
"(",
"identifier",
",",
"field_values",
")",
"if",
"field",
".",
"length",
"is",
"not",
"None",
"and",
"field",
".",
"start_at",
"is",
"not",
"None",
":",
"# Already allocated, do nothing!",
"pass",
"elif",
"assign_positions",
"or",
"field",
".",
"start_at",
"is",
"not",
"None",
":",
"assigned_bits",
"|=",
"self",
".",
"_assign_field",
"(",
"assigned_bits",
",",
"identifier",
",",
"field_values",
")",
"return",
"assigned_bits"
] | For internal use only. Assign lengths & positions to a subset of all
potential fields with the supplied field_values.
This method will check for any assigned bits of all potential fields
but will only assign those fields whose identifiers are provided.
Parameters
----------
identifiers : iterable of identifiers
The identifiers of the fields to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifiers)
assign_positions : bool
If False, will only assign lengths to fields whose positions are
already known. Otherwise lengths and positions will be assigned to
all fields as necessary.
assigned_bits : int
A bit mask of bits which are already allocated. (Note that this
will automatically be extended with any already-assigned potential
fields' bits.)
Returns
-------
int
        Mask of which bits are assigned to fields after fields have
been assigned. | [
"For",
"internal",
"use",
"only",
".",
"Assign",
"lengths",
"&",
"positions",
"to",
"a",
"subset",
"of",
"all",
"potential",
"fields",
"with",
"the",
"supplied",
"field_values",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L857-L903 |
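The assigned_bits bookkeeping above reduces to OR-ing a per-field mask into a running integer; a standalone illustration of that arithmetic, independent of the class and with a made-up field layout:

# ((1 << length) - 1) << start_at builds the mask covering one field.
fields = [(0, 4), (8, 4)]              # (start_at, length) pairs, invented for the example
assigned_bits = 0
for start_at, length in fields:
    assigned_bits |= ((1 << length) - 1) << start_at
print(bin(assigned_bits))              # 0b111100001111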
project-rig/rig | rig/bitfield.py | BitField._assign_field | def _assign_field(self, assigned_bits, identifier, field_values):
"""For internal use only. Assign a length and position to a field
which may have either one of these values missing.
Parameters
----------
assigned_bits : int
A bit mask of bits already in use by other fields
identifier : str
The identifier of the field to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifier)
Returns
-------
int
        Mask of which bits are assigned to fields after this field
has been assigned.
"""
field = self.fields.get_field(identifier, field_values)
length = field.length
if length is None:
# Assign lengths based on values
length = int(log(field.max_value, 2)) + 1
start_at = field.start_at
if start_at is None:
# Force a failure if no better space is found
start_at = self.length
# Try every position until a space is found
for bit in range(0, self.length - length):
field_bits = ((1 << length) - 1) << bit
if not (assigned_bits & field_bits):
start_at = bit
assigned_bits |= field_bits
break
else:
# A start position has been forced, ensure that it can be fulfilled
field_bits = ((1 << length) - 1) << start_at
if assigned_bits & field_bits:
raise ValueError(
"{}-bit field {} with fixed position does not fit in "
"{}.".format(
field.length,
self.fields.get_field_human_readable(identifier,
field_values),
type(self).__name__
)
)
# Mark these bits as assigned
assigned_bits |= field_bits
# Check that the calculated field is within the bit field
if start_at + length <= self.length:
field.length = length
field.start_at = start_at
else:
raise ValueError(
"{}-bit field {} does not fit in {}.".format(
field.length,
self.fields.get_field_human_readable(identifier,
field_values),
type(self).__name__
)
)
return assigned_bits | python | def _assign_field(self, assigned_bits, identifier, field_values):
"""For internal use only. Assign a length and position to a field
which may have either one of these values missing.
Parameters
----------
assigned_bits : int
A bit mask of bits already in use by other fields
identifier : str
The identifier of the field to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifier)
Returns
-------
int
        Mask of which bits are assigned to fields after this field
has been assigned.
"""
field = self.fields.get_field(identifier, field_values)
length = field.length
if length is None:
# Assign lengths based on values
length = int(log(field.max_value, 2)) + 1
start_at = field.start_at
if start_at is None:
# Force a failure if no better space is found
start_at = self.length
# Try every position until a space is found
for bit in range(0, self.length - length):
field_bits = ((1 << length) - 1) << bit
if not (assigned_bits & field_bits):
start_at = bit
assigned_bits |= field_bits
break
else:
# A start position has been forced, ensure that it can be fulfilled
field_bits = ((1 << length) - 1) << start_at
if assigned_bits & field_bits:
raise ValueError(
"{}-bit field {} with fixed position does not fit in "
"{}.".format(
field.length,
self.fields.get_field_human_readable(identifier,
field_values),
type(self).__name__
)
)
# Mark these bits as assigned
assigned_bits |= field_bits
# Check that the calculated field is within the bit field
if start_at + length <= self.length:
field.length = length
field.start_at = start_at
else:
raise ValueError(
"{}-bit field {} does not fit in {}.".format(
field.length,
self.fields.get_field_human_readable(identifier,
field_values),
type(self).__name__
)
)
return assigned_bits | [
"def",
"_assign_field",
"(",
"self",
",",
"assigned_bits",
",",
"identifier",
",",
"field_values",
")",
":",
"field",
"=",
"self",
".",
"fields",
".",
"get_field",
"(",
"identifier",
",",
"field_values",
")",
"length",
"=",
"field",
".",
"length",
"if",
"length",
"is",
"None",
":",
"# Assign lengths based on values",
"length",
"=",
"int",
"(",
"log",
"(",
"field",
".",
"max_value",
",",
"2",
")",
")",
"+",
"1",
"start_at",
"=",
"field",
".",
"start_at",
"if",
"start_at",
"is",
"None",
":",
"# Force a failure if no better space is found",
"start_at",
"=",
"self",
".",
"length",
"# Try every position until a space is found",
"for",
"bit",
"in",
"range",
"(",
"0",
",",
"self",
".",
"length",
"-",
"length",
")",
":",
"field_bits",
"=",
"(",
"(",
"1",
"<<",
"length",
")",
"-",
"1",
")",
"<<",
"bit",
"if",
"not",
"(",
"assigned_bits",
"&",
"field_bits",
")",
":",
"start_at",
"=",
"bit",
"assigned_bits",
"|=",
"field_bits",
"break",
"else",
":",
"# A start position has been forced, ensure that it can be fulfilled",
"field_bits",
"=",
"(",
"(",
"1",
"<<",
"length",
")",
"-",
"1",
")",
"<<",
"start_at",
"if",
"assigned_bits",
"&",
"field_bits",
":",
"raise",
"ValueError",
"(",
"\"{}-bit field {} with fixed position does not fit in \"",
"\"{}.\"",
".",
"format",
"(",
"field",
".",
"length",
",",
"self",
".",
"fields",
".",
"get_field_human_readable",
"(",
"identifier",
",",
"field_values",
")",
",",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"# Mark these bits as assigned",
"assigned_bits",
"|=",
"field_bits",
"# Check that the calculated field is within the bit field",
"if",
"start_at",
"+",
"length",
"<=",
"self",
".",
"length",
":",
"field",
".",
"length",
"=",
"length",
"field",
".",
"start_at",
"=",
"start_at",
"else",
":",
"raise",
"ValueError",
"(",
"\"{}-bit field {} does not fit in {}.\"",
".",
"format",
"(",
"field",
".",
"length",
",",
"self",
".",
"fields",
".",
"get_field_human_readable",
"(",
"identifier",
",",
"field_values",
")",
",",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"return",
"assigned_bits"
] | For internal use only. Assign a length and position to a field
which may have either one of these values missing.
Parameters
----------
assigned_bits : int
A bit mask of bits already in use by other fields
identifier : str
The identifier of the field to assign
field_values : {identifier: value, ...}
The values held by various fields (used to access the correct
identifier)
Returns
-------
int
        Mask of which bits are assigned to fields after this field
has been assigned. | [
"For",
"internal",
"use",
"only",
".",
"Assign",
"a",
"length",
"and",
"position",
"to",
"a",
"field",
"which",
"may",
"have",
"either",
"one",
"of",
"these",
"values",
"missing",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/bitfield.py#L905-L976 |
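The core of _assign_field is a first-fit scan over bit positions followed by a saturation check; a self-contained sketch of that search (not the library code itself, and word_length simply plays the role of self.length):

# Standalone first-fit sketch.
def first_fit(assigned_bits, length, word_length=32):
    for bit in range(0, word_length - length + 1):      # includes the topmost valid slot
        field_bits = ((1 << length) - 1) << bit
        if not (assigned_bits & field_bits):
            return bit
    raise ValueError("{}-bit field does not fit".format(length))

print(first_fit(0b1111, 4))   # 4: the first free 4-bit slot above the low nibble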
Metatab/metapack | metapack/package/excel.py | ExcelPackageBuilder._load_resources | def _load_resources(self):
"""Remove the geography from the files, since it isn't particularly useful in Excel"""
for t in self.doc.find('Root.Table'):
for c in t.find('Table.Column'):
if c.get_value('datatype') == 'geometry':
c['transform'] = '^empty_str'
c['datatype'] = 'text'
return super()._load_resources() | python | def _load_resources(self):
"""Remove the geography from the files, since it isn't particularly useful in Excel"""
for t in self.doc.find('Root.Table'):
for c in t.find('Table.Column'):
if c.get_value('datatype') == 'geometry':
c['transform'] = '^empty_str'
c['datatype'] = 'text'
return super()._load_resources() | [
"def",
"_load_resources",
"(",
"self",
")",
":",
"for",
"t",
"in",
"self",
".",
"doc",
".",
"find",
"(",
"'Root.Table'",
")",
":",
"for",
"c",
"in",
"t",
".",
"find",
"(",
"'Table.Column'",
")",
":",
"if",
"c",
".",
"get_value",
"(",
"'datatype'",
")",
"==",
"'geometry'",
":",
"c",
"[",
"'transform'",
"]",
"=",
"'^empty_str'",
"c",
"[",
"'datatype'",
"]",
"=",
"'text'",
"return",
"super",
"(",
")",
".",
"_load_resources",
"(",
")"
] | Remove the geography from the files, since it isn't particularly useful in Excel | [
"Remove",
"the",
"geography",
"from",
"the",
"files",
"since",
"it",
"isn",
"t",
"particularly",
"useful",
"in",
"Excel"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/excel.py#L91-L100 |
Metatab/metapack | metapack/cli/metaaws.py | user_policy_to_dict | def user_policy_to_dict(doc):
"""Convert a bucket policy to a dict mapping principal/prefix names to 'R' or 'W' """
import json
if not isinstance(doc, dict):
doc = json.loads(doc)
d = {}
def get_statement(name):
for s in doc['Statement']:
if s['Sid'] == name:
return s
for r in get_statement('read')['Principal']['AWS']:
bucket, prefix = r.replace(arn_prefix, '').replace('/*', '').split('/')
d[(bucket, prefix.strip('/'))] = 'R'
try:
for r in get_statement('write')['Resource']:
bucket, prefix = r.replace(arn_prefix, '').replace('/*', '').split('/')
d[(bucket, prefix.strip('/'))] = 'W'
except TypeError:
pass # No write section
return d | python | def user_policy_to_dict(doc):
"""Convert a bucket policy to a dict mapping principal/prefix names to 'R' or 'W' """
import json
if not isinstance(doc, dict):
doc = json.loads(doc)
d = {}
def get_statement(name):
for s in doc['Statement']:
if s['Sid'] == name:
return s
for r in get_statement('read')['Principal']['AWS']:
bucket, prefix = r.replace(arn_prefix, '').replace('/*', '').split('/')
d[(bucket, prefix.strip('/'))] = 'R'
try:
for r in get_statement('write')['Resource']:
bucket, prefix = r.replace(arn_prefix, '').replace('/*', '').split('/')
d[(bucket, prefix.strip('/'))] = 'W'
except TypeError:
pass # No write section
return d | [
"def",
"user_policy_to_dict",
"(",
"doc",
")",
":",
"import",
"json",
"if",
"not",
"isinstance",
"(",
"doc",
",",
"dict",
")",
":",
"doc",
"=",
"json",
".",
"loads",
"(",
"doc",
")",
"d",
"=",
"{",
"}",
"def",
"get_statement",
"(",
"name",
")",
":",
"for",
"s",
"in",
"doc",
"[",
"'Statement'",
"]",
":",
"if",
"s",
"[",
"'Sid'",
"]",
"==",
"name",
":",
"return",
"s",
"for",
"r",
"in",
"get_statement",
"(",
"'read'",
")",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
":",
"bucket",
",",
"prefix",
"=",
"r",
".",
"replace",
"(",
"arn_prefix",
",",
"''",
")",
".",
"replace",
"(",
"'/*'",
",",
"''",
")",
".",
"split",
"(",
"'/'",
")",
"d",
"[",
"(",
"bucket",
",",
"prefix",
".",
"strip",
"(",
"'/'",
")",
")",
"]",
"=",
"'R'",
"try",
":",
"for",
"r",
"in",
"get_statement",
"(",
"'write'",
")",
"[",
"'Resource'",
"]",
":",
"bucket",
",",
"prefix",
"=",
"r",
".",
"replace",
"(",
"arn_prefix",
",",
"''",
")",
".",
"replace",
"(",
"'/*'",
",",
"''",
")",
".",
"split",
"(",
"'/'",
")",
"d",
"[",
"(",
"bucket",
",",
"prefix",
".",
"strip",
"(",
"'/'",
")",
")",
"]",
"=",
"'W'",
"except",
"TypeError",
":",
"pass",
"# No write section",
"return",
"d"
] | Convert a bucket policy to a dict mapping principal/prefix names to 'R' or 'W' | [
"Convert",
"a",
"bucket",
"policy",
"to",
"a",
"dict",
"mapping",
"principal",
"/",
"prefix",
"names",
"to",
"R",
"or",
"W"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L225-L251 |
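A mechanical trace of user_policy_to_dict on a made-up policy document. It assumes arn_prefix is the S3 ARN prefix 'arn:aws:s3:::'; the bucket and prefix names are invented, and the statement shapes only mirror the fields the code above actually reads.

# Hypothetical input; only the fields the function touches are filled in.
policy = {
    "Statement": [
        {"Sid": "read",
         "Principal": {"AWS": ["arn:aws:s3:::example-bucket/public/*"]}},
        {"Sid": "write",
         "Resource": ["arn:aws:s3:::example-bucket/uploads/*"]},
    ]
}
# user_policy_to_dict(policy) would return roughly:
# {('example-bucket', 'public'): 'R', ('example-bucket', 'uploads'): 'W'}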
Metatab/metapack | metapack/cli/metaaws.py | make_bucket_policy_statements | def make_bucket_policy_statements(bucket):
"""Return the statemtns in a bucket policy as a dict of dicts"""
import yaml
from os.path import dirname, join, abspath
import copy
import metatab
with open(join(dirname(abspath(metatab.__file__)), 'support', 'policy_parts.yaml')) as f:
parts = yaml.load(f)
statements = {}
cl = copy.deepcopy(parts['list'])
cl['Resource'] = arn_prefix + bucket
statements['list'] = cl
cl = copy.deepcopy(parts['bucket'])
cl['Resource'] = arn_prefix + bucket
statements['bucket'] = cl
for sd in TOP_LEVEL_DIRS:
cl = copy.deepcopy(parts['read'])
cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*'
cl['Sid'] = cl['Sid'].title() + sd.title()
statements[cl['Sid']] = cl
cl = copy.deepcopy(parts['write'])
cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*'
cl['Sid'] = cl['Sid'].title() + sd.title()
statements[cl['Sid']] = cl
cl = copy.deepcopy(parts['listb'])
cl['Resource'] = arn_prefix + bucket
cl['Sid'] = cl['Sid'].title() + sd.title()
cl['Condition']['StringLike']['s3:prefix'] = [sd + '/*']
statements[cl['Sid']] = cl
return statements | python | def make_bucket_policy_statements(bucket):
"""Return the statemtns in a bucket policy as a dict of dicts"""
import yaml
from os.path import dirname, join, abspath
import copy
import metatab
with open(join(dirname(abspath(metatab.__file__)), 'support', 'policy_parts.yaml')) as f:
parts = yaml.load(f)
statements = {}
cl = copy.deepcopy(parts['list'])
cl['Resource'] = arn_prefix + bucket
statements['list'] = cl
cl = copy.deepcopy(parts['bucket'])
cl['Resource'] = arn_prefix + bucket
statements['bucket'] = cl
for sd in TOP_LEVEL_DIRS:
cl = copy.deepcopy(parts['read'])
cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*'
cl['Sid'] = cl['Sid'].title() + sd.title()
statements[cl['Sid']] = cl
cl = copy.deepcopy(parts['write'])
cl['Resource'] = arn_prefix + bucket + '/' + sd + '/*'
cl['Sid'] = cl['Sid'].title() + sd.title()
statements[cl['Sid']] = cl
cl = copy.deepcopy(parts['listb'])
cl['Resource'] = arn_prefix + bucket
cl['Sid'] = cl['Sid'].title() + sd.title()
cl['Condition']['StringLike']['s3:prefix'] = [sd + '/*']
statements[cl['Sid']] = cl
return statements | [
"def",
"make_bucket_policy_statements",
"(",
"bucket",
")",
":",
"import",
"yaml",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"join",
",",
"abspath",
"import",
"copy",
"import",
"metatab",
"with",
"open",
"(",
"join",
"(",
"dirname",
"(",
"abspath",
"(",
"metatab",
".",
"__file__",
")",
")",
",",
"'support'",
",",
"'policy_parts.yaml'",
")",
")",
"as",
"f",
":",
"parts",
"=",
"yaml",
".",
"load",
"(",
"f",
")",
"statements",
"=",
"{",
"}",
"cl",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
"[",
"'list'",
"]",
")",
"cl",
"[",
"'Resource'",
"]",
"=",
"arn_prefix",
"+",
"bucket",
"statements",
"[",
"'list'",
"]",
"=",
"cl",
"cl",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
"[",
"'bucket'",
"]",
")",
"cl",
"[",
"'Resource'",
"]",
"=",
"arn_prefix",
"+",
"bucket",
"statements",
"[",
"'bucket'",
"]",
"=",
"cl",
"for",
"sd",
"in",
"TOP_LEVEL_DIRS",
":",
"cl",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
"[",
"'read'",
"]",
")",
"cl",
"[",
"'Resource'",
"]",
"=",
"arn_prefix",
"+",
"bucket",
"+",
"'/'",
"+",
"sd",
"+",
"'/*'",
"cl",
"[",
"'Sid'",
"]",
"=",
"cl",
"[",
"'Sid'",
"]",
".",
"title",
"(",
")",
"+",
"sd",
".",
"title",
"(",
")",
"statements",
"[",
"cl",
"[",
"'Sid'",
"]",
"]",
"=",
"cl",
"cl",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
"[",
"'write'",
"]",
")",
"cl",
"[",
"'Resource'",
"]",
"=",
"arn_prefix",
"+",
"bucket",
"+",
"'/'",
"+",
"sd",
"+",
"'/*'",
"cl",
"[",
"'Sid'",
"]",
"=",
"cl",
"[",
"'Sid'",
"]",
".",
"title",
"(",
")",
"+",
"sd",
".",
"title",
"(",
")",
"statements",
"[",
"cl",
"[",
"'Sid'",
"]",
"]",
"=",
"cl",
"cl",
"=",
"copy",
".",
"deepcopy",
"(",
"parts",
"[",
"'listb'",
"]",
")",
"cl",
"[",
"'Resource'",
"]",
"=",
"arn_prefix",
"+",
"bucket",
"cl",
"[",
"'Sid'",
"]",
"=",
"cl",
"[",
"'Sid'",
"]",
".",
"title",
"(",
")",
"+",
"sd",
".",
"title",
"(",
")",
"cl",
"[",
"'Condition'",
"]",
"[",
"'StringLike'",
"]",
"[",
"'s3:prefix'",
"]",
"=",
"[",
"sd",
"+",
"'/*'",
"]",
"statements",
"[",
"cl",
"[",
"'Sid'",
"]",
"]",
"=",
"cl",
"return",
"statements"
] | Return the statements in a bucket policy as a dict of dicts | [
"Return",
"the",
"statemtns",
"in",
"a",
"bucket",
"policy",
"as",
"a",
"dict",
"of",
"dicts"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L254-L294 |
Metatab/metapack | metapack/cli/metaaws.py | bucket_dict_to_policy | def bucket_dict_to_policy(args, bucket_name, d):
"""
Create a bucket policy document from a permissions dict.
The dictionary d maps (user, prefix) to 'R' or 'W'.
:param bucket_name:
:param d:
:return:
"""
import json
iam = get_resource(args, 'iam')
statements = make_bucket_policy_statements(bucket_name)
    user_stats = set() # statement triples
for (user, prefix), mode in d.items():
user_stats.add((user, 'list'))
user_stats.add((user, 'bucket'))
if mode == 'R':
user_stats.add((user, 'Read' + prefix.title()))
user_stats.add((user, 'List' + prefix.title()))
elif mode == 'W':
user_stats.add((user, 'List' + prefix.title()))
user_stats.add((user, 'Read' + prefix.title()))
user_stats.add((user, 'Write' + prefix.title()))
users_arns = {}
for user_name, section in user_stats:
section = statements[section]
if user_name not in users_arns:
user = iam.User(user_name)
users_arns[user.name] = user
else:
user = users_arns[user_name]
section['Principal']['AWS'].append(user.arn)
for sid in list(statements.keys()):
if not statements[sid]['Principal']['AWS']:
del statements[sid]
return json.dumps(dict(Version="2012-10-17", Statement=list(statements.values())), indent=4) | python | def bucket_dict_to_policy(args, bucket_name, d):
"""
Create a bucket policy document from a permissions dict.
The dictionary d maps (user, prefix) to 'R' or 'W'.
:param bucket_name:
:param d:
:return:
"""
import json
iam = get_resource(args, 'iam')
statements = make_bucket_policy_statements(bucket_name)
    user_stats = set() # statement triples
for (user, prefix), mode in d.items():
user_stats.add((user, 'list'))
user_stats.add((user, 'bucket'))
if mode == 'R':
user_stats.add((user, 'Read' + prefix.title()))
user_stats.add((user, 'List' + prefix.title()))
elif mode == 'W':
user_stats.add((user, 'List' + prefix.title()))
user_stats.add((user, 'Read' + prefix.title()))
user_stats.add((user, 'Write' + prefix.title()))
users_arns = {}
for user_name, section in user_stats:
section = statements[section]
if user_name not in users_arns:
user = iam.User(user_name)
users_arns[user.name] = user
else:
user = users_arns[user_name]
section['Principal']['AWS'].append(user.arn)
for sid in list(statements.keys()):
if not statements[sid]['Principal']['AWS']:
del statements[sid]
return json.dumps(dict(Version="2012-10-17", Statement=list(statements.values())), indent=4) | [
"def",
"bucket_dict_to_policy",
"(",
"args",
",",
"bucket_name",
",",
"d",
")",
":",
"import",
"json",
"iam",
"=",
"get_resource",
"(",
"args",
",",
"'iam'",
")",
"statements",
"=",
"make_bucket_policy_statements",
"(",
"bucket_name",
")",
"user_stats",
"=",
"set",
"(",
")",
"# statement tripples",
"for",
"(",
"user",
",",
"prefix",
")",
",",
"mode",
"in",
"d",
".",
"items",
"(",
")",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'list'",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'bucket'",
")",
")",
"if",
"mode",
"==",
"'R'",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Read'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'List'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"elif",
"mode",
"==",
"'W'",
":",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'List'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Read'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"user_stats",
".",
"add",
"(",
"(",
"user",
",",
"'Write'",
"+",
"prefix",
".",
"title",
"(",
")",
")",
")",
"users_arns",
"=",
"{",
"}",
"for",
"user_name",
",",
"section",
"in",
"user_stats",
":",
"section",
"=",
"statements",
"[",
"section",
"]",
"if",
"user_name",
"not",
"in",
"users_arns",
":",
"user",
"=",
"iam",
".",
"User",
"(",
"user_name",
")",
"users_arns",
"[",
"user",
".",
"name",
"]",
"=",
"user",
"else",
":",
"user",
"=",
"users_arns",
"[",
"user_name",
"]",
"section",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
".",
"append",
"(",
"user",
".",
"arn",
")",
"for",
"sid",
"in",
"list",
"(",
"statements",
".",
"keys",
"(",
")",
")",
":",
"if",
"not",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
":",
"del",
"statements",
"[",
"sid",
"]",
"return",
"json",
".",
"dumps",
"(",
"dict",
"(",
"Version",
"=",
"\"2012-10-17\"",
",",
"Statement",
"=",
"list",
"(",
"statements",
".",
"values",
"(",
")",
")",
")",
",",
"indent",
"=",
"4",
")"
] | Create a bucket policy document from a permissions dict.
The dictionary d maps (user, prefix) to 'R' or 'W'.
:param bucket_name:
:param d:
:return: | [
"Create",
"a",
"bucket",
"policy",
"document",
"from",
"a",
"permissions",
"dict",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L297-L346 |
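For reference, the (user, prefix) -> mode dictionary consumed by bucket_dict_to_policy (and produced by bucket_policy_to_dict in the next entry) looks like the sketch below; the user names, bucket and prefixes are invented, and the statement Sids mentioned in the comment assume those prefixes appear in TOP_LEVEL_DIRS.

# Invented permissions dict in the shape these helpers exchange.
perms = {
    ("alice", "public"): "R",    # alice: read + list on public/
    ("bob", "uploads"): "W",     # bob: read + write + list on uploads/
}
# bucket_dict_to_policy(args, 'example-bucket', perms) would emit a policy whose
# ReadPublic/ListPublic statements carry alice's ARN and whose
# ReadUploads/WriteUploads/ListUploads statements carry bob's ARN.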
Metatab/metapack | metapack/cli/metaaws.py | bucket_policy_to_dict | def bucket_policy_to_dict(policy):
"""Produce a dictionary of read, write permissions for an existing bucket policy document"""
import json
if not isinstance(policy, dict):
policy = json.loads(policy)
statements = {s['Sid']: s for s in policy['Statement']}
d = {}
for rw in ('Read', 'Write'):
for prefix in TOP_LEVEL_DIRS:
sid = rw.title() + prefix.title()
if sid in statements:
if isinstance(statements[sid]['Principal']['AWS'], list):
for principal in statements[sid]['Principal']['AWS']:
user_name = principal.split('/').pop()
d[(user_name, prefix)] = rw[0]
else:
user_name = statements[sid]['Principal']['AWS'].split('/').pop()
d[(user_name, prefix)] = rw[0]
return d | python | def bucket_policy_to_dict(policy):
"""Produce a dictionary of read, write permissions for an existing bucket policy document"""
import json
if not isinstance(policy, dict):
policy = json.loads(policy)
statements = {s['Sid']: s for s in policy['Statement']}
d = {}
for rw in ('Read', 'Write'):
for prefix in TOP_LEVEL_DIRS:
sid = rw.title() + prefix.title()
if sid in statements:
if isinstance(statements[sid]['Principal']['AWS'], list):
for principal in statements[sid]['Principal']['AWS']:
user_name = principal.split('/').pop()
d[(user_name, prefix)] = rw[0]
else:
user_name = statements[sid]['Principal']['AWS'].split('/').pop()
d[(user_name, prefix)] = rw[0]
return d | [
"def",
"bucket_policy_to_dict",
"(",
"policy",
")",
":",
"import",
"json",
"if",
"not",
"isinstance",
"(",
"policy",
",",
"dict",
")",
":",
"policy",
"=",
"json",
".",
"loads",
"(",
"policy",
")",
"statements",
"=",
"{",
"s",
"[",
"'Sid'",
"]",
":",
"s",
"for",
"s",
"in",
"policy",
"[",
"'Statement'",
"]",
"}",
"d",
"=",
"{",
"}",
"for",
"rw",
"in",
"(",
"'Read'",
",",
"'Write'",
")",
":",
"for",
"prefix",
"in",
"TOP_LEVEL_DIRS",
":",
"sid",
"=",
"rw",
".",
"title",
"(",
")",
"+",
"prefix",
".",
"title",
"(",
")",
"if",
"sid",
"in",
"statements",
":",
"if",
"isinstance",
"(",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
",",
"list",
")",
":",
"for",
"principal",
"in",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
":",
"user_name",
"=",
"principal",
".",
"split",
"(",
"'/'",
")",
".",
"pop",
"(",
")",
"d",
"[",
"(",
"user_name",
",",
"prefix",
")",
"]",
"=",
"rw",
"[",
"0",
"]",
"else",
":",
"user_name",
"=",
"statements",
"[",
"sid",
"]",
"[",
"'Principal'",
"]",
"[",
"'AWS'",
"]",
".",
"split",
"(",
"'/'",
")",
".",
"pop",
"(",
")",
"d",
"[",
"(",
"user_name",
",",
"prefix",
")",
"]",
"=",
"rw",
"[",
"0",
"]",
"return",
"d"
] | Produce a dictionary of read, write permissions for an existing bucket policy document | [
"Produce",
"a",
"dictionary",
"of",
"read",
"write",
"permissions",
"for",
"an",
"existing",
"bucket",
"policy",
"document"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L349-L375 |
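A mechanical trace of bucket_policy_to_dict, assuming 'public' is one of the TOP_LEVEL_DIRS prefixes; the ARN and user name are invented.

policy = {
    "Statement": [
        {"Sid": "ReadPublic",
         "Principal": {"AWS": ["arn:aws:iam::123456789012:user/alice"]}},
    ]
}
# bucket_policy_to_dict(policy) -> {('alice', 'public'): 'R'}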
Metatab/metapack | metapack/cli/metaaws.py | get_iam_account | def get_iam_account(l, args, user_name):
"""Return the local Account for a user name, by fetching User and looking up
the arn. """
iam = get_resource(args, 'iam')
user = iam.User(user_name)
user.load()
return l.find_or_new_account(user.arn) | python | def get_iam_account(l, args, user_name):
"""Return the local Account for a user name, by fetching User and looking up
the arn. """
iam = get_resource(args, 'iam')
user = iam.User(user_name)
user.load()
return l.find_or_new_account(user.arn) | [
"def",
"get_iam_account",
"(",
"l",
",",
"args",
",",
"user_name",
")",
":",
"iam",
"=",
"get_resource",
"(",
"args",
",",
"'iam'",
")",
"user",
"=",
"iam",
".",
"User",
"(",
"user_name",
")",
"user",
".",
"load",
"(",
")",
"return",
"l",
".",
"find_or_new_account",
"(",
"user",
".",
"arn",
")"
] | Return the local Account for a user name, by fetching User and looking up
the arn. | [
"Return",
"the",
"local",
"Account",
"for",
"a",
"user",
"name",
"by",
"fetching",
"User",
"and",
"looking",
"up",
"the",
"arn",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metaaws.py#L540-L548 |
boazmohar/pySparkUtils | pySparkUtils/SVD.py | getSVD | def getSVD(data, k, getComponents=False, getS=False, normalization='mean'):
""" Wrapper for computeSVD that will normalize and handle a Thunder Images object
:param data: Thunder Images object
:param k: number of components to keep
:param getComponents: will return the components if true, otherwise will return None
:returns: projections, components, s
"""
if normalization == 'nanmean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x)))
elif normalization == 'mean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean()))
    elif normalization == 'zscore':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten())))
elif normalization is None:
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten()))
else:
raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization)
mat = RowMatrix(data2)
mat.rows.cache()
mat.rows.count()
svd = compute_svd(row_matrix=mat, k=k, compute_u=False)
if getComponents:
components = svd.call("V").toArray()
components = components.transpose(1, 0).reshape((k,) + data.shape[1:])
else:
components = None
projection = np.array(RowMatrix_new(data2).multiply(svd.call("V")).rows.collect())
if getS:
s = svd.call("s").toArray()
else:
s = None
return projection, components, s | python | def getSVD(data, k, getComponents=False, getS=False, normalization='mean'):
""" Wrapper for computeSVD that will normalize and handle a Thunder Images object
:param data: Thunder Images object
:param k: number of components to keep
:param getComponents: will return the components if true, otherwise will return None
:returns: projections, components, s
"""
if normalization == 'nanmean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - np.nanmean(x)))
elif normalization == 'mean':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten() - x.mean()))
    elif normalization == 'zscore':
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(zscore(x.flatten())))
elif normalization is None:
data2 = data.tordd().sortByKey().values().map(lambda x: _convert_to_vector(x.flatten()))
else:
raise ValueError('Normalization should be one of: mean, nanmean, zscore, None. Got: %s' % normalization)
mat = RowMatrix(data2)
mat.rows.cache()
mat.rows.count()
svd = compute_svd(row_matrix=mat, k=k, compute_u=False)
if getComponents:
components = svd.call("V").toArray()
components = components.transpose(1, 0).reshape((k,) + data.shape[1:])
else:
components = None
projection = np.array(RowMatrix_new(data2).multiply(svd.call("V")).rows.collect())
if getS:
s = svd.call("s").toArray()
else:
s = None
return projection, components, s | [
"def",
"getSVD",
"(",
"data",
",",
"k",
",",
"getComponents",
"=",
"False",
",",
"getS",
"=",
"False",
",",
"normalization",
"=",
"'mean'",
")",
":",
"if",
"normalization",
"==",
"'nanmean'",
":",
"data2",
"=",
"data",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"values",
"(",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"_convert_to_vector",
"(",
"x",
".",
"flatten",
"(",
")",
"-",
"np",
".",
"nanmean",
"(",
"x",
")",
")",
")",
"elif",
"normalization",
"==",
"'mean'",
":",
"data2",
"=",
"data",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"values",
"(",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"_convert_to_vector",
"(",
"x",
".",
"flatten",
"(",
")",
"-",
"x",
".",
"mean",
"(",
")",
")",
")",
"elif",
"normalization",
"is",
"'zscore'",
":",
"data2",
"=",
"data",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"values",
"(",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"_convert_to_vector",
"(",
"zscore",
"(",
"x",
".",
"flatten",
"(",
")",
")",
")",
")",
"elif",
"normalization",
"is",
"None",
":",
"data2",
"=",
"data",
".",
"tordd",
"(",
")",
".",
"sortByKey",
"(",
")",
".",
"values",
"(",
")",
".",
"map",
"(",
"lambda",
"x",
":",
"_convert_to_vector",
"(",
"x",
".",
"flatten",
"(",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Normalization should be one of: mean, nanmean, zscore, None. Got: %s'",
"%",
"normalization",
")",
"mat",
"=",
"RowMatrix",
"(",
"data2",
")",
"mat",
".",
"rows",
".",
"cache",
"(",
")",
"mat",
".",
"rows",
".",
"count",
"(",
")",
"svd",
"=",
"compute_svd",
"(",
"row_matrix",
"=",
"mat",
",",
"k",
"=",
"k",
",",
"compute_u",
"=",
"False",
")",
"if",
"getComponents",
":",
"components",
"=",
"svd",
".",
"call",
"(",
"\"V\"",
")",
".",
"toArray",
"(",
")",
"components",
"=",
"components",
".",
"transpose",
"(",
"1",
",",
"0",
")",
".",
"reshape",
"(",
"(",
"k",
",",
")",
"+",
"data",
".",
"shape",
"[",
"1",
":",
"]",
")",
"else",
":",
"components",
"=",
"None",
"projection",
"=",
"np",
".",
"array",
"(",
"RowMatrix_new",
"(",
"data2",
")",
".",
"multiply",
"(",
"svd",
".",
"call",
"(",
"\"V\"",
")",
")",
".",
"rows",
".",
"collect",
"(",
")",
")",
"if",
"getS",
":",
"s",
"=",
"svd",
".",
"call",
"(",
"\"s\"",
")",
".",
"toArray",
"(",
")",
"else",
":",
"s",
"=",
"None",
"return",
"projection",
",",
"components",
",",
"s"
] | Wrapper for computeSVD that will normalize and handle a Thunder Images object
:param data: Thunder Images object
:param k: number of components to keep
:param getComponents: will return the components if true, otherwise will return None
:returns: projections, components, s | [
"Wrapper",
"for",
"computeSVD",
"that",
"will",
"normalize",
"and",
"handle",
"a",
"Thunder",
"Images",
"object"
] | train | https://github.com/boazmohar/pySparkUtils/blob/5891b75327eb8b91af8558642edf7af82c5991b1/pySparkUtils/SVD.py#L77-L111 |
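A local, NumPy-only sketch of what getSVD computes with the default 'mean' normalization: flatten each image, subtract its mean, stack the results as rows, and project onto the top-k right singular vectors. This is an independent illustration of the numerics, not the Spark code path; the array shapes are invented.

import numpy as np

data = np.random.rand(100, 8, 8)                    # 100 made-up 8x8 "images"
k = 5
rows = data.reshape(len(data), -1)
rows = rows - rows.mean(axis=1, keepdims=True)      # the 'mean' normalization
U, s, Vt = np.linalg.svd(rows, full_matrices=False)
projection = rows @ Vt[:k].T                        # analogue of the returned projection
components = Vt[:k].reshape((k,) + data.shape[1:])  # analogue of the returned components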
NicolasLM/spinach | spinach/utils.py | human_duration | def human_duration(duration_seconds: float) -> str:
"""Convert a duration in seconds into a human friendly string."""
if duration_seconds < 0.001:
return '0 ms'
if duration_seconds < 1:
return '{} ms'.format(int(duration_seconds * 1000))
return '{} s'.format(int(duration_seconds)) | python | def human_duration(duration_seconds: float) -> str:
"""Convert a duration in seconds into a human friendly string."""
if duration_seconds < 0.001:
return '0 ms'
if duration_seconds < 1:
return '{} ms'.format(int(duration_seconds * 1000))
return '{} s'.format(int(duration_seconds)) | [
"def",
"human_duration",
"(",
"duration_seconds",
":",
"float",
")",
"->",
"str",
":",
"if",
"duration_seconds",
"<",
"0.001",
":",
"return",
"'0 ms'",
"if",
"duration_seconds",
"<",
"1",
":",
"return",
"'{} ms'",
".",
"format",
"(",
"int",
"(",
"duration_seconds",
"*",
"1000",
")",
")",
"return",
"'{} s'",
".",
"format",
"(",
"int",
"(",
"duration_seconds",
")",
")"
] | Convert a duration in seconds into a human friendly string. | [
"Convert",
"a",
"duration",
"in",
"seconds",
"into",
"a",
"human",
"friendly",
"string",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L13-L19 |
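A few illustrative inputs and outputs for human_duration; the import path follows the file location shown above.

from spinach.utils import human_duration

assert human_duration(0.0002) == '0 ms'
assert human_duration(0.5) == '500 ms'
assert human_duration(12.7) == '12 s'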
NicolasLM/spinach | spinach/utils.py | call_with_retry | def call_with_retry(func: Callable, exceptions, max_retries: int,
logger: Logger, *args, **kwargs):
"""Call a function and retry it on failure."""
attempt = 0
while True:
try:
return func(*args, **kwargs)
except exceptions as e:
attempt += 1
if attempt >= max_retries:
raise
delay = exponential_backoff(attempt, cap=60)
logger.warning('%s: retrying in %s', e, delay)
time.sleep(delay.total_seconds()) | python | def call_with_retry(func: Callable, exceptions, max_retries: int,
logger: Logger, *args, **kwargs):
"""Call a function and retry it on failure."""
attempt = 0
while True:
try:
return func(*args, **kwargs)
except exceptions as e:
attempt += 1
if attempt >= max_retries:
raise
delay = exponential_backoff(attempt, cap=60)
logger.warning('%s: retrying in %s', e, delay)
time.sleep(delay.total_seconds()) | [
"def",
"call_with_retry",
"(",
"func",
":",
"Callable",
",",
"exceptions",
",",
"max_retries",
":",
"int",
",",
"logger",
":",
"Logger",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"attempt",
"=",
"0",
"while",
"True",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"exceptions",
"as",
"e",
":",
"attempt",
"+=",
"1",
"if",
"attempt",
">=",
"max_retries",
":",
"raise",
"delay",
"=",
"exponential_backoff",
"(",
"attempt",
",",
"cap",
"=",
"60",
")",
"logger",
".",
"warning",
"(",
"'%s: retrying in %s'",
",",
"e",
",",
"delay",
")",
"time",
".",
"sleep",
"(",
"delay",
".",
"total_seconds",
"(",
")",
")"
] | Call a function and retry it on failure. | [
"Call",
"a",
"function",
"and",
"retry",
"it",
"on",
"failure",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L49-L63 |
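A usage sketch for call_with_retry; the flaky function, logger name and retry count are invented, and the import path follows the file location shown above.

import logging
import random
from spinach.utils import call_with_retry

logger = logging.getLogger("example")

def flaky():
    # Fails roughly half the time to exercise the retry loop.
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"

result = call_with_retry(flaky, ConnectionError, 5, logger)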
NicolasLM/spinach | spinach/utils.py | exponential_backoff | def exponential_backoff(attempt: int, cap: int=1200) -> timedelta:
"""Calculate a delay to retry using an exponential backoff algorithm.
It is an exponential backoff with random jitter to prevent failures
from being retried at the same time. It is a good fit for most
applications.
:arg attempt: the number of attempts made
:arg cap: maximum delay, defaults to 20 minutes
"""
base = 3
temp = min(base * 2 ** attempt, cap)
return timedelta(seconds=temp / 2 + random.randint(0, temp / 2)) | python | def exponential_backoff(attempt: int, cap: int=1200) -> timedelta:
"""Calculate a delay to retry using an exponential backoff algorithm.
It is an exponential backoff with random jitter to prevent failures
from being retried at the same time. It is a good fit for most
applications.
:arg attempt: the number of attempts made
:arg cap: maximum delay, defaults to 20 minutes
"""
base = 3
temp = min(base * 2 ** attempt, cap)
return timedelta(seconds=temp / 2 + random.randint(0, temp / 2)) | [
"def",
"exponential_backoff",
"(",
"attempt",
":",
"int",
",",
"cap",
":",
"int",
"=",
"1200",
")",
"->",
"timedelta",
":",
"base",
"=",
"3",
"temp",
"=",
"min",
"(",
"base",
"*",
"2",
"**",
"attempt",
",",
"cap",
")",
"return",
"timedelta",
"(",
"seconds",
"=",
"temp",
"/",
"2",
"+",
"random",
".",
"randint",
"(",
"0",
",",
"temp",
"/",
"2",
")",
")"
] | Calculate a delay to retry using an exponential backoff algorithm.
It is an exponential backoff with random jitter to prevent failures
from being retried at the same time. It is a good fit for most
applications.
:arg attempt: the number of attempts made
:arg cap: maximum delay, defaults to 20 minutes | [
"Calculate",
"a",
"delay",
"to",
"retry",
"using",
"an",
"exponential",
"backoff",
"algorithm",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L66-L78 |
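A standalone illustration of the delay schedule above, without importing spinach: each attempt draws a delay uniformly between temp/2 and temp seconds, where temp = min(3 * 2**attempt, cap).

cap = 1200
for attempt in range(1, 6):
    temp = min(3 * 2 ** attempt, cap)
    print(attempt, "delay between", temp // 2, "and", temp, "seconds")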
NicolasLM/spinach | spinach/utils.py | handle_sigterm | def handle_sigterm():
"""Handle SIGTERM like a normal SIGINT (KeyboardInterrupt).
By default Docker sends a SIGTERM for stopping containers, giving them
time to terminate before getting killed. If a process does not catch this
signal and does nothing, it just gets killed.
Handling SIGTERM like SIGINT allows to gracefully terminate both
interactively with ^C and with `docker stop`.
This context manager restores the default SIGTERM behavior when exiting.
"""
original_sigterm_handler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, signal.default_int_handler)
try:
yield
finally:
signal.signal(signal.SIGTERM, original_sigterm_handler) | python | def handle_sigterm():
"""Handle SIGTERM like a normal SIGINT (KeyboardInterrupt).
By default Docker sends a SIGTERM for stopping containers, giving them
time to terminate before getting killed. If a process does not catch this
signal and does nothing, it just gets killed.
Handling SIGTERM like SIGINT allows to gracefully terminate both
interactively with ^C and with `docker stop`.
This context manager restores the default SIGTERM behavior when exiting.
"""
original_sigterm_handler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, signal.default_int_handler)
try:
yield
finally:
signal.signal(signal.SIGTERM, original_sigterm_handler) | [
"def",
"handle_sigterm",
"(",
")",
":",
"original_sigterm_handler",
"=",
"signal",
".",
"getsignal",
"(",
"signal",
".",
"SIGTERM",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"signal",
".",
"default_int_handler",
")",
"try",
":",
"yield",
"finally",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGTERM",
",",
"original_sigterm_handler",
")"
] | Handle SIGTERM like a normal SIGINT (KeyboardInterrupt).
By default Docker sends a SIGTERM for stopping containers, giving them
time to terminate before getting killed. If a process does not catch this
signal and does nothing, it just gets killed.
Handling SIGTERM like SIGINT allows to gracefully terminate both
interactively with ^C and with `docker stop`.
This context manager restores the default SIGTERM behavior when exiting. | [
"Handle",
"SIGTERM",
"like",
"a",
"normal",
"SIGINT",
"(",
"KeyboardInterrupt",
")",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/utils.py#L82-L99 |
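A usage sketch for handle_sigterm, which is used as a context manager (the @contextmanager decorator is not visible in the extract above); the worker function is a placeholder.

from spinach.utils import handle_sigterm

def run_worker():
    pass   # placeholder for the long-running work being wrapped

with handle_sigterm():
    run_worker()   # a SIGTERM arriving here now raises KeyboardInterrupt, like ^C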
project-rig/rig | rig/type_casts.py | float_to_fp | def float_to_fp(signed, n_bits, n_frac):
"""Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
"""
# Calculate the maximum and minimum values
if signed:
max_v = (1 << (n_bits - 1)) - 1
min_v = -max_v - 1
else:
min_v = 0
max_v = (1 << n_bits) - 1
# Compute the scale
scale = 2.0**n_frac
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
int_val = int(scale * value)
return max((min(max_v, int_val), min_v))
return bitsk | python | def float_to_fp(signed, n_bits, n_frac):
"""Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
"""
# Calculate the maximum and minimum values
if signed:
max_v = (1 << (n_bits - 1)) - 1
min_v = -max_v - 1
else:
min_v = 0
max_v = (1 << n_bits) - 1
# Compute the scale
scale = 2.0**n_frac
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
int_val = int(scale * value)
return max((min(max_v, int_val), min_v))
return bitsk | [
"def",
"float_to_fp",
"(",
"signed",
",",
"n_bits",
",",
"n_frac",
")",
":",
"# Calculate the maximum and minimum values",
"if",
"signed",
":",
"max_v",
"=",
"(",
"1",
"<<",
"(",
"n_bits",
"-",
"1",
")",
")",
"-",
"1",
"min_v",
"=",
"-",
"max_v",
"-",
"1",
"else",
":",
"min_v",
"=",
"0",
"max_v",
"=",
"(",
"1",
"<<",
"n_bits",
")",
"-",
"1",
"# Compute the scale",
"scale",
"=",
"2.0",
"**",
"n_frac",
"def",
"bitsk",
"(",
"value",
")",
":",
"\"\"\"Convert a floating point value to a fixed point value.\n\n Parameters\n ----------\n value : float\n The value to convert.\n \"\"\"",
"int_val",
"=",
"int",
"(",
"scale",
"*",
"value",
")",
"return",
"max",
"(",
"(",
"min",
"(",
"max_v",
",",
"int_val",
")",
",",
"min_v",
")",
")",
"return",
"bitsk"
] | Return a function to convert a floating point value to a fixed point
value.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
>>> hex(int(s34(0.5)))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fp(False, 8, 4) # Unsigned 4.4
>>> hex(int(q34(-0.5)))
'0x0'
>>> hex(int(q34(15.0)))
'0xf0'
>>> hex(int(q34(16.0)))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(int(float_to_fp(True, 8, 4)(-0.5))) # Signed
'-0x8'
>>> hex(int(float_to_fp(False, 8, 4)(-0.5))) # Unsigned
'0x0'
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation. | [
"Return",
"a",
"function",
"to",
"convert",
"a",
"floating",
"point",
"value",
"to",
"a",
"fixed",
"point",
"value",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/type_casts.py#L7-L70 |
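A small worked example extending the doctests above: with 4 fractional bits the scale is 2**4 = 16, so 2.75 maps to 44 (0x2c), and out-of-range inputs saturate at the 8-bit signed limits. The import path follows the file location shown above.

from rig.type_casts import float_to_fp

s34 = float_to_fp(signed=True, n_bits=8, n_frac=4)
assert int(s34(2.75)) == 0x2c     # 2.75 * 16 = 44
assert int(s34(100.0)) == 127     # saturates at the signed maximum
assert int(s34(-100.0)) == -128   # and at the signed minimum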
project-rig/rig | rig/type_casts.py | float_to_fix | def float_to_fix(signed, n_bits, n_frac):
"""**DEPRECATED** Return a function to convert a floating point value to a
fixed point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.float_to_fp`.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fix(signed=True, n_bits=8, n_frac=4)
>>> hex(s34(0.5))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fix(False, 8, 4) # Unsigned 4.4
>>> hex(q34(-0.5))
'0x0'
>>> hex(q34(15.0))
'0xf0'
>>> hex(q34(16.0))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(float_to_fix(True, 8, 4)(-0.5)) # Signed
'0xf8'
>>> hex(float_to_fix(False, 8, 4)(-0.5)) # Unsigned
'0x0'
.. note::
Regardless of the value of the `signed` parameter the returned
value is always an unsigned integer suitable for packing with the
struct packing chars `B`, `H`, `I` etc.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive).
"""
warnings.warn("float_to_fix() is deprecated, see float_to_fp",
DeprecationWarning)
mask = int(2**n_bits - 1)
min_v, max_v = validate_fp_params(signed, n_bits, n_frac)
# Saturate values
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
value = np.clip(value, min_v, max_v)
if value < 0:
fp_val = (1 << n_bits) + int(value * 2**n_frac)
else:
fp_val = int(value * 2**n_frac)
assert 0 <= fp_val < 1 << (n_bits + 1)
return fp_val & mask
return bitsk | python | def float_to_fix(signed, n_bits, n_frac):
"""**DEPRECATED** Return a function to convert a floating point value to a
fixed point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.float_to_fp`.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fix(signed=True, n_bits=8, n_frac=4)
>>> hex(s34(0.5))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fix(False, 8, 4) # Unsigned 4.4
>>> hex(q34(-0.5))
'0x0'
>>> hex(q34(15.0))
'0xf0'
>>> hex(q34(16.0))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(float_to_fix(True, 8, 4)(-0.5)) # Signed
'0xf8'
>>> hex(float_to_fix(False, 8, 4)(-0.5)) # Unsigned
'0x0'
.. note::
Regardless of the value of the `signed` parameter the returned
value is always an unsigned integer suitable for packing with the
struct packing chars `B`, `H`, `I` etc.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive).
"""
warnings.warn("float_to_fix() is deprecated, see float_to_fp",
DeprecationWarning)
mask = int(2**n_bits - 1)
min_v, max_v = validate_fp_params(signed, n_bits, n_frac)
# Saturate values
def bitsk(value):
"""Convert a floating point value to a fixed point value.
Parameters
----------
value : float
The value to convert.
"""
value = np.clip(value, min_v, max_v)
if value < 0:
fp_val = (1 << n_bits) + int(value * 2**n_frac)
else:
fp_val = int(value * 2**n_frac)
assert 0 <= fp_val < 1 << (n_bits + 1)
return fp_val & mask
return bitsk | [
"def",
"float_to_fix",
"(",
"signed",
",",
"n_bits",
",",
"n_frac",
")",
":",
"warnings",
".",
"warn",
"(",
"\"float_to_fix() is deprecated, see float_to_fp\"",
",",
"DeprecationWarning",
")",
"mask",
"=",
"int",
"(",
"2",
"**",
"n_bits",
"-",
"1",
")",
"min_v",
",",
"max_v",
"=",
"validate_fp_params",
"(",
"signed",
",",
"n_bits",
",",
"n_frac",
")",
"# Saturate values",
"def",
"bitsk",
"(",
"value",
")",
":",
"\"\"\"Convert a floating point value to a fixed point value.\n\n Parameters\n ----------\n value : float\n The value to convert.\n \"\"\"",
"value",
"=",
"np",
".",
"clip",
"(",
"value",
",",
"min_v",
",",
"max_v",
")",
"if",
"value",
"<",
"0",
":",
"fp_val",
"=",
"(",
"1",
"<<",
"n_bits",
")",
"+",
"int",
"(",
"value",
"*",
"2",
"**",
"n_frac",
")",
"else",
":",
"fp_val",
"=",
"int",
"(",
"value",
"*",
"2",
"**",
"n_frac",
")",
"assert",
"0",
"<=",
"fp_val",
"<",
"1",
"<<",
"(",
"n_bits",
"+",
"1",
")",
"return",
"fp_val",
"&",
"mask",
"return",
"bitsk"
] | **DEPRECATED** Return a function to convert a floating point value to a
fixed point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.float_to_fp`.
For example, a function to convert a float to a signed fractional
representation with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> s34 = float_to_fix(signed=True, n_bits=8, n_frac=4)
>>> hex(s34(0.5))
'0x8'
The fixed point conversion is saturating::
>>> q34 = float_to_fix(False, 8, 4) # Unsigned 4.4
>>> hex(q34(-0.5))
'0x0'
>>> hex(q34(15.0))
'0xf0'
>>> hex(q34(16.0))
'0xff'
Parameters
----------
signed : bool
Whether the values that are to be converted should be signed, or
clipped at zero.
>>> hex(float_to_fix(True, 8, 4)(-0.5)) # Signed
'0xf8'
>>> hex(float_to_fix(False, 8, 4)(-0.5)) # Unsigned
'0x0'
.. note::
Regardless of the value of the `signed` parameter the returned
value is always an unsigned integer suitable for packing with the
struct packing chars `B`, `H`, `I` etc.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive). | [
"**",
"DEPRECATED",
"**",
"Return",
"a",
"function",
"to",
"convert",
"a",
"floating",
"point",
"value",
"to",
"a",
"fixed",
"point",
"value",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/type_casts.py#L110-L195 |
project-rig/rig | rig/type_casts.py | fix_to_float | def fix_to_float(signed, n_bits, n_frac):
"""**DEPRECATED** Return a function to convert a fixed point value to a
floating point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.fp_to_float`.
For example, a function to convert from signed fractional representations
    with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> f = fix_to_float(True, 8, 4)
>>> f(0x08)
0.5
>>> f(0xf8)
-0.5
>>> f(0x88)
-7.5
Parameters
----------
signed : bool
Determines whether input values should be treated as signed or
otherwise, e.g.::
>>> fix_to_float(True, 8, 4)(0xfc)
-0.25
>>> fix_to_float(False, 8, 4)(0xf8)
15.5
The value accepted by the returned function should always be an
unsigned integer.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive).
"""
warnings.warn("fix_to_float() is deprecated, see fp_to_float",
DeprecationWarning)
validate_fp_params(signed, n_bits, n_frac)
def kbits(value):
"""Convert a fixed point value to a float.
Parameters
----------
value : int
The fix point value as an integer.
"""
if signed and value & (1 << (n_bits - 1)):
# If signed and negative
value -= (1 << n_bits)
# Unsigned or signed and positive
return float(value) / (2.0**n_frac)
return kbits | python | def fix_to_float(signed, n_bits, n_frac):
"""**DEPRECATED** Return a function to convert a fixed point value to a
floating point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.fp_to_float`.
For example, a function to convert from signed fractional representations
    with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> f = fix_to_float(True, 8, 4)
>>> f(0x08)
0.5
>>> f(0xf8)
-0.5
>>> f(0x88)
-7.5
Parameters
----------
signed : bool
Determines whether input values should be treated as signed or
otherwise, e.g.::
>>> fix_to_float(True, 8, 4)(0xfc)
-0.25
>>> fix_to_float(False, 8, 4)(0xf8)
15.5
The value accepted by the returned function should always be an
unsigned integer.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive).
"""
warnings.warn("fix_to_float() is deprecated, see fp_to_float",
DeprecationWarning)
validate_fp_params(signed, n_bits, n_frac)
def kbits(value):
"""Convert a fixed point value to a float.
Parameters
----------
value : int
The fix point value as an integer.
"""
if signed and value & (1 << (n_bits - 1)):
# If signed and negative
value -= (1 << n_bits)
# Unsigned or signed and positive
return float(value) / (2.0**n_frac)
return kbits | [
"def",
"fix_to_float",
"(",
"signed",
",",
"n_bits",
",",
"n_frac",
")",
":",
"warnings",
".",
"warn",
"(",
"\"fix_to_float() is deprecated, see fp_to_float\"",
",",
"DeprecationWarning",
")",
"validate_fp_params",
"(",
"signed",
",",
"n_bits",
",",
"n_frac",
")",
"def",
"kbits",
"(",
"value",
")",
":",
"\"\"\"Convert a fixed point value to a float.\n\n Parameters\n ----------\n value : int\n The fix point value as an integer.\n \"\"\"",
"if",
"signed",
"and",
"value",
"&",
"(",
"1",
"<<",
"(",
"n_bits",
"-",
"1",
")",
")",
":",
"# If signed and negative",
"value",
"-=",
"(",
"1",
"<<",
"n_bits",
")",
"# Unsigned or signed and positive",
"return",
"float",
"(",
"value",
")",
"/",
"(",
"2.0",
"**",
"n_frac",
")",
"return",
"kbits"
] | **DEPRECATED** Return a function to convert a fixed point value to a
floating point value.
.. warning::
This function is deprecated in favour of :py:meth:`~.fp_to_float`.
For example, a function to convert from signed fractional representations
    with 8 bits overall and 4 fractional bits (S3.4) can be
constructed and used with::
>>> f = fix_to_float(True, 8, 4)
>>> f(0x08)
0.5
>>> f(0xf8)
-0.5
>>> f(0x88)
-7.5
Parameters
----------
signed : bool
Determines whether input values should be treated as signed or
otherwise, e.g.::
>>> fix_to_float(True, 8, 4)(0xfc)
-0.25
>>> fix_to_float(False, 8, 4)(0xf8)
15.5
The value accepted by the returned function should always be an
unsigned integer.
n_bits : int
Total number of bits in the fixed-point representation (including sign
bit and fractional bits).
n_frac : int
Number of fractional bits in the fixed-point representation.
Raises
------
ValueError
If the number of bits specified is not possible. For example,
requiring more fractional bits than there are bits overall will result
in a `ValueError`::
>>> fix_to_float(False, 8, 9)
Traceback (most recent call last):
ValueError: n_frac: 9: Must be less than 8 (and positive). | [
"**",
"DEPRECATED",
"**",
"Return",
"a",
"function",
"to",
"convert",
"a",
"fixed",
"point",
"value",
"to",
"a",
"floating",
"point",
"value",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/type_casts.py#L198-L270 |
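A round-trip sketch tying the two deprecated helpers together, assuming the rig package from these records is installed (both emit a DeprecationWarning but still behave as documented).

from rig.type_casts import float_to_fix, fix_to_float

to_s34 = float_to_fix(signed=True, n_bits=8, n_frac=4)
from_s34 = fix_to_float(signed=True, n_bits=8, n_frac=4)

assert to_s34(-7.5) == 0x88            # the docstring example in reverse
assert from_s34(to_s34(0.5)) == 0.5    # 0.5 is exactly representable in S3.4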
happyleavesaoc/python-voobly | voobly/__init__.py | get_metadata_path | def get_metadata_path(name):
"""Get reference metadata file path."""
return pkg_resources.resource_filename('voobly', os.path.join(METADATA_PATH, '{}.json'.format(name))) | python | def get_metadata_path(name):
"""Get reference metadata file path."""
return pkg_resources.resource_filename('voobly', os.path.join(METADATA_PATH, '{}.json'.format(name))) | [
"def",
"get_metadata_path",
"(",
"name",
")",
":",
"return",
"pkg_resources",
".",
"resource_filename",
"(",
"'voobly'",
",",
"os",
".",
"path",
".",
"join",
"(",
"METADATA_PATH",
",",
"'{}.json'",
".",
"format",
"(",
"name",
")",
")",
")"
] | Get reference metadata file path. | [
"Get",
"reference",
"metadata",
"file",
"path",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L85-L87 |
happyleavesaoc/python-voobly | voobly/__init__.py | _save_cookies | def _save_cookies(requests_cookiejar, filename):
"""Save cookies to a file."""
with open(filename, 'wb') as handle:
pickle.dump(requests_cookiejar, handle) | python | def _save_cookies(requests_cookiejar, filename):
"""Save cookies to a file."""
with open(filename, 'wb') as handle:
pickle.dump(requests_cookiejar, handle) | [
"def",
"_save_cookies",
"(",
"requests_cookiejar",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"handle",
":",
"pickle",
".",
"dump",
"(",
"requests_cookiejar",
",",
"handle",
")"
] | Save cookies to a file. | [
"Save",
"cookies",
"to",
"a",
"file",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L100-L103 |
happyleavesaoc/python-voobly | voobly/__init__.py | _make_request | def _make_request(session, url, argument=None, params=None, raw=False):
"""Make a request to API endpoint."""
if not params:
params = {}
params['key'] = session.auth.key
try:
if argument:
request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument)
else:
request_url = '{}{}'.format(VOOBLY_API_URL, url)
resp = session.get(request_url, params=params)
except RequestException:
raise VooblyError('failed to connect')
if resp.text == 'bad-key':
raise VooblyError('bad api key')
elif resp.text == 'too-busy':
raise VooblyError('service too busy')
elif not resp.text:
raise VooblyError('no data returned')
if raw:
return resp.text
try:
return tablib.Dataset().load(resp.text).dict
except UnsupportedFormat:
raise VooblyError('unexpected error {}'.format(resp.text)) | python | def _make_request(session, url, argument=None, params=None, raw=False):
"""Make a request to API endpoint."""
if not params:
params = {}
params['key'] = session.auth.key
try:
if argument:
request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument)
else:
request_url = '{}{}'.format(VOOBLY_API_URL, url)
resp = session.get(request_url, params=params)
except RequestException:
raise VooblyError('failed to connect')
if resp.text == 'bad-key':
raise VooblyError('bad api key')
elif resp.text == 'too-busy':
raise VooblyError('service too busy')
elif not resp.text:
raise VooblyError('no data returned')
if raw:
return resp.text
try:
return tablib.Dataset().load(resp.text).dict
except UnsupportedFormat:
raise VooblyError('unexpected error {}'.format(resp.text)) | [
"def",
"_make_request",
"(",
"session",
",",
"url",
",",
"argument",
"=",
"None",
",",
"params",
"=",
"None",
",",
"raw",
"=",
"False",
")",
":",
"if",
"not",
"params",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"'key'",
"]",
"=",
"session",
".",
"auth",
".",
"key",
"try",
":",
"if",
"argument",
":",
"request_url",
"=",
"'{}{}{}{}'",
".",
"format",
"(",
"session",
".",
"auth",
".",
"base_url",
",",
"VOOBLY_API_URL",
",",
"url",
",",
"argument",
")",
"else",
":",
"request_url",
"=",
"'{}{}'",
".",
"format",
"(",
"VOOBLY_API_URL",
",",
"url",
")",
"resp",
"=",
"session",
".",
"get",
"(",
"request_url",
",",
"params",
"=",
"params",
")",
"except",
"RequestException",
":",
"raise",
"VooblyError",
"(",
"'failed to connect'",
")",
"if",
"resp",
".",
"text",
"==",
"'bad-key'",
":",
"raise",
"VooblyError",
"(",
"'bad api key'",
")",
"elif",
"resp",
".",
"text",
"==",
"'too-busy'",
":",
"raise",
"VooblyError",
"(",
"'service too busy'",
")",
"elif",
"not",
"resp",
".",
"text",
":",
"raise",
"VooblyError",
"(",
"'no data returned'",
")",
"if",
"raw",
":",
"return",
"resp",
".",
"text",
"try",
":",
"return",
"tablib",
".",
"Dataset",
"(",
")",
".",
"load",
"(",
"resp",
".",
"text",
")",
".",
"dict",
"except",
"UnsupportedFormat",
":",
"raise",
"VooblyError",
"(",
"'unexpected error {}'",
".",
"format",
"(",
"resp",
".",
"text",
")",
")"
] | Make a request to API endpoint. | [
"Make",
"a",
"request",
"to",
"API",
"endpoint",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L112-L136 |
happyleavesaoc/python-voobly | voobly/__init__.py | make_scrape_request | def make_scrape_request(session, url, mode='get', data=None):
"""Make a request to URL."""
try:
html = session.request(mode, url, data=data)
except RequestException:
raise VooblyError('failed to connect')
if SCRAPE_FETCH_ERROR in html.text:
raise VooblyError('not logged in')
if html.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in html.text:
raise VooblyError('page not found')
return bs4.BeautifulSoup(html.text, features='lxml') | python | def make_scrape_request(session, url, mode='get', data=None):
"""Make a request to URL."""
try:
html = session.request(mode, url, data=data)
except RequestException:
raise VooblyError('failed to connect')
if SCRAPE_FETCH_ERROR in html.text:
raise VooblyError('not logged in')
if html.status_code != 200 or SCRAPE_PAGE_NOT_FOUND in html.text:
raise VooblyError('page not found')
return bs4.BeautifulSoup(html.text, features='lxml') | [
"def",
"make_scrape_request",
"(",
"session",
",",
"url",
",",
"mode",
"=",
"'get'",
",",
"data",
"=",
"None",
")",
":",
"try",
":",
"html",
"=",
"session",
".",
"request",
"(",
"mode",
",",
"url",
",",
"data",
"=",
"data",
")",
"except",
"RequestException",
":",
"raise",
"VooblyError",
"(",
"'failed to connect'",
")",
"if",
"SCRAPE_FETCH_ERROR",
"in",
"html",
".",
"text",
":",
"raise",
"VooblyError",
"(",
"'not logged in'",
")",
"if",
"html",
".",
"status_code",
"!=",
"200",
"or",
"SCRAPE_PAGE_NOT_FOUND",
"in",
"html",
".",
"text",
":",
"raise",
"VooblyError",
"(",
"'page not found'",
")",
"return",
"bs4",
".",
"BeautifulSoup",
"(",
"html",
".",
"text",
",",
"features",
"=",
"'lxml'",
")"
] | Make a request to URL. | [
"Make",
"a",
"request",
"to",
"URL",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L139-L149 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_ladder | def get_ladder(session, ladder_id, user_id=None, user_ids=None, start=0, limit=LADDER_RESULT_LIMIT):
"""Get ladder."""
params = {
'start': start,
'limit': limit
}
if isinstance(ladder_id, str):
ladder_id = lookup_ladder_id(ladder_id)
if limit > LADDER_RESULT_LIMIT:
raise VooblyError('limited to 40 rows')
if user_ids:
params['uidlist'] = ','.join([str(uid) for uid in user_ids])
elif user_id:
params['uid'] = user_id
resp = _make_request(session, LADDER_URL, ladder_id, params)
if user_id:
if not resp:
raise VooblyError('user not ranked')
return resp[0]
return resp | python | def get_ladder(session, ladder_id, user_id=None, user_ids=None, start=0, limit=LADDER_RESULT_LIMIT):
"""Get ladder."""
params = {
'start': start,
'limit': limit
}
if isinstance(ladder_id, str):
ladder_id = lookup_ladder_id(ladder_id)
if limit > LADDER_RESULT_LIMIT:
raise VooblyError('limited to 40 rows')
if user_ids:
params['uidlist'] = ','.join([str(uid) for uid in user_ids])
elif user_id:
params['uid'] = user_id
resp = _make_request(session, LADDER_URL, ladder_id, params)
if user_id:
if not resp:
raise VooblyError('user not ranked')
return resp[0]
return resp | [
"def",
"get_ladder",
"(",
"session",
",",
"ladder_id",
",",
"user_id",
"=",
"None",
",",
"user_ids",
"=",
"None",
",",
"start",
"=",
"0",
",",
"limit",
"=",
"LADDER_RESULT_LIMIT",
")",
":",
"params",
"=",
"{",
"'start'",
":",
"start",
",",
"'limit'",
":",
"limit",
"}",
"if",
"isinstance",
"(",
"ladder_id",
",",
"str",
")",
":",
"ladder_id",
"=",
"lookup_ladder_id",
"(",
"ladder_id",
")",
"if",
"limit",
">",
"LADDER_RESULT_LIMIT",
":",
"raise",
"VooblyError",
"(",
"'limited to 40 rows'",
")",
"if",
"user_ids",
":",
"params",
"[",
"'uidlist'",
"]",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"uid",
")",
"for",
"uid",
"in",
"user_ids",
"]",
")",
"elif",
"user_id",
":",
"params",
"[",
"'uid'",
"]",
"=",
"user_id",
"resp",
"=",
"_make_request",
"(",
"session",
",",
"LADDER_URL",
",",
"ladder_id",
",",
"params",
")",
"if",
"user_id",
":",
"if",
"not",
"resp",
":",
"raise",
"VooblyError",
"(",
"'user not ranked'",
")",
"return",
"resp",
"[",
"0",
"]",
"return",
"resp"
] | Get ladder. | [
"Get",
"ladder",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L167-L186 |
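A usage sketch for the API helpers above; the API key and the ladder id (131) are placeholder values, and a ladder name string resolved by lookup_ladder_id() would work as well.

import voobly

session = voobly.get_session(key='YOUR_API_KEY')
rows = voobly.get_ladder(session, 131, start=0, limit=40)  # 40 is the per-call cap
for row in rows:
    print(row)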
happyleavesaoc/python-voobly | voobly/__init__.py | get_lobbies | def get_lobbies(session, game_id):
"""Get lobbies for a game."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = _make_request(session, LOBBY_URL, game_id)
for lobby in lobbies:
# pylint: disable=len-as-condition
if len(lobby['ladders']) > 0:
lobby['ladders'] = lobby['ladders'][:-1].split('|')
return lobbies | python | def get_lobbies(session, game_id):
"""Get lobbies for a game."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = _make_request(session, LOBBY_URL, game_id)
for lobby in lobbies:
# pylint: disable=len-as-condition
if len(lobby['ladders']) > 0:
lobby['ladders'] = lobby['ladders'][:-1].split('|')
return lobbies | [
"def",
"get_lobbies",
"(",
"session",
",",
"game_id",
")",
":",
"if",
"isinstance",
"(",
"game_id",
",",
"str",
")",
":",
"game_id",
"=",
"lookup_game_id",
"(",
"game_id",
")",
"lobbies",
"=",
"_make_request",
"(",
"session",
",",
"LOBBY_URL",
",",
"game_id",
")",
"for",
"lobby",
"in",
"lobbies",
":",
"# pylint: disable=len-as-condition",
"if",
"len",
"(",
"lobby",
"[",
"'ladders'",
"]",
")",
">",
"0",
":",
"lobby",
"[",
"'ladders'",
"]",
"=",
"lobby",
"[",
"'ladders'",
"]",
"[",
":",
"-",
"1",
"]",
".",
"split",
"(",
"'|'",
")",
"return",
"lobbies"
] | Get lobbies for a game. | [
"Get",
"lobbies",
"for",
"a",
"game",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L189-L198 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_user | def get_user(session, user_id):
"""Get user."""
try:
user_id = int(user_id)
except ValueError:
user_id = find_user(session, user_id)
resp = _make_request(session, USER_URL, user_id)
if not resp:
raise VooblyError('user id not found')
return resp[0] | python | def get_user(session, user_id):
"""Get user."""
try:
user_id = int(user_id)
except ValueError:
user_id = find_user(session, user_id)
resp = _make_request(session, USER_URL, user_id)
if not resp:
raise VooblyError('user id not found')
return resp[0] | [
"def",
"get_user",
"(",
"session",
",",
"user_id",
")",
":",
"try",
":",
"user_id",
"=",
"int",
"(",
"user_id",
")",
"except",
"ValueError",
":",
"user_id",
"=",
"find_user",
"(",
"session",
",",
"user_id",
")",
"resp",
"=",
"_make_request",
"(",
"session",
",",
"USER_URL",
",",
"user_id",
")",
"if",
"not",
"resp",
":",
"raise",
"VooblyError",
"(",
"'user id not found'",
")",
"return",
"resp",
"[",
"0",
"]"
] | Get user. | [
"Get",
"user",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L201-L210 |
happyleavesaoc/python-voobly | voobly/__init__.py | find_user | def find_user(session, username):
"""Find user by name - returns user ID."""
resp = _make_request(session, FIND_USER_URL, username)
if not resp:
raise VooblyError('user not found')
try:
return int(resp[0]['uid'])
except ValueError:
raise VooblyError('user not found') | python | def find_user(session, username):
"""Find user by name - returns user ID."""
resp = _make_request(session, FIND_USER_URL, username)
if not resp:
raise VooblyError('user not found')
try:
return int(resp[0]['uid'])
except ValueError:
raise VooblyError('user not found') | [
"def",
"find_user",
"(",
"session",
",",
"username",
")",
":",
"resp",
"=",
"_make_request",
"(",
"session",
",",
"FIND_USER_URL",
",",
"username",
")",
"if",
"not",
"resp",
":",
"raise",
"VooblyError",
"(",
"'user not found'",
")",
"try",
":",
"return",
"int",
"(",
"resp",
"[",
"0",
"]",
"[",
"'uid'",
"]",
")",
"except",
"ValueError",
":",
"raise",
"VooblyError",
"(",
"'user not found'",
")"
] | Find user by name - returns user ID. | [
"Find",
"user",
"by",
"name",
"-",
"returns",
"user",
"ID",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L213-L221 |
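A sketch combining the two lookups above; the player name and API key are placeholders.

import voobly

session = voobly.get_session(key='YOUR_API_KEY')
uid = voobly.find_user(session, 'SomePlayer')  # raises VooblyError if unknown
profile = voobly.get_user(session, uid)        # get_user() also accepts the name directly
print(uid, profile)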
happyleavesaoc/python-voobly | voobly/__init__.py | find_users | def find_users(session, *usernames):
"""Find multiple users by name."""
user_string = ','.join(usernames)
return _make_request(session, FIND_USERS_URL, user_string) | python | def find_users(session, *usernames):
"""Find multiple users by name."""
user_string = ','.join(usernames)
return _make_request(session, FIND_USERS_URL, user_string) | [
"def",
"find_users",
"(",
"session",
",",
"*",
"usernames",
")",
":",
"user_string",
"=",
"','",
".",
"join",
"(",
"usernames",
")",
"return",
"_make_request",
"(",
"session",
",",
"FIND_USERS_URL",
",",
"user_string",
")"
] | Find multiple users by name. | [
"Find",
"multiple",
"users",
"by",
"name",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L224-L227 |
happyleavesaoc/python-voobly | voobly/__init__.py | user | def user(session, uid, ladder_ids=None):
"""Get all possible user info by name."""
data = get_user(session, uid)
resp = dict(data)
if not ladder_ids:
return resp
resp['ladders'] = {}
for ladder_id in ladder_ids:
if isinstance(ladder_id, str):
ladder_id = lookup_ladder_id(ladder_id)
try:
ladder_data = dict(get_ladder(session, ladder_id, user_id=uid))
resp['ladders'][ladder_id] = ladder_data
except VooblyError:
# No ranking on ladder
pass
return resp | python | def user(session, uid, ladder_ids=None):
"""Get all possible user info by name."""
data = get_user(session, uid)
resp = dict(data)
if not ladder_ids:
return resp
resp['ladders'] = {}
for ladder_id in ladder_ids:
if isinstance(ladder_id, str):
ladder_id = lookup_ladder_id(ladder_id)
try:
ladder_data = dict(get_ladder(session, ladder_id, user_id=uid))
resp['ladders'][ladder_id] = ladder_data
except VooblyError:
# No ranking on ladder
pass
return resp | [
"def",
"user",
"(",
"session",
",",
"uid",
",",
"ladder_ids",
"=",
"None",
")",
":",
"data",
"=",
"get_user",
"(",
"session",
",",
"uid",
")",
"resp",
"=",
"dict",
"(",
"data",
")",
"if",
"not",
"ladder_ids",
":",
"return",
"resp",
"resp",
"[",
"'ladders'",
"]",
"=",
"{",
"}",
"for",
"ladder_id",
"in",
"ladder_ids",
":",
"if",
"isinstance",
"(",
"ladder_id",
",",
"str",
")",
":",
"ladder_id",
"=",
"lookup_ladder_id",
"(",
"ladder_id",
")",
"try",
":",
"ladder_data",
"=",
"dict",
"(",
"get_ladder",
"(",
"session",
",",
"ladder_id",
",",
"user_id",
"=",
"uid",
")",
")",
"resp",
"[",
"'ladders'",
"]",
"[",
"ladder_id",
"]",
"=",
"ladder_data",
"except",
"VooblyError",
":",
"# No ranking on ladder",
"pass",
"return",
"resp"
] | Get all possible user info by name. | [
"Get",
"all",
"possible",
"user",
"info",
"by",
"name",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L236-L252 |
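A sketch of the combined profile call above; the user id and ladder ids are placeholders, and ladders the player is not ranked on are silently omitted, as in the code.

import voobly

session = voobly.get_session(key='YOUR_API_KEY')
info = voobly.user(session, 123456, ladder_ids=[131, 132])
print(info.get('ladders', {}))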
happyleavesaoc/python-voobly | voobly/__init__.py | ladders | def ladders(session, game_id):
"""Get a list of ladder IDs."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = get_lobbies(session, game_id)
ladder_ids = set()
for lobby in lobbies:
ladder_ids |= set(lobby['ladders'])
return list(ladder_ids) | python | def ladders(session, game_id):
"""Get a list of ladder IDs."""
if isinstance(game_id, str):
game_id = lookup_game_id(game_id)
lobbies = get_lobbies(session, game_id)
ladder_ids = set()
for lobby in lobbies:
ladder_ids |= set(lobby['ladders'])
return list(ladder_ids) | [
"def",
"ladders",
"(",
"session",
",",
"game_id",
")",
":",
"if",
"isinstance",
"(",
"game_id",
",",
"str",
")",
":",
"game_id",
"=",
"lookup_game_id",
"(",
"game_id",
")",
"lobbies",
"=",
"get_lobbies",
"(",
"session",
",",
"game_id",
")",
"ladder_ids",
"=",
"set",
"(",
")",
"for",
"lobby",
"in",
"lobbies",
":",
"ladder_ids",
"|=",
"set",
"(",
"lobby",
"[",
"'ladders'",
"]",
")",
"return",
"list",
"(",
"ladder_ids",
")"
] | Get a list of ladder IDs. | [
"Get",
"a",
"list",
"of",
"ladder",
"IDs",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L255-L263 |
happyleavesaoc/python-voobly | voobly/__init__.py | authenticated | def authenticated(function):
"""Re-authenticate if session expired."""
def wrapped(session, *args, **kwargs):
"""Wrap function."""
try:
return function(session, *args, **kwargs)
except VooblyError:
with session.cache_disabled():
_LOGGER.info("attempted to access page before login")
login(session)
return function(session, *args, **kwargs)
return wrapped | python | def authenticated(function):
"""Re-authenticate if session expired."""
def wrapped(session, *args, **kwargs):
"""Wrap function."""
try:
return function(session, *args, **kwargs)
except VooblyError:
with session.cache_disabled():
_LOGGER.info("attempted to access page before login")
login(session)
return function(session, *args, **kwargs)
return wrapped | [
"def",
"authenticated",
"(",
"function",
")",
":",
"def",
"wrapped",
"(",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap function.\"\"\"",
"try",
":",
"return",
"function",
"(",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"VooblyError",
":",
"with",
"session",
".",
"cache_disabled",
"(",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"attempted to access page before login\"",
")",
"login",
"(",
"session",
")",
"return",
"function",
"(",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | Re-authenticate if session expired. | [
"Re",
"-",
"authenticate",
"if",
"session",
"expired",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L266-L277 |
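A sketch of how the decorator above is intended to be used: wrap a scraping helper so that an expired session triggers a single login() retry. fetch_profile_page and its URL layout are illustrative, not part of the library.

from voobly import authenticated, make_scrape_request

@authenticated
def fetch_profile_page(session, user_id):
    url = '{}/profile/view/{}'.format(session.auth.base_url, user_id)  # assumed URL layout
    return make_scrape_request(session, url)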
happyleavesaoc/python-voobly | voobly/__init__.py | get_clan_matches | def get_clan_matches(session, subdomain, clan_id, from_timestamp=None, limit=None):
"""Get recent matches by clan."""
return get_recent_matches(session, 'https://{}.voobly.com/{}/{}/0'.format(
subdomain, TEAM_MATCHES_URL, clan_id), from_timestamp, limit) | python | def get_clan_matches(session, subdomain, clan_id, from_timestamp=None, limit=None):
"""Get recent matches by clan."""
return get_recent_matches(session, 'https://{}.voobly.com/{}/{}/0'.format(
subdomain, TEAM_MATCHES_URL, clan_id), from_timestamp, limit) | [
"def",
"get_clan_matches",
"(",
"session",
",",
"subdomain",
",",
"clan_id",
",",
"from_timestamp",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"return",
"get_recent_matches",
"(",
"session",
",",
"'https://{}.voobly.com/{}/{}/0'",
".",
"format",
"(",
"subdomain",
",",
"TEAM_MATCHES_URL",
",",
"clan_id",
")",
",",
"from_timestamp",
",",
"limit",
")"
] | Get recent matches by clan. | [
"Get",
"recent",
"matches",
"by",
"clan",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L368-L371 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_user_matches | def get_user_matches(session, user_id, from_timestamp=None, limit=None):
"""Get recent matches by user."""
return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format(
session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit) | python | def get_user_matches(session, user_id, from_timestamp=None, limit=None):
"""Get recent matches by user."""
return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format(
session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit) | [
"def",
"get_user_matches",
"(",
"session",
",",
"user_id",
",",
"from_timestamp",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"return",
"get_recent_matches",
"(",
"session",
",",
"'{}{}/{}/Matches/games/matches/user/{}/0'",
".",
"format",
"(",
"session",
".",
"auth",
".",
"base_url",
",",
"PROFILE_URL",
",",
"user_id",
",",
"user_id",
")",
",",
"from_timestamp",
",",
"limit",
")"
] | Get recent matches by user. | [
"Get",
"recent",
"matches",
"by",
"user",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L375-L378 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_recent_matches | def get_recent_matches(session, init_url, from_timestamp, limit):
"""Get recently played user matches."""
if not from_timestamp:
from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1)
matches = []
page_id = 0
done = False
while not done and page_id < MAX_MATCH_PAGE_ID:
url = '{}/{}'.format(init_url, page_id)
parsed = make_scrape_request(session, url)
for row in parsed.find('table').find_all('tr')[1:]:
cols = row.find_all('td')
played_at = dateparser.parse(cols[2].text)
match_id = int(cols[5].find('a').text[1:])
has_rec = cols[6].find('a').find('img')
if played_at < from_timestamp or (limit and len(matches) == limit):
done = True
break
if not has_rec:
continue
matches.append({
'timestamp': played_at,
'match_id': match_id
})
if not matches:
break
page_id += 1
return matches | python | def get_recent_matches(session, init_url, from_timestamp, limit):
"""Get recently played user matches."""
if not from_timestamp:
from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1)
matches = []
page_id = 0
done = False
while not done and page_id < MAX_MATCH_PAGE_ID:
url = '{}/{}'.format(init_url, page_id)
parsed = make_scrape_request(session, url)
for row in parsed.find('table').find_all('tr')[1:]:
cols = row.find_all('td')
played_at = dateparser.parse(cols[2].text)
match_id = int(cols[5].find('a').text[1:])
has_rec = cols[6].find('a').find('img')
if played_at < from_timestamp or (limit and len(matches) == limit):
done = True
break
if not has_rec:
continue
matches.append({
'timestamp': played_at,
'match_id': match_id
})
if not matches:
break
page_id += 1
return matches | [
"def",
"get_recent_matches",
"(",
"session",
",",
"init_url",
",",
"from_timestamp",
",",
"limit",
")",
":",
"if",
"not",
"from_timestamp",
":",
"from_timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"matches",
"=",
"[",
"]",
"page_id",
"=",
"0",
"done",
"=",
"False",
"while",
"not",
"done",
"and",
"page_id",
"<",
"MAX_MATCH_PAGE_ID",
":",
"url",
"=",
"'{}/{}'",
".",
"format",
"(",
"init_url",
",",
"page_id",
")",
"parsed",
"=",
"make_scrape_request",
"(",
"session",
",",
"url",
")",
"for",
"row",
"in",
"parsed",
".",
"find",
"(",
"'table'",
")",
".",
"find_all",
"(",
"'tr'",
")",
"[",
"1",
":",
"]",
":",
"cols",
"=",
"row",
".",
"find_all",
"(",
"'td'",
")",
"played_at",
"=",
"dateparser",
".",
"parse",
"(",
"cols",
"[",
"2",
"]",
".",
"text",
")",
"match_id",
"=",
"int",
"(",
"cols",
"[",
"5",
"]",
".",
"find",
"(",
"'a'",
")",
".",
"text",
"[",
"1",
":",
"]",
")",
"has_rec",
"=",
"cols",
"[",
"6",
"]",
".",
"find",
"(",
"'a'",
")",
".",
"find",
"(",
"'img'",
")",
"if",
"played_at",
"<",
"from_timestamp",
"or",
"(",
"limit",
"and",
"len",
"(",
"matches",
")",
"==",
"limit",
")",
":",
"done",
"=",
"True",
"break",
"if",
"not",
"has_rec",
":",
"continue",
"matches",
".",
"append",
"(",
"{",
"'timestamp'",
":",
"played_at",
",",
"'match_id'",
":",
"match_id",
"}",
")",
"if",
"not",
"matches",
":",
"break",
"page_id",
"+=",
"1",
"return",
"matches"
] | Get recently played user matches. | [
"Get",
"recently",
"played",
"user",
"matches",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L381-L408 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_ladder_matches | def get_ladder_matches(session, ladder_id, from_timestamp=None, limit=LADDER_MATCH_LIMIT):
"""Get recently played ladder matches."""
if not from_timestamp:
from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1)
matches = []
page_id = 0
done = False
i = 0
while not done and page_id < MAX_LADDER_PAGE_ID:
url = '{}{}/{}/{}'.format(session.auth.base_url, LADDER_MATCHES_URL, lookup_ladder_id(ladder_id), page_id)
parsed = make_scrape_request(session, url)
for row in parsed.find(text='Recent Matches').find_next('table').find_all('tr')[1:]:
cols = row.find_all('td')
played_at = dateparser.parse(cols[0].text)
match_id = int(cols[1].find('a').text[1:])
has_rec = cols[4].find('a').find('img')
if not has_rec:
continue
if played_at < from_timestamp or i >= limit:
done = True
break
matches.append({
'timestamp': played_at,
'match_id': match_id
})
i += 1
page_id += 1
return matches | python | def get_ladder_matches(session, ladder_id, from_timestamp=None, limit=LADDER_MATCH_LIMIT):
"""Get recently played ladder matches."""
if not from_timestamp:
from_timestamp = datetime.datetime.now() - datetime.timedelta(days=1)
matches = []
page_id = 0
done = False
i = 0
while not done and page_id < MAX_LADDER_PAGE_ID:
url = '{}{}/{}/{}'.format(session.auth.base_url, LADDER_MATCHES_URL, lookup_ladder_id(ladder_id), page_id)
parsed = make_scrape_request(session, url)
for row in parsed.find(text='Recent Matches').find_next('table').find_all('tr')[1:]:
cols = row.find_all('td')
played_at = dateparser.parse(cols[0].text)
match_id = int(cols[1].find('a').text[1:])
has_rec = cols[4].find('a').find('img')
if not has_rec:
continue
if played_at < from_timestamp or i >= limit:
done = True
break
matches.append({
'timestamp': played_at,
'match_id': match_id
})
i += 1
page_id += 1
return matches | [
"def",
"get_ladder_matches",
"(",
"session",
",",
"ladder_id",
",",
"from_timestamp",
"=",
"None",
",",
"limit",
"=",
"LADDER_MATCH_LIMIT",
")",
":",
"if",
"not",
"from_timestamp",
":",
"from_timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"matches",
"=",
"[",
"]",
"page_id",
"=",
"0",
"done",
"=",
"False",
"i",
"=",
"0",
"while",
"not",
"done",
"and",
"page_id",
"<",
"MAX_LADDER_PAGE_ID",
":",
"url",
"=",
"'{}{}/{}/{}'",
".",
"format",
"(",
"session",
".",
"auth",
".",
"base_url",
",",
"LADDER_MATCHES_URL",
",",
"lookup_ladder_id",
"(",
"ladder_id",
")",
",",
"page_id",
")",
"parsed",
"=",
"make_scrape_request",
"(",
"session",
",",
"url",
")",
"for",
"row",
"in",
"parsed",
".",
"find",
"(",
"text",
"=",
"'Recent Matches'",
")",
".",
"find_next",
"(",
"'table'",
")",
".",
"find_all",
"(",
"'tr'",
")",
"[",
"1",
":",
"]",
":",
"cols",
"=",
"row",
".",
"find_all",
"(",
"'td'",
")",
"played_at",
"=",
"dateparser",
".",
"parse",
"(",
"cols",
"[",
"0",
"]",
".",
"text",
")",
"match_id",
"=",
"int",
"(",
"cols",
"[",
"1",
"]",
".",
"find",
"(",
"'a'",
")",
".",
"text",
"[",
"1",
":",
"]",
")",
"has_rec",
"=",
"cols",
"[",
"4",
"]",
".",
"find",
"(",
"'a'",
")",
".",
"find",
"(",
"'img'",
")",
"if",
"not",
"has_rec",
":",
"continue",
"if",
"played_at",
"<",
"from_timestamp",
"or",
"i",
">=",
"limit",
":",
"done",
"=",
"True",
"break",
"matches",
".",
"append",
"(",
"{",
"'timestamp'",
":",
"played_at",
",",
"'match_id'",
":",
"match_id",
"}",
")",
"i",
"+=",
"1",
"page_id",
"+=",
"1",
"return",
"matches"
] | Get recently played ladder matches. | [
"Get",
"recently",
"played",
"ladder",
"matches",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L412-L439 |
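A sketch pulling recent rated matches (with recordings) from one ladder over the last day. The credentials and the ladder name are placeholders; scraping calls need login(), unlike the key-based API calls.

import datetime
import voobly

session = voobly.get_session(username='me', password='secret')  # placeholder credentials
voobly.login(session)
since = datetime.datetime.now() - datetime.timedelta(days=1)
matches = voobly.get_ladder_matches(session, 'RM - 1v1', from_timestamp=since, limit=100)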
happyleavesaoc/python-voobly | voobly/__init__.py | get_match | def get_match(session, match_id):
"""Get match metadata."""
url = '{}{}/{}'.format(session.auth.base_url, MATCH_URL, match_id)
parsed = make_scrape_request(session, url)
game = parsed.find('h3').text
if game != GAME_AOC:
raise ValueError('not an aoc match')
date_played = parsed.find(text=MATCH_DATE_PLAYED).find_next('td').text
players = []
colors = {}
player_count = int(parsed.find('td', text='Players:').find_next('td').text)
for div in parsed.find_all('div', style=True):
if not div['style'].startswith('background-color:'):
continue
if len(players) == player_count:
break
username_elem = div.find_next('a', href=re.compile(PROFILE_PATH))
username = username_elem.text
color = div['style'].split(':')[1].split(';')[0].strip()
colors[username] = color
rec = None
for dl_elem in parsed.find_all('a', href=re.compile('^/files/view')):
rec_name = dl_elem.find('b', text=re.compile(username+'$'))
if rec_name:
rec = rec_name.parent
user = parsed.find('a', text=username)
if not user:
# bugged match page
continue
user_id = int(user['href'].split('/')[-1])
children = list(user.find_next('span').children)
rate_after = None
rate_before = None
if str(children[0]).strip() == MATCH_NEW_RATE:
rate_after = int(children[1].text)
rate_before = rate_after - int(children[3].text)
elif str(children[4]).strip() == MATCH_NEW_RATE:
rate_after = int(children[5].text)
rate_before = rate_after - int(children[3].text)
players.append({
'url': rec['href'] if rec else None,
'id': user_id,
'username': username,
'color_id': COLOR_MAPPING.get(colors[username]),
'rate_before': rate_before,
'rate_after': rate_after
})
return {
'timestamp': dateparser.parse(date_played),
'players': players
} | python | def get_match(session, match_id):
"""Get match metadata."""
url = '{}{}/{}'.format(session.auth.base_url, MATCH_URL, match_id)
parsed = make_scrape_request(session, url)
game = parsed.find('h3').text
if game != GAME_AOC:
raise ValueError('not an aoc match')
date_played = parsed.find(text=MATCH_DATE_PLAYED).find_next('td').text
players = []
colors = {}
player_count = int(parsed.find('td', text='Players:').find_next('td').text)
for div in parsed.find_all('div', style=True):
if not div['style'].startswith('background-color:'):
continue
if len(players) == player_count:
break
username_elem = div.find_next('a', href=re.compile(PROFILE_PATH))
username = username_elem.text
color = div['style'].split(':')[1].split(';')[0].strip()
colors[username] = color
rec = None
for dl_elem in parsed.find_all('a', href=re.compile('^/files/view')):
rec_name = dl_elem.find('b', text=re.compile(username+'$'))
if rec_name:
rec = rec_name.parent
user = parsed.find('a', text=username)
if not user:
# bugged match page
continue
user_id = int(user['href'].split('/')[-1])
children = list(user.find_next('span').children)
rate_after = None
rate_before = None
if str(children[0]).strip() == MATCH_NEW_RATE:
rate_after = int(children[1].text)
rate_before = rate_after - int(children[3].text)
elif str(children[4]).strip() == MATCH_NEW_RATE:
rate_after = int(children[5].text)
rate_before = rate_after - int(children[3].text)
players.append({
'url': rec['href'] if rec else None,
'id': user_id,
'username': username,
'color_id': COLOR_MAPPING.get(colors[username]),
'rate_before': rate_before,
'rate_after': rate_after
})
return {
'timestamp': dateparser.parse(date_played),
'players': players
} | [
"def",
"get_match",
"(",
"session",
",",
"match_id",
")",
":",
"url",
"=",
"'{}{}/{}'",
".",
"format",
"(",
"session",
".",
"auth",
".",
"base_url",
",",
"MATCH_URL",
",",
"match_id",
")",
"parsed",
"=",
"make_scrape_request",
"(",
"session",
",",
"url",
")",
"game",
"=",
"parsed",
".",
"find",
"(",
"'h3'",
")",
".",
"text",
"if",
"game",
"!=",
"GAME_AOC",
":",
"raise",
"ValueError",
"(",
"'not an aoc match'",
")",
"date_played",
"=",
"parsed",
".",
"find",
"(",
"text",
"=",
"MATCH_DATE_PLAYED",
")",
".",
"find_next",
"(",
"'td'",
")",
".",
"text",
"players",
"=",
"[",
"]",
"colors",
"=",
"{",
"}",
"player_count",
"=",
"int",
"(",
"parsed",
".",
"find",
"(",
"'td'",
",",
"text",
"=",
"'Players:'",
")",
".",
"find_next",
"(",
"'td'",
")",
".",
"text",
")",
"for",
"div",
"in",
"parsed",
".",
"find_all",
"(",
"'div'",
",",
"style",
"=",
"True",
")",
":",
"if",
"not",
"div",
"[",
"'style'",
"]",
".",
"startswith",
"(",
"'background-color:'",
")",
":",
"continue",
"if",
"len",
"(",
"players",
")",
"==",
"player_count",
":",
"break",
"username_elem",
"=",
"div",
".",
"find_next",
"(",
"'a'",
",",
"href",
"=",
"re",
".",
"compile",
"(",
"PROFILE_PATH",
")",
")",
"username",
"=",
"username_elem",
".",
"text",
"color",
"=",
"div",
"[",
"'style'",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"colors",
"[",
"username",
"]",
"=",
"color",
"rec",
"=",
"None",
"for",
"dl_elem",
"in",
"parsed",
".",
"find_all",
"(",
"'a'",
",",
"href",
"=",
"re",
".",
"compile",
"(",
"'^/files/view'",
")",
")",
":",
"rec_name",
"=",
"dl_elem",
".",
"find",
"(",
"'b'",
",",
"text",
"=",
"re",
".",
"compile",
"(",
"username",
"+",
"'$'",
")",
")",
"if",
"rec_name",
":",
"rec",
"=",
"rec_name",
".",
"parent",
"user",
"=",
"parsed",
".",
"find",
"(",
"'a'",
",",
"text",
"=",
"username",
")",
"if",
"not",
"user",
":",
"# bugged match page",
"continue",
"user_id",
"=",
"int",
"(",
"user",
"[",
"'href'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"children",
"=",
"list",
"(",
"user",
".",
"find_next",
"(",
"'span'",
")",
".",
"children",
")",
"rate_after",
"=",
"None",
"rate_before",
"=",
"None",
"if",
"str",
"(",
"children",
"[",
"0",
"]",
")",
".",
"strip",
"(",
")",
"==",
"MATCH_NEW_RATE",
":",
"rate_after",
"=",
"int",
"(",
"children",
"[",
"1",
"]",
".",
"text",
")",
"rate_before",
"=",
"rate_after",
"-",
"int",
"(",
"children",
"[",
"3",
"]",
".",
"text",
")",
"elif",
"str",
"(",
"children",
"[",
"4",
"]",
")",
".",
"strip",
"(",
")",
"==",
"MATCH_NEW_RATE",
":",
"rate_after",
"=",
"int",
"(",
"children",
"[",
"5",
"]",
".",
"text",
")",
"rate_before",
"=",
"rate_after",
"-",
"int",
"(",
"children",
"[",
"3",
"]",
".",
"text",
")",
"players",
".",
"append",
"(",
"{",
"'url'",
":",
"rec",
"[",
"'href'",
"]",
"if",
"rec",
"else",
"None",
",",
"'id'",
":",
"user_id",
",",
"'username'",
":",
"username",
",",
"'color_id'",
":",
"COLOR_MAPPING",
".",
"get",
"(",
"colors",
"[",
"username",
"]",
")",
",",
"'rate_before'",
":",
"rate_before",
",",
"'rate_after'",
":",
"rate_after",
"}",
")",
"return",
"{",
"'timestamp'",
":",
"dateparser",
".",
"parse",
"(",
"date_played",
")",
",",
"'players'",
":",
"players",
"}"
] | Get match metadata. | [
"Get",
"match",
"metadata",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L443-L495 |
happyleavesaoc/python-voobly | voobly/__init__.py | download_rec | def download_rec(session, rec_url, target_path):
"""Download and extract a recorded game."""
try:
resp = session.get(session.auth.base_url + rec_url)
except RequestException:
raise VooblyError('failed to connect for download')
try:
downloaded = zipfile.ZipFile(io.BytesIO(resp.content))
downloaded.extractall(target_path)
except zipfile.BadZipFile:
raise VooblyError('invalid zip file')
return downloaded.namelist()[0] | python | def download_rec(session, rec_url, target_path):
"""Download and extract a recorded game."""
try:
resp = session.get(session.auth.base_url + rec_url)
except RequestException:
raise VooblyError('failed to connect for download')
try:
downloaded = zipfile.ZipFile(io.BytesIO(resp.content))
downloaded.extractall(target_path)
except zipfile.BadZipFile:
raise VooblyError('invalid zip file')
return downloaded.namelist()[0] | [
"def",
"download_rec",
"(",
"session",
",",
"rec_url",
",",
"target_path",
")",
":",
"try",
":",
"resp",
"=",
"session",
".",
"get",
"(",
"session",
".",
"auth",
".",
"base_url",
"+",
"rec_url",
")",
"except",
"RequestException",
":",
"raise",
"VooblyError",
"(",
"'failed to connect for download'",
")",
"try",
":",
"downloaded",
"=",
"zipfile",
".",
"ZipFile",
"(",
"io",
".",
"BytesIO",
"(",
"resp",
".",
"content",
")",
")",
"downloaded",
".",
"extractall",
"(",
"target_path",
")",
"except",
"zipfile",
".",
"BadZipFile",
":",
"raise",
"VooblyError",
"(",
"'invalid zip file'",
")",
"return",
"downloaded",
".",
"namelist",
"(",
")",
"[",
"0",
"]"
] | Download and extract a recorded game. | [
"Download",
"and",
"extract",
"a",
"recorded",
"game",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L499-L510 |
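A sketch chaining get_match() and download_rec() above to fetch every recording attached to a match; the match id and target directory are placeholders.

import voobly

session = voobly.get_session(username='me', password='secret')  # placeholder credentials
voobly.login(session)
match = voobly.get_match(session, 18916420)  # hypothetical match id
for player in match['players']:
    if player['url']:
        print(voobly.download_rec(session, player['url'], '/tmp/recs'))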
happyleavesaoc/python-voobly | voobly/__init__.py | login | def login(session):
"""Login to Voobly."""
if not session.auth.username or not session.auth.password:
raise VooblyError('must supply username and password')
_LOGGER.info("logging in (no valid cookie found)")
session.cookies.clear()
try:
session.get(session.auth.base_url + LOGIN_PAGE)
resp = session.post(session.auth.base_url + LOGIN_URL, data={
'username': session.auth.username,
'password': session.auth.password
})
except RequestException:
raise VooblyError('failed to connect for login')
if resp.status_code != 200:
raise VooblyError('failed to login')
_save_cookies(session.cookies, session.auth.cookie_path) | python | def login(session):
"""Login to Voobly."""
if not session.auth.username or not session.auth.password:
raise VooblyError('must supply username and password')
_LOGGER.info("logging in (no valid cookie found)")
session.cookies.clear()
try:
session.get(session.auth.base_url + LOGIN_PAGE)
resp = session.post(session.auth.base_url + LOGIN_URL, data={
'username': session.auth.username,
'password': session.auth.password
})
except RequestException:
raise VooblyError('failed to connect for login')
if resp.status_code != 200:
raise VooblyError('failed to login')
_save_cookies(session.cookies, session.auth.cookie_path) | [
"def",
"login",
"(",
"session",
")",
":",
"if",
"not",
"session",
".",
"auth",
".",
"username",
"or",
"not",
"session",
".",
"auth",
".",
"password",
":",
"raise",
"VooblyError",
"(",
"'must supply username and password'",
")",
"_LOGGER",
".",
"info",
"(",
"\"logging in (no valid cookie found)\"",
")",
"session",
".",
"cookies",
".",
"clear",
"(",
")",
"try",
":",
"session",
".",
"get",
"(",
"session",
".",
"auth",
".",
"base_url",
"+",
"LOGIN_PAGE",
")",
"resp",
"=",
"session",
".",
"post",
"(",
"session",
".",
"auth",
".",
"base_url",
"+",
"LOGIN_URL",
",",
"data",
"=",
"{",
"'username'",
":",
"session",
".",
"auth",
".",
"username",
",",
"'password'",
":",
"session",
".",
"auth",
".",
"password",
"}",
")",
"except",
"RequestException",
":",
"raise",
"VooblyError",
"(",
"'failed to connect for login'",
")",
"if",
"resp",
".",
"status_code",
"!=",
"200",
":",
"raise",
"VooblyError",
"(",
"'failed to login'",
")",
"_save_cookies",
"(",
"session",
".",
"cookies",
",",
"session",
".",
"auth",
".",
"cookie_path",
")"
] | Login to Voobly. | [
"Login",
"to",
"Voobly",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L513-L529 |
happyleavesaoc/python-voobly | voobly/__init__.py | get_session | def get_session(key=None, username=None, password=None, cache=True,
cache_expiry=datetime.timedelta(days=7), cookie_path=COOKIE_PATH, backend='memory',
version=VERSION_GLOBAL):
"""Get Voobly API session."""
class VooblyAuth(AuthBase): # pylint: disable=too-few-public-methods
"""Voobly authorization storage."""
def __init__(self, key, username, password, cookie_path, version):
"""Init."""
self.key = key
self.username = username
self.password = password
self.cookie_path = cookie_path
self.base_url = BASE_URLS[version]
def __call__(self, r):
"""Call is no-op."""
return r
if version not in BASE_URLS:
raise ValueError('unsupported voobly version')
session = requests.session()
if cache:
session = requests_cache.core.CachedSession(expire_after=cache_expiry, backend=backend)
session.auth = VooblyAuth(key, username, password, cookie_path, version)
if os.path.exists(cookie_path):
_LOGGER.info("cookie found at: %s", cookie_path)
session.cookies = _load_cookies(cookie_path)
return session | python | def get_session(key=None, username=None, password=None, cache=True,
cache_expiry=datetime.timedelta(days=7), cookie_path=COOKIE_PATH, backend='memory',
version=VERSION_GLOBAL):
"""Get Voobly API session."""
class VooblyAuth(AuthBase): # pylint: disable=too-few-public-methods
"""Voobly authorization storage."""
def __init__(self, key, username, password, cookie_path, version):
"""Init."""
self.key = key
self.username = username
self.password = password
self.cookie_path = cookie_path
self.base_url = BASE_URLS[version]
def __call__(self, r):
"""Call is no-op."""
return r
if version not in BASE_URLS:
raise ValueError('unsupported voobly version')
session = requests.session()
if cache:
session = requests_cache.core.CachedSession(expire_after=cache_expiry, backend=backend)
session.auth = VooblyAuth(key, username, password, cookie_path, version)
if os.path.exists(cookie_path):
_LOGGER.info("cookie found at: %s", cookie_path)
session.cookies = _load_cookies(cookie_path)
return session | [
"def",
"get_session",
"(",
"key",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"cache",
"=",
"True",
",",
"cache_expiry",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"7",
")",
",",
"cookie_path",
"=",
"COOKIE_PATH",
",",
"backend",
"=",
"'memory'",
",",
"version",
"=",
"VERSION_GLOBAL",
")",
":",
"class",
"VooblyAuth",
"(",
"AuthBase",
")",
":",
"# pylint: disable=too-few-public-methods",
"\"\"\"Voobly authorization storage.\"\"\"",
"def",
"__init__",
"(",
"self",
",",
"key",
",",
"username",
",",
"password",
",",
"cookie_path",
",",
"version",
")",
":",
"\"\"\"Init.\"\"\"",
"self",
".",
"key",
"=",
"key",
"self",
".",
"username",
"=",
"username",
"self",
".",
"password",
"=",
"password",
"self",
".",
"cookie_path",
"=",
"cookie_path",
"self",
".",
"base_url",
"=",
"BASE_URLS",
"[",
"version",
"]",
"def",
"__call__",
"(",
"self",
",",
"r",
")",
":",
"\"\"\"Call is no-op.\"\"\"",
"return",
"r",
"if",
"version",
"not",
"in",
"BASE_URLS",
":",
"raise",
"ValueError",
"(",
"'unsupported voobly version'",
")",
"session",
"=",
"requests",
".",
"session",
"(",
")",
"if",
"cache",
":",
"session",
"=",
"requests_cache",
".",
"core",
".",
"CachedSession",
"(",
"expire_after",
"=",
"cache_expiry",
",",
"backend",
"=",
"backend",
")",
"session",
".",
"auth",
"=",
"VooblyAuth",
"(",
"key",
",",
"username",
",",
"password",
",",
"cookie_path",
",",
"version",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cookie_path",
")",
":",
"_LOGGER",
".",
"info",
"(",
"\"cookie found at: %s\"",
",",
"cookie_path",
")",
"session",
".",
"cookies",
"=",
"_load_cookies",
"(",
"cookie_path",
")",
"return",
"session"
] | Get Voobly API session. | [
"Get",
"Voobly",
"API",
"session",
"."
] | train | https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L532-L561 |
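A sketch of a cached API session; the sqlite backend and one-day expiry are illustrative requests_cache settings, not defaults of the library.

import datetime
import voobly

session = voobly.get_session(
    key='YOUR_API_KEY',
    cache=True,
    cache_expiry=datetime.timedelta(days=1),
    backend='sqlite')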
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | main | def main():
"""The entry point for the Hyper-V Neutron Agent."""
neutron_config.register_agent_state_opts_helper(CONF)
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
hyperv_agent = HyperVNeutronAgent()
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
hyperv_agent.daemon_loop() | python | def main():
"""The entry point for the Hyper-V Neutron Agent."""
neutron_config.register_agent_state_opts_helper(CONF)
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
hyperv_agent = HyperVNeutronAgent()
# Start everything.
LOG.info("Agent initialized successfully, now running... ")
hyperv_agent.daemon_loop() | [
"def",
"main",
"(",
")",
":",
"neutron_config",
".",
"register_agent_state_opts_helper",
"(",
"CONF",
")",
"common_config",
".",
"init",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"neutron_config",
".",
"setup_logging",
"(",
")",
"hyperv_agent",
"=",
"HyperVNeutronAgent",
"(",
")",
"# Start everything.",
"LOG",
".",
"info",
"(",
"\"Agent initialized successfully, now running... \"",
")",
"hyperv_agent",
".",
"daemon_loop",
"(",
")"
] | The entry point for the Hyper-V Neutron Agent. | [
"The",
"entry",
"point",
"for",
"the",
"Hyper",
"-",
"V",
"Neutron",
"Agent",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L291-L301 |
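A sketch of driving the entry point above programmatically; in practice the agent is started from its console script, and the config file path here is a placeholder. Note that daemon_loop() blocks indefinitely.

import sys
from networking_hyperv.neutron.agent import hyperv_neutron_agent

sys.argv = ['neutron-hyperv-agent',
            '--config-file', '/etc/neutron/neutron_hyperv_agent.conf']  # placeholder path
hyperv_neutron_agent.main()  # runs the agent's daemon loop until interrupted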
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | HyperVNeutronAgent._setup | def _setup(self):
"""Setup the layer two agent."""
super(HyperVNeutronAgent, self)._setup()
self._sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self._sec_groups_agent = HyperVSecurityAgent(self._context,
self._sg_plugin_rpc)
self._vlan_driver = trunk_driver.HyperVTrunkDriver(self._context)
if CONF.NVGRE.enable_support:
self._consumers.append([h_constant.TUNNEL, topics.UPDATE])
self._consumers.append([h_constant.LOOKUP, h_constant.UPDATE]) | python | def _setup(self):
"""Setup the layer two agent."""
super(HyperVNeutronAgent, self)._setup()
self._sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self._sec_groups_agent = HyperVSecurityAgent(self._context,
self._sg_plugin_rpc)
self._vlan_driver = trunk_driver.HyperVTrunkDriver(self._context)
if CONF.NVGRE.enable_support:
self._consumers.append([h_constant.TUNNEL, topics.UPDATE])
self._consumers.append([h_constant.LOOKUP, h_constant.UPDATE]) | [
"def",
"_setup",
"(",
"self",
")",
":",
"super",
"(",
"HyperVNeutronAgent",
",",
"self",
")",
".",
"_setup",
"(",
")",
"self",
".",
"_sg_plugin_rpc",
"=",
"sg_rpc",
".",
"SecurityGroupServerRpcApi",
"(",
"topics",
".",
"PLUGIN",
")",
"self",
".",
"_sec_groups_agent",
"=",
"HyperVSecurityAgent",
"(",
"self",
".",
"_context",
",",
"self",
".",
"_sg_plugin_rpc",
")",
"self",
".",
"_vlan_driver",
"=",
"trunk_driver",
".",
"HyperVTrunkDriver",
"(",
"self",
".",
"_context",
")",
"if",
"CONF",
".",
"NVGRE",
".",
"enable_support",
":",
"self",
".",
"_consumers",
".",
"append",
"(",
"[",
"h_constant",
".",
"TUNNEL",
",",
"topics",
".",
"UPDATE",
"]",
")",
"self",
".",
"_consumers",
".",
"append",
"(",
"[",
"h_constant",
".",
"LOOKUP",
",",
"h_constant",
".",
"UPDATE",
"]",
")"
] | Setup the layer two agent. | [
"Setup",
"the",
"layer",
"two",
"agent",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L114-L124 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | HyperVNeutronAgent._setup_qos_extension | def _setup_qos_extension(self):
"""Setup the QOS extension if it is required."""
if not CONF.AGENT.enable_qos_extension:
return
self._qos_ext = qos_extension.QosAgentExtension()
self._qos_ext.consume_api(self)
self._qos_ext.initialize(self._connection, 'hyperv') | python | def _setup_qos_extension(self):
"""Setup the QOS extension if it is required."""
if not CONF.AGENT.enable_qos_extension:
return
self._qos_ext = qos_extension.QosAgentExtension()
self._qos_ext.consume_api(self)
self._qos_ext.initialize(self._connection, 'hyperv') | [
"def",
"_setup_qos_extension",
"(",
"self",
")",
":",
"if",
"not",
"CONF",
".",
"AGENT",
".",
"enable_qos_extension",
":",
"return",
"self",
".",
"_qos_ext",
"=",
"qos_extension",
".",
"QosAgentExtension",
"(",
")",
"self",
".",
"_qos_ext",
".",
"consume_api",
"(",
"self",
")",
"self",
".",
"_qos_ext",
".",
"initialize",
"(",
"self",
".",
"_connection",
",",
"'hyperv'",
")"
] | Setup the QOS extension if it is required. | [
"Setup",
"the",
"QOS",
"extension",
"if",
"it",
"is",
"required",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L126-L132 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | HyperVNeutronAgent._provision_network | def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
LOG.info("Provisioning network %s", net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == h_constant.TYPE_VLAN:
# Nothing to do
pass
elif network_type == h_constant.TYPE_FLAT:
# Nothing to do
pass
elif network_type == h_constant.TYPE_LOCAL:
# TODO(alexpilotti): Check that the switch type is private
# or create it if not existing.
pass
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_network(segmentation_id, net_uuid,
vswitch_name)
else:
raise exception.NetworkingHyperVException(
(_("Cannot provision unknown network type "
"%(network_type)s for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
vswitch_map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = vswitch_map | python | def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
LOG.info("Provisioning network %s", net_uuid)
vswitch_name = self._get_vswitch_name(network_type, physical_network)
if network_type == h_constant.TYPE_VLAN:
# Nothing to do
pass
elif network_type == h_constant.TYPE_FLAT:
# Nothing to do
pass
elif network_type == h_constant.TYPE_LOCAL:
# TODO(alexpilotti): Check that the switch type is private
# or create it if not existing.
pass
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_network(segmentation_id, net_uuid,
vswitch_name)
else:
raise exception.NetworkingHyperVException(
(_("Cannot provision unknown network type "
"%(network_type)s for network %(net_uuid)s") %
dict(network_type=network_type, net_uuid=net_uuid)))
vswitch_map = {
'network_type': network_type,
'vswitch_name': vswitch_name,
'ports': [],
'vlan_id': segmentation_id}
self._network_vswitch_map[net_uuid] = vswitch_map | [
"def",
"_provision_network",
"(",
"self",
",",
"port_id",
",",
"net_uuid",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
")",
":",
"LOG",
".",
"info",
"(",
"\"Provisioning network %s\"",
",",
"net_uuid",
")",
"vswitch_name",
"=",
"self",
".",
"_get_vswitch_name",
"(",
"network_type",
",",
"physical_network",
")",
"if",
"network_type",
"==",
"h_constant",
".",
"TYPE_VLAN",
":",
"# Nothing to do",
"pass",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_FLAT",
":",
"# Nothing to do",
"pass",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_LOCAL",
":",
"# TODO(alexpilotti): Check that the switch type is private",
"# or create it if not existing.",
"pass",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_NVGRE",
"and",
"self",
".",
"_nvgre_enabled",
":",
"self",
".",
"_nvgre_ops",
".",
"bind_nvgre_network",
"(",
"segmentation_id",
",",
"net_uuid",
",",
"vswitch_name",
")",
"else",
":",
"raise",
"exception",
".",
"NetworkingHyperVException",
"(",
"(",
"_",
"(",
"\"Cannot provision unknown network type \"",
"\"%(network_type)s for network %(net_uuid)s\"",
")",
"%",
"dict",
"(",
"network_type",
"=",
"network_type",
",",
"net_uuid",
"=",
"net_uuid",
")",
")",
")",
"vswitch_map",
"=",
"{",
"'network_type'",
":",
"network_type",
",",
"'vswitch_name'",
":",
"vswitch_name",
",",
"'ports'",
":",
"[",
"]",
",",
"'vlan_id'",
":",
"segmentation_id",
"}",
"self",
".",
"_network_vswitch_map",
"[",
"net_uuid",
"]",
"=",
"vswitch_map"
] | Provision the network with the received information. | [
"Provision",
"the",
"network",
"with",
"the",
"received",
"information",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L156-L186 |
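For context on the record above, a minimal sketch of the mapping _provision_network stores for a VLAN network; the UUID, vswitch name, and VLAN id below are assumed values for illustration, not taken from the dataset.

# Sketch only: shape of the entry added to _network_vswitch_map for a VLAN network.
# Every literal value here is an assumption made for the example.
example_vswitch_map = {
    'network_type': 'vlan',      # h_constant.TYPE_VLAN
    'vswitch_name': 'external',  # assumed result of _get_vswitch_name()
    'ports': [],                 # port ids are appended later, as ports are bound
    'vlan_id': 100,              # the segmentation_id passed in
}
example_network_vswitch_map = {
    '3f8a6c2e-1111-2222-3333-444455556666': example_vswitch_map,  # keyed by net_uuid
}
print(example_network_vswitch_map)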
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | HyperVNeutronAgent._port_bound | def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov):
"""Bind the port to the recived network."""
super(HyperVNeutronAgent, self)._port_bound(
port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov
)
vswitch_map = self._network_vswitch_map[network_id]
if network_type == h_constant.TYPE_VLAN:
self._vlan_driver.bind_vlan_port(port_id, segmentation_id)
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_port(
segmentation_id, vswitch_map['vswitch_name'], port_id)
elif network_type == h_constant.TYPE_FLAT:
pass # Nothing to do
elif network_type == h_constant.TYPE_LOCAL:
pass # Nothing to do
else:
LOG.error('Unsupported network type %s', network_type)
if self._enable_metrics_collection:
self._utils.add_metrics_collection_acls(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
# check if security groups is enabled.
# if not, teardown the security group rules
if self._enable_security_groups:
self._sec_groups_agent.refresh_firewall([port_id])
else:
self._utils.remove_all_security_rules(port_id)
self._utils.set_vswitch_port_mac_spoofing(port_id,
port_security_enabled) | python | def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov):
"""Bind the port to the recived network."""
super(HyperVNeutronAgent, self)._port_bound(
port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov
)
vswitch_map = self._network_vswitch_map[network_id]
if network_type == h_constant.TYPE_VLAN:
self._vlan_driver.bind_vlan_port(port_id, segmentation_id)
elif network_type == h_constant.TYPE_NVGRE and self._nvgre_enabled:
self._nvgre_ops.bind_nvgre_port(
segmentation_id, vswitch_map['vswitch_name'], port_id)
elif network_type == h_constant.TYPE_FLAT:
pass # Nothing to do
elif network_type == h_constant.TYPE_LOCAL:
pass # Nothing to do
else:
LOG.error('Unsupported network type %s', network_type)
if self._enable_metrics_collection:
self._utils.add_metrics_collection_acls(port_id)
self._port_metric_retries[port_id] = self._metrics_max_retries
# check if security groups is enabled.
# if not, teardown the security group rules
if self._enable_security_groups:
self._sec_groups_agent.refresh_firewall([port_id])
else:
self._utils.remove_all_security_rules(port_id)
self._utils.set_vswitch_port_mac_spoofing(port_id,
port_security_enabled) | [
"def",
"_port_bound",
"(",
"self",
",",
"port_id",
",",
"network_id",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
",",
"port_security_enabled",
",",
"set_port_sriov",
")",
":",
"super",
"(",
"HyperVNeutronAgent",
",",
"self",
")",
".",
"_port_bound",
"(",
"port_id",
",",
"network_id",
",",
"network_type",
",",
"physical_network",
",",
"segmentation_id",
",",
"port_security_enabled",
",",
"set_port_sriov",
")",
"vswitch_map",
"=",
"self",
".",
"_network_vswitch_map",
"[",
"network_id",
"]",
"if",
"network_type",
"==",
"h_constant",
".",
"TYPE_VLAN",
":",
"self",
".",
"_vlan_driver",
".",
"bind_vlan_port",
"(",
"port_id",
",",
"segmentation_id",
")",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_NVGRE",
"and",
"self",
".",
"_nvgre_enabled",
":",
"self",
".",
"_nvgre_ops",
".",
"bind_nvgre_port",
"(",
"segmentation_id",
",",
"vswitch_map",
"[",
"'vswitch_name'",
"]",
",",
"port_id",
")",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_FLAT",
":",
"pass",
"# Nothing to do",
"elif",
"network_type",
"==",
"h_constant",
".",
"TYPE_LOCAL",
":",
"pass",
"# Nothing to do",
"else",
":",
"LOG",
".",
"error",
"(",
"'Unsupported network type %s'",
",",
"network_type",
")",
"if",
"self",
".",
"_enable_metrics_collection",
":",
"self",
".",
"_utils",
".",
"add_metrics_collection_acls",
"(",
"port_id",
")",
"self",
".",
"_port_metric_retries",
"[",
"port_id",
"]",
"=",
"self",
".",
"_metrics_max_retries",
"# check if security groups is enabled.",
"# if not, teardown the security group rules",
"if",
"self",
".",
"_enable_security_groups",
":",
"self",
".",
"_sec_groups_agent",
".",
"refresh_firewall",
"(",
"[",
"port_id",
"]",
")",
"else",
":",
"self",
".",
"_utils",
".",
"remove_all_security_rules",
"(",
"port_id",
")",
"self",
".",
"_utils",
".",
"set_vswitch_port_mac_spoofing",
"(",
"port_id",
",",
"port_security_enabled",
")"
] | Bind the port to the received network. | [
"Bind",
"the",
"port",
"to",
"the",
"recived",
"network",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L188-L221 |
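To summarise the dispatch in _port_bound above, the per-network-type behaviour can be read as the standalone sketch below; it uses the constant names only and is not code from the agent.

# Sketch only: what _port_bound does for each network type (summary, not agent code).
PORT_BIND_BEHAVIOUR = {
    'TYPE_VLAN': 'bind the VLAN id on the port via the VLAN driver',
    'TYPE_NVGRE': 'bind the NVGRE segment (only when NVGRE support is enabled)',
    'TYPE_FLAT': 'no extra binding work',
    'TYPE_LOCAL': 'no extra binding work',
}
for network_type, behaviour in PORT_BIND_BEHAVIOUR.items():
    print(f"{network_type}: {behaviour}")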
openstack/networking-hyperv | networking_hyperv/neutron/agent/hyperv_neutron_agent.py | HyperVNeutronAgent._work | def _work(self):
"""Process the information regarding the available ports."""
super(HyperVNeutronAgent, self)._work()
if self._nvgre_enabled:
self._nvgre_ops.refresh_nvgre_records()
self._port_enable_control_metrics() | python | def _work(self):
"""Process the information regarding the available ports."""
super(HyperVNeutronAgent, self)._work()
if self._nvgre_enabled:
self._nvgre_ops.refresh_nvgre_records()
self._port_enable_control_metrics() | [
"def",
"_work",
"(",
"self",
")",
":",
"super",
"(",
"HyperVNeutronAgent",
",",
"self",
")",
".",
"_work",
"(",
")",
"if",
"self",
".",
"_nvgre_enabled",
":",
"self",
".",
"_nvgre_ops",
".",
"refresh_nvgre_records",
"(",
")",
"self",
".",
"_port_enable_control_metrics",
"(",
")"
] | Process the information regarding the available ports. | [
"Process",
"the",
"information",
"regarding",
"the",
"available",
"ports",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hyperv_neutron_agent.py#L269-L274 |
Metatab/metapack | metapack/jupyter/script.py | ScriptIPython.run_cell_magic | def run_cell_magic(self, magic_name, line, cell):
"""Run a limited number of magics from scripts, without IPython"""
if magic_name == 'bash':
self.shebang("bash", cell)
elif magic_name == 'metatab':
self.mm.metatab(line, cell) | python | def run_cell_magic(self, magic_name, line, cell):
"""Run a limited number of magics from scripts, without IPython"""
if magic_name == 'bash':
self.shebang("bash", cell)
elif magic_name == 'metatab':
self.mm.metatab(line, cell) | [
"def",
"run_cell_magic",
"(",
"self",
",",
"magic_name",
",",
"line",
",",
"cell",
")",
":",
"if",
"magic_name",
"==",
"'bash'",
":",
"self",
".",
"shebang",
"(",
"\"bash\"",
",",
"cell",
")",
"elif",
"magic_name",
"==",
"'metatab'",
":",
"self",
".",
"mm",
".",
"metatab",
"(",
"line",
",",
"cell",
")"
] | Run a limited number of magics from scripts, without IPython | [
"Run",
"a",
"limited",
"number",
"of",
"magics",
"from",
"scripts",
"without",
"IPython"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/script.py#L51-L57 |
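A standalone analogue of the magic dispatch in run_cell_magic above, for illustration only; the handler bodies are placeholders, not the metapack implementation.

# Minimal standalone analogue of the dispatch shown above (illustration only).
def dispatch_cell_magic(magic_name, line, cell):
    if magic_name == 'bash':
        print(f"would run via shebang('bash', ...): {cell!r}")
    elif magic_name == 'metatab':
        print(f"would hand to the metatab handler: line={line!r}, cell={cell!r}")

dispatch_cell_magic('bash', '', 'echo hello')
dispatch_cell_magic('metatab', '', 'Root.Name: example')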