body (stringlengths, 26 to 98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths, 1 to 16.8k) | path (stringlengths, 5 to 230) | name (stringlengths, 1 to 96) | repository_name (stringlengths, 7 to 89) | lang (stringclasses, 1 value) | body_without_docstring (stringlengths, 20 to 98.2k)
---|---|---|---|---|---|---|---
@check_type(JSONModelObject)
def object_locations(archival_object):
'Finds locations associated with an archival object.\n\n :param JSONModelObject archival_object: an ArchivesSpace archival_object.\n\n :returns: Locations objects associated with the archival object.\n :rtype: list\n '
locations = []
for instance in archival_object.instances:
top_container = instance.sub_container.top_container.reify()
locations += top_container.container_locations
return locations | 2,330,243,253,396,513,300 | Finds locations associated with an archival object.
:param JSONModelObject archival_object: an ArchivesSpace archival_object.
:returns: Locations objects associated with the archival object.
:rtype: list | rac_aspace/data_helpers.py | object_locations | RockefellerArchiveCenter/rac_aspace | python | @check_type(JSONModelObject)
def object_locations(archival_object):
'Finds locations associated with an archival object.\n\n :param JSONModelObject archival_object: an ArchivesSpace archival_object.\n\n :returns: Locations objects associated with the archival object.\n :rtype: list\n '
locations = []
for instance in archival_object.instances:
top_container = instance.sub_container.top_container.reify()
locations += top_container.container_locations
return locations |
@check_type(JSONModelObject)
def format_from_obj(obj, format_string):
'Generates a human-readable string from an object.\n\n :param JSONModelObject location: an ArchivesSpace object.\n\n :returns: a string in the chosen format.\n :rtype: str\n '
if (not format_string):
raise Exception('No format string provided.')
else:
try:
d = {}
matches = [i[1] for i in Formatter().parse(format_string) if i[1]]
for m in matches:
d.update({m: getattr(obj, m, '')})
return format_string.format(**d)
except KeyError as e:
raise KeyError('The field {} was not found in this object'.format(str(e))) | 7,218,999,293,139,241,000 | Generates a human-readable string from an object.
:param JSONModelObject location: an ArchivesSpace object.
:returns: a string in the chosen format.
:rtype: str | rac_aspace/data_helpers.py | format_from_obj | RockefellerArchiveCenter/rac_aspace | python | @check_type(JSONModelObject)
def format_from_obj(obj, format_string):
'Generates a human-readable string from an object.\n\n :param JSONModelObject location: an ArchivesSpace object.\n\n :returns: a string in the chosen format.\n :rtype: str\n '
if (not format_string):
raise Exception('No format string provided.')
else:
try:
d = {}
matches = [i[1] for i in Formatter().parse(format_string) if i[1]]
for m in matches:
d.update({m: getattr(obj, m, '')})
return format_string.format(**d)
except KeyError as e:
raise KeyError('The field {} was not found in this object'.format(str(e))) |
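The field substitution in the row above relies on string.Formatter to discover which attribute names a template references. A minimal sketch of that technique, run against a plain SimpleNamespace in place of an ArchivesSpace JSONModelObject (the attribute names and template are made up for illustration):

```python
from string import Formatter
from types import SimpleNamespace

# Stand-in for an ArchivesSpace object; attribute names are hypothetical.
location = SimpleNamespace(building="Annex", floor="2", room="201")
format_string = "{building}, Floor {floor}, Room {room}"

# Pull the field names referenced in the template, then read them off the object,
# defaulting to an empty string when an attribute is missing.
fields = {name: getattr(location, name, "")
          for _, name, _, _ in Formatter().parse(format_string) if name}
print(format_string.format(**fields))  # Annex, Floor 2, Room 201
```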
@check_type(dict)
def format_resource_id(resource, separator=':'):
'Concatenates the four-part ID for a resource record.\n\n :param dict resource: an ArchivesSpace resource.\n :param str separator: a separator to insert between the id parts. Defaults\n to `:`.\n\n :returns: a concatenated four-part ID for the resource record.\n :rtype: str\n '
resource_id = []
for x in range(4):
try:
resource_id.append(resource['id_{0}'.format(x)])
except KeyError:
break
return separator.join(resource_id) | 4,343,371,765,667,696,000 | Concatenates the four-part ID for a resource record.
:param dict resource: an ArchivesSpace resource.
:param str separator: a separator to insert between the id parts. Defaults
to `:`.
:returns: a concatenated four-part ID for the resource record.
:rtype: str | rac_aspace/data_helpers.py | format_resource_id | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def format_resource_id(resource, separator=':'):
'Concatenates the four-part ID for a resource record.\n\n :param dict resource: an ArchivesSpace resource.\n :param str separator: a separator to insert between the id parts. Defaults\n to `:`.\n\n :returns: a concatenated four-part ID for the resource record.\n :rtype: str\n '
resource_id = []
for x in range(4):
try:
resource_id.append(resource['id_{0}'.format(x)])
except KeyError:
break
return separator.join(resource_id) |
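A short usage sketch for format_resource_id, assuming the package is installed and importable under the module path suggested by the path column; the resource dict is hypothetical and deliberately omits id_3:

```python
from rac_aspace.data_helpers import format_resource_id

resource = {"id_0": "FA", "id_1": "001", "id_2": "02"}  # hypothetical four-part ID, id_3 absent
print(format_resource_id(resource))                 # FA:001:02
print(format_resource_id(resource, separator="."))  # FA.001.02
```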
@check_type(JSONModelObject)
def closest_value(archival_object, key):
'Finds the closest value matching a key.\n\n Starts with an archival object, and iterates up through its ancestors\n until it finds a match for a key that is not empty or null.\n\n :param JSONModelObject archival_object: an ArchivesSpace archival_object.\n :param str key: the key to match against.\n\n :returns: The value of the key, which could be a str, list, or dict.\n :rtype: str, list, or key\n '
if (getattr(archival_object, key) not in ['', [], {}, None]):
return getattr(archival_object, key)
else:
for ancestor in archival_object.ancestors:
return closest_value(ancestor, key) | -5,664,226,892,080,627,000 | Finds the closest value matching a key.
Starts with an archival object, and iterates up through its ancestors
until it finds a match for a key that is not empty or null.
:param JSONModelObject archival_object: an ArchivesSpace archival_object.
:param str key: the key to match against.
:returns: The value of the key, which could be a str, list, or dict.
:rtype: str, list, or key | rac_aspace/data_helpers.py | closest_value | RockefellerArchiveCenter/rac_aspace | python | @check_type(JSONModelObject)
def closest_value(archival_object, key):
'Finds the closest value matching a key.\n\n Starts with an archival object, and iterates up through its ancestors\n until it finds a match for a key that is not empty or null.\n\n :param JSONModelObject archival_object: an ArchivesSpace archival_object.\n :param str key: the key to match against.\n\n :returns: The value of the key, which could be a str, list, or dict.\n :rtype: str, list, or key\n '
if (getattr(archival_object, key) not in ['', [], {}, None]):
return getattr(archival_object, key)
else:
for ancestor in archival_object.ancestors:
return closest_value(ancestor, key) |
def get_orphans(object_list, null_attribute):
'Finds objects in a list which do not have a value in a specified field.\n\n :param list object_list: a list of ArchivesSpace objects.\n :param null_attribute: an attribute which must be empty or null.\n\n :yields: a list of ArchivesSpace objects.\n :yield type: dict\n '
for obj in object_list:
if (getattr(obj, null_attribute) in ['', [], {}, None]):
(yield obj) | -3,144,633,524,907,891,700 | Finds objects in a list which do not have a value in a specified field.
:param list object_list: a list of ArchivesSpace objects.
:param null_attribute: an attribute which must be empty or null.
:yields: a list of ArchivesSpace objects.
:yield type: dict | rac_aspace/data_helpers.py | get_orphans | RockefellerArchiveCenter/rac_aspace | python | def get_orphans(object_list, null_attribute):
'Finds objects in a list which do not have a value in a specified field.\n\n :param list object_list: a list of ArchivesSpace objects.\n :param null_attribute: an attribute which must be empty or null.\n\n :yields: a list of ArchivesSpace objects.\n :yield type: dict\n '
for obj in object_list:
if (getattr(obj, null_attribute) in ['', [], {}, None]):
(yield obj) |
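get_orphans only relies on getattr, so any object exposing the relevant attribute will do. A sketch with SimpleNamespace stand-ins, assuming the package is importable; titles and attribute values are invented:

```python
from types import SimpleNamespace
from rac_aspace.data_helpers import get_orphans

objects = [
    SimpleNamespace(title="Box 1", container_locations=[]),
    SimpleNamespace(title="Box 2", container_locations=["/locations/7"]),
]
# Box 1 has an empty container_locations list, so it is yielded as an orphan.
print([o.title for o in get_orphans(objects, "container_locations")])  # ['Box 1']
```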
@check_type(dict)
def get_expression(date):
'Returns a date expression for a date object.\n\n Concatenates start and end dates if no date expression exists.\n\n :param dict date: an ArchivesSpace date\n\n :returns: date expression for the date object.\n :rtype: str\n '
try:
expression = date['expression']
except KeyError:
if date.get('end'):
expression = '{0}-{1}'.format(date['begin'], date['end'])
else:
expression = date['begin']
return expression | -7,799,939,748,093,341,000 | Returns a date expression for a date object.
Concatenates start and end dates if no date expression exists.
:param dict date: an ArchivesSpace date
:returns: date expression for the date object.
:rtype: str | rac_aspace/data_helpers.py | get_expression | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def get_expression(date):
'Returns a date expression for a date object.\n\n Concatenates start and end dates if no date expression exists.\n\n :param dict date: an ArchivesSpace date\n\n :returns: date expression for the date object.\n :rtype: str\n '
try:
expression = date['expression']
except KeyError:
if date.get('end'):
expression = '{0}-{1}'.format(date['begin'], date['end'])
else:
expression = date['begin']
return expression |
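A usage sketch for get_expression with hand-written date dicts, again assuming the module import path matches the path column:

```python
from rac_aspace.data_helpers import get_expression

print(get_expression({"expression": "circa 1920s"}))     # circa 1920s
print(get_expression({"begin": "1920", "end": "1935"}))  # 1920-1935
print(get_expression({"begin": "1920"}))                 # 1920
```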
@check_type(dict)
def indicates_restriction(rights_statement, restriction_acts):
'Parses a rights statement to determine if it indicates a restriction.\n\n :param dict rights_statement: an ArchivesSpace rights statement.\n\n :returns: True if rights statement indicates a restriction, False if not.\n :rtype: bool\n '
def is_expired(date):
today = datetime.now()
date = (date if date else datetime.strftime('%Y-%m-%d'))
return (False if (datetime.strptime(date, '%Y-%m-%d') >= today) else True)
if is_expired(rights_statement.get('end_date')):
return False
for act in rights_statement.get('acts'):
if ((act.get('restriction') in restriction_acts) and (not is_expired(act.get('end_date')))):
return True
return False | 2,833,106,606,100,588,500 | Parses a rights statement to determine if it indicates a restriction.
:param dict rights_statement: an ArchivesSpace rights statement.
:returns: True if rights statement indicates a restriction, False if not.
:rtype: bool | rac_aspace/data_helpers.py | indicates_restriction | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def indicates_restriction(rights_statement, restriction_acts):
'Parses a rights statement to determine if it indicates a restriction.\n\n :param dict rights_statement: an ArchivesSpace rights statement.\n\n :returns: True if rights statement indicates a restriction, False if not.\n :rtype: bool\n '
def is_expired(date):
today = datetime.now()
date = (date if date else datetime.strftime('%Y-%m-%d'))
return (False if (datetime.strptime(date, '%Y-%m-%d') >= today) else True)
if is_expired(rights_statement.get('end_date')):
return False
for act in rights_statement.get('acts'):
if ((act.get('restriction') in restriction_acts) and (not is_expired(act.get('end_date')))):
return True
return False |
@check_type(dict)
def is_restricted(archival_object, query_string, restriction_acts):
'Parses an archival object to determine if it is restricted.\n\n Iterates through notes, looking for a conditions governing access note\n which contains a particular set of strings.\n Also looks for associated rights statements which indicate object may be\n restricted.\n\n :param dict archival_object: an ArchivesSpace archival_object.\n :param list restriction_acts: a list of strings to match restriction act against.\n\n :returns: True if archival object is restricted, False if not.\n :rtype: bool\n '
for note in archival_object['notes']:
if (note['type'] == 'accessrestrict'):
if text_in_note(note, query_string.lower()):
return True
for rights_statement in archival_object['rights_statements']:
if indicates_restriction(rights_statement, restriction_acts):
return True
return False | -3,619,247,484,139,808,300 | Parses an archival object to determine if it is restricted.
Iterates through notes, looking for a conditions governing access note
which contains a particular set of strings.
Also looks for associated rights statements which indicate object may be
restricted.
:param dict archival_object: an ArchivesSpace archival_object.
:param list restriction_acts: a list of strings to match restriction act against.
:returns: True if archival object is restricted, False if not.
:rtype: bool | rac_aspace/data_helpers.py | is_restricted | RockefellerArchiveCenter/rac_aspace | python | @check_type(dict)
def is_restricted(archival_object, query_string, restriction_acts):
'Parses an archival object to determine if it is restricted.\n\n Iterates through notes, looking for a conditions governing access note\n which contains a particular set of strings.\n Also looks for associated rights statements which indicate object may be\n restricted.\n\n :param dict archival_object: an ArchivesSpace archival_object.\n :param list restriction_acts: a list of strings to match restriction act against.\n\n :returns: True if archival object is restricted, False if not.\n :rtype: bool\n '
for note in archival_object['notes']:
if (note['type'] == 'accessrestrict'):
if text_in_note(note, query_string.lower()):
return True
for rights_statement in archival_object['rights_statements']:
if indicates_restriction(rights_statement, restriction_acts):
return True
return False |
@check_type(str)
def strip_html_tags(string):
'Strips HTML tags from a string.\n\n :param str string: An input string from which to remove HTML tags.\n '
tag_match = re.compile('<.*?>')
cleantext = re.sub(tag_match, '', string)
return cleantext | 752,839,696,298,316,400 | Strips HTML tags from a string.
:param str string: An input string from which to remove HTML tags. | rac_aspace/data_helpers.py | strip_html_tags | RockefellerArchiveCenter/rac_aspace | python | @check_type(str)
def strip_html_tags(string):
'Strips HTML tags from a string.\n\n :param str string: An input string from which to remove HTML tags.\n '
tag_match = re.compile('<.*?>')
cleantext = re.sub(tag_match, '', string)
return cleantext |
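A quick sketch of strip_html_tags on a markup-laden note, assuming the same import path as above:

```python
from rac_aspace.data_helpers import strip_html_tags

print(strip_html_tags("<p>Folder list, <em>arranged</em> alphabetically</p>"))
# Folder list, arranged alphabetically
```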
def parse_subnote(subnote):
'Parses note content from subnotes.\n\n :param dict: an ArchivesSpace subnote.\n\n :returns: a list containing subnote content.\n :rtype: list\n '
if (subnote['jsonmodel_type'] in ['note_orderedlist', 'note_index']):
content = subnote['items']
elif (subnote['jsonmodel_type'] in ['note_chronology', 'note_definedlist']):
content = []
for k in subnote['items']:
for i in k:
content += (k.get(i) if isinstance(k.get(i), list) else [k.get(i)])
else:
content = (subnote['content'] if isinstance(subnote['content'], list) else [subnote['content']])
return content | 7,959,161,512,769,357,000 | Parses note content from subnotes.
:param dict: an ArchivesSpace subnote.
:returns: a list containing subnote content.
:rtype: list | rac_aspace/data_helpers.py | parse_subnote | RockefellerArchiveCenter/rac_aspace | python | def parse_subnote(subnote):
'Parses note content from subnotes.\n\n :param dict: an ArchivesSpace subnote.\n\n :returns: a list containing subnote content.\n :rtype: list\n '
if (subnote['jsonmodel_type'] in ['note_orderedlist', 'note_index']):
content = subnote['items']
elif (subnote['jsonmodel_type'] in ['note_chronology', 'note_definedlist']):
content = []
for k in subnote['items']:
for i in k:
content += (k.get(i) if isinstance(k.get(i), list) else [k.get(i)])
else:
content = (subnote['content'] if isinstance(subnote['content'], list) else [subnote['content']])
return content |
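parse_subnote branches on jsonmodel_type; a sketch with two hypothetical ArchivesSpace-shaped subnote dicts, assuming the package is importable:

```python
from rac_aspace.data_helpers import parse_subnote

ordered = {"jsonmodel_type": "note_orderedlist", "items": ["Box 1", "Box 2"]}
text = {"jsonmodel_type": "note_text", "content": "Open for research."}
print(parse_subnote(ordered))  # ['Box 1', 'Box 2']
print(parse_subnote(text))     # ['Open for research.']
```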
def read_mo(fileobj):
'Read a binary MO file from the given file-like object and return a\n corresponding `Catalog` object.\n\n :param fileobj: the file-like object to read the MO file from\n\n :note: The implementation of this function is heavily based on the\n ``GNUTranslations._parse`` method of the ``gettext`` module in the\n standard library.\n '
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
magic = unpack('<I', buf[:4])[0]
if (magic == LE_MAGIC):
(version, msgcount, origidx, transidx) = unpack('<4I', buf[4:20])
ii = '<II'
elif (magic == BE_MAGIC):
(version, msgcount, origidx, transidx) = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
for i in range_type(0, msgcount):
(mlen, moff) = unpack(ii, buf[origidx:(origidx + 8)])
mend = (moff + mlen)
(tlen, toff) = unpack(ii, buf[transidx:(transidx + 8)])
tend = (toff + tlen)
if ((mend < buflen) and (tend < buflen)):
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
if (mlen == 0):
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if (not item):
continue
if (b':' in item):
(key, value) = item.split(b':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += (b'\n' + item)
if (b'\x04' in msg):
(ctxt, msg) = msg.split(b'\x04')
else:
ctxt = None
if (b'\x00' in msg):
msg = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
elif catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog | -3,346,970,765,371,034,000 | Read a binary MO file from the given file-like object and return a
corresponding `Catalog` object.
:param fileobj: the file-like object to read the MO file from
:note: The implementation of this function is heavily based on the
``GNUTranslations._parse`` method of the ``gettext`` module in the
standard library. | desktop/core/ext-py/Babel-2.5.1/babel/messages/mofile.py | read_mo | 10088/hue | python | def read_mo(fileobj):
'Read a binary MO file from the given file-like object and return a\n corresponding `Catalog` object.\n\n :param fileobj: the file-like object to read the MO file from\n\n :note: The implementation of this function is heavily based on the\n ``GNUTranslations._parse`` method of the ``gettext`` module in the\n standard library.\n '
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
magic = unpack('<I', buf[:4])[0]
if (magic == LE_MAGIC):
(version, msgcount, origidx, transidx) = unpack('<4I', buf[4:20])
ii = '<II'
elif (magic == BE_MAGIC):
(version, msgcount, origidx, transidx) = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
for i in range_type(0, msgcount):
(mlen, moff) = unpack(ii, buf[origidx:(origidx + 8)])
mend = (moff + mlen)
(tlen, toff) = unpack(ii, buf[transidx:(transidx + 8)])
tend = (toff + tlen)
if ((mend < buflen) and (tend < buflen)):
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
if (mlen == 0):
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if (not item):
continue
if (b':' in item):
(key, value) = item.split(b':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += (b'\n' + item)
if (b'\x04' in msg):
(ctxt, msg) = msg.split(b'\x04')
else:
ctxt = None
if (b'\x00' in msg):
msg = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
elif catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog |
def write_mo(fileobj, catalog, use_fuzzy=False):
'Write a catalog to the specified file-like object using the GNU MO file\n format.\n\n >>> import sys\n >>> from babel.messages import Catalog\n >>> from gettext import GNUTranslations\n >>> from babel._compat import BytesIO\n\n >>> catalog = Catalog(locale=\'en_US\')\n >>> catalog.add(\'foo\', \'Voh\')\n <Message ...>\n >>> catalog.add((u\'bar\', u\'baz\'), (u\'Bahr\', u\'Batz\'))\n <Message ...>\n >>> catalog.add(\'fuz\', \'Futz\', flags=[\'fuzzy\'])\n <Message ...>\n >>> catalog.add(\'Fizz\', \'\')\n <Message ...>\n >>> catalog.add((\'Fuzz\', \'Fuzzes\'), (\'\', \'\'))\n <Message ...>\n >>> buf = BytesIO()\n\n >>> write_mo(buf, catalog)\n >>> x = buf.seek(0)\n >>> translations = GNUTranslations(fp=buf)\n >>> if sys.version_info[0] >= 3:\n ... translations.ugettext = translations.gettext\n ... translations.ungettext = translations.ngettext\n >>> translations.ugettext(\'foo\')\n u\'Voh\'\n >>> translations.ungettext(\'bar\', \'baz\', 1)\n u\'Bahr\'\n >>> translations.ungettext(\'bar\', \'baz\', 2)\n u\'Batz\'\n >>> translations.ugettext(\'fuz\')\n u\'fuz\'\n >>> translations.ugettext(\'Fizz\')\n u\'Fizz\'\n >>> translations.ugettext(\'Fuzz\')\n u\'Fuzz\'\n >>> translations.ugettext(\'Fuzzes\')\n u\'Fuzzes\'\n\n :param fileobj: the file-like object to write to\n :param catalog: the `Catalog` instance\n :param use_fuzzy: whether translations marked as "fuzzy" should be included\n in the output\n '
messages = list(catalog)
if (not use_fuzzy):
messages[1:] = [m for m in messages[1:] if (not m.fuzzy)]
messages.sort()
ids = strs = b''
offsets = []
for message in messages:
if message.pluralizable:
msgid = b'\x00'.join([msgid.encode(catalog.charset) for msgid in message.id])
msgstrs = []
for (idx, string) in enumerate(message.string):
if (not string):
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = b'\x00'.join([msgstr.encode(catalog.charset) for msgstr in msgstrs])
else:
msgid = message.id.encode(catalog.charset)
if (not message.string):
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = b'\x04'.join([message.context.encode(catalog.charset), msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += (msgid + b'\x00')
strs += (msgstr + b'\x00')
keystart = ((7 * 4) + (16 * len(messages)))
valuestart = (keystart + len(ids))
koffsets = []
voffsets = []
for (o1, l1, o2, l2) in offsets:
koffsets += [l1, (o1 + keystart)]
voffsets += [l2, (o2 + valuestart)]
offsets = (koffsets + voffsets)
fileobj.write((((struct.pack('Iiiiiii', LE_MAGIC, 0, len(messages), (7 * 4), ((7 * 4) + (len(messages) * 8)), 0, 0) + array_tobytes(array.array('i', offsets))) + ids) + strs)) | 1,580,142,548,688,505,900 | Write a catalog to the specified file-like object using the GNU MO file
format.
>>> import sys
>>> from babel.messages import Catalog
>>> from gettext import GNUTranslations
>>> from babel._compat import BytesIO
>>> catalog = Catalog(locale='en_US')
>>> catalog.add('foo', 'Voh')
<Message ...>
>>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
<Message ...>
>>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
<Message ...>
>>> catalog.add('Fizz', '')
<Message ...>
>>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
<Message ...>
>>> buf = BytesIO()
>>> write_mo(buf, catalog)
>>> x = buf.seek(0)
>>> translations = GNUTranslations(fp=buf)
>>> if sys.version_info[0] >= 3:
... translations.ugettext = translations.gettext
... translations.ungettext = translations.ngettext
>>> translations.ugettext('foo')
u'Voh'
>>> translations.ungettext('bar', 'baz', 1)
u'Bahr'
>>> translations.ungettext('bar', 'baz', 2)
u'Batz'
>>> translations.ugettext('fuz')
u'fuz'
>>> translations.ugettext('Fizz')
u'Fizz'
>>> translations.ugettext('Fuzz')
u'Fuzz'
>>> translations.ugettext('Fuzzes')
u'Fuzzes'
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param use_fuzzy: whether translations marked as "fuzzy" should be included
in the output | desktop/core/ext-py/Babel-2.5.1/babel/messages/mofile.py | write_mo | 10088/hue | python | def write_mo(fileobj, catalog, use_fuzzy=False):
'Write a catalog to the specified file-like object using the GNU MO file\n format.\n\n >>> import sys\n >>> from babel.messages import Catalog\n >>> from gettext import GNUTranslations\n >>> from babel._compat import BytesIO\n\n >>> catalog = Catalog(locale=\'en_US\')\n >>> catalog.add(\'foo\', \'Voh\')\n <Message ...>\n >>> catalog.add((u\'bar\', u\'baz\'), (u\'Bahr\', u\'Batz\'))\n <Message ...>\n >>> catalog.add(\'fuz\', \'Futz\', flags=[\'fuzzy\'])\n <Message ...>\n >>> catalog.add(\'Fizz\', \'\')\n <Message ...>\n >>> catalog.add((\'Fuzz\', \'Fuzzes\'), (\'\', \'\'))\n <Message ...>\n >>> buf = BytesIO()\n\n >>> write_mo(buf, catalog)\n >>> x = buf.seek(0)\n >>> translations = GNUTranslations(fp=buf)\n >>> if sys.version_info[0] >= 3:\n ... translations.ugettext = translations.gettext\n ... translations.ungettext = translations.ngettext\n >>> translations.ugettext(\'foo\')\n u\'Voh\'\n >>> translations.ungettext(\'bar\', \'baz\', 1)\n u\'Bahr\'\n >>> translations.ungettext(\'bar\', \'baz\', 2)\n u\'Batz\'\n >>> translations.ugettext(\'fuz\')\n u\'fuz\'\n >>> translations.ugettext(\'Fizz\')\n u\'Fizz\'\n >>> translations.ugettext(\'Fuzz\')\n u\'Fuzz\'\n >>> translations.ugettext(\'Fuzzes\')\n u\'Fuzzes\'\n\n :param fileobj: the file-like object to write to\n :param catalog: the `Catalog` instance\n :param use_fuzzy: whether translations marked as "fuzzy" should be included\n in the output\n '
messages = list(catalog)
if (not use_fuzzy):
messages[1:] = [m for m in messages[1:] if (not m.fuzzy)]
messages.sort()
ids = strs = b''
offsets = []
for message in messages:
if message.pluralizable:
msgid = b'\x00'.join([msgid.encode(catalog.charset) for msgid in message.id])
msgstrs = []
for (idx, string) in enumerate(message.string):
if (not string):
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = b'\x00'.join([msgstr.encode(catalog.charset) for msgstr in msgstrs])
else:
msgid = message.id.encode(catalog.charset)
if (not message.string):
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = b'\x04'.join([message.context.encode(catalog.charset), msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += (msgid + b'\x00')
strs += (msgstr + b'\x00')
keystart = ((7 * 4) + (16 * len(messages)))
valuestart = (keystart + len(ids))
koffsets = []
voffsets = []
for (o1, l1, o2, l2) in offsets:
koffsets += [l1, (o1 + keystart)]
voffsets += [l2, (o2 + valuestart)]
offsets = (koffsets + voffsets)
fileobj.write((((struct.pack('Iiiiiii', LE_MAGIC, 0, len(messages), (7 * 4), ((7 * 4) + (len(messages) * 8)), 0, 0) + array_tobytes(array.array('i', offsets))) + ids) + strs)) |
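A round-trip sketch combining the two Babel helpers above: write a small catalog to an in-memory buffer with write_mo, then parse it back with read_mo. This mirrors the doctest in write_mo's docstring but exercises read_mo directly:

```python
from io import BytesIO
from babel.messages.catalog import Catalog
from babel.messages.mofile import read_mo, write_mo

catalog = Catalog(locale="en_US")
catalog.add("foo", "Voh")

buf = BytesIO()
write_mo(buf, catalog)  # serialize to the GNU MO binary format
buf.seek(0)

parsed = read_mo(buf)   # parse the buffer back into a Catalog
print(parsed.get("foo").string)  # Voh
```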
def __init__(self, alias=None, duration=None, id=None, name=None, next_run=None, next_snapshot=None, path=None, pattern=None, schedule=None):
'SnapshotScheduleExtended - a model defined in Swagger'
self._alias = None
self._duration = None
self._id = None
self._name = None
self._next_run = None
self._next_snapshot = None
self._path = None
self._pattern = None
self._schedule = None
self.discriminator = None
if (alias is not None):
self.alias = alias
if (duration is not None):
self.duration = duration
if (id is not None):
self.id = id
if (name is not None):
self.name = name
if (next_run is not None):
self.next_run = next_run
if (next_snapshot is not None):
self.next_snapshot = next_snapshot
if (path is not None):
self.path = path
if (pattern is not None):
self.pattern = pattern
if (schedule is not None):
self.schedule = schedule | 8,188,369,046,617,406,000 | SnapshotScheduleExtended - a model defined in Swagger | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | __init__ | Isilon/isilon_sdk_python | python | def __init__(self, alias=None, duration=None, id=None, name=None, next_run=None, next_snapshot=None, path=None, pattern=None, schedule=None):
self._alias = None
self._duration = None
self._id = None
self._name = None
self._next_run = None
self._next_snapshot = None
self._path = None
self._pattern = None
self._schedule = None
self.discriminator = None
if (alias is not None):
self.alias = alias
if (duration is not None):
self.duration = duration
if (id is not None):
self.id = id
if (name is not None):
self.name = name
if (next_run is not None):
self.next_run = next_run
if (next_snapshot is not None):
self.next_snapshot = next_snapshot
if (path is not None):
self.path = path
if (pattern is not None):
self.pattern = pattern
if (schedule is not None):
self.schedule = schedule |
@property
def alias(self):
'Gets the alias of this SnapshotScheduleExtended. # noqa: E501\n\n Alias name to create for each snapshot. # noqa: E501\n\n :return: The alias of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._alias | 8,222,679,228,824,724,000 | Gets the alias of this SnapshotScheduleExtended. # noqa: E501
Alias name to create for each snapshot. # noqa: E501
:return: The alias of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | alias | Isilon/isilon_sdk_python | python | @property
def alias(self):
'Gets the alias of this SnapshotScheduleExtended. # noqa: E501\n\n Alias name to create for each snapshot. # noqa: E501\n\n :return: The alias of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._alias |
@alias.setter
def alias(self, alias):
'Sets the alias of this SnapshotScheduleExtended.\n\n Alias name to create for each snapshot. # noqa: E501\n\n :param alias: The alias of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._alias = alias | 7,765,599,120,718,993,000 | Sets the alias of this SnapshotScheduleExtended.
Alias name to create for each snapshot. # noqa: E501
:param alias: The alias of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | alias | Isilon/isilon_sdk_python | python | @alias.setter
def alias(self, alias):
'Sets the alias of this SnapshotScheduleExtended.\n\n Alias name to create for each snapshot. # noqa: E501\n\n :param alias: The alias of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._alias = alias |
@property
def duration(self):
'Gets the duration of this SnapshotScheduleExtended. # noqa: E501\n\n Time in seconds added to creation time to construction expiration time. # noqa: E501\n\n :return: The duration of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._duration | 5,132,855,313,313,818,000 | Gets the duration of this SnapshotScheduleExtended. # noqa: E501
Time in seconds added to creation time to construction expiration time. # noqa: E501
:return: The duration of this SnapshotScheduleExtended. # noqa: E501
:rtype: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | duration | Isilon/isilon_sdk_python | python | @property
def duration(self):
'Gets the duration of this SnapshotScheduleExtended. # noqa: E501\n\n Time in seconds added to creation time to construction expiration time. # noqa: E501\n\n :return: The duration of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._duration |
@duration.setter
def duration(self, duration):
'Sets the duration of this SnapshotScheduleExtended.\n\n Time in seconds added to creation time to construction expiration time. # noqa: E501\n\n :param duration: The duration of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._duration = duration | -3,238,083,843,216,095,700 | Sets the duration of this SnapshotScheduleExtended.
Time in seconds added to creation time to construction expiration time. # noqa: E501
:param duration: The duration of this SnapshotScheduleExtended. # noqa: E501
:type: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | duration | Isilon/isilon_sdk_python | python | @duration.setter
def duration(self, duration):
'Sets the duration of this SnapshotScheduleExtended.\n\n Time in seconds added to creation time to construction expiration time. # noqa: E501\n\n :param duration: The duration of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._duration = duration |
@property
def id(self):
'Gets the id of this SnapshotScheduleExtended. # noqa: E501\n\n The system ID given to the schedule. # noqa: E501\n\n :return: The id of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._id | -4,511,242,532,677,356,000 | Gets the id of this SnapshotScheduleExtended. # noqa: E501
The system ID given to the schedule. # noqa: E501
:return: The id of this SnapshotScheduleExtended. # noqa: E501
:rtype: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | id | Isilon/isilon_sdk_python | python | @property
def id(self):
'Gets the id of this SnapshotScheduleExtended. # noqa: E501\n\n The system ID given to the schedule. # noqa: E501\n\n :return: The id of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._id |
@id.setter
def id(self, id):
'Sets the id of this SnapshotScheduleExtended.\n\n The system ID given to the schedule. # noqa: E501\n\n :param id: The id of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._id = id | 5,140,474,921,021,432,000 | Sets the id of this SnapshotScheduleExtended.
The system ID given to the schedule. # noqa: E501
:param id: The id of this SnapshotScheduleExtended. # noqa: E501
:type: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | id | Isilon/isilon_sdk_python | python | @id.setter
def id(self, id):
'Sets the id of this SnapshotScheduleExtended.\n\n The system ID given to the schedule. # noqa: E501\n\n :param id: The id of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._id = id |
@property
def name(self):
'Gets the name of this SnapshotScheduleExtended. # noqa: E501\n\n The schedule name. # noqa: E501\n\n :return: The name of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._name | 4,539,151,345,437,986,000 | Gets the name of this SnapshotScheduleExtended. # noqa: E501
The schedule name. # noqa: E501
:return: The name of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | name | Isilon/isilon_sdk_python | python | @property
def name(self):
'Gets the name of this SnapshotScheduleExtended. # noqa: E501\n\n The schedule name. # noqa: E501\n\n :return: The name of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._name |
@name.setter
def name(self, name):
'Sets the name of this SnapshotScheduleExtended.\n\n The schedule name. # noqa: E501\n\n :param name: The name of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._name = name | -8,044,229,968,498,617,000 | Sets the name of this SnapshotScheduleExtended.
The schedule name. # noqa: E501
:param name: The name of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | name | Isilon/isilon_sdk_python | python | @name.setter
def name(self, name):
'Sets the name of this SnapshotScheduleExtended.\n\n The schedule name. # noqa: E501\n\n :param name: The name of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._name = name |
@property
def next_run(self):
'Gets the next_run of this SnapshotScheduleExtended. # noqa: E501\n\n Unix Epoch time of next snapshot to be created. # noqa: E501\n\n :return: The next_run of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._next_run | -2,930,780,184,667,441,700 | Gets the next_run of this SnapshotScheduleExtended. # noqa: E501
Unix Epoch time of next snapshot to be created. # noqa: E501
:return: The next_run of this SnapshotScheduleExtended. # noqa: E501
:rtype: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | next_run | Isilon/isilon_sdk_python | python | @property
def next_run(self):
'Gets the next_run of this SnapshotScheduleExtended. # noqa: E501\n\n Unix Epoch time of next snapshot to be created. # noqa: E501\n\n :return: The next_run of this SnapshotScheduleExtended. # noqa: E501\n :rtype: int\n '
return self._next_run |
@next_run.setter
def next_run(self, next_run):
'Sets the next_run of this SnapshotScheduleExtended.\n\n Unix Epoch time of next snapshot to be created. # noqa: E501\n\n :param next_run: The next_run of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._next_run = next_run | 3,923,820,394,412,975,000 | Sets the next_run of this SnapshotScheduleExtended.
Unix Epoch time of next snapshot to be created. # noqa: E501
:param next_run: The next_run of this SnapshotScheduleExtended. # noqa: E501
:type: int | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | next_run | Isilon/isilon_sdk_python | python | @next_run.setter
def next_run(self, next_run):
'Sets the next_run of this SnapshotScheduleExtended.\n\n Unix Epoch time of next snapshot to be created. # noqa: E501\n\n :param next_run: The next_run of this SnapshotScheduleExtended. # noqa: E501\n :type: int\n '
self._next_run = next_run |
@property
def next_snapshot(self):
'Gets the next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n\n Formatted name (see pattern) of next snapshot to be created. # noqa: E501\n\n :return: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._next_snapshot | 4,249,285,130,094,383,000 | Gets the next_snapshot of this SnapshotScheduleExtended. # noqa: E501
Formatted name (see pattern) of next snapshot to be created. # noqa: E501
:return: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | next_snapshot | Isilon/isilon_sdk_python | python | @property
def next_snapshot(self):
'Gets the next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n\n Formatted name (see pattern) of next snapshot to be created. # noqa: E501\n\n :return: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._next_snapshot |
@next_snapshot.setter
def next_snapshot(self, next_snapshot):
'Sets the next_snapshot of this SnapshotScheduleExtended.\n\n Formatted name (see pattern) of next snapshot to be created. # noqa: E501\n\n :param next_snapshot: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._next_snapshot = next_snapshot | -7,459,489,476,555,487,000 | Sets the next_snapshot of this SnapshotScheduleExtended.
Formatted name (see pattern) of next snapshot to be created. # noqa: E501
:param next_snapshot: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | next_snapshot | Isilon/isilon_sdk_python | python | @next_snapshot.setter
def next_snapshot(self, next_snapshot):
'Sets the next_snapshot of this SnapshotScheduleExtended.\n\n Formatted name (see pattern) of next snapshot to be created. # noqa: E501\n\n :param next_snapshot: The next_snapshot of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._next_snapshot = next_snapshot |
@property
def path(self):
'Gets the path of this SnapshotScheduleExtended. # noqa: E501\n\n The /ifs path snapshotted. # noqa: E501\n\n :return: The path of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._path | -5,556,848,542,669,554,000 | Gets the path of this SnapshotScheduleExtended. # noqa: E501
The /ifs path snapshotted. # noqa: E501
:return: The path of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | path | Isilon/isilon_sdk_python | python | @property
def path(self):
'Gets the path of this SnapshotScheduleExtended. # noqa: E501\n\n The /ifs path snapshotted. # noqa: E501\n\n :return: The path of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._path |
@path.setter
def path(self, path):
'Sets the path of this SnapshotScheduleExtended.\n\n The /ifs path snapshotted. # noqa: E501\n\n :param path: The path of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._path = path | 6,009,263,888,184,312,000 | Sets the path of this SnapshotScheduleExtended.
The /ifs path snapshotted. # noqa: E501
:param path: The path of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | path | Isilon/isilon_sdk_python | python | @path.setter
def path(self, path):
'Sets the path of this SnapshotScheduleExtended.\n\n The /ifs path snapshotted. # noqa: E501\n\n :param path: The path of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._path = path |
@property
def pattern(self):
'Gets the pattern of this SnapshotScheduleExtended. # noqa: E501\n\n Pattern expanded with strftime to create snapshot name. # noqa: E501\n\n :return: The pattern of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._pattern | 9,163,341,005,801,752,000 | Gets the pattern of this SnapshotScheduleExtended. # noqa: E501
Pattern expanded with strftime to create snapshot name. # noqa: E501
:return: The pattern of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | pattern | Isilon/isilon_sdk_python | python | @property
def pattern(self):
'Gets the pattern of this SnapshotScheduleExtended. # noqa: E501\n\n Pattern expanded with strftime to create snapshot name. # noqa: E501\n\n :return: The pattern of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._pattern |
@pattern.setter
def pattern(self, pattern):
'Sets the pattern of this SnapshotScheduleExtended.\n\n Pattern expanded with strftime to create snapshot name. # noqa: E501\n\n :param pattern: The pattern of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._pattern = pattern | 1,174,243,700,243,047,700 | Sets the pattern of this SnapshotScheduleExtended.
Pattern expanded with strftime to create snapshot name. # noqa: E501
:param pattern: The pattern of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | pattern | Isilon/isilon_sdk_python | python | @pattern.setter
def pattern(self, pattern):
'Sets the pattern of this SnapshotScheduleExtended.\n\n Pattern expanded with strftime to create snapshot name. # noqa: E501\n\n :param pattern: The pattern of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._pattern = pattern |
@property
def schedule(self):
'Gets the schedule of this SnapshotScheduleExtended. # noqa: E501\n\n The isidate compatible natural language description of the schedule. # noqa: E501\n\n :return: The schedule of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._schedule | 9,038,659,744,786,501,000 | Gets the schedule of this SnapshotScheduleExtended. # noqa: E501
The isidate compatible natural language description of the schedule. # noqa: E501
:return: The schedule of this SnapshotScheduleExtended. # noqa: E501
:rtype: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | schedule | Isilon/isilon_sdk_python | python | @property
def schedule(self):
'Gets the schedule of this SnapshotScheduleExtended. # noqa: E501\n\n The isidate compatible natural language description of the schedule. # noqa: E501\n\n :return: The schedule of this SnapshotScheduleExtended. # noqa: E501\n :rtype: str\n '
return self._schedule |
@schedule.setter
def schedule(self, schedule):
'Sets the schedule of this SnapshotScheduleExtended.\n\n The isidate compatible natural language description of the schedule. # noqa: E501\n\n :param schedule: The schedule of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._schedule = schedule | -6,909,454,171,113,551,000 | Sets the schedule of this SnapshotScheduleExtended.
The isidate compatible natural language description of the schedule. # noqa: E501
:param schedule: The schedule of this SnapshotScheduleExtended. # noqa: E501
:type: str | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | schedule | Isilon/isilon_sdk_python | python | @schedule.setter
def schedule(self, schedule):
'Sets the schedule of this SnapshotScheduleExtended.\n\n The isidate compatible natural language description of the schedule. # noqa: E501\n\n :param schedule: The schedule of this SnapshotScheduleExtended. # noqa: E501\n :type: str\n '
self._schedule = schedule |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | -2,772,352,302,133,010,000 | Returns the model properties as a dict | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | to_dict | Isilon/isilon_sdk_python | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result |
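A construction sketch for the swagger-generated model above, assuming the package layout matches the path column; the field values are invented:

```python
from isi_sdk_8_1_0.models.snapshot_schedule_extended import SnapshotScheduleExtended

schedule = SnapshotScheduleExtended(name="daily", path="/ifs/data",
                                    schedule="every day at 00:00")
print(schedule.to_dict())  # dict of all model properties, unset ones left as None
```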
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | to_str | Isilon/isilon_sdk_python | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | __repr__ | Isilon/isilon_sdk_python | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, SnapshotScheduleExtended)):
return False
return (self.__dict__ == other.__dict__) | 5,032,486,065,264,618,000 | Returns true if both objects are equal | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | __eq__ | Isilon/isilon_sdk_python | python | def __eq__(self, other):
if (not isinstance(other, SnapshotScheduleExtended)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | isi_sdk_8_1_0/isi_sdk_8_1_0/models/snapshot_schedule_extended.py | __ne__ | Isilon/isilon_sdk_python | python | def __ne__(self, other):
return (not (self == other)) |
def apply_discount(line, discount, quantity, offer=None):
'\n Apply a given discount to the passed basket\n '
line.discount(discount, quantity, incl_tax=False, offer=offer) | 1,955,075,466,180,244,000 | Apply a given discount to the passed basket | src/oscar/apps/offer/benefits.py | apply_discount | AMuratTuran/mkn | python | def apply_discount(line, discount, quantity, offer=None):
'\n \n '
line.discount(discount, quantity, incl_tax=False, offer=offer) |
def get_domain_name():
'\n Returns the domain name of the current configuration from a config file\n \n Returns\n -------\n string\n the domain name\n '
with open('/var/www/logic_webapp/webapp_config') as file:
line = file.readline()
domain = line.split('=')[1].rstrip()
return domain | 8,050,748,681,402,970,000 | Returns the domain name of the current configuration from a config file
Returns
-------
string
the domain name | webapp/external_access.py | get_domain_name | Quoding/petricore | python | def get_domain_name():
'\n Returns the domain name of the current configuration from a config file\n \n Returns\n -------\n string\n the domain name\n '
with open('/var/www/logic_webapp/webapp_config') as file:
line = file.readline()
domain = line.split('=')[1].rstrip()
return domain |
def create_slurm_db_connection(host, port, user, password, db):
'\n Creates the connection to the database (MySQL) so it can be queried\n \n Parameters\n ----------\n host : string\n hostname on which is located the DB\n port : integer\n port on which the connection is to be established\n user : string\n user name with which the connection is to be established\n password : string\n password of the user on the database (of the user `user`)\n db : string\n name of the database which will be queried\n\n Returns\n -------\n PyMySQL Connection object\n '
connection = pymysql.connect(host=host, port=port, user=user, password=password, db=db)
print('[+] Slurm accounting DB connection is up! [+]')
return connection | 9,200,343,461,986,738,000 | Creates the connection to the database (MySQL) so it can be queried
Parameters
----------
host : string
hostname on which is located the DB
port : integer
port on which the connection is to be established
user : string
user name with which the connection is to be established
password : string
password of the user on the database (of the user `user`)
db : string
name of the database which will be queried
Returns
-------
PyMySQL Connection object | webapp/external_access.py | create_slurm_db_connection | Quoding/petricore | python | def create_slurm_db_connection(host, port, user, password, db):
'\n Creates the connection to the database (MySQL) so it can be queried\n \n Parameters\n ----------\n host : string\n hostname on which is located the DB\n port : integer\n port on which the connection is to be established\n user : string\n user name with which the connection is to be established\n password : string\n password of the user on the database (of the user `user`)\n db : string\n name of the database which will be queried\n\n Returns\n -------\n PyMySQL Connection object\n '
connection = pymysql.connect(host=host, port=port, user=user, password=password, db=db)
print('[+] Slurm accounting DB connection is up! [+]')
return connection |
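A hypothetical call to create_slurm_db_connection; the host, credentials, and the table name in the query are placeholders, and a reachable MySQL server with the Slurm accounting database is assumed:

```python
conn = create_slurm_db_connection("dbhost.example.org", 3306, "slurm", "secret", "slurm_acct_db")
with conn.cursor() as cursor:
    cursor.execute("SELECT COUNT(*) FROM cluster_job_table")  # placeholder table name
    print(cursor.fetchone())
conn.close()
```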
def create_ldap_connection(host):
'\n Creates an LDAP connection object with a given hostname\n\n Parameters\n ----------\n host : hostname with the LDAP database in the form of (ldap://host)\n\n Returns\n -------\n LDAP connection object\n '
connection = ldap.initialize(host)
connection.set_option(ldap.OPT_REFERRALS, 0)
connection.simple_bind_s()
return connection | -2,518,563,131,765,269,000 | Creates an LDAP connection object with a given hostname
Parameters
----------
host : hostname with the LDAP database in the form of (ldap://host)
Returns
-------
LDAP connection object | webapp/external_access.py | create_ldap_connection | Quoding/petricore | python | def create_ldap_connection(host):
'\n Creates an LDAP connection object with a given hostname\n\n Parameters\n ----------\n host : hostname with the LDAP database in the form of (ldap://host)\n\n Returns\n -------\n LDAP connection object\n '
connection = ldap.initialize(host)
connection.set_option(ldap.OPT_REFERRALS, 0)
connection.simple_bind_s()
return connection |
def noun_chunks(doclike: Union[(Doc, Span)]) -> Iterator[Tuple[(int, int, int)]]:
'\n Detect base noun phrases from a dependency parse. Works on both Doc and Span.\n '
labels = ['oprd', 'nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'dative', 'appos', 'attr', 'ROOT']
doc = doclike.doc
if (not doc.has_annotation('DEP')):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add('conj')
np_label = doc.vocab.strings.add('NP')
prev_end = (- 1)
for (i, word) in enumerate(doclike):
if (word.pos not in (NOUN, PROPN, PRON)):
continue
if (word.left_edge.i <= prev_end):
continue
if (word.dep in np_deps):
prev_end = word.i
(yield (word.left_edge.i, (word.i + 1), np_label))
elif (word.dep == conj):
head = word.head
while ((head.dep == conj) and (head.head.i < head.i)):
head = head.head
if (head.dep in np_deps):
prev_end = word.i
(yield (word.left_edge.i, (word.i + 1), np_label)) | 6,718,516,851,564,815,000 | Detect base noun phrases from a dependency parse. Works on both Doc and Span. | spacy/lang/en/syntax_iterators.py | noun_chunks | Alan-love/spaCy | python | def noun_chunks(doclike: Union[(Doc, Span)]) -> Iterator[Tuple[(int, int, int)]]:
'\n \n '
labels = ['oprd', 'nsubj', 'dobj', 'nsubjpass', 'pcomp', 'pobj', 'dative', 'appos', 'attr', 'ROOT']
doc = doclike.doc
if (not doc.has_annotation('DEP')):
raise ValueError(Errors.E029)
np_deps = [doc.vocab.strings.add(label) for label in labels]
conj = doc.vocab.strings.add('conj')
np_label = doc.vocab.strings.add('NP')
prev_end = (- 1)
for (i, word) in enumerate(doclike):
if (word.pos not in (NOUN, PROPN, PRON)):
continue
if (word.left_edge.i <= prev_end):
continue
if (word.dep in np_deps):
prev_end = word.i
(yield (word.left_edge.i, (word.i + 1), np_label))
elif (word.dep == conj):
head = word.head
while ((head.dep == conj) and (head.head.i < head.i)):
head = head.head
if (head.dep in np_deps):
prev_end = word.i
(yield (word.left_edge.i, (word.i + 1), np_label)) |
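The iterator above is what spaCy runs behind Doc.noun_chunks for English. An illustrative call, assuming an English pipeline such as en_core_web_sm is installed; the exact chunks depend on the parser:

```python
import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumped over the lazy dog.")
print([chunk.text for chunk in doc.noun_chunks])
# ['The quick brown fox', 'the lazy dog']
```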
async def connect(self, channel):
'\n Connects bot to the given voice channel. If it is not already connected.\n :param channel: The channel from which the user send the command\n '
if ((self.voiceChannel is None) or (not self.voiceChannel.is_connected())):
self.voiceChannel = (await channel.connect()) | -1,948,810,983,289,389,600 | Connects bot to the given voice channel. If it is not already connected.
:param channel: The channel from which the user send the command | modules/youtube_music.py | connect | mavroudo/jarvis-discord | python | async def connect(self, channel):
'\n Connects bot to the given voice channel. If it is not already connected.\n :param channel: The channel from which the user send the command\n '
if ((self.voiceChannel is None) or (not self.voiceChannel.is_connected())):
self.voiceChannel = (await channel.connect()) |
async def disconnect(self):
'\n Disconnects from the channel that the bot is already connected. If there is no such a channel,\n this function will simply do nothing\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected()):
(await self.voiceChannel.disconnect()) | 3,291,842,844,855,340,500 | Disconnects from the channel that the bot is already connected. If there is no such a channel,
this function will simply do nothing | modules/youtube_music.py | disconnect | mavroudo/jarvis-discord | python | async def disconnect(self):
'\n Disconnects from the channel that the bot is already connected. If there is no such a channel,\n this function will simply do nothing\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected()):
(await self.voiceChannel.disconnect()) |
def getNextSong(self):
'\n If the queue is not empty this function will remove the first song from the queue and return it\n :return: the next song of the queue, or None if the queue is empty\n '
if self.queue:
return self.queue.pop(0)
else:
return None | -6,250,506,797,179,005,000 | If the queue is not empty this function will remove the first song from the queue and return it
:return: the next song of the queue, or None if the queue is empty | modules/youtube_music.py | getNextSong | mavroudo/jarvis-discord | python | def getNextSong(self):
'\n If the queue is not empty this function will remove the first song from the queue and return it\n :return: the next song of the queue, or None if the queue is empty\n '
if self.queue:
return self.queue.pop(0)
else:
return None |
def clear_folder(self):
'\n Because the songs will be downloaded, it is important to delete them if there are not longer needed.\n This function deletes the songs that are not in the queue (not one of the upcoming songs)\n '
for song in os.listdir('songs/'):
if (('songs/' + song) not in self.queue):
os.remove(('songs/' + song)) | -8,674,669,685,397,055,000 | Because the songs will be downloaded, it is important to delete them if there are not longer needed.
This function deletes the songs that are not in the queue (not one of the upcoming songs) | modules/youtube_music.py | clear_folder | mavroudo/jarvis-discord | python | def clear_folder(self):
'\n Because the songs will be downloaded, it is important to delete them if there are not longer needed.\n This function deletes the songs that are not in the queue (not one of the upcoming songs)\n '
for song in os.listdir('songs/'):
if (('songs/' + song) not in self.queue):
os.remove(('songs/' + song)) |
async def add_song(self, url, ctx):
'\n Add a new song from the youtube in the queue. It will not be downloaded if it is already in the songs file\n :param url: The url of the youtube song\n :param ctx: The channel from which the user send the command\n '
with ytdl.YoutubeDL(self.ydl_opts) as ydl:
info_dict = ydl.extract_info(url, download=False)
title = (((('songs/' + info_dict['title']) + '-') + info_dict['id']) + '.mp3')
if (title not in self.queue):
(await ctx.send('Your song is downloading now!'))
ydl.extract_info(url, download=True)
self.queue.append(title)
if ((self.voiceChannel is None) or (not self.voiceChannel.is_connected()) or (not self.voiceChannel.is_playing())):
(await ctx.send('Your song has added to the queue, use $play to start the party!!'))
else:
(await ctx.send('Your song has added to the queue')) | 8,575,287,853,276,897,000 | Add a new song from YouTube to the queue. It will not be downloaded if it is already in the songs file
:param url: The url of the youtube song
:param ctx: The channel from which the user sent the command | modules/youtube_music.py | add_song | mavroudo/jarvis-discord | python | async def add_song(self, url, ctx):
'\n Add a new song from the youtube in the queue. It will not be downloaded if it is already in the songs file\n :param url: The url of the youtube song\n :param ctx: The channel from which the user send the command\n '
with ytdl.YoutubeDL(self.ydl_opts) as ydl:
info_dict = ydl.extract_info(url, download=False)
title = (((('songs/' + info_dict['title']) + '-') + info_dict['id']) + '.mp3')
if (title not in self.queue):
(await ctx.send('Your song is downloading now!'))
ydl.extract_info(url, download=True)
self.queue.append(title)
if ((self.voiceChannel is None) or (not self.voiceChannel.is_connected()) or (not self.voiceChannel.is_playing())):
(await ctx.send('Your song has added to the queue, use $play to start the party!!'))
else:
(await ctx.send('Your song has added to the queue')) |
def load_next_song(self):
'\n This will create an FFMPEG object and start playing it in the voice channel\n '
if ((not self.voiceChannel.is_playing()) and self.queue):
audio_source = discord.FFmpegPCMAudio(self.getNextSong())
self.voiceChannel.play(audio_source, after=None) | 1,219,452,789,580,571,600 | This will create an FFMPEG object and start playing it in the voice channel | modules/youtube_music.py | load_next_song | mavroudo/jarvis-discord | python | def load_next_song(self):
'\n \n '
if ((not self.voiceChannel.is_playing()) and self.queue):
audio_source = discord.FFmpegPCMAudio(self.getNextSong())
self.voiceChannel.play(audio_source, after=None) |
async def pause_song(self, ctx):
'\n Pauses a song that is already being played, or sends a message if there is no such song\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing()):
self.voiceChannel.pause()
else:
(await ctx.send('There is no song playing in order to pause it')) | 3,986,980,433,489,613,300 | Pauses a song that is already being played, or sends a message if there is no such song
:param ctx: The channel from which the user gave the command. | modules/youtube_music.py | pause_song | mavroudo/jarvis-discord | python | async def pause_song(self, ctx):
'\n Pauses a song that is already being played, or sends a message if there is no such song\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing()):
self.voiceChannel.pause()
else:
(await ctx.send('There is no song playing in order to pause it')) |
async def resume_song(self, ctx):
'\n Resumes a song if there is one that has been paused, or sends a message if there is no such song\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_paused()):
self.voiceChannel.resume()
else:
(await ctx.send('There is no song paused in order to resume it')) | 7,379,376,182,202,520,000 | Resumes a song if there is one that has been paused, or sends a message if there is no such song
:param ctx: The channel from which the user gave the command. | modules/youtube_music.py | resume_song | mavroudo/jarvis-discord | python | async def resume_song(self, ctx):
'\n Resumes a song if there is one that has been paused, or sends a message if there is no such song\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_paused()):
self.voiceChannel.resume()
else:
(await ctx.send('There is no song paused in order to resume it')) |
async def stop(self, ctx):
'\n Stops the music if there is music or sends a message if there is not. At the end clears the folder of\n the unnecessary songs.\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing()):
self.voiceChannel.stop()
else:
(await ctx.send('There is no song playing in order to stop it'))
self.clear_folder() | 4,282,445,432,905,563,600 | Stops the music if there is music or sends a message if there is not. At the end clears the folder of
the unnecessary songs.
:param ctx: The channel from which the user gave the command. | modules/youtube_music.py | stop | mavroudo/jarvis-discord | python | async def stop(self, ctx):
'\n Stops the music if there is music or sends a message if there is not. At the end clears the folder of\n the unnecessary songs.\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing()):
self.voiceChannel.stop()
else:
(await ctx.send('There is no song playing in order to stop it'))
self.clear_folder() |
async def next(self, ctx):
'\n Stops this song and starts the next one. The user will be informed with a message if there is no other song or if\n there is no song playing at the moment\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing() and self.queue):
(await self.stop(ctx))
self.load_next_song()
elif (not self.queue):
(await ctx.send('There is no other song in the queue'))
else:
(await ctx.send('There is no song playing, maybe use $play to start playing songs from the queue')) | 7,484,198,099,774,231,000 | Stops this song and starts the next one. The user will be informed with a message if there is no other song or if
there is no song playing at the moment
:param ctx: The channel from which the user gave the command. | modules/youtube_music.py | next | mavroudo/jarvis-discord | python | async def next(self, ctx):
'\n Stops this song and starts the next one. The user will be informed with a message if there is no other song or if\n there is no song playing at the moment\n :param ctx: The channel from which the user gave the command.\n '
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and self.voiceChannel.is_playing() and self.queue):
(await self.stop(ctx))
self.load_next_song()
elif (not self.queue):
(await ctx.send('There is no other song in the queue'))
else:
(await ctx.send('There is no song playing, maybe use $play to start playing songs from the queue')) |
async def play(self, ctx, channel):
'\n Starts playing the first song in the queue. If there are no songs in the queue or there is some music playing\n at this moment the user will be informed with messages\n :param ctx: The channel from which the user gave the command.\n '
(await self.connect(channel))
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and (not self.voiceChannel.is_playing()) and self.queue):
self.load_next_song()
elif (not self.queue):
(await ctx.send('There is no song in the list'))
elif self.voiceChannel.is_playing():
(await ctx.send('There is already some music playing. Increase the volume and join the party!')) | -3,596,108,759,146,214,400 | Starts playing the first song in the queue. If there are no songs in the queue or there is some music playing
at this moment the user will be informed with messages
:param ctx: The channel from which the user gave the command. | modules/youtube_music.py | play | mavroudo/jarvis-discord | python | async def play(self, ctx, channel):
'\n Starts playing the first song in the queue. If there are no songs in the queue or there is some music playing\n at this moment the user will be informed with messages\n :param ctx: The channel from which the user gave the command.\n '
(await self.connect(channel))
if ((self.voiceChannel is not None) and self.voiceChannel.is_connected() and (not self.voiceChannel.is_playing()) and self.queue):
self.load_next_song()
elif (not self.queue):
(await ctx.send('There is no song in the list'))
elif self.voiceChannel.is_playing():
(await ctx.send('There is already some music playing. Increase the volume and join the party!'))
def perform(self, action_data: ActionData, user_id: int, internal: bool=False) -> Tuple[(Optional[WriteRequest], Optional[ActionResults])]:
'\n Simplified entrypoint to perform the action.\n '
self.user_id = user_id
self.index = 0
instance = next(iter(action_data))
self.validate_instance(instance)
instance = self.update_instance(instance)
self.write_requests.extend(self.create_write_requests(instance))
final_write_request = self.process_write_requests()
return (final_write_request, [None]) | 4,471,072,255,482,969,000 | Simplified entrypoint to perform the action. | openslides_backend/action/actions/organization/initial_import.py | perform | JLkp/openslides-backend | python | def perform(self, action_data: ActionData, user_id: int, internal: bool=False) -> Tuple[(Optional[WriteRequest], Optional[ActionResults])]:
'\n \n '
self.user_id = user_id
self.index = 0
instance = next(iter(action_data))
self.validate_instance(instance)
instance = self.update_instance(instance)
self.write_requests.extend(self.create_write_requests(instance))
final_write_request = self.process_write_requests()
return (final_write_request, [None]) |
def create_user(self, username, email, password=None):
'\n Creates and saves a User with the given username,\n email and password.\n '
if (not email):
raise ValueError('Users must have an email address')
user = self.model(username=username, email=self.normalize_email(email))
user.set_password(password)
user.save(using=self._db)
return user | -4,442,619,516,629,424,000 | Creates and saves a User with the given username,
email and password. | InteractionTracker/accounts/models.py | create_user | Ahmar123/Lean-UX-Platform | python | def create_user(self, username, email, password=None):
'\n Creates and saves a User with the given username,\n email and password.\n '
if (not email):
raise ValueError('Users must have an email address')
user = self.model(username=username, email=self.normalize_email(email))
user.set_password(password)
user.save(using=self._db)
return user |
def create_superuser(self, username, email, password):
'\n Creates and saves a superuser with the given username,\n email and password.\n '
user = self.create_user(username, email, password=password)
user.is_admin = True
user.is_staff = True
user.save(using=self._db)
return user | -8,522,836,812,166,154,000 | Creates and saves a superuser with the given email, date of
birth and password. | InteractionTracker/accounts/models.py | create_superuser | Ahmar123/Lean-UX-Platform | python | def create_superuser(self, username, email, password):
'\n Creates and saves a superuser with the given username,\n email and password.\n '
user = self.create_user(username, email, password=password)
user.is_admin = True
user.is_staff = True
user.save(using=self._db)
return user |
def has_perm(self, perm, obj=None):
'Does the user have a specific permission?'
return True | -9,084,859,824,158,067,000 | Does the user have a specific permission? | InteractionTracker/accounts/models.py | has_perm | Ahmar123/Lean-UX-Platform | python | def has_perm(self, perm, obj=None):
return True |
def has_module_perms(self, app_label):
'Does the user have permissions to view the app `app_label`?'
return True | 4,992,969,413,468,943,000 | Does the user have permissions to view the app `app_label`? | InteractionTracker/accounts/models.py | has_module_perms | Ahmar123/Lean-UX-Platform | python | def has_module_perms(self, app_label):
return True |
def handle_request(self, request):
'\n based on mozilla documentation\n '
user = request.environ.get('HTTP_USER_AGENT', 'No User Agent Found')
response = Response()
response.text = f'This is {user}'
return response | -1,355,152,497,272,374,800 | based on mozilla documentation | diy/api.py | handle_request | sodrooome/diy | python | def handle_request(self, request):
'\n \n '
user = request.environ.get('HTTP_USER_AGENT', 'No User Agent Found')
response = Response()
response.text = f'This is {user}'
return response |
def class_based_request(self, request):
'\n Class-based views, such as in Django,\n are already implemented\n '
response = Response()
(handler, kwargs) = self.find_handler_request(request_path=request.path)
if (handler is not None):
if inspect.isclass(handler):
handler = getattr(handler(), request.method.lower(), None)
if (handler is None):
raise AttributeError('Method not allowed', request.method)
handler(request, response, **kwargs)
else:
self.default_response(response)
return response | 6,494,720,508,179,630,000 | Class-based views, such as in Django,
are already implemented | diy/api.py | class_based_request | sodrooome/diy | python | def class_based_request(self, request):
'\n Class-based views, such as in Django,\n are already implemented\n '
response = Response()
(handler, kwargs) = self.find_handler_request(request_path=request.path)
if (handler is not None):
if inspect.isclass(handler):
handler = getattr(handler(), request.method.lower(), None)
if (handler is None):
raise AttributeError('Method not allowed', request.method)
handler(request, response, **kwargs)
else:
self.default_response(response)
return response |
def session(self, base_url='http://baseserver'):
' \n Mount it to a session object;\n any request will start using the URL given\n by the prefix base_url\n '
session = RequestsSession()
session.mount(prefix=base_url, adapter=RequestWSGIAdapter(self))
return session | 4,558,973,264,244,254,700 | Mount it to a session object;
any request will start using the URL given
by the prefix base_url | diy/api.py | session | sodrooome/diy | python | def session(self, base_url='http://baseserver'):
' \n Mount it to a session object;\n any request will start using the URL given\n by the prefix base_url\n '
session = RequestsSession()
session.mount(prefix=base_url, adapter=RequestWSGIAdapter(self))
return session |
@classmethod
def tearDownClass(cls):
'Restore random seeds'
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state) | 7,837,575,732,146,355,000 | Restore random seeds | python/paddle/fluid/tests/unittests/ipu/op_test_ipu.py | tearDownClass | Abraham-Xu/Paddle | python | @classmethod
def tearDownClass(cls):
np.random.set_state(cls._np_rand_state)
random.setstate(cls._py_rand_state) |
def test_overloading():
'Test that |, & are overloaded as expected'
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert ((A & B) == And(A, B))
assert ((A | B) == Or(A, B))
assert (((A & B) | C) == Or(And(A, B), C))
assert ((A >> B) == Implies(A, B))
assert ((A << B) == Implies(B, A))
assert ((~ A) == Not(A))
assert ((A ^ B) == Xor(A, B)) | -5,511,934,167,199,415,000 | Test that |, & are overloaded as expected | py3k-sympy/sympy/logic/tests/test_boolalg.py | test_overloading | cielavenir/sympy | python | def test_overloading():
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert ((A & B) == And(A, B))
assert ((A | B) == Or(A, B))
assert (((A & B) | C) == Or(And(A, B), C))
assert ((A >> B) == Implies(A, B))
assert ((A << B) == Implies(B, A))
assert ((~ A) == Not(A))
assert ((A ^ B) == Xor(A, B)) |
def test_bool_symbol():
'Test that mixing symbols with boolean values\n works as expected'
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert (And(A, True) == A)
assert (And(A, True, True) == A)
assert (And(A, False) == False)
assert (And(A, True, False) == False)
assert (Or(A, True) == True)
assert (Or(A, False) == A) | 6,746,053,402,224,563,000 | Test that mixing symbols with boolean values
works as expected | py3k-sympy/sympy/logic/tests/test_boolalg.py | test_bool_symbol | cielavenir/sympy | python | def test_bool_symbol():
'Test that mixing symbols with boolean values\n works as expected'
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert (And(A, True) == A)
assert (And(A, True, True) == A)
assert (And(A, False) == False)
assert (And(A, True, False) == False)
assert (Or(A, True) == True)
assert (Or(A, False) == A) |
def test_commutative():
'Test for commutativity of And and Or'
(A, B) = list(map(Boolean, symbols('A,B')))
assert ((A & B) == (B & A))
assert ((A | B) == (B | A)) | 1,785,946,369,847,538,000 | Test for commutativity of And and Or | py3k-sympy/sympy/logic/tests/test_boolalg.py | test_commutative | cielavenir/sympy | python | def test_commutative():
(A, B) = list(map(Boolean, symbols('A,B')))
assert ((A & B) == (B & A))
assert ((A | B) == (B | A)) |
def test_and_associativity():
'Test for associativity of And'
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert (((A & B) & C) == (A & (B & C))) | -6,652,272,438,377,889,000 | Test for associativity of And | py3k-sympy/sympy/logic/tests/test_boolalg.py | test_and_associativity | cielavenir/sympy | python | def test_and_associativity():
(A, B, C) = list(map(Boolean, symbols('A,B,C')))
assert (((A & B) & C) == (A & (B & C))) |
def shrink_glove_file(glove_fp: Path, filter_words: List[str], save_fp: Path) -> None:
'\n :param glove_fp: File path to the glove file that is to be shrunk\n :param filter_words: List of words to filter/shrink the glove file/vectors \n by\n :param save_fp:\n '
with save_fp.open('w+') as save_file:
with glove_fp.open('r') as glove_file:
for glove_vector in glove_file:
glove_parts = glove_vector.split()
if ((len(glove_parts) == 301) or (len(glove_parts) == 51) or (len(glove_parts) == 201)):
pass
else:
continue
glove_word = glove_parts[0]
if (glove_word in filter_words):
save_file.write(glove_vector) | -6,857,033,972,029,439,000 | :param glove_fp: File path to the glove file that is to be shrunk
:param filter_words: List of words to filter/shrink the glove file/vectors
by
:param save_fp: | tdsa_augmentation/data_creation/shrink_glove_to_targets.py | shrink_glove_file | apmoore1/tdsa_augmentation | python | def shrink_glove_file(glove_fp: Path, filter_words: List[str], save_fp: Path) -> None:
'\n :param glove_fp: File path to the glove file that is to be shrunk\n :param filter_words: List of words to filter/shrink the glove file/vectors \n by\n :param save_fp:\n '
with save_fp.open('w+') as save_file:
with glove_fp.open('r') as glove_file:
for glove_vector in glove_file:
glove_parts = glove_vector.split()
if ((len(glove_parts) == 301) or (len(glove_parts) == 51) or (len(glove_parts) == 201)):
pass
else:
continue
glove_word = glove_parts[0]
if (glove_word in filter_words):
save_file.write(glove_vector) |
def empty_feedback(self):
'\n Return a tensor corresponding to no feedback.\n '
return np.array([(- 1), (- 1)]) | -6,264,336,474,587,260,000 | Return a tensor corresponding to no feedback. | envs/babyai/oracle/landmark_correction.py | empty_feedback | AliengirlLiv/babyai | python | def empty_feedback(self):
'\n \n '
return np.array([(- 1), (- 1)]) |
def random_feedback(self):
'\n Return a tensor corresponding to random feedback.\n '
raise NotImplementedError('random feedback not implemented') | -723,800,758,176,182,100 | Return a tensor corresponding to random feedback. | envs/babyai/oracle/landmark_correction.py | random_feedback | AliengirlLiv/babyai | python | def random_feedback(self):
'\n \n '
raise NotImplementedError('random feedback not implemented') |
def compute_feedback(self):
'\n Return the expert action from the previous timestep.\n '
dist_pos = np.array(self.env.dist_pos)
agentobj_distances = np.sum(np.abs((dist_pos - self.env.agent_pos)), axis=1)
curr_dist = np.sum(np.abs((self.env.obj_pos - self.env.agent_pos)))
goalobj_distances = np.sum(np.abs((dist_pos - self.env.obj_pos)), axis=1)
idx_closer = np.where((goalobj_distances < curr_dist))
if (len(idx_closer[0]) == 0):
return np.array([self.env.obj_color, self.env.obj_type])
else:
idx_agentobj = range(len(agentobj_distances))
idx_agentobj = [x for (_, x) in sorted(zip(agentobj_distances, idx_agentobj))]
for idx in idx_agentobj:
if (idx in idx_closer[0]):
break
return np.array([self.env.dist_colors[idx], self.env.dist_types[idx]]) | -8,649,091,093,806,017,000 | Return the expert action from the previous timestep. | envs/babyai/oracle/landmark_correction.py | compute_feedback | AliengirlLiv/babyai | python | def compute_feedback(self):
'\n \n '
dist_pos = np.array(self.env.dist_pos)
agentobj_distances = np.sum(np.abs((dist_pos - self.env.agent_pos)), axis=1)
curr_dist = np.sum(np.abs((self.env.obj_pos - self.env.agent_pos)))
goalobj_distances = np.sum(np.abs((dist_pos - self.env.obj_pos)), axis=1)
idx_closer = np.where((goalobj_distances < curr_dist))
if (len(idx_closer[0]) == 0):
return np.array([self.env.obj_color, self.env.obj_type])
else:
idx_agentobj = range(len(agentobj_distances))
idx_agentobj = [x for (_, x) in sorted(zip(agentobj_distances, idx_agentobj))]
for idx in idx_agentobj:
if (idx in idx_closer[0]):
break
return np.array([self.env.dist_colors[idx], self.env.dist_types[idx]]) |
def feedback_condition(self):
"\n Returns true when we should give feedback.\n Currently returns true when the agent's past action did not match the oracle's action.\n "
return ((len(self.agent_actions) > 0) and (not (self.agent_actions[(- 1)] == self.oracle_actions[(- 1)]))) | 5,280,185,725,248,230,000 | Returns true when we should give feedback.
Currently returns true when the agent's past action did not match the oracle's action. | envs/babyai/oracle/landmark_correction.py | feedback_condition | AliengirlLiv/babyai | python | def feedback_condition(self):
"\n Returns true when we should give feedback.\n Currently returns true when the agent's past action did not match the oracle's action.\n "
return ((len(self.agent_actions) > 0) and (not (self.agent_actions[(- 1)] == self.oracle_actions[(- 1)]))) |
def get_layer_uid(layer_name=''):
'Helper function, assigns unique layer IDs.'
if (layer_name not in _LAYER_UIDS):
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name] | 214,074,941,142,016,670 | Helper function, assigns unique layer IDs. | layers.py | get_layer_uid | krohak/TAGCN | python | def get_layer_uid(layer_name=):
if (layer_name not in _LAYER_UIDS):
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name] |
def sparse_dropout(x, keep_prob, noise_shape):
'Dropout for sparse tensors.'
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return (pre_out * (1.0 / keep_prob)) | -9,178,850,592,782,271,000 | Dropout for sparse tensors. | layers.py | sparse_dropout | krohak/TAGCN | python | def sparse_dropout(x, keep_prob, noise_shape):
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return (pre_out * (1.0 / keep_prob)) |
def dot(x, y, sparse=False):
'Wrapper for tf.matmul (sparse vs dense).'
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res | -4,050,403,144,256,118,300 | Wrapper for tf.matmul (sparse vs dense). | layers.py | dot | krohak/TAGCN | python | def dot(x, y, sparse=False):
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res |
def asmt_merge_vacc():
'\n Merge assessment df with vaccine dataframe, filter for subjects with a healthy assessment before the vaccine date\n '
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
asmt = src['assessments']
vacc = s.open_dataset(VDATA, 'r', 'vacc')
dst = s.open_dataset(DSTDATA, 'w', 'dst')
vbrand_filter = ((vacc['vaccine_doses']['brand'].data[:] == 2) | (vacc['vaccine_doses']['brand'].data[:] == 3))
dvacc = dst.create_dataframe('vacc')
vacc['vaccine_doses'].apply_filter(vbrand_filter, ddf=dvacc)
asmt_v = dst.create_dataframe('asmt_v')
dataframe.merge(asmt, dvacc, asmt_v, 'patient_id', 'patient_id', how='inner')
symp_list = ['persistent_cough', 'fever', 'fatigue', 'delirium', 'shortness_of_breath', 'diarrhoea', 'abdominal_pain', 'chest_pain', 'hoarse_voice', 'skipped_meals', 'loss_of_smell', 'headache', 'sore_throat', 'chills_or_shivers', 'eye_soreness', 'nausea', 'blisters_on_feet', 'unusual_muscle_pains', 'runny_nose', 'red_welts_on_face_or_lips', 'dizzy_light_headed', 'swollen_glands', 'sneezing', 'skin_burning', 'earache', 'altered_smell', 'brain_fog', 'irregular_heartbeat']
symp_filter = (asmt_v['persistent_cough'].data[:] > 1)
for symptom1 in symp_list:
symp_filter |= (asmt_v[symptom1].data[:] > 1)
symp_filter = (~ symp_filter)
symp_filter &= (asmt_v['date_taken_specific'].data[:] > asmt_v['updated_at_l'].data[:])
symp_filter &= (asmt_v['updated_at_l'].data[:] > (asmt_v['date_taken_specific'].data[:] - ((3600 * 24) * 10)))
asmt_v.apply_filter(symp_filter)
yes_symp_filter = (asmt_v['persistent_cough'].data[:] > 1)
for symptom1 in symp_list:
yes_symp_filter |= (asmt_v[symptom1].data[:] > 1)
yes_symp_filter &= (asmt_v['date_taken_specific'].data[:] < asmt_v['updated_at_l'].data[:])
yes_symp_filter &= ((asmt_v['date_taken_specific'].data[:] + ((3600 * 24) * 10)) > asmt_v['updated_at_l'].data[:])
asmt_v.apply_filter(yes_symp_filter)
print('finish asmt join vaccine.') | 3,183,378,778,421,883,000 | Merge assessment df with vaccine dataframe, filter for subjects with a healthy assessment before the vaccine date | scripts/asmt_merge_vacc_exetera.py | asmt_merge_vacc | KCL-BMEIS/ExeTeraCovid | python | def asmt_merge_vacc():
'\n \n '
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
asmt = src['assessments']
vacc = s.open_dataset(VDATA, 'r', 'vacc')
dst = s.open_dataset(DSTDATA, 'w', 'dst')
vbrand_filter = ((vacc['vaccine_doses']['brand'].data[:] == 2) | (vacc['vaccine_doses']['brand'].data[:] == 3))
dvacc = dst.create_dataframe('vacc')
vacc['vaccine_doses'].apply_filter(vbrand_filter, ddf=dvacc)
asmt_v = dst.create_dataframe('asmt_v')
dataframe.merge(asmt, dvacc, asmt_v, 'patient_id', 'patient_id', how='inner')
symp_list = ['persistent_cough', 'fever', 'fatigue', 'delirium', 'shortness_of_breath', 'diarrhoea', 'abdominal_pain', 'chest_pain', 'hoarse_voice', 'skipped_meals', 'loss_of_smell', 'headache', 'sore_throat', 'chills_or_shivers', 'eye_soreness', 'nausea', 'blisters_on_feet', 'unusual_muscle_pains', 'runny_nose', 'red_welts_on_face_or_lips', 'dizzy_light_headed', 'swollen_glands', 'sneezing', 'skin_burning', 'earache', 'altered_smell', 'brain_fog', 'irregular_heartbeat']
symp_filter = (asmt_v['persistent_cough'].data[:] > 1)
for symptom1 in symp_list:
symp_filter |= (asmt_v[symptom1].data[:] > 1)
symp_filter = (~ symp_filter)
symp_filter &= (asmt_v['date_taken_specific'].data[:] > asmt_v['updated_at_l'].data[:])
symp_filter &= (asmt_v['updated_at_l'].data[:] > (asmt_v['date_taken_specific'].data[:] - ((3600 * 24) * 10)))
asmt_v.apply_filter(symp_filter)
yes_symp_filter = (asmt_v['persistent_cough'].data[:] > 1)
for symptom1 in symp_list:
yes_symp_filter |= (asmt_v[symptom1].data[:] > 1)
yes_symp_filter &= (asmt_v['date_taken_specific'].data[:] < asmt_v['updated_at_l'].data[:])
yes_symp_filter &= ((asmt_v['date_taken_specific'].data[:] + ((3600 * 24) * 10)) > asmt_v['updated_at_l'].data[:])
asmt_v.apply_filter(yes_symp_filter)
print('finish asmt join vaccine.') |
def join_tests():
'\n Merge tests with the previously merged (assessments, vaccine) data, and filter for subjects with test records within 10 days after the vaccine\n '
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
tests_src = src['tests']
dst = s.open_dataset(DSTDATA, 'r+', 'dst')
vacc = dst['asmt_v']
tests_m = dst.create_dataframe('tests_m')
dataframe.merge(vacc, tests_src, tests_m, 'patient_id_l', 'patient_id', how='inner')
test_filter = (tests_m['date_taken_specific_l'] < tests_m['date_taken_specific_r'])
test_filter &= (tests_m['date_taken_specific_l'] > (tests_m['date_taken_specific_r'] - ((3600 * 24) * 10)))
tests_m.apply_filter(test_filter) | 606,880,224,139,278,000 | Merge tests with the previously merged (assessments, vaccine) data, and filter for subjects with test records within 10 days after the vaccine | scripts/asmt_merge_vacc_exetera.py | join_tests | KCL-BMEIS/ExeTeraCovid | python | def join_tests():
'\n \n '
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
tests_src = src['tests']
dst = s.open_dataset(DSTDATA, 'r+', 'dst')
vacc = dst['asmt_v']
tests_m = dst.create_dataframe('tests_m')
dataframe.merge(vacc, tests_src, tests_m, 'patient_id_l', 'patient_id', how='inner')
test_filter = (tests_m['date_taken_specific_l'] < tests_m['date_taken_specific_r'])
test_filter &= (tests_m['date_taken_specific_l'] > (tests_m['date_taken_specific_r'] - ((3600 * 24) * 10)))
tests_m.apply_filter(test_filter) |
@property
def SpbOutsideLinks(self):
'\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc.SpbOutsideLinks): An instance of the SpbOutsideLinks class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc import SpbOutsideLinks
if (self._properties.get('SpbOutsideLinks', None) is None):
return SpbOutsideLinks(self)
else:
return self._properties.get('SpbOutsideLinks') | 1,129,730,518,934,605,700 | Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc.SpbOutsideLinks): An instance of the SpbOutsideLinks class
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | SpbOutsideLinks | Vibaswan/ixnetwork_restpy | python | @property
def SpbOutsideLinks(self):
'\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc.SpbOutsideLinks): An instance of the SpbOutsideLinks class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spboutsidelinks_dfb7b1e816409cddb14e138ebc2096dc import SpbOutsideLinks
if (self._properties.get('SpbOutsideLinks', None) is None):
return SpbOutsideLinks(self)
else:
return self._properties.get('SpbOutsideLinks') |
@property
def SpbmNodeTopologyRange(self):
'\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7.SpbmNodeTopologyRange): An instance of the SpbmNodeTopologyRange class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7 import SpbmNodeTopologyRange
if (self._properties.get('SpbmNodeTopologyRange', None) is None):
return SpbmNodeTopologyRange(self)
else:
return self._properties.get('SpbmNodeTopologyRange') | -2,365,756,284,658,724,000 | Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7.SpbmNodeTopologyRange): An instance of the SpbmNodeTopologyRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | SpbmNodeTopologyRange | Vibaswan/ixnetwork_restpy | python | @property
def SpbmNodeTopologyRange(self):
'\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7.SpbmNodeTopologyRange): An instance of the SpbmNodeTopologyRange class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodetopologyrange_199093afa11cd9f4488faaa1ad3ec3a7 import SpbmNodeTopologyRange
if (self._properties.get('SpbmNodeTopologyRange', None) is None):
return SpbmNodeTopologyRange(self)
else:
return self._properties.get('SpbmNodeTopologyRange') |
@property
def EnableAdvertiseNetworkRange(self):
'\n Returns\n -------\n - bool: If true, this SPB ISIS Network Range is advertised.\n '
return self._get_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange']) | -4,739,552,778,502,594,000 | Returns
-------
- bool: If true, this SPB ISIS Network Range is advertised. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | EnableAdvertiseNetworkRange | Vibaswan/ixnetwork_restpy | python | @property
def EnableAdvertiseNetworkRange(self):
'\n Returns\n -------\n - bool: If true, this SPB ISIS Network Range is advertised.\n '
return self._get_attribute(self._SDM_ATT_MAP['EnableAdvertiseNetworkRange']) |
@property
def EnableHostName(self):
'\n Returns\n -------\n - bool: If true, the host name of the router is activated.\n '
return self._get_attribute(self._SDM_ATT_MAP['EnableHostName']) | 2,206,462,825,410,428,700 | Returns
-------
- bool: If true, the host name of the router is activated. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | EnableHostName | Vibaswan/ixnetwork_restpy | python | @property
def EnableHostName(self):
'\n Returns\n -------\n - bool: If true, the host name of the router is activated.\n '
return self._get_attribute(self._SDM_ATT_MAP['EnableHostName']) |
@property
def EntryColumn(self):
'\n Returns\n -------\n - number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n '
return self._get_attribute(self._SDM_ATT_MAP['EntryColumn']) | -5,533,420,305,700,948,000 | Returns
-------
- number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | EntryColumn | Vibaswan/ixnetwork_restpy | python | @property
def EntryColumn(self):
'\n Returns\n -------\n - number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n '
return self._get_attribute(self._SDM_ATT_MAP['EntryColumn']) |
@property
def EntryRow(self):
'\n Returns\n -------\n - number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n '
return self._get_attribute(self._SDM_ATT_MAP['EntryRow']) | 8,920,289,282,218,558,000 | Returns
-------
- number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | EntryRow | Vibaswan/ixnetwork_restpy | python | @property
def EntryRow(self):
'\n Returns\n -------\n - number: The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n '
return self._get_attribute(self._SDM_ATT_MAP['EntryRow']) |
@property
def HostNamePrefix(self):
'\n Returns\n -------\n - str: The host name prefix information.\n '
return self._get_attribute(self._SDM_ATT_MAP['HostNamePrefix']) | 9,153,214,394,494,106,000 | Returns
-------
- str: The host name prefix information. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | HostNamePrefix | Vibaswan/ixnetwork_restpy | python | @property
def HostNamePrefix(self):
'\n Returns\n -------\n - str: The host name prefix information.\n '
return self._get_attribute(self._SDM_ATT_MAP['HostNamePrefix']) |
@property
def InterfaceMetric(self):
'\n Returns\n -------\n - number: The metric cost associated with this emulated SPB ISIS router.\n '
return self._get_attribute(self._SDM_ATT_MAP['InterfaceMetric']) | 7,485,795,995,632,040,000 | Returns
-------
- number: The metric cost associated with this emulated SPB ISIS router. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | InterfaceMetric | Vibaswan/ixnetwork_restpy | python | @property
def InterfaceMetric(self):
'\n Returns\n -------\n - number: The metric cost associated with this emulated SPB ISIS router.\n '
return self._get_attribute(self._SDM_ATT_MAP['InterfaceMetric']) |
@property
def NoOfColumns(self):
'\n Returns\n -------\n - number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n '
return self._get_attribute(self._SDM_ATT_MAP['NoOfColumns']) | -7,184,879,986,260,368,000 | Returns
-------
- number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers). | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | NoOfColumns | Vibaswan/ixnetwork_restpy | python | @property
def NoOfColumns(self):
'\n Returns\n -------\n - number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n '
return self._get_attribute(self._SDM_ATT_MAP['NoOfColumns']) |
@property
def NoOfRows(self):
'\n Returns\n -------\n - number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n '
return self._get_attribute(self._SDM_ATT_MAP['NoOfRows']) | 8,961,068,169,500,311,000 | Returns
-------
- number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers). | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | NoOfRows | Vibaswan/ixnetwork_restpy | python | @property
def NoOfRows(self):
'\n Returns\n -------\n - number: The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n '
return self._get_attribute(self._SDM_ATT_MAP['NoOfRows']) |
@property
def StartSystemId(self):
'\n Returns\n -------\n - str: The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n '
return self._get_attribute(self._SDM_ATT_MAP['StartSystemId']) | 9,167,205,911,169,402,000 | Returns
-------
- str: The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | StartSystemId | Vibaswan/ixnetwork_restpy | python | @property
def StartSystemId(self):
'\n Returns\n -------\n - str: The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n '
return self._get_attribute(self._SDM_ATT_MAP['StartSystemId']) |
@property
def SystemIdIncrementBy(self):
'\n Returns\n -------\n - str: This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n '
return self._get_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy']) | -7,672,082,771,421,963,000 | Returns
-------
- str: This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range. | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | SystemIdIncrementBy | Vibaswan/ixnetwork_restpy | python | @property
def SystemIdIncrementBy(self):
'\n Returns\n -------\n - str: This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n '
return self._get_attribute(self._SDM_ATT_MAP['SystemIdIncrementBy']) |
def update(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Updates spbNetworkRange resource on the server.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) | 682,650,737,863,613,300 | Updates spbNetworkRange resource on the server.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | update | Vibaswan/ixnetwork_restpy | python | def update(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Updates spbNetworkRange resource on the server.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) |
def add(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Adds a new spbNetworkRange resource on the server and adds it to the container.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Returns\n -------\n - self: This instance with all currently retrieved spbNetworkRange resources using find and the newly added spbNetworkRange resources available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) | 8,049,020,523,931,185,000 | Adds a new spbNetworkRange resource on the server and adds it to the container.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Returns
-------
- self: This instance with all currently retrieved spbNetworkRange resources using find and the newly added spbNetworkRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | add | Vibaswan/ixnetwork_restpy | python | def add(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Adds a new spbNetworkRange resource on the server and adds it to the container.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Returns\n -------\n - self: This instance with all currently retrieved spbNetworkRange resources using find and the newly added spbNetworkRange resources available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) |
def remove(self):
'Deletes all the contained spbNetworkRange resources in this instance from the server.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n '
self._delete() | -8,538,951,994,139,801,000 | Deletes all the contained spbNetworkRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | remove | Vibaswan/ixnetwork_restpy | python | def remove(self):
'Deletes all the contained spbNetworkRange resources in this instance from the server.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n '
self._delete() |
def find(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Finds and retrieves spbNetworkRange resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbNetworkRange resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all spbNetworkRange resources from the server.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Returns\n -------\n - self: This instance with matching spbNetworkRange resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) | -4,447,757,006,739,500,500 | Finds and retrieves spbNetworkRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbNetworkRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all spbNetworkRange resources from the server.
Args
----
- EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.
- EnableHostName (bool): If true, the host name of the router is activated.
- EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.
- HostNamePrefix (str): The host name prefix information.
- InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.
- NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).
- StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.
- SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.
Returns
-------
- self: This instance with matching spbNetworkRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | find | Vibaswan/ixnetwork_restpy | python | def find(self, EnableAdvertiseNetworkRange=None, EnableHostName=None, EntryColumn=None, EntryRow=None, HostNamePrefix=None, InterfaceMetric=None, NoOfColumns=None, NoOfRows=None, StartSystemId=None, SystemIdIncrementBy=None):
'Finds and retrieves spbNetworkRange resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbNetworkRange resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all spbNetworkRange resources from the server.\n\n Args\n ----\n - EnableAdvertiseNetworkRange (bool): If true, this SPB ISIS Network Range is advertised.\n - EnableHostName (bool): If true, the host name of the router is activated.\n - EntryColumn (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - EntryRow (number): The value is used in combination to specify which virtual router in the Network Range is connected to the current ISIS L2/L3 Router.\n - HostNamePrefix (str): The host name prefix information.\n - InterfaceMetric (number): The metric cost associated with this emulated SPB ISIS router.\n - NoOfColumns (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - NoOfRows (number): The value is used in combination to create a matrix (grid) for an emulated network range of the following size: The # Rows multiplied the # Cols = Number of routers in this Network Range. (For example, 3 Rows x 3 Columns = 9 Routers).\n - StartSystemId (str): The System ID assigned to the starting SPB ISIS router in this network range. The default is 00 00 00 00 00 00.\n - SystemIdIncrementBy (str): This is used when more than one router is to be emulated. The increment value is added to the previous System ID for each additional emulated router in this network range.\n\n Returns\n -------\n - self: This instance with matching spbNetworkRange resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) |
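A sketch of selective retrieval with find(); per the docstring, parameter values are evaluated server-side as regexes, so ^...$ anchors an exact match. The container handle and the values are assumptions:

all_ranges = spb_ranges.find()                                  # every spbNetworkRange
advertised = spb_ranges.find(EnableAdvertiseNetworkRange=True)  # filter on a flag
exact = spb_ranges.find(StartSystemId='^00 00 00 00 00 01$')    # anchored exact match
for nr in exact:                                                # results are iterable/indexable
    print(nr.NoOfRows, nr.NoOfColumns, nr.InterfaceMetric)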
def read(self, href):
'Retrieves a single instance of spbNetworkRange data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the spbNetworkRange resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._read(href) | -6,453,500,770,520,028,000 | Retrieves a single instance of spbNetworkRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the spbNetworkRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbnetworkrange_525415b0593fd4072368412490b137fa.py | read | Vibaswan/ixnetwork_restpy | python | def read(self, href):
'Retrieves a single instance of spbNetworkRange data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the spbNetworkRange resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n '
return self._read(href) |
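A sketch of read(), which rehydrates a single spbNetworkRange from a stored href; the href value below is illustrative only, not a real instance path:

href = '/api/v1/sessions/1/ixnetwork/vport/1/protocols/isis/router/1/spbNetworkRange/1'  # example only
nr = spb_ranges.read(href)
print(nr.StartSystemId, nr.EnableHostName)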
def init():
' Init the application and add routes '
logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s', level=logging.DEBUG)
global theconfig
theconfig = get_config()
global rc
rc = init_redis(theconfig)
app = default_app()
return app | -517,099,752,630,745,100 | Init the application and add routes | web/app.py | init | ikreymer/archivethiswebsite | python | def init():
' '
logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s', level=logging.DEBUG)
global theconfig
theconfig = get_config()
global rc
rc = init_redis(theconfig)
app = default_app()
return app |
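A sketch of how the returned bottle application might be served; the entry-point names are assumptions, since the record only shows that init() wires up logging, config and redis before returning default_app():

# e.g. as a WSGI entry point:  gunicorn 'web.app:application'
application = init()

# or locally with bottle's built-in server:
if __name__ == '__main__':
    from bottle import run
    run(app=application, host='0.0.0.0', port=8080)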
def red(text):
"\n Shameless hack-up of the 'termcolor' python package\n by Konstantin Lepa - <https://pypi.python.org/pypi/termcolor>\n to reduce rependencies and only make red text.\n "
s = ('\x1b[%dm%s\x1b[0m' % (31, text))
return s | -8,253,788,579,041,573,000 | Shameless hack-up of the 'termcolor' python package
by Konstantin Lepa - <https://pypi.python.org/pypi/termcolor>
to reduce dependencies and only make red text. | contrib/cli_scripts/nodemeisterlib.py | red | coxmediagroup/nodemeister | python | def red(text):
"\n Shameless hack-up of the 'termcolor' python package\n by Konstantin Lepa - <https://pypi.python.org/pypi/termcolor>\n to reduce rependencies and only make red text.\n "
s = ('\x1b[%dm%s\x1b[0m' % (31, text))
return s |
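A tiny sketch of red(): it wraps the text in the ANSI SGR code for red (31) and resets with [0m, so the colour only renders in an ANSI-capable terminal.

print(red('value differs'))             # shows in red on an ANSI terminal
assert red('x') == '\x1b[31mx\x1b[0m'   # 31 = red foreground, 0 = reset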
def print_columns(lines, spacer=' ', onlydifferent=False):
'\n Take a list of lines, each being a list with 3 elements\n (the three columns to print) and print in 3 columns.\n\n :param lines: list of 3-element lists, each list is a line and\n each sub-list holds the 3 columns of that line\n :type lines: list of lists\n :param spacer: spacer between columns, default 3 spaces\n :type spacer: string\n :param onlydifferent: only output differing lines\n :type onlydifferent: boolean\n '
s = ''
clen = [0, 0, 0]
for l in lines:
if onlydifferent:
if (len(l) < 3):
continue
for c in xrange(0, 3):
if (len(str(l[c])) > clen[c]):
clen[c] = len(str(l[c]))
line_spec = '{{0:<{1}s}}{0}{{1:<{2}s}}{0}{{2:<{3}s}}\n'.format((' ' * 3), clen[0], clen[1], clen[2])
for l in lines:
if ((len(l) > 3) and (l[3] == True)):
s += red(line_spec.format((DIFF_MARKER + l[0]), str(l[1]), str(l[2])))
else:
if onlydifferent:
continue
s += line_spec.format(l[0], str(l[1]), str(l[2]))
return s | 5,469,703,063,360,530,000 | Take a list of lines, each being a list with 3 elements
(the three columns to print) and print in 3 columns.
:param lines: list of 3-element lists, each list is a line and
each sub-list holds the 3 columns of that line
:type lines: list of lists
:param spacer: spacer between columns, default 3 spaces
:type spacer: string
:param onlydifferent: only output differing lines
:type onlydifferent: boolean | contrib/cli_scripts/nodemeisterlib.py | print_columns | coxmediagroup/nodemeister | python | def print_columns(lines, spacer=' ', onlydifferent=False):
'\n Take a list of lines, each being a list with 3 elements\n (the three columns to print) and print in 3 columns.\n\n :param lines: list of 3-element lists, each list is a line and\n each sub-list holds the 3 columns of that line\n :type lines: list of lists\n :param spacer: spacer between columns, default 3 spaces\n :type spacer: string\n :param onlydifferent: only output differing lines\n :type onlydifferent: boolean\n '
s = ''
clen = [0, 0, 0]
for l in lines:
if onlydifferent:
if (len(l) < 3):
continue
for c in xrange(0, 3):
if (len(str(l[c])) > clen[c]):
clen[c] = len(str(l[c]))
line_spec = '{{0:<{1}s}}{0}{{1:<{2}s}}{0}{{2:<{3}s}}\n'.format((' ' * 3), clen[0], clen[1], clen[2])
for l in lines:
if ((len(l) > 3) and (l[3] == True)):
s += red(line_spec.format((DIFF_MARKER + l[0]), str(l[1]), str(l[2])))
else:
if onlydifferent:
continue
s += line_spec.format(l[0], str(l[1]), str(l[2]))
return s |
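A usage sketch for print_columns(): each line is a 3-element list of columns, and an optional 4th truthy element flags the line as differing (rendered via red() and prefixed with the module's DIFF_MARKER, whose value is not shown in these records). Note the spacer argument is accepted but the output joins columns with a hardcoded three-space gap; the values below are made up.

lines = [
    ['', 'old ENC', 'NodeMeister'],
    ['classes:', '', ''],
    ['ntp', 'present', 'present'],
    ['ssh', 'present', '<missing>', True],       # flagged as different
]
print(print_columns(lines))                      # full three-column table
print(print_columns(lines, onlydifferent=True))  # only the flagged lines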
def pretty_diff_list(title, oA, oB):
'\n Generate a pretty diff of two lists.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
lines = []
items = set.union(set(oA), set(oB))
for i in sorted(items):
if ((i in oA) and (i in oB)):
lines.append(['', i, i])
elif (i in oA):
lines.append(['', i, MISSING_ITEM, True])
elif (i in oB):
lines.append(['', MISSING_ITEM, i, True])
return lines | -1,005,687,675,319,491,800 | Generate a pretty diff of two lists.
:param title: the title/heading for the line
:type title: string
:param oA: first object
:param oB: second object
:returns: list of lines, each a list of 3 columns
:rtype: list of lists | contrib/cli_scripts/nodemeisterlib.py | pretty_diff_list | coxmediagroup/nodemeister | python | def pretty_diff_list(title, oA, oB):
'\n Generate a pretty diff of two lists.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
lines = []
items = set.union(set(oA), set(oB))
for i in sorted(items):
if ((i in oA) and (i in oB)):
lines.append(['', i, i])
elif (i in oA):
lines.append(['', i, MISSING_ITEM, True])
elif (i in oB):
lines.append(['', MISSING_ITEM, i, True])
return lines |
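A traced example of pretty_diff_list() (note the title argument is accepted but unused by the body); MISSING_ITEM is a module-level marker whose value is not shown in these records.

lines = pretty_diff_list('classes', ['ntp', 'ssh'], ['ntp', 'sudo'])
# sorted union is ['ntp', 'ssh', 'sudo'], so the result is:
# [['', 'ntp', 'ntp'],
#  ['', 'ssh', MISSING_ITEM, True],
#  ['', MISSING_ITEM, 'sudo', True]]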
def pretty_diff_str(title, oA, oB):
'\n Generate a pretty diff of two strings.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
if (oA != oB):
return [[title, oA, oB, True]]
return [[title, oA, oB]] | 2,362,814,580,164,539,400 | Generate a pretty diff of two strings.
:param title: the title/heading for the line
:type title: string
:param oA: first object
:param oB: second object
:returns: list of lines, each a list of 3 columns
:rtype: list of lists | contrib/cli_scripts/nodemeisterlib.py | pretty_diff_str | coxmediagroup/nodemeister | python | def pretty_diff_str(title, oA, oB):
'\n Generate a pretty diff of two strings.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
if (oA != oB):
return [[title, oA, oB, True]]
return [[title, oA, oB]] |
def pretty_diff_dict(title, oA, oB):
'\n Generate a pretty diff of two dicts.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
lines = [[title, '', '']]
keys = set.union(set(oA.keys()), set(oB.keys()))
for k in sorted(keys):
if ((k in oA) and (k in oB)):
if (oA[k] == oB[k]):
lines.append([k, oA[k], oB[k]])
else:
lines.append([k, oA[k], oB[k], True])
elif (k in oA):
lines.append([k, oA[k], MISSING_ITEM, True])
else:
lines.append([k, MISSING_ITEM, oB[k], True])
return lines | 2,325,412,352,550,839,300 | Generate a pretty diff of two dicts.
:param title: the title/heading for the line
:type title: string
:param oA: first object
:param oB: second object
:returns: list of lines, each a list of 3 columns
:rtype: list of lists | contrib/cli_scripts/nodemeisterlib.py | pretty_diff_dict | coxmediagroup/nodemeister | python | def pretty_diff_dict(title, oA, oB):
'\n Generate a pretty diff of two dicts.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
lines = [[title, '', '']]
keys = set.union(set(oA.keys()), set(oB.keys()))
for k in sorted(keys):
if ((k in oA) and (k in oB)):
if (oA[k] == oB[k]):
lines.append([k, oA[k], oB[k]])
else:
lines.append([k, oA[k], oB[k], True])
elif (k in oA):
lines.append([k, oA[k], MISSING_ITEM, True])
else:
lines.append([k, MISSING_ITEM, oB[k], True])
return lines |
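A traced example of pretty_diff_dict(): a title line first, then one line per key from either dict, with the True flag on unequal or one-sided keys so print_columns() can highlight them.

lines = pretty_diff_dict('parameters:', {'env': 'prod', 'tz': 'UTC'}, {'env': 'dev'})
# [['parameters:', '', ''],
#  ['env', 'prod', 'dev', True],
#  ['tz', 'UTC', MISSING_ITEM, True]]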
def pretty_diff_obj(title, oA, oB):
'\n Generate a pretty diff of two objects (actually just\n dict, list or string) of lines suitable for use in pretty_diff_dicts()\n\n This method is a pass-through to\n pretty_diff_(dict|string|list)\n depending on the input type.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
if ((type(oA) == type({})) or (type(oB) == type({}))):
return pretty_diff_dict(title, oA, oB)
elif ((type(oA) == type('')) or (type(oB) == type('')) or (type(oA) == type(u'')) or (type(oB) == type(u''))):
return pretty_diff_str(title, oA, oB)
else:
return pretty_diff_list(title, oA, oB)
return [] | -2,122,703,204,487,054,000 | Generate a pretty diff of two objects (actually just
dict, list or string) of lines suitable for use in pretty_diff_dicts()
This method is a pass-through to
pretty_diff_(dict|string|list)
depending on the input type.
:param title: the title/heading for the line
:type title: string
:param oA: first object
:param oB: second object
:returns: list of lines, each a list of 3 columns
:rtype: list of lists | contrib/cli_scripts/nodemeisterlib.py | pretty_diff_obj | coxmediagroup/nodemeister | python | def pretty_diff_obj(title, oA, oB):
'\n Generate a pretty diff of two objects (actually just\n dict, list or string) of lines suitable for use in pretty_diff_dicts()\n\n This method is a pass-through to\n pretty_diff_(dict|string|list)\n depending on the input type.\n\n :param title: the title/heading for the line\n :type title: string\n :param oA: first object\n :param oB: second object\n :returns: list of lines, each a list of 3 columns\n :rtype: list of lists\n '
if ((type(oA) == type({})) or (type(oB) == type({}))):
return pretty_diff_dict(title, oA, oB)
elif ((type(oA) == type('')) or (type(oB) == type('')) or (type(oA) == type(u'')) or (type(oB) == type(u''))):
return pretty_diff_str(title, oA, oB)
else:
return pretty_diff_list(title, oA, oB)
return [] |
def pretty_diff(title, titleA, dictA, titleB, dictB, onlydifferent=False):
'\n Generate a "pretty" printable diff of two Nodes or Groups\n containing arbitrarily deep dict, list or string items.\n\n Intended to be used for the "text" dicts in migrate_group()\n and migrate_node().\n\n :param title: overall title of the diff\n :type title: string\n :param titleA: title of the first dict\n :type titleA: string\n :param dictA: the first dict\n :type dictA: dict\n :param titleB: title of the second dict\n :type titleB: string\n :param dictB: the second dict\n :type dictB: dict\n :param onlydifferent: only output differing lines\n :type onlydifferent: boolean\n :returns: multi-line string, columnar diff of dicts\n :rtype: string\n '
s = ('Diff of %s\n' % title)
lines = []
lines.append(['', titleA, titleB])
lines.append(['', ('-' * len(titleA)), ('-' * len(titleB))])
lines.append(['name', dictA.get('name', '<none>'), dictB.get('name', '<none>')])
lines.append(['id', dictA.get('id', '<none>'), dictB.get('id', '<none>')])
lines.append(['description', dictA.get('description', '<none>'), dictB.get('description', '<none>')])
dictA.pop('name', None)
dictA.pop('id', None)
dictA.pop('description', None)
dictB.pop('name', None)
dictB.pop('id', None)
dictB.pop('description', None)
lines.append(['', '', ''])
k = set.union(set(dictA.keys()), set(dictB.keys()))
for p in sorted(k):
lines.append([(p.capitalize() + ':'), '', ''])
lines.extend(pretty_diff_obj('', dictA.get(p), dictB.get(p)))
s += print_columns(lines, onlydifferent=onlydifferent)
return s | 2,543,859,274,008,386,000 | Generate a "pretty" printable diff of two Nodes or Groups
containing arbitrarily deep dict, list or string items.
Intended to be used for the "text" dicts in migrate_group()
and migrate_node().
:param title: overall title of the diff
:type title: string
:param titleA: title of the first dict
:type titleA: string
:param dictA: the first dict
:type dictA: dict
:param titleB: title of the second dict
:type titleB: string
:param dictB: the second dict
:type dictB: dict
:param onlydifferent: only output differing lines
:type onlydifferent: boolean
:returns: multi-line string, columnar diff of dicts
:rtype: string | contrib/cli_scripts/nodemeisterlib.py | pretty_diff | coxmediagroup/nodemeister | python | def pretty_diff(title, titleA, dictA, titleB, dictB, onlydifferent=False):
'\n Generate a "pretty" printable diff of two Nodes or Groups\n containing arbitrarily deep dict, list or string items.\n\n Intended to be used for the "text" dicts in migrate_group()\n and migrate_node().\n\n :param title: overall title of the diff\n :type title: string\n :param titleA: title of the first dict\n :type titleA: string\n :param dictA: the first dict\n :type dictA: dict\n :param titleB: title of the second dict\n :type titleB: string\n :param dictB: the second dict\n :type dictB: dict\n :param onlydifferent: only output differing lines\n :type onlydifferent: boolean\n :returns: multi-line string, columnar diff of dicts\n :rtype: string\n '
s = ('Diff of %s\n' % title)
lines = []
lines.append(['', titleA, titleB])
lines.append(['', ('-' * len(titleA)), ('-' * len(titleB))])
lines.append(['name', dictA.get('name', '<none>'), dictB.get('name', '<none>')])
lines.append(['id', dictA.get('id', '<none>'), dictB.get('id', '<none>')])
lines.append(['description', dictA.get('description', '<none>'), dictB.get('description', '<none>')])
dictA.pop('name', None)
dictA.pop('id', None)
dictA.pop('description', None)
dictB.pop('name', None)
dictB.pop('id', None)
dictB.pop('description', None)
lines.append(['', '', ''])
k = set.union(set(dictA.keys()), set(dictB.keys()))
for p in sorted(k):
lines.append([(p.capitalize() + ':'), '', ''])
lines.extend(pretty_diff_obj('', dictA.get(p), dictB.get(p)))
s += print_columns(lines, onlydifferent=onlydifferent)
return s |
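An end-to-end sketch of pretty_diff() on two node dicts of the shape migrate_node() would produce (field names and values are invented). Note the function pops name, id and description out of the dicts it is given, so pass copies if the originals are still needed.

node_a = {
    'name': 'web01.example.com', 'id': 12, 'description': 'legacy ENC',
    'classes': ['ntp', 'ssh'],
    'parameters': {'env': 'prod'},
}
node_b = {
    'name': 'web01.example.com', 'id': 47, 'description': 'NodeMeister',
    'classes': ['ntp', 'sudo'],
    'parameters': {'env': 'prod'},
}
print(pretty_diff('node web01.example.com', 'old ENC', dict(node_a), 'NodeMeister', dict(node_b)))
# With onlydifferent=True only the flagged (red / DIFF_MARKER) lines are printed.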