code (stringlengths 10–805k) | def_use_chains (sequencelengths 0–667) |
---|---|
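# Classify n: odd -> "Weird"; even in 2..5 -> "Not Weird"; even in 6..20 -> "Weird"; even above 20 -> "Not Weird".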
if __name__ == '__main__':
n = int(input().strip())
if n % 2 != 0:
print("Weird")
elif 2 <= n <= 5:
print("Not Weird")
elif 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
| [
[
[
31,
32
],
[
63,
64
],
[
112,
113
],
[
161,
162
]
]
] |
import hashlib
import json
import sys
from logbook import Logger, StreamHandler
from pycoin.coins.bitcoin.networks import BitcoinMainnet
import pycoin.ui.key_from_text
import pycoin.key
import socket
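# Query an Electrum server (here localhost:50001) over its line-delimited JSON protocol for the
# balances of addresses derived from an xpub given on the command line; each derivation chain
# stops after 10 consecutive empty addresses.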
script_for_address = BitcoinMainnet.ui.script_for_address
log = Logger(__name__)
class Connection:
def __init__(self, addr):
self.s = socket.create_connection(addr)
self.f = self.s.makefile('r')
self.id = 0
def call(self, method, *args):
req = {
'id': self.id,
'method': method,
'params': list(args),
}
msg = json.dumps(req) + '\n'
self.s.sendall(msg.encode('ascii'))
return json.loads(self.f.readline())
def main():
conn = Connection(('localhost', 50001))
xpub, = sys.argv[1:]
total = 0
k = pycoin.ui.key_from_text.key_from_text(xpub)
for change in (0, 1):
empty = 0
for n in range(100):
address = k.subkey(change).subkey(n).address()
script = script_for_address(address)
script_hash = hashlib.sha256(script).digest()
log.debug('{}', conn.call('blockchain.scripthash.get_history',
script_hash[::-1].hex()))
reply = conn.call('blockchain.scripthash.get_balance',
script_hash[::-1].hex())
result = reply['result']
confirmed = result['confirmed'] / 1e8
total += confirmed
if confirmed:
log.info('{}/{} => {} has {:11.8f} BTC',
change, n, address, confirmed)
empty = 0
else:
empty += 1
if empty >= 10:
break
log.info('total balance: {} BTC', total)
if __name__ == '__main__':
with StreamHandler(sys.stderr, level='INFO').applicationbound():
main()
| [
[
[
7,
14
],
[
1076,
1083
]
],
[
[
22,
26
],
[
608,
612
],
[
690,
694
]
],
[
[
34,
37
],
[
1852,
1855
],
[
790,
793
]
],
[
[
59,
65
],
[
269,
275
]
],
[
[
67,
80
],
[
1838,
1851
]
],
[
[
124,
138
],
[
225,
239
]
],
[
[
146,
169
]
],
[
[
177,
187
],
[
825,
831
]
],
[
[
196,
202
],
[
352,
358
]
],
[
[
204,
222
],
[
1022,
1040
]
],
[
[
263,
266
],
[
1120,
1123
],
[
1529,
1532
],
[
1759,
1762
]
],
[
[
293,
303
],
[
745,
755
]
],
[
[
726,
730
],
[
1906,
1910
]
]
] |
from django.urls import path
from .views import MyObtainTokenPairView, RegisterView
from rest_framework_simplejwt.views import TokenRefreshView
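# JWT auth endpoints: obtain a token pair (login), refresh a token, and register a new user.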
urlpatterns = [
path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),
path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('register/', RegisterView.as_view(), name='auth_register'),
]
| [
[
[
24,
28
],
[
172,
176
],
[
252,
256
],
[
331,
335
]
],
[
[
49,
70
],
[
187,
208
]
],
[
[
72,
84
],
[
349,
361
]
],
[
[
129,
145
],
[
275,
291
]
],
[
[
151,
162
]
]
] |
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""Package Filter"""
from core.src.bootstrap.Constants import Constants
import fnmatch
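# Applies the globally excluded, explicitly excluded and explicitly included package lists from the
# execution configuration, using fnmatch patterns with optional version matching on inclusions.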
class PackageFilter(object):
"""implements the Package filtering logic"""
def __init__(self, execution_config, composite_logger):
self.execution_config = execution_config
self.composite_logger = composite_logger
# Exclusions - note: version based exclusion is not supported
self.global_excluded_packages = self.sanitize_str_to_list(self.execution_config.global_exclusion_list)
self.installation_excluded_package_masks = self.execution_config.excluded_package_name_mask_list
self.installation_excluded_packages, self.installation_excluded_package_versions = self.get_packages_and_versions_from_masks(self.installation_excluded_package_masks)
# Inclusions - note: version based inclusion is optionally supported
self.installation_included_package_masks = self.execution_config.included_package_name_mask_list
self.installation_included_packages, self.installation_included_package_versions = self.get_packages_and_versions_from_masks(self.installation_included_package_masks)
self.installation_included_classifications = [] if self.execution_config.included_classifications_list is None else self.execution_config.included_classifications_list
# Neutralize global excluded packages, if customer explicitly includes the package
packages_to_clear_from_global = []
for package in self.global_excluded_packages:
if self.check_for_explicit_inclusion(package):
self.composite_logger.log_debug('Removing package from global exclusion list: ' + package)
packages_to_clear_from_global.append(package)
self.global_excluded_packages = [x for x in self.global_excluded_packages if x not in packages_to_clear_from_global]
# Logging
self.composite_logger.log("\nAzure globally-excluded packages: " + str(self.global_excluded_packages))
self.composite_logger.log("Included package classifications: " + ', '.join(self.installation_included_classifications))
self.composite_logger.log("Included packages: " + str(self.installation_included_package_masks))
self.composite_logger.log("Excluded packages: " + str(self.installation_excluded_packages))
if '=' in str(self.installation_excluded_package_masks):
self.composite_logger.log_error("\n /!\\ Package exclusions do not support version matching in the filter today. "
"Due to this, more packages than expected may be excluded from this update deployment.")
# region Inclusion / exclusion presence checks
def is_exclusion_list_present(self):
"""Return true if either Global or patch installation specific exclusion list present"""
return bool(self.global_excluded_packages) or bool(self.installation_excluded_packages)
def is_inclusion_list_present(self):
"""Return true if patch installation Inclusion is present"""
return bool(self.installation_included_packages)
# endregion
# region Package exclusion checks
def check_for_exclusion(self, one_or_more_packages):
"""Return true if package need to be excluded"""
return self.check_for_match(one_or_more_packages, self.installation_excluded_packages) or \
self.check_for_match(one_or_more_packages, self.global_excluded_packages)
# endregion
# region Package inclusion checks
def check_for_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included (either because no inclusion list is specified, or because of explicit match)"""
return not self.is_inclusion_list_present() or self.check_for_explicit_inclusion(package, package_version)
def check_for_explicit_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included due to an explicit match to the inclusion list """
return self.check_for_match(package, self.installation_included_packages, package_version, self.installation_included_package_versions)
# endregion
# region Inclusion / exclusion common match checker
def check_for_match(self, one_or_more_packages, matching_list, linked_package_versions=Constants.DEFAULT_UNSPECIFIED_VALUE, version_matching_list=Constants.DEFAULT_UNSPECIFIED_VALUE):
# type: (str, object, str, object) -> bool # type hinting to remove a warning
"""Return true if package(s) (with, optionally, linked version(s)) matches the filter list"""
if matching_list:
if type(one_or_more_packages) is str:
return self.single_package_check_for_match(one_or_more_packages, matching_list, linked_package_versions, version_matching_list)
else:
for index, each_package in enumerate(one_or_more_packages):
if type(linked_package_versions) is str:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions, version_matching_list):
return True
else:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions[index], version_matching_list):
return True
return False
def single_package_check_for_match(self, package, matching_list, package_version, version_matching_list):
"""Returns true if a single package (optionally, version) matches the filter list"""
for index, matching_package in enumerate(matching_list):
if fnmatch.fnmatch(package, matching_package) or fnmatch.fnmatch(self.get_product_name_without_arch(package), matching_package):
self.composite_logger.log_debug(' - [Package] {0} matches expression {1}'.format(package, matching_package))
if package_version == Constants.DEFAULT_UNSPECIFIED_VALUE or not version_matching_list or version_matching_list[index] == Constants.DEFAULT_UNSPECIFIED_VALUE:
self.composite_logger.log_debug(' - [Version] Check skipped as not specified.')
return True
elif len(version_matching_list) > index and fnmatch.fnmatch(package_version, version_matching_list[index]):
self.composite_logger.log_debug(' - [Version] {0} matches expression {1}'.format(package, version_matching_list[index]))
return True
elif len(version_matching_list) <= index: # This should never happen - something has gone horribly wrong
self.composite_logger.log_error(' - [Version] Index error - ({0} of {1})'.format(index + 1, len(version_matching_list)))
else:
self.composite_logger.log_debug(' - Package {0} (version={1}) was found, but it did not match filter specified for version ({2})'.format(package, package_version, version_matching_list[index]))
return False
@staticmethod
def get_product_name_without_arch(package_name):
"""Splits out product name without architecture - if this is changed, review YumPackageManager"""
architectures = ['.x86_64', '.noarch', '.i686']
for arch in architectures:
if package_name.endswith(arch):
return package_name.replace(arch, '')
return package_name
# endregion
# region Get included / excluded package masks
def get_packages_and_versions_from_masks(self, package_masks):
"""Return package names and versions"""
packages = []
package_versions = []
if package_masks is not None:
for index, package_mask in enumerate(package_masks):
package_mask_split = str(package_mask).split('=')
if len(package_mask_split) == 1: # no version specified
packages.append(package_mask_split[0].strip())
package_versions.append(Constants.DEFAULT_UNSPECIFIED_VALUE)
elif len(package_mask_split) == 2: # version also specified
packages.append(package_mask_split[0].strip())
package_versions.append(package_mask_split[1].strip())
else: # invalid format
self.composite_logger.log_warning("Invalid package format: " + str(package_mask) + " [Ignored]")
return packages, package_versions
@staticmethod
def sanitize_str_to_list(string_input):
"""Strips excess white-space and converts a comma-separated string to a list"""
return [] if (string_input is None) else string_input.strip().split(",")
# endregion
# region Get installation classifications from execution configuration
def is_msft_critsec_classification_only(self):
return ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications) and 'Other' not in self.installation_included_classifications
def is_msft_other_classification_only(self):
return 'Other' in self.installation_included_classifications and not ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications)
def is_msft_all_classification_included(self):
"""Returns true if all classifications were individually selected *OR* (nothing was selected AND no inclusion list is present) -- business logic"""
all_classifications = [key for key in Constants.PackageClassification.__dict__.keys() if not key.startswith('__')]
all_classifications_explicitly_selected = bool(len(self.installation_included_classifications) == (len(all_classifications) - 1))
no_classifications_selected = bool(len(self.installation_included_classifications) == 0)
only_unclassified_selected = bool('Unclassified' in self.installation_included_classifications and len(self.installation_included_classifications) == 1)
return all_classifications_explicitly_selected or ((no_classifications_selected or only_unclassified_selected) and not self.is_inclusion_list_present())
def is_invalid_classification_combination(self):
return ('Other' in self.installation_included_classifications and 'Critical' in self.installation_included_classifications and 'Security' not in self.installation_included_classifications) or \
('Other' in self.installation_included_classifications and 'Security' in self.installation_included_classifications and 'Critical' not in self.installation_included_classifications)
# endregion
| [
[
[
674,
683
],
[
4190,
4199
],
[
4546,
4555
],
[
4996,
5005
],
[
5055,
5064
],
[
6649,
6658
],
[
6749,
6758
],
[
8738,
8747
],
[
10302,
10311
]
],
[
[
691,
698
],
[
6357,
6364
],
[
6403,
6410
],
[
6981,
6988
]
],
[
[
707,
720
]
]
] |
#!/usr/bin/python3
import sys
from signal import pause
import RPi.GPIO as GPIO
# script to activate and deactivate an amplifier, power led, etc. using a GPIO
# pin on power up / down
# For an example implementation with a PAM8403 digital amplifier
# (PAM pin 12 connected to GPIO 26), see:
# https://github.com/MiczFlor/RPi-Jukebox-RFID/wiki/Hardware-Hack-PAM8403-Poweroff
# change this value based on which GPIO port the amplifier or other devices are connected to
# Flexible Pinout
AMP_GPIO = 26
# Classic Pinout
# AMP_GPIO = 23
# setup RPi lib to control output pin
# we do not clean up the GPIO because we want the pin low (= off) after program exit
# the resulting warning can be ignored
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(AMP_GPIO, GPIO.OUT)
def set_amplifier(status):
if status:
print("Setting amplifier: ON")
GPIO.output(AMP_GPIO, GPIO.HIGH)
else:
print("Setting amplifier: OFF")
GPIO.output(AMP_GPIO, GPIO.LOW)
if __name__ == "__main__":
try:
set_amplifier(True)
pause()
except KeyboardInterrupt:
# turn the relay off
set_amplifier(False)
print("\nExiting amplifier control\n")
# exit the application
sys.exit(0)
| [
[
[
27,
30
],
[
1241,
1244
]
],
[
[
50,
55
],
[
1059,
1064
]
],
[
[
63,
79
],
[
693,
697
],
[
717,
721
],
[
730,
734
],
[
740,
744
],
[
761,
765
],
[
862,
866
],
[
884,
888
],
[
953,
957
],
[
975,
979
]
],
[
[
486,
494
],
[
751,
759
],
[
874,
882
],
[
965,
973
]
],
[
[
777,
790
],
[
1031,
1044
],
[
1134,
1147
]
]
] |
import os
import time
from click.testing import CliRunner
from bin.throne import cli as throne
runner = CliRunner()
shodan_key = os.getenv('SHODAN_KEY')
throne_user = os.getenv('THRONE_USER')
throne_pass = os.getenv('THRONE_PASS')
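# CLI smoke tests: set the throne and Shodan API keys using credentials taken from the environment.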
def test_throne_setapi():
print("Testing: throne api setapi")
response = runner.invoke(throne, ["api", "setapi", "-u", f"{throne_user}", "-p", f"{throne_pass}"])
assert response.exit_code == 0
assert "Successfully set throne API key." in response.output
def test_shodan_setapi():
print("Testing: throne shodan setapi")
response = runner.invoke(throne, ["shodan", "setapi"], input=f"{shodan_key}")
assert response.exit_code == 0
assert "Successfully set Shodan API key." in response.output | [
[
[
7,
9
],
[
131,
133
],
[
169,
171
],
[
208,
210
]
],
[
[
17,
21
]
],
[
[
48,
57
],
[
105,
114
]
],
[
[
81,
94
],
[
329,
335
],
[
603,
609
]
],
[
[
96,
102
],
[
315,
321
],
[
589,
595
]
],
[
[
118,
128
],
[
642,
652
]
],
[
[
155,
166
],
[
364,
375
]
],
[
[
194,
205
],
[
388,
399
]
],
[
[
238,
256
]
],
[
[
509,
527
]
]
] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
import six
from senlin.common import consts
from senlin.common.i18n import _
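# Stricter field types, with JSON-schema support, built on top of oslo.versionedobjects fields.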
CONF = cfg.CONF
# Field alias for code readability
# BooleanField = fields.BooleanField
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
    # NOTE: The following definition is much stricter than the oslo
    # version. Also note the treatment of default values here:
    # we use the user-specified default value when invoking
    # the 'bool_from_string' utility function.
def __init__(self, default=False):
super(Boolean, self).__init__()
self._default = default
def coerce(self, obj, attr, value):
return strutils.bool_from_string(value, strict=True,
default=self._default)
def get_schema(self):
return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
# NOTE: This definition is kept because we want the error message from
# 'int' conversion to be user friendly.
@staticmethod
def coerce(obj, attr, value):
try:
v = int(value)
except (TypeError, ValueError):
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < 0:
err = _("Value must be >= 0 for field '%s'.") % attr
raise ValueError(err)
return v
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': 0
}
# Senlin has a stricter field checking for object fields.
class Object(fields.Object):
def get_schema(self):
schema = super(Object, self).get_schema()
# we are not checking whether self._obj_name is registered, an
# exception will be raised anyway if it is not registered.
data_key = 'senlin_object.data'
schema['properties'][data_key]['additionalProperties'] = False
return schema
class UUID(fields.FieldType):
_PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
r'{4}-?[a-fA-F0-9]{12}$')
@staticmethod
def coerce(obj, attr, value):
if not uuidutils.is_uuid_like(value):
msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'."
) % {'attr': attr, 'value': value}
raise ValueError(msg)
return str(value)
def get_schema(self):
return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
def coerce(self, obj, attr, value):
if isinstance(value, six.string_types):
try:
return jsonutils.loads(value)
except ValueError:
msg = _("The value (%s) is not a valid JSON.") % value
raise ValueError(msg)
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return jsonutils.dumps(value)
def stringify(self, value):
if isinstance(value, six.string_types):
try:
return jsonutils.loads(value)
except ValueError:
raise
return str(value)
def get_schema(self):
return {'type': ['object']}
class NotificationPriority(fields.Enum):
# The priorities here are derived from oslo_messaging.notify.notifier
ALL = consts.NOTIFICATION_PRIORITIES
def __init__(self):
super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
ALL = consts.NOTIFICATION_PHASES
def __init__(self):
super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
def __init__(self, min_len=1, max_len=255):
super(Name, self).__init__()
self.min_len = min_len
self.max_len = max_len
def coerce(self, obj, attr, value):
err = None
if len(value) < self.min_len:
err = _("The value for the %(attr)s field must be at least "
"%(count)d characters long."
) % {'attr': attr, 'count': self.min_len}
elif len(value) > self.max_len:
err = _("The value for the %(attr)s field must be less than "
"%(count)d characters long."
) % {'attr': attr, 'count': self.max_len}
else:
# NOTE: This is pretty restrictive. We can relax it later when
# there are requests to do so
regex = re.compile(u'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
re.IGNORECASE)
if not regex.search(value):
err = _("The value for the '%(attr)s' (%(value)s) contains "
"illegal characters. It must contain only "
"alphanumeric or \"_-.~\" characters and must start "
"with letter."
) % {'attr': attr, 'value': value}
if err:
raise ValueError(err)
return super(Name, self).coerce(obj, attr, value)
def get_schema(self):
return {
'type': ['string'],
'minLength': self.min_len,
'maxLength': self.max_len
}
class Capacity(fields.Integer):
def __init__(self, minimum=0, maximum=None):
super(Capacity, self).__init__()
CONF.import_opt("max_nodes_per_cluster", "senlin.common.config")
if minimum > CONF.max_nodes_per_cluster:
err = _("The value of 'minimum' cannot be greater than the global "
"constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.minimum = minimum
if maximum is not None:
if maximum < minimum:
err = _("The value of 'maximum' must be greater than or equal "
"to that of the 'minimum' specified.")
raise ValueError(err)
if maximum > CONF.max_nodes_per_cluster:
err = _("The value of 'maximum' cannot be greater than the "
"global constraint (%(m)d)."
) % {'m': CONF.max_nodes_per_cluster}
raise ValueError(err)
self.maximum = maximum
else:
self.maximum = CONF.max_nodes_per_cluster
def coerce(self, obj, attr, value):
try:
v = int(value)
except Exception:
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < self.minimum:
raise ValueError(_("The value for the %(a)s field must be greater "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.minimum})
elif v > self.maximum:
raise ValueError(_("The value for the %(a)s field must be less "
"than or equal to %(n)d.") %
{'a': attr, 'n': self.maximum})
return super(Capacity, self).coerce(obj, attr, v)
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': self.minimum,
'maximum': self.maximum,
'pattern': '^[0-9]*$',
}
class Sort(fields.String):
def __init__(self, valid_keys):
super(Sort, self).__init__()
self.valid_keys = valid_keys
def coerce(self, obj, attr, value):
for s in value.split(','):
s_key, _sep, s_dir = s.partition(':')
err = None
if not s_key:
err = _("Missing sort key for '%s'.") % attr
raise ValueError(err)
if s_key not in self.valid_keys:
err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_key}
if s_dir and s_dir not in ('asc', 'desc'):
err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
) % {'attr': attr, 'value': s_dir}
if err:
raise ValueError(err)
return super(Sort, self).coerce(obj, attr, value)
def get_schema(self):
return {
'type': ['string'],
}
class IdentityList(fields.List):
def __init__(self, element_type, min_items=0, unique=True, nullable=False,
**kwargs):
super(IdentityList, self).__init__(element_type, **kwargs)
self.min_items = min_items
self.unique_items = unique
self.nullable = nullable
def coerce(self, obj, attr, value):
res = super(IdentityList, self).coerce(obj, attr, value)
if len(res) < self.min_items:
raise ValueError(_("Value for '%(attr)s' must have at least "
"%(num)s item(s).") %
{'attr': attr, 'num': self.min_items})
if len(set(res)) != len(res) and self.unique_items:
raise ValueError(_("Items for '%(attr)s' must be unique") %
{'attr': attr})
return res
def get_schema(self):
schema = super(IdentityList, self).get_schema()
if self.nullable:
schema['type'].append('null')
schema['minItems'] = self.min_items
schema['uniqueItems'] = self.unique_items
return schema
class BaseEnum(fields.FieldType):
    # NOTE: We are not basing Enum on String because String does not
    # handle None values correctly.
def __init__(self, nullable=False):
valid_values = list(self.__class__.ALL)
if not valid_values:
raise ValueError(_("No list of valid values provided for enum."))
for value in valid_values:
if not isinstance(value, six.string_types):
raise ValueError(_("Enum field only support string values."))
self._valid_values = list(valid_values)
self._nullable = nullable
super(BaseEnum, self).__init__()
def coerce(self, obj, attr, value):
value = six.text_type(value)
if value not in self._valid_values:
raise ValueError(_("Value '%(value)s' is not acceptable for "
"field '%(attr)s'.") %
{'value': value, 'attr': attr})
return value
def stringify(self, value):
if value is None:
return None
return '\'%s\'' % value
class AdjustmentType(BaseEnum):
ALL = consts.ADJUSTMENT_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterActionName(BaseEnum):
ALL = consts.CLUSTER_ACTION_NAMES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class ClusterStatus(BaseEnum):
ALL = consts.CLUSTER_STATUSES
class NodeStatus(BaseEnum):
ALL = consts.NODE_STATUSES
class ActionStatus(BaseEnum):
ALL = consts.ACTION_STATUSES
class ReceiverType(BaseEnum):
ALL = consts.RECEIVER_TYPES
def get_schema(self):
return {'type': ['string'],
'enum': self._valid_values}
class UniqueDict(fields.Dict):
def coerce(self, obj, attr, value):
res = super(UniqueDict, self).coerce(obj, attr, value)
new_nodes = res.values()
if len(new_nodes) != len(set(new_nodes)):
raise ValueError(_("Map contains duplicated values"))
return res
# TODO(Qiming): remove this when oslo patch is released
# https://review.openstack.org/#/c/360095
class NonNegativeIntegerField(fields.AutoTypedField):
AUTO_TYPE = NonNegativeInteger()
class BooleanField(fields.AutoTypedField):
AUTO_TYPE = Boolean()
# An override to the oslo.versionedobjects version so that we are using
# our own Object definition.
class ObjectField(fields.AutoTypedField):
def __init__(self, objtype, subclasses=False, **kwargs):
self.AUTO_TYPE = Object(objtype, subclasses)
self.objname = objtype
super(ObjectField, self).__init__(**kwargs)
class JsonField(fields.AutoTypedField):
AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
AUTO_TYPE = fields.List(fields.FieldType())
class NotificationPriorityField(fields.BaseEnumField):
AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(fields.BaseEnumField):
AUTO_TYPE = NotificationPhase()
class NameField(fields.AutoTypedField):
AUTO_TYPE = Name()
class UUIDField(fields.AutoTypedField):
AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
super(CapacityField, self).__init__(nullable=nullable, default=default)
class SortField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, valid_keys, nullable=False, default=None):
self.AUTO_TYPE = Sort(valid_keys)
super(SortField, self).__init__(nullable=nullable, default=default)
class IdentityListField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, min_items=0, unique=True, nullable=False, default=None):
if default is None:
default = []
self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
unique=unique)
super(IdentityListField, self).__init__(nullable=nullable,
default=default)
class AdjustmentTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = AdjustmentType(nullable=nullable)
super(AdjustmentTypeField, self).__init__(**kwargs)
class ClusterActionNameField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ClusterActionName(nullable=nullable)
super(ClusterActionNameField, self).__init__(**kwargs)
class ClusterStatusField(fields.AutoTypedField):
AUTO_TYPE = ClusterStatus
class NodeStatusField(fields.AutoTypedField):
AUTO_TYPE = NodeStatus
class ActionStatusField(fields.AutoTypedField):
AUTO_TYPE = ActionStatus
class ReceiverTypeField(fields.AutoTypedField):
AUTO_TYPE = None
def __init__(self, **kwargs):
nullable = kwargs.get('nullable', False)
self.AUTO_TYPE = ReceiverType(nullable=nullable)
super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
AUTO_TYPE = UniqueDict(fields.String())
class CustomListField(ListField):
def __init__(self, attr_name, **kwargs):
self.attr_name = attr_name
super(CustomListField, self).__init__(**kwargs)
def coerce(self, obj, attr, value):
objs = super(CustomListField, self).coerce(obj, attr, value)
custom_list = []
for i in objs:
custom_list.append(getattr(i, self.attr_name))
return custom_list
| [
[
[
573,
576
],
[
820,
823
]
],
[
[
608,
617
],
[
3781,
3790
],
[
4121,
4130
],
[
4265,
4274
]
],
[
[
641,
649
],
[
1726,
1734
]
],
[
[
673,
682
],
[
3306,
3315
]
],
[
[
717,
723
],
[
925,
931
],
[
967,
973
],
[
1001,
1007
],
[
1034,
1040
],
[
1064,
1070
],
[
1097,
1103
],
[
1139,
1145
],
[
1186,
1192
],
[
1230,
1236
],
[
1269,
1275
],
[
1927,
1933
],
[
2719,
2725
],
[
3096,
3102
],
[
3634,
3640
],
[
4459,
4465
],
[
4701,
4707
],
[
4849,
4855
],
[
6417,
6423
],
[
8538,
8544
],
[
9534,
9540
],
[
10645,
10651
],
[
12463,
12469
],
[
12879,
12885
],
[
12962,
12968
],
[
13134,
13140
],
[
13374,
13380
],
[
13439,
13445
],
[
13479,
13485
],
[
13491,
13497
],
[
13545,
13551
],
[
13638,
13644
],
[
13715,
13721
],
[
13781,
13787
],
[
13851,
13857
],
[
14143,
14149
],
[
14400,
14406
],
[
14869,
14875
],
[
15149,
15155
],
[
15431,
15437
],
[
15510,
15516
],
[
15588,
15594
],
[
15668,
15674
],
[
15941,
15947
],
[
15993,
15999
],
[
14618,
14624
]
],
[
[
731,
733
],
[
5671,
5673
],
[
5751,
5753
]
],
[
[
741,
744
],
[
3722,
3725
],
[
4206,
4209
],
[
11049,
11052
],
[
11327,
11330
]
],
[
[
772,
778
],
[
4558,
4564
],
[
4726,
4732
],
[
11762,
11768
],
[
11941,
11947
],
[
12120,
12126
],
[
12185,
12191
],
[
12249,
12255
],
[
12315,
12321
]
],
[
[
810,
811
],
[
2232,
2233
],
[
2426,
2427
],
[
3355,
3356
],
[
3857,
3858
],
[
5129,
5130
],
[
5353,
5354
],
[
5828,
5829
],
[
6666,
6667
],
[
6961,
6962
],
[
7196,
7197
],
[
7644,
7645
],
[
7860,
7861
],
[
8092,
8093
],
[
8862,
8863
],
[
9007,
9008
],
[
9197,
9198
],
[
9999,
10000
],
[
10254,
10255
],
[
10927,
10928
],
[
11101,
11102
],
[
11421,
11422
],
[
12693,
12694
]
],
[
[
813,
817
],
[
6533,
6537
],
[
6620,
6624
],
[
6779,
6783
],
[
7146,
7150
],
[
7338,
7342
],
[
7481,
7485
]
],
[
[
902,
922
]
],
[
[
953,
964
]
],
[
[
986,
998
]
],
[
[
1021,
1031
]
],
[
[
1052,
1061
]
],
[
[
1081,
1094
]
],
[
[
1118,
1136
]
],
[
[
1165,
1183
]
],
[
[
1212,
1227
]
],
[
[
1261,
1268
],
[
13003,
13010
],
[
1612,
1619
]
],
[
[
1908,
1926
],
[
12920,
12938
]
],
[
[
2712,
2718
],
[
2785,
2791
],
[
13245,
13251
]
],
[
[
3091,
3095
],
[
13822,
13826
]
],
[
[
3629,
3633
],
[
13414,
13418
]
],
[
[
4438,
4458
],
[
13584,
13604
],
[
4628,
4648
]
],
[
[
4683,
4700
],
[
13677,
13694
],
[
4792,
4809
]
],
[
[
4844,
4848
],
[
13756,
13760
],
[
4928,
4932
],
[
6200,
6204
]
],
[
[
6408,
6416
],
[
6498,
6506
],
[
8282,
8290
],
[
14002,
14010
]
],
[
[
8533,
8537
],
[
8605,
8609
],
[
9390,
9394
],
[
14281,
14285
]
],
[
[
9521,
9533
],
[
9670,
9682
],
[
9887,
9899
],
[
10411,
10423
],
[
14605,
14617
]
],
[
[
10636,
10644
],
[
11740,
11748
],
[
11919,
11927
],
[
12098,
12106
],
[
12163,
12171
],
[
12227,
12235
],
[
12293,
12301
],
[
11243,
11251
]
],
[
[
11725,
11739
],
[
15024,
15038
]
],
[
[
11901,
11918
],
[
15304,
15321
]
],
[
[
12084,
12097
],
[
15472,
15485
]
],
[
[
12152,
12162
],
[
15551,
15561
]
],
[
[
12214,
12226
],
[
15629,
15641
]
],
[
[
12280,
12292
],
[
15823,
15835
]
],
[
[
12452,
12462
],
[
15982,
15992
],
[
12538,
12548
]
],
[
[
12855,
12878
]
],
[
[
12949,
12961
]
],
[
[
13122,
13133
],
[
13318,
13329
]
],
[
[
13364,
13373
]
],
[
[
13429,
13438
],
[
16034,
16043
]
],
[
[
13519,
13544
]
],
[
[
13615,
13637
]
],
[
[
13705,
13714
]
],
[
[
13771,
13780
]
],
[
[
13837,
13850
],
[
14059,
14072
]
],
[
[
14133,
14142
],
[
14312,
14321
]
],
[
[
14382,
14399
],
[
14723,
14740
]
],
[
[
14849,
14868
],
[
15072,
15091
]
],
[
[
15126,
15148
],
[
15355,
15377
]
],
[
[
15412,
15430
]
],
[
[
15494,
15509
]
],
[
[
15570,
15587
]
],
[
[
15650,
15667
],
[
15869,
15886
]
],
[
[
15921,
15940
]
],
[
[
16018,
16033
],
[
16141,
16156
],
[
16245,
16260
]
]
] |
# coding:utf-8
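# Tornado web helpers: a URL routing decorator, mako/jinja2 template lookup, cookie-backed
# sessions, flash messages, and base View/Login/Ajax request handler classes.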
import json
import random
import string
import tornado.web
import config
from lib.jsdict import JsDict
from model.user import User
# route
class Route(object):
urls = []
def __call__(self, url, name=None):
def _(cls):
self.urls.append(tornado.web.URLSpec(url, cls, name=name))
return cls
return _
route = Route()
# Templates
def get_lookup_mako():
import mako.lookup
_lookup = mako.lookup.TemplateLookup(
directories=['./templates'],
module_directory='/tmp/mako' + ''.join(random.sample(string.ascii_letters + string.digits, 8)),
input_encoding='utf-8',
)
return _lookup
def get_lookup_jinja2(_globals={}, extensions=[]):
from jinja2 import Environment, FileSystemLoader
_lookup = Environment(
loader=FileSystemLoader(['./templates'], encoding='utf-8'),
extensions=extensions
)
    # mako has no global-variables feature; for consistency, jinja2 makes the same compromise toward mako here
#_lookup.globals['url_for'] = url_for
_lookup.globals['config'] = config
_lookup.globals.update(_globals)
return _lookup
if config.TEMPLATE == 'mako':
lookup = get_lookup_mako()
elif config.TEMPLATE == 'jinja2':
lookup = get_lookup_jinja2()
else:
lookup = None
# Session
class SimpleSession(object):
def __init__(self, request):
self._request = request
self._data = self.load()
def __delitem__(self, key):
del self._data[key]
def __getitem__(self, key):
return self._data.get(key)
def __setitem__(self, key, value):
self._data[key] = value
def load(self):
_s = self._request.get_secure_cookie('session') or '{}'
try: _s = _s.decode('utf-8') # fix:py2
except: pass
return json.loads(_s)
def flush(self):
self._request.set_secure_cookie('session', json.dumps(self._data))
# Flash-message support
class Messages(object):
MESSAGE_LEVEL = JsDict(
DEBUG=10,
INFO=20,
SUCCESS=25,
WARNING=30,
ERROR=40,
)
DEFAULT_TAGS = {
MESSAGE_LEVEL.DEBUG: 'debug',
MESSAGE_LEVEL.INFO: 'info',
MESSAGE_LEVEL.SUCCESS: 'success',
MESSAGE_LEVEL.WARNING: 'warning',
MESSAGE_LEVEL.ERROR: 'error',
}
def __init__(self):
self.messages = []
def _add_message(self, level, message):
self.messages.append([level, message])
def debug(self, message):
self._add_message(self.MESSAGE_LEVEL.DEBUG, message)
def info(self, message):
self._add_message(self.MESSAGE_LEVEL.INFO, message)
def success(self, message):
self._add_message(self.MESSAGE_LEVEL.SUCCESS, message)
def warning(self, message):
self._add_message(self.MESSAGE_LEVEL.WARNING, message)
def error(self, message):
self._add_message(self.MESSAGE_LEVEL.ERROR, message)
class View(tornado.web.RequestHandler):
def render(self, fn=None, **kwargs):
if not fn:
fn = ('/%s/%s.html' % (
'/'.join(self.__module__.split('.')[1:-1]),
self.__class__.__name__.lower()
)).replace(r'//', r'/')
kwargs.update({
'req': self,
'config': config,
'static': self.static_url,
'url_for': self.reverse_url,
'get_messages': self.get_messages,
'xsrf_token': self.xsrf_form_html(),
'csrf_token': self.xsrf_form_html(),
})
if lookup:
tmpl = lookup.get_template(fn)
self.finish(tmpl.render(**kwargs))
else:
if fn.startswith('/'):
fn = '.' + fn
super(View, self).render(fn, config=config, **kwargs)
def get_messages(self):
msg_lst = self.messages.messages + (self.session['_messages'] or [])
_messages = []
for i in msg_lst:
tag, txt = i
            try: txt = txt.decode('utf-8')  # convert for py2
except: pass
_messages.append(JsDict(tag=Messages.DEFAULT_TAGS[tag], txt=txt))
self.messages.messages = []
return _messages
def initialize(self):
self.messages = Messages()
self.session = SimpleSession(self)
super(View, self).initialize()
def flush(self, include_footers=False, callback=None):
self.session['_messages'] = self.messages.messages
self.session.flush()
super(View, self).flush(include_footers, callback)
def current_user(self):
key = self.get_secure_cookie('u')
return User.get_by_key(key)
def is_admin(self):
user = self.current_user()
if user and user.is_admin():
return user
class LoginView(View):
def prepare(self):
if not self.current_user():
self.redirect(url_for('signin'))
class NoLoginView(View):
def prepare(self):
if self.current_user():
            self.messages.error("You are already logged in; please log out first")
self.redirect(url_for('index'))
class AjaxView(View):
def check_xsrf_cookie(self):
# useless for json request
pass
def prepare(self):
self.set_header('Content-Type', 'application/json')
super(AjaxView, self).prepare()
class AjaxLoginView(LoginView):
def check_xsrf_cookie(self):
# useless for json request
pass
def prepare(self):
self.set_header('Content-Type', 'application/json')
super(AjaxLoginView, self).prepare()
# sugar
def url_for(name, *args):
return config.app.reverse_url(name, *args)
def page_title(*args):
no_blank = lambda x: x is not None and x != ''
return ' » '.join(list(filter(no_blank, args)) + [config.TITLE])
| [
[
[
23,
27
],
[
1769,
1773
],
[
1857,
1861
]
],
[
[
35,
41
],
[
570,
576
]
],
[
[
49,
55
],
[
584,
590
],
[
607,
613
]
],
[
[
63,
74
],
[
2891,
2898
],
[
282,
289
]
],
[
[
82,
88
],
[
1112,
1118
],
[
1175,
1181
],
[
1045,
1051
],
[
3233,
3239
],
[
3714,
3720
],
[
5531,
5537
],
[
5698,
5704
]
],
[
[
112,
118
],
[
1937,
1943
],
[
4021,
4027
]
],
[
[
142,
146
],
[
4569,
4573
]
],
[
[
163,
168
],
[
374,
379
]
],
[
[
366,
371
]
],
[
[
393,
408
],
[
1152,
1167
]
],
[
[
694,
711
],
[
1217,
1234
]
],
[
[
1143,
1149
],
[
3489,
3495
],
[
3516,
3522
]
],
[
[
1208,
1214
],
[
3489,
3495
],
[
3516,
3522
]
],
[
[
1247,
1253
],
[
3489,
3495
],
[
3516,
3522
]
],
[
[
1279,
1292
],
[
4217,
4230
]
],
[
[
1898,
1906
],
[
4032,
4040
],
[
4183,
4191
]
],
[
[
2886,
2890
],
[
4729,
4733
],
[
4860,
4864
],
[
5028,
5032
],
[
3684,
3688
],
[
4251,
4255
],
[
4438,
4442
]
],
[
[
4719,
4728
],
[
5262,
5271
]
],
[
[
4848,
4859
]
],
[
[
5019,
5027
],
[
5214,
5222
]
],
[
[
5248,
5261
],
[
5453,
5466
]
],
[
[
5498,
5505
],
[
4821,
4828
],
[
4993,
5000
]
],
[
[
5573,
5583
]
]
] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os.path
import sys
import unittest
from builtins import map
import yaml
from apache_beam.coders import coder_impl
from apache_beam.coders import coders
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
STANDARD_CODERS_YAML = os.path.join(
os.path.dirname(__file__), '..', 'testing', 'data', 'standard_coders.yaml')
def _load_test_cases(test_yaml):
"""Load test data from yaml file and return an iterable of test cases.
See ``standard_coders.yaml`` for more details.
"""
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
for ix, spec in enumerate(yaml.load_all(open(test_yaml))):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
class StandardCodersTest(unittest.TestCase):
_urn_to_coder_class = {
'beam:coder:bytes:v1': coders.BytesCoder,
'beam:coder:varint:v1': coders.VarIntCoder,
'beam:coder:kv:v1': lambda k, v: coders.TupleCoder((k, v)),
'beam:coder:interval_window:v1': coders.IntervalWindowCoder,
'beam:coder:iterable:v1': lambda t: coders.IterableCoder(t),
'beam:coder:global_window:v1': coders.GlobalWindowCoder,
'beam:coder:windowed_value:v1':
lambda v, w: coders.WindowedValueCoder(v, w)
}
_urn_to_json_value_parser = {
'beam:coder:bytes:v1': lambda x: x,
'beam:coder:varint:v1': lambda x: x,
'beam:coder:kv:v1':
lambda x, key_parser, value_parser: (key_parser(x['key']),
value_parser(x['value'])),
'beam:coder:interval_window:v1':
lambda x: IntervalWindow(
start=Timestamp(micros=(x['end'] - x['span']) * 1000),
end=Timestamp(micros=x['end'] * 1000)),
'beam:coder:iterable:v1': lambda x, parser: list(map(parser, x)),
'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
'beam:coder:windowed_value:v1':
lambda x, value_parser, window_parser: windowed_value.create(
value_parser(x['value']), x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']]))
}
def test_standard_coders(self):
for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
logging.info('Executing %s test.', name)
self._run_standard_coder(name, spec)
def _run_standard_coder(self, name, spec):
coder = self.parse_coder(spec['coder'])
parse_value = self.json_value_parser(spec['coder'])
nested_list = [spec['nested']] if 'nested' in spec else [True, False]
for nested in nested_list:
for expected_encoded, json_value in spec['examples'].items():
value = parse_value(json_value)
expected_encoded = expected_encoded.encode('latin1')
if not spec['coder'].get('non_deterministic', False):
actual_encoded = encode_nested(coder, value, nested)
if self.fix and actual_encoded != expected_encoded:
self.to_fix[spec['index'], expected_encoded] = actual_encoded
else:
self.assertEqual(expected_encoded, actual_encoded)
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
else:
# Only verify decoding for a non-deterministic coder
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
def parse_coder(self, spec):
return self._urn_to_coder_class[spec['urn']](
*[self.parse_coder(c) for c in spec.get('components', ())])
def json_value_parser(self, coder_spec):
component_parsers = [
self.json_value_parser(c) for c in coder_spec.get('components', ())]
return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
x, *component_parsers)
# Used when --fix is passed.
fix = False
to_fix = {}
@classmethod
def tearDownClass(cls):
if cls.fix and cls.to_fix:
print("FIXING", len(cls.to_fix), "TESTS")
doc_sep = '\n---\n'
docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
def quote(s):
return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
print(quote(expected_encoded), "->", quote(actual_encoded))
docs[doc_ix] = docs[doc_ix].replace(
quote(expected_encoded) + ':', quote(actual_encoded) + ':')
open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
out = coder_impl.create_OutputStream()
coder.get_impl().encode_to_stream(value, out, nested)
return out.get()
def decode_nested(coder, encoded, nested=True):
return coder.get_impl().decode_from_stream(
coder_impl.create_InputStream(encoded), nested)
if __name__ == '__main__':
if '--fix' in sys.argv:
StandardCodersTest.fix = True
sys.argv.remove('--fix')
unittest.main()
| [
[
[
883,
898
]
],
[
[
922,
936
]
],
[
[
945,
949
],
[
5230,
5234
]
],
[
[
957,
964
],
[
3381,
3388
]
],
[
[
972,
979
],
[
1344,
1346
],
[
1362,
1364
],
[
1611,
1613
]
],
[
[
987,
990
],
[
5971,
5974
],
[
6019,
6022
]
],
[
[
998,
1006
],
[
1906,
1914
],
[
6046,
6054
]
],
[
[
1028,
1031
],
[
2951,
2954
]
],
[
[
1040,
1044
],
[
1735,
1739
]
],
[
[
1077,
1087
],
[
5668,
5678
],
[
5878,
5888
]
],
[
[
1119,
1125
],
[
1982,
1988
],
[
2031,
2037
],
[
2156,
2162
],
[
2288,
2294
],
[
2090,
2096
],
[
2226,
2232
],
[
2375,
2381
]
],
[
[
1161,
1167
],
[
3015,
3021
]
],
[
[
1210,
1224
],
[
2757,
2771
]
],
[
[
1255,
1269
],
[
3125,
3139
]
],
[
[
1310,
1319
],
[
2793,
2802
],
[
2860,
2869
]
],
[
[
1321,
1341
],
[
3352,
3372
],
[
5150,
5170
],
[
5559,
5579
]
],
[
[
1444,
1460
],
[
3335,
3351
]
],
[
[
1887,
1905
],
[
5985,
6003
]
],
[
[
5618,
5631
],
[
3974,
3987
]
],
[
[
5782,
5795
],
[
4254,
4267
],
[
4442,
4455
]
]
] |
# Under MIT License, see LICENSE.txt
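# Minimal field model: holds a ball object and forwards position updates to it.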
class Field():
def __init__(self, ball):
self.ball = ball
def move_ball(self, position, delta):
self.ball.set_position(position, delta)
| [
[
[
45,
50
]
]
] |
#!/usr/bin/env python
# setup
# Setup script for installing nomen
##########################################################################
## Imports
##########################################################################
import os
import re
import codecs
from setuptools import setup
from setuptools import find_packages
##########################################################################
## Package Information
##########################################################################
## Basic information
NAME = "nomen"
DESCRIPTION = "YAML configuration tree with command line flags."
AUTHOR = "Jaan Altosaar"
EMAIL = "j@jaan.io"
LICENSE = "MIT"
REPOSITORY = "https://github.com/altosaar/nomen"
PACKAGE = "nomen"
## Define the keywords
KEYWORDS = (
'nomen', 'python', 'option', 'tree', 'nested', 'dict', 'parameter', 'flags'
)
## Define the classifiers
## See https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = (
'Development Status :: 4 - Beta',
'Environment :: Console',
    'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
)
## Important Paths
PROJECT = os.path.abspath(os.path.dirname(__file__))
VERSION_PATH = os.path.join(PACKAGE, "version.py")
## Directories to ignore in find_packages
EXCLUDES = (
"tests", "bin", "docs", "fixtures", "register", "notebooks",
)
## Requirements
REQUIREMENTS = ["pyyaml", "addict"]
##########################################################################
## Helper Functions
##########################################################################
def read(*parts):
"""
    Assume UTF-8 encoding and return the contents of the file located at the
    absolute path built from PROJECT joined with *parts.
"""
with codecs.open(os.path.join(PROJECT, *parts), 'rb', 'utf-8') as f:
return f.read()
def get_version(path=VERSION_PATH):
"""
Reads the version.py defined in the VERSION_PATH to find the get_version
function, and executes it to ensure that it is loaded correctly.
"""
namespace = {}
exec(read(path), namespace)
return namespace['get_version']()
##########################################################################
## Define the configuration
##########################################################################
config = {
"name": NAME,
"version": get_version(),
"description": DESCRIPTION,
"long_description": DESCRIPTION,
"license": LICENSE,
"author": AUTHOR,
"author_email": EMAIL,
"maintainer": AUTHOR,
"maintainer_email": EMAIL,
"url": REPOSITORY,
"download_url": "{}/tarball/v{}".format(REPOSITORY, get_version()),
"packages": find_packages(where=PROJECT, exclude=EXCLUDES),
"classifiers": CLASSIFIERS,
"keywords": KEYWORDS,
"zip_safe": False,
"install_requires": REQUIREMENTS,
}
##########################################################################
## Run setup script
##########################################################################
if __name__ == '__main__':
setup(**config)
| [
[
[
236,
238
],
[
1277,
1279
],
[
1293,
1295
],
[
1335,
1337
],
[
1910,
1912
]
],
[
[
246,
248
]
],
[
[
256,
262
],
[
1898,
1904
]
],
[
[
287,
292
],
[
3197,
3202
]
],
[
[
316,
329
],
[
2825,
2838
]
],
[
[
526,
530
],
[
2479,
2483
]
],
[
[
541,
552
],
[
2534,
2545
],
[
2571,
2582
]
],
[
[
606,
612
],
[
2622,
2628
],
[
2675,
2681
]
],
[
[
631,
636
],
[
2650,
2655
],
[
2707,
2712
]
],
[
[
651,
658
],
[
2599,
2606
]
],
[
[
667,
677
],
[
2725,
2735
],
[
2781,
2791
]
],
[
[
716,
723
],
[
1348,
1355
]
],
[
[
758,
766
],
[
2921,
2929
]
],
[
[
943,
954
],
[
2892,
2903
]
],
[
[
1267,
1274
],
[
2845,
2852
],
[
1923,
1930
]
],
[
[
1320,
1332
],
[
2009,
2021
]
],
[
[
1414,
1422
],
[
2862,
2870
]
],
[
[
1511,
1523
],
[
2978,
2990
]
],
[
[
1724,
1728
],
[
2214,
2218
]
],
[
[
1992,
2003
],
[
2500,
2511
],
[
2793,
2804
]
],
[
[
2456,
2462
],
[
3205,
3211
]
]
] |
from confd_gnmi_adapter import GnmiServerAdapter
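# Skeleton NETCONF-backed gNMI server adapter: all operations are placeholders that do
# nothing or return empty results.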
class GnmiNetconfServerAdapter(GnmiServerAdapter):
@classmethod
def get_adapter(cls):
pass
def set(self, prefix, path, val):
pass
def get_subscription_handler(self, subscription_list):
pass
def capabilities(self):
return []
def get(self, prefix, paths, data_type, use_models):
return []
| [
[
[
31,
48
],
[
82,
99
]
],
[
[
57,
81
]
]
] |
# Copyright 2021 The LightSeq Team
# Copyright Facebook Fairseq
# We use layers from Facebook Fairseq as our baseline
import math
import uuid
from typing import Dict, Optional, Tuple, List
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter, LayerNorm, Dropout, Linear
from lightseq.training.ops.pytorch import util
from lightseq.training.ops.pytorch.layer_base import (
TransformerEmbeddingLayerBase,
TransformerEncoderLayerBase,
TransformerDecoderLayerBase,
)
from .quantization import (
QuantLinear,
TensorQuantizer,
act_quant_config,
weight_quant_config,
)
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
is_decoder=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = Dropout(dropout)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
self.is_decoder = is_decoder
assert (
not self.self_attention or self.qkv_same_dim
), "Self-attention requires query, key and value to be of the same size"
self.attention_quant = None
if self.self_attention:
# self.qkv_proj = Linear(embed_dim, 3*embed_dim, bias=bias)
self.qkv_proj = QuantLinear(embed_dim, 3 * embed_dim, bias=bias)
self.attention_quant = (
TensorQuantizer(act_quant_config) if self.is_decoder else None
)
elif self.encoder_decoder_attention and self.is_decoder:
self.k_proj = QuantLinear(
self.kdim, embed_dim, pre_activation="encoder_out", bias=bias
)
self.v_proj = QuantLinear(
self.vdim, embed_dim, pre_activation="encoder_out", bias=bias
)
self.q_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
self.out_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.tpu = False
self.init_incremental_state()
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
if self.self_attention:
nn.init.xavier_uniform_(self.qkv_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
):
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
qkv = self.qkv_proj(query)
if self.attention_quant is not None:
qkv = self.attention_quant(qkv)
q, k, v = qkv.split(self.embed_dim, dim=-1)
# q = self.q_proj(query)
# k = self.k_proj(query)
# v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
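        # Raw attention scores of shape (bsz * num_heads, tgt_len, src_len);
        # q has already been multiplied by self.scaling above.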
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not self.tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = util.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
class TransformerEncoderLayer(TransformerEncoderLayerBase):
"""Encoder layer implemented by fairseq.
    This version only removes the "args" parameter; there are no other changes.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`.
In the tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
normalize_before to True.
"""
def __init__(self, config, initial_weights=None, initial_biases=None):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = self.build_self_attention(
self.embed_dim, config.nhead, config.attn_prob_dropout_ratio
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout_module = Dropout(config.hidden_dropout_ratio)
self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
self.normalize_before = config.pre_layer_norm
self.fc1 = QuantLinear(
self.embed_dim,
config.intermediate_size,
)
self.fc2 = QuantLinear(
config.intermediate_size, self.embed_dim, pre_activation="relu"
)
self.final_layer_norm = LayerNorm(self.embed_dim)
def build_self_attention(self, embed_dim, nhead, attn_dropout):
return MultiheadAttention(
embed_dim,
nhead,
dropout=attn_dropout,
self_attention=True,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
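        # e.g. "<name>.layer_norms.0.weight" -> "<name>.self_attn_layer_norm.weight"
        # and "<name>.layer_norms.1.bias" -> "<name>.final_layer_norm.bias"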
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask):
"""
Args:
x (Tensor): input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
x = x.transpose(0, 1)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
x = x.transpose(0, 1)
return x
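# A minimal illustrative sketch (not part of the original file) of the two residual
# orderings described in the TransformerEncoderLayer docstring above. `sublayer`
# stands in for either the self-attention or the FFN block; the helper names are
# hypothetical and only make the pre-/post-layernorm difference explicit.
def _post_norm_block(x, sublayer, layer_norm, dropout):
    # paper-style ordering: dropout -> add residual -> layernorm
    residual = x
    x = dropout(sublayer(x))
    return layer_norm(residual + x)
def _pre_norm_block(x, sublayer, layer_norm, dropout):
    # tensor2tensor-style ordering: layernorm first, then dropout -> add residual
    residual = x
    x = dropout(sublayer(layer_norm(x)))
    return residual + x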
class TransformerDecoderLayer(TransformerDecoderLayerBase):
"""Decoder layer implemented by fairseq.
    This version only removes the "args" parameter; there are no other changes.
"""
def __init__(self, config, initial_weights=None, initial_biases=None):
super().__init__()
self.embed_dim = config.hidden_size
self.dropout_module = Dropout(config.hidden_dropout_ratio)
self.cross_self_attention = False
self.self_attn = self.build_self_attention(
self.embed_dim,
config.nhead,
config.attn_prob_dropout_ratio,
)
self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
self.normalize_before = config.pre_layer_norm
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = self.build_encoder_attention(
self.embed_dim,
config.hidden_size,
config.attn_prob_dropout_ratio,
config.nhead,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = QuantLinear(
self.embed_dim,
config.intermediate_size,
)
self.fc2 = QuantLinear(
config.intermediate_size,
self.embed_dim,
pre_activation="relu",
)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = True
self.onnx_trace = False
def build_self_attention(
self, embed_dim, nhead, attn_dropout, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
nhead,
dropout=attn_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not self.cross_self_attention,
is_decoder=True,
)
def build_encoder_attention(
self, embed_dim, encoder_embed_dim, attn_dropout, nhead
):
return MultiheadAttention(
embed_dim,
nhead,
kdim=encoder_embed_dim,
vdim=encoder_embed_dim,
dropout=attn_dropout,
encoder_decoder_attention=True,
is_decoder=True,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
x = x.transpose(0, 1)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
if (
encoder_out.shape[1] != x.shape[1]
and x.shape[1] % encoder_out.shape[1] == 0
):
beam_size = int(x.shape[1] / encoder_out.shape[1])
encoder_out = encoder_out.repeat_interleave(beam_size, 1)
encoder_padding_mask = encoder_padding_mask.repeat_interleave(
beam_size, 0
)
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
x = x.transpose(0, 1)
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
class TransformerEmbeddingLayer(TransformerEmbeddingLayerBase):
def __init__(self, config):
super().__init__()
self.emb_lookup = nn.Embedding(
config.vocab_size, config.embedding_dim, padding_idx=config.padding_idx
)
self.emb_lookup.to(dtype=(torch.half if config.fp16 else torch.float))
self.embeddings = self.emb_lookup.weight
nn.init.normal_(self.embeddings, mean=0, std=config.embedding_dim ** -0.5)
nn.init.constant_(self.embeddings[config.padding_idx], 0)
self.embed_positions = SinusoidalPositionalEmbedding(
config.embedding_dim, config.padding_idx, config.max_seq_len, config.fp16
)
self.embedding_dim = config.embedding_dim
self.dropout = Dropout(config.dropout)
self.emb_quant = TensorQuantizer(weight_quant_config)
self.config = config
def forward(self, input, step=0):
x = self.emb_lookup(input)
x = self.emb_quant(x)
x = math.sqrt(self.embedding_dim) * x
x += self.embed_positions(input, step)
x = self.dropout(x)
return x
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024, fp16=False):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
if fp16:
self.weights = self.weights.to(torch.half)
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
return emb
def make_positions(self, tensor, padding_idx, step):
mask = tensor.ne(padding_idx).int()
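        # Non-padding tokens are numbered 0, 1, 2, ... (offset by `step` for incremental
        # decoding); multiplying by the mask zeroes out the padding positions.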
return ((torch.cumsum(mask, dim=1).type_as(mask) - 1 + step) * mask).long()
def forward(
self,
input,
step=0,
incremental_state=None,
timestep=None,
positions=None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.size(0), input.size(1)
positions = self.make_positions(input, self.padding_idx, step)
mask = (
torch.ne(input, self.padding_idx)
.unsqueeze(2)
.expand(bsz, seq_len, self.embedding_dim)
)
return (
self.weights.to(input.device)
.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
* mask
).detach()
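# A small example (not part of the original file) of how the static helper above is
# used. For position p and channel i, get_embedding places sin(p * exp(-i * ln(10000)
# / (half_dim - 1))) in the first half_dim channels and the matching cos terms in the
# remaining channels; the helper name below is hypothetical.
def _sinusoidal_embedding_example():
    # 4 positions, 6 channels -> a (4, 6) tensor: 3 sin channels followed by 3 cos channels
    emb = SinusoidalPositionalEmbedding.get_embedding(
        num_embeddings=4, embedding_dim=6, padding_idx=None
    )
    assert emb.shape == (4, 6)
    return emb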
| [
[
[
127,
131
],
[
3589,
3593
],
[
3690,
3694
],
[
3773,
3777
],
[
3856,
3860
],
[
33728,
33732
],
[
34805,
34809
]
],
[
[
139,
143
],
[
18738,
18742
]
],
[
[
164,
168
],
[
4571,
4575
],
[
4581,
4585
],
[
15789,
15793
],
[
15799,
15803
],
[
16678,
16682
],
[
16629,
16633
],
[
16639,
16643
],
[
16881,
16885
],
[
17015,
17019
],
[
17025,
17029
],
[
17071,
17075
],
[
19039,
19043
],
[
18962,
18966
],
[
18972,
18976
],
[
19544,
19548
],
[
19554,
19558
],
[
19423,
19427
],
[
19433,
19437
],
[
19497,
19501
],
[
26626,
26630
],
[
26636,
26640
],
[
27914,
27918
],
[
30588,
30592
]
],
[
[
170,
178
],
[
4433,
4441
],
[
4466,
4474
],
[
4510,
4518
],
[
4562,
4570
],
[
4591,
4599
],
[
4706,
4714
],
[
9513,
9521
],
[
13724,
13732
],
[
14325,
14333
],
[
14177,
14185
],
[
14226,
14234
],
[
15809,
15817
],
[
16688,
16696
],
[
16620,
16628
],
[
16649,
16657
],
[
16891,
16899
],
[
17035,
17043
],
[
17081,
17089
],
[
19030,
19038
],
[
19049,
19057
],
[
18953,
18961
],
[
18982,
18990
],
[
19535,
19543
],
[
19564,
19572
],
[
19414,
19422
],
[
19443,
19451
],
[
19507,
19515
],
[
26498,
26506
],
[
26559,
26567
],
[
26617,
26625
],
[
26646,
26654
],
[
26704,
26712
],
[
26766,
26774
],
[
26827,
26835
],
[
26890,
26898
],
[
27924,
27932
],
[
30598,
30606
],
[
34517,
34525
]
],
[
[
180,
185
]
],
[
[
187,
191
],
[
26713,
26717
],
[
26775,
26779
]
],
[
[
200,
205
],
[
15696,
15701
],
[
2836,
2841
],
[
2903,
2908
],
[
7428,
7433
],
[
7490,
7495
],
[
7602,
7607
],
[
7793,
7798
],
[
9026,
9031
],
[
9444,
9449
],
[
11006,
11011
],
[
11088,
11093
],
[
11220,
11225
],
[
11411,
11416
],
[
11510,
11515
],
[
11724,
11729
],
[
12474,
12479
],
[
13194,
13199
],
[
14636,
14641
],
[
14970,
14975
],
[
15156,
15161
],
[
15316,
15321
],
[
15492,
15497
],
[
26507,
26512
],
[
26568,
26573
],
[
26718,
26723
],
[
26780,
26785
],
[
26836,
26841
],
[
26899,
26904
],
[
28707,
28712
],
[
29181,
29186
],
[
29343,
29348
],
[
33022,
33027
],
[
33053,
33058
],
[
34401,
34406
],
[
34852,
34857
],
[
34862,
34867
],
[
34891,
34896
],
[
34926,
34931
],
[
34961,
34966
],
[
35042,
35047
],
[
35053,
35058
],
[
35069,
35074
],
[
35216,
35221
],
[
35232,
35237
],
[
35410,
35415
],
[
35840,
35845
]
],
[
[
213,
237
]
],
[
[
256,
262
],
[
4442,
4448
],
[
4475,
4481
],
[
4519,
4525
],
[
4600,
4606
],
[
4715,
4721
],
[
9522,
9528
],
[
13733,
13739
],
[
14334,
14340
],
[
14186,
14192
],
[
14235,
14241
],
[
15818,
15824
],
[
15848,
15854
],
[
16697,
16703
],
[
16658,
16664
],
[
16900,
16906
],
[
17044,
17050
],
[
17090,
17096
],
[
19058,
19064
],
[
18991,
18997
],
[
19573,
19579
],
[
19452,
19458
],
[
19516,
19522
],
[
26655,
26661
],
[
27933,
27939
],
[
30607,
30613
]
],
[
[
264,
266
],
[
674,
676
],
[
33893,
33895
],
[
3534,
3536
],
[
3637,
3639
],
[
3720,
3722
],
[
3803,
3805
],
[
3896,
3898
],
[
3952,
3954
],
[
4008,
4010
],
[
4061,
4063
],
[
4162,
4164
],
[
4253,
4255
],
[
4337,
4339
],
[
32880,
32882
],
[
33125,
33127
],
[
33208,
33210
]
],
[
[
288,
297
],
[
2826,
2835
],
[
2893,
2902
]
],
[
[
299,
308
],
[
20793,
20802
],
[
21373,
21382
],
[
24841,
24850
],
[
25105,
25114
],
[
25416,
25425
]
],
[
[
310,
317
],
[
1415,
1422
],
[
20849,
20856
],
[
21012,
21019
],
[
24335,
24342
],
[
24702,
24709
],
[
33497,
33504
]
],
[
[
319,
325
]
],
[
[
369,
373
],
[
12945,
12949
],
[
20915,
20919
],
[
24605,
24609
]
],
[
[
433,
462
],
[
32762,
32791
]
],
[
[
468,
495
],
[
19874,
19901
]
],
[
[
501,
528
],
[
24006,
24033
]
],
[
[
564,
575
],
[
2127,
2138
],
[
2398,
2409
],
[
2529,
2540
],
[
2660,
2671
],
[
2730,
2741
],
[
21133,
21144
],
[
21241,
21252
],
[
25151,
25162
],
[
25259,
25270
]
],
[
[
581,
596
],
[
2230,
2245
],
[
33546,
33561
]
],
[
[
602,
618
],
[
2246,
2262
]
],
[
[
624,
643
],
[
33562,
33581
]
],
[
[
655,
673
],
[
9751,
9769
],
[
21483,
21501
],
[
25643,
25661
],
[
26034,
26052
]
],
[
[
19850,
19873
]
],
[
[
23982,
24005
]
],
[
[
32736,
32761
]
],
[
[
33863,
33892
],
[
33297,
33326
],
[
34236,
34265
]
]
] |
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ApiGenerateCodeResponse(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, script: str=None): # noqa: E501
"""ApiGenerateCodeResponse - a model defined in Swagger
:param script: The script of this ApiGenerateCodeResponse. # noqa: E501
:type script: str
"""
self.swagger_types = {
'script': str
}
self.attribute_map = {
'script': 'script'
}
self._script = script
@classmethod
def from_dict(cls, dikt) -> 'ApiGenerateCodeResponse':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The apiGenerateCodeResponse of this ApiGenerateCodeResponse. # noqa: E501
:rtype: ApiGenerateCodeResponse
"""
return util.deserialize_model(dikt, cls)
@property
def script(self) -> str:
"""Gets the script of this ApiGenerateCodeResponse.
The script source code to run the component in a pipeline # noqa: E501
:return: The script of this ApiGenerateCodeResponse.
:rtype: str
"""
return self._script
@script.setter
def script(self, script: str):
"""Sets the script of this ApiGenerateCodeResponse.
The script source code to run the component in a pipeline # noqa: E501
:param script: The script of this ApiGenerateCodeResponse.
:type script: str
"""
self._script = script
| [
[
[
619,
634
]
],
[
[
656,
660
]
],
[
[
662,
670
]
],
[
[
705,
709
]
],
[
[
711,
715
]
],
[
[
777,
782
],
[
847,
852
]
],
[
[
810,
814
],
[
1718,
1722
]
],
[
[
823,
846
]
]
] |
#!/usr/bin/env python3
import sys
from bisect import bisect, bisect_left, bisect_right, insort, insort_left, insort_right # type: ignore
from collections import Counter, defaultdict, deque # type: ignore
from fractions import gcd # type: ignore
from heapq import heapify, heappop, heappush, heappushpop, heapreplace, merge # type: ignore
from itertools import accumulate, combinations, permutations, product # type: ignore
N, Q = map(int, input().split())
A = list(map(int, input().split()))
x = []
for i in range(Q):
x.append(int(input()))
A.sort(reverse=True)
left = 0
right = N - 1
num = 0
for j in range(Q):
while right - left > 0:
print("left", left, "right", right)
mid = int((left + right) / 2)
if A[mid] < x[j]:
right = mid + 1
elif A[mid] > x[j]:
left = mid - 1
else:
break
num = mid
print("mid", num)
| [
[
[
30,
33
]
],
[
[
53,
59
]
],
[
[
61,
72
]
],
[
[
74,
86
]
],
[
[
88,
94
]
],
[
[
96,
107
]
],
[
[
109,
121
]
],
[
[
162,
169
]
],
[
[
171,
182
]
],
[
[
184,
189
]
],
[
[
228,
231
]
],
[
[
266,
273
]
],
[
[
275,
282
]
],
[
[
284,
292
]
],
[
[
294,
305
]
],
[
[
307,
318
]
],
[
[
320,
325
]
],
[
[
364,
374
]
],
[
[
376,
388
]
],
[
[
390,
402
]
],
[
[
404,
411
]
],
[
[
430,
431
],
[
593,
594
]
],
[
[
433,
434
],
[
524,
525
],
[
622,
623
]
],
[
[
464,
465
],
[
555,
556
],
[
747,
748
],
[
803,
804
]
],
[
[
502,
503
],
[
532,
533
],
[
756,
757
],
[
812,
813
]
],
[
[
513,
514
]
],
[
[
576,
580
],
[
644,
648
],
[
676,
680
],
[
717,
721
]
],
[
[
585,
590
],
[
636,
641
],
[
691,
696
],
[
724,
729
]
],
[
[
599,
602
],
[
912,
915
]
],
[
[
611,
612
],
[
758,
759
],
[
814,
815
]
],
[
[
706,
709
],
[
749,
752
],
[
782,
785
],
[
805,
808
],
[
837,
840
],
[
891,
894
]
],
[
[
774,
779
],
[
636,
641
],
[
691,
696
],
[
724,
729
]
],
[
[
830,
834
],
[
644,
648
],
[
676,
680
],
[
717,
721
]
],
[
[
885,
888
],
[
912,
915
]
]
] |
"""Unit test for KNX time objects."""
import unittest
from xknx.dpt import DPTTime, DPTWeekday
from xknx.exceptions import ConversionError
class TestDPTTime(unittest.TestCase):
"""Test class for KNX time objects."""
#
# TEST NORMAL TIME
#
def test_from_knx(self):
"""Test parsing of DPTTime object from binary values. Example 1."""
self.assertEqual(DPTTime().from_knx((0x4D, 0x17, 0x2A)),
{'weekday': DPTWeekday.TUESDAY,
'hours': 13,
'minutes': 23,
'seconds': 42})
def test_to_knx(self):
"""Testing KNX/Byte representation of DPTTime object."""
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.TUESDAY,
'hours': 13,
'minutes': 23,
'seconds': 42})
self.assertEqual(raw, (0x4D, 0x17, 0x2A))
#
# TEST MAXIMUM TIME
#
def test_to_knx_max(self):
"""Testing KNX/Byte representation of DPTTime object. Maximum values."""
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.SUNDAY,
'hours': 23,
'minutes': 59,
'seconds': 59})
self.assertEqual(raw, (0xF7, 0x3b, 0x3b))
def test_from_knx_max(self):
"""Test parsing of DPTTime object from binary values. Example 2."""
self.assertEqual(DPTTime().from_knx((0xF7, 0x3b, 0x3b)),
{'weekday': DPTWeekday.SUNDAY,
'hours': 23,
'minutes': 59,
'seconds': 59})
#
# TEST MINIMUM TIME
#
def test_to_knx_min(self):
"""Testing KNX/Byte representation of DPTTime object. Minimum values."""
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.NONE,
'hours': 0,
'minutes': 0,
'seconds': 0})
self.assertEqual(raw, (0x0, 0x0, 0x0))
def test_from_knx_min(self):
"""Test parsing of DPTTime object from binary values. Example 3."""
self.assertEqual(DPTTime().from_knx((0x0, 0x0, 0x0)),
{'weekday': DPTWeekday.NONE,
'hours': 0,
'minutes': 0,
'seconds': 0})
#
# TEST INITIALIZATION
#
def test_to_knx_default(self):
"""Testing default initialization of DPTTime object."""
self.assertEqual(DPTTime().to_knx({}), (0x0, 0x0, 0x0))
def test_from_knx_wrong_size(self):
"""Test parsing from DPTTime object from wrong binary values (wrong size)."""
with self.assertRaises(ConversionError):
DPTTime().from_knx((0xF8, 0x23))
def test_from_knx_wrong_bytes(self):
"""Test parsing from DPTTime object from wrong binary values (wrong bytes)."""
with self.assertRaises(ConversionError):
            # third parameter exceeds the limit
DPTTime().from_knx((0xF7, 0x3b, 0x3c))
def test_from_knx_wrong_type(self):
"""Test parsing from DPTTime object from wrong binary values (wrong type)."""
with self.assertRaises(ConversionError):
DPTTime().from_knx((0xF8, "0x23"))
def test_to_knx_wrong_parameter(self):
"""Test parsing from DPTTime object from wrong string value."""
with self.assertRaises(ConversionError):
DPTTime().to_knx("fnord")
def test_to_knx_wrong_seconds(self):
"""Test parsing from DPTTime object from wrong seconds value."""
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 12,
'minutes': 42,
'seconds': 61
})
def test_to_knx_wrong_minutes(self):
"""Test parsing from DPTTime object from wrong minutes value."""
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 12,
'minutes': 61,
'seconds': 53
})
def test_to_knx_wrong_hours(self):
"""Test parsing from DPTTime object from wrong hours value."""
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 24,
'minutes': 42,
'seconds': 53
})
def test_test_range_wrong_weekday(self):
"""Test range testing with wrong weekday (Cant be tested with normal from_/to_knx)."""
# pylint: disable=protected-access
self.assertFalse(DPTTime._test_range(8, 0, 0, 0))
| [
[
[
45,
53
],
[
160,
168
]
],
[
[
76,
83
],
[
389,
396
],
[
715,
722
],
[
1073,
1080
],
[
1402,
1409
],
[
1783,
1790
],
[
2104,
2111
],
[
2477,
2484
],
[
2704,
2711
],
[
2971,
2978
],
[
3198,
3205
],
[
3410,
3417
],
[
3612,
3619
],
[
3912,
3919
],
[
4208,
4215
],
[
4541,
4548
]
],
[
[
85,
95
],
[
466,
476
],
[
757,
767
],
[
1115,
1125
],
[
1479,
1489
],
[
1825,
1835
],
[
2178,
2188
]
],
[
[
124,
139
],
[
2674,
2689
],
[
2897,
2912
],
[
3168,
3183
],
[
3380,
3395
],
[
3582,
3597
],
[
3882,
3897
],
[
4178,
4193
]
],
[
[
148,
159
]
]
] |
# Copyright (c) 2019-present, The Johann Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE file. See the AUTHORS file for names of contributors.
"""Johann, lightweight and flexible scenario orchestration"""
__version__ = "0.3.0-alpha"
| [
[
[
288,
299
]
]
] |
from o3seespy.base_model import OpenSeesObject
class LayerBase(OpenSeesObject):
op_base_type = "layer"
class Straight(LayerBase):
"""
The Straight Layer Class
The layer command is used to generate a number of fibers along a line or a circular arc.
"""
op_type = 'straight'
def __init__(self, osi, mat, num_fiber, area_fiber, start, end):
"""
Initial method for Straight
Parameters
----------
mat: obj
Material tag associated with this fiber (uniaxialmaterial tag for a fibersection and ndmaterial tag for use
in an ndfibersection).
num_fiber: int
Number of fibers along line
area_fiber: float
Area of each fiber
start: list
Y & z-coordinates of first fiber in line (local coordinate system)
end: list
Y & z-coordinates of last fiber in line (local coordinate system)
"""
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.start = start
self.end = end
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.start, *self.end]
self.to_process(osi)
class Circ(LayerBase):
"""
The Circ Layer Class
This command is used to construct a line of fibers along a circular arc
"""
op_type = 'circ'
def __init__(self, osi, mat, num_fiber, area_fiber, center, radius, ang=None):
"""
Initial method for Circ
Parameters
----------
mat: obj
Material tag associated with this fiber (uniaxialmaterial tag for a fibersection and ndmaterial tag for use
in an ndfibersection).
num_fiber: int
Number of fibers along line
area_fiber: float
Area of each fiber
center: listf
Y & z-coordinates of center of circular arc
radius: float
            Radius of circular arc
ang: listf
Starting and ending angle (optional) [0.0, 360.0-360/num_fibres]
"""
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.center = center
self.radius = float(radius)
self.ang = ang
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.center, self.radius]
if self.ang is not None:
self._parameters += self.ang
self.to_process(osi)
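# A usage sketch (not part of o3seespy itself); the helper below is hypothetical and
# assumes `osi` is a valid OpenSees instance object and `mat` is a material object
# exposing the `.tag` attribute referenced in `self._parameters` above.
def _example_fiber_layers(osi, mat):
    # one line of 10 fibers between two local (y, z) points
    straight = Straight(osi, mat, num_fiber=10, area_fiber=0.01,
                        start=[0.0, -0.2], end=[0.0, 0.2])
    # one circular arc of 16 fibers; ang follows the [0.0, 360.0 - 360 / num_fiber] hint
    circ = Circ(osi, mat, num_fiber=16, area_fiber=0.01,
                center=[0.0, 0.0], radius=0.25, ang=[0.0, 337.5])
    return straight, circ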
| [
[
[
32,
46
],
[
65,
79
]
],
[
[
55,
64
],
[
126,
135
],
[
1273,
1282
]
],
[
[
117,
125
]
],
[
[
1268,
1272
]
]
] |
import json
import os
os.environ["system_file"] = "./tests/testing_data/system.yaml"
from typing import Dict, Text, Any, List
import pytest
import responses
from mongoengine import connect, disconnect
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from kairon.action_server.data_objects import HttpActionRequestBody, HttpActionConfig, HttpActionLog
from kairon.action_server.actions import ActionUtility, HttpAction
from kairon.action_server.exception import HttpActionFailure
from kairon.utils import Utility
def pytest_configure():
return {
'db_url': None,
}
class TestActions:
@pytest.fixture(autouse=True)
def setup(self):
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_evironment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(host=db_url)
@pytest.fixture
def mock_get_http_action_exception(self, monkeypatch):
def _raise_excep(*arge, **kwargs):
raise HttpActionFailure("No HTTP action found for bot and action")
monkeypatch.setattr(ActionUtility, "get_http_action_config", _raise_excep)
@responses.activate
def test_execute_http_request_getWith_auth_token(self):
http_url = 'http://localhost:8080/mock'
# file deepcode ignore HardcodedNonCryptoSecret: Random string for testing
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_get_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_post_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_post_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_put_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_put_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_delete_with_request_body_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_with_auth_token_no_request_body(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=None)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[
responses.json_params_matcher(request_params)
]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
def test_get_http_action_config(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "http_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_config_deleted_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="",
action_name="test_get_http_action_config_deleted_action",
response="${RESPONSE}",
http_url="http://www.digite.com",
request_method="POST",
params_list=http_params,
bot="bot",
user="user",
status=False
).save().to_mongo().to_dict()
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_get_http_action_config_deleted_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "test_get_http_action_config_deleted_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_no_bot(self):
try:
ActionUtility.get_http_action_config(bot=None, action_name="http_action")
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_no_http_action(self):
try:
ActionUtility.get_http_action_config(bot="bot", action_name=None)
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_invalid_bot(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot1", "http_action")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_invalid_http_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_no_request_body(self):
http_params = []
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_prepare_request(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "slot_name": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param2", value="slot_name", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
assert actual_request_body
assert actual_request_body['param1'] == 'value1'
assert actual_request_body['param2'] == 'param2value'
def test_prepare_request_empty_slot(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param3", value="", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert not request_params['param3']
def test_prepare_request_sender_id(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="user_id", value="", parameter_type="sender_id")]
tracker = Tracker(sender_id="kairon_user@digite.com", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert request_params['user_id'] == "kairon_user@digite.com"
def test_prepare_request_no_request_params(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events: List[Dict] = None
http_action_config_params: List[HttpActionRequestBody] = None
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
# deepcode ignore C1801: empty request body for http request with no request body params
assert len(actual_request_body) == 0
@pytest.mark.asyncio
async def test_name(self):
assert await HttpAction().name() == "kairon_http_action"
def test_is_empty(self):
assert ActionUtility.is_empty("")
assert ActionUtility.is_empty(" ")
assert ActionUtility.is_empty(None)
assert not ActionUtility.is_empty("None")
def test_prepare_response(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
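        # ${...} placeholders are dotted paths resolved against the JSON above:
        # a.b.3 -> 2, a.b.d.0 -> 'red', a.b.c -> []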
response = ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json1)
assert response == 'The value of 2 in red is []'
json2 = json.dumps({
"data": [
{"a": {
"b": {
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}}},
{"a": {
"b": {
"43": 5,
"c": [1, 2],
"d": ['buggy', 'bumpers'],
}}}
]
})
response = ActionUtility.prepare_response("The value of ${data.0.a} in ${data.0.a.b} is ${data.0.a.b.d}", json2)
assert response == 'The value of {"b": {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}} in {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]} is [\'red\', \'buggy\', \'bumpers\']'
def test_prepare_response_key_not_present(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_string_response(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("The value of red is 0", json1)
assert response == "The value of red is 0"
def test_prepare_response_string_empty_response_string(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("", json1)
assert response == '{"a": {"b": {"3": 2, "43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}}}'
def test_prepare_response_string_empty_request_output(self):
json1 = json.dumps("{}")
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_invalid_response_json(self):
json_as_string = "Not a json string"
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json_as_string)
assert False
except HttpActionFailure as e:
assert str(e) == 'Could not find value for keys in response'
def test_prepare_response_as_json_and_expected_as_plain_string(self):
json_as_string = "Not a json string"
response = ActionUtility.prepare_response("The value of 2 in red is []", json_as_string)
assert response == 'The value of 2 in red is []'
def test_prepare_response_as_string_and_expected_as_none(self):
response = ActionUtility.prepare_response("The value of 2 in red is []", None)
assert response == 'The value of 2 in red is []'
@pytest.mark.asyncio
async def test_run_invalid_http_action(self, mock_get_http_action_exception):
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_http_action": "test_run_invalid_http_action",
"param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_run_invalid_http_action1",
response="json",
http_url="http://www.google.com",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
).save()
dispatcher: CollectingDispatcher = CollectingDispatcher()
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
await HttpAction().run(dispatcher, tracker, domain)
str(dispatcher.messages[0]['text']).__contains__(
"I have failed to process your request: No HTTP action found for bot")
log = HttpActionLog.objects(sender="sender1",
bot="5f50fd0a56b698ca10d35d2e",
status="FAILURE").get()
assert log['exception'].__contains__('No HTTP action found for bot')
@pytest.mark.asyncio
async def test_run_no_bot(self):
slots = {"bot": None, "http_action_config_http_action": "new_http_action", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
tracker = Tracker(sender_id="sender2", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'I have failed to process your request'
log = HttpActionLog.objects(sender="sender2",
status="FAILURE").get()
assert log['exception'] == 'Bot id and HTTP action configuration name not found in slot'
@pytest.mark.asyncio
async def test_run_no_http_action(self):
slots = {"bot": "jhgfsjgfausyfgus", "http_action_config_http_action": None, "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'I have failed to process your request'
@pytest.mark.asyncio
async def test_run(self, monkeypatch):
action = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="This should be response",
http_url="http://www.google.com",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "http_action",
"param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender_test_run", slots=slots, events=events, paused=False,
latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'This should be response'
log = HttpActionLog.objects(sender="sender_test_run",
status="SUCCESS").get()
assert not log['exception']
assert log['timestamp']
assert log['intent']
assert log['action']
assert log['bot_response']
assert log['api_response']
@pytest.mark.asyncio
async def test_run_with_post(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="Data added successfully, id:${RESPONSE}",
http_url="http://localhost:8080/mock",
request_method="POST",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8080/mock'
resp_msg = "5000"
responses.start()
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert actual[0]['name'] == 'KAIRON_ACTION_RESPONSE'
assert actual[0]['value'] == 'Data added successfully, id:5000'
@pytest.mark.asyncio
async def test_run_with_post_and_parameters(self, monkeypatch):
request_params = [HttpActionRequestBody(key='key1', value="value1"),
HttpActionRequestBody(key='key2', value="value2")]
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="Data added successfully, id:${RESPONSE}",
http_url="http://localhost:8080/mock",
request_method="POST",
params_list=request_params,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8080/mock'
resp_msg = "5000"
responses.start()
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender_test_run_with_post", slots=slots, events=events, paused=False,
latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'Data added successfully, id:5000'
log = HttpActionLog.objects(sender="sender_test_run_with_post",
action="test_run_with_post",
status="SUCCESS").get()
assert not log['exception']
assert log['timestamp']
assert log['intent'] == "test_run"
assert log['action'] == "test_run_with_post"
assert log['request_params'] == {"key1": "value1", "key2": "value2"}
assert log['api_response'] == '5000'
assert log['bot_response'] == 'Data added successfully, id:5000'
@pytest.mark.asyncio
async def test_run_with_get(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_get",
response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
http_url="http://localhost:8081/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8081/mock'
resp_msg = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
responses.start()
responses.add(
method=responses.GET,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'The value of 2 in red is [\'red\', \'buggy\', \'bumpers\']'
@pytest.mark.asyncio
async def test_run_no_connection(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="This should be response",
http_url="http://localhost:8085/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']).__contains__('I have failed to process your request')
@pytest.mark.asyncio
async def test_run_with_get_placeholder_vs_string_response(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_get_string_http_response_placeholder_required",
response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
http_url="http://localhost:8080/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8082/mock'
resp_msg = "This is string http response"
responses.start()
responses.add(
method=responses.GET,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e",
"http_action_config_test_run": "test_run_with_get_string_http_response_placeholder_required"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(
actual[0]['value']) == 'I have failed to process your request'
def test_attach_response_no_placeholder(self):
output = ActionUtility.attach_response("This has no placeholder", {"a": "b"})
assert output == "This has no placeholder"
def test_attach_response(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", {"dollars": "51"})
assert output == 'I want ${\'dollars\': \'51\'}'
def test_attach_response_int(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", 51)
assert output == 'I want $51'
def test_retrieve_value_from_response(self):
keys = ["a.b.3", 'a.b']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
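        # Dotted keys are resolved against the nested dict: 'a.b.3' yields the leaf value, 'a.b' the whole sub-dict.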
key_values = ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert key_values is not None
assert key_values['${a.b.3}'] == 2
assert key_values['${a.b}'] is not None
assert key_values['${a.b}']['3'] == 2
assert key_values['${a.b}']['d'][0] == 'red'
def test_retrieve_value_from_response_invalid_key(self):
keys = ["d.e.f", 'g.h']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
try:
ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert False
except HttpActionFailure as e:
assert str(e) == 'Unable to retrieve value for key from HTTP response: \'d\''
| [
[
[
7,
11
],
[
19415,
19419
],
[
19836,
19840
],
[
20681,
20685
],
[
21187,
21191
],
[
21642,
21646
],
[
22129,
22133
],
[
33736,
33740
]
],
[
[
19,
21
],
[
22,
24
],
[
699,
701
]
],
[
[
104,
108
],
[
18412,
18416
],
[
24301,
24305
],
[
25408,
25412
],
[
25452,
25456
],
[
26549,
26553
],
[
26593,
26597
],
[
28100,
28104
],
[
28187,
28191
],
[
30163,
30167
],
[
30250,
30254
],
[
32130,
32134
],
[
32217,
32221
],
[
34701,
34705
],
[
34788,
34792
],
[
36247,
36251
],
[
36313,
36317
],
[
38156,
38160
],
[
38243,
38247
]
],
[
[
110,
114
],
[
24306,
24310
],
[
25413,
25417
],
[
25457,
25461
],
[
26554,
26558
],
[
26598,
26602
],
[
28105,
28109
],
[
28192,
28196
],
[
30168,
30172
],
[
30255,
30259
],
[
32135,
32139
],
[
32222,
32226
],
[
34706,
34710
],
[
34793,
34797
],
[
36252,
36256
],
[
36318,
36322
],
[
38161,
38165
],
[
38248,
38252
]
],
[
[
116,
119
],
[
24312,
24315
],
[
25419,
25422
],
[
25463,
25466
],
[
26560,
26563
],
[
26604,
26607
],
[
28111,
28114
],
[
28198,
28201
],
[
30174,
30177
],
[
30261,
30264
],
[
32141,
32144
],
[
32228,
32231
],
[
34712,
34715
],
[
34799,
34802
],
[
36258,
36261
],
[
36324,
36327
],
[
38167,
38170
],
[
38254,
38257
]
],
[
[
121,
125
],
[
18407,
18411
],
[
18460,
18464
],
[
25447,
25451
],
[
26588,
26592
],
[
28182,
28186
],
[
30245,
30249
],
[
32212,
32216
],
[
34783,
34787
],
[
36308,
36312
],
[
38238,
38242
]
],
[
[
134,
140
],
[
641,
647
],
[
919,
925
],
[
19035,
19041
],
[
23201,
23207
],
[
24790,
24796
],
[
25922,
25928
],
[
26852,
26858
],
[
28750,
28756
],
[
30494,
30500
],
[
33052,
33058
],
[
35093,
35099
],
[
36583,
36589
],
[
860,
866
]
],
[
[
148,
157
],
[
1205,
1214
],
[
2137,
2146
],
[
2907,
2916
],
[
3815,
3824
],
[
4644,
4653
],
[
5551,
5560
],
[
6379,
6388
],
[
7308,
7317
],
[
8063,
8072
],
[
1487,
1496
],
[
1521,
1530
],
[
1855,
1864
],
[
2065,
2074
],
[
2271,
2280
],
[
2305,
2314
],
[
2633,
2642
],
[
2866,
2875
],
[
3254,
3263
],
[
3288,
3297
],
[
3400,
3409
],
[
3623,
3632
],
[
3743,
3752
],
[
4097,
4106
],
[
4131,
4140
],
[
4243,
4252
],
[
4460,
4469
],
[
4603,
4612
],
[
4992,
5001
],
[
5026,
5035
],
[
5137,
5146
],
[
5360,
5369
],
[
5479,
5488
],
[
5834,
5843
],
[
5868,
5877
],
[
5979,
5988
],
[
6196,
6205
],
[
6338,
6347
],
[
6743,
6752
],
[
6777,
6786
],
[
6891,
6900
],
[
7114,
7123
],
[
7236,
7245
],
[
7574,
7583
],
[
7608,
7617
],
[
7879,
7888
],
[
7991,
8000
],
[
8349,
8358
],
[
8383,
8392
],
[
8514,
8523
],
[
8744,
8753
],
[
8889,
8898
],
[
29444,
29453
],
[
29470,
29479
],
[
29504,
29513
],
[
31367,
31376
],
[
31393,
31402
],
[
31427,
31436
],
[
32296,
32305
],
[
33983,
33992
],
[
34009,
34018
],
[
34043,
34052
],
[
34867,
34876
],
[
37380,
37389
],
[
37406,
37415
],
[
37440,
37449
],
[
38322,
38331
]
],
[
[
182,
189
],
[
892,
899
]
],
[
[
191,
201
]
],
[
[
223,
230
],
[
15989,
15996
],
[
16948,
16955
],
[
17789,
17796
],
[
18513,
18520
],
[
24092,
24099
],
[
25199,
25206
],
[
26340,
26347
],
[
27857,
27864
],
[
29954,
29961
],
[
31877,
31884
],
[
34492,
34499
],
[
36038,
36045
],
[
37947,
37954
]
],
[
[
261,
281
],
[
24051,
24071
],
[
24028,
24048
],
[
25064,
25084
],
[
25041,
25061
],
[
26205,
26225
],
[
26182,
26202
],
[
27725,
27745
],
[
27702,
27722
],
[
29822,
29842
],
[
29799,
29819
],
[
31745,
31765
],
[
31722,
31742
],
[
34360,
34380
],
[
34337,
34357
],
[
35906,
35926
],
[
35883,
35903
],
[
37815,
37835
],
[
37792,
37812
]
],
[
[
329,
350
],
[
8991,
9012
],
[
9088,
9109
],
[
10662,
10683
],
[
10759,
10780
],
[
13329,
13350
],
[
13426,
13447
],
[
14148,
14169
],
[
14245,
14266
],
[
15802,
15823
],
[
15892,
15913
],
[
16770,
16791
],
[
16860,
16881
],
[
17605,
17626
],
[
17695,
17716
],
[
18465,
18486
],
[
30608,
30629
],
[
30685,
30706
]
],
[
[
352,
368
],
[
9158,
9174
],
[
10818,
10834
],
[
11217,
11233
],
[
13485,
13501
],
[
14304,
14320
],
[
14973,
14989
],
[
23631,
23647
],
[
26932,
26948
],
[
28840,
28856
],
[
30753,
30769
],
[
33141,
33157
],
[
35187,
35203
],
[
36703,
36719
]
],
[
[
370,
383
],
[
24539,
24552
],
[
25719,
25732
],
[
28440,
28453
],
[
32504,
32517
]
],
[
[
425,
438
],
[
1144,
1157
],
[
1709,
1722
],
[
2493,
2506
],
[
3477,
3490
],
[
4320,
4333
],
[
5214,
5227
],
[
6056,
6069
],
[
6968,
6981
],
[
7733,
7746
],
[
8604,
8617
],
[
9536,
9549
],
[
11626,
11639
],
[
12770,
12783
],
[
13056,
13069
],
[
13871,
13884
],
[
14690,
14703
],
[
15359,
15372
],
[
16202,
16215
],
[
17156,
17169
],
[
18012,
18025
],
[
18726,
18739
],
[
19196,
19209
],
[
19238,
19251
],
[
19282,
19295
],
[
19330,
19343
],
[
19673,
19686
],
[
20312,
20325
],
[
20945,
20958
],
[
21445,
21458
],
[
21900,
21913
],
[
22171,
22184
],
[
22473,
22486
],
[
22847,
22860
],
[
23070,
23083
],
[
27422,
27435
],
[
29308,
29321
],
[
31231,
31244
],
[
33615,
33628
],
[
35638,
35651
],
[
37219,
37232
],
[
38603,
38616
],
[
38777,
38790
],
[
38964,
38977
],
[
39408,
39421
],
[
40060,
40073
]
],
[
[
440,
450
],
[
19107,
19117
],
[
24338,
24348
],
[
25477,
25487
],
[
26618,
26628
],
[
28212,
28222
],
[
30275,
30285
],
[
32242,
32252
],
[
34813,
34823
],
[
36338,
36348
],
[
38268,
38278
]
],
[
[
494,
511
],
[
12884,
12901
],
[
13162,
13179
],
[
13971,
13988
],
[
14790,
14807
],
[
15459,
15476
],
[
21074,
21091
],
[
22300,
22317
],
[
22611,
22628
],
[
40159,
40176
],
[
1054,
1071
]
],
[
[
537,
544
],
[
770,
777
],
[
813,
820
]
],
[
[
551,
567
]
],
[
[
622,
633
]
]
] |
from datetime import date
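# Reads a birth year, computes the age in the current year and prints the matching
# category (MASTER, SENIOR, JUNIOR, INFANTIL or MIRIM).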
ano = int(input('ANO de nascimento : '))
ano_hoje = date.today().year
cont = ano_hoje - ano
if cont > 20 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é MASTER. ')
elif cont == 20 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é SENIOR. ')
elif cont >= 19 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é JUNIOR. ')
elif cont >=10 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é INFANTIL. ')
elif cont <= 9 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é MIRIM. ')
| [
[
[
21,
25
],
[
81,
85
]
],
[
[
27,
30
],
[
118,
121
],
[
199,
202
],
[
346,
349
],
[
492,
495
],
[
637,
640
],
[
784,
787
]
],
[
[
70,
78
],
[
107,
115
],
[
210,
218
],
[
357,
365
],
[
503,
511
],
[
648,
656
],
[
795,
803
]
],
[
[
100,
104
],
[
126,
130
],
[
204,
208
],
[
271,
275
],
[
351,
355
],
[
418,
422
],
[
497,
501
],
[
564,
568
],
[
642,
646
],
[
711,
715
],
[
789,
793
]
]
] |
from distutils.core import setup
setup(
name='upprint',
packages=['upprint'],
version='0.1',
description='Modified version of pprint with better Unicode output',
author='Michiel Sikma',
author_email='michiel@sikma.org',
url='https://github.com/msikma/upprint',
download_url='https://github.com/msikma/upprint/tarball/0.1',
keywords=['pprint', 'debugging', 'print'],
classifiers=[],
license='MIT'
)
| [
[
[
27,
32
],
[
34,
39
]
]
] |
from pathlib import Path
from copy import deepcopy
import pytest
from gretel_synthetics.config import BaseConfig
import gretel_synthetics.tokenizers as tok
class SimpleConfig(BaseConfig):
"""Used for simple tokenization tests
"""
def get_generator_class(self):
return None
def get_training_callable(self):
return None
@pytest.fixture(scope="module")
def input_data_path():
return str(
(Path(__file__).parent / "data" / "smol.txt").resolve()
)
L1 = "Once upon a midnight dreary, while I pondered, weak and weary,\n"
def test_single_char(input_data_path, tmpdir):
# NOTE: Here the line delim should not matter for this char tokenizer
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
trainer = tok.CharTokenizerTrainer(config=config)
# We need this for batch mode, so verify it can be copied
deepcopy(trainer)
line_iter = trainer.annotate_data()
# Assert that we didn't do any annotation
line_one = next(line_iter)
assert line_one == L1
# Let's train the tokenizer, and now reload it back in
trainer.train()
tokenizer = tok.CharTokenizer.load(tmpdir)
assert tokenizer.total_vocab_size == 32
# NOTE: this is because we default to using this token as a delim
# in the main config, but this tokenizer doesn't do anything with it anyway
assert tokenizer.field_delimiter == ","
assert tokenizer.field_delimiter_token == "<d>"
l1_ids = [6, 21, 11, 13, 1, 28, 23, 22, 21, 1, 9, 1, 20, 17, 12, 21, 17, 15, 16, 27, 1, 12, 25, 13, 9, 25, 31, 2, 1, 30, 16, 17, 19, 13, 1, 5, 1, 23, 22, 21, 12, 13, 25, 13, 12, 2, 1, 30, 13, 9, 18, 1, 9, 21, 12, 1, 30, 13, 9, 25, 31, 2, 0]
assert tokenizer.encode_to_ids(L1) == l1_ids
assert tokenizer.decode_from_ids(l1_ids) == L1
# Check the factory
assert isinstance(
tok.tokenizer_from_model_dir(tmpdir),
tok.CharTokenizer
)
def test_single_char_small_vocab(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
trainer = tok.CharTokenizerTrainer(config=config, vocab_size=10)
trainer.annotate_data()
trainer.train()
tokenizer = tok.CharTokenizer.load(tmpdir)
assert tokenizer.total_vocab_size == 10
# Too small of a vocab...
with pytest.raises(tok.TokenizerError):
tokenizer.encode_to_ids("Once upon")
with pytest.raises(tok.TokenizerError):
tokenizer.decode_from_ids([11])
def test_sp(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
trainer = tok.SentencePieceTokenizerTrainer(config=config)
deepcopy(trainer)
line_iter = trainer.annotate_data()
line_one = next(line_iter)
assert line_one == "Once upon a midnight dreary, while I pondered, weak and weary,<n>\n"
trainer.train()
tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
ids = [41, 54, 8, 5, 11, 36, 10, 14, 16, 13, 17, 16, 22, 20, 15, 5, 13, 25, 32, 7, 6, 51, 42, 9, 8, 5, 23, 5, 36, 13, 48, 13, 6, 49, 62, 10, 28, 49, 25, 7, 6, 3]
assert tokenizer.encode_to_ids("Once upon a midnight dreary, while I pondered, weak and weary,<n>\n") == ids
assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"
def test_sp_field_delim(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
trainer = tok.SentencePieceTokenizerTrainer(config=config)
line_iter = trainer.annotate_data()
line_one = next(line_iter)
assert line_one == "Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n"
trainer.train()
tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
ids = [40, 53, 7, 5, 10, 35, 9, 13, 15, 12, 16, 15, 21, 19, 14, 5, 12, 24, 30, 6, 4, 51, 41, 8, 7, 5, 23, 5, 35, 12, 47, 12, 4, 48, 61, 9, 27, 48, 24, 6, 4, 3]
assert tokenizer.encode_to_ids("Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n") == ids
assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"
# Check the factory
assert isinstance(
tok.tokenizer_from_model_dir(tmpdir),
tok.SentencePieceTokenizer
)
| [
[
[
20,
24
],
[
439,
443
]
],
[
[
42,
50
],
[
920,
928
],
[
2724,
2732
]
],
[
[
59,
65
],
[
360,
366
],
[
2370,
2376
],
[
2460,
2466
]
],
[
[
105,
115
],
[
180,
190
]
],
[
[
123,
158
],
[
813,
816
],
[
1179,
1182
],
[
1903,
1906
],
[
1949,
1952
],
[
2130,
2133
],
[
2254,
2257
],
[
2384,
2387
],
[
2474,
2477
],
[
2671,
2674
],
[
2944,
2947
],
[
3546,
3549
],
[
3803,
3806
],
[
4296,
4299
],
[
4342,
4345
]
],
[
[
167,
179
],
[
709,
721
],
[
2047,
2059
],
[
2588,
2600
],
[
3442,
3454
]
],
[
[
395,
410
]
],
[
[
501,
503
],
[
1080,
1082
],
[
1782,
1784
],
[
1844,
1846
]
],
[
[
579,
595
]
],
[
[
1979,
2007
]
],
[
[
2541,
2548
]
],
[
[
3383,
3402
]
]
] |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Tests for the export and import routines.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import io
import six
from six.moves import range, zip
from aiida.backends.testbase import AiidaTestCase
from aiida.orm.importexport import import_data
from aiida import orm
class TestSpecificImport(AiidaTestCase):
def setUp(self):
super(TestSpecificImport, self).setUp()
self.clean_db()
self.insert_data()
def test_simple_import(self):
"""
This is a very simple test which checks that an export file with nodes
        that are not associated with a computer is imported correctly. In Django,
        when such nodes are exported, there is an empty set for computers
        in the export file. In SQLA there is such a set only when a computer is
        associated with the exported nodes. When an empty computer set is
        found in the export file (when imported into an SQLA profile), the SQLA
import code used to crash. This test demonstrates this problem.
"""
import tempfile
from aiida.orm.data.parameter import ParameterData
from aiida.orm.importexport import export, import_data
from aiida.orm.node import Node
from aiida.orm.querybuilder import QueryBuilder
parameters = ParameterData(dict={
'Pr': {
'cutoff': 50.0,
'pseudo_type': 'Wentzcovitch',
'dual': 8,
'cutoff_units': 'Ry'
},
'Ru': {
'cutoff': 40.0,
'pseudo_type': 'SG15',
'dual': 4,
'cutoff_units': 'Ry'
},
}).store()
with tempfile.NamedTemporaryFile() as handle:
nodes = [parameters]
export(nodes, outfile=handle.name, overwrite=True, silent=True)
# Check that we have the expected number of nodes in the database
self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
# Clean the database and verify there are no nodes left
self.clean_db()
self.assertEquals(QueryBuilder().append(Node).count(), 0)
# After importing we should have the original number of nodes again
import_data(handle.name, silent=True)
self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
def test_cycle_structure_data(self):
"""
Create an export with some Calculation and Data nodes and import it after having
cleaned the database. Verify that the nodes and their attributes are restored
properly after importing the created export archive
"""
import tempfile
from aiida.common.links import LinkType
from aiida.orm.calculation import Calculation
from aiida.orm.data.structure import StructureData
from aiida.orm.data.remote import RemoteData
from aiida.orm.importexport import export, import_data
from aiida.orm.node import Node
from aiida.orm.querybuilder import QueryBuilder
test_label = 'Test structure'
test_cell = [
[8.34, 0.0, 0.0],
[0.298041701839357, 8.53479766274308, 0.0],
[0.842650688117053, 0.47118495164127, 10.6965192730702]
]
test_kinds = [
{
'symbols': [u'Fe'],
'weights': [1.0],
'mass': 55.845,
'name': u'Fe'
},
{
'symbols': [u'S'],
'weights': [1.0],
'mass': 32.065,
'name': u'S'
}
]
structure = StructureData(cell=test_cell)
structure.append_atom(symbols=['Fe'], position=[0, 0, 0])
structure.append_atom(symbols=['S'], position=[2, 2, 2])
structure.label = test_label
structure.store()
parent_calculation = Calculation()
parent_calculation._set_attr('key', 'value')
parent_calculation.store()
child_calculation = Calculation()
child_calculation._set_attr('key', 'value')
child_calculation.store()
remote_folder = RemoteData(computer=self.computer, remote_path='/').store()
remote_folder.add_link_from(parent_calculation, link_type=LinkType.CREATE)
child_calculation.add_link_from(remote_folder, link_type=LinkType.INPUT)
structure.add_link_from(child_calculation, link_type=LinkType.CREATE)
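        # Graph built above: parent_calculation --CREATE--> remote_folder --INPUT--> child_calculation --CREATE--> structure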
with tempfile.NamedTemporaryFile() as handle:
nodes = [structure, child_calculation, parent_calculation, remote_folder]
export(nodes, outfile=handle.name, overwrite=True, silent=True)
# Check that we have the expected number of nodes in the database
self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
# Clean the database and verify there are no nodes left
self.clean_db()
self.assertEquals(QueryBuilder().append(Node).count(), 0)
# After importing we should have the original number of nodes again
import_data(handle.name, silent=True)
self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
# Verify that Calculations have non-empty attribute dictionaries
qb = QueryBuilder().append(Calculation)
for [calculation] in qb.iterall():
self.assertIsInstance(calculation.get_attrs(), dict)
self.assertNotEquals(len(calculation.get_attrs()), 0)
# Verify that the structure data maintained its label, cell and kinds
qb = QueryBuilder().append(StructureData)
for [structure] in qb.iterall():
self.assertEquals(structure.label, test_label)
self.assertEquals(structure.cell, test_cell)
qb = QueryBuilder().append(StructureData, project=['attributes.kinds'])
for [kinds] in qb.iterall():
self.assertEqual(len(kinds), 2)
for kind in kinds:
self.assertIn(kind, test_kinds)
# Check that there is a StructureData that is an output of a Calculation
qb = QueryBuilder()
qb.append(Calculation, project=['uuid'], tag='calculation')
qb.append(StructureData, output_of='calculation')
self.assertGreater(len(qb.all()), 0)
# Check that there is a RemoteData that is a child and parent of a Calculation
qb = QueryBuilder()
qb.append(Calculation, tag='parent')
qb.append(RemoteData, project=['uuid'], output_of='parent', tag='remote')
qb.append(Calculation, output_of='remote')
self.assertGreater(len(qb.all()), 0)
class TestSimple(AiidaTestCase):
def setUp(self):
self.clean_db()
self.insert_data()
def tearDown(self):
pass
def test_0(self):
import os
import shutil
import tempfile
from aiida.orm import load_node
from aiida.orm.data.base import Str, Int, Float, Bool
from aiida.orm.importexport import export
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
# producing values for each base type
values = ("Hello", 6, -1.2399834e12, False) # , ["Bla", 1, 1e-10])
filename = os.path.join(temp_folder, "export.tar.gz")
# producing nodes:
nodes = [cls(val).store() for val, cls in zip(values, (Str, Int, Float, Bool))]
# my uuid - list to reload the node:
uuids = [n.uuid for n in nodes]
# exporting the nodes:
export(nodes, outfile=filename, silent=True)
# cleaning:
self.clean_db()
# Importing back the data:
import_data(filename, silent=True)
# Checking whether values are preserved:
for uuid, refval in zip(uuids, values):
self.assertEquals(load_node(uuid).value, refval)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_1(self):
import os
import shutil
import tempfile
from aiida.orm import DataFactory
from aiida.orm import load_node
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.importexport import export
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
StructureData = DataFactory('structure')
sd = StructureData()
sd.store()
calc = JobCalculation()
calc.set_computer(self.computer)
calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
calc.store()
calc.add_link_from(sd)
pks = [sd.pk, calc.pk]
attrs = {}
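            # Snapshot every attribute of both nodes, keyed by UUID, so they can be compared after re-import.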
for pk in pks:
node = load_node(pk)
attrs[node.uuid] = dict()
for k in node.attrs():
attrs[node.uuid][k] = node.get_attr(k)
filename = os.path.join(temp_folder, "export.tar.gz")
export([calc], outfile=filename, silent=True)
self.clean_db()
# NOTE: it is better to load new nodes by uuid, rather than assuming
# that they will have the first 3 pks. In fact, a recommended policy in
# databases is that pk always increment, even if you've deleted elements
import_data(filename, silent=True)
for uuid in attrs.keys():
node = load_node(uuid)
# for k in node.attrs():
for k in attrs[uuid].keys():
self.assertEquals(attrs[uuid][k], node.get_attr(k))
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
# print temp_folder
def test_2(self):
"""
Test the check for the export format version.
"""
import tarfile
import os
import shutil
import tempfile
from aiida.common import exceptions
from aiida.orm import DataFactory
from aiida.orm.importexport import export
import aiida.utils.json as json
# Creating a folder for the import/export files
export_file_tmp_folder = tempfile.mkdtemp()
unpack_tmp_folder = tempfile.mkdtemp()
try:
StructureData = DataFactory('structure')
sd = StructureData()
sd.store()
filename = os.path.join(export_file_tmp_folder, "export.tar.gz")
export([sd], outfile=filename, silent=True)
with tarfile.open(filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
tar.extractall(unpack_tmp_folder)
with io.open(os.path.join(unpack_tmp_folder,
'metadata.json'), 'r', encoding='utf8') as fhandle:
metadata = json.load(fhandle)
metadata['export_version'] = 0.0
with io.open(os.path.join(unpack_tmp_folder, 'metadata.json'),
'wb') as fhandle:
json.dump(metadata, fhandle)
with tarfile.open(filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
tar.add(unpack_tmp_folder, arcname="")
self.tearDownClass()
self.setUpClass()
with self.assertRaises(exceptions.IncompatibleArchiveVersionError):
import_data(filename, silent=True)
finally:
# Deleting the created temporary folders
shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_3(self):
"""
        Test importing of nodes that have links to unknown nodes.
"""
import tarfile
import os
import shutil
import tempfile
from aiida.orm.importexport import export
from aiida.common.folders import SandboxFolder
from aiida.orm.data.structure import StructureData
from aiida.orm import load_node
import aiida.utils.json as json
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
node_label = "Test structure data"
sd = StructureData()
sd.label = str(node_label)
sd.store()
filename = os.path.join(temp_folder, "export.tar.gz")
export([sd], outfile=filename, silent=True)
unpack = SandboxFolder()
with tarfile.open(
filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
tar.extractall(unpack.abspath)
with io.open(unpack.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle:
metadata = json.load(fhandle)
metadata['links_uuid'].append({
'output': sd.uuid,
'input': 'non-existing-uuid',
'label': 'parent'
})
with io.open(unpack.get_abs_path('data.json'), 'wb') as fhandle:
json.dump(metadata, fhandle)
with tarfile.open(
filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
tar.add(unpack.abspath, arcname="")
self.clean_db()
with self.assertRaises(ValueError):
import_data(filename, silent=True)
import_data(filename, ignore_unknown_nodes=True, silent=True)
self.assertEquals(load_node(sd.uuid).label, node_label)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_4(self):
"""
Test control of licenses.
"""
from aiida.common.exceptions import LicensingException
from aiida.common.folders import SandboxFolder
from aiida.orm.importexport import export_tree
from aiida.orm import DataFactory
StructureData = DataFactory('structure')
sd = StructureData()
sd.source = {'license': 'GPL'}
sd.store()
folder = SandboxFolder()
export_tree([sd], folder=folder, silent=True,
allowed_licenses=['GPL'])
# Folder should contain two files of metadata + nodes/
self.assertEquals(len(folder.get_content_list()), 3)
folder = SandboxFolder()
export_tree([sd], folder=folder, silent=True,
forbidden_licenses=['Academic'])
# Folder should contain two files of metadata + nodes/
self.assertEquals(len(folder.get_content_list()), 3)
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
allowed_licenses=['CC0'])
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
forbidden_licenses=['GPL'])
def cc_filter(license):
return license.startswith('CC')
def gpl_filter(license):
return license == 'GPL'
def crashing_filter(license):
raise NotImplementedError("not implemented yet")
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
allowed_licenses=cc_filter)
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
forbidden_licenses=gpl_filter)
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
allowed_licenses=crashing_filter)
folder = SandboxFolder()
with self.assertRaises(LicensingException):
export_tree([sd], folder=folder, silent=True,
forbidden_licenses=crashing_filter)
def test_5(self):
"""
This test checks that nodes belonging to different users are correctly
exported & imported.
"""
import os
import shutil
import tempfile
from aiida.orm import load_node
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.structure import StructureData
from aiida.orm.importexport import export
from aiida.common.datastructures import calc_states
from aiida.common.links import LinkType
from aiida.common.utils import get_configured_user_email
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
# Create another user
new_email = "newuser@new.n"
user = orm.User(email=new_email, backend=self.backend).store()
# Create a structure data node that has a calculation as output
sd1 = StructureData()
sd1.set_user(user)
sd1.label = 'sd1'
sd1.store()
jc1 = JobCalculation()
jc1.set_computer(self.computer)
jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc1.set_user(user)
jc1.label = 'jc1'
jc1.store()
jc1.add_link_from(sd1)
jc1._set_state(calc_states.PARSING)
# Create some nodes from a different user
sd2 = StructureData()
sd2.set_user(user)
sd2.label = 'sd2'
sd2.store()
sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE) # I assume jc1 CREATED sd2
jc2 = JobCalculation()
jc2.set_computer(self.computer)
jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc2.label = 'jc2'
jc2.store()
jc2.add_link_from(sd2, label='l2')
jc2._set_state(calc_states.PARSING)
sd3 = StructureData()
sd3.label = 'sd3'
sd3.store()
sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]
uuids_u2 = [jc2.uuid, sd3.uuid]
filename = os.path.join(temp_folder, "export.tar.gz")
export([sd3], outfile=filename, silent=True)
self.clean_db()
import_data(filename, silent=True)
# Check that the imported nodes are correctly imported and that
# the user assigned to the nodes is the right one
for uuid in uuids_u1:
node = load_node(uuid=uuid)
self.assertEquals(node.get_user().email, new_email)
for uuid in uuids_u2:
self.assertEquals(load_node(uuid).get_user().email,
get_configured_user_email())
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_6(self):
"""
This test checks that nodes belonging to user A (which is not the
default user) can be correctly exported, imported, enriched with nodes
        from the default user, re-exported & re-imported, and that in the end
        all the nodes that have finally been imported belong to the right
        users.
"""
import os
import shutil
import tempfile
from aiida.orm import load_node
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.structure import StructureData
from aiida.orm.importexport import export
from aiida.common.datastructures import calc_states
from aiida.common.links import LinkType
from aiida.common.utils import get_configured_user_email
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
# Create another user
new_email = "newuser@new.n"
user = orm.User(email=new_email, backend=self.backend).store()
# Create a structure data node that has a calculation as output
sd1 = StructureData()
sd1.set_user(user)
sd1.label = 'sd1'
sd1.store()
jc1 = JobCalculation()
jc1.set_computer(self.computer)
jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc1.set_user(user)
jc1.label = 'jc1'
jc1.store()
jc1.add_link_from(sd1)
jc1._set_state(calc_states.PARSING)
# Create some nodes from a different user
sd2 = StructureData()
sd2.set_user(user)
sd2.label = 'sd2'
sd2.store()
sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE)
# Set the jc1 to FINISHED
jc1._set_state(calc_states.FINISHED)
# At this point we export the generated data
filename1 = os.path.join(temp_folder, "export1.tar.gz")
export([sd2], outfile=filename1, silent=True)
uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]
self.clean_db()
self.insert_data()
import_data(filename1, silent=True)
# Check that the imported nodes are correctly imported and that
# the user assigned to the nodes is the right one
for uuid in uuids1:
self.assertEquals(load_node(uuid).get_user().email, new_email)
# Now we continue to generate more data based on the imported
# data
sd2_imp = load_node(sd2.uuid)
jc2 = JobCalculation()
jc2.set_computer(self.computer)
jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc2.label = 'jc2'
jc2.store()
jc2.add_link_from(sd2_imp, label='l2')
jc2._set_state(calc_states.PARSING)
sd3 = StructureData()
sd3.label = 'sd3'
sd3.store()
sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
# Set the jc2 to FINISHED
jc2._set_state(calc_states.FINISHED)
# Store the UUIDs of the nodes that should be checked
# if they can be imported correctly.
uuids2 = [jc2.uuid, sd3.uuid]
filename2 = os.path.join(temp_folder, "export2.tar.gz")
export([sd3], outfile=filename2, silent=True)
self.clean_db()
self.insert_data()
import_data(filename2, silent=True)
# Check that the imported nodes are correctly imported and that
# the user assigned to the nodes is the right one
for uuid in uuids1:
self.assertEquals(load_node(uuid).get_user().email, new_email)
for uuid in uuids2:
self.assertEquals(load_node(uuid).get_user().email,
get_configured_user_email())
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_7(self):
"""
This test checks that nodes that belong to a specific group are
correctly imported and exported.
"""
import os
import shutil
import tempfile
from aiida.orm import load_node
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.structure import StructureData
from aiida.orm.importexport import export
from aiida.common.datastructures import calc_states
from aiida.orm.querybuilder import QueryBuilder
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
# Create another user
new_email = "newuser@new.n"
user = orm.User(email=new_email, backend=self.backend)
user.store()
# Create a structure data node that has a calculation as output
sd1 = StructureData()
sd1.set_user(user)
sd1.label = 'sd1'
sd1.store()
jc1 = JobCalculation()
jc1.set_computer(self.computer)
jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc1.set_user(user)
jc1.label = 'jc1'
jc1.store()
jc1.add_link_from(sd1)
jc1._set_state(calc_states.PARSING)
# Create a group and add the data inside
from aiida.orm.group import Group
g1 = Group(name="node_group")
g1.store()
g1.add_nodes([sd1, jc1])
g1_uuid = g1.uuid
# At this point we export the generated data
filename1 = os.path.join(temp_folder, "export1.tar.gz")
export([sd1, jc1, g1], outfile=filename1,
silent=True)
n_uuids = [sd1.uuid, jc1.uuid]
self.clean_db()
self.insert_data()
import_data(filename1, silent=True)
# Check that the imported nodes are correctly imported and that
# the user assigned to the nodes is the right one
for uuid in n_uuids:
self.assertEquals(load_node(uuid).get_user().email, new_email)
# Check that the exported group is imported correctly
qb = QueryBuilder()
qb.append(Group, filters={'uuid': {'==': g1_uuid}})
self.assertEquals(qb.count(), 1, "The group was not found.")
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_group_export(self):
"""
Test that when exporting just a group, its nodes are also exported
"""
import os
import shutil
import tempfile
from aiida.orm import load_node
from aiida.orm.data.structure import StructureData
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
# Create another user
new_email = "newuser@new.n"
user = orm.User(email=new_email, backend=self.backend)
user.store()
# Create a structure data node
sd1 = StructureData()
sd1.set_user(user)
sd1.label = 'sd1'
sd1.store()
# Create a group and add the data inside
from aiida.orm.group import Group
g1 = Group(name="node_group")
g1.store()
g1.add_nodes([sd1])
g1_uuid = g1.uuid
# At this point we export the generated data
filename1 = os.path.join(temp_folder, "export1.tar.gz")
export([g1], outfile=filename1, silent=True)
n_uuids = [sd1.uuid]
self.clean_db()
self.insert_data()
import_data(filename1, silent=True)
# Check that the imported nodes are correctly imported and that
# the user assigned to the nodes is the right one
for uuid in n_uuids:
self.assertEquals(load_node(uuid).get_user().email, new_email)
# Check that the exported group is imported correctly
qb = QueryBuilder()
qb.append(Group, filters={'uuid': {'==': g1_uuid}})
self.assertEquals(qb.count(), 1, "The group was not found.")
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_workfunction_1(self):
import shutil, os, tempfile
from aiida.work.workfunctions import workfunction
from aiida.orm.data.float import Float
from aiida.orm import load_node
from aiida.orm.importexport import export
from aiida.common.exceptions import NotExistent
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
@workfunction
def add(a, b):
"""Add 2 numbers"""
return {'res': Float(a + b)}
def max_(**kwargs):
"""select the max value"""
max_val = max([(v.value, v) for v in kwargs.values()])
return {'res': max_val[1]}
try:
            # I'm creating a bunch of numbers
a, b, c, d, e = (Float(i) for i in range(5))
            # this adds the maximum number among b, c, d, e to a.
res = add(a=a, b=max_(b=b, c=c, d=d, e=e)['res'])['res']
# These are the uuids that would be exported as well (as parents) if I wanted the final result
uuids_values = [(a.uuid, a.value), (e.uuid, e.value), (res.uuid, res.value)]
# These are the uuids that shouldn't be exported since it's a selection.
not_wanted_uuids = [v.uuid for v in (b, c, d)]
# At this point we export the generated data
filename1 = os.path.join(temp_folder, "export1.tar.gz")
export([res], outfile=filename1, silent=True)
self.clean_db()
self.insert_data()
import_data(filename1, silent=True)
# Check that the imported nodes are correctly imported and that the value is preserved
for uuid, value in uuids_values:
self.assertEquals(load_node(uuid).value, value)
for uuid in not_wanted_uuids:
with self.assertRaises(NotExistent):
load_node(uuid)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_workcalculation_2(self):
import shutil, os, tempfile
from aiida.orm.calculation.work import WorkCalculation
from aiida.orm.data.float import Float
from aiida.orm.data.int import Int
from aiida.orm import load_node
from aiida.common.links import LinkType
from aiida.orm.importexport import export
from aiida.common.exceptions import NotExistent
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
try:
master = WorkCalculation().store()
slave = WorkCalculation().store()
input_1 = Int(3).store()
input_2 = Int(5).store()
output_1 = Int(2).store()
master.add_link_from(input_1, 'input_1', link_type=LinkType.INPUT)
slave.add_link_from(master, 'CALL', link_type=LinkType.CALL)
slave.add_link_from(input_2, 'input_2', link_type=LinkType.INPUT)
output_1.add_link_from(master, 'CREATE', link_type=LinkType.CREATE)
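            # Graph built above: the Int inputs feed master and slave, master CALLs slave and CREATEs output_1.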
uuids_values = [(v.uuid, v.value) for v in (output_1,)]
filename1 = os.path.join(temp_folder, "export1.tar.gz")
export([output_1], outfile=filename1, silent=True)
self.clean_db()
self.insert_data()
import_data(filename1, silent=True)
for uuid, value in uuids_values:
self.assertEquals(load_node(uuid).value, value)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
def test_reexport(self):
"""
Export something, import and reexport and check if everything is valid.
The export is rather easy::
___ ___ ___
| | INP | | CREATE | |
| p | --> | c | -----> | a |
|___| |___| |___|
"""
import os, shutil, tempfile, numpy as np, string, random
from datetime import datetime
from aiida.orm import Calculation, load_node, Group
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.importexport import export
from aiida.common.hashing import make_hash
from aiida.common.links import LinkType
def get_hash_from_db_content(groupname):
qb = QueryBuilder()
qb.append(ParameterData, tag='p', project='*')
qb.append(Calculation, tag='c', project='*', edge_tag='p2c', edge_project=('label', 'type'))
qb.append(ArrayData, tag='a', project='*', edge_tag='c2a', edge_project=('label', 'type'))
qb.append(Group, filters={'name': groupname}, project='*', tag='g', group_of='a')
# I want the query to contain something!
self.assertTrue(qb.count() > 0)
# The hash is given from the preservable entries in an export-import cycle,
# uuids, attributes, labels, descriptions, arrays, link-labels, link-types:
hash_ = make_hash([(
item['p']['*'].get_attrs(),
item['p']['*'].uuid,
item['p']['*'].label,
item['p']['*'].description,
item['c']['*'].uuid,
item['c']['*'].get_attrs(),
item['a']['*'].get_attrs(),
[item['a']['*'].get_array(name) for name in item['a']['*'].get_arraynames()],
item['a']['*'].uuid,
item['g']['*'].uuid,
item['g']['*'].name,
item['p2c']['label'],
item['p2c']['type'],
item['c2a']['label'],
item['c2a']['type'],
item['g']['*'].name,
) for item in qb.dict()])
return hash_
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
chars = string.ascii_uppercase + string.digits
size = 10
groupname = 'test-group'
try:
nparr = np.random.random((4, 3, 2))
trial_dict = {}
# give some integers:
trial_dict.update({str(k): np.random.randint(100) for k in range(10)})
# give some floats:
trial_dict.update({str(k): np.random.random() for k in range(10, 20)})
# give some booleans:
trial_dict.update({str(k): bool(np.random.randint(1)) for k in range(20, 30)})
# give some datetime:
trial_dict.update({str(k): datetime(
year=2017,
month=np.random.randint(1, 12),
day=np.random.randint(1, 28)) for k in range(30, 40)})
# give some text:
trial_dict.update({str(k): ''.join(random.choice(chars) for _ in range(size)) for k in range(20, 30)})
p = ParameterData(dict=trial_dict)
p.label = str(datetime.now())
p.description = 'd_' + str(datetime.now())
p.store()
c = Calculation()
            # also setting the trial dict as attributes, but randomizing the keys
            for k, v in trial_dict.items():
                c._set_attr(str(int(k) + np.random.randint(10)), v)
c.store()
a = ArrayData()
a.set_array('array', nparr)
a.store()
# LINKS
# the calculation has input the parameters-instance
c.add_link_from(p, label='input_parameters', link_type=LinkType.INPUT)
# I want the array to be an output of the calculation
a.add_link_from(c, label='output_array', link_type=LinkType.CREATE)
g = Group(name='test-group')
g.store()
g.add_nodes(a)
hash_from_dbcontent = get_hash_from_db_content(groupname)
# I export and reimport 3 times in a row:
for i in range(3):
# Always new filename:
filename = os.path.join(temp_folder, "export-{}.zip".format(i))
# Loading the group from the string
g = Group.get_from_string(groupname)
# exporting based on all members of the group
# this also checks if group memberships are preserved!
export([g] + [n for n in g.nodes], outfile=filename, silent=True)
# cleaning the DB!
self.clean_db()
# reimporting the data from the file
import_data(filename, silent=True, ignore_unknown_nodes=True)
# creating the hash from db content
new_hash = get_hash_from_db_content(groupname)
# I check for equality against the first hash created, which implies that hashes
# are equal in all iterations of this process
self.assertEqual(hash_from_dbcontent, new_hash)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
class TestComplex(AiidaTestCase):
def test_complex_graph_import_export(self):
"""
        This test checks that a small but somewhat complex graph can be correctly
exported and imported.
It will create the graph, store it to the database, export it to a file
and import it. In the end it will check if the initial nodes are present
        in the imported graph.
"""
import tempfile
import shutil
import os
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.folder import FolderData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.remote import RemoteData
from aiida.common.links import LinkType
from aiida.orm.importexport import export, import_data
from aiida.orm.utils import load_node
from aiida.common.exceptions import NotExistent
temp_folder = tempfile.mkdtemp()
try:
calc1 = JobCalculation()
calc1.set_computer(self.computer)
calc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
calc1.label = "calc1"
calc1.store()
calc1._set_state(u'RETRIEVING')
pd1 = ParameterData()
pd1.label = "pd1"
pd1.store()
pd2 = ParameterData()
pd2.label = "pd2"
pd2.store()
rd1 = RemoteData()
rd1.label = "rd1"
rd1.set_remote_path("/x/y.py")
rd1.set_computer(self.computer)
rd1.store()
rd1.add_link_from(calc1, link_type=LinkType.CREATE)
calc2 = JobCalculation()
calc2.set_computer(self.computer)
calc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
calc2.label = "calc2"
calc2.store()
calc2.add_link_from(pd1, link_type=LinkType.INPUT)
calc2.add_link_from(pd2, link_type=LinkType.INPUT)
calc2.add_link_from(rd1, link_type=LinkType.INPUT)
calc2._set_state(u'SUBMITTING')
fd1 = FolderData()
fd1.label = "fd1"
fd1.store()
fd1.add_link_from(calc2, link_type=LinkType.CREATE)
node_uuids_labels = {calc1.uuid: calc1.label, pd1.uuid: pd1.label,
pd2.uuid: pd2.label, rd1.uuid: rd1.label,
calc2.uuid: calc2.label, fd1.uuid: fd1.label}
filename = os.path.join(temp_folder, "export.tar.gz")
export([fd1], outfile=filename, silent=True)
self.clean_db()
import_data(filename, silent=True, ignore_unknown_nodes=True)
for uuid, label in node_uuids_labels.items():
try:
load_node(uuid)
except NotExistent:
self.fail("Node with UUID {} and label {} was not "
"found.".format(uuid, label))
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
class TestComputer(AiidaTestCase):
def setUp(self):
self.clean_db()
self.insert_data()
def tearDown(self):
pass
def test_same_computer_import(self):
"""
Test that you can import nodes in steps without any problems. In this
test we will import a first calculation and then a second one. The
        import should work as expected and leave us with two job
        calculations in the end.
        Each calculation is related to the same computer, so in the end we should
        have only one computer.
"""
import os
import shutil
import tempfile
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.computers import Computer
from aiida.orm.calculation.job import JobCalculation
# Creating a folder for the import/export files
export_file_tmp_folder = tempfile.mkdtemp()
unpack_tmp_folder = tempfile.mkdtemp()
try:
# Store two job calculation related to the same computer
calc1_label = "calc1"
calc1 = JobCalculation()
calc1.set_computer(self.computer)
calc1.set_option('resources', {"num_machines": 1,
"num_mpiprocs_per_machine": 1})
calc1.label = calc1_label
calc1.store()
calc1._set_state(u'RETRIEVING')
calc2_label = "calc2"
calc2 = JobCalculation()
calc2.set_computer(self.computer)
calc2.set_option('resources', {"num_machines": 2,
"num_mpiprocs_per_machine": 2})
calc2.label = calc2_label
calc2.store()
calc2._set_state(u'RETRIEVING')
# Store locally the computer name
comp_name = six.text_type(self.computer.name)
comp_uuid = six.text_type(self.computer.uuid)
# Export the first job calculation
filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
export([calc1], outfile=filename1, silent=True)
# Export the second job calculation
filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
export([calc2], outfile=filename2, silent=True)
# Clean the local database
self.clean_db()
# Check that there are no computers
qb = QueryBuilder()
qb.append(Computer, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any computers"
"in the database at this point.")
# Check that there are no calculations
qb = QueryBuilder()
qb.append(JobCalculation, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any "
"calculations in the database at "
"this point.")
# Import the first calculation
import_data(filename1, silent=True)
# Check that the calculation computer is imported correctly.
qb = QueryBuilder()
qb.append(JobCalculation, project=['label'])
self.assertEqual(qb.count(), 1, "Only one calculation should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
"The calculation label is not correct.")
# Check that the referenced computer is imported correctly.
qb = QueryBuilder()
qb.append(Computer, project=['name', 'uuid', 'id'])
self.assertEqual(qb.count(), 1, "Only one computer should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), comp_name,
"The computer name is not correct.")
self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
"The computer uuid is not correct.")
# Store the id of the computer
comp_id = qb.first()[2]
# Import the second calculation
import_data(filename2, silent=True)
# Check that the number of computers remains the same and its data
# did not change.
qb = QueryBuilder()
qb.append(Computer, project=['name', 'uuid', 'id'])
self.assertEqual(qb.count(), 1, "Only one computer should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), comp_name,
"The computer name is not correct.")
self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
"The computer uuid is not correct.")
self.assertEqual(qb.first()[2], comp_id,
"The computer id is not correct.")
# Check that now you have two calculations attached to the same
# computer.
qb = QueryBuilder()
qb.append(Computer, tag='comp')
qb.append(JobCalculation, has_computer='comp', project=['label'])
self.assertEqual(qb.count(), 2, "Two calculations should be "
"found.")
ret_labels = set(_ for [_] in qb.all())
self.assertEqual(ret_labels, set([calc1_label, calc2_label]),
"The labels of the calculations are not correct.")
finally:
# Deleting the created temporary folders
shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_same_computer_different_name_import(self):
"""
This test checks that if the computer is re-imported with a different
name to the same database, then the original computer will not be
renamed. It also checks that the names were correctly imported (without
        any change, since there is no computer name collision).
"""
import os
import shutil
import tempfile
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.computers import Computer
from aiida.orm.calculation.job import JobCalculation
# Creating a folder for the import/export files
export_file_tmp_folder = tempfile.mkdtemp()
unpack_tmp_folder = tempfile.mkdtemp()
try:
# Store a calculation
calc1_label = "calc1"
calc1 = JobCalculation()
calc1.set_computer(self.computer)
calc1.set_option('resources', {"num_machines": 1,
"num_mpiprocs_per_machine": 1})
calc1.label = calc1_label
calc1.store()
calc1._set_state(u'RETRIEVING')
# Store locally the computer name
comp1_name = six.text_type(self.computer.name)
# Export the first job calculation
filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
export([calc1], outfile=filename1, silent=True)
# Rename the computer
self.computer.set_name(comp1_name + "_updated")
# Store a second calculation
calc2_label = "calc2"
calc2 = JobCalculation()
calc2.set_computer(self.computer)
calc2.set_option('resources', {"num_machines": 2,
"num_mpiprocs_per_machine": 2})
calc2.label = calc2_label
calc2.store()
calc2._set_state(u'RETRIEVING')
# Export the second job calculation
filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
export([calc2], outfile=filename2, silent=True)
# Clean the local database
self.clean_db()
# Check that there are no computers
qb = QueryBuilder()
qb.append(Computer, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any computers"
"in the database at this point.")
# Check that there are no calculations
qb = QueryBuilder()
qb.append(JobCalculation, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any "
"calculations in the database at "
"this point.")
# Import the first calculation
import_data(filename1, silent=True)
# Check that the calculation computer is imported correctly.
qb = QueryBuilder()
qb.append(JobCalculation, project=['label'])
self.assertEqual(qb.count(), 1, "Only one calculation should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
"The calculation label is not correct.")
# Check that the referenced computer is imported correctly.
qb = QueryBuilder()
qb.append(Computer, project=['name', 'uuid', 'id'])
self.assertEqual(qb.count(), 1, "Only one computer should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
"The computer name is not correct.")
# Import the second calculation
import_data(filename2, silent=True)
# Check that the number of computers remains the same and its data
# did not change.
qb = QueryBuilder()
qb.append(Computer, project=['name'])
self.assertEqual(qb.count(), 1, "Only one computer should be "
"found.")
self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
"The computer name is not correct.")
finally:
# Deleting the created temporary folders
shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_different_computer_same_name_import(self):
"""
This test checks that if there is a name collision, the imported
computers are renamed accordingly.
"""
import os
import shutil
import tempfile
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.computers import Computer
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.importexport import COMP_DUPL_SUFFIX
# Creating a folder for the import/export files
export_file_tmp_folder = tempfile.mkdtemp()
unpack_tmp_folder = tempfile.mkdtemp()
try:
# Set the computer name
comp1_name = "localhost_1"
self.computer.set_name(comp1_name)
# Store a calculation
calc1_label = "calc1"
calc1 = JobCalculation()
calc1.set_computer(self.computer)
calc1.set_option('resources', {"num_machines": 1,
"num_mpiprocs_per_machine": 1})
calc1.label = calc1_label
calc1.store()
calc1._set_state(u'RETRIEVING')
# Export the first job calculation
filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
export([calc1], outfile=filename1, silent=True)
# Reset the database
self.clean_db()
self.insert_data()
# Set the computer name to the same name as before
self.computer.set_name(comp1_name)
# Store a second calculation
calc2_label = "calc2"
calc2 = JobCalculation()
calc2.set_computer(self.computer)
calc2.set_option('resources', {"num_machines": 2,
"num_mpiprocs_per_machine": 2})
calc2.label = calc2_label
calc2.store()
calc2._set_state(u'RETRIEVING')
# Export the second job calculation
filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
export([calc2], outfile=filename2, silent=True)
# Reset the database
self.clean_db()
self.insert_data()
# Set the computer name to the same name as before
self.computer.set_name(comp1_name)
# Store a third calculation
calc3_label = "calc3"
calc3 = JobCalculation()
calc3.set_computer(self.computer)
calc3.set_option('resources', {"num_machines": 2,
"num_mpiprocs_per_machine": 2})
calc3.label = calc3_label
calc3.store()
calc3._set_state(u'RETRIEVING')
# Export the third job calculation
filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz")
export([calc3], outfile=filename3, silent=True)
# Clean the local database
self.clean_db()
# Check that there are no computers
qb = QueryBuilder()
qb.append(Computer, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any computers"
"in the database at this point.")
# Check that there are no calculations
qb = QueryBuilder()
qb.append(JobCalculation, project=['*'])
self.assertEqual(qb.count(), 0, "There should not be any "
"calculations in the database at "
"this point.")
# Import all the calculations
import_data(filename1, silent=True)
import_data(filename2, silent=True)
import_data(filename3, silent=True)
# Retrieve the calculation-computer pairs
qb = QueryBuilder()
qb.append(JobCalculation, project=['label'], tag='jcalc')
qb.append(Computer, project=['name'],
computer_of='jcalc')
self.assertEqual(qb.count(), 3, "Three combinations expected.")
res = qb.all()
self.assertIn([calc1_label, comp1_name], res,
"Calc-Computer combination not found.")
self.assertIn([calc2_label,
comp1_name + COMP_DUPL_SUFFIX.format(0)], res,
"Calc-Computer combination not found.")
self.assertIn([calc3_label,
comp1_name + COMP_DUPL_SUFFIX.format(1)], res,
"Calc-Computer combination not found.")
finally:
# Deleting the created temporary folders
shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_correct_import_of_computer_json_params(self):
"""
This test checks that the metadata and transport params are
exported and imported correctly in both backends.
"""
import os
import shutil
import tempfile
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.computers import Computer
from aiida.orm.calculation.job import JobCalculation
# Creating a folder for the import/export files
export_file_tmp_folder = tempfile.mkdtemp()
unpack_tmp_folder = tempfile.mkdtemp()
try:
# Set the computer name
comp1_name = "localhost_1"
comp1_metadata = {
u'workdir': u'/tmp/aiida'
}
comp1_transport_params = {
u'key1': u'value1',
u'key2': 2
}
self.computer.set_name(comp1_name)
self.computer._set_metadata(comp1_metadata)
self.computer.set_transport_params(comp1_transport_params)
# Store a calculation
calc1_label = "calc1"
calc1 = JobCalculation()
calc1.set_computer(self.computer)
calc1.set_option('resources', {"num_machines": 1,
"num_mpiprocs_per_machine": 1})
calc1.label = calc1_label
calc1.store()
calc1._set_state(u'RETRIEVING')
# Export the first job calculation
filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
export([calc1], outfile=filename1, silent=True)
# Clean the local database
self.clean_db()
# Import the data
import_data(filename1, silent=True)
qb = QueryBuilder()
qb.append(Computer, project=['transport_params', '_metadata'],
tag="comp")
self.assertEqual(qb.count(), 1, "Expected only one computer")
res = qb.dict()[0]
self.assertEqual(res['comp']['transport_params'],
comp1_transport_params,
"Not the expected transport parameters "
"were found")
self.assertEqual(res['comp']['_metadata'],
comp1_metadata,
"Not the expected metadata were found")
finally:
# Deleting the created temporary folders
shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_import_of_django_sqla_export_file(self):
"""
Check why sqla import manages to import the django export file correctly
"""
from aiida.backends.tests.utils.fixtures import import_archive_fixture
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.computers import Computer
for archive in ['export/compare/django.aiida', 'export/compare/sqlalchemy.aiida']:
# Clean the database
self.clean_db()
# Import the needed data
import_archive_fixture(archive)
# The expected metadata & transport parameters
comp1_metadata = {
u'workdir': u'/tmp/aiida'
}
comp1_transport_params = {
u'key1': u'value1',
u'key2': 2
}
# Check that we got the correct metadata & transport parameters
qb = QueryBuilder()
qb.append(Computer, project=['transport_params', '_metadata'], tag="comp")
self.assertEqual(qb.count(), 1, "Expected only one computer")
res = qb.dict()[0]
self.assertEqual(res['comp']['transport_params'], comp1_transport_params)
self.assertEqual(res['comp']['_metadata'], comp1_metadata)
class TestLinks(AiidaTestCase):
def setUp(self):
self.clean_db()
self.insert_data()
def tearDown(self):
pass
def get_all_node_links(self):
"""
"""
from aiida.orm import load_node, Node
from aiida.orm.querybuilder import QueryBuilder
qb = QueryBuilder()
qb.append(Node, project='uuid', tag='input')
qb.append(Node, project='uuid', tag='output',
edge_project=['label', 'type'], output_of='input')
return qb.all()
def test_input_and_create_links(self):
"""
Simple test that will verify that INPUT and CREATE links are properly exported and
correctly recreated upon import.
"""
import os, shutil, tempfile
from aiida.orm.data.int import Int
from aiida.orm.importexport import export
from aiida.orm.calculation.work import WorkCalculation
from aiida.common.links import LinkType
tmp_folder = tempfile.mkdtemp()
try:
node_work = WorkCalculation().store()
node_input = Int(1).store()
node_output = Int(2).store()
node_work.add_link_from(node_input, 'input', link_type=LinkType.INPUT)
node_output.add_link_from(node_work, 'output', link_type=LinkType.CREATE)
export_links = self.get_all_node_links()
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([node_output], outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
import_links = self.get_all_node_links()
export_set = [tuple(_) for _ in export_links]
import_set = [tuple(_) for _ in import_links]
self.assertEquals(set(export_set), set(import_set))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def construct_complex_graph(self, export_combination=0):
"""
This method creates a "complex" graph with all available link types
(INPUT, CREATE, RETURN and CALL) and returns the nodes of the graph. It
also returns various combinations of nodes that need to be extracted
but also the final expected set of nodes (after adding the expected
predecessors, desuccessors).
"""
from aiida.orm.data.base import Int
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.calculation.work import WorkCalculation
from aiida.common.datastructures import calc_states
from aiida.common.links import LinkType
if export_combination < 0 or export_combination > 8:
return None
# Node creation
d1 = Int(1).store()
d2 = Int(1).store()
wc1 = WorkCalculation().store()
wc2 = WorkCalculation().store()
pw1 = JobCalculation()
pw1.set_computer(self.computer)
pw1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
pw1.store()
d3 = Int(1).store()
d4 = Int(1).store()
pw2 = JobCalculation()
pw2.set_computer(self.computer)
pw2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
pw2.store()
d5 = Int(1).store()
d6 = Int(1).store()
# Link creation
wc1.add_link_from(d1, 'input1', link_type=LinkType.INPUT)
wc1.add_link_from(d2, 'input2', link_type=LinkType.INPUT)
wc2.add_link_from(d1, 'input', link_type=LinkType.INPUT)
wc2.add_link_from(wc1, 'call', link_type=LinkType.CALL)
pw1.add_link_from(d1, 'input', link_type=LinkType.INPUT)
pw1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
pw1._set_state(calc_states.PARSING)
d3.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
d3.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
d4.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
d4.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
pw2.add_link_from(d4, 'input', link_type=LinkType.INPUT)
pw2._set_state(calc_states.PARSING)
d5.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
d6.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
# Return the generated nodes
graph_nodes = [d1, d2, d3, d4, d5, d6, pw1, pw2, wc1, wc2]
# Create various combinations of nodes that should be exported
# and the final set of nodes that are exported in each case, following
# predecessor/successor links.
export_list = [
(wc1, [d1, d2, d3, d4, pw1, wc1, wc2]),
(wc2, [d1, d3, d4, pw1, wc2]),
(d3, [d1, d3, d4, pw1]),
(d4, [d1, d3, d4, pw1]),
(d5, [d1, d3, d4, d5, d6, pw1, pw2]),
(d6, [d1, d3, d4, d5, d6, pw1, pw2]),
(pw2, [d1, d3, d4, d5, d6, pw1, pw2]),
(d1, [d1]),
(d2, [d2])
]
return graph_nodes, export_list[export_combination]
def test_data_create_reversed_false(self):
"""Verify that create_reversed = False is respected when only exporting Data nodes."""
import os
import shutil
import tempfile
from aiida.common.datastructures import calc_states
from aiida.orm import Data, Group
from aiida.orm.data.base import Int
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.importexport import export
from aiida.common.links import LinkType
from aiida.orm.querybuilder import QueryBuilder
tmp_folder = tempfile.mkdtemp()
try:
data_input = Int(1).store()
data_output = Int(2).store()
calc = JobCalculation()
calc.set_computer(self.computer)
calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
calc.store()
calc.add_link_from(data_input, 'input', link_type=LinkType.INPUT)
calc._set_state(calc_states.PARSING)
data_output.add_link_from(calc, 'create', link_type=LinkType.CREATE)
group = Group.create(name='test_group')
group.add_nodes(data_output)
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([group], outfile=export_file, silent=True, create_reversed=False)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
builder = QueryBuilder()
builder.append(Data)
self.assertEqual(builder.count(), 1, 'Expected a single Data node but got {}'.format(builder.count()))
self.assertEqual(builder.all()[0][0].uuid, data_output.uuid)
builder = QueryBuilder()
builder.append(JobCalculation)
self.assertEqual(builder.count(), 0, 'Expected no Calculation nodes')
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_complex_workflow_graph_links(self):
"""
This test checks that all the needed links are correctly exported and
imported. More precisely, it checks that INPUT, CREATE, RETURN and CALL
links connecting Data nodes, JobCalculations and WorkCalculations are
exported and imported correctly.
"""
import os, shutil, tempfile
from aiida.orm import Node
from aiida.orm.importexport import export
from aiida.common.links import LinkType
from aiida.orm.querybuilder import QueryBuilder
tmp_folder = tempfile.mkdtemp()
try:
graph_nodes, _ = self.construct_complex_graph()
# Getting the input, create, return and call links
qb = QueryBuilder()
qb.append(Node, project='uuid')
qb.append(Node, project='uuid',
edge_project=['label', 'type'],
edge_filters={'type': {'in': (LinkType.INPUT.value,
LinkType.CREATE.value,
LinkType.RETURN.value,
LinkType.CALL.value)}})
export_links = qb.all()
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export(graph_nodes, outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
import_links = self.get_all_node_links()
export_set = [tuple(_) for _ in export_links]
import_set = [tuple(_) for _ in import_links]
self.assertEquals(set(export_set), set(import_set))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_complex_workflow_graph_export_set_expansion(self):
import os, shutil, tempfile
from aiida.orm.importexport import export
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm import Node
for export_conf in range(0, 8):
graph_nodes, (export_node, export_target) = (
self.construct_complex_graph(export_conf))
tmp_folder = tempfile.mkdtemp()
try:
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([export_node], outfile=export_file, silent=True)
export_node_str = str(export_node)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
# Get all the nodes of the database
qb = QueryBuilder()
qb.append(Node, project='uuid')
imported_node_uuids = set(str(_[0]) for _ in qb.all())
export_target_uuids = set(str(_.uuid) for _ in export_target)
from aiida.orm.utils import load_node
self.assertEquals(
export_target_uuids,
imported_node_uuids,
"Problem in comparison of export node: " +
str(export_node_str) + "\n" +
"Expected set: " + str(export_target_uuids) + "\n" +
"Imported set: " + str(imported_node_uuids) + "\n" +
"Difference: " + str([load_node(_) for _ in
export_target_uuids.symmetric_difference(
imported_node_uuids)])
)
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_recursive_export_input_and_create_links_proper(self):
"""
Check that CALL, INPUT, RETURN and CREATE links are followed
recursively.
"""
import os, shutil, tempfile
from aiida.orm import Node
from aiida.orm.data.base import Int
from aiida.orm.importexport import export
from aiida.orm.calculation.inline import InlineCalculation
from aiida.orm.calculation.work import WorkCalculation
from aiida.common.links import LinkType
from aiida.orm.querybuilder import QueryBuilder
tmp_folder = tempfile.mkdtemp()
try:
wc2 = WorkCalculation().store()
wc1 = WorkCalculation().store()
c1 = InlineCalculation().store()
ni1 = Int(1).store()
ni2 = Int(2).store()
no1 = Int(1).store()
no2 = Int(2).store()
# Create the connections between workcalculations and calculations
wc1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
c1.add_link_from(wc1, 'call', link_type=LinkType.CALL)
# Connect the first data node to wc1 & c1
wc1.add_link_from(ni1, 'ni1-to-wc1',
link_type=LinkType.INPUT)
c1.add_link_from(ni1, 'ni1-to-c1',
link_type=LinkType.INPUT)
# Connect the second data node to wc1 & c1
wc1.add_link_from(ni2, 'ni2-to-wc1',
link_type=LinkType.INPUT)
c1.add_link_from(ni2, 'ni2-to-c1',
link_type=LinkType.INPUT)
# Connecting the first output node to wc1 & c1
no1.add_link_from(wc1, 'output',
link_type=LinkType.RETURN)
no1.add_link_from(c1, 'output',
link_type=LinkType.CREATE)
# Connecting the second output node to wc1 & c1
no2.add_link_from(wc1, 'output',
link_type=LinkType.RETURN)
no2.add_link_from(c1, 'output',
link_type=LinkType.CREATE)
# Getting the input, create, return and call links
qb = QueryBuilder()
qb.append(Node, project='uuid')
qb.append(Node, project='uuid',
edge_project=['label', 'type'],
edge_filters={'type': {'in': (LinkType.INPUT.value,
LinkType.CREATE.value,
LinkType.RETURN.value,
LinkType.CALL.value)}})
export_links = qb.all()
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([wc2], outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
import_links = self.get_all_node_links()
export_set = [tuple(_) for _ in export_links]
import_set = [tuple(_) for _ in import_links]
self.assertEquals(set(export_set), set(import_set))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_links_for_workflows(self):
"""
Check that CALL links are not followed in the export procedure, and the only creation
is followed for data::
____ ____ ____
| | INP | | CALL | |
| i1 | --> | w1 | <--- | w2 |
|____| |____| |____|
| |
CREATE v v RETURN
____
| |
| o1 |
|____|
"""
import os, shutil, tempfile
from aiida.orm.data.base import Int
from aiida.orm.importexport import export
from aiida.orm.calculation.work import WorkCalculation
from aiida.common.links import LinkType
tmp_folder = tempfile.mkdtemp()
try:
w1 = WorkCalculation().store()
w2 = WorkCalculation().store()
i1 = Int(1).store()
o1 = Int(2).store()
w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
links_wanted = [l for l in self.get_all_node_links() if l[3] in
(LinkType.CREATE.value,
LinkType.INPUT.value,
LinkType.RETURN.value)]
export_file_1 = os.path.join(tmp_folder, 'export-1.tar.gz')
export_file_2 = os.path.join(tmp_folder, 'export-2.tar.gz')
export([o1], outfile=export_file_1, silent=True)
export([w1], outfile=export_file_2, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file_1, silent=True)
links_in_db = self.get_all_node_links()
self.assertEquals(sorted(links_wanted), sorted(links_in_db))
self.clean_db()
self.insert_data()
import_data(export_file_2, silent=True)
links_in_db = self.get_all_node_links()
self.assertEquals(sorted(links_wanted), sorted(links_in_db))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_double_return_links_for_workflows(self):
"""
This test checks that double return links to a node can be exported
and imported without problems,
"""
import os, shutil, tempfile
from aiida.orm.data.base import Int
from aiida.orm.importexport import export
from aiida.orm.calculation.work import WorkCalculation
from aiida.common.links import LinkType
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.node import Node
tmp_folder = tempfile.mkdtemp()
try:
w1 = WorkCalculation().store()
w2 = WorkCalculation().store()
i1 = Int(1).store()
o1 = Int(2).store()
w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
o1.add_link_from(w2, 'return', link_type=LinkType.RETURN)
uuids_wanted = set(_.uuid for _ in (w1, o1, i1, w2))
links_wanted = [l for l in self.get_all_node_links() if l[3] in (
'createlink', 'inputlink', 'returnlink', 'calllink')]
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([o1, w1, w2, i1],
outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
uuids_in_db = [str(uuid) for [uuid] in
QueryBuilder().append(Node, project='uuid').all()]
self.assertEquals(sorted(uuids_wanted), sorted(uuids_in_db))
links_in_db = self.get_all_node_links()
self.assertEquals(sorted(links_wanted), sorted(links_in_db))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_that_solo_code_is_exported_correctly(self):
"""
This test checks that when a calculation is exported then the
corresponding code is also exported.
"""
import os, shutil, tempfile
from aiida.orm.utils import load_node
from aiida.orm.importexport import export
from aiida.orm.code import Code
tmp_folder = tempfile.mkdtemp()
try:
code_label = 'test_code1'
code = Code()
code.set_remote_computer_exec((self.computer, '/bin/true'))
code.label = code_label
code.store()
code_uuid = code.uuid
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([code], outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
self.assertEquals(load_node(code_uuid).label, code_label)
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
def test_that_input_code_is_exported_correctly(self):
"""
This test checks that when a calculation is exported then the
corresponding code is also exported. It also checks that the links
are also in place after the import.
"""
import os, shutil, tempfile
from aiida.orm.utils import load_node
from aiida.orm.importexport import export
from aiida.common.links import LinkType
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.code import Code
from aiida.orm.querybuilder import QueryBuilder
tmp_folder = tempfile.mkdtemp()
try:
code_label = 'test_code1'
code = Code()
code.set_remote_computer_exec((self.computer, '/bin/true'))
code.label = code_label
code.store()
code_uuid = code.uuid
jc = JobCalculation()
jc.set_computer(self.computer)
jc.set_option('resources',
{"num_machines": 1, "num_mpiprocs_per_machine": 1})
jc.store()
jc.add_link_from(code, 'code', link_type=LinkType.INPUT)
export_file = os.path.join(tmp_folder, 'export.tar.gz')
export([jc], outfile=export_file, silent=True)
self.clean_db()
self.insert_data()
import_data(export_file, silent=True)
# Check that the node is there
self.assertEquals(load_node(code_uuid).label, code_label)
# Check that the link is in place
qb = QueryBuilder()
qb.append(Code, project='uuid')
qb.append(JobCalculation, project='uuid',
edge_project=['label', 'type'],
edge_filters={'type': {'==': LinkType.INPUT.value}})
self.assertEquals(qb.count(), 1,
"Expected to find one and only one link from "
"code to the calculation node. {} found."
.format(qb.count()))
finally:
shutil.rmtree(tmp_folder, ignore_errors=True)
| [
[
[
706,
714
]
],
[
[
738,
752
]
],
[
[
776,
791
]
],
[
[
799,
801
],
[
11701,
11703
],
[
11937,
11939
],
[
13652,
13654
],
[
13966,
13968
]
],
[
[
809,
812
],
[
43005,
43008
],
[
43063,
43066
],
[
44588,
44591
],
[
45028,
45031
],
[
45164,
45167
],
[
45808,
45811
],
[
45944,
45947
],
[
48257,
48260
],
[
50281,
50284
],
[
50721,
50724
],
[
51272,
51275
]
],
[
[
835,
840
],
[
29651,
29656
],
[
35186,
35191
],
[
35297,
35302
],
[
35422,
35427
],
[
35651,
35656
],
[
35796,
35801
],
[
35774,
35779
],
[
36847,
36852
],
[
69679,
69684
]
],
[
[
842,
845
],
[
8263,
8266
],
[
8709,
8712
]
],
[
[
883,
896
],
[
994,
1007
],
[
7503,
7516
],
[
37981,
37994
],
[
41143,
41156
],
[
60532,
60545
]
],
[
[
932,
943
],
[
8589,
8600
],
[
10341,
10352
],
[
12383,
12394
],
[
14322,
14333
],
[
14370,
14381
],
[
19447,
19458
],
[
22316,
22327
],
[
23670,
23681
],
[
26170,
26181
],
[
28178,
28189
],
[
30386,
30397
],
[
32212,
32223
],
[
37432,
37443
],
[
44228,
44239
],
[
45408,
45419
],
[
49921,
49932
],
[
50886,
50897
],
[
55345,
55356
],
[
55393,
55404
],
[
55441,
55452
],
[
58326,
58337
],
[
62108,
62119
],
[
67053,
67064
],
[
69061,
69072
],
[
70152,
70163
],
[
74170,
74181
],
[
76346,
76357
],
[
76584,
76595
],
[
78356,
78367
],
[
79660,
79671
],
[
81231,
81242
]
],
[
[
962,
965
],
[
17831,
17834
],
[
21081,
21084
],
[
25003,
25006
],
[
27428,
27431
]
],
[
[
975,
993
],
[
1046,
1064
]
],
[
[
7492,
7502
]
],
[
[
37969,
37980
]
],
[
[
41130,
41142
]
],
[
[
60522,
60531
]
]
] |
import asyncio
import base64
import threading
import cv2
import numpy as np
from flask_socketio import SocketIO, emit
from flask import Flask, render_template
import multiprocessing
class Streamer():
def __init__(self) -> None:
"""Constructor
"""
@staticmethod
async def stream_socket(
url_server: str,
app: 'Flask' = None,
socket_options: 'dict' = None,
socket_msg: 'str' = "mvfy_visual_img",
)-> 'function':
app = Flask(__name__) if app is None else app
        # socket_options may be None (its default), so fall back to an empty dict before unpacking.
        socketio = SocketIO(app, **(socket_options or {}))
        # Use start() rather than run() so the Socket.IO server runs in a background thread instead of blocking here.
        threading.Thread(target=lambda: socketio.run(url_server)).start()
async def wraper_function(img, extension: str = ".jpg", size: tuple = (1920, 1080)):
if size is not None:
frame = cv2.resize(img, size)
_, buffer = cv2.imencode(extension, frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
data = base64.b64encode(buffer)
socketio.emit(socket_msg, {
"data": data
})
return wraper_function
@staticmethod
async def stream_local(
img: np.array,
size: tuple = (1920, 1080),
title: str = "title"
) -> None:
if size is not None:
img = cv2.resize(img, size)
cv2.imshow(title, img) | [
[
[
7,
14
]
],
[
[
22,
28
],
[
969,
975
]
],
[
[
36,
45
],
[
603,
612
]
],
[
[
53,
56
],
[
1316,
1319
],
[
1350,
1353
],
[
827,
830
],
[
874,
877
],
[
906,
909
]
],
[
[
65,
76
],
[
1171,
1173
]
],
[
[
104,
112
],
[
562,
570
]
],
[
[
114,
118
]
],
[
[
137,
142
],
[
503,
508
]
],
[
[
144,
159
]
],
[
[
167,
182
]
],
[
[
191,
199
]
]
] |
# coding=utf-8
"""
@author: magician
@date: 2018/9/14
"""
import datetime
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, request
from sqlalchemy.exc import IntegrityError
from marshmallow import Schema, fields, ValidationError, pre_load
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
# MODELS
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
first = db.Column(db.String(80))
last = db.Column(db.String(80))
class Quote(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
author = db.relationship(
'Author',
backref=db.backref('quotes', lazy='dynamic'),
)
posted_at = db.Column(db.DateTime)
# SCHEMAS
class AuthorSchema(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
formatted_name = fields.Method('format_name', dump_only=True)
def format_name(self, author):
return '{}, {}'.format(author.last, author.first)
# Custom validator
def must_not_be_blank(data):
if not data:
raise ValidationError('Data not provided.')
class QuoteSchema(Schema):
id = fields.Int(dump_only=True)
author = fields.Nested(AuthorSchema, validate=must_not_be_blank)
content = fields.Str(required=True, validate=must_not_be_blank)
posted_at = fields.DateTime(dump_only=True)
# Allow client to pass author's full name in request body
# e.g. {"author': 'Tim Peters"} rather than {"first": "Tim", "last": "Peters"}
@pre_load
def process_author(self, data):
author_name = data.get('author')
if author_name:
first, last = author_name.split(' ')
author_dict = dict(first=first, last=last)
else:
author_dict = {}
data['author'] = author_dict
return data
author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)
quote_schema = QuoteSchema()
quotes_schema = QuoteSchema(many=True, only=('id', 'content'))
# API
@app.route('/authors')
def get_authors():
authors = Author.query.all()
# Serialize the queryset
result = authors_schema.dump(authors)
return jsonify({'authors': result})
@app.route('/authors/<int:pk>')
def get_author(pk):
try:
author = Author.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Author could not be found.'}), 400
author_result = author_schema.dump(author)
quotes_result = quotes_schema.dump(author.quotes.all())
return jsonify({'author': author_result, 'quotes': quotes_result})
@app.route('/quotes/', methods=['GET'])
def get_quotes():
quotes = Quote.query.all()
result = quotes_schema.dump(quotes, many=True)
return jsonify({'quotes': result})
@app.route('/quotes/<int:pk>')
def get_quote(pk):
try:
quote = Quote.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Quote could not be found.'}), 400
result = quote_schema.dump(quote)
return jsonify({'quote': result})
@app.route('/quotes/', methods=['POST'])
def new_quote():
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
# Validate and deserialize input
try:
data = quote_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 422
first, last = data['author']['first'], data['author']['last']
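    # Look up the author, creating a new one on first use.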
author = Author.query.filter_by(first=first, last=last).first()
if author is None:
# Create a new author
author = Author(first=first, last=last)
db.session.add(author)
# Create new quote
quote = Quote(
content=data['content'],
author=author,
posted_at=datetime.datetime.utcnow(),
)
db.session.add(quote)
db.session.commit()
result = quote_schema.dump(Quote.query.get(quote.id))
return jsonify({
'message': 'Created new quote.',
'quote': result,
})
if __name__ == '__main__':
db.create_all()
app.run(debug=True, port=5000)
| [
[
[
65,
73
],
[
3951,
3959
]
],
[
[
104,
114
],
[
397,
407
]
],
[
[
133,
138
],
[
272,
277
]
],
[
[
140,
147
],
[
2361,
2368
],
[
2533,
2540
],
[
2707,
2714
],
[
2920,
2927
],
[
3087,
3094
],
[
3191,
3198
],
[
3350,
3357
],
[
3542,
3549
],
[
4104,
4111
]
],
[
[
149,
156
],
[
3294,
3301
]
],
[
[
184,
198
],
[
2502,
2516
],
[
3056,
3070
]
],
[
[
223,
229
],
[
941,
947
],
[
1334,
1340
]
],
[
[
231,
237
],
[
959,
965
],
[
998,
1004
],
[
1022,
1028
],
[
1056,
1062
],
[
1352,
1358
],
[
1392,
1398
],
[
1462,
1468
],
[
1532,
1538
]
],
[
[
239,
254
],
[
1276,
1291
],
[
3503,
3518
]
],
[
[
256,
264
],
[
1715,
1723
]
],
[
[
266,
269
],
[
288,
291
],
[
340,
343
],
[
408,
411
],
[
2205,
2208
],
[
2393,
2396
],
[
2770,
2773
],
[
2951,
2954
],
[
3221,
3224
],
[
4240,
4243
]
],
[
[
392,
394
],
[
438,
440
],
[
458,
460
],
[
468,
470
],
[
510,
512
],
[
520,
522
],
[
546,
548
],
[
556,
558
],
[
585,
587
],
[
605,
607
],
[
615,
617
],
[
659,
661
],
[
669,
671
],
[
712,
714
],
[
722,
724
],
[
734,
736
],
[
775,
777
],
[
826,
828
],
[
886,
888
],
[
896,
898
],
[
4220,
4222
],
[
3812,
3814
],
[
3989,
3991
],
[
4015,
4017
]
],
[
[
431,
437
],
[
2260,
2266
],
[
2470,
2476
],
[
3648,
3654
],
[
3773,
3779
]
],
[
[
579,
584
],
[
2840,
2845
],
[
3025,
3030
],
[
3870,
3875
],
[
4066,
4071
]
],
[
[
928,
940
],
[
1406,
1418
],
[
2047,
2059
],
[
2079,
2091
]
],
[
[
1220,
1237
],
[
1429,
1446
],
[
1497,
1514
]
],
[
[
1322,
1333
],
[
2118,
2129
],
[
2148,
2159
]
],
[
[
2031,
2044
],
[
2609,
2622
]
],
[
[
2062,
2076
],
[
2321,
2335
]
],
[
[
2103,
2115
],
[
3155,
3167
],
[
3463,
3475
],
[
4048,
4060
]
],
[
[
2132,
2145
],
[
2656,
2669
],
[
2871,
2884
]
],
[
[
2231,
2242
]
],
[
[
2428,
2438
]
],
[
[
2813,
2823
]
],
[
[
2985,
2994
]
],
[
[
3265,
3274
]
]
] |
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest, frappe, pyotp
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frappe.auth import HTTPRequest
from frappe.utils import cint
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor, get_cached_user_pass,
two_factor_is_enabled_for_, confirm_otp_token, get_otpsecret_for_, get_verification_obj,
render_string_template, two_factor_is_enabled)
import time
class TestTwoFactor(unittest.TestCase):
def setUp(self):
self.http_requests = create_http_request()
self.login_manager = frappe.local.login_manager
self.user = self.login_manager.user
def tearDown(self):
frappe.local.response['verification'] = None
frappe.local.response['tmp_id'] = None
disable_2fa()
frappe.clear_cache(user=self.user)
def test_should_run_2fa(self):
'''Should return true if enabled.'''
toggle_2fa_all_role(state=True)
self.assertTrue(should_run_2fa(self.user))
toggle_2fa_all_role(state=False)
self.assertFalse(should_run_2fa(self.user))
def test_get_cached_user_pass(self):
'''Cached data should not contain user and pass before 2fa.'''
user,pwd = get_cached_user_pass()
self.assertTrue(all([not user, not pwd]))
def test_authenticate_for_2factor(self):
'''Verification obj and tmp_id should be set in frappe.local.'''
authenticate_for_2factor(self.user)
verification_obj = frappe.local.response['verification']
tmp_id = frappe.local.response['tmp_id']
self.assertTrue(verification_obj)
self.assertTrue(tmp_id)
for k in ['_usr','_pwd','_otp_secret']:
self.assertTrue(frappe.cache().get('{0}{1}'.format(tmp_id,k)),
'{} not available'.format(k))
def test_two_factor_is_enabled(self):
'''
1. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users
2. Should return false, if not enabled
3. Should return true, if enabled and bypass_2fa_for_retricted_ip_users and not user.restricted_ip
4. Should return false, if enabled and bypass_2fa_for_retricted_ip_users and user.restricted_ip
'''
#Scenario 1
disable_2fa()
self.assertFalse(two_factor_is_enabled(self.user))
#Scenario 2
enable_2fa()
self.assertTrue(two_factor_is_enabled(self.user))
#Scenario 3
enable_2fa()
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
self.assertTrue(two_factor_is_enabled(self.user))
#Scenario 4
user = frappe.get_doc('User', self.user)
user.restrict_ip = ""
user.save()
enable_2fa(1)
self.assertTrue(two_factor_is_enabled(self.user))
#Scenario 5
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
enable_2fa(1)
self.assertFalse(two_factor_is_enabled(self.user))
def test_two_factor_is_enabled_for_user(self):
'''Should return true if enabled for user.'''
toggle_2fa_all_role(state=True)
self.assertTrue(two_factor_is_enabled_for_(self.user))
self.assertFalse(two_factor_is_enabled_for_("Administrator"))
toggle_2fa_all_role(state=False)
self.assertFalse(two_factor_is_enabled_for_(self.user))
def test_get_otpsecret_for_user(self):
'''OTP secret should be set for user.'''
self.assertTrue(get_otpsecret_for_(self.user))
self.assertTrue(frappe.db.get_default(self.user + '_otpsecret'))
def test_confirm_otp_token(self):
'''Ensure otp is confirmed'''
authenticate_for_2factor(self.user)
tmp_id = frappe.local.response['tmp_id']
otp = 'wrongotp'
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
otp = get_otp(self.user)
self.assertTrue(confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id))
if frappe.flags.tests_verbose:
print('Sleeping for 30secs to confirm token expires..')
time.sleep(30)
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
def test_get_verification_obj(self):
'''Confirm verification object is returned.'''
otp_secret = get_otpsecret_for_(self.user)
token = int(pyotp.TOTP(otp_secret).now())
self.assertTrue(get_verification_obj(self.user,token,otp_secret))
def test_render_string_template(self):
'''String template renders as expected with variables.'''
args = {'issuer_name':'Frappe Technologies'}
_str = 'Verification Code from {{issuer_name}}'
_str = render_string_template(_str,args)
self.assertEqual(_str,'Verification Code from Frappe Technologies')
def set_request(**kwargs):
builder = EnvironBuilder(**kwargs)
frappe.local.request = Request(builder.get_environ())
def create_http_request():
'''Get http request object.'''
set_request(method='POST', path='login')
enable_2fa()
frappe.form_dict['usr'] = 'test@erpnext.com'
frappe.form_dict['pwd'] = 'test'
frappe.local.form_dict['cmd'] = 'login'
http_requests = HTTPRequest()
return http_requests
def enable_2fa(bypass_two_factor_auth=0):
'''Enable Two factor in system settings.'''
system_settings = frappe.get_doc('System Settings')
system_settings.enable_two_factor_auth = 1
system_settings.bypass_2fa_for_retricted_ip_users = cint(bypass_two_factor_auth)
system_settings.two_factor_method = 'OTP App'
system_settings.save(ignore_permissions=True)
frappe.db.commit()
def disable_2fa():
system_settings = frappe.get_doc('System Settings')
system_settings.enable_two_factor_auth = 0
system_settings.bypass_2fa_for_retricted_ip_users = 0
system_settings.save(ignore_permissions=True)
frappe.db.commit()
def toggle_2fa_all_role(state=None):
'''Enable or disable 2fa for 'all' role on the system.'''
all_role = frappe.get_doc('Role','All')
if state == None:
state = False if all_role.two_factor_auth == True else False
if state not in [True,False]:return
all_role.two_factor_auth = state
all_role.save(ignore_permissions=True)
frappe.db.commit()
def get_otp(user):
otp_secret = get_otpsecret_for_(user)
otp = pyotp.TOTP(otp_secret)
return otp.now() | [
[
[
123,
139
]
],
[
[
148,
156
],
[
583,
591
]
],
[
[
158,
164
],
[
689,
695
],
[
778,
784
],
[
825,
831
],
[
882,
888
],
[
1502,
1508
],
[
1551,
1557
],
[
1706,
1712
],
[
2365,
2371
],
[
2420,
2426
],
[
2534,
2540
],
[
2698,
2704
],
[
2753,
2759
],
[
3356,
3362
],
[
3522,
3528
],
[
3598,
3604
],
[
3801,
3807
],
[
3930,
3936
],
[
4643,
4649
],
[
4814,
4820
],
[
4860,
4866
],
[
4894,
4900
],
[
5094,
5100
],
[
5349,
5355
],
[
5407,
5413
],
[
5588,
5594
],
[
5716,
5722
],
[
5939,
5945
]
],
[
[
166,
171
],
[
4169,
4174
],
[
6024,
6029
]
],
[
[
202,
209
],
[
4666,
4673
]
],
[
[
236,
250
],
[
4617,
4631
]
],
[
[
275,
286
],
[
4951,
4962
]
],
[
[
312,
316
],
[
5225,
5229
]
],
[
[
347,
361
],
[
1041,
1055
],
[
1122,
1136
]
],
[
[
363,
387
],
[
1445,
1469
],
[
3475,
3499
]
],
[
[
389,
409
],
[
1266,
1286
]
],
[
[
412,
438
],
[
3009,
3035
],
[
3067,
3093
],
[
3166,
3192
]
],
[
[
440,
457
],
[
3630,
3647
],
[
3735,
3752
],
[
3962,
3979
]
],
[
[
459,
477
],
[
3307,
3325
],
[
4125,
4143
],
[
5992,
6010
]
],
[
[
479,
499
],
[
4217,
4237
]
],
[
[
502,
524
],
[
4474,
4496
]
],
[
[
526,
547
],
[
2210,
2231
],
[
2292,
2313
],
[
2476,
2497
],
[
2640,
2661
],
[
2826,
2847
]
],
[
[
557,
561
],
[
3890,
3894
]
],
[
[
569,
582
]
],
[
[
4583,
4594
],
[
4758,
4769
]
],
[
[
4702,
4721
],
[
644,
663
]
],
[
[
4992,
5002
],
[
2261,
2271
],
[
2343,
2353
],
[
2608,
2618
],
[
2793,
2803
],
[
4800,
4810
]
],
[
[
5373,
5384
],
[
866,
877
],
[
2177,
2188
]
],
[
[
5612,
5631
],
[
991,
1010
],
[
1070,
1089
],
[
2959,
2978
],
[
3114,
3133
]
],
[
[
5963,
5970
],
[
3698,
3705
]
]
] |
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
import shutil
class SwigConan(ConanFile):
name = "swig_installer"
version = "4.0.1"
description = "SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages."
topics = ("conan", "swig", "python", "java", "wrapper")
url = "https://github.com/bincrafters/conan-swig_installer"
homepage = "http://www.swig.org"
author = "Bincrafters <bincrafters@gmail.com>"
license = "GPL-3.0-or-later"
exports = ["LICENSE.md"]
settings = "os_build", "arch_build", "compiler", "os", "arch"
_source_subfolder = "source_subfolder"
def configure(self):
# Verify build configuration
if str(self.settings.os_build) != str(self.settings.os):
raise ConanInvalidConfiguration("settings.os_build must be equal to settings.os")
        if str(self.settings.arch_build) != str(self.settings.arch):
            raise ConanInvalidConfiguration("settings.arch_build must be equal to settings.arch")
def package_id(self):
del self.info.settings.compiler
del self.info.settings.os
del self.info.settings.arch
self.info.include_build_settings()
def build_requirements(self):
if tools.os_info.is_windows:
self.build_requires("msys2/20161025")
if self.settings.os_build == "Windows":
self.build_requires("winflexbison/2.5.18@bincrafters/stable")
else:
self.build_requires("bison_installer/3.3.2@bincrafters/stable")
self.build_requires("pcre/8.41")
if self.settings.compiler == "Visual Studio":
self.build_requires("cccl_installer/1.0@bincrafters/stable")
def system_requirements(self):
if self.develop:
if tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = [
"autoconf",
"automake",
]
for package in packages:
installer.install(package)
def source(self):
url = "https://github.com/swig/swig/archive/rel-{}.tar.gz".format(self.version)
sha256 = "2eaf6fb89d071d1be280bf995c63360b3729860c0da64948123b5d7e4cfb6cb7"
foldername = "swig-rel-{}".format(self.version)
tools.get(url, sha256=sha256)
os.rename(foldername, self._source_subfolder)
@contextmanager
def _build_environment(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
yield
else:
yield
def _patch_sources(self):
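        # Insert an empty SWIG_LIB_WIN_UNIX shell variable into configure.ac before it is consumed by AC_DEFINE_UNQUOTED.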
tools.replace_in_file(os.path.join(self._source_subfolder, "configure.ac"),
"AC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX",
"SWIG_LIB_WIN_UNIX=""\nAC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX")
def build(self):
self._patch_sources()
with tools.chdir(os.path.join(self.build_folder, self._source_subfolder)):
self.run('./autogen.sh', win_bash=tools.os_info.is_windows)
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
deps_libpaths = env_build.library_paths
deps_libs = env_build.libs
deps_defines = env_build.defines
if self.settings.os_build == "Windows" and self.settings.compiler != "Visual Studio":
env_build.link_flags.append("-static")
libargs = list("-L\"{}\"".format(p) for p in deps_libpaths) + list("-l\"{}\"".format(l) for l in deps_libs)
args = [
"PCRE_LIBS={}".format(" ".join(libargs)),
"PCRE_CPPFLAGS={}".format(" ".join("-D{}".format(define) for define in deps_defines)),
"--host={}".format(tools.detected_architecture()),
]
if self.settings.compiler == "Visual Studio":
self.output.warn("Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.")
args.append("--disable-ccache")
with self._build_environment():
env_build.configure(configure_dir=os.path.join(self.build_folder, self._source_subfolder), args=args)
with tools.environment_append({"CONAN_CPU_COUNT": "1" if self.settings.compiler == "Visual Studio" else str(tools.cpu_count())}):
env_build.make()
def package(self):
self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
self.copy(pattern="COPYRIGHT", dst="licenses", src=self._source_subfolder)
with tools.chdir(self.build_folder):
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
env_build.install()
if self.settings.os == "Windows":
shutil.move(os.path.join(self.package_folder, "share", "swig", self.version),
os.path.join(self.package_folder, "bin", "Lib"))
shutil.rmtree(os.path.join(self.package_folder, "share"))
if self.settings.compiler != "Visual Studio":
with tools.chdir(os.path.join(self.package_folder, "bin")):
ext = ".exe" if tools.os_info.is_windows else ""
self.run("strip swig{}".format(ext), win_bash=tools.os_info.is_windows)
self.run("strip ccache-swig{}".format(ext), win_bash=tools.os_info.is_windows)
def package_info(self):
bindir = os.path.join(self.package_folder, "bin")
self.output.info('Appending PATH environment variable: {}'.format(bindir))
self.env_info.PATH.append(bindir)
if self.settings.os == "Windows":
swig_lib_path = os.path.join(self.package_folder, "bin", "Lib")
else:
swig_lib_path = os.path.join(self.package_folder, "share", "swig", self.version)
self.output.info('Setting SWIG_LIB environment variable: {}'.format(swig_lib_path))
self.env_info.SWIG_LIB = swig_lib_path
self.output.info('Setting SWIG_INSTALLER_ROOT to {}'.format(self.package_folder))
self.env_info.SWIG_INSTALLER_ROOT = self.package_folder
| [
[
[
19,
28
],
[
195,
204
]
],
[
[
30,
35
],
[
1420,
1425
],
[
1952,
1957
],
[
2004,
2009
],
[
2489,
2494
],
[
2699,
2704
],
[
2821,
2826
],
[
3124,
3129
],
[
3240,
3245
],
[
3327,
3332
],
[
3941,
3946
],
[
4357,
4362
],
[
4460,
4465
],
[
4717,
4722
],
[
4814,
4819
],
[
5243,
5248
],
[
5334,
5339
],
[
5433,
5438
],
[
5532,
5537
]
],
[
[
37,
62
],
[
3286,
3311
],
[
4773,
4798
]
],
[
[
89,
114
],
[
939,
964
],
[
1108,
1133
]
],
[
[
138,
152
],
[
2579,
2593
]
],
[
[
160,
162
],
[
2527,
2529
],
[
2843,
2845
],
[
3136,
3138
],
[
4272,
4274
],
[
4946,
4948
],
[
5040,
5042
],
[
5119,
5121
],
[
5255,
5257
],
[
5604,
5606
],
[
5841,
5843
],
[
5931,
5933
]
],
[
[
170,
176
],
[
4934,
4940
],
[
5105,
5111
]
],
[
[
185,
194
]
]
] |
"""
WSGI config for pv project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
sys.path.append('/srv/pv/pv')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pv.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
[
[
663,
665
],
[
709,
711
]
],
[
[
673,
676
],
[
678,
681
]
],
[
[
976,
996
],
[
1011,
1031
]
],
[
[
997,
1008
]
]
] |
"""This example demonstrates the usage of ZOOptSearch.
It also checks that it is usable with a separate scheduler.
"""
import time
from ray import tune
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.schedulers import AsyncHyperBandScheduler
from zoopt import ValueType # noqa: F401
def evaluation_fn(step, width, height):
time.sleep(0.1)
return (0.1 + width * step / 100)**(-1) + height * 0.1
def easy_objective(config):
# Hyperparameters
width, height = config["width"], config["height"]
for step in range(config["steps"]):
# Iterative training function - can be any arbitrary training procedure
intermediate_score = evaluation_fn(step, width, height)
        # Feed the score back to Tune.
tune.report(iterations=step, mean_loss=intermediate_score)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
num_samples = 10 if args.smoke_test else 1000
# Optional: Pass the parameter space yourself
# space = {
# # for continuous dimensions: (continuous, search_range, precision)
# "height": (ValueType.CONTINUOUS, [-10, 10], 1e-2),
# # for discrete dimensions: (discrete, search_range, has_order)
# "width": (ValueType.DISCRETE, [0, 10], True)
# # for grid dimensions: (grid, grid_list)
# "layers": (ValueType.GRID, [4, 8, 16])
# }
zoopt_search_config = {
"parallel_num": 8,
}
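    # Wrap ZOOpt's ASRacos optimizer as a Tune search algorithm; its budget equals the number of samples.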
zoopt_search = ZOOptSearch(
algo="Asracos", # only support ASRacos currently
budget=num_samples,
# dim_dict=space, # If you want to set the space yourself
**zoopt_search_config)
scheduler = AsyncHyperBandScheduler()
analysis = tune.run(
easy_objective,
metric="mean_loss",
mode="min",
search_alg=zoopt_search,
name="zoopt_search",
scheduler=scheduler,
num_samples=num_samples,
config={
"steps": 10,
"height": tune.quniform(-10, 10, 1e-2),
"width": tune.randint(0, 10)
})
print("Best config found: ", analysis.best_config)
| [
[
[
127,
131
],
[
345,
349
]
],
[
[
149,
153
],
[
1886,
1890
],
[
2156,
2160
],
[
2207,
2211
],
[
763,
767
]
],
[
[
189,
200
],
[
1630,
1641
]
],
[
[
233,
256
],
[
1844,
1867
]
],
[
[
275,
284
]
],
[
[
305,
318
],
[
676,
689
]
],
[
[
426,
440
],
[
1904,
1918
]
],
[
[
862,
870
],
[
885,
893
]
],
[
[
876,
882
],
[
915,
921
],
[
1030,
1036
]
],
[
[
1020,
1024
],
[
1081,
1085
]
],
[
[
1026,
1027
]
],
[
[
1061,
1072
],
[
1716,
1727
],
[
2079,
2090
]
],
[
[
1553,
1572
],
[
1806,
1825
]
],
[
[
1615,
1627
],
[
1987,
1999
]
],
[
[
1832,
1841
],
[
2048,
2057
]
],
[
[
1875,
1883
],
[
2271,
2279
]
]
] |
import urllib
import jose.jwt
import time
import random
import sys
import requests
from flask import Flask, request, redirect, make_response, jsonify
import subprocess
# seconds until the token expires
TOKEN_EXPIRES = 2
# A mocked out oauth server, which serves all the endpoints needed by the oauth type.
class MockOauthApp:
def __init__(self, port):
self.port = port
# mock flask app
self.app = Flask("mock_oauth_app")
self.app.add_url_rule("/authorize", view_func=self.api_authorize)
self.app.add_url_rule("/oauth/token", view_func=self.api_oauth_token, methods=["POST"])
self.app.add_url_rule("/v2/logout", view_func=self.api_logout)
self.app.add_url_rule("/.well-known/openid-configuration", view_func=self.api_openid_configuration)
self.app.add_url_rule("/.well-known/jwks.json", view_func=self.api_jwks)
def api_authorize(self):
callback = request.args.get("redirect_uri")
state = request.args.get("state")
return redirect(callback + f"?code=fakecode&state={state}")
def api_oauth_token(self):
expires_at = time.time()
headers = dict(alg="RS256", kid="fake_kid")
payload = dict(
name="Fake User", sub="test_user_id", email="fake_user@email.com", email_verified=True, exp=expires_at
)
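        # Encode the claims into a fake JWT that is returned as the id_token.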
jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
r = {
"access_token": f"access-{time.time()}",
"id_token": jwt,
"refresh_token": f"random-{time.time()}",
"scope": "openid profile email offline",
"expires_in": TOKEN_EXPIRES,
"token_type": "Bearer",
"expires_at": expires_at,
}
return make_response(jsonify(r))
def api_logout(self):
return_to = request.args.get("returnTo")
return redirect(return_to)
def api_openid_configuration(self):
data = dict(jwks_uri=f"http://localhost:{self.port}/.well-known/jwks.json")
return make_response(jsonify(data))
def api_jwks(self):
data = dict(
alg="RS256",
kty="RSA",
use="sig",
kid="fake_kid",
)
return make_response(jsonify(dict(keys=[data])))
class MockOauthServer:
def __init__(self):
self.process = None
self.port = None
self.server_okay = False
def start(self):
self.port = random.randint(10000, 20000)
self.process = subprocess.Popen([sys.executable, __file__, str(self.port)])
# Verify that the mock oauth server is ready (accepting requests) before starting the tests.
self.server_okay = False
for _ in range(5):
try:
response = requests.get(f"http://localhost:{self.port}/.well-known/jwks.json")
if response.status_code == 200:
self.server_okay = True
break
except Exception:
pass
# wait one second and try again
time.sleep(1)
def terminate(self):
self.process.terminate()
def get_auth_token(app):
"""
Generated an auth token for testing.
:param app: a chalice app.
:return:
"""
headers = dict(host="localhost")
response = app.get("/dp/v1/login", headers=headers)
location = response.headers["Location"]
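    # Extract the OAuth state parameter from the redirect URL so it can be echoed back to the callback.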
split = urllib.parse.urlsplit(location)
args = dict(urllib.parse.parse_qsl(split.query))
# follow redirect
url = f"/dp/v1/oauth2/callback?code=fakecode&state={args['state']}"
response = app.get(url, headers=dict(host="localhost", Cookie=response.headers["Set-Cookie"]))
return response.headers["Set-Cookie"]
if __name__ == "__main__":
port = int(sys.argv[1])
mock_app = MockOauthApp(port)
mock_app.app.run(port=port, debug=True)
| [
[
[
7,
13
],
[
3441,
3447
],
[
3489,
3495
]
],
[
[
21,
29
],
[
1357,
1361
]
],
[
[
37,
41
],
[
1129,
1133
],
[
1493,
1497
],
[
1576,
1580
],
[
3091,
3095
]
],
[
[
49,
55
],
[
2479,
2485
]
],
[
[
63,
66
],
[
3806,
3809
],
[
2549,
2552
]
],
[
[
74,
82
],
[
2797,
2805
]
],
[
[
101,
106
],
[
429,
434
]
],
[
[
108,
115
],
[
933,
940
],
[
982,
989
],
[
1857,
1864
]
],
[
[
117,
125
],
[
1023,
1031
],
[
1901,
1909
]
],
[
[
127,
140
],
[
1784,
1797
],
[
2061,
2074
],
[
2260,
2273
]
],
[
[
142,
149
],
[
1798,
1805
],
[
2075,
2082
],
[
2274,
2281
]
],
[
[
157,
167
],
[
2531,
2541
]
],
[
[
203,
216
],
[
1670,
1683
]
],
[
[
315,
327
],
[
3834,
3846
]
],
[
[
2310,
2325
]
],
[
[
3170,
3184
]
],
[
[
3795,
3799
],
[
3847,
3851
],
[
3879,
3883
]
],
[
[
3823,
3831
],
[
3857,
3865
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from .. import generic
from ..util import get_const_tuple
def _schedule_reduce(sch, op, is_idx_reduce=False):
if is_idx_reduce:
real_out = op.output(0)
fused = sch[real_out].fuse(*sch[real_out].op.axis)
out = op.input_tensors[0]
else:
out = op.output(0)
const_shape = True
out_shape = get_const_tuple(out.shape)
for d in out_shape:
if not isinstance(d, int):
const_shape = False
break
if const_shape:
naxes = len(sch[out].op.axis)
parallelism = 1
fuse_axes = []
# We choose a heuristic number 128 to limit the maximum parallelism
while len(fuse_axes) < naxes and parallelism < 128:
ivar = sch[out].op.axis[len(fuse_axes)]
parallelism *= int(ivar.dom.extent)
fuse_axes.append(ivar)
fused = sch[out].fuse(*fuse_axes)
sch[out].parallel(fused)
else:
if len(sch[out].op.axis) >= 5:
# avoid too many parallelism
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
sch[out].parallel(fused)
else:
fused = sch[out].fuse(*sch[out].op.axis)
sch[out].parallel(fused)
@generic.schedule_reduce.register(["cpu"])
def schedule_reduce(outs):
"""X86 schedule for reduction op.
Parameters
----------
outs: Array of Tensor
The computation graph description of injective in the format
of an array of tensors.
Returns
-------
sch: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
sch = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse_before_reduce(operator):
"""Internal traverse function"""
if isinstance(operator, tvm.tensor.PlaceholderOp):
return
if tag.is_injective(operator.tag):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
def traverse_after_reduce(operator):
"""Internal traverse function"""
if tag.is_broadcast(operator.tag):
if operator not in scheduled_ops:
generic.schedule_injective_from_existing(sch, operator)
for tensor in operator.input_tensors:
traverse_after_reduce(tensor.op)
elif operator.tag == 'comm_reduce':
_schedule_reduce(sch, operator, is_idx_reduce=False)
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif operator.tag == 'comm_reduce_idx':
_schedule_reduce(sch, operator, is_idx_reduce=True)
input_tensors = operator.input_tensors[0].op.input_tensors
for tensor in input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif isinstance(operator, tvm.tensor.PlaceholderOp):
pass
else:
raise RuntimeError("Unsupported operator: %s (tag: %s)" % (operator, operator.tag))
scheduled_ops.append(operator)
traverse_after_reduce(outs[0].op)
return sch
| [
[
[
876,
899
]
],
[
[
907,
910
],
[
2587,
2590
],
[
2626,
2629
],
[
2807,
2810
],
[
4192,
4195
]
],
[
[
926,
929
],
[
2864,
2867
],
[
3316,
3319
]
],
[
[
945,
952
],
[
2184,
2191
],
[
3410,
3417
]
],
[
[
972,
987
],
[
1265,
1280
]
],
[
[
993,
1009
],
[
3621,
3637
],
[
3889,
3905
]
],
[
[
2230,
2245
]
]
] |
# boot.py -- runs on boot-up
import pyb
pyb.LED(3).on() # indicate we are waiting for switch press
pyb.delay(2000) # wait for user to maybe press the switch
switch_value = pyb.Switch()() # sample the switch at end of delay
pyb.LED(3).off() # indicate that we finished waiting for the switch
pyb.LED(4).on() # indicate that we are selecting the mode
if switch_value:
# button pressed, mount SD card as usb storage
pyb.usb_mode('CDC+MSC')
pyb.main('debug.py')
else:
# no button pressed, SD card can be used by script
pyb.usb_mode('CDC+HID')
pyb.main('displaytemp.py')
pyb.LED(4).off() # indicate that we finished selecting the mode
| [
[
[
37,
40
],
[
42,
45
],
[
117,
120
],
[
206,
209
],
[
259,
262
],
[
343,
346
],
[
490,
493
],
[
518,
521
],
[
604,
607
],
[
632,
635
],
[
660,
663
]
],
[
[
191,
203
],
[
421,
433
]
]
] |
from Cimpl import *
image = load_image(choose_file())
def flip_vertical(image: Image) -> Image:
vertical_image = copy(image)
for x in range(get_width(image)):
for y in range(get_height(image)):
flipped_color = get_color(image, -x, y)
set_color(vertical_image, x, y, flipped_color)
show(vertical_image)
return vertical_image
def test_flip_vertical(image: Image) -> None:
    """ Written by Abdelrahman Alatoom (101147742). Tests that every pixel of the image returned by flip_vertical matches the pixel of the original image read with the same negative-x convention used by flip_vertical."""
    vertical_image = flip_vertical(image)
    for x in range(get_width(image)):
        for y in range(get_height(image)):
            original_colour = get_color(image, x, y)
            vertical_colour = get_color(vertical_image, -x, y)
            if original_colour == vertical_colour:
                print('Test Passed')
            else:
                print('Test Failed')
| [
[
[
18,
19
],
[
28,
38
],
[
39,
50
],
[
92,
97
],
[
134,
138
],
[
166,
175
],
[
208,
218
],
[
256,
265
],
[
292,
301
],
[
344,
348
],
[
433,
438
],
[
423,
428
],
[
703,
712
],
[
754,
764
],
[
804,
813
],
[
864,
873
],
[
924,
934
],
[
983,
992
]
],
[
[
20,
25
]
],
[
[
61,
74
],
[
663,
676
]
],
[
[
397,
415
]
]
] |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'bugedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Au quel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.flag import Flag
from .edt_assigne import EdtAssigne
from .supprimer import NSupprimer
from secondaires.rapport.constantes import *
class EdtBugeditP(Presentation):
"""Classe définissant l'éditeur de rapport 'bugedit'.
"""
nom = "bugedit+"
def __init__(self, personnage, rapport):
"""Constructeur de l'éditeur"""
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, rapport)
if personnage and rapport:
self.construire(rapport)
def __getnewargs__(self):
return (None, None)
def construire(self, rapport):
"""Construction de l'éditeur"""
# Titre
titre = self.ajouter_choix("titre", "t", Uniligne, rapport, "titre")
titre.parent = self
titre.prompt = "Titre du rapport : "
titre.apercu = "{objet.titre}"
titre.aide_courte = \
"Entrez le |ent|titre|ff| du rapport ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nTitre actuel : |bc|{objet.titre}|ff|"
# Type
types = sorted(TYPES)
type = self.ajouter_choix("type", "y", Choix, rapport,
"type", types)
type.parent = self
type.prompt = "Type de rapport : "
type.apercu = "{objet.type}"
type.aide_courte = \
"Entrez le |ent|type|ff| de rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\Types disponibles : " \
"{}.\n\Type actuel : |bc|{{objet.type}}|ff|".format(
", ".join(types))
# Catégorie
categories = sorted(CATEGORIES)
categorie = self.ajouter_choix("catégorie", "c", Choix, rapport,
"categorie", categories)
categorie.parent = self
categorie.prompt = "Catégorie du rapport : "
categorie.apercu = "{objet.categorie}"
categorie.aide_courte = \
"Entrez la |ent|catégorie|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nCatégories disponibles : " \
"{}.\n\nCatégorie actuelle : |bc|{{objet.categorie}}|ff|".format(
", ".join(categories))
# Priorité
priorites = sorted(PRIORITES)
priorite = self.ajouter_choix("priorité", "p", Choix, rapport,
"priorite", priorites)
priorite.parent = self
priorite.prompt = "Priorité du rapport : "
priorite.apercu = "{objet.priorite}"
priorite.aide_courte = \
"Entrez la |ent|priorité|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nPriorités disponibles : " \
"{}.\n\nPriorité actuelle : |bc|{{objet.priorite}}|ff|".format(
", ".join(priorites))
# Description
description = self.ajouter_choix("description", "d", Description, \
rapport)
description.parent = self
description.apercu = "{objet.description.paragraphes_indentes}"
description.aide_courte = \
"| |tit|" + "Description du rapport #{}".format(
rapport.id).ljust(74) + \
"|ff||\n" + self.opts.separateur
# Public
public = self.ajouter_choix("public", "b", Flag, rapport, "public")
public.parent = self
# Statut
statut = self.ajouter_choix("statut", "s", Choix, rapport,
"statut", STATUTS)
statut.parent = self
statut.prompt = "Statut du rapport : "
statut.apercu = "{objet.statut}"
statut.aide_courte = \
"Entrez le |ent|statut|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nStatuts disponibles : " \
"{}.\n\nStatut actuel : |bc|{{objet.statut}}|ff|".format(
", ".join(STATUTS))
# Avancement
avancement = self.ajouter_choix("avancement", "a", Entier, rapport,
"avancement", 0, 100, "%")
avancement.parent = self
avancement.prompt = "Avancement de la tâche : "
avancement.apercu = "{objet.avancement}"
avancement.aide_courte = \
"Entrez l'|ent|avancement|ff| en pourcent de la tâche ou " \
"|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
"Avancement actuel : |bc|{valeur}|ff|"
# Assigné à
assigne_a = self.ajouter_choix("assigné à", "i", EdtAssigne, rapport)
assigne_a.parent = self
assigne_a.prompt = "Entrez un nom de joueur : "
assigne_a.apercu = "{objet.aff_assigne_a}"
assigne_a.aide_courte = \
"Entrez un |ent|Immortel|ff| à qui assigner ce rapport, ou " \
"|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
"Actuellement assigné à : {objet.aff_assigne_a}"
# Supprimer
sup = self.ajouter_choix("supprimer", "sup", NSupprimer,
rapport)
sup.parent = self
sup.aide_courte = "Souhaitez-vous réellement supprimer " \
"le rapport #{} ?".format(rapport.id)
| [
[
[
1962,
1974
],
[
2402,
2414
],
[
2740,
2752
]
],
[
[
2030,
2041
],
[
5182,
5193
]
],
[
[
2094,
2102
],
[
3069,
3077
]
],
[
[
2153,
2159
],
[
6227,
6233
]
],
[
[
2209,
2214
],
[
3488,
3493
],
[
4025,
4030
],
[
4625,
4630
],
[
5700,
5705
]
],
[
[
2263,
2267
],
[
5577,
5581
]
],
[
[
2293,
2303
],
[
6731,
6741
]
],
[
[
2327,
2337
],
[
7200,
7210
]
],
[
[
2381,
2382
],
[
3434,
3439
],
[
3955,
3965
],
[
4558,
4567
],
[
5742,
5749
],
[
6136,
6143
]
],
[
[
2390,
2401
]
]
] |
# coding=utf-8
import os
import sys
from dateutil import parser
from datetime import datetime
from pytz import timezone
import re
import datetime
import dateutil.parser
from datetime import timedelta
def modify_test_data(initial_data):
# set user name
# initial_data['procuringEntity']['name'] = u'Товариство З Обмеженою Відповідальністю \'Мак Медіа Прінт\''
initial_data['procuringEntity']['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
if 'contactPoint' in initial_data['procuringEntity']:
initial_data['procuringEntity']['contactPoint']['telephone'] = u'+380670444580'
initial_data['procuringEntity']['contactPoint']['url'] = u'https://dadadad.com'
initial_data['procuringEntity']['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
initial_data['procuringEntity']['identifier']['id'] = u'38580144'
# #
initial_data['buyers'][0]['identifier']['id'] = u'38580144'
initial_data['buyers'][0]['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
initial_data['buyers'][0]['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
initial_data['tender']['tenderPeriod']['startDate'] = add_day_to_date(initial_data['tender']['tenderPeriod']['startDate'])
# initial_data['procuringEntity']['name'] = u'Макстрой Діск, Товариство З Обмеженою Відповідальністю'
# initial_data['procuringEntity']['name'] = u'ФОП ОГАНІН ОЛЕКСАНДР ПЕТРОВИЧ'
return initial_data
def add_day_to_date(date):
dat = parser.parse(date)
new_date = (dat + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z')
new = parser.parse(new_date).isoformat()
return new
def get_currency_type(currency):
if isinstance(currency, str):
currency = currency.decode("utf-8")
currency_dictionary = {
u'грн': 'UAH'
}
currency_type = currency_dictionary.get(currency)
if currency_type:
return currency_type
else:
return currency
def get_month_number(month_name):
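    # The list below repeats the twelve months in several Russian/Ukrainian
    # spellings, so the position modulo 12 (plus 1) yields the month number.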
monthes = [u"января", u"февраля", u"марта", u"апреля", u"мая", u"июня",
u"июля", u"августа", u"сентября", u"октября", u"ноября", u"декабря",
u"янв.", u"февр.", u"мар.", u"апр.", u"мая.", u"июн.",
u"июл.", u"авг.", u"сент.", u"окт.", u"нояб.", u"дек.",
u"січ.", u"лют.", u"бер.", u"квіт.", u"трав.", u"черв.",
u"лип.", u"серп.", u"вер.", u"жовт.", u"лист.", u"груд.",
u"січня", u"лютого", u"березня", u"квітня", u"травня", u"червня",
u"липня", u"серпня", u"вересня", u"жовтня", u"листопада", u"грудня"]
return monthes.index(month_name) % 12 + 1
def get_time_with_offset(date):
date_obj = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M")
time_zone = timezone('Europe/Kiev')
localized_date = time_zone.localize(date_obj)
return localized_date.strftime('%Y-%m-%d %H:%M:%S.%f%z')
# def get_time_with_offset_formatted(date, input_format_date, output_format):
# date_obj = datetime.datetime.strptime(date, input_format_date)
# time_zone = timezone('Europe/Kiev')
# localized_date = time_zone.localize(date_obj)
# return localized_date.strftime(output_format)
def get_time_with_offset_formatted(date, input_format_date):
tz = timezone('Europe/Kiev')
date_obj = datetime.datetime.strptime(date, input_format_date)
res = tz.localize(date_obj)
result = res.isoformat()
return result
def get_current_date():
now = datetime.now()
return now.strftime('%d-%m-%Y')
def get_unit_code(name):
dictionary = {
u'кілограми': u'KGM',
u'пара': u'PR',
u'літр': u'LTR',
u'набір': u'SET',
u'пачок': u'NMP',
u'метри': u'MTR',
u'лот': u'LO',
u'послуга': u'E48',
u'метри кубічні': u'MTQ',
u'ящик': u'BX',
u'рейс': u'E54',
u'тони': u'TNE',
u'метри квадратні': u'MTK',
u'кілометри': u'KMT',
u'штуки': u'H87',
u'місяць': u'MON',
u'пачка': u'RM',
u'упаковка': u'PK',
u'гектар': u'HAR',
u'блок': u'D64',
u'Флакон': u'VI'
}
expected_name = dictionary.get(name)
if expected_name:
return expected_name
else:
return name
def get_unit_name(current_name):
if isinstance(current_name, str):
current_name = current_name.decode("utf-8")
dictionary = {
u'кілограми': {u'килограмм', u'килограмма', u'килограммов'},
u'пара': {u'пара', u'пары', u'пар'},
u'літр': {u'литр', u'литра', u'литров'},
u'набір': {u'набор', u'набора', u'наборов'},
u'пачок': {u'пачка', u'пачек', u'пачки'},
u'метри': {u'метр', u'метра', u'метров'},
u'лот': {u'лот', u'лоты', u'лотов'},
u'послуга': {u'услуга', u'услуг', u'услуги'},
u'метри кубічні': {u'метр кубический', u'метра кубического', u'метров кубических'},
u'ящик': {u'ящик', u'ящика', u'ящиков'},
u'рейс': {u'рейс', u'рейса', u'рейсов'},
u'тони': {u'тонна', u'тонны', u'тонн'},
u'метри квадратні': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
u'кілометри': {u'километр', u'километров', u'километра'},
u'штуки': {u'штука', u'штуки', u'штук', u'Штуки'},
u'місяць': {u'месяц', u'месяца', u'месяцев'},
u'пачка': {u'пачка', u'пачек', u'пачкики'},
u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
u'гектар': {u'гектар', u'гектара', u'гектаров'},
u'блок': {u'блок', u'блока', u'блоков'}
}
expected_name = None
for name, variants in dictionary.iteritems():
if current_name in variants:
expected_name = name
if expected_name:
return expected_name
else:
return current_name
def get_unit_name_ru(current_name):
if isinstance(current_name, str):
current_name = current_name.decode("utf-8")
dictionary = {
u'килограмм': {u'килограмм', u'килограмма', u'килограммов', u'кілограми'},
u'пара': {u'пара', u'пары', u'пар'},
u'литр': {u'литр', u'литра', u'литров'},
u'набора': {u'набір', u'набора', u'наборов'},
u'пачек': {u'пачка', u'пачек', u'пачки'},
u'метр': {u'метр', u'метра', u'метров'},
u'лот': {u'лот', u'лоты', u'лотов'},
u'услуга': {u'услуга', u'услуг', u'услуги'},
u'метр .куб.': {u'метр кубический', u'метра кубического', u'метров кубических'},
u'ящик': {u'ящик', u'ящика', u'ящиков'},
u'рейс': {u'рейс', u'рейса', u'рейсов'},
u'тонны': {u'тонна', u'тонны', u'тонн'},
u'метр квадратный': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
u'километры': {u'километр', u'километров', u'километра'},
u'штуки': {u'штука', u'штуки', u'штук'},
u'месяц': {u'месяц', u'месяца', u'месяцев'},
u'пачка': {u'пачка', u'пачек', u'пачкики'},
u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
u'гектар': {u'гектар', u'гектара', u'гектаров'},
u'блок': {u'блок', u'блока', u'блоков'}
}
expected_name = None
for name, variants in dictionary.iteritems():
if current_name in variants:
expected_name = name
if expected_name:
return expected_name
else:
return current_name
def get_classification_type(classifications):
classifications_dictionary = {
u'ДК 016:2010': u'ДКПП',
u'ДК 021:2015': u'CPV',
u'ДК 18-2000': u'ДК018',
u'ДК003: 2010': u'ДК003',
u'ДК003:2010': u'ДК003',
u'ДК 015-97': u'ДК015',
u'ДК021': u'CPV'
}
classifications_type = classifications_dictionary.get(classifications)
if classifications_type:
return classifications_type
else:
return classifications
def get_status_type(status_name):
status_name = status_name.strip()
type_dictionary = {
u'Период уточнений': 'active.enquiries',
u'Період уточнень': 'active.enquiries',
u'Период уточнений завершен': 'active.enquiries.ended',
u'Період уточнень завершено': 'active.enquiries.ended',
u'Подача предложений': 'active.tendering',
u'Подача пропозицій': 'active.tendering',
u'Торги': 'active.auction',
u'Квалификация победителя': 'active.qualification',
u'Квалификація переможця': 'active.qualification',
u'Предложения рассмотрены': 'active.awarded',
u'Пропозиції розглянуті': 'active.awarded',
u'Закупка не состоялась': 'unsuccessful',
u'Закупівля не відбулась': 'unsuccessful',
u'Завершено': 'complete',
u'Отменено': 'cancelled',
u'Відмінено': 'cancelled',
u'Розглядається': 'pending',
u'Кваліфікація учасника': 'active.pre-qualification',
u'Пауза перед аукціоном': 'active.pre-qualification.stand-still',
u'Прекваліфікація': 'active.pre-qualification',
u'Преквалификация': 'active.pre-qualification'
}
type_name = type_dictionary.get(status_name)
return type_name
def convert_float_to_string(number):
result = number
if type(number) is float:
return format(number, '.2f')
else:
return result
def get_claim_status (status):
type_dictionary = {
u'Вiдправлено': 'claim',
u'Отримано вiдповiдь': 'answered',
u'Задоволено': 'resolved',
u'Скасована': 'cancelled',
u'Не вирiшена, обробляється': 'pending',
u'Залишена без відповіді': 'ignored',
u'Не задоволено': 'declined',
u'Вимога відхилена': 'invalid',
u'Запит для пiдтверждения скасування': 'stopping'
}
type_name = type_dictionary.get(status)
return type_name
def get_procurementMethod_Type (type):
type_dictionary = {
u'Конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU',
u'Конкурентний діалог 1-ий етап': 'competitiveDialogueUA',
u'Переговорна процедура для потреб оборони': 'aboveThresholdUA.defense',
u'Укладання рамкової угоди': 'closeFrameworkAgreementUA',
u'Допорогові закупівлі': 'belowThreshold',
u'Переговорна процедура': 'negotiation',
u'Звіт про укладений договір': 'reporting',
u'Відкриті торги': 'aboveThresholdUA',
u'Відкриті торги з публікацією англійською мовою': 'aboveThresholdEU',
u'Відкриті торги для закупівлі енергосервісу': 'esco'
}
type_name = type_dictionary.get(type)
return type_name
def sum_of_numbers(number, value):
number = int(number) + int(value)
return number
def abs_number(number):
return abs(int(number))
def get_abs_item_index(lot_index, item_index, items_count):
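    # Flattens a 1-based (lot_index, item_index) pair into a single 1-based
    # index, assuming every lot holds items_count items.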
abs_index = ((int(lot_index)-1) * int(items_count)) + int(item_index)
return abs_index
def get_match_from_string(string, pattern, group):
    result = 'null'
    p = re.compile(pattern)
    m = p.search(string)
    if m:
        return m.group(int(group))
return result
def get_percent(value):
value = value * 100
return format(value, '.0f')
def get_conversion_to_int(value):
return int(float(value))
def get_cause(cause_text):
cause_dictionary = {
u'Закупівля творів мистецтва або закупівля, пов’язана із захистом прав інтелектуальної власності, або укладення договору про закупівлю з переможцем архітектурного чи мистецького конкурсу': u'artContestIP',
u'Відсутність конкуренції (у тому числі з технічних причин) на відповідному ринку, внаслідок чого договір про закупівлю може бути укладено лише з одним постачальником, завідсутності при цьому альтернативи': u'noCompetition',
u'Нагальна потреба у здійсненні закупівлі у зв’язку з виникненням особливих економічних чи соціальних обставин, яка унеможливлює дотримання замовниками строків для проведення тендеру, а саме пов’язаних з негайною ліквідацією наслідків надзвичайних ситуацій, а також наданням у встановленому порядку Україною гуманітарної допомоги іншим державам. Застосування переговорної процедури закупівлі в таких випадках здійснюється за рішенням замовника щодо кожної процедури': u'quick',
u'Якщо замовником було двічі відмінено тендер через відсутність достатньої кількостіучасників,прицьому предмет закупівлі, його технічні та якісніхарактеристики, атакож вимогидо учасника не повинні відрізнятисявід вимог, що були визначені замовникому тедерній документації': u'twiceUnsuccessful',
u'Потреба здійснити додаткову закупівлю в того самого постачальника з метою уніфікації, стандартизації або забезпечення сумісності з наявними товарами, технологіями, роботами чи послугами, якщо заміна попереднього постачальника (виконавця робіт, надавача послуг) може призвести до несумісності або виникнення проблем технічного характеру,пов’язаних з експлуатацією та обслуговуванням': u'additionalPurchase',
u'Необхідність проведення додаткових будівельних робіт, не зазначених у початковому проекті, але які стали через непередбачувані обставини необхідними для виконання проекту за сукупності таких умов: договір буде укладено з попереднім виконавцем цих робіт, такі роботи технічно чи економічно пов’язані з головним (первинним) договором; загальна вартість додаткових робіт не перевищує 50 відсотків вартості головного (первинного) договору': u'additionalConstruction',
u'Закупівля юридичних послуг, пов’язаних із захистом прав та інтересів України, у тому числі з метою захисту національної безпеки і оборони, під час врегулювання спорів, розгляду в закордонних юрисдикційних органах справ за участю іноземного суб’єкта та України, на підставі рішення Кабінету Міністрів України або введених в дію відповідно до закону рішень Ради національної безпеки і оборони України': u'stateLegalServices'
}
cause_type = cause_dictionary.get(cause_text)
if cause_type:
return cause_type
else:
return cause_text
def get_items_from_lot(items, lot_id):
lot_items = []
for item in items:
if item['relatedLot'] == lot_id:
lot_items.append(item)
return lot_items
def get_ECP_key(path):
return os.path.join(os.getcwd(), path)
def get_date_formatting(date, format_day):
return dateutil.parser.parse(date).date().strftime(format_day)
def get_scenarios_name():
name = ''
for param in sys.argv:
if 'txt' in param:
name = param
return name
def is_click_button(item_index, items_count, lot_index):
status = 'false'
if int(item_index) < int(items_count) and lot_index > 1:
return 'true'
return status
def get_milestones_title(title):
titles = {
u'підписання договору': 'signingTheContract',
u'поставка товару': 'deliveryOfGoods',
u'дата подання заявки': 'submissionDateOfApplications',
u'дата закінчення звітного періоду': 'endDateOfTheReportingPeriod',
u'дата виставлення рахунку': 'dateOfInvoicing',
u'виконання робіт': 'executionOfWorks',
u'надання послуг': 'submittingServices',
u'інша подія': 'anotherEvent'
}
title_name = titles.get(title)
return title_name
def get_milestones_code(code):
codes = {
u'Аванс': 'prepayment',
u'Пiсляоплата': 'postpayment'
}
code_name = codes.get(code)
return code_name
def get_milestones_duration_type(type):
types = {
u'робочих': 'working',
u'банківськіх': 'banking',
u'календарних': 'calendar'
}
type_name = types.get(type)
return type_name
def get_rationaleType (type):
type_dictionary = {
u'Зменшення обсягів закупівлі': 'volumeCuts',
u'Зміна сторонніх показників (курсу, тарифів...)': 'thirdParty',
u'Зміна ціни у зв’язку із зміною ставок податків і зборів': 'taxRate',
u'Покращення якості предмета закупівлі': 'qualityImprovement',
u'Узгоджене зменшення ціни': 'priceReduction',
u'Зміна ціни за одиницю товару': 'itemPriceVariation',
u'Продовження строку дії договору на наступний рік': 'fiscalYearExtension',
u'Продовження строку дії договору (черездокументально підтверджені об’єктивні обставини)': 'durationExtension',
}
type_name = type_dictionary.get(type)
return type_name
def change_fake_date():
return (datetime.datetime.now(timezone('Europe/Kiev')) + timedelta(days=3)).strftime('%Y-%m-%dT%H:%M:%S.%f%z')
| [
[
[
23,
25
],
[
14262,
14264
],
[
14275,
14277
]
],
[
[
33,
36
],
[
14465,
14468
]
],
[
[
58,
64
],
[
1549,
1555
],
[
1651,
1657
]
],
[
[
86,
94
]
],
[
[
112,
120
],
[
2817,
2825
],
[
3318,
3326
],
[
16446,
16454
]
],
[
[
128,
130
],
[
11023,
11025
]
],
[
[
138,
146
],
[
2750,
2758
],
[
3357,
3365
],
[
3524,
3532
],
[
16424,
16432
]
],
[
[
154,
169
],
[
14350,
14358
]
],
[
[
191,
200
],
[
1590,
1599
],
[
16473,
16482
]
],
[
[
207,
223
]
],
[
[
1516,
1531
],
[
1230,
1245
]
],
[
[
1706,
1723
]
],
[
[
2014,
2030
]
],
[
[
2707,
2727
]
],
[
[
3252,
3282
]
],
[
[
3494,
3510
]
],
[
[
3581,
3594
]
],
[
[
4320,
4333
]
],
[
[
5884,
5900
]
],
[
[
7451,
7474
]
],
[
[
7944,
7959
]
],
[
[
9193,
9216
]
],
[
[
9351,
9367
]
],
[
[
9856,
9882
]
],
[
[
10643,
10657
]
],
[
[
10736,
10746
]
],
[
[
10790,
10808
]
],
[
[
10947,
10968
]
],
[
[
11153,
11164
]
],
[
[
11235,
11256
]
],
[
[
11300,
11309
]
],
[
[
14052,
14070
]
],
[
[
14232,
14243
]
],
[
[
14300,
14319
]
],
[
[
14412,
14430
]
],
[
[
14549,
14564
]
],
[
[
14730,
14750
]
],
[
[
15275,
15294
]
],
[
[
15451,
15479
]
],
[
[
15667,
15684
]
],
[
[
16392,
16408
]
]
] |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
##
# Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
#
# @file genModelsRecurrent_v2.py
# @date 19 October 2021
# @brief Generate recurrent model tcs
# @author Jihoon lee <jhoon.it.lee@samsung.com>
from recorder_v2 import record_v2, inspect_file
from zoneout import Zoneout
import torch
class FCUnroll(torch.nn.Module):
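    """Applies a small stack of Linear(1, 1) layers repeatedly for
    unroll_for steps; the loss is currently just Identity on the output."""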
def __init__(self, unroll_for=1, num_fc=1):
super().__init__()
self.fcs = torch.nn.ModuleList([torch.nn.Linear(1, 1) for i in range(num_fc)])
self.unroll_for = unroll_for
# self.loss = torch.nn.MSELoss()
self.loss = torch.nn.Identity()
def forward(self, inputs, labels):
output = inputs[0]
for i in range(self.unroll_for):
for fc in self.fcs:
output = fc(output)
loss = self.loss(output)
# loss = self.loss(output, labels[0])
return output, loss
class RNNCellStacked(torch.nn.Module):
def __init__(self, unroll_for=1, num_rnn=1, input_size=1, hidden_size=1):
super().__init__()
self.rnns = torch.nn.ModuleList(
[
torch.nn.RNNCell(input_size, hidden_size)
for _ in range(num_rnn)
]
)
self.unroll_for = unroll_for
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
hs = [torch.zeros_like(inputs[0]) for _ in self.rnns]
out = inputs[0]
ret = []
for _ in range(self.unroll_for):
for i, rnn in enumerate(self.rnns):
hs[i] = rnn(out, hs[i])
out = hs[i]
ret.append(out)
ret = torch.stack(ret, dim=1)
loss = self.loss(ret, labels[0])
return ret, loss
class LSTMStacked(torch.nn.Module):
def __init__(self, num_lstm=1, bidirectional=False):
super().__init__()
self.input_size = self.hidden_size = 2
self.num_lstm = num_lstm
self.bidirectional=bidirectional
self.lstms = torch.nn.ModuleList(
[
torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, batch_first=True, bidirectional=bidirectional)
# Intended comment
# torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, num_layers=num_lstm, batch_first=True, bidirectional=bidirectional)
for i in range(num_lstm)
]
)
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
out = inputs[0]
states = inputs[1:]
# hs = [states[2 * i] for i in range(self.num_lstm)]
hs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)]
# cs = [states[2 * i + 1] for i in range(self.num_lstm)]
cs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)]
for i, (lstm, h, c) in enumerate(zip(self.lstms, hs, cs)):
out, (hs[i], cs[i]) = lstm(out, (h, c))
loss = self.loss(out, labels[0])
return out, loss
class LSTMCellStacked(torch.nn.Module):
def __init__(self, unroll_for=2, num_lstmcell=1):
super().__init__()
self.input_size = self.hidden_size = 2
self.lstmcells = torch.nn.ModuleList(
[
torch.nn.LSTMCell(self.input_size, self.hidden_size)
for _ in range(num_lstmcell)
]
)
self.unroll_for = unroll_for
self.num_lstmcell = num_lstmcell
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
out = inputs[0]
states = inputs[1:]
hs = [states[2 * i] for i in range(self.num_lstmcell)]
cs = [states[2 * i + 1] for i in range(self.num_lstmcell)]
ret = []
for _ in range(self.unroll_for):
for i, (lstm, h, c) in enumerate(zip(self.lstmcells, hs, cs)):
hs[i], cs[i] = lstm(out, (h, c))
out = hs[i]
ret.append(out)
ret = torch.stack(ret, dim=1)
loss = self.loss(ret, labels[0])
return ret, loss
class ZoneoutLSTMStacked(torch.nn.Module):
def __init__(self, batch_size=3, unroll_for=2, num_lstm=1, hidden_state_zoneout_rate=1, cell_state_zoneout_rate=1):
super().__init__()
self.input_size = self.hidden_size = 2
self.cell_state_zoneout_rate = cell_state_zoneout_rate
self.zoneout_lstms = torch.nn.ModuleList(
[
Zoneout(batch_size, self.input_size, self.hidden_size, unroll_for, hidden_state_zoneout_rate, cell_state_zoneout_rate)
for _ in range(num_lstm)
]
)
self.unroll_for = unroll_for
self.num_lstm = num_lstm
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
out = inputs[0]
states = inputs[1:]
hs = [states[2 * i] for i in range(self.num_lstm)]
cs = [states[2 * i + 1] for i in range(self.num_lstm)]
ret = []
for num_unroll in range(self.unroll_for):
for i, (zoneout_lstm, h, c) in enumerate(zip(self.zoneout_lstms, hs, cs)):
hs[i], cs[i] = zoneout_lstm(out, (h, c, num_unroll))
out = hs[i]
ret.append(out)
ret = torch.stack(ret, dim=1)
loss = self.loss(ret, labels[0])
return ret, loss
class GRUCellStacked(torch.nn.Module):
def __init__(self, unroll_for=2, num_grucell=1):
super().__init__()
self.input_size = self.hidden_size = 2
self.grus = torch.nn.ModuleList(
[
torch.nn.GRUCell(self.input_size, self.hidden_size, bias=True)
for _ in range(num_grucell)
]
)
self.unroll_for = unroll_for
self.loss = torch.nn.MSELoss()
def forward(self, inputs, labels):
out = inputs[0]
hs = inputs[1:]
ret = []
for _ in range(self.unroll_for):
for i, (gru, h) in enumerate(zip(self.grus, hs)):
hs[i] = gru(out, h)
out = hs[i]
ret.append(out)
ret = torch.stack(ret, dim=1)
loss = self.loss(ret, labels[0])
return ret, loss
if __name__ == "__main__":
record_v2(
FCUnroll(unroll_for=5),
iteration=2,
input_dims=[(1,)],
label_dims=[(1,)],
name="fc_unroll_single",
)
record_v2(
FCUnroll(unroll_for=2, num_fc=2),
iteration=2,
input_dims=[(1,)],
label_dims=[(1,)],
name="fc_unroll_stacked",
)
record_v2(
FCUnroll(unroll_for=2, num_fc=2),
iteration=2,
input_dims=[(1,)],
label_dims=[(1,)],
name="fc_unroll_stacked_clipped",
clip=True
)
record_v2(
RNNCellStacked(unroll_for=2, num_rnn=1, input_size=2, hidden_size=2),
iteration=2,
input_dims=[(3, 2)],
label_dims=[(3, 2, 2)],
name="rnncell_single",
)
record_v2(
RNNCellStacked(unroll_for=2, num_rnn=2, input_size=2, hidden_size=2),
iteration=2,
input_dims=[(3, 2)],
label_dims=[(3, 2, 2)],
name="rnncell_stacked",
)
unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, False]
record_v2(
LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
iteration=iteration,
input_dims=[(batch_size, unroll_for, feature_size)],
# input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="lstm_single",
)
unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, False]
record_v2(
LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
iteration=iteration,
input_dims=[(batch_size, unroll_for, feature_size)],
# input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="lstm_stacked",
)
unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, True]
record_v2(
LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
iteration=iteration,
input_dims=[(batch_size, unroll_for, feature_size)],
# input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],
label_dims=[(batch_size, unroll_for, 2 * unit)],
name="bidirectional_lstm_single",
)
unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, True]
record_v2(
LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
iteration=iteration,
input_dims=[(batch_size, unroll_for, feature_size)],
# input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],
label_dims=[(batch_size, unroll_for, 2 * unit)],
name="bidirectional_lstm_stacked",
)
unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 1, 2, 3, 2, 2, 2]
record_v2(
LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],
label_dims=[(batch_size, unroll_for, unit)],
name="lstmcell_single",
)
unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 2, 2, 3, 2, 2, 2]
record_v2(
LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],
label_dims=[(batch_size, unroll_for, unit)],
name="lstmcell_stacked",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_000_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_000_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_050_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_050_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_100_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_100_000",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_000_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_000_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_050_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_050_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_100_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.5]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_100_050",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_000_100",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_000_100",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_050_100",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_050_100",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_single_100_100",
)
unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 1.0]
record_v2(
ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
label_dims=[(batch_size, unroll_for, unit)],
name="zoneout_lstm_stacked_100_100",
)
unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 1, 3, 2, 2, 2]
record_v2(
GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],
label_dims=[(batch_size, unroll_for, unit)],
name="grucell_single",
)
unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 2, 3, 2, 2, 2]
record_v2(
GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),
iteration=iteration,
input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],
label_dims=[(batch_size, unroll_for, unit)],
name="grucell_stacked",
)
# inspect_file("lstm_single.nnmodelgolden")
| [
[
[
293,
302
],
[
6453,
6462
],
[
6615,
6624
],
[
6788,
6797
],
[
6987,
6996
],
[
7200,
7209
],
[
7525,
7534
],
[
8016,
8025
],
[
8507,
8516
],
[
9015,
9024
],
[
9521,
9530
],
[
9952,
9961
],
[
10442,
10451
],
[
11059,
11068
],
[
11677,
11686
],
[
12294,
12303
],
[
12912,
12921
],
[
13529,
13538
],
[
14147,
14156
],
[
14764,
14773
],
[
15382,
15391
],
[
15999,
16008
],
[
16617,
16626
],
[
17234,
17243
],
[
17852,
17861
],
[
18469,
18478
],
[
19087,
19096
],
[
19704,
19713
],
[
20322,
20331
],
[
20939,
20948
],
[
21485,
21494
],
[
21885,
21894
]
],
[
[
304,
316
]
],
[
[
337,
344
],
[
4680,
4687
]
],
[
[
352,
357
],
[
374,
379
],
[
977,
982
],
[
1805,
1810
],
[
3273,
3278
],
[
4325,
4330
],
[
5593,
5598
],
[
486,
491
],
[
507,
512
],
[
652,
657
],
[
1120,
1125
],
[
1171,
1176
],
[
1334,
1339
],
[
1407,
1412
],
[
1696,
1701
],
[
2049,
2054
],
[
2100,
2105
],
[
2583,
2588
],
[
2769,
2774
],
[
2819,
2824
],
[
2952,
2957
],
[
3002,
3007
],
[
3444,
3449
],
[
3495,
3500
],
[
3715,
3720
],
[
4209,
4214
],
[
4629,
4634
],
[
4954,
4959
],
[
5481,
5486
],
[
5758,
5763
],
[
5809,
5814
],
[
5997,
6002
],
[
6331,
6336
]
],
[
[
365,
373
],
[
6472,
6480
],
[
6634,
6642
],
[
6807,
6815
]
],
[
[
962,
976
],
[
7006,
7020
],
[
7219,
7233
]
],
[
[
1793,
1804
],
[
7544,
7555
],
[
8035,
8046
],
[
8526,
8537
],
[
9034,
9045
]
],
[
[
3257,
3272
],
[
9540,
9555
],
[
9971,
9986
]
],
[
[
4306,
4324
],
[
10461,
10479
],
[
11078,
11096
],
[
11696,
11714
],
[
12313,
12331
],
[
12931,
12949
],
[
13548,
13566
],
[
14166,
14184
],
[
14783,
14801
],
[
15401,
15419
],
[
16018,
16036
],
[
16636,
16654
],
[
17253,
17271
],
[
17871,
17889
],
[
18488,
18506
],
[
19106,
19124
],
[
19723,
19741
],
[
20341,
20359
],
[
20958,
20976
]
],
[
[
5578,
5592
],
[
21504,
21518
],
[
21904,
21918
]
],
[
[
7414,
7424
],
[
7667,
7677
],
[
7846,
7856
]
],
[
[
7426,
7434
],
[
7565,
7573
]
],
[
[
7436,
7446
],
[
7655,
7665
],
[
7834,
7844
]
],
[
[
7448,
7452
],
[
7858,
7862
]
],
[
[
7454,
7466
],
[
7679,
7691
]
],
[
[
7468,
7477
],
[
7623,
7632
]
],
[
[
7479,
7492
],
[
7589,
7602
]
],
[
[
7905,
7915
],
[
8158,
8168
],
[
8337,
8347
]
],
[
[
7917,
7925
],
[
8056,
8064
]
],
[
[
7927,
7937
],
[
8146,
8156
],
[
8325,
8335
]
],
[
[
7939,
7943
],
[
8349,
8353
]
],
[
[
7945,
7957
],
[
8170,
8182
]
],
[
[
7959,
7968
],
[
8114,
8123
]
],
[
[
7970,
7983
],
[
8080,
8093
]
],
[
[
8397,
8407
],
[
8649,
8659
],
[
8828,
8838
]
],
[
[
8409,
8417
],
[
8547,
8555
]
],
[
[
8419,
8429
],
[
8637,
8647
],
[
8816,
8826
]
],
[
[
8431,
8435
],
[
8844,
8848
]
],
[
[
8437,
8449
],
[
8661,
8673
]
],
[
[
8451,
8460
],
[
8605,
8614
]
],
[
[
8462,
8475
],
[
8571,
8584
]
],
[
[
8905,
8915
],
[
9157,
9167
],
[
9336,
9346
]
],
[
[
8917,
8925
],
[
9055,
9063
]
],
[
[
8927,
8937
],
[
9145,
9155
],
[
9324,
9334
]
],
[
[
8939,
8943
],
[
9352,
9356
]
],
[
[
8945,
8957
],
[
9169,
9181
]
],
[
[
8959,
8968
],
[
9113,
9122
]
],
[
[
8970,
8983
],
[
9079,
9092
]
],
[
[
9414,
9424
],
[
9567,
9577
],
[
9782,
9792
]
],
[
[
9426,
9438
],
[
9592,
9604
],
[
9733,
9745
]
],
[
[
9440,
9449
],
[
9721,
9730
]
],
[
[
9451,
9461
],
[
9657,
9667
],
[
9688,
9698
],
[
9770,
9780
]
],
[
[
9463,
9467
],
[
9700,
9704
],
[
9794,
9798
]
],
[
[
9469,
9481
],
[
9669,
9681
]
],
[
[
9483,
9492
],
[
9625,
9634
]
],
[
[
9845,
9855
],
[
9998,
10008
],
[
10213,
10223
]
],
[
[
9857,
9869
],
[
10023,
10035
],
[
10164,
10176
]
],
[
[
9871,
9880
],
[
10152,
10161
]
],
[
[
9882,
9892
],
[
10088,
10098
],
[
10119,
10129
],
[
10201,
10211
]
],
[
[
9894,
9898
],
[
10131,
10135
],
[
10225,
10229
]
],
[
[
9900,
9912
],
[
10100,
10112
]
],
[
[
9914,
9923
],
[
10056,
10065
]
],
[
[
10277,
10287
],
[
10514,
10524
],
[
10819,
10829
]
],
[
[
10289,
10297
],
[
10535,
10543
],
[
10774,
10782
]
],
[
[
10299,
10308
],
[
10762,
10771
]
],
[
[
10310,
10320
],
[
10491,
10501
],
[
10698,
10708
],
[
10729,
10739
],
[
10807,
10817
]
],
[
[
10322,
10326
],
[
10741,
10745
],
[
10831,
10835
]
],
[
[
10328,
10340
],
[
10710,
10722
]
],
[
[
10342,
10351
],
[
10666,
10675
]
],
[
[
10353,
10378
],
[
10571,
10596
]
],
[
[
10380,
10403
],
[
10622,
10645
]
],
[
[
10894,
10904
],
[
11131,
11141
],
[
11436,
11446
]
],
[
[
10906,
10914
],
[
11152,
11160
],
[
11391,
11399
]
],
[
[
10916,
10925
],
[
11379,
11388
]
],
[
[
10927,
10937
],
[
11108,
11118
],
[
11315,
11325
],
[
11346,
11356
],
[
11424,
11434
]
],
[
[
10939,
10943
],
[
11358,
11362
],
[
11448,
11452
]
],
[
[
10945,
10957
],
[
11327,
11339
]
],
[
[
10959,
10968
],
[
11283,
11292
]
],
[
[
10970,
10995
],
[
11188,
11213
]
],
[
[
10997,
11020
],
[
11239,
11262
]
],
[
[
11512,
11522
],
[
11749,
11759
],
[
12054,
12064
]
],
[
[
11524,
11532
],
[
11770,
11778
],
[
12009,
12017
]
],
[
[
11534,
11543
],
[
11997,
12006
]
],
[
[
11545,
11555
],
[
11726,
11736
],
[
11933,
11943
],
[
11964,
11974
],
[
12042,
12052
]
],
[
[
11557,
11561
],
[
11976,
11980
],
[
12066,
12070
]
],
[
[
11563,
11575
],
[
11945,
11957
]
],
[
[
11577,
11586
],
[
11901,
11910
]
],
[
[
11588,
11613
],
[
11806,
11831
]
],
[
[
11615,
11638
],
[
11857,
11880
]
],
[
[
12129,
12139
],
[
12366,
12376
],
[
12671,
12681
]
],
[
[
12141,
12149
],
[
12387,
12395
],
[
12626,
12634
]
],
[
[
12151,
12160
],
[
12614,
12623
]
],
[
[
12162,
12172
],
[
12343,
12353
],
[
12550,
12560
],
[
12581,
12591
],
[
12659,
12669
]
],
[
[
12174,
12178
],
[
12593,
12597
],
[
12683,
12687
]
],
[
[
12180,
12192
],
[
12562,
12574
]
],
[
[
12194,
12203
],
[
12518,
12527
]
],
[
[
12205,
12230
],
[
12423,
12448
]
],
[
[
12232,
12255
],
[
12474,
12497
]
],
[
[
12747,
12757
],
[
12984,
12994
],
[
13289,
13299
]
],
[
[
12759,
12767
],
[
13005,
13013
],
[
13244,
13252
]
],
[
[
12769,
12778
],
[
13232,
13241
]
],
[
[
12780,
12790
],
[
12961,
12971
],
[
13168,
13178
],
[
13199,
13209
],
[
13277,
13287
]
],
[
[
12792,
12796
],
[
13211,
13215
],
[
13301,
13305
]
],
[
[
12798,
12810
],
[
13180,
13192
]
],
[
[
12812,
12821
],
[
13136,
13145
]
],
[
[
12823,
12848
],
[
13041,
13066
]
],
[
[
12850,
12873
],
[
13092,
13115
]
],
[
[
13364,
13374
],
[
13601,
13611
],
[
13906,
13916
]
],
[
[
13376,
13384
],
[
13622,
13630
],
[
13861,
13869
]
],
[
[
13386,
13395
],
[
13849,
13858
]
],
[
[
13397,
13407
],
[
13578,
13588
],
[
13785,
13795
],
[
13816,
13826
],
[
13894,
13904
]
],
[
[
13409,
13413
],
[
13828,
13832
],
[
13918,
13922
]
],
[
[
13415,
13427
],
[
13797,
13809
]
],
[
[
13429,
13438
],
[
13753,
13762
]
],
[
[
13440,
13465
],
[
13658,
13683
]
],
[
[
13467,
13490
],
[
13709,
13732
]
],
[
[
13982,
13992
],
[
14219,
14229
],
[
14524,
14534
]
],
[
[
13994,
14002
],
[
14240,
14248
],
[
14479,
14487
]
],
[
[
14004,
14013
],
[
14467,
14476
]
],
[
[
14015,
14025
],
[
14196,
14206
],
[
14403,
14413
],
[
14434,
14444
],
[
14512,
14522
]
],
[
[
14027,
14031
],
[
14446,
14450
],
[
14536,
14540
]
],
[
[
14033,
14045
],
[
14415,
14427
]
],
[
[
14047,
14056
],
[
14371,
14380
]
],
[
[
14058,
14083
],
[
14276,
14301
]
],
[
[
14085,
14108
],
[
14327,
14350
]
],
[
[
14599,
14609
],
[
14836,
14846
],
[
15141,
15151
]
],
[
[
14611,
14619
],
[
14857,
14865
],
[
15096,
15104
]
],
[
[
14621,
14630
],
[
15084,
15093
]
],
[
[
14632,
14642
],
[
14813,
14823
],
[
15020,
15030
],
[
15051,
15061
],
[
15129,
15139
]
],
[
[
14644,
14648
],
[
15063,
15067
],
[
15153,
15157
]
],
[
[
14650,
14662
],
[
15032,
15044
]
],
[
[
14664,
14673
],
[
14988,
14997
]
],
[
[
14675,
14700
],
[
14893,
14918
]
],
[
[
14702,
14725
],
[
14944,
14967
]
],
[
[
15217,
15227
],
[
15454,
15464
],
[
15759,
15769
]
],
[
[
15229,
15237
],
[
15475,
15483
],
[
15714,
15722
]
],
[
[
15239,
15248
],
[
15702,
15711
]
],
[
[
15250,
15260
],
[
15431,
15441
],
[
15638,
15648
],
[
15669,
15679
],
[
15747,
15757
]
],
[
[
15262,
15266
],
[
15681,
15685
],
[
15771,
15775
]
],
[
[
15268,
15280
],
[
15650,
15662
]
],
[
[
15282,
15291
],
[
15606,
15615
]
],
[
[
15293,
15318
],
[
15511,
15536
]
],
[
[
15320,
15343
],
[
15562,
15585
]
],
[
[
15834,
15844
],
[
16071,
16081
],
[
16376,
16386
]
],
[
[
15846,
15854
],
[
16092,
16100
],
[
16331,
16339
]
],
[
[
15856,
15865
],
[
16319,
16328
]
],
[
[
15867,
15877
],
[
16048,
16058
],
[
16255,
16265
],
[
16286,
16296
],
[
16364,
16374
]
],
[
[
15879,
15883
],
[
16298,
16302
],
[
16388,
16392
]
],
[
[
15885,
15897
],
[
16267,
16279
]
],
[
[
15899,
15908
],
[
16223,
16232
]
],
[
[
15910,
15935
],
[
16128,
16153
]
],
[
[
15937,
15960
],
[
16179,
16202
]
],
[
[
16452,
16462
],
[
16689,
16699
],
[
16994,
17004
]
],
[
[
16464,
16472
],
[
16710,
16718
],
[
16949,
16957
]
],
[
[
16474,
16483
],
[
16937,
16946
]
],
[
[
16485,
16495
],
[
16666,
16676
],
[
16873,
16883
],
[
16904,
16914
],
[
16982,
16992
]
],
[
[
16497,
16501
],
[
16916,
16920
],
[
17006,
17010
]
],
[
[
16503,
16515
],
[
16885,
16897
]
],
[
[
16517,
16526
],
[
16841,
16850
]
],
[
[
16528,
16553
],
[
16746,
16771
]
],
[
[
16555,
16578
],
[
16797,
16820
]
],
[
[
17069,
17079
],
[
17306,
17316
],
[
17611,
17621
]
],
[
[
17081,
17089
],
[
17327,
17335
],
[
17566,
17574
]
],
[
[
17091,
17100
],
[
17554,
17563
]
],
[
[
17102,
17112
],
[
17283,
17293
],
[
17490,
17500
],
[
17521,
17531
],
[
17599,
17609
]
],
[
[
17114,
17118
],
[
17533,
17537
],
[
17623,
17627
]
],
[
[
17120,
17132
],
[
17502,
17514
]
],
[
[
17134,
17143
],
[
17458,
17467
]
],
[
[
17145,
17170
],
[
17363,
17388
]
],
[
[
17172,
17195
],
[
17414,
17437
]
],
[
[
17687,
17697
],
[
17924,
17934
],
[
18229,
18239
]
],
[
[
17699,
17707
],
[
17945,
17953
],
[
18184,
18192
]
],
[
[
17709,
17718
],
[
18172,
18181
]
],
[
[
17720,
17730
],
[
17901,
17911
],
[
18108,
18118
],
[
18139,
18149
],
[
18217,
18227
]
],
[
[
17732,
17736
],
[
18151,
18155
],
[
18241,
18245
]
],
[
[
17738,
17750
],
[
18120,
18132
]
],
[
[
17752,
17761
],
[
18076,
18085
]
],
[
[
17763,
17788
],
[
17981,
18006
]
],
[
[
17790,
17813
],
[
18032,
18055
]
],
[
[
18304,
18314
],
[
18541,
18551
],
[
18846,
18856
]
],
[
[
18316,
18324
],
[
18562,
18570
],
[
18801,
18809
]
],
[
[
18326,
18335
],
[
18789,
18798
]
],
[
[
18337,
18347
],
[
18518,
18528
],
[
18725,
18735
],
[
18756,
18766
],
[
18834,
18844
]
],
[
[
18349,
18353
],
[
18768,
18772
],
[
18858,
18862
]
],
[
[
18355,
18367
],
[
18737,
18749
]
],
[
[
18369,
18378
],
[
18693,
18702
]
],
[
[
18380,
18405
],
[
18598,
18623
]
],
[
[
18407,
18430
],
[
18649,
18672
]
],
[
[
18922,
18932
],
[
19159,
19169
],
[
19464,
19474
]
],
[
[
18934,
18942
],
[
19180,
19188
],
[
19419,
19427
]
],
[
[
18944,
18953
],
[
19407,
19416
]
],
[
[
18955,
18965
],
[
19136,
19146
],
[
19343,
19353
],
[
19374,
19384
],
[
19452,
19462
]
],
[
[
18967,
18971
],
[
19386,
19390
],
[
19476,
19480
]
],
[
[
18973,
18985
],
[
19355,
19367
]
],
[
[
18987,
18996
],
[
19311,
19320
]
],
[
[
18998,
19023
],
[
19216,
19241
]
],
[
[
19025,
19048
],
[
19267,
19290
]
],
[
[
19539,
19549
],
[
19776,
19786
],
[
20081,
20091
]
],
[
[
19551,
19559
],
[
19797,
19805
],
[
20036,
20044
]
],
[
[
19561,
19570
],
[
20024,
20033
]
],
[
[
19572,
19582
],
[
19753,
19763
],
[
19960,
19970
],
[
19991,
20001
],
[
20069,
20079
]
],
[
[
19584,
19588
],
[
20003,
20007
],
[
20093,
20097
]
],
[
[
19590,
19602
],
[
19972,
19984
]
],
[
[
19604,
19613
],
[
19928,
19937
]
],
[
[
19615,
19640
],
[
19833,
19858
]
],
[
[
19642,
19665
],
[
19884,
19907
]
],
[
[
20157,
20167
],
[
20394,
20404
],
[
20699,
20709
]
],
[
[
20169,
20177
],
[
20415,
20423
],
[
20654,
20662
]
],
[
[
20179,
20188
],
[
20642,
20651
]
],
[
[
20190,
20200
],
[
20371,
20381
],
[
20578,
20588
],
[
20609,
20619
],
[
20687,
20697
]
],
[
[
20202,
20206
],
[
20621,
20625
],
[
20711,
20715
]
],
[
[
20208,
20220
],
[
20590,
20602
]
],
[
[
20222,
20231
],
[
20546,
20555
]
],
[
[
20233,
20258
],
[
20451,
20476
]
],
[
[
20260,
20283
],
[
20502,
20525
]
],
[
[
20774,
20784
],
[
21011,
21021
],
[
21316,
21326
]
],
[
[
20786,
20794
],
[
21032,
21040
],
[
21271,
21279
]
],
[
[
20796,
20805
],
[
21259,
21268
]
],
[
[
20807,
20817
],
[
20988,
20998
],
[
21195,
21205
],
[
21226,
21236
],
[
21304,
21314
]
],
[
[
20819,
20823
],
[
21238,
21242
],
[
21328,
21332
]
],
[
[
20825,
20837
],
[
21207,
21219
]
],
[
[
20839,
20848
],
[
21163,
21172
]
],
[
[
20850,
20875
],
[
21068,
21093
]
],
[
[
20877,
20900
],
[
21119,
21142
]
],
[
[
21392,
21402
],
[
21530,
21540
],
[
21730,
21740
]
],
[
[
21404,
21415
],
[
21554,
21565
],
[
21682,
21693
]
],
[
[
21417,
21427
],
[
21618,
21628
],
[
21649,
21659
],
[
21718,
21728
]
],
[
[
21429,
21433
],
[
21661,
21665
],
[
21742,
21746
]
],
[
[
21435,
21447
],
[
21630,
21642
]
],
[
[
21449,
21458
],
[
21586,
21595
]
],
[
[
21792,
21802
],
[
21930,
21940
],
[
22130,
22140
]
],
[
[
21804,
21815
],
[
21954,
21965
],
[
22082,
22093
]
],
[
[
21817,
21827
],
[
22018,
22028
],
[
22049,
22059
],
[
22118,
22128
]
],
[
[
21829,
21833
],
[
22061,
22065
],
[
22142,
22146
]
],
[
[
21835,
21847
],
[
22030,
22042
]
],
[
[
21849,
21858
],
[
21986,
21995
]
]
] |
import json
import pathlib
import urllib.request
def main():
# https://gist.github.com/kawanet/a880c83f06d6baf742e45ac9ac52af96
url = 'https://gist.githubusercontent.com/kawanet/a880c83f06d6baf742e45ac9ac52af96/raw' \
'/b4fbc9a730394eb977277e73cc37b60955463f21/material-colors.json'
json_file_name = 'material-colors.json'
urllib.request.urlretrieve(url, json_file_name)
with open(json_file_name, 'r') as json_file:
colors = json.load(json_file)
out_dir_name = 'material_ui_colors'
pathlib.Path(out_dir_name).mkdir(exist_ok=True)
for color in colors:
with open(out_dir_name + '/_' + color + '.scss', 'w') as out_file:
shades = colors[color]
out = ['$material_ui_' + color + '_' + shade + ': ' + value + ';\n' for shade, value in shades.items()]
out.append('$material_ui_' + color + ': $material_ui_' + color + '_500;')
out_file.writelines(out)
with open(out_dir_name + '/_main.scss', 'w') as out_main_file:
out = ['@import "' + color + '";\n' for color in colors]
out_main_file.writelines(out)
if __name__ == '__main__':
main()
| [
[
[
7,
11
],
[
477,
481
]
],
[
[
20,
27
],
[
546,
553
]
],
[
[
36,
50
],
[
361,
367
]
],
[
[
60,
64
],
[
1188,
1192
]
]
] |
# %% Import packages
from eeyore.samplers import HMC
from bnn_mcmc_examples.examples.mlp.pima.setting1.dataloaders import training_dataloader
from bnn_mcmc_examples.examples.mlp.pima.setting1.model import model
# %% Setup HMC sampler
sampler = HMC(model, theta0=model.prior.sample(), dataloader=training_dataloader, step=0.125, num_steps=6)
| [
[
[
50,
53
],
[
248,
251
]
],
[
[
124,
143
],
[
299,
318
]
],
[
[
207,
212
],
[
252,
257
],
[
266,
271
]
],
[
[
238,
245
]
]
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0048_auto_20160803_0311'),
]
operations = [
migrations.AddField(
model_name='alternate',
name='season_player',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.SeasonPlayer'),
),
migrations.AlterUniqueTogether(
name='alternate',
unique_together=set([]),
),
migrations.RunSQL('''
UPDATE tournament_alternate alt SET season_player_id = (SELECT id FROM tournament_seasonplayer sp WHERE sp.season_id = alt.season_id AND sp.player_id = alt.player_id)
''')
]
| [
[
[
96,
106
],
[
166,
176
],
[
296,
306
],
[
532,
542
],
[
650,
660
]
],
[
[
108,
114
],
[
405,
411
]
],
[
[
122,
147
],
[
447,
453
]
],
[
[
156,
165
]
]
] |
##############################################
# #
# Ferdinand 0.40, Ian Thompson, LLNL #
# #
# gnd,endf,fresco,azure,hyrma #
# #
##############################################
import os
import math
from write_fresco import write_fresco
import fudge.sums as sumsModule
import fudge.styles as stylesModule
import fudge.reactionData.crossSection as crossSectionModule
import fudge.productData.distributions as distributionsModule
############################################## write_fresco
def reconstructPointwise(gnd,base,verbose,debug,egrid,angles,thin,reconstyle):
projectile = gnd.PoPs[gnd.projectile]
target = gnd.PoPs[gnd.target]
if hasattr(projectile, 'nucleus'): projectile = projectile.nucleus
if hasattr(target, 'nucleus'): target = target.nucleus
pZ = projectile.charge[0].value; tZ = target.charge[0].value
charged = pZ*tZ != 0
identicalParticles = gnd.projectile == gnd.target
rStyle = reconstyle.label
if debug: print("Charged-particle elastic:",charged,", identical:",identicalParticles,' rStyle:',rStyle)
if charged and angles is not None:
from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import CoulombPlusNuclearElastic as CoulombPlusNuclearElasticModule
from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import nuclearPlusInterference as nuclearPlusInterferenceModule
# from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import RutherfordScattering as RutherfordScatteringModule
from fudge.productData.distributions import reference as referenceModule
thmin = angles[0]
        pi = 3.1415926536
muCutoff = math.cos(thmin*pi/180.)
fresco_base = base + '.fresco_recon'
channels = write_fresco(gnd,fresco_base,verbose,debug,True,None,None,False,egrid,angles)
name_frin = fresco_base + '.frin' # must be same as in write_fresco
name_frout= fresco_base + '.frout'
accuracy = None
cmd = "frescox < "+name_frin+" > "+name_frout
print(cmd)
os.system(cmd) # Run FRESCO
f239 = open('fort.239','r')
egrid = []
totalxs = []; elasticxs = []; fissionxs = []; absorbtionxs = []
chanxs =[];
# lastzero = [ 0 for i in range(len(channels))]
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
if not rreac.eliminated:
chanxs.append([])
if len(channels) != len(chanxs):
print("Only getting",channels," data channels, not",len(chanxs))
exit()
if debug: print("Fresco channel order:",channels)
mb = 1e-3
for line in f239:
if 'NaN' not in line:
data = line.split()
try:
elab,absorbtion,reaction,total,elastic = [float(d) for d in data[:5]]
sigr = [float(d) for d in data[5:]]
#print elab,absorbtion,reaction,total,elastic,sigr
egrid.append(elab)
totalxs.append(total*mb)
elasticxs.append(elastic*mb)
fissionxs.append(0.0)
absorbtionxs.append(absorbtion*mb)
for c in range(len(channels)):
chanxs[c].append(sigr[c]*mb)
# if sigr[c]== 0.: lastzero[c] = elab
except:
pass
crossSectionAxes = crossSectionModule.defaultAxes( 'MeV' )
total = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, totalxs), dataForm="XsAndYs" )
elastic = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, elasticxs), dataForm="XsAndYs" )
fission = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, fissionxs), dataForm="XsAndYs" )
absorbtion = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, absorbtionxs), dataForm="XsAndYs" )
if not isinstance( reconstyle, stylesModule.crossSectionReconstructed ):
raise TypeError("style must be an instance of crossSectionReconstructed, not %s" % type(reconstyle))
haveEliminated = False
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
reaction = rreac.reactionLink.link
haveEliminated = haveEliminated or rreac.eliminated
# elastic or capture
if reaction == gnd.getReaction('capture'): rreac.tag = 'capture'
elif reaction == gnd.getReaction('elastic'): rreac.tag = 'elastic'
elif 'fission' in rreac.label: rreac.tag = rreac.label
else: rreac.tag = 'competitive'
xsecs = {'total':total, 'elastic':elastic, 'fission':fission, 'nonelastic':absorbtion}
for c in range(1,len(channels)): # skip c=1 elastic !! FIXME
#print channels[c],':',len(egrid),len(chanxs[c])
xsecs[channels[c]] = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, chanxs[c]), dataForm="XsAndYs" )
# print 'xsecs[channels[c]]',xsecs[channels[c]].toString()
if haveEliminated:
eliminatedReaction = [rr for rr in gnd.resonances.resolved.evaluated.resonanceReactions if rr.eliminated]
if len(eliminatedReaction) != 1:
raise TypeError("Only 1 reaction can be eliminated in Reich-Moore approximation!")
xsecs[eliminatedReaction[0].tag] = absorbtion - fission
epsilon = 1e-8 # for joining multiple regions together
# for each reaction, add tabulated pointwise data (ENDF MF=3) to reconstructed resonances:
possibleChannels = { 'elastic' : True, 'capture' : True, 'fission' : True, 'total' : False, 'nonelastic' : False }
elasticChannel = gnd.getReaction('elastic')
derivedFromLabel = ''
for reaction in gnd :
if isinstance( reaction, sumsModule.multiplicitySum ): continue
iselastic = reaction is elasticChannel
evaluatedCrossSection = reaction.crossSection.evaluated
if not isinstance( evaluatedCrossSection, crossSectionModule.resonancesWithBackground ):
continue
# which reconstructed cross section corresponds to this reaction?
if( derivedFromLabel == '' ) : derivedFromLabel = evaluatedCrossSection.label
if( derivedFromLabel != evaluatedCrossSection.label ) :
print(('WARNING derivedFromLabel = "%s" != "%s"' % (derivedFromLabel, evaluatedCrossSection.label)))
RRxsec = None
if str( reaction ) in xsecs:
RRxsec = xsecs[ str( reaction ) ]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
else :
for possibleChannel in possibleChannels :
if( possibleChannels[possibleChannel] ) :
if( possibleChannel in str( reaction ) ) :
RRxsec = xsecs[possibleChannel]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
if( RRxsec is None ) :
if( reaction is gnd.getReaction( possibleChannel ) ) :
RRxsec = xsecs[possibleChannel]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
if( RRxsec is not None ) : break
if( RRxsec is None ) :
if verbose:
print(( "Warning: couldn't find appropriate reconstructed cross section to add to reaction %s" % reaction ))
continue
background = evaluatedCrossSection.background
background = background.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec = RRxsec.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec.convertUnits( {RRxsec.domainUnit: background.domainUnit, RRxsec.rangeUnit: background.rangeUnit } )
background, RRxsec = background.mutualify(0,0,0, RRxsec, -epsilon,epsilon,True)
RRxsec = background + RRxsec # result is a crossSection.XYs1d instance
if thin:
RRx = RRxsec.thin( accuracy or .001 )
else:
RRx = RRxsec
RRx.label = rStyle
reaction.crossSection.add( RRx )
# print "Channels ",reaction.label,iselastic,":\n",RRxsec.toString(),"\n&\n",RRx.toString()
if iselastic:
effXsc = RRxsec
gnd.styles.add( reconstyle )
# print "Last energies of zero cross section:",lastzero
if angles is None: return
f241 = open('fort.241','r')
sigdd = {}
for rr in channels: sigdd[rr] = []
for line in f241:
if '# Elab =' in line:
elab,ich = float(line[9:9+15]),int(line[9+15:9+15+4])-1 # Elab = 1.00000000E-06 1
line1 = line
dist = []
elif "&" in line:
rr = channels[ich]
sigdd[rr].append([elab,dist])
# if elab<1.0001: print '\n',ich,rr,sigdd[rr]
elif "NaN" in line:
continue
else:
mu,p = line.split()
try:
mu,p = float(mu),float(p)
dist.insert(0,p)
dist.insert(0,mu)
except:
pass
angularAxes = distributionsModule.angular.defaultAxes( 'MeV' )
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
if not rreac.eliminated:
productName = rreac.ejectile
residName = rreac.residual
elastic = productName == gnd.projectile and residName == gnd.target
print("Add angular distribution for",productName," in",rreac.label,"channel (elastic=",elastic,")")
reaction = rreac.reactionLink.link
firstProduct = reaction.outputChannel.getProductWithName(productName)
effDist = distributionsModule.angular.XYs2d( axes = angularAxes )
elab_max = 0.; elab_min = 1e10; nangles=0
ne = 0
for elab,dist in sigdd[rreac.label]:
if debug: print('E=',elab,'has',len(dist),' angles')
if len(dist) <= 3:
print(' E=',elab,'has',len(dist),' angles')
continue
angdist = distributionsModule.angular.XYs1d( data = dist, outerDomainValue = elab, axes = angularAxes, dataForm = 'list' )
if thin:
angdist = angdist.thin( accuracy or .001 )
norm = angdist.integrate()
if norm != 0.0:
if debug: print(rreac.label,elab,norm)
effDist.append( angdist )
elab_max = max(elab,elab_max); elab_min = min(elab,elab_min); nangles = max(len(dist),nangles)
ne += 1
print(" Angles reconstructed at %i energies from %s to %s MeV with up to %i angles at each energy" % (ne,elab_min,elab_max,nangles))
newForm = distributionsModule.angular.twoBodyForm( label = reconstyle.label,
productFrame = firstProduct.distribution.evaluated.productFrame, angularSubform = effDist )
firstProduct.distribution.add( newForm )
if elastic and charged: # dCrossSection_dOmega for charged-particle elastics:
NCPI = nuclearPlusInterferenceModule.nuclearPlusInterference( muCutoff=muCutoff,
crossSection=nuclearPlusInterferenceModule.crossSection( effXsc),
distribution=nuclearPlusInterferenceModule.distribution( effDist)
)
# Rutherford = RutherfordScatteringModule.RutherfordScattering()
CoulombElastic = CoulombPlusNuclearElasticModule.form( gnd.projectile, rStyle, nuclearPlusInterference = NCPI, identicalParticles=identicalParticles )
reaction.doubleDifferentialCrossSection.add( CoulombElastic )
reaction.crossSection.remove( rStyle )
reaction.crossSection.add( crossSectionModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
firstProduct.distribution.remove( rStyle )
firstProduct.distribution.add( referenceModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
secondProduct = reaction.outputChannel[1]
# secondProduct.distribution[rStyle].angularSubform.link = firstProduct.distribution[rStyle] ## Fails
# give 'recoil' distribution!
return
| [
[
[
338,
340
],
[
2242,
2244
]
],
[
[
349,
353
],
[
1869,
1873
]
],
[
[
379,
391
],
[
1950,
1962
]
],
[
[
399,
423
],
[
5882,
5892
]
],
[
[
431,
459
],
[
4044,
4056
]
],
[
[
467,
520
],
[
3522,
3540
],
[
3574,
3592
],
[
3683,
3701
],
[
3794,
3812
],
[
3908,
3926
],
[
4955,
4973
],
[
6083,
6101
],
[
11989,
12007
]
],
[
[
528,
582
],
[
9251,
9270
],
[
9836,
9855
],
[
10241,
10260
],
[
10929,
10948
]
],
[
[
650,
670
]
]
] |
""" Generic script for monitoring counts from a counter """
import numpy as np
import time
import pyqtgraph as pg
from pylabnet.gui.pyqt.external_gui import Window
from pylabnet.utils.logging.logger import LogClient
from pylabnet.scripts.pause_script import PauseService
from pylabnet.network.core.generic_server import GenericServer
from pylabnet.network.client_server import si_tt
from pylabnet.utils.helper_methods import load_script_config, get_ip, unpack_launcher, load_config, get_gui_widgets, get_legend_from_graphics_view, find_client, load_script_config
# Static methods
# def generate_widgets():
# """Static method to return systematically named gui widgets for 4ch wavemeter monitor"""
# graphs, legends, numbers = [], [], []
# for i in range(2):
# graphs.append('graph_widget_' + str(i + 1))
# legends.append('legend_widget_' + str(i + 1))
# numbers.append('number_label_' + str(i + 1))
# for i in range(2, 8):
# numbers.append('number_label_' + str(i + 1))
# return graphs, legends, numbers
class CountMonitor:
# Generate all widget instances for the .ui to use
# _plot_widgets, _legend_widgets, _number_widgets = generate_widgets()
def __init__(self, ctr_client: si_tt.Client, ui='count_monitor', logger_client=None, server_port=None, combined_channel=False, config=None):
""" Constructor for CountMonitor script
:param ctr_client: instance of hardware client for counter
:param gui_client: (optional) instance of client of desired output GUI
:param logger_client: (obj) instance of logger client.
:param server_port: (int) port number of script server
:combined_channel: (bool) If true, show additional trace with summed counts.
"""
self._ctr = ctr_client
self.log = logger_client
self.combined_channel = combined_channel
self._bin_width = None
self._n_bins = None
self._ch_list = None
self._plot_list = None # List of channels to assign to each plot (e.g. [[1,2], [3,4]])
self._plots_assigned = [] # List of plots on the GUI that have been assigned
if self.combined_channel:
ui = 'count_monitor_combined'
else:
ui = 'count_monitor'
# Instantiate GUI window
self.gui = Window(
gui_template=ui,
host=get_ip(),
port=server_port,
log=self.log
)
# Setup stylesheet.
self.gui.apply_stylesheet()
if self.combined_channel:
num_plots = 3
else:
num_plots = 2
# Get all GUI widgets
self.widgets = get_gui_widgets(
self.gui,
graph_widget=num_plots,
number_label=8,
event_button=num_plots,
legend_widget=num_plots
)
# Load config
self.config = {}
if config is not None:
self.config = load_script_config(
script='monitor_counts',
config=config,
                logger=self.log
)
if not 'name' in self.config:
self.config.update({'name': f'monitor{np.random.randint(1000)}'})
def set_hardware(self, ctr):
""" Sets hardware client for this script
:param ctr: instance of count monitor hardware client
"""
# Initialize counter instance
self._ctr = ctr
def set_params(self, bin_width=1e9, n_bins=1e4, ch_list=[1], plot_list=None):
""" Sets counter parameters
:param bin_width: bin width in ps
:param n_bins: number of bins to display on graph
:param ch_list: (list) channels to record
:param plot_list: list of channels to assign to each plot (e.g. [[1,2], [3,4]])
"""
# Save params to internal variables
self._bin_width = int(bin_width)
self._n_bins = int(n_bins)
self._ch_list = ch_list
self._plot_list = plot_list
def run(self):
""" Runs the counter from scratch"""
try:
# Start the counter with desired parameters
self._initialize_display()
# Give time to initialize
# time.sleep(0.05)
self._is_running = True
self._ctr.start_trace(
name=self.config['name'],
ch_list=self._ch_list,
bin_width=self._bin_width,
n_bins=self._n_bins
)
# Continuously update data until paused
while self._is_running:
self._update_output()
self.gui.force_update()
except Exception as exc_obj:
self._is_running = False
raise exc_obj
def pause(self):
""" Pauses the counter"""
self._is_running = False
def resume(self):
""" Resumes the counter.
To be used to resume after the counter has been paused.
"""
try:
self._is_running = True
# Clear counter and resume plotting
self._ctr.clear_ctr(name=self.config['name'])
while self._is_running:
self._update_output()
except Exception as exc_obj:
self._is_running = False
raise exc_obj
# Technical methods
def _initialize_display(self):
""" Initializes the display (configures all plots) """
plot_index = 0
for index in range(len(self.widgets['graph_widget'])):
# Configure and return legend widgets
self.widgets['legend_widget'][index] = get_legend_from_graphics_view(
self.widgets['legend_widget'][index]
)
for color, channel in enumerate(self._ch_list):
# Figure out which plot to assign to
if self._plot_list is not None:
for index, channel_set in enumerate(self._plot_list):
if channel in channel_set:
plot_index = index
break
# If we have not assigned this plot yet, assign it
# if plot_index not in self._plots_assigned:
# self.gui_handler.assign_plot(
# plot_widget=self._plot_widgets[plot_index],
# plot_label='Counter Monitor {}'.format(plot_index + 1),
# legend_widget=self._legend_widgets[plot_index]
# )
# self._plots_assigned.append(plot_index)
# Now assign this curve
# self.gui_handler.assign_curve(
# plot_label='Counter Monitor {}'.format(plot_index + 1),
# curve_label='Channel {}'.format(channel),
# error=True
# )
# Create a curve and store the widget in our dictionary
self.widgets[f'curve_{channel}'] = self.widgets['graph_widget'][plot_index].plot(
pen=pg.mkPen(color=self.gui.COLOR_LIST[color])
)
self.widgets['legend_widget'][plot_index].addItem(
self.widgets[f'curve_{channel}'],
' - ' + f'Channel {channel}'
)
# Assign scalar
# self.gui_handler.assign_label(
# label_widget=self._number_widgets[channel - 1],
# label_label='Channel {}'.format(channel)
# )
# Handle button pressing
from functools import partial
for plot_index, clear_button in enumerate(self.widgets['event_button']):
clear_button.clicked.connect(partial(lambda plot_index: self._clear_plot(plot_index), plot_index=plot_index))
if self.combined_channel:
self.widgets['curve_combo'] = self.widgets['graph_widget'][index + 1].plot(
pen=pg.mkPen(color=self.gui.COLOR_LIST[color + 1])
)
self.widgets['legend_widget'][index + 1].addItem(
self.widgets['curve_combo'],
' - ' + 'Combined Counts'
)
def _clear_plot(self, plot_index):
""" Clears the curves on a particular plot
:param plot_index: (int) index of plot to clear
"""
        # First, handle case where combined count channel is cleared (very ugly).
if self.combined_channel and plot_index == len(self._plot_list):
channel = 'combo'
# Set the curve to constant with last point for all entries
self.widgets[f'curve_{channel}'].setData(
np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
)
else:
# Find all curves in this plot
for channel in self._plot_list[plot_index]:
# Set the curve to constant with last point for all entries
self.widgets[f'curve_{channel}'].setData(
np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
)
self._ctr.clear_ctr(name=self.config['name'])
def _update_output(self):
""" Updates the output to all current values"""
# Update all active channels
# x_axis = self._ctr.get_x_axis()/1e12
counts = self._ctr.get_counts(name=self.config['name'])
counts_per_sec = counts * (1e12 / self._bin_width)
# noise = np.sqrt(counts)*(1e12/self._bin_width)
# plot_index = 0
summed_counts = np.sum(counts_per_sec, axis=0)
for index, count_array in enumerate(counts_per_sec):
# Figure out which plot to assign to
channel = self._ch_list[index]
# if self._plot_list is not None:
# for index_plot, channel_set in enumerate(self._plot_list):
# if channel in channel_set:
# plot_index = index_plot
# break
# Update GUI data
# self.gui_handler.set_curve_data(
# data=count_array,
# error=noise[index],
# plot_label='Counter Monitor {}'.format(plot_index + 1),
# curve_label='Channel {}'.format(channel)
# )
# self.gui_handler.set_label(
# text='{:.4e}'.format(count_array[-1]),
# label_label='Channel {}'.format(channel)
# )
self.widgets[f'curve_{channel}'].setData(count_array)
self.widgets[f'number_label'][channel - 1].setText(str(count_array[-1]))
if self.combined_channel:
self.widgets['curve_combo'].setData(summed_counts)
def launch(**kwargs):
""" Launches the count monitor script """
# logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)
logger = kwargs['logger']
clients = kwargs['clients']
config = load_script_config(
'monitor_counts',
kwargs['config'],
logger
)
if config['combined_channel'] == 'True':
combined_channel = True
else:
combined_channel = False
# Instantiate CountMonitor
try:
monitor = CountMonitor(
ctr_client=find_client(
clients,
config,
client_type='si_tt',
client_config='standard_ctr',
logger=logger
),
logger_client=logger,
server_port=kwargs['server_port'],
combined_channel=combined_channel
)
except KeyError:
print('Please make sure the module names for required servers and GUIS are correct.')
time.sleep(15)
raise
# except:
# config = None
# ch_list = [7, 8]
# plot_list = [[7], [8]]
# Instantiate Pause server
# try:
# pause_logger = LogClient(
# host=loghost,
# port=logport,
# module_tag='count_monitor_pause_server'
# )
# except ConnectionRefusedError:
# logger.warn('Could not connect Count Monitor Pause server to logger')
# pause_service = PauseService()
# pause_service.assign_module(module=monitor)
# pause_service.assign_logger(logger=pause_logger)
# timeout = 0
# while timeout < 1000:
# try:
# port = np.random.randint(1, 9999)
# pause_server = GenericServer(
# host=get_ip(),
# port=port,
# service=pause_service)
# pause_logger.update_data(data=dict(port=port))
# timeout = 9999
# except ConnectionRefusedError:
# logger.warn(f'Failed to instantiate Count Monitor Pause server at port {port}')
# timeout += 1
# pause_server.start()
# Set parameters
monitor.set_params(**config['params'])
# Run
monitor.run()
| [
[
[
68,
79
],
[
3215,
3217
],
[
8566,
8568
],
[
8915,
8917
],
[
9458,
9460
]
],
[
[
87,
91
],
[
11611,
11615
]
],
[
[
99,
114
],
[
6992,
6994
],
[
7855,
7857
]
],
[
[
158,
164
],
[
2339,
2345
]
],
[
[
207,
216
]
],
[
[
259,
271
]
],
[
[
321,
334
]
],
[
[
378,
383
],
[
1250,
1255
]
],
[
[
426,
444
]
],
[
[
446,
452
],
[
2393,
2399
]
],
[
[
454,
469
]
],
[
[
471,
482
]
],
[
[
484,
499
],
[
2688,
2703
]
],
[
[
501,
530
],
[
5649,
5678
]
],
[
[
532,
543
],
[
11161,
11172
]
],
[
[
545,
563
],
[
2978,
2996
],
[
10852,
10870
]
],
[
[
1069,
1081
],
[
11124,
11136
]
],
[
[
10630,
10636
]
]
] |
# Generated by Django 2.1.15 on 2020-02-16 11:10
# flake8: noqa
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
[
[
86,
96
],
[
123,
133
],
[
280,
290
]
],
[
[
98,
104
],
[
373,
379
],
[
494,
500
],
[
585,
591
],
[
691,
697
],
[
881,
887
],
[
955,
961
],
[
1020,
1026
],
[
1085,
1091
],
[
1149,
1155
],
[
1426,
1432
]
],
[
[
113,
122
]
]
] |
## Message passing over a discrete BN ##
## Library created by Pablo Martínez Olmos, University Carlos III Madrid ##
## olmos@tsc.uc3m.es ##
## Last modification 15/11/2016 ##
import numpy as np
## Messages are stored in the logarithmic domain ##
## Global constants (to control numerical issues)
inf_log=100 #To impose hard constraints (i.e. an observed variable)
constant_log=50 #Used to improve stability in the Check Node (CN) operation
## Function definitions
def create_var_node(ID,cardinality,neighbor_order,observed_value_index=-1):
# Variable Nodes are defined by a dictionary with several fields
var_node={}
var_node['ID']=ID
    var_node['node_type']=0 #type 0 refers to variable node, 1 to check nodes.
var_node['cardinality']=cardinality #Num. of possible values the RV can take
var_node['neighbor_order']=np.array(neighbor_order) #Ordered array of the neighbor's IDs (neighbors are CNs!)
var_node['input_msgs']=[] #List to store input messages
var_node['observed']=observed_value_index #-1 if the variable is not observed
var_node['inner_factor']=np.zeros([cardinality,1]) #Internal vector used to imposed hard messages when variable is observed
#If variable is observed, then the inner_factor vector is log[0 0 ... 0 1 0 ...]
if(observed_value_index!=-1):
var_node['inner_factor']-=inf_log
var_node['inner_factor'][observed_value_index]=inf_log
#Initialize input msgs by filling with zeros
for index,f in enumerate(var_node['neighbor_order']):
var_node['input_msgs'].append(0)
return var_node
def create_message(input_node,output_node,table):
#Messages are defined by a dictionary with three keys: input node (sender node), output_node (receiver node), and table of values
message={}
message['input_node']=input_node
message['output_node']=output_node
message['table']=table
return message
def create_factor_node(ID,neighbors,CPD):
# Check Nodes are defined by a dictionary with several fields
factor_node={}
factor_node['ID']=ID
factor_node['node_type']=1
factor_node['input_msgs']=[]
CPD=np.array(CPD)
CPD=CPD.reshape(CPD.shape[0],) #Just to make sure that CPD is a np. array vector of dim. (n,)
factor_node['CPD']=np.array(CPD) #CPD table associated to the factor
    factor_node['CPD_order']=np.zeros([len(neighbors),1]).astype(int) #Ordered array of the neighbor's IDs (neighbors are variable nodes)
factor_node['cardinalities']=np.zeros([len(neighbors),1]).astype(int) #Cardinalities of the neighbors
#Initialize input msgs, CPD_order & cardinalities
#Note that creating factor nodes requires variable nodes to be created first, as CN input messages
#are initialized already to the inner_factor field of every neighbor variable node
for index,node in enumerate(neighbors):
card=node['cardinality']
factor_node['input_msgs'].append(
create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))
factor_node['cardinalities'][index]=card
factor_node['CPD_order'][index]=node['ID']
return factor_node
def initialize_variable(var_node,observed_value_index=-1):
#After running message passing, variable nodes store the incoming messages for future calculations
#If we want to run again message passing in the same graph, we have to re-initialize both
#variable nodes and check nodes.
var_node['inner_factor']=np.zeros([var_node['cardinality'],1])
var_node['observed']=observed_value_index
if(observed_value_index!=-1):
var_node['inner_factor']-=inf_log
var_node['inner_factor'][observed_value_index]=inf_log
def initialize_factor_msgs(factor_node,neighbors):
#After running message passing, variable nodes store the incoming messages for future calculations
#If we want to run again message passing in the same graph, we have to re-initialize both
#variable nodes and check nodes.
factor_node['input_msgs']=[]
for index,node in enumerate(neighbors):
factor_node['input_msgs'].append(
create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))
#The next two routines are used to encode and decode positions to store CPD values in a
#vector form. We use a tree-encoding determined by the order of variables and their cardinalities
#See First Example Message Passing.ipynb for an illustration
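#Illustrative note (added example, not part of the original library): with v_card=[2,3]
#and CPD_size=6, CPD_position_to_variable_index(4,[2,3],6) returns the variable indexes [1,1],
#and variable_index_to_CPD_position([1,1],[2,3],6) maps those indexes back to position 4,
#so the two routines below are inverses of each other for a fixed variable order.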
def CPD_position_to_variable_index(position,v_card,CPD_size):
#We use this function to find the encoding for each position of a CPD table
#of CPD_size positions, where the cardinalities of the variables (in order) are given in v_card
#This function returns the index value of each variable
v_card=np.array(v_card) #To make sure we have a np.array
var_index=np.zeros([v_card.shape[0],1]).astype(int)
remaining=CPD_size
for i,card in enumerate(v_card):
remaining=remaining//card
index_i=position//remaining
position=position-index_i*(remaining)
var_index[i]=index_i
return var_index
def variable_index_to_CPD_position(var_index,v_card,CPD_size):
#This function returns the encoded CPD position for a given configuration of the variables.
#The CPD table is of size CPD_size, the cardinalities of the variables (in order) are given in v_card
#and the value indexes (in order) of the variables are given in var_index
var_index=np.array(var_index)
v_card=np.array(v_card)
position=0
offset=CPD_size
for i,card in enumerate(v_card):
offset=offset//card
position+=var_index[i]*offset
return position
def update_var_to_factor(var_node):
#Routine to update the output messages of a variable node (var_node)
prod_table=np.zeros([var_node['cardinality'],1])
#We first multiply all the input messages (sums in the log domain)
for msg in var_node['input_msgs']:
prod_table+=msg['table']
#We also take into account the inner_factor of the variable_node. In
#case it is observed, the output messages have to be consistent with the observation
prod_table+=var_node['inner_factor']
    #For every output message, we have to subtract from prod_table the message received
#through the corresponding edge
for msg in var_node['input_msgs']:
if(var_node['observed']==-1):
reply_table=prod_table-msg['table']
else:
reply_table=np.ones([var_node['cardinality'],1])*(-inf_log)
reply_table[var_node['observed']]=inf_log
#We limit the absolute value of the messages, to exp(inf_log)
reply_table[reply_table>inf_log]=inf_log
reply_table[reply_table<-inf_log]=-inf_log
        #The output message is stored in the corresponding neighbor
factor_rx=msg['input_node']
reply_msg=create_message(input_node=var_node,output_node=factor_rx,table=reply_table)
        #Short for loop to save messages in factor_node in the corresponding order
for index,v in enumerate(factor_rx['CPD_order']):
if(v==var_node['ID']):
factor_rx['input_msgs'][index]=reply_msg
break
def compute_var_marginal(var_node):
#Routine to compute the marginal pmf of a variable node (var_node)
#Simply the product of all incoming msgs times the inner_factor
marg_table=np.zeros([var_node['cardinality'],1])
for msg in var_node['input_msgs']:
marg_table+=msg['table']
marg_table+=var_node['inner_factor']
marg_table=np.exp(marg_table)
marg_table/=sum(marg_table)
return marg_table
def update_factor_to_var(factor_node):
#Routine to update the output messages of a check node (var_node)
#This is the most complicated in the library, as it involves marginalization
    #over each argument of the CPD function times the product of incoming messages
output_tables=[]
#Output message tables initialization
for card in factor_node['cardinalities']:
output_tables.append(np.zeros([card,1]))
#With a single loop we go only once through every element of the CPD table
#It is multiplied accordingly to input messages and the resulting terms are
#added to the corresponding output tables
for CPD_entry,CPD_val in enumerate(factor_node['CPD']):
values=CPD_position_to_variable_index(
position=CPD_entry,v_card=factor_node['cardinalities'],CPD_size=factor_node['CPD'].shape[0])
#The CPD value is multiplied by all incoming input messages but one,
        #and the result is added to the output table
#Since we have to marginalize, not all operations can be done in the log domain
        #To avoid numerical instabilities when performing the operations, we subtract a large exponent (constant_log)
        #which is added back at the very end, when we move back to the log domain
for index in range(factor_node['cardinalities'].shape[0]):
aux=CPD_val
for index2 in range(factor_node['cardinalities'].shape[0]):
if(index2!=index):
aux*=np.exp(factor_node['input_msgs'][index2]['table'][values[index2]]-constant_log)
output_tables[index][values[index]]+=aux
#Once the output tables have been computed, we create the output messages and store them in
#the corresponding variable nodes
for index,msg in enumerate(factor_node['input_msgs']):
output=output_tables[index]
output=np.log(output)+constant_log
output[output>inf_log]=inf_log
output[output<-inf_log]=-inf_log
var_rx=msg['input_node']
reply_msg=create_message(input_node=factor_node,output_node=var_rx,table=output)
        #Short for loop to save the message in the receiving variable node in the corresponding order
for index2,f in enumerate(var_rx['neighbor_order']):
if(f==factor_node['ID']):
var_rx['input_msgs'][index2]=reply_msg
break
def create_joint_node(ID,node_members,neighbor_order,observed_values_indexes=-1):
#Routine to define a joint variable node. This is useful to eliminate cycles in
#the factor graph and perform exact inference.
#Note a routine to create a joint factor node that uses joint variable nodes
#is not provided. The corresponding CPD of such factor nodes has to be computed
#first and then create the joint node with the function create_factor_node
#We do not consider the case that the joint variable node is partially observed
#(e.g. one of the joined variable nodes is observed). We only consider the case
#where the joint node is completely observed.
#See Second Example Message Passing.ipynb for an example of how to define and
#manage joint variable nodes.
var_node={}
var_node['ID']=ID
var_node['node_type']=0
var_node['input_msgs']=[]
var_node['observed']=-1
var_node['neighbor_order']=np.array(neighbor_order)
card=1
#Cardinality of joint node is the product of cardinalities
for member in node_members:
card*=member['cardinality']
var_node['cardinality']=card
var_node['inner_factor']=np.zeros([card,1])
if(observed_values_indexes!=-1):
var_node['observed']=variable_index_to_CPD_position(observed_values_indexes,var_node['values'],card)
var_node['inner_factor']-=inf_log
var_node['inner_factor'][var_node['observed']]=inf_log
#Initialize input msgs
for index,f in enumerate(var_node['neighbor_order']):
var_node['input_msgs'].append(0)
return var_node
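## Minimal usage sketch (added illustration, not part of the original library): a single
## binary variable with prior P(X=0)=0.3, P(X=1)=0.7 attached to one factor node.
# x = create_var_node(ID=0, cardinality=2, neighbor_order=[1])
# f = create_factor_node(ID=1, neighbors=[x], CPD=[0.3, 0.7])
# update_factor_to_var(f)          # the factor sends its message to x
# print(compute_var_marginal(x))   # approximately [[0.3], [0.7]]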
| [
[
[
184,
195
],
[
852,
854
],
[
1107,
1109
],
[
2201,
2203
],
[
2336,
2338
],
[
2416,
2418
],
[
2548,
2550
],
[
3546,
3548
],
[
4875,
4877
],
[
4942,
4944
],
[
5589,
5591
],
[
5620,
5622
],
[
5932,
5934
],
[
6616,
6618
],
[
7546,
7548
],
[
7735,
7737
],
[
8241,
8243
],
[
9323,
9325
],
[
9720,
9722
],
[
11213,
11215
],
[
11462,
11464
]
],
[
[
299,
306
],
[
1364,
1371
],
[
1427,
1434
],
[
3699,
3706
],
[
3762,
3769
],
[
6655,
6662
],
[
6710,
6717
],
[
6831,
6838
],
[
6822,
6829
],
[
6882,
6889
],
[
6872,
6879
],
[
9779,
9786
],
[
9770,
9777
],
[
9820,
9827
],
[
9810,
9817
],
[
11666,
11673
],
[
11729,
11736
]
],
[
[
368,
380
],
[
9389,
9401
],
[
9735,
9747
]
],
[
[
477,
492
]
],
[
[
1629,
1643
],
[
3004,
3018
],
[
4202,
4216
],
[
7005,
7019
],
[
9888,
9902
]
],
[
[
1964,
1982
]
],
[
[
3226,
3245
]
],
[
[
3779,
3801
]
],
[
[
4561,
4591
],
[
8561,
8591
]
],
[
[
5233,
5263
],
[
11552,
11582
]
],
[
[
5806,
5826
]
],
[
[
7354,
7374
]
],
[
[
7824,
7844
]
],
[
[
10260,
10277
]
]
] |
from six import text_type
from rest_framework import HTTP_HEADER_ENCODING, exceptions
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from atlassian_connect_django.models.connect import AtlassianUser
from atlassian_connect_django import helpers
from .models import SecurityContextToken
def get_atlassian_security_context_and_user_from_request(request, raise_exceptions=True):
def exception(msg):
if not raise_exceptions:
return None, None
if raise_exceptions == 'rest_framework':
raise exceptions.AuthenticationFailed(msg)
raise PermissionDenied(msg)
auth = request.META.get('HTTP_X_JIRA_SECURITY_CONTEXT', b'')
if isinstance(auth, text_type):
# Work around django test client oddness
auth = auth.encode(HTTP_HEADER_ENCODING)
auth = auth.split()
if not auth or auth[0].lower() != b'token':
return None, None
if len(auth) == 1:
return exception(_('Invalid x-jira-security-context token header. No credentials provided.'))
elif len(auth) > 2:
return exception(_('Invalid x-jira-security-context token header. Token string should not contain spaces.'))
try:
token = auth[1].decode()
except UnicodeError:
return exception(_('Invalid x-jira-security-context token header. Token string should not contain invalid characters.'))
try:
token = SecurityContextToken.objects.select_related('security_context').get(key=token)
except SecurityContextToken.DoesNotExist:
return exception(_('Invalid x-jira-security-context token.'))
if not token.security_context.is_plugin_enabled:
return exception(_('Security context inactive or deleted.'))
site = helpers.get_current_site(request=request)
if site and site != token.security_context.site:
return exception(_('Invalid x-jira-security-context token header. SecurityContext site "%s" not equals to "%s"' % (token.security_context.site.name, site.name)))
atlassian_user = AtlassianUser(accountId=token.atlassian_user_account_id)
atlassian_user.set_secutiry_context(security_context=token.security_context)
return token.security_context, atlassian_user
| [
[
[
16,
25
],
[
762,
771
]
],
[
[
53,
73
],
[
850,
870
]
],
[
[
75,
85
],
[
599,
609
]
],
[
[
121,
137
],
[
650,
666
]
],
[
[
175,
193
],
[
1019,
1020
],
[
1145,
1146
],
[
1329,
1330
],
[
1609,
1610
],
[
1732,
1733
],
[
1908,
1909
]
],
[
[
247,
260
],
[
2074,
2087
]
],
[
[
298,
305
],
[
1788,
1795
]
],
[
[
326,
346
],
[
1459,
1479
],
[
1549,
1569
]
],
[
[
359,
411
]
]
] |
#!/usr/bin/env python
import uuid
from construct import Container
class SMAPI_Request(object):
'''
    Implementation of an ICUV Request
'''
def __init__(self, function_name, target_identifier,
authenticated_userid=b"", password=b"", additional_parameters=b""):
self._function_name = function_name
self._function_name_length = len(function_name)
self._authenticated_userid = authenticated_userid
self._authenticated_userid_length = len(authenticated_userid)
self._password = password
self._password_length = len(password)
self._target_identifier = target_identifier
self._target_identifier_length = len(target_identifier)
self._additional_parameters = additional_parameters
self._additional_parameters_length = len(additional_parameters)
self._input_length = (self._function_name_length + 4 +
self._authenticated_userid_length + 4 +
self._password_length + 4 +
self._target_identifier_length + 4 +
self._additional_parameters_length)
def get_container(self):
return Container(input_length = self._input_length,
function_name_length = self._function_name_length,
function_name = self._function_name,
authenticated_userid_length = self._authenticated_userid_length,
authenticated_userid = self._authenticated_userid,
password_length = self._password_length,
password = self._password,
target_identifier_length = self._target_identifier_length,
target_identifier = self._target_identifier,
additional_parameters = self._additional_parameters)
def __repr__(self):
"<{} (container={})>".format(
self.__class__.__name__,
self.get_container())
| [
[
[
30,
34
]
],
[
[
57,
66
],
[
1221,
1230
]
],
[
[
75,
88
]
]
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
from ppmessage.core.constant import IOS_FAKE_TOKEN
from ppmessage.core.constant import CONVERSATION_TYPE
from ppmessage.core.constant import MESSAGE_SUBTYPE
from ppmessage.core.constant import MESSAGE_STATUS
from ppmessage.core.constant import MESSAGE_TYPE
from ppmessage.core.constant import TASK_STATUS
from ppmessage.core.constant import REDIS_DISPATCHER_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_PUSH_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_MQTTPUSH_KEY
from ppmessage.core.constant import REDIS_GCMPUSH_KEY
from ppmessage.core.constant import REDIS_IOSPUSH_KEY
from ppmessage.core.constant import REDIS_JPUSH_KEY
from ppmessage.core.constant import PPCOM_OFFLINE
from ppmessage.core.constant import YVOBJECT
from ppmessage.core.constant import DIS_SRV
from ppmessage.core.constant import OS
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import OrgGroupUserData
from ppmessage.db.models import AppUserData
from ppmessage.db.models import MessagePush
from ppmessage.db.models import MessagePushTask
from ppmessage.db.models import PCSocketInfo
from ppmessage.db.models import PCSocketDeviceData
from ppmessage.db.models import ConversationUserData
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.utils.datetimestring import datetime_to_timestamp
from ppmessage.core.utils.datetimestring import datetime_to_microsecond_timestamp
from operator import itemgetter
import uuid
import time
import json
import logging
class Meta(type):
def __init__(cls, name, bases, dict_):
type.__init__(cls, name, bases, dict_)
return
Policy = Meta("Policy", (object,), {})
class AbstractPolicy(Policy):
def __init__(self, dis):
self._dis = dis
self._task = dis._task
self._redis = dis.application.redis
self._online_users = set()
self._offline_users = set()
self._devices = set()
self._devices_hash = {}
self._users_hash = {}
self._is_service_user = {}
self._conversation_users = set()
self._conversation_user_datas_uuid = {}
self._conversation_user_datas_hash = {}
self._users = set()
return
@classmethod
def conversation_users(cls, _app_uuid, _conversation_uuid, _redis):
_key = ConversationUserData.__tablename__ + ".conversation_uuid." + _conversation_uuid
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def conversation_datas(cls, _app_uuid, _conversation_uuid, _users, _redis):
_pi = _redis.pipeline()
_pre = ConversationUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pos = ".conversation_uuid." + _conversation_uuid
for _user_uuid in _users:
_key = _pre + _user_uuid + _pos
_pi.get(_key)
_datas = _pi.execute()
return _datas
@classmethod
def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
return []
@classmethod
def app_users(cls, _app_uuid, _is_service_user, _redis):
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user." + str(_is_service_user)
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def distributor_users(cls, _app_uuid, _redis):
# is_service_user == True
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user.True"
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def group_users(cls, _group_uuid, _redis):
_pattern = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
_keys = _redis.smembers(_pattern)
return list(_keys)
@classmethod
def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
return None
@classmethod
def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
return None
def _android_token(self, _user_uuid, _device_uuid):
_token = _user_uuid + "/" + _device_uuid + "/" + self._task["message_type"] + "/" + self._task["uuid"]
return _token
def _body(self):
_message = {}
_message["id"] = self._task.get("uuid")
_message["fi"] = self._task.get("from_uuid")
_message["ti"] = self._task.get("to_uuid")
_message["ft"] = self._task.get("from_type")
_message["tt"] = self._task.get("to_type")
_message["mt"] = self._task.get("message_type")
_message["ms"] = self._task.get("message_subtype")
_message["ci"] = self._task.get("conversation_uuid")
_message["ct"] = self._task.get("conversation_type")
_message["tl"] = self._task.get("title")
_message["bo"] = self._task.get("body")
if _message["ct"] == CONVERSATION_TYPE.S2P:
_message["ti"] = self._task["app_uuid"]
_message["tt"] = YVOBJECT.AP
if isinstance(self._task.get("title"), unicode):
_message["tl"] = self._task.get("title").encode("utf-8")
if isinstance(self._task.get("body"), unicode):
_message["bo"] = self._task.get("body").encode("utf-8")
_message["ts"] = datetime_to_microsecond_timestamp(self._task["createtime"])
self._task["message_body"] = _message
_message_body = json.dumps(self._task["message_body"])
if isinstance(_message_body, unicode):
_message_body = _message_body.encode("utf-8")
_values = {
"uuid": self._task["uuid"],
"task_status": TASK_STATUS.PROCESSED,
"message_body": _message_body,
}
_row = MessagePushTask(**_values)
_row.async_update(self._redis)
_row.update_redis_keys(self._redis)
return
def _user_devices(self, _user_uuid):
_user = self._users_hash.get(_user_uuid)
_is_service_user = self._is_service_user.get(_user_uuid)
if _user == None or _is_service_user == None:
logging.error("no user or is_service_user in hash: %s" % _user_uuid)
return
_user["_online_devices"] = {}
_device_name = ["mobile_device_uuid", "browser_device_uuid"]
if _is_service_user == False:
_device_name = ["ppcom_mobile_device_uuid", "ppcom_browser_device_uuid"]
for _i in _device_name:
_device_uuid = self._users_hash[_user_uuid][_i]
if _device_uuid == None or len(_device_uuid) == 0:
continue
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None:
continue
self._devices_hash[_device_uuid] = _device
self._devices.add(_device_uuid)
if _device.get("device_is_online") == True:
_user["_online_devices"][_device_uuid] = _device
if len(_user["_online_devices"]) > 0:
self._online_users.add(_user_uuid)
else:
self._offline_users.add(_user_uuid)
return
def _users_devices(self):
for _i in self._users:
self._users_hash[_i] = redis_hash_to_dict(self._redis, DeviceUser, _i)
for _i in self._users:
self._user_devices(_i)
logging.info("online : %d, %s" % (len(self._online_users), self._online_users))
logging.info("offline : %d, %s" % (len(self._offline_users), self._offline_users))
return
def _pcsocket_data(self, _device_uuid):
_redis = self._redis
_key = PCSocketDeviceData.__tablename__ + ".device_uuid." + _device_uuid
_pc_socket_uuid = _redis.get(_key)
if _pc_socket_uuid == None:
logging.error("device no pcsocket %s" % _device_uuid)
return None
_info = redis_hash_to_dict(_redis, PCSocketInfo, _pc_socket_uuid)
if _info == None:
            logging.error("dispatcher cannot find pcsocket %s" % str(_pc_socket_uuid))
return None
_d = {"host": _info["host"], "port": _info["port"], "device_uuid": _device_uuid}
return _d
def _push_to_db(self, _user_uuid, _status=MESSAGE_STATUS.PUSHED):
_values = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"task_uuid": self._task["uuid"],
"user_uuid": _user_uuid,
"status": _status
}
_row = MessagePush(**_values)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
return _row.uuid
def _push_to_ios(self, _user_uuid, _device_uuid):
logging.info("push ios %s:%s" % (_user_uuid, _device_uuid))
_app_uuid = self._task["app_uuid"]
_user = self._users_hash.get(_user_uuid)
_device = self._devices_hash.get(_device_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
if _user == None:
logging.error("push ios failed for no user")
return
if _device == None:
logging.error("push ios failed for no device")
return
_token = _device.get("device_ios_token")
if _token == None or len(_token) == 0:
logging.error("push ios failed for no ios token")
return
if _device["device_ios_token"] == IOS_FAKE_TOKEN:
logging.error("push ios failed for fake token")
return
if _conversation_data != None and _conversation_data["user_mute_notification"] == True:
# user only do not want recv push for this conversation
logging.error("push ios failed for silence required")
return
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_is_dev = bool(_device.get("is_development"))
_config = {
"is_development": _is_dev,
"user_language": _user.get("user_language"),
"device_ios_token": _token,
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.info("push ios: %s" % str(_push))
self._redis.rpush(REDIS_IOSPUSH_KEY, json.dumps(_push))
return
def _push_to_android(self, _user_uuid, _device_uuid):
_app_uuid = self._task["app_uuid"]
_device = self._devices_hash.get(_device_uuid)
_user = self._users_hash.get(_user_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_config = {
"user_language": _user.get("user_language"),
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.error("try push for android: %s" % str(_push))
if self._task["_app"].get("enable_jpush"):
_config["device_android_jpush_registrationid"] = _device.get("device_android_jpush_registrationid")
self._redis.rpush(REDIS_JPUSH_KEY, json.dumps(_push))
elif self._task["_app"].get("enable_gcm_push"):
_config["device_android_gcmtoken"] = _device.get("device_android_gcmtoken")
self._redis.rpush(REDIS_GCMPUSH_KEY, json.dumps(_push))
else:
logging.error("no push enable for android: %s" % str(_push))
return
def _push_to_socket(self, _user_uuid, _device_uuid):
_pcsocket = self._pcsocket_data(_device_uuid)
if _pcsocket == None:
logging.error("no pcsocket data for: %s" % _device_uuid)
return
_device = self._devices_hash.get(_device_uuid)
# if _device == None:
# logging.error("no device hash for: %s" % _device_uuid)
# return
_from_user = {}
_from_type = self._task.get("from_type")
_fields = [
"uuid",
"user_icon",
"user_email",
"user_fullname",
"updatetime",
]
if _from_type == YVOBJECT.DU:
for _i in _fields:
_from_user[_i] = self._task["_user"].get(_i)
_from_user["updatetime"] = datetime_to_timestamp(_from_user["updatetime"])
if _from_type == YVOBJECT.OG:
_from_user = self._task["_group"]
if _from_type == YVOBJECT.AP:
_from_user = self._task["_app"]
_body = self._task.get("message_body")
_body["pid"] = _device.get("push_uuid")
_body["from_user"] = _from_user
_push = {
"pcsocket": _pcsocket,
"body": _body
}
_key = REDIS_PUSH_NOTIFICATION_KEY + ".host." + _pcsocket["host"] + ".port." + _pcsocket["port"]
self._redis.rpush(_key, json.dumps(_push))
return
def _push_to_mobile(self, _user_uuid, _device_uuid):
_device = self._devices_hash[_device_uuid]
if _device["device_ostype"] == OS.IOS:
self._push_to_ios(_user_uuid, _device_uuid)
return
if _device["device_ostype"] == OS.AND:
self._push_to_android(_user_uuid, _device_uuid)
return
return
def _push(self):
if len(self._online_users) == 0:
self.no_online_user()
return
for _user_uuid in self._online_users:
_user = self._users_hash[_user_uuid]
_online_devices = _user.get("_online_devices")
_real_push = not _user.get("user_mute_notification")
_pid = self._push_to_db(_user_uuid)
for _device_uuid in _online_devices:
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
if _real_push == True:
self._push_to_mobile(_user_uuid, _device_uuid)
return
def _other_device(self):
"""
        the other device uuid belonging to the same user uuid
"""
if self._task.get("from_device_uuid") == None:
return
if self._task.get("from_type") != YVOBJECT.DU:
return
if self._task.get("_user") == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
if self._task["_user"]["ppcom_mobile_device_uuid"] == None or \
self._task["_user"]["ppcom_browser_device_uuid"] == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.S2S or \
self._task["conversation_type"] == CONVERSATION_TYPE.S2P:
if self._task["_user"]["mobile_device_uuid"] == None or \
self._task["_user"]["browser_device_uuid"] == None:
return
_device_uuid = None
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
_device_uuid = self._task["_user"]["ppcom_mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["ppcom_mobile_device_uuid"]:
_device_uuid = self._task["_user"]["ppcom_browser_device_uuid"]
else:
_device_uuid = self._task["_user"]["mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["mobile_device_uuid"]:
_device_uuid = self._task["_user"]["browser_device_uuid"]
if _device_uuid not in self._devices_hash:
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None or _device["device_is_online"] != True:
return
self._devices_hash[_device_uuid] = _device
_user_uuid = self._task["from_uuid"]
if _user_uuid not in self._users_hash:
self._users_hash[_user_uuid] = self._task["_user"]
_pid = self._push_to_db(_user_uuid)
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
return
def _explicit(self):
"""
explicit message SYS type
"""
_device_uuid = self._task.get("to_device_uuid")
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None:
logging.error("no device:%s" % _device_uuid)
return
_user_uuid = self._task.get("from_uuid")
self._users_hash[_user_uuid] = self._task["_user"]
self._devices_hash[_device_uuid] = _device
# not save db for explicit message
self._push_to_socket(_user_uuid, _device_uuid)
return
def _send_apologize(self, _text):
_task = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"conversation_uuid": self._task["conversation_uuid"],
"conversation_type": CONVERSATION_TYPE.S2P,
"message_type": MESSAGE_TYPE.NOTI,
"message_subtype": MESSAGE_SUBTYPE.TEXT,
"from_uuid": self._task["to_uuid"],
"from_type": self._task["to_type"],
"to_uuid": self._task["to_uuid"],
"to_type": self._task["to_type"],
"body": _text,
"task_status": TASK_STATUS.PENDING,
}
_row = MessagePushTask(**_task)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
_m = {"task_uuid": _row.uuid}
self._redis.rpush(REDIS_DISPATCHER_NOTIFICATION_KEY, json.dumps(_m))
return
def _get_app_apologize(self):
_text = None
_lang = self._task["_user"]["user_language"]
if _lang == None or len(_lang) == 0:
_lang = "zh_cn"
_offline = "offline_" + _lang
_text = self._task["_app"][_offline]
if _text == None:
_text = PPCOM_OFFLINE[_lang]
return _text
def no_online_user(self):
if self._task["conversation_type"] != CONVERSATION_TYPE.P2S:
return
if self._task["_app"].get("return_offline_message") != True:
logging.info("return_offline_message is not set")
return
_text = self._get_app_apologize()
if _text == None:
return
self._send_apologize(_text)
return
def users(self):
_app_uuid = self._task["app_uuid"]
_conversation_uuid = self._task["conversation_uuid"]
_users = AbstractPolicy.conversation_users(_app_uuid, _conversation_uuid, self._redis)
_datas = AbstractPolicy.conversation_datas(_app_uuid, _conversation_uuid, _users, self._redis)
_datas = dict(zip(_users, _datas))
# the is_service_user include the sender user_uuid
_table = AppUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pi = self._redis.pipeline()
for _user_uuid in _users:
_key = _table + _user_uuid
_pi.get(_key)
_is = _pi.execute()
_is_list = []
for _i in _is:
if _i == None or len(_i) == 0:
_is_list.append(False)
continue
_d = json.loads(_i)
_is_list.append(_d.get("is_service_user"))
self._is_service_user = dict(zip(_users, _is_list))
# remove the sender self
if self._task["from_type"] == YVOBJECT.DU:
_user_uuid = self._task["from_uuid"]
if _user_uuid in _users:
_users.remove(_user_uuid)
if _user_uuid in _datas:
del _datas[_user_uuid]
self._users = _users
self._conversation_users = _users
self._conversation_user_datas_uuid = _datas
return
def dispatch(self):
self._body()
if self._task.get("to_device_uuid") != None:
self._explicit()
return
if self._task.get("conversation_uuid") == None:
logging.error("no conversation should be explicit")
return
self.users()
self._users_devices()
self._push()
self._other_device()
return
class BroadcastPolicy(AbstractPolicy):
def __init__(self, dis):
super(BroadcastPolicy, self).__init__(dis)
return
def users(self):
super(BroadcastPolicy, self).users()
return
@classmethod
def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
return AbstractPolicy.distributor_users(_app_uuid, _redis)
@classmethod
def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
_b_users = AbstractPolicy.app_users(_app_uuid, False, _redis)
return _a_users + _b_users
@classmethod
def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
return _a_users
| [
[
[
138,
152
],
[
9799,
9813
]
],
[
[
189,
206
],
[
5201,
5218
],
[
15455,
15472
],
[
15697,
15714
],
[
15770,
15787
],
[
16028,
16045
],
[
18006,
18023
],
[
19087,
19104
]
],
[
[
243,
258
],
[
18107,
18122
]
],
[
[
295,
309
],
[
8593,
8607
]
],
[
[
346,
358
],
[
18057,
18069
]
],
[
[
395,
406
],
[
5994,
6005
],
[
18371,
18382
]
],
[
[
444,
477
],
[
18586,
18619
]
],
[
[
514,
541
],
[
13827,
13854
]
],
[
[
578,
596
]
],
[
[
633,
650
],
[
12362,
12379
]
],
[
[
687,
704
],
[
10959,
10976
]
],
[
[
741,
756
],
[
12152,
12167
]
],
[
[
793,
806
],
[
18964,
18977
]
],
[
[
843,
851
],
[
5305,
5313
],
[
13203,
13211
],
[
13433,
13441
],
[
13530,
13538
],
[
15313,
15321
],
[
20502,
20510
]
],
[
[
888,
895
]
],
[
[
932,
934
],
[
14135,
14137
],
[
14258,
14260
]
],
[
[
968,
976
]
],
[
[
1009,
1019
],
[
7620,
7630
]
],
[
[
1052,
1062
],
[
7003,
7013
],
[
16659,
16669
],
[
17356,
17366
]
],
[
[
1095,
1111
],
[
3997,
4013
]
],
[
[
1144,
1155
],
[
3363,
3374
],
[
3733,
3744
],
[
19885,
19896
]
],
[
[
1188,
1199
],
[
8882,
8893
],
[
10240,
10251
],
[
11395,
11406
]
],
[
[
1232,
1247
],
[
6085,
6100
],
[
18417,
18432
]
],
[
[
1280,
1292
],
[
8265,
8277
]
],
[
[
1325,
1343
],
[
7987,
8005
]
],
[
[
1376,
1396
],
[
2520,
2540
],
[
2812,
2832
]
],
[
[
1431,
1449
],
[
6971,
6989
],
[
7588,
7606
],
[
8238,
8256
],
[
16627,
16645
],
[
17324,
17342
]
],
[
[
1498,
1519
],
[
13347,
13368
]
],
[
[
1568,
1601
],
[
5610,
5643
]
],
[
[
1624,
1634
]
],
[
[
1643,
1647
],
[
8661,
8665
],
[
17844,
17848
]
],
[
[
1655,
1659
]
],
[
[
1667,
1671
],
[
5750,
5754
],
[
10978,
10982
],
[
12169,
12173
],
[
12381,
12385
],
[
13949,
13953
],
[
18621,
18625
],
[
20288,
20292
]
],
[
[
1679,
1686
],
[
6441,
6448
],
[
7712,
7719
],
[
7800,
7807
],
[
8144,
8151
],
[
8334,
8341
],
[
9073,
9080
],
[
9408,
9415
],
[
9512,
9519
],
[
9687,
9694
],
[
9827,
9834
],
[
10079,
10086
],
[
10891,
10898
],
[
11895,
11902
],
[
12426,
12433
],
[
12661,
12668
],
[
17422,
17429
],
[
19211,
19218
],
[
21083,
21090
]
],
[
[
1694,
1698
],
[
1818,
1822
]
],
[
[
1809,
1815
],
[
1869,
1875
]
],
[
[
1854,
1868
],
[
21306,
21320
],
[
19584,
19598
],
[
19679,
19693
],
[
21605,
21619
],
[
21762,
21776
],
[
21831,
21845
],
[
22021,
22035
]
],
[
[
21290,
21305
],
[
21366,
21381
],
[
21454,
21469
]
]
] |
#!/usr/bin/env python
#
# author: syl20bnr (2013)
# goal: Focus the nth window in the current workspace (limited to the first 10)
#
# Example of usage in i3 config:
#
# bindsym $mod+0 exec focus_win.py -n 0
# bindsym $mod+1 exec focus_win.py -n 1
# ... ...
# bindsym $mod+8 exec focus_win.py -n 8
# bindsym $mod+9 exec focus_win.py -n 9
import argparse
from subprocess import Popen
import i3
PARSER = argparse.ArgumentParser(prog='focus_win')
PARSER.add_argument('-n', '--number',
required=True,
type=int,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
help='Window number (limited to [0,9]).')
def focus_nth_window(nth):
    ''' Roughly focus the nth window in the hierarchy (limited to the first 10) '''
wins = get_windows_from_current_workspace()
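    # the 0 key selects the 10th window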
if nth == 0:
nth = 10
cmd = 'i3-msg [con_id={0}] focus'.format(wins[nth-1])
Popen(cmd, shell=True)
def get_windows_from_current_workspace():
res = []
ws = get_current_workspace()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
def get_current_workspace():
''' Returns the current workspace '''
workspaces = i3.msg('get_workspaces')
workspace = i3.filter(tree=workspaces, focused=True)
if workspace:
return workspace[0]['name']
return ''
if __name__ == '__main__':
args = PARSER.parse_args()
focus_nth_window(args.number)
| [
[
[
363,
371
],
[
422,
430
]
],
[
[
395,
400
],
[
942,
947
]
],
[
[
408,
410
],
[
1071,
1073
],
[
1159,
1161
],
[
1363,
1365
],
[
1404,
1406
]
],
[
[
413,
419
],
[
464,
470
],
[
1553,
1559
]
],
[
[
695,
711
],
[
1577,
1593
]
],
[
[
971,
1005
],
[
809,
843
]
],
[
[
1279,
1300
],
[
1031,
1052
]
],
[
[
1546,
1550
],
[
1594,
1598
]
]
] |
import datetime
from jasonpi.normalizers import facebook_profile, google_profile
def test_facebook_profile():
"""
Test that facebook_profile computes
a correct profile received from facebook oauth.
"""
data = {
'email': 'some@email.com',
'first_name': 'Alfred',
'last_name': 'Dupont',
'gender': 'male',
'birthday': '02/25/1970'
}
profile = facebook_profile(data)
assert profile['email'] == data['email']
assert profile['first_name'] == data['first_name']
assert profile['last_name'] == data['last_name']
assert profile['gender'] == data['gender']
assert profile['birthday'] == datetime.date(1970, 2, 25)
def test_google_profile():
"""
Test that google_profile computes
a correct profile received from google oauth.
"""
data = {
'emailAddresses': [{'value': 'some@email.com'}],
'names': [{'givenName': 'Alfred', 'familyName': 'Dupont'}],
'genders': [{'value': 'male'}],
'birthdays': [{'date': {'year': 1970, 'month': 2, 'day': 25}}]
}
profile = google_profile(data)
assert profile['email'] == data['emailAddresses'][0]['value']
assert profile['first_name'] == data['names'][0]['givenName']
assert profile['last_name'] == data['names'][0]['familyName']
assert profile['gender'] == data['genders'][0]['value']
assert profile['birthday'] == datetime.date(1970, 2, 25)
| [
[
[
7,
15
],
[
668,
676
],
[
1410,
1418
]
],
[
[
49,
65
],
[
411,
427
]
],
[
[
67,
81
],
[
1097,
1111
]
],
[
[
88,
109
]
],
[
[
701,
720
]
]
] |
# x_6_8
#
#
class StockError(Exception):
pass
class NumberError(Exception):
pass
order_count = input('How many kibi dango would you like to order?:')
card_number = input('Please enter your card number (e.g. 0000-0000-0000-0000):')
try:
if int(order_count) > 100:
raise StockError
if card_number != '1111-1111-1111-1111':
raise NumberError
except StockError:
    print('Out of stock')
except NumberError:
    print('Card error')
else:
    print('Thank you for your purchase')
| [
[
[
19,
29
],
[
247,
257
],
[
336,
346
]
],
[
[
59,
70
],
[
317,
328
],
[
375,
386
]
],
[
[
94,
105
],
[
213,
224
]
],
[
[
133,
144
],
[
265,
276
]
]
] |
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling asynchronous connections to a blockchain
daemon.'''
import asyncio
import itertools
import json
import time
import aiohttp
from aiorpcx import JSONRPC
from electrumx.lib.util import hex_to_bytes, class_logger
class DaemonError(Exception):
'''Raised when the daemon returns an error in its results.'''
class WarmingUpError(Exception):
'''Internal - when the daemon is warming up.'''
class ServiceRefusedError(Exception):
'''Internal - when the daemon doesn't provide a JSON response, only an HTTP error, for
some reason.'''
class Daemon(object):
'''Handles connections to a daemon at the given URL.'''
WARMING_UP = -28
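    # class-level counter used to assign a unique id to each JSON-RPC request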
id_counter = itertools.count()
def __init__(self, coin, url, *, max_workqueue=10, init_retry=0.25, max_retry=4.0):
self.coin = coin
self.logger = class_logger(__name__, self.__class__.__name__)
self.url_index = None
self.urls = []
self.set_url(url)
# Limit concurrent RPC calls to this number.
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
self.init_retry = init_retry
self.max_retry = max_retry
self._height = None
self.available_rpcs = {}
self.session = None
async def __aenter__(self):
self.session = aiohttp.ClientSession(connector=self.connector())
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.session.close()
self.session = None
def connector(self):
return None
def set_url(self, url):
'''Set the URLS to the given list, and switch to the first one.'''
urls = url.split(',')
urls = [self.coin.sanitize_url(url) for url in urls]
for n, url in enumerate(urls):
status = '' if n else ' (current)'
logged_url = self.logged_url(url)
self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
self.url_index = 0
self.urls = urls
def current_url(self):
'''Returns the current daemon URL.'''
return self.urls[self.url_index]
def logged_url(self, url=None):
'''The host and port part, for logging.'''
url = url or self.current_url()
return url[url.rindex('@') + 1:]
def failover(self):
'''Call to fail-over to the next daemon URL.
Returns False if there is only one, otherwise True.
'''
if len(self.urls) > 1:
self.url_index = (self.url_index + 1) % len(self.urls)
self.logger.info(f'failing over to {self.logged_url()}')
return True
return False
async def _send_data(self, data):
async with self.workqueue_semaphore:
async with self.session.post(self.current_url(), data=data) as resp:
kind = resp.headers.get('Content-Type', None)
if kind == 'application/json':
return await resp.json()
text = await resp.text()
text = text.strip() or resp.reason
raise ServiceRefusedError(text)
async def _send(self, payload, processor):
'''Send a payload to be converted to JSON.
        Handles temporary connection issues. Daemon response errors
        are raised through DaemonError.
'''
def log_error(error):
nonlocal last_error_log, retry
now = time.time()
if now - last_error_log > 60:
last_error_log = now
self.logger.error(f'{error}. Retrying occasionally...')
if retry == self.max_retry and self.failover():
retry = 0
on_good_message = None
last_error_log = 0
data = json.dumps(payload)
retry = self.init_retry
while True:
try:
result = await self._send_data(data)
result = processor(result)
if on_good_message:
self.logger.info(on_good_message)
return result
except asyncio.TimeoutError:
log_error('timeout error')
except aiohttp.ServerDisconnectedError:
log_error('disconnected')
on_good_message = 'connection restored'
except ConnectionResetError:
log_error('connection reset')
on_good_message = 'connection restored'
except aiohttp.ClientConnectionError:
log_error('connection problem - check your daemon is running')
on_good_message = 'connection restored'
except aiohttp.ClientError as e:
log_error(f'daemon error: {e}')
on_good_message = 'running normally'
except ServiceRefusedError as e:
log_error(f'daemon service refused: {e}')
on_good_message = 'running normally'
except WarmingUpError:
log_error('starting up checking blocks')
on_good_message = 'running normally'
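            # wait before retrying; the delay doubles up to max_retry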
await asyncio.sleep(retry)
retry = max(min(self.max_retry, retry * 2), self.init_retry)
async def _send_single(self, method, params=None):
'''Send a single request to the daemon.'''
def processor(result):
err = result['error']
if not err:
return result['result']
if err.get('code') == self.WARMING_UP:
raise WarmingUpError
raise DaemonError(err)
payload = {'method': method, 'id': next(self.id_counter)}
if params:
payload['params'] = params
return await self._send(payload, processor)
async def _send_vector(self, method, params_iterable, replace_errs=False):
'''Send several requests of the same method.
The result will be an array of the same length as params_iterable.
If replace_errs is true, any item with an error is returned as None,
otherwise an exception is raised.'''
def processor(result):
errs = [item['error'] for item in result if item['error']]
if any(err.get('code') == self.WARMING_UP for err in errs):
raise WarmingUpError
if not errs or replace_errs:
return [item['result'] for item in result]
raise DaemonError(errs)
payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
for p in params_iterable]
if payload:
return await self._send(payload, processor)
return []
async def _is_rpc_available(self, method):
'''Return whether given RPC method is available in the daemon.
Results are cached and the daemon will generally not be queried with
the same method more than once.'''
available = self.available_rpcs.get(method)
if available is None:
available = True
try:
await self._send_single(method)
except DaemonError as e:
err = e.args[0]
error_code = err.get("code")
available = error_code != JSONRPC.METHOD_NOT_FOUND
self.available_rpcs[method] = available
return available
async def block_hex_hashes(self, first, count):
'''Return the hex hashes of count block starting at height first.'''
params_iterable = ((h, ) for h in range(first, first + count))
return await self._send_vector('getblockhash', params_iterable)
async def deserialised_block(self, hex_hash):
'''Return the deserialised block with the given hex hash.'''
return await self._send_single('getblock', (hex_hash, True))
async def raw_blocks(self, hex_hashes):
'''Return the raw binary blocks with the given hex hashes.'''
params_iterable = ((h, False) for h in hex_hashes)
blocks = await self._send_vector('getblock', params_iterable)
# Convert hex string to bytes
return [hex_to_bytes(block) for block in blocks]
async def mempool_hashes(self):
'''Update our record of the daemon's mempool hashes.'''
return await self._send_single('getrawmempool')
async def getnetworkinfo(self):
'''Return the result of the 'getnetworkinfo' RPC call.'''
return await self._send_single('getnetworkinfo')
async def getrawtransaction(self, hex_hash, verbose=False):
'''Return the serialized raw transaction with the given hash.'''
# Cast to int because some coin daemons are old and require it
return await self._send_single('getrawtransaction',
(hex_hash, int(verbose)))
async def getrawtransactions(self, hex_hashes, replace_errs=True):
'''Return the serialized raw transactions with the given hashes.
Replaces errors with None by default.'''
params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
txs = await self._send_vector('getrawtransaction', params_iterable,
replace_errs=replace_errs)
# Convert hex strings to bytes
return [hex_to_bytes(tx) if tx else None for tx in txs]
async def broadcast_transaction(self, raw_tx):
'''Broadcast a transaction to the network.'''
return await self._send_single('sendrawtransaction', (raw_tx, ))
async def height(self):
'''Query the daemon for its current height.'''
self._height = await self._send_single('getblockcount')
return self._height
def cached_height(self):
'''Return the cached daemon height.
If the daemon has not been queried yet this returns None.'''
return self._height
| [
[
[
249,
256
],
[
1301,
1308
],
[
4316,
4323
],
[
5325,
5332
]
],
[
[
264,
273
],
[
860,
869
]
],
[
[
281,
285
],
[
3992,
3996
]
],
[
[
293,
297
],
[
3668,
3672
]
],
[
[
306,
313
],
[
1557,
1564
],
[
4400,
4407
],
[
4693,
4700
],
[
4878,
4885
]
],
[
[
334,
341
],
[
7425,
7432
]
],
[
[
374,
386
],
[
8287,
8299
],
[
9439,
9451
]
],
[
[
388,
400
],
[
1014,
1026
]
],
[
[
409,
420
],
[
7288,
7299
],
[
5761,
5772
],
[
6615,
6626
]
],
[
[
507,
521
],
[
5180,
5194
],
[
5728,
5742
],
[
6482,
6496
]
],
[
[
594,
613
],
[
3332,
3351
],
[
5024,
5043
]
],
[
[
745,
751
]
]
] |
# Generated by Django 2.2.4 on 2020-06-21 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('complete', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
[
[
73,
83
],
[
113,
123
],
[
220,
230
]
],
[
[
85,
91
],
[
316,
322
],
[
435,
441
],
[
500,
506
],
[
566,
572
]
],
[
[
103,
112
]
]
] |
"""
The :mod:`sportsbet.datasets` module provides the
tools to download and transform sports betting data.
"""
from ._base import load
from ._soccer._combined import SoccerDataLoader
from ._soccer._fd import FDSoccerDataLoader
from ._soccer._fte import FTESoccerDataLoader
from ._soccer._dummy import DummySoccerDataLoader
__all__ = [
'SoccerDataLoader',
'FDSoccerDataLoader',
'FTESoccerDataLoader',
'DummySoccerDataLoader',
'load',
]
| [
[
[
131,
135
]
],
[
[
167,
183
]
],
[
[
209,
227
]
],
[
[
254,
273
]
],
[
[
302,
323
]
],
[
[
325,
332
]
]
] |
import logging
import os
import urllib
from markupsafe import escape
import paste.httpexceptions
from six import string_types, text_type
from sqlalchemy import false, true
from galaxy import datatypes, model, util, web
from galaxy import managers
from galaxy.datatypes.display_applications.util import decode_dataset_user, encode_dataset_user
from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
from galaxy.util import inflector, smart_str
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web.base.controller import BaseUIController, ERROR, SUCCESS, url_for, UsesExtendedMetadataMixin
from galaxy.web.framework.helpers import grids, iff, time_ago, to_unicode
from galaxy.tools.errors import EmailErrorReporter
log = logging.getLogger( __name__ )
comptypes = []
try:
import zlib # noqa: F401
comptypes.append( 'zip' )
except ImportError:
pass
class HistoryDatasetAssociationListGrid( grids.Grid ):
# Custom columns for grid.
class HistoryColumn( grids.GridColumn ):
def get_value( self, trans, grid, hda):
return escape(hda.history.name)
class StatusColumn( grids.GridColumn ):
def get_value( self, trans, grid, hda ):
if hda.deleted:
return "deleted"
return ""
def get_accepted_filters( self ):
""" Returns a list of accepted filters for this column. """
accepted_filter_labels_and_vals = { "Active" : "False", "Deleted" : "True", "All": "All" }
accepted_filters = []
for label, val in accepted_filter_labels_and_vals.items():
args = { self.key: val }
accepted_filters.append( grids.GridColumnFilter( label, args) )
return accepted_filters
# Grid definition
title = "Saved Datasets"
model_class = model.HistoryDatasetAssociation
template = '/dataset/grid.mako'
default_sort_key = "-update_time"
columns = [
grids.TextColumn( "Name", key="name",
# Link name to dataset's history.
link=( lambda item: iff( item.history.deleted, None, dict( operation="switch", id=item.id ) ) ), filterable="advanced", attach_popup=True ),
HistoryColumn( "History", key="history", sortable=False, target="inbound",
link=( lambda item: iff( item.history.deleted, None, dict( operation="switch_history", id=item.id ) ) ) ),
grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced", grid_name="HistoryDatasetAssocationListGrid" ),
StatusColumn( "Status", key="deleted", attach_popup=False ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
]
columns.append(
grids.MulticolFilterColumn(
"Search",
cols_to_filter=[ columns[0], columns[2] ],
key="free-text-search", visible=False, filterable="standard" )
)
operations = [
grids.GridOperation( "Copy to current history", condition=( lambda item: not item.deleted ), async_compatible=True ),
]
standard_filters = []
default_filter = dict( name="All", deleted="False", tags="All" )
preserve_state = False
use_async = True
use_paging = True
num_rows_per_page = 50
def build_initial_query( self, trans, **kwargs ):
# Show user's datasets that are not deleted, not in deleted histories, and not hidden.
# To filter HDAs by user, need to join model class/HDA and History table so that it is
# possible to filter by user. However, for dictionary-based filtering to work, need a
# primary table for the query.
return trans.sa_session.query( self.model_class ).select_from( self.model_class.table.join( model.History.table ) ) \
.filter( model.History.user == trans.user ) \
.filter( self.model_class.deleted == false() ) \
.filter( model.History.deleted == false() ) \
.filter( self.model_class.visible == true() )
class DatasetInterface( BaseUIController, UsesAnnotations, UsesItemRatings, UsesExtendedMetadataMixin ):
stored_list_grid = HistoryDatasetAssociationListGrid()
def __init__( self, app ):
super( DatasetInterface, self ).__init__( app )
self.history_manager = managers.histories.HistoryManager( app )
self.hda_manager = managers.hdas.HDAManager( app )
def _get_job_for_dataset( self, trans, dataset_id ):
'''
Return the job for the given dataset. This will throw an error if the
dataset is either nonexistent or inaccessible to the user.
'''
hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
assert hda and self._can_access_dataset( trans, hda )
return hda.creating_job
def _can_access_dataset( self, trans, dataset_association, allow_admin=True, additional_roles=None ):
roles = trans.get_current_user_roles()
if additional_roles:
roles = roles + additional_roles
return ( allow_admin and trans.user_is_admin() ) or trans.app.security_agent.can_access_dataset( roles, dataset_association.dataset )
@web.expose
def errors( self, trans, id ):
hda = trans.sa_session.query( model.HistoryDatasetAssociation ).get( self.decode_id( id ) )
if not hda or not self._can_access_dataset( trans, hda ):
return trans.show_error_message( "Either this dataset does not exist or you do not have permission to access it." )
return trans.fill_template( "dataset/errors.mako", hda=hda )
@web.expose
def stdout( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stdout = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stdout = job.stdout
except:
stdout = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stdout )
@web.expose
# TODO: Migrate stderr and stdout to use _get_job_for_dataset; it wasn't tested.
def stderr( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stderr = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stderr = job.stderr
except:
stderr = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stderr )
@web.expose
def exit_code( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
exit_code = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
exit_code = job.exit_code
except:
exit_code = "Invalid dataset ID or you are not allowed to access this dataset"
return exit_code
@web.expose
def report_error( self, trans, id, email='', message="", **kwd ):
biostar_report = 'biostar' in str( kwd.get( 'submit_error_report') ).lower()
if biostar_report:
return trans.response.send_redirect( url_for( controller='biostar', action='biostar_tool_bug_report', hda=id, email=email, message=message ) )
try:
error_reporter = EmailErrorReporter( id, trans.app )
error_reporter.send_report( user=trans.user, email=email, message=message )
return trans.show_ok_message( "Your error report has been sent" )
except Exception as e:
return trans.show_error_message( "An error occurred sending the report by email: %s" % str( e ) )
@web.expose
def default(self, trans, dataset_id=None, **kwd):
return 'This link may not be followed from within Galaxy.'
@web.expose
def get_metadata_file(self, trans, hda_id, metadata_name):
""" Allows the downloading of metadata files associated with datasets (eg. bai index for bam files) """
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id ) )
if not data or not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
fname = ''.join(c in util.FILENAME_VALID_CHARS and c or '_' for c in data.name)[0:150]
file_ext = data.metadata.spec.get(metadata_name).get("file_ext", metadata_name)
trans.response.headers["Content-Type"] = "application/octet-stream"
trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (data.hid, fname, file_ext)
return open(data.metadata.get(metadata_name).file_name)
def _check_dataset(self, trans, hda_id):
# DEPRECATION: We still support unencoded ids for backward compatibility
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id) )
if data is None:
raise ValueError( 'Invalid reference dataset id: %s.' % hda_id)
except:
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( hda_id ) )
except:
data = None
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( hda_id ) )
if not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
if data.purged:
return trans.show_error_message( "The dataset you are attempting to view has been purged." )
if data.deleted and not ( trans.user_is_admin() or ( data.history and trans.get_user() == data.history.user ) ):
return trans.show_error_message( "The dataset you are attempting to view has been deleted." )
if data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to view it." )
return data
@web.expose
@web.json
def transfer_status(self, trans, dataset_id, filename=None):
""" Primarily used for the S3ObjectStore - get the status of data transfer
if the file is not in cache """
data = self._check_dataset(trans, dataset_id)
if isinstance( data, string_types ):
return data
log.debug( "Checking transfer status for dataset %s..." % data.dataset.id )
# Pulling files in extra_files_path into cache is not handled via this
# method but that's primarily because those files are typically linked to
# through tool's output page anyhow so tying a JavaScript event that will
# call this method does not seem doable?
if data.dataset.external_filename:
return True
else:
return trans.app.object_store.file_ready(data.dataset)
@web.expose
def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
data = self._check_dataset(trans, dataset_id)
if not isinstance( data, trans.app.model.DatasetInstance ):
return data
# Ensure offset is an integer before passing through to datatypes.
if offset:
offset = int(offset)
# Ensure ck_size is an integer before passing through to datatypes.
if ck_size:
ck_size = int(ck_size)
return data.datatype.display_data(trans, data, preview, filename, to_ext, offset=offset, ck_size=ck_size, **kwd)
@web.expose
def edit(self, trans, dataset_id=None, filename=None, hid=None, **kwd):
"""Allows user to modify parameters of an HDA."""
message = None
status = 'done'
refresh_frames = []
error = False
def __ok_to_edit_metadata( dataset_id ):
# prevent modifying metadata when dataset is queued or running as input/output
# This code could be more efficient, i.e. by using mappers, but to prevent slowing down loading a History panel, we'll leave the code here for now
for job_to_dataset_association in trans.sa_session.query(
self.app.model.JobToInputDatasetAssociation ) \
.filter_by( dataset_id=dataset_id ) \
.all() \
+ trans.sa_session.query( self.app.model.JobToOutputDatasetAssociation ) \
.filter_by( dataset_id=dataset_id ) \
.all():
if job_to_dataset_association.job.state not in [ job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED ]:
return False
return True
if hid is not None:
history = trans.get_history()
# TODO: hid handling
data = history.datasets[ int( hid ) - 1 ]
id = None
elif dataset_id is not None:
id = self.decode_id( dataset_id )
data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
else:
trans.log_event( "dataset_id and hid are both None, cannot load a dataset to edit" )
return trans.show_error_message( "You must provide a history dataset id to edit" )
if data is None:
trans.log_event( "Problem retrieving dataset (encoded: %s, decoded: %s) with history id %s." % ( str( dataset_id ), str( id ), str( hid ) ) )
return trans.show_error_message( "History dataset id is invalid" )
if dataset_id is not None and data.history.user is not None and data.history.user != trans.user:
trans.log_event( "User attempted to edit an HDA they do not own (encoded: %s, decoded: %s)" % ( dataset_id, id ) )
# Do not reveal the dataset's existence
return trans.show_error_message( "History dataset id is invalid" )
current_user_roles = trans.get_current_user_roles()
if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
# Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
# so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
# permission. In this case, we'll reset this permission to the hda user's private role.
manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
if self._can_access_dataset( trans, data ):
if data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
params = util.Params( kwd, sanitize=False )
if params.change:
# The user clicked the Save button on the 'Change data type' form
if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
# prevent modifying datatype when dataset is queued or running as input/output
if not __ok_to_edit_metadata( data.id ):
message = "This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them."
error = True
else:
trans.app.datatypes_registry.change_datatype( data, params.datatype )
trans.sa_session.flush()
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data }, overwrite=False ) # overwrite is False as per existing behavior
message = "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype )
refresh_frames = ['history']
else:
message = "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype )
error = True
elif params.save:
# The user clicked the Save button on the 'Edit Attributes' form
data.name = params.name if params.name else ''
data.info = params.info if params.info else ''
message = ''
if __ok_to_edit_metadata( data.id ):
# The following for loop will save all metadata_spec items
for name, spec in data.datatype.metadata_spec.items():
if spec.get("readonly"):
continue
optional = params.get("is_" + name, None)
other = params.get("or_" + name, None)
if optional and optional == '__NOTHING__':
# optional element... == '__NOTHING__' actually means it is NOT checked (and therefore omitted)
setattr(data.metadata, name, None)
else:
if other:
setattr( data.metadata, name, other )
else:
setattr( data.metadata, name, spec.unwrap( params.get(name, None) ) )
data.datatype.after_setting_metadata( data )
# Sanitize annotation before adding it.
if params.annotation:
annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
                    # This block of controller code is inactive until the 'extended_metadata' edit box is added back into the UI
# Add or delete extended metadata
# if params.extended_metadata:
# em_string = params.extended_metadata
# if len(em_string):
# em_payload = None
# try:
# em_payload = loads(em_string)
# except Exception as e:
# message = 'Invalid JSON input'
# error = True
# if em_payload is not None:
# if data is not None:
# ex_obj = self.get_item_extended_metadata_obj(trans, data)
# if ex_obj is not None:
# self.unset_item_extended_metadata_obj(trans, data)
# self.delete_extended_metadata(trans, ex_obj)
# ex_obj = self.create_extended_metadata(trans, em_payload)
# self.set_item_extended_metadata_obj(trans, data, ex_obj)
# message = "Updated Extended metadata '%s'." % data.name
# status = 'done'
# else:
# message = "data not found"
# error = True
# else:
# if data is not None:
# ex_obj = self.get_item_extended_metadata_obj(trans, data)
# if ex_obj is not None:
# self.unset_item_extended_metadata_obj(trans, data)
# self.delete_extended_metadata(trans, ex_obj)
# message = "Deleted Extended metadata '%s'." % data.name
# status = 'done'
# If setting metadata previously failed and all required elements have now been set, clear the failed state.
if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
data._state = None
trans.sa_session.flush()
message = "Attributes updated%s" % message
refresh_frames = ['history']
else:
trans.sa_session.flush()
message = "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata."
status = "warning"
refresh_frames = ['history']
elif params.detect:
# The user clicked the Auto-detect button on the 'Edit Attributes' form
# prevent modifying metadata when dataset is queued or running as input/output
if not __ok_to_edit_metadata( data.id ):
message = "This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them."
error = True
else:
for name, spec in data.metadata.spec.items():
# We need to be careful about the attributes we are resetting
if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
if spec.get( 'default' ):
setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
message = 'Attributes have been queued to be updated'
trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data } )
trans.sa_session.flush()
refresh_frames = ['history']
elif params.convert_data:
target_type = kwd.get("target_type", None)
if target_type:
message = data.datatype.convert_dataset(trans, data, target_type)
refresh_frames = ['history']
elif params.update_roles_button:
if not trans.user:
return trans.show_error_message( "You must be logged in if you want to change permissions." )
if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
# The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
# need to ensure that they did not associate roles that would cause accessibility problems.
permissions, in_roles, error, message = \
trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
if error:
# Keep the original role associations for the DATASET_ACCESS permission on the dataset.
permissions[ access_action ] = data.dataset.get_access_roles( trans )
status = 'error'
else:
error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
if error:
message += error
status = 'error'
else:
message = 'Your changes completed successfully.'
trans.sa_session.refresh( data.dataset )
else:
message = "You are not authorized to change this dataset's permissions"
error = True
else:
if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
                    # Copy dbkey into metadata, for backwards compatibility
# This looks like it does nothing, but getting the dbkey
# returns the metadata dbkey unless it is None, in which
# case it resorts to the old dbkey. Setting the dbkey
# sets it properly in the metadata
# This is likely no longer required, since the dbkey exists entirely within metadata (the old_dbkey field is gone): REMOVE ME?
data.metadata.dbkey = data.dbkey
# let's not overwrite the imported datatypes module with the variable datatypes?
# the built-in 'id' is overwritten in lots of places as well
ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
ldatatypes.sort()
all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
if error:
status = 'error'
return trans.fill_template( "/dataset/edit_attributes.mako",
data=data,
data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
datatypes=ldatatypes,
current_user_roles=current_user_roles,
all_roles=all_roles,
message=message,
status=status,
dataset_id=dataset_id,
refresh_frames=refresh_frames )
else:
return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) )
@web.expose
@web.require_login( "see all available datasets" )
def list( self, trans, **kwargs ):
"""List all available datasets"""
status = message = None
if 'operation' in kwargs:
operation = kwargs['operation'].lower()
hda_ids = util.listify( kwargs.get( 'id', [] ) )
# Display no message by default
status, message = None, None
# Load the hdas and ensure they all belong to the current user
hdas = []
for encoded_hda_id in hda_ids:
hda_id = self.decode_id( encoded_hda_id )
hda = trans.sa_session.query( model.HistoryDatasetAssociation ).filter_by( id=hda_id ).first()
if hda:
# Ensure history is owned by current user
if hda.history.user_id is not None and trans.user:
assert trans.user.id == hda.history.user_id, "HistoryDatasetAssocation does not belong to current user"
hdas.append( hda )
else:
log.warning( "Invalid history_dataset_association id '%r' passed to list", hda_id )
if hdas:
if operation == "switch" or operation == "switch_history":
# Switch to a history that the HDA resides in.
# Convert hda to histories.
histories = []
for hda in hdas:
histories.append( hda.history )
# Use history controller to switch the history. TODO: is this reasonable?
status, message = trans.webapp.controllers['history']._list_switch( trans, histories )
# Current history changed, refresh history frame; if switching to a dataset, set hda seek.
trans.template_context['refresh_frames'] = ['history']
if operation == "switch":
hda_ids = [ trans.security.encode_id( hda.id ) for hda in hdas ]
trans.template_context[ 'seek_hda_ids' ] = hda_ids
elif operation == "copy to current history":
#
# Copy datasets to the current history.
#
target_histories = [ trans.get_history() ]
# Reverse HDAs so that they appear in the history in the order they are provided.
hda_ids.reverse()
status, message = self._copy_datasets( trans, hda_ids, target_histories )
# Current history changed, refresh history frame.
trans.template_context['refresh_frames'] = ['history']
# Render the list view
return self.stored_list_grid( trans, status=status, message=message, **kwargs )
@web.expose
def imp( self, trans, dataset_id=None, **kwd ):
""" Import another user's dataset via a shared URL; dataset is added to user's current history. """
# Set referer message.
referer = trans.request.referer
if referer:
referer_message = "<a href='%s'>return to the previous page</a>" % escape(referer)
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' )
# Error checking.
if not dataset_id:
return trans.show_error_message( "You must specify a dataset to import. You can %s." % referer_message, use_panels=True )
# Do import.
cur_history = trans.get_history( create=True )
status, message = self._copy_datasets( trans, [ dataset_id ], [ cur_history ], imported=True )
message = "Dataset imported. <br>You can <a href='%s'>start using the dataset</a> or %s." % ( url_for('/'), referer_message )
return trans.show_message( message, type=status, use_panels=True )
@web.expose
@web.json
@web.require_login( "use Galaxy datasets" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns dataset's name and link. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
return_dict = { "name" : dataset.name, "link" : url_for( controller='dataset', action="display_by_username_and_slug", username=dataset.history.user.username, slug=trans.security.encode_id( dataset.id ) ) }
return return_dict
@web.expose
def get_embed_html_async( self, trans, id ):
""" Returns HTML for embedding a dataset in a page. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset:
return "Embedded Dataset '%s'" % dataset.name
@web.expose
@web.require_login( "use Galaxy datasets" )
def set_accessible_async( self, trans, id=None, accessible=False ):
""" Does nothing because datasets do not have an importable/accessible attribute. This method could potentially set another attribute. """
return
@web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
""" Rate a dataset asynchronously and return updated community data. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
return trans.show_error_message( "The specified dataset does not exist." )
# Rate dataset.
self.rate_item( trans.sa_session, trans.get_user(), dataset, rating )
return self.get_ave_item_rating_data( trans.sa_session, dataset )
@web.expose
def display_by_username_and_slug( self, trans, username, slug, filename=None, preview=True ):
""" Display dataset by username and slug; because datasets do not yet have slugs, the slug is the dataset's id. """
id = slug
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset:
# Filename used for composite types.
if filename:
return self.display( trans, dataset_id=slug, filename=filename)
truncated, dataset_data = self.hda_manager.text_data( dataset, preview )
dataset.annotation = self.get_item_annotation_str( trans.sa_session, dataset.history.user, dataset )
# If dataset is chunkable, get first chunk.
first_chunk = None
if dataset.datatype.CHUNKABLE:
first_chunk = dataset.datatype.get_chunk(trans, dataset, 0)
# If data is binary or an image, stream without template; otherwise, use display template.
# TODO: figure out a way to display images in display template.
if isinstance(dataset.datatype, datatypes.binary.Binary) or isinstance(dataset.datatype, datatypes.images.Image) or isinstance(dataset.datatype, datatypes.text.Html):
trans.response.set_content_type( dataset.get_mime() )
return open( dataset.file_name )
else:
# Get rating data.
user_item_rating = 0
if trans.get_user():
user_item_rating = self.get_user_item_rating( trans.sa_session, trans.get_user(), dataset )
if user_item_rating:
user_item_rating = user_item_rating.rating
else:
user_item_rating = 0
ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, dataset )
return trans.fill_template_mako( "/dataset/display.mako", item=dataset, item_data=dataset_data,
truncated=truncated, user_item_rating=user_item_rating,
ave_item_rating=ave_item_rating, num_ratings=num_ratings,
first_chunk=first_chunk )
else:
raise web.httpexceptions.HTTPNotFound()
@web.expose
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset is None:
raise web.httpexceptions.HTTPNotFound()
truncated, dataset_data = self.hda_manager.text_data( dataset, preview=True )
# Get annotation.
dataset.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
return trans.stream_template_mako( "/dataset/item_content.mako", item=dataset, item_data=dataset_data, truncated=truncated )
@web.expose
def annotate_async( self, trans, id, new_annotation=None, **kwargs ):
# TODO:?? why is this an access check only?
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
if dataset and new_annotation:
# Sanitize annotation before adding it.
new_annotation = sanitize_html( new_annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), dataset, new_annotation )
trans.sa_session.flush()
return new_annotation
@web.expose
def get_annotation_async( self, trans, id ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
if annotation and isinstance( annotation, text_type ):
annotation = annotation.encode( 'ascii', 'replace' ) # paste needs ascii here
return annotation
@web.expose
def display_at( self, trans, dataset_id, filename=None, **kwd ):
"""Sets up a dataset permissions so it is viewable at an external site"""
if not trans.app.config.enable_old_display_applications:
return trans.show_error_message( "This method of accessing external display applications has been disabled by a Galaxy administrator." )
site = filename
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataset_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
if 'display_url' not in kwd or 'redirect_url' not in kwd:
return trans.show_error_message( 'Invalid parameters specified for "display at" link, please contact a Galaxy administrator' )
try:
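            # substitute the quoted display_url into the redirect template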
redirect_url = kwd['redirect_url'] % urllib.quote_plus( kwd['display_url'] )
except:
redirect_url = kwd['redirect_url'] # not all will need custom text
if trans.app.security_agent.dataset_is_public( data.dataset ):
return trans.response.send_redirect( redirect_url ) # anon access already permitted by rbac
if self._can_access_dataset( trans, data ):
trans.app.host_security_agent.set_dataset_permissions( data, trans.user, site )
return trans.response.send_redirect( redirect_url )
else:
return trans.show_error_message( "You are not allowed to view this dataset at external sites. Please contact your Galaxy administrator to acquire management permissions for this dataset." )
@web.expose
def display_application( self, trans, dataset_id=None, user_id=None, app_name=None, link_name=None, app_action=None, action_param=None, action_param_extra=None, **kwds ):
"""Access to external display applications"""
# Build list of parameters to pass in to display application logic (app_kwds)
app_kwds = {}
for name, value in dict(kwds).iteritems(): # clone kwds because we remove stuff as we go.
if name.startswith( "app_" ):
app_kwds[ name[ len( "app_" ): ] ] = value
del kwds[ name ]
if kwds:
log.debug( "Unexpected Keywords passed to display_application: %s" % kwds ) # route memory?
# decode ids
data, user = decode_dataset_user( trans, dataset_id, user_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
if user is None:
user = trans.user
if user:
user_roles = user.all_roles()
else:
user_roles = []
# Decode application name and link name
app_name = urllib.unquote_plus( app_name )
link_name = urllib.unquote_plus( link_name )
if None in [ app_name, link_name ]:
return trans.show_error_message( "A display application name and link name must be provided." )
if self._can_access_dataset( trans, data, additional_roles=user_roles ):
msg = []
preparable_steps = []
refresh = False
display_app = trans.app.datatypes_registry.display_applications.get( app_name )
if not display_app:
log.debug( "Unknown display application has been requested: %s", app_name )
return paste.httpexceptions.HTTPNotFound( "The requested display application (%s) is not available." % ( app_name ) )
dataset_hash, user_hash = encode_dataset_user( trans, data, user )
try:
display_link = display_app.get_link( link_name, data, dataset_hash, user_hash, trans, app_kwds )
except Exception as e:
log.debug( "Error generating display_link: %s", e )
# User can sometimes recover from, e.g. conversion errors by fixing input metadata, so use conflict
return paste.httpexceptions.HTTPConflict( "Error generating display_link: %s" % e )
if not display_link:
log.debug( "Unknown display link has been requested: %s", link_name )
return paste.httpexceptions.HTTPNotFound( "Unknown display link has been requested: %s" % link_name )
if data.state == data.states.ERROR:
msg.append( ( 'This dataset is in an error state, you cannot view it at an external display application.', 'error' ) )
elif data.deleted:
msg.append( ( 'This dataset has been deleted, you cannot view it at an external display application.', 'error' ) )
elif data.state != data.states.OK:
msg.append( ( 'You must wait for this dataset to be created before you can view it at an external display application.', 'info' ) )
refresh = True
else:
# We have permissions, dataset is not deleted and is in OK state, allow access
if display_link.display_ready():
if app_action in [ 'data', 'param' ]:
assert action_param, "An action param must be provided for a data or param action"
# data is used for things with filenames that could be passed off to a proxy
# in case some display app wants all files to be in the same 'directory',
# data can be forced to param, but not the other way (no filename for other direction)
# get param name from url param name
try:
action_param = display_link.get_param_name_by_url( action_param )
except ValueError as e:
log.debug( e )
return paste.httpexceptions.HTTPNotFound( str( e ) )
value = display_link.get_param_value( action_param )
assert value, "An invalid parameter name was provided: %s" % action_param
assert value.parameter.viewable, "This parameter is not viewable."
if value.parameter.type == 'data':
try:
if action_param_extra:
assert value.parameter.allow_extra_files_access, "Extra file content requested (%s), but allow_extra_files_access is False." % ( action_param_extra )
file_name = os.path.join( value.extra_files_path, action_param_extra )
else:
file_name = value.file_name
content_length = os.path.getsize( file_name )
rval = open( file_name )
except OSError as e:
log.debug( "Unable to access requested file in display application: %s", e )
return paste.httpexceptions.HTTPNotFound( "This file is no longer available." )
else:
rval = str( value )
content_length = len( rval )
trans.response.set_content_type( value.mime_type( action_param_extra=action_param_extra ) )
trans.response.headers[ 'Content-Length' ] = content_length
return rval
elif app_action is None:
# redirect user to url generated by display link
# Fix for Safari caching display links, which can change if the underlying dataset has an attribute change, e.g. name, metadata, etc
trans.response.headers[ 'Cache-Control' ] = [ 'no-cache', 'max-age=0', 'no-store', 'must-revalidate' ]
return trans.response.send_redirect( display_link.display_url() )
else:
msg.append( ( 'Invalid action provided: %s' % app_action, 'error' ) )
else:
if app_action is None:
if trans.history != data.history:
msg.append( ( 'You must import this dataset into your current history before you can view it at the desired display application.', 'error' ) )
else:
refresh = True
msg.append( ( 'Launching this display application required additional datasets to be generated, you can view the status of these jobs below. ', 'info' ) )
if not display_link.preparing_display():
display_link.prepare_display()
preparable_steps = display_link.get_prepare_steps()
else:
raise Exception( 'Attempted a view action (%s) on a non-ready display application' % app_action )
return trans.fill_template_mako( "dataset/display_application/display.mako",
msg=msg,
display_app=display_app,
display_link=display_link,
refresh=refresh,
preparable_steps=preparable_steps )
return trans.show_error_message( 'You do not have permission to view this dataset at an external display application.' )
def _delete( self, trans, dataset_id ):
message = None
status = 'done'
id = None
try:
id = self.decode_id( dataset_id )
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
assert hda, 'Invalid HDA: %s' % id
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
hda.mark_deleted()
hda.clear_associated_files()
trans.log_event( "Dataset id %s marked as deleted" % str(id) )
self.hda_manager.stop_creating_job( hda )
trans.sa_session.flush()
except Exception as e:
msg = 'HDA deletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
log.exception( msg + ': ' + str( e ) )
trans.log_event( msg )
message = 'Dataset deletion failed'
status = 'error'
return ( message, status )
def _undelete( self, trans, dataset_id ):
message = None
status = 'done'
id = None
try:
id = self.decode_id( dataset_id )
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
assert hda and hda.undeletable, 'Invalid HDA: %s' % id
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_undeleted()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been undeleted" % str(id) )
except Exception:
msg = 'HDA undeletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset undeletion failed'
status = 'error'
return ( message, status )
def _unhide( self, trans, dataset_id ):
try:
id = self.decode_id( dataset_id )
except:
return False
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
if hda:
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_unhidden()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been unhidden" % str(id) )
return True
return False
def _purge( self, trans, dataset_id ):
message = None
status = 'done'
try:
id = self.decode_id( dataset_id )
user = trans.get_user()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
# Invalid HDA
assert hda, 'Invalid history dataset ID'
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
# If the user is anonymous, make sure the HDA is owned by the current session.
if not user:
current_history_id = trans.galaxy_session.current_history_id
assert topmost_parent.history.id == current_history_id, 'Data does not belong to current user'
# If the user is known, make sure the HDA is owned by the current user.
else:
assert topmost_parent.history.user == user, 'Data does not belong to current user'
# Ensure HDA is deleted
hda.deleted = True
# HDA is purgeable
# Decrease disk usage first
if user:
user.adjust_total_disk_usage(-hda.quota_amount(user))
# Mark purged
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
# Don't delete anything if there are active HDAs or any LDDAs, even if
# the LDDAs are deleted. Let the cleanup scripts get it in the latter
# case.
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of HDA (%s):' % ( hda.dataset.id, hda.id ) )
trans.sa_session.flush()
except Exception as exc:
msg = 'HDA purge failed (encoded: %s, decoded: %s): %s' % ( dataset_id, id, exc )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset removal from disk failed'
status = 'error'
return ( message, status )
@web.expose
def delete( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
message, status = self._delete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def delete_async( self, trans, dataset_id, filename ):
message, status = self._delete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def undelete( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=True, message=message, status=status ) )
@web.expose
def undelete_async( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def unhide( self, trans, dataset_id, filename ):
if self._unhide( trans, dataset_id ):
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_hidden=True ) )
raise Exception( "Error unhiding" )
@web.expose
def purge( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def purge_async( self, trans, dataset_id, filename ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def show_params( self, trans, dataset_id=None, from_noframe=None, **kwd ):
"""
Show the parameters used for the job associated with an HDA
"""
try:
hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
except ValueError:
hda = None
if not hda:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % escape( str( dataset_id ) ) )
if not self._can_access_dataset( trans, hda ):
return trans.show_error_message( "You are not allowed to access this dataset" )
# Get the associated job, if any. If this hda was copied from another,
        # we need to find the job that created the original dataset association.
params_objects = None
job = None
tool = None
upgrade_messages = {}
has_parameter_errors = False
inherit_chain = hda.source_dataset_chain
if inherit_chain:
job_dataset_association = inherit_chain[-1][0]
else:
job_dataset_association = hda
if job_dataset_association.creating_job_associations:
job = job_dataset_association.creating_job_associations[0].job
if job:
# Get the tool object
try:
# Load the tool
toolbox = self.get_toolbox()
tool = toolbox.get_tool( job.tool_id )
assert tool is not None, 'Requested tool has not been loaded.'
# Load parameter objects, if a parameter type has changed, it's possible for the value to no longer be valid
try:
params_objects = job.get_param_values( trans.app, ignore_errors=False )
except:
params_objects = job.get_param_values( trans.app, ignore_errors=True )
# use different param_objects in the following line, since we want to display original values as much as possible
upgrade_messages = tool.check_and_update_param_values( job.get_param_values( trans.app, ignore_errors=True ),
trans,
update_values=False )
has_parameter_errors = True
except:
pass
if job is None:
return trans.show_error_message( "Job information is not available for this dataset." )
# TODO: we should provide the basic values along with the objects, in order to better handle reporting of old values during upgrade
return trans.fill_template( "show_params.mako",
inherit_chain=inherit_chain,
history=trans.get_history(),
hda=hda,
job=job,
tool=tool,
params_objects=params_objects,
upgrade_messages=upgrade_messages,
has_parameter_errors=has_parameter_errors )
@web.expose
def copy_datasets( self, trans, source_history=None, source_content_ids="", target_history_id=None, target_history_ids="", new_history_name="", do_copy=False, **kwd ):
user = trans.get_user()
if source_history is not None:
decoded_source_history_id = self.decode_id( source_history )
history = self.history_manager.get_owned( decoded_source_history_id, trans.user, current_history=trans.history )
current_history = trans.get_history()
else:
history = current_history = trans.get_history()
refresh_frames = []
if source_content_ids:
if not isinstance( source_content_ids, list ):
source_content_ids = source_content_ids.split(",")
encoded_dataset_collection_ids = [ s[ len("dataset_collection|"): ] for s in source_content_ids if s.startswith("dataset_collection|") ]
encoded_dataset_ids = [ s[ len("dataset|"): ] for s in source_content_ids if s.startswith("dataset|") ]
decoded_dataset_collection_ids = set(map( self.decode_id, encoded_dataset_collection_ids ))
decoded_dataset_ids = set(map( self.decode_id, encoded_dataset_ids ))
else:
decoded_dataset_collection_ids = []
decoded_dataset_ids = []
if new_history_name:
target_history_ids = []
else:
if target_history_id:
target_history_ids = [ self.decode_id(target_history_id) ]
elif target_history_ids:
if not isinstance( target_history_ids, list ):
target_history_ids = target_history_ids.split(",")
target_history_ids = list(set([ self.decode_id(h) for h in target_history_ids if h ]))
else:
target_history_ids = []
done_msg = error_msg = ""
new_history = None
if do_copy:
invalid_contents = 0
if not ( decoded_dataset_ids or decoded_dataset_collection_ids ) or not ( target_history_ids or new_history_name ):
error_msg = "You must provide both source datasets and target histories. "
else:
if new_history_name:
new_history = trans.app.model.History()
new_history.name = new_history_name
new_history.user = user
trans.sa_session.add( new_history )
trans.sa_session.flush()
target_history_ids.append( new_history.id )
if user:
target_histories = [ hist for hist in map( trans.sa_session.query( trans.app.model.History ).get, target_history_ids ) if hist is not None and hist.user == user ]
else:
target_histories = [ history ]
if len( target_histories ) != len( target_history_ids ):
error_msg = error_msg + "You do not have permission to add datasets to %i requested histories. " % ( len( target_history_ids ) - len( target_histories ) )
source_contents = map( trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get, decoded_dataset_ids )
source_contents.extend( map( trans.sa_session.query( trans.app.model.HistoryDatasetCollectionAssociation ).get, decoded_dataset_collection_ids ) )
source_contents.sort(key=lambda content: content.hid)
for content in source_contents:
if content is None:
error_msg = error_msg + "You tried to copy a dataset that does not exist. "
invalid_contents += 1
elif content.history != history:
error_msg = error_msg + "You tried to copy a dataset which is not in your current history. "
invalid_contents += 1
else:
for hist in target_histories:
if content.history_content_type == "dataset":
hist.add_dataset( content.copy( copy_children=True ) )
else:
copy_collected_datasets = True
copy_kwds = {}
if copy_collected_datasets:
copy_kwds["element_destination"] = hist
hist.add_dataset_collection( content.copy( **copy_kwds ) )
if current_history in target_histories:
refresh_frames = ['history']
trans.sa_session.flush()
hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' %
( url_for( controller="history", action="switch_to_history",
hist_id=trans.security.encode_id( hist.id ) ), escape(hist.name) )
for hist in target_histories ] )
num_source = len( source_content_ids ) - invalid_contents
num_target = len(target_histories)
done_msg = "%i %s copied to %i %s: %s." % (num_source, inflector.cond_plural(num_source, "dataset"), num_target, inflector.cond_plural(num_target, "history"), hist_names_str )
trans.sa_session.refresh( history )
source_contents = history.active_contents
target_histories = [history]
if user:
target_histories = user.active_histories
return trans.fill_template( "/dataset/copy_view.mako",
source_history=history,
current_history=current_history,
source_content_ids=source_content_ids,
target_history_id=target_history_id,
target_history_ids=target_history_ids,
source_contents=source_contents,
target_histories=target_histories,
new_history_name=new_history_name,
done_msg=done_msg,
error_msg=error_msg,
refresh_frames=refresh_frames )
def _copy_datasets( self, trans, dataset_ids, target_histories, imported=False ):
""" Helper method for copying datasets. """
user = trans.get_user()
done_msg = error_msg = ""
invalid_datasets = 0
if not dataset_ids or not target_histories:
error_msg = "You must provide both source datasets and target histories."
else:
# User must own target histories to copy datasets to them.
for history in target_histories:
if user != history.user:
error_msg = error_msg + "You do not have permission to add datasets to %i requested histories. " % ( len( target_histories ) )
for dataset_id in dataset_ids:
decoded_id = self.decode_id( dataset_id )
data = self.hda_manager.get_accessible( decoded_id, trans.user )
data = self.hda_manager.error_if_uploading( data )
if data is None:
error_msg = error_msg + "You tried to copy a dataset that does not exist or that you do not have access to. "
invalid_datasets += 1
else:
for hist in target_histories:
dataset_copy = data.copy( copy_children=True )
if imported:
dataset_copy.name = "imported: " + dataset_copy.name
hist.add_dataset( dataset_copy )
trans.sa_session.flush()
num_datasets_copied = len( dataset_ids ) - invalid_datasets
done_msg = "%i dataset%s copied to %i histor%s." % \
( num_datasets_copied, iff( num_datasets_copied == 1, "", "s"), len( target_histories ), iff( len( target_histories ) == 1, "y", "ies") )
trans.sa_session.refresh( history )
if error_msg != "":
status = ERROR
message = error_msg
else:
status = SUCCESS
message = done_msg
return status, message
| [
[
[
7,
14
],
[
751,
758
]
],
[
[
22,
24
],
[
43625,
43627
],
[
43835,
43837
]
],
[
[
32,
38
],
[
37987,
37993
],
[
39898,
39904
],
[
39950,
39956
]
],
[
[
63,
69
],
[
1092,
1098
],
[
29848,
29854
],
[
55191,
55197
],
[
62980,
62986
]
],
[
[
77,
97
],
[
9402,
9407
],
[
37609,
37614
],
[
39564,
39569
],
[
40538,
40543
],
[
41100,
41105
],
[
41319,
41324
],
[
42932,
42937
],
[
44118,
44123
],
[
55100,
55105
]
],
[
[
114,
126
],
[
10524,
10536
]
],
[
[
128,
137
],
[
36933,
36942
]
],
[
[
161,
166
],
[
3977,
3982
],
[
4035,
4040
]
],
[
[
168,
172
],
[
4096,
4100
]
],
[
[
193,
202
],
[
33759,
33768
],
[
33816,
33825
],
[
33872,
33881
]
],
[
[
204,
209
],
[
1844,
1849
],
[
2536,
2541
],
[
3844,
3849
],
[
3891,
3896
],
[
4010,
4015
],
[
5390,
5395
],
[
27316,
27321
]
],
[
[
211,
215
],
[
8380,
8384
],
[
15273,
15277
],
[
26946,
26950
]
],
[
[
217,
220
],
[
5306,
5309
],
[
5722,
5725
],
[
6122,
6125
],
[
6607,
6610
],
[
7012,
7015
],
[
7751,
7754
],
[
7889,
7892
],
[
10228,
10231
],
[
10244,
10247
],
[
11095,
11098
],
[
11761,
11764
],
[
26658,
26661
],
[
26674,
26677
],
[
29507,
29510
],
[
30551,
30554
],
[
30567,
30570
],
[
30581,
30584
],
[
31160,
31163
],
[
31551,
31554
],
[
31567,
31570
],
[
31850,
31853
],
[
31866,
31869
],
[
31905,
31908
],
[
32521,
32524
],
[
35009,
35012
],
[
35738,
35741
],
[
36479,
36482
],
[
37069,
37072
],
[
38730,
38733
],
[
52274,
52277
],
[
52603,
52606
],
[
52845,
52848
],
[
53128,
53131
],
[
53374,
53377
],
[
53654,
53657
],
[
54218,
54221
],
[
54695,
54698
],
[
58041,
58044
],
[
34969,
34972
],
[
35354,
35357
],
[
36094,
36097
],
[
36758,
36761
],
[
52474,
52477
],
[
53018,
53021
],
[
53533,
53536
],
[
54089,
54092
]
],
[
[
240,
248
],
[
4391,
4399
],
[
4459,
4467
]
],
[
[
304,
323
],
[
39475,
39494
]
],
[
[
325,
344
],
[
40687,
40706
]
],
[
[
381,
396
],
[
4149,
4164
]
],
[
[
398,
413
],
[
4166,
4181
]
],
[
[
438,
447
],
[
63273,
63282
],
[
63331,
63340
]
],
[
[
449,
458
],
[
6096,
6105
],
[
6581,
6590
]
],
[
[
497,
510
],
[
18184,
18197
],
[
36248,
36261
]
],
[
[
550,
566
],
[
4131,
4147
]
],
[
[
568,
573
],
[
66301,
66306
]
],
[
[
575,
582
],
[
66374,
66381
]
],
[
[
584,
591
],
[
7254,
7261
],
[
29955,
29962
],
[
30438,
30445
],
[
30969,
30976
],
[
62819,
62826
]
],
[
[
593,
618
],
[
4183,
4208
]
],
[
[
660,
665
],
[
935,
940
],
[
1005,
1010
],
[
1142,
1147
],
[
1974,
1979
],
[
2460,
2465
],
[
2731,
2736
],
[
2837,
2842
],
[
3050,
3055
],
[
1699,
1704
]
],
[
[
667,
670
],
[
2118,
2121
],
[
2365,
2368
],
[
66088,
66091
],
[
66154,
66157
]
],
[
[
672,
680
],
[
2791,
2799
]
],
[
[
682,
692
],
[
16458,
16468
]
],
[
[
725,
743
],
[
7402,
7420
]
],
[
[
745,
748
],
[
10572,
10575
],
[
27747,
27750
],
[
39340,
39343
],
[
40439,
40442
],
[
40909,
40912
],
[
41226,
41229
],
[
42882,
42885
],
[
44002,
44005
],
[
47661,
47664
],
[
48820,
48823
],
[
51816,
51819
],
[
52091,
52094
]
],
[
[
782,
791
],
[
837,
846
]
],
[
[
814,
818
]
],
[
[
900,
933
],
[
4236,
4269
]
],
[
[
4113,
4129
],
[
4319,
4335
]
]
] |
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py KNN classification scikit-learn-compatible classes
from ._base import NeighborsBase, KNeighborsMixin
from ._base import parse_auto_method, prediction_algorithm
from sklearn.base import ClassifierMixin as BaseClassifierMixin
from .._utils import (
getFPType,
sklearn_check_version,
get_patch_message,
PatchingConditionsChain)
from .._device_offload import support_usm_ndarray
from sklearn.utils.validation import check_array
import numpy as np
from scipy import sparse as sp
import logging
if sklearn_check_version("0.22"):
from sklearn.neighbors._classification import KNeighborsClassifier as \
BaseKNeighborsClassifier
from sklearn.neighbors._base import _check_weights
from sklearn.utils.validation import _deprecate_positional_args
else:
from sklearn.neighbors.classification import KNeighborsClassifier as \
BaseKNeighborsClassifier
from sklearn.neighbors.base import _check_weights
def _deprecate_positional_args(f):
return f
def daal4py_classifier_predict(estimator, X, base_predict):
if sklearn_check_version('1.0'):
estimator._check_feature_names(X, reset=False)
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
daal_model = getattr(estimator, '_daal_model', None)
n_features = getattr(estimator, 'n_features_in_', None)
shape = getattr(X, 'shape', None)
if n_features and shape and len(shape) > 1 and shape[1] != n_features:
raise ValueError((f'X has {X.shape[1]} features, '
f'but KNNClassifier is expecting '
f'{n_features} features as input'))
try:
fptype = getFPType(X)
except ValueError:
fptype = None
_patching_status = PatchingConditionsChain(
"sklearn.neighbors.KNeighborsClassifier.predict")
_dal_ready = _patching_status.and_conditions([
(daal_model is not None, "oneDAL model was not trained."),
(fptype is not None, "Unable to get dtype."),
(not sp.issparse(X), "X is sparse. Sparse input is not supported.")])
_patching_status.write_log()
if _dal_ready:
params = {
'method': 'defaultDense',
'k': estimator.n_neighbors,
'nClasses': len(estimator.classes_),
'voteWeights': 'voteUniform'
if estimator.weights == 'uniform' else 'voteDistance',
'resultsToEvaluate': 'computeClassLabels',
'resultsToCompute': ''
}
method = parse_auto_method(
estimator, estimator.algorithm, estimator.n_samples_fit_, n_features)
predict_alg = prediction_algorithm(method, fptype, params)
prediction_result = predict_alg.compute(X, daal_model)
result = estimator.classes_.take(
np.asarray(prediction_result.prediction.ravel(), dtype=np.intp))
else:
result = base_predict(estimator, X)
return result
if sklearn_check_version("0.24"):
class KNeighborsClassifier_(KNeighborsMixin, BaseClassifierMixin, NeighborsBase):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = \
weights if sklearn_check_version("1.0") else _check_weights(weights)
elif sklearn_check_version("0.22"):
from sklearn.neighbors._base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
BaseSupervisedIntegerMixin, BaseClassifierMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
else:
from sklearn.neighbors.base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
BaseSupervisedIntegerMixin, BaseClassifierMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
class KNeighborsClassifier(KNeighborsClassifier_):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
@support_usm_ndarray()
def fit(self, X, y):
return NeighborsBase._fit(self, X, y)
@support_usm_ndarray()
def predict(self, X):
return daal4py_classifier_predict(self, X, BaseKNeighborsClassifier.predict)
@support_usm_ndarray()
def predict_proba(self, X):
if sklearn_check_version('1.0'):
self._check_feature_names(X, reset=False)
return BaseKNeighborsClassifier.predict_proba(self, X)
| [
[
[
830,
843
],
[
3795,
3808
],
[
4612,
4625
],
[
5433,
5446
],
[
6734,
6747
]
],
[
[
845,
860
],
[
3757,
3772
],
[
4627,
4642
],
[
5448,
5463
]
],
[
[
880,
897
],
[
3266,
3283
]
],
[
[
899,
919
],
[
3389,
3409
]
],
[
[
945,
983
],
[
3774,
3793
],
[
4704,
4723
],
[
5525,
5544
]
],
[
[
1011,
1020
],
[
2426,
2435
]
],
[
[
1026,
1047
],
[
1269,
1290
],
[
3694,
3715
],
[
4445,
4466
],
[
1826,
1847
],
[
4382,
4403
],
[
6975,
6996
]
],
[
[
1053,
1070
]
],
[
[
1076,
1099
],
[
2508,
2531
]
],
[
[
1131,
1150
],
[
6672,
6691
],
[
6771,
6790
],
[
6910,
6929
]
],
[
[
1188,
1199
],
[
1919,
1930
]
],
[
[
1207,
1218
],
[
1962,
1964
],
[
1974,
1976
],
[
3551,
3553
],
[
3606,
3608
]
],
[
[
1237,
1249
],
[
2776,
2778
]
],
[
[
1257,
1264
]
],
[
[
1350,
1408
],
[
6870,
6894
],
[
7074,
7098
]
],
[
[
1449,
1463
],
[
4416,
4430
],
[
5268,
5282
],
[
6089,
6103
]
],
[
[
1505,
1531
],
[
3820,
3846
],
[
4735,
4761
],
[
5556,
5582
],
[
6171,
6197
]
],
[
[
1587,
1645
],
[
6870,
6894
],
[
7074,
7098
]
],
[
[
1685,
1699
],
[
4416,
4430
],
[
5268,
5282
],
[
6089,
6103
]
],
[
[
1709,
1735
],
[
3820,
3846
],
[
4735,
4761
],
[
5556,
5582
],
[
6171,
6197
]
],
[
[
1763,
1789
],
[
6834,
6860
]
],
[
[
3735,
3756
],
[
6142,
6163
]
],
[
[
4516,
4578
],
[
4676,
4702
]
],
[
[
4590,
4611
],
[
6142,
6163
]
],
[
[
5337,
5399
],
[
5497,
5523
]
],
[
[
5411,
5432
],
[
6142,
6163
]
],
[
[
6121,
6141
]
]
] |
'''
Created by auto_sdk on 2020.08.19
'''
from dingtalk.api.base import RestApi
class OapiAtsChannelAccountAddRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.biz_code = None
self.channel_user_identify = None
self.userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.ats.channel.account.add'
| [
[
[
72,
79
],
[
118,
125
],
[
160,
167
]
],
[
[
86,
117
]
]
] |
#!/usr/bin/python3
"""
Unit Test for api v1 Flask App
"""
import inspect
import pep8
import web_flask
import unittest
from os import stat
web_flask = __import__('web_flask.2-c_route', globals(), locals(), ['*'])
class TestCRouteDocs(unittest.TestCase):
"""Class for testing Hello Route docs"""
all_funcs = inspect.getmembers(web_flask, inspect.isfunction)
@classmethod
def setUpClass(cls):
print('\n\n.................................')
print('..... Testing Documentation .....')
print('............ C Route ...........')
print('.................................\n\n')
def test_doc_file(self):
"""... documentation for the file"""
actual = web_flask.__doc__
self.assertIsNotNone(actual)
def test_all_function_docs(self):
"""... tests for ALL DOCS for all functions"""
all_functions = TestCRouteDocs.all_funcs
for function in all_functions:
self.assertIsNotNone(function[1].__doc__)
def test_pep8(self):
"""... tests if file conforms to PEP8 Style"""
pep8style = pep8.StyleGuide(quiet=True)
errors = pep8style.check_files(['web_flask/2-c_route.py'])
self.assertEqual(errors.total_errors, 0, errors.messages)
def test_file_is_executable(self):
"""... tests if file has correct permissions so user can execute"""
file_stat = stat('web_flask/2-c_route.py')
permissions = str(oct(file_stat[0]))
actual = int(permissions[5:-2]) >= 5
self.assertTrue(actual)
if __name__ == '__main__':
"""
MAIN TESTS
"""
    unittest.main()
| [
[
[
65,
72
],
[
317,
324
],
[
347,
354
]
],
[
[
80,
84
],
[
1106,
1110
]
],
[
[
92,
101
]
],
[
[
109,
117
],
[
235,
243
],
[
1620,
1628
]
],
[
[
133,
137
],
[
1403,
1407
]
],
[
[
138,
147
],
[
336,
345
],
[
714,
723
]
],
[
[
220,
234
],
[
887,
901
]
]
] |
from genologics.lims import Lims
from genologics.config import BASEURI, USERNAME, PASSWORD
from multiqc.utils import report, config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import table
from collections import OrderedDict
import logging
import re
class MultiQC_clarity_metadata(BaseMultiqcModule):
def __init__(self):
self.log = logging.getLogger('multiqc')
# Check that this plugin hasn't been disabled
if config.kwargs.get('disable_clarity', False) is True:
self.log.info("Skipping MultiQC_Clarity as disabled on command line")
return None
if getattr(config, 'disable_clarity', False) is True:
self.log.debug("Skipping MultiQC_Clarity as specified in config file")
return None
super(MultiQC_clarity_metadata, self).__init__(name='Clarity LIMS', anchor='clarity')
self.intro = '''<p>The <a href="https://github.com/MultiQC/MultiQC_Clarity" target="_blank">MultiQC_Clarity</a>
plugin fetches data from a specified
<a href="https://www.genologics.com/clarity-lims/" target="_blank">Basespace Clarity LIMS</a> instance.</p>'''
self.lims = Lims(BASEURI, USERNAME, PASSWORD)
self.metadata = {}
self.header_metadata = {}
self.general_metadata = {}
self.tab_metadata = {}
self.samples = []
self.schema = getattr(config, 'clarity', None)
if self.schema is None:
self.log.debug("No config found for MultiQC_Clarity")
return None
self.name_edit_regex = self.schema.get("name_edit_regex")
self.get_samples()
self.get_metadata('report_header_info')
self.get_metadata('general_stats')
self.get_metadata('clarity_module')
self.update_multiqc_report()
self.make_sections()
report.modules_output.append(self)
def get_samples(self):
if config.kwargs.get('clarity_project'):
pj = self.lims.get_projects(name=config.kwargs['clarity_project'])
if len(pj) > 1:
self.log.error("Found multiple match projects in Clarity.")
elif len(pj) < 1:
self.log.error("Could not identify project in Clarity.")
else:
self.samples = self.lims.get_samples(projectlimsid=pj[0].id)
else:
names = set()
for x in report.general_stats_data:
names.update(x.keys())
for d in report.saved_raw_data.values():
try:
                    names.update(d.keys())
except AttributeError:
pass
if not config.kwargs.get('clarity_skip_edit_names'):
names = self.edit_names(names)
self.log.info("Looking into Clarity for samples {}".format(", ".join(names)))
found = 0
try:
for name in names:
matching_samples = self.lims.get_samples(name=name)
if not matching_samples:
self.log.error("Could not find a sample matching {0}, skipping.".format(name))
continue
if len(matching_samples) > 1:
self.log.error("Found multiple samples matching {0}, skipping".format(name))
continue
found += 1
self.samples.append(matching_samples[0])
except Exception as e:
self.log.warn("Could not connect to Clarity LIMS: {}".format(e))
return None
self.log.info("Found {} out of {} samples in LIMS.".format(found, len(names)))
def edit_names(self, names):
if self.name_edit_regex:
return self.edit_names_with_regex(names)
edited=[]
for name in names:
if name.endswith("_1") or name.endswith("_2"):
edited.append(name[:-2])
elif name.endswith("_R1") or name.endswith("_R2"):
edited.append(name[:-3])
else:
edited.append(name)
return edited
def edit_names_with_regex(self, names):
edited = []
for name in names:
matches = re.search(re.compile(self.name_edit_regex), name)
edited.append(matches.group(1))
return edited
def flatten_metadata(self, metadata):
for first_level in metadata:
for second_level in metadata[first_level]:
if isinstance(metadata[first_level][second_level], set) or isinstance(metadata[first_level][second_level], list):
metadata[first_level][second_level] = ", ".join(metadata[first_level][second_level])
return metadata
def get_project_metadata(self, udfs):
project_metadata={}
for sample in self.samples:
project_metadata[sample.project.name]={}
for udf in udfs:
if udf in sample.project.udf:
try:
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
except:
project_metadata[sample.project.name][udf] = set()
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
return self.flatten_metadata(project_metadata)
def get_sample_metadata(self, udfs):
sample_metadata={}
for sample in self.samples:
sample_metadata[sample.name]={}
for udf in udfs:
if udf in sample.udf:
try:
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
except:
sample_metadata[sample.name][udf] = set()
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
return self.flatten_metadata(sample_metadata)
def get_metadata(self, part):
for key in self.schema[part]:
if key == 'Project':
metadata = self.get_project_metadata(self.schema[part]['Project'])
elif key == 'Sample':
metadata =self.get_sample_metadata(self.schema[part]['Sample'])
else:
metadata = self.get_artifact_metadata(self.schema[part])
if part == "report_header_info":
self.header_metadata.update(metadata)
elif part == "general_stats":
self.general_metadata.update(metadata)
else:
self.tab_metadata.update(metadata)
def get_artifact_metadata(self, pt_to_udfs):
artifact_metadata={}
for sample in self.samples:
artifact_metadata[sample.name]={}
for process_type in pt_to_udfs:
if process_type == 'Sample':
continue
if process_type == 'Project':
continue
artifacts = self.lims.get_artifacts(sample_name=sample.name, process_type=process_type)
for udf_name in pt_to_udfs[process_type].get("outputs", []):
values = []
for artifact in artifacts:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
processes = set([art.parent_process for art in artifacts])
inputs=[]
for p in processes:
inputs.extend([art for art in p.all_inputs() if sample.name in [s.name for s in art.samples]])
for udf_name in pt_to_udfs[process_type].get("inputs", []):
values = []
for artifact in inputs:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
return self.flatten_metadata(artifact_metadata)
def update_multiqc_report(self):
if config.report_header_info is None:
config.report_header_info = []
for first_level in self.header_metadata:
d = {}
for key in self.header_metadata[first_level]:
d[key] = self.header_metadata[first_level][key]
config.report_header_info.append(d)
headers = {}
for first_level in self.schema["general_stats"]:
for header in self.schema["general_stats"][first_level]:
headers[header] = {}
if isinstance(self.schema["general_stats"][first_level][header], dict):
for subsubkey, cfg in self.schema["general_stats"][first_level][header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['description'] = headers[header].get('description', '{} - {}'.format(first_level, header))
headers[header]['namespace'] = headers[header].get('namespace', 'Clarity LIMS')
headers[header]['scale'] = headers[header].get('scale', 'YlGn')
report.general_stats_headers.append(headers)
report.general_stats_data.append(self.general_metadata)
def make_sections(self):
headers = OrderedDict()
for first_level in self.tab_metadata:
for header in self.tab_metadata[first_level]:
desc = header
if header not in headers:
headers[header] = {}
for key in self.schema['clarity_module']:
if header in self.schema['clarity_module'][key]:
desc = key
elif isinstance(self.schema['clarity_module'][key], dict):
for subkey, val in self.schema['clarity_module'][key].items():
# print(val)
if val is None:
break
elif header in val:
desc = key
if isinstance(val[header], dict):
for subsubkey, cfg in val[header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['namespace'] = headers[header].get('namespace', desc)
headers[header]['title'] = headers[header].get('title', header)
headers[header]['description'] = headers[header].get('description', header)
self.intro += table.plot(self.tab_metadata, headers)
| [
[
[
29,
33
],
[
1214,
1218
]
],
[
[
64,
71
],
[
1219,
1226
]
],
[
[
73,
81
],
[
1228,
1236
]
],
[
[
83,
91
],
[
1238,
1246
]
],
[
[
119,
125
],
[
1883,
1889
],
[
2438,
2444
],
[
2525,
2531
],
[
9435,
9441
],
[
9488,
9494
]
],
[
[
127,
133
],
[
477,
483
],
[
655,
661
],
[
1432,
1438
],
[
1958,
1964
],
[
2041,
2047
],
[
2709,
2715
],
[
8157,
8163
],
[
8204,
8210
],
[
8437,
8443
]
],
[
[
174,
191
],
[
318,
335
]
],
[
[
218,
223
],
[
11232,
11237
]
],
[
[
249,
260
],
[
9592,
9603
]
],
[
[
268,
275
],
[
382,
389
]
],
[
[
283,
285
],
[
4293,
4295
],
[
4303,
4305
]
],
[
[
293,
317
],
[
820,
844
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open("README.rst", "rt") as inf:
readme = inf.read()
ver_dic = {}
with open("cgen/version.py") as version_file:
version_file_contents = version_file.read()
exec(compile(version_file_contents, "cgen/version.py", 'exec'), ver_dic)
setup(
name="cgen",
version=ver_dic["VERSION_TEXT"],
description="C/C++ source generation from an AST",
long_description=readme,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
author="Andreas Kloeckner",
author_email="inform@tiker.net",
license="MIT",
url="http://documen.tician.de/cgen/",
packages=["cgen"],
python_requires="~=3.6",
install_requires=[
"pytools>=2015.1.2",
"numpy>=1.6",
])
| [
[
[
70,
75
],
[
322,
327
]
],
[
[
110,
113
],
[
128,
131
]
],
[
[
119,
125
],
[
475,
481
]
],
[
[
140,
147
],
[
312,
319
],
[
366,
373
]
],
[
[
185,
197
],
[
227,
239
]
],
[
[
203,
224
],
[
261,
282
]
]
] |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg_resources import parse_version
def cmp_versions(version1, version2):
if version1 is None:
if version2 is None:
return 0
else:
return -1
if version2 is None:
return 1
if parse_version(version1) == parse_version(version2):
return 0
elif parse_version(version1) > parse_version(version2):
return 1
return -1
| [
[
[
670,
683
],
[
884,
897
],
[
911,
924
],
[
962,
975
],
[
988,
1001
]
],
[
[
690,
702
]
]
] |
import cv2
import numpy as np
import time
'''
TEST FILE using 1000, 1000 output image.
Actual code will have an output image of 200,200, which also means a different homography
'''
#recalculated homography
# homography_front = np.array([[3.12570133882145e-05, 0.000286172662353515, -0.680179732686621],
# [0.000967963380750764,-0.00220708598330688,-0.733040431894039],
# [9.31003590466217e-08,-7.28146482745869e-06,-0.00116847956395974]])
# homography_left = np.array([[-0.000710128671370178, 6.65307627276203e-05, -0.0692689783742822],
# [0.000516381003921171, -0.00181011134155597, -0.997595526929844],
# [-2.51074118905076e-08, -6.83854860981181e-06, -0.000959883483255739]])
# homography_right = np.array([[-0.000926831714971124,-7.57332958427531e-05,0.994215703860414],
# [-0.000923137149283102,0.00327126641381199,0.107337667969103],
# [-2.77833313194565e-07,1.03110471009649e-05,0.00115801865068319]])
# Original
homography_front = np.array([[4.62227601649053e-05, 0.000243520884225642, -0.678748083960862],
[0.000969465596108860, -0.00207033488113324, -0.734366621126640],
[1.58512860546350e-07, -6.83048800828728e-06, -0.00119023476366804]])
homography_left = np.array([[-0.000759672412515488, 2.34075591542924e-05, -0.0699936817773495],
[0.000483107853918350, -0.00189886717269873, -0.997544805245074],
[-1.49265515027449e-07, -7.08702713960990e-06, -0.000910631508297557]])
homography_right = np.array([[-0.000908962187561903, -3.67579540055241e-05, 0.994837127281325],
[-0.000886484342219692, 0.00317263543314027, 0.101420799019439],
[-1.14460320494404e-07, 9.99234254412552e-06, 0.00111021419224332]])
#LARGER RANGE OF VIEW
translation = np.array([[1, 0, 0],[0,1,100],[0,0,1]])
def warp_image(image, homography):
im_out = cv2.warpPerspective(image, np.matmul(translation,homography), (600, 800))
# cv2.imshow('warped', im_out)
# cv2.waitKey(0)
#cv2.imshow('image', im_out)
return im_out
def left_hom(image):
    # Left unfinished in the original; presumably intended to warp the left image
    # with its homography, mirroring warp_image() above.
    return cv2.warpPerspective(image, np.matmul(translation, homography_left), (600, 800))
# Create mask of front image. im_mask indicates black pixel area
def find_mask(image):
black_range1 = np.array([0,0,0])
im_mask = (cv2.inRange(image, black_range1, black_range1)).astype('bool')
im_mask_inv = (1-im_mask).astype('bool')
im_mask_inv = np.dstack((im_mask_inv, im_mask_inv, im_mask_inv))
im_mask= np.dstack((im_mask, im_mask, im_mask))
return im_mask_inv, im_mask
if __name__ == "__main__":
count = 0
while True:
img_front = cv2.imread('../collected_images/5/center/'+ str(count)+'.jpg')
img_left = cv2.imread('../collected_images/5/left/'+ str(count)+'.jpg')
img_right = cv2.imread('../collected_images/5/right/'+ str(count)+'.jpg')
im_front = warp_image(img_front, homography_front).astype('uint8')
im_left = warp_image(img_left, homography_left).astype('uint8')
im_right = warp_image(img_right, homography_right).astype('uint8')
init_time = time.time()
im_side = im_left + im_right
im_mask_inv, im_mask = find_mask(im_side)
front_masked = np.multiply(im_front, im_mask).astype('uint8')
side_masked = np.multiply(im_side, im_mask_inv).astype('uint8')
print("Masking Time: ", time.time()-init_time)
summed_image = front_masked + side_masked
#Gaussian Blurring?
#summed_image = cv2.GaussianBlur(summed_image, (5,5), 0)
# cv2.imshow('front', front_masked)
# cv2.imshow('left', im_left)
# cv2.imshow('right', im_right)
# cv2.imshow('front', im_front)
cv2.imshow('summed', summed_image)
cv2.imwrite('../collected_images/5/mosaic_full/'+str(count) + '.jpg', summed_image)
#summed_image_cropped = summed_image[200:800, :500, :]
print("Time elapsed: ", (time.time() - init_time))
#cv2.imshow('summed cropped', summed_image_cropped)
count +=1
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
| [
[
[
7,
10
],
[
2526,
2529
],
[
2608,
2611
],
[
2689,
2692
],
[
3605,
3608
],
[
3649,
3652
],
[
3946,
3949
],
[
1822,
1825
],
[
2038,
2041
],
[
2187,
2190
]
],
[
[
18,
29
],
[
994,
996
],
[
1233,
1235
],
[
1477,
1479
],
[
1733,
1735
],
[
3118,
3120
],
[
3187,
3189
],
[
1849,
1851
],
[
2154,
2156
],
[
2313,
2315
],
[
2377,
2379
]
],
[
[
37,
41
],
[
2996,
3000
],
[
3269,
3273
],
[
3829,
3833
]
],
[
[
975,
991
],
[
2794,
2810
]
],
[
[
1215,
1230
],
[
2867,
2882
]
],
[
[
1458,
1474
],
[
2941,
2957
]
],
[
[
1719,
1730
],
[
1859,
1870
]
],
[
[
1778,
1788
],
[
2772,
2782
],
[
2846,
2856
],
[
2919,
2929
]
],
[
[
2008,
2016
]
],
[
[
2117,
2126
],
[
3076,
3085
]
],
[
[
2480,
2485
],
[
2574,
2579
],
[
2654,
2659
],
[
2736,
2741
],
[
3702,
3707
],
[
3924,
3929
]
],
[
[
2514,
2523
],
[
2783,
2792
]
],
[
[
2597,
2605
],
[
2857,
2865
]
],
[
[
2677,
2686
],
[
2930,
2939
]
],
[
[
2761,
2769
],
[
3130,
3138
]
],
[
[
2836,
2843
],
[
3026,
3033
]
],
[
[
2908,
2916
],
[
3036,
3044
]
],
[
[
2984,
2993
],
[
3281,
3290
],
[
3843,
3852
]
],
[
[
3016,
3023
],
[
3086,
3093
],
[
3199,
3206
]
],
[
[
3053,
3064
],
[
3208,
3219
]
],
[
[
3066,
3073
],
[
3140,
3147
]
],
[
[
3103,
3115
],
[
3315,
3327
]
],
[
[
3173,
3184
],
[
3330,
3341
]
],
[
[
3300,
3312
],
[
3626,
3638
],
[
3719,
3731
]
],
[
[
3942,
3943
],
[
3979,
3980
]
]
] |
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import (Required, Length, Email, ValidationError,
EqualTo)
from app.models import User
class Unique(object):
'''
Custom validator to check an object's attribute
is unique. For example users should not be able
to create an account if the account's email
address is already in the database. This class
    assumes you are using SQLAlchemy to query the
    database.
'''
def __init__(self, model, field, message):
self.model = model
self.field = field
self.message = message
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if check:
raise ValidationError(self.message)
class Forgot(Form):
''' User forgot password form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
class Resend(Form):
''' User forgot password form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
class Reset(Form):
''' User reset password form. '''
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
class Login(Form):
''' User login form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
password = PasswordField(validators=[Required()],
description='Password')
class SignUp(Form):
''' User sign up form. '''
first_name = TextField(validators=[Required(), Length(min=2)],
description='Name')
last_name = TextField(validators=[Required(), Length(min=2)],
description='Surname')
email = TextField(validators=[Required(), Email(),
Unique(User, User.email,
'This email address is ' +
'already linked to an account.')],
description='Email address')
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
| [
[
[
22,
26
],
[
850,
854
],
[
1020,
1024
],
[
1189,
1193
],
[
1478,
1482
],
[
1745,
1749
]
],
[
[
47,
56
],
[
910,
919
],
[
1080,
1089
],
[
1528,
1537
],
[
1802,
1811
],
[
1909,
1918
],
[
2018,
2027
]
],
[
[
58,
71
],
[
1251,
1264
],
[
1418,
1431
],
[
1638,
1651
],
[
2330,
2343
],
[
2497,
2510
]
],
[
[
104,
112
],
[
932,
940
],
[
1102,
1110
],
[
1286,
1294
],
[
1550,
1558
],
[
1664,
1672
],
[
1824,
1832
],
[
1931,
1939
],
[
2040,
2048
],
[
2365,
2373
]
],
[
[
114,
120
],
[
1298,
1304
],
[
1836,
1842
],
[
1943,
1949
],
[
2377,
2383
]
],
[
[
122,
127
],
[
944,
949
],
[
1114,
1119
],
[
1562,
1567
],
[
2052,
2057
]
],
[
[
129,
144
],
[
805,
820
]
],
[
[
178,
185
],
[
1321,
1328
],
[
2400,
2407
]
],
[
[
210,
214
],
[
2102,
2106
],
[
2108,
2112
]
],
[
[
223,
229
],
[
2095,
2101
]
],
[
[
843,
849
]
],
[
[
1013,
1019
]
],
[
[
1183,
1188
]
],
[
[
1472,
1477
]
],
[
[
1738,
1744
]
]
] |
# Call Domain api here | [] |
#! /usr/bin/env python3
#
# makesedonac.py
#
# Compile sedonac.jar
#
# Author: Brian Frank
# Creation: 7 Dec 07
#
from __future__ import print_function
import os
import env
import compilejar
depends = [env.sedonaJar]
srcDir = os.path.join(env.src, "sedonac", "src")
jarFile = env.sedonacJar
packages = [
"sedonac",
"sedonac.analysis",
"sedonac.asm",
"sedonac.ast",
"sedonac.gen",
"sedonac.ir",
"sedonac.namespace",
"sedonac.parser",
"sedonac.platform",
"sedonac.scode",
"sedonac.steps",
"sedonac.test",
"sedonac.translate",
"sedonac.util",
]
# Make
def compile():
try:
compilejar.compile(srcDir, depends, packages, jarFile)
except env.BuildError:
print("**")
print("** FAILED [" + jarFile + "]")
print("**")
return 1
# Main
if __name__ == '__main__':
compile()
| [
[
[
147,
161
]
],
[
[
169,
171
],
[
248,
250
]
],
[
[
179,
182
],
[
223,
226
],
[
261,
264
],
[
298,
301
],
[
691,
694
]
],
[
[
190,
200
],
[
627,
637
]
],
[
[
212,
219
],
[
654,
661
]
],
[
[
238,
244
],
[
646,
652
]
],
[
[
288,
295
],
[
673,
680
],
[
749,
756
]
],
[
[
313,
321
],
[
663,
671
]
],
[
[
605,
612
],
[
835,
842
]
]
] |
from edsa_packages import recursion, sorting
#Recursion tests
def test_sum_array():
'''
Make sure sum_array works
'''
assert recursion.sum_array([8, 3, 2, 7, 4]) == 24, 'incorrect'
assert recursion.sum_array([5, 7, 8, 8, 6, 3, 4]) == 41, 'incorrect'
assert recursion.sum_array([25, 14, 2, 3, 5]) == 49, 'incorrect'
def test_fibonacci():
'''
Make sure fibonacci works
'''
    assert recursion.fibonacci(8) == 21, 'incorrect'
assert recursion.fibonacci(10) == 55, 'incorrect'
assert recursion.fibonacci(5) == 5, 'incorrect'
def test_factorial():
'''
Make sure factorial works
'''
assert recursion.factorial(4) == 24, 'incorrect'
assert recursion.factorial(8) == 40320, 'incorrect'
assert recursion.factorial(3) == 6, 'incorrect'
def test_reverse():
'''
Make sure reverse works
'''
assert recursion.reverse('apple') == 'elppa', 'incorrect'
assert recursion.reverse('test') == 'tset', 'incorrect'
assert recursion.reverse('peanut') == 'tunaep', 'incorrect'
#Sorting tests
def test_bubble_sort():
'''
Make sure bubble_sort works
'''
assert sorting.bubble_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.bubble_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.bubble_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_merge_sort():
'''
Make sure merge_sort works
'''
assert sorting.merge_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.merge_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.merge_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_quick_sort():
'''
Make sure quick_sort works
'''
assert sorting.quick_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.quick_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.quick_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
| [
[
[
26,
35
],
[
143,
152
],
[
210,
219
],
[
283,
292
],
[
421,
430
],
[
474,
483
],
[
528,
537
],
[
649,
658
],
[
702,
711
],
[
758,
767
],
[
875,
884
],
[
937,
946
],
[
997,
1006
]
],
[
[
37,
44
],
[
1150,
1157
],
[
1326,
1333
],
[
1478,
1485
],
[
1669,
1676
],
[
1844,
1851
],
[
1995,
2002
],
[
2185,
2192
],
[
2360,
2367
],
[
2511,
2518
]
],
[
[
68,
82
]
],
[
[
346,
360
]
],
[
[
574,
588
]
],
[
[
804,
816
]
],
[
[
1071,
1087
]
],
[
[
1592,
1607
]
],
[
[
2108,
2123
]
]
] |
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from hashlib import sha1
import hmac
import base64
import datetime
from awscli.compat import six
from awscli.arguments import CustomArgument
logger = logging.getLogger('ec2bundleinstance')
# This customization adds the following scalar parameters to the
# bundle-instance operation:
# --bucket:
BUCKET_DOCS = ('The bucket in which to store the AMI. '
'You can specify a bucket that you already own or '
'a new bucket that Amazon EC2 creates on your behalf. '
'If you specify a bucket that belongs to someone else, '
'Amazon EC2 returns an error.')
# --prefix:
PREFIX_DOCS = ('The prefix for the image component names being stored '
'in Amazon S3.')
# --owner-akid
OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.'
# --policy
POLICY_DOCS = (
"An Amazon S3 upload policy that gives "
"Amazon EC2 permission to upload items into Amazon S3 "
"on the user's behalf. If you provide this parameter, "
"you must also provide "
"your secret access key, so we can create a policy "
"signature for you (the secret access key is not passed "
"to Amazon EC2). If you do not provide this parameter, "
"we generate an upload policy for you automatically. "
"For more information about upload policies see the "
"sections about policy construction and signatures in the "
'<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev'
'/HTTPPOSTForms.html">'
'Amazon Simple Storage Service Developer Guide</a>.')
# --owner-sak
OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the '
'Amazon S3 bucket specified in the --bucket '
'parameter. This parameter is required so that a '
'signature can be computed for the policy.')
def _add_params(argument_table, **kwargs):
# Add the scalar parameters and also change the complex storage
# param to not be required so the user doesn't get an error from
# argparse if they only supply scalar params.
storage_arg = argument_table['storage']
storage_arg.required = False
arg = BundleArgument(storage_param='Bucket',
name='bucket',
help_text=BUCKET_DOCS)
argument_table['bucket'] = arg
arg = BundleArgument(storage_param='Prefix',
name='prefix',
help_text=PREFIX_DOCS)
argument_table['prefix'] = arg
arg = BundleArgument(storage_param='AWSAccessKeyId',
name='owner-akid',
help_text=OWNER_AKID_DOCS)
argument_table['owner-akid'] = arg
arg = BundleArgument(storage_param='_SAK',
name='owner-sak',
help_text=OWNER_SAK_DOCS)
argument_table['owner-sak'] = arg
arg = BundleArgument(storage_param='UploadPolicy',
name='policy',
help_text=POLICY_DOCS)
argument_table['policy'] = arg
def _check_args(parsed_args, **kwargs):
# This function checks the parsed args. If the user specified
# the --ip-permissions option with any of the scalar options we
# raise an error.
logger.debug(parsed_args)
arg_dict = vars(parsed_args)
if arg_dict['storage']:
for key in ('bucket', 'prefix', 'owner_akid',
'owner_sak', 'policy'):
if arg_dict[key]:
msg = ('Mixing the --storage option '
'with the simple, scalar options is '
'not recommended.')
raise ValueError(msg)
POLICY = ('{{"expiration": "{expires}",'
'"conditions": ['
'{{"bucket": "{bucket}"}},'
'{{"acl": "ec2-bundle-read"}},'
'["starts-with", "$key", "{prefix}"]'
']}}'
)
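# Illustrative only (not part of the original module): with the hypothetical values
# expires="2013-01-01T00:00:00.000Z", bucket="my-bucket", prefix="images/",
# POLICY.format(...) renders to the single-line JSON document:
# {"expiration": "2013-01-01T00:00:00.000Z","conditions": [{"bucket": "my-bucket"},{"acl": "ec2-bundle-read"},["starts-with", "$key", "images/"]]}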
def _generate_policy(params):
# Called if there is no policy supplied by the user.
# Creates a policy that provides access for 24 hours.
delta = datetime.timedelta(hours=24)
expires = datetime.datetime.utcnow() + delta
expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
policy = POLICY.format(expires=expires_iso,
bucket=params['Bucket'],
prefix=params['Prefix'])
params['UploadPolicy'] = policy
def _generate_signature(params):
# If we have a policy and a sak, create the signature.
policy = params.get('UploadPolicy')
sak = params.get('_SAK')
if policy and sak:
policy = base64.b64encode(six.b(policy)).decode('utf-8')
new_hmac = hmac.new(sak.encode('utf-8'), digestmod=sha1)
new_hmac.update(six.b(policy))
ps = base64.encodestring(new_hmac.digest()).strip().decode('utf-8')
params['UploadPolicySignature'] = ps
del params['_SAK']
def _check_params(params, **kwargs):
# Called just before call but prior to building the params.
# Adds information not supplied by the user.
storage = params['Storage']['S3']
if 'UploadPolicy' not in storage:
_generate_policy(storage)
if 'UploadPolicySignature' not in storage:
_generate_signature(storage)
EVENTS = [
('building-argument-table.ec2.bundle-instance', _add_params),
('operation-args-parsed.ec2.bundle-instance', _check_args),
('before-parameter-build.ec2.BundleInstance', _check_params),
]
def register_bundleinstance(event_handler):
# Register all of the events for customizing BundleInstance
for event, handler in EVENTS:
event_handler.register(event, handler)
class BundleArgument(CustomArgument):
def __init__(self, storage_param, *args, **kwargs):
super(BundleArgument, self).__init__(*args, **kwargs)
self._storage_param = storage_param
def _build_storage(self, params, value):
# Build up the Storage data structure
if 'Storage' not in params:
params['Storage'] = {'S3': {}}
params['Storage']['S3'][self._storage_param] = value
def add_to_params(self, parameters, value):
if value:
self._build_storage(parameters, value)
| [
[
[
573,
580
],
[
734,
741
]
],
[
[
601,
605
],
[
5252,
5256
]
],
[
[
613,
617
],
[
5212,
5216
]
],
[
[
625,
631
],
[
5145,
5151
],
[
5310,
5316
]
],
[
[
639,
647
],
[
4616,
4624
],
[
4659,
4667
]
],
[
[
675,
678
],
[
5162,
5165
],
[
5282,
5285
]
],
[
[
709,
723
],
[
6216,
6230
]
],
[
[
725,
731
],
[
3820,
3826
]
],
[
[
881,
892
],
[
2848,
2859
]
],
[
[
1209,
1220
],
[
3020,
3031
]
],
[
[
1329,
1344
],
[
3204,
3219
]
],
[
[
1417,
1428
],
[
3569,
3580
]
],
[
[
2151,
2165
],
[
3385,
3399
]
],
[
[
2421,
2432
],
[
5856,
5867
]
],
[
[
3623,
3634
],
[
5920,
5931
]
],
[
[
4232,
4238
],
[
4767,
4773
]
],
[
[
4463,
4479
],
[
5681,
5697
]
],
[
[
4948,
4967
],
[
5762,
5781
]
],
[
[
5451,
5464
],
[
5984,
5997
]
],
[
[
5793,
5799
],
[
6138,
6144
]
],
[
[
6008,
6031
]
],
[
[
6201,
6215
],
[
2734,
2748
],
[
2906,
2920
],
[
3078,
3092
],
[
3270,
3284
],
[
3449,
3463
],
[
6304,
6318
]
]
] |
import datetime
from http import HTTPStatus
from locust import HttpUser, task, between
# This test can be run after installing locust through the cli as "locust --host=http://<deployed_host>:<port>"
# Then the URL http://localhost:8089/ can be accessed to start the test.
# Can also be run using no UI mode as "locust --no-web -c <number_of_clients> -r <clients_per_second> --run-time <time e.g. 1h30m> --host=http://<deployed_host>:<port>"
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
@task(1)
def get_developers(self):
r = self.client.get("/developers")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
@task(1)
def get_developers_search(self):
r = self.client.get("/developers/search/james")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
| [
[
[
7,
15
]
],
[
[
33,
43
],
[
621,
631
],
[
825,
835
]
],
[
[
63,
71
],
[
461,
469
]
],
[
[
73,
77
],
[
508,
512
],
[
692,
696
]
],
[
[
79,
86
],
[
488,
495
]
],
[
[
446,
460
]
]
] |
import json
import sys
from os import path
header_comment = '# %%\n'
def nb2py(notebook):
result = []
cells = notebook['cells']
for cell in cells:
cell_type = cell['cell_type']
if cell_type == 'markdown':
result.append('%s"""\n%s\n"""'%
(header_comment, ''.join(cell['source'])))
if cell_type == 'code':
result.append("%s%s" % (header_comment, ''.join(cell['source'])))
return '\n\n'.join(result)
def py2nb(py_str):
# remove leading header comment
if py_str.startswith(header_comment):
py_str = py_str[len(header_comment):]
cells = []
chunks = py_str.split('\n\n%s' % header_comment)
for chunk in chunks:
cell_type = 'code'
if chunk.startswith("'''"):
chunk = chunk.strip("'\n")
cell_type = 'markdown'
elif chunk.startswith('"""'):
chunk = chunk.strip('"\n')
cell_type = 'markdown'
cell = {
'cell_type': cell_type,
'metadata': {},
'source': chunk.splitlines(True),
}
if cell_type == 'code':
cell.update({'outputs': [], 'execution_count': None})
cells.append(cell)
notebook = {
'cells': cells,
'metadata': {
'anaconda-cloud': {},
'kernelspec': {
'display_name': 'Python 3',
'language': 'python',
'name': 'python3'},
'language_info': {
'codemirror_mode': {'name': 'ipython', 'version': 3},
'file_extension': '.py',
'mimetype': 'text/x-python',
'name': 'python',
'nbconvert_exporter': 'python',
'pygments_lexer': 'ipython3',
'version': '3.6.1'}},
'nbformat': 4,
'nbformat_minor': 1
}
return notebook
def convert(in_file, out_file):
_, in_ext = path.splitext(in_file)
_, out_ext = path.splitext(out_file)
if in_ext == '.ipynb' and out_ext == '.py':
with open(in_file, 'r') as f:
notebook = json.load(f)
py_str = nb2py(notebook)
with open(out_file, 'w') as f:
f.write(py_str)
elif in_ext == '.py' and out_ext == '.ipynb':
with open(in_file, 'r') as f:
py_str = f.read()
notebook = py2nb(py_str)
with open(out_file, 'w') as f:
json.dump(notebook, f, indent=2)
else:
raise(Exception('Extensions must be .ipynb and .py or vice versa'))
def main():
argv = sys.argv
if len(argv) < 3:
print('Usage: ipynb-py-convert in.ipynb out.py')
print('or: ipynb-py-convert in.py out.ipynb')
sys.exit(1)
convert(in_file=argv[1], out_file=argv[2])
if __name__ == '__main__':
main()
| [
[
[
7,
11
],
[
2147,
2151
],
[
2463,
2467
]
],
[
[
19,
22
],
[
2608,
2611
],
[
2761,
2764
]
],
[
[
38,
42
],
[
1973,
1977
],
[
2013,
2017
]
],
[
[
44,
58
],
[
309,
323
],
[
420,
434
],
[
576,
590
],
[
621,
635
],
[
692,
706
]
],
[
[
76,
81
],
[
2177,
2182
]
],
[
[
500,
505
],
[
2398,
2403
]
],
[
[
1929,
1936
],
[
2778,
2785
]
],
[
[
2589,
2593
],
[
2854,
2858
]
]
] |
import sys
input_file = open(sys.argv[1])
input_lines = input_file.readlines()
total_wrapping = 0
total_ribbon = 0
for line in input_lines:
l, w, h = line.split("x")
l = int(l)
w = int(w)
h = int(h)
dimensions = [l, w, h]
min_1 = min(dimensions)
dimensions.remove(min_1)
min_2 = min(dimensions)
total_wrapping += (2 * l * w) + (2 * w * h) + (2 * h * l) + (min_1 * min_2)
total_ribbon += ((min_1 * 2) + (min_2 * 2)) + (l * w * h)
# first half
print("total_wrapping", total_wrapping)
# second half
print("total_ribbon", total_ribbon)
| [
[
[
8,
11
],
[
31,
34
]
],
[
[
13,
23
],
[
58,
68
]
],
[
[
44,
55
],
[
131,
142
]
],
[
[
82,
96
],
[
337,
351
],
[
513,
527
]
],
[
[
101,
113
],
[
417,
429
],
[
566,
578
]
],
[
[
123,
127
],
[
158,
162
]
],
[
[
148,
149
],
[
186,
187
]
],
[
[
151,
152
],
[
201,
202
]
],
[
[
154,
155
],
[
216,
217
]
],
[
[
178,
179
],
[
238,
239
],
[
360,
361
],
[
392,
393
],
[
464,
465
]
],
[
[
193,
194
],
[
241,
242
],
[
364,
365
],
[
374,
375
],
[
468,
469
]
],
[
[
208,
209
],
[
244,
245
],
[
378,
379
],
[
388,
389
],
[
472,
473
]
],
[
[
224,
234
],
[
263,
273
],
[
279,
289
],
[
320,
330
]
],
[
[
251,
256
],
[
297,
302
],
[
398,
403
],
[
435,
440
]
],
[
[
308,
313
],
[
406,
411
],
[
449,
454
]
]
] |
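# The wrapping-paper and ribbon arithmetic from the script above, restated as a
# self-contained function for clarity: surface area plus the smallest side's
# area, and smallest perimeter plus volume. The 2x3x4 box is a worked example.
def box_totals(l, w, h):
    small, second = sorted([l, w, h])[:2]              # two smallest sides
    wrapping = 2*l*w + 2*w*h + 2*h*l + small * second  # paper: area + slack
    ribbon = 2 * (small + second) + l * w * h          # ribbon: perimeter + bow
    return wrapping, ribbon

print(box_totals(2, 3, 4))  # (58, 34)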
import io
import json
import gzip
from base64 import b64decode
from http.cookies import SimpleCookie
import chardet
import rfc3986
import graphene
import yaml
from requests.structures import CaseInsensitiveDict
from requests.cookies import RequestsCookieJar
from starlette.datastructures import MutableHeaders
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from urllib.parse import parse_qs
from .status_codes import HTTP_200
from .statics import DEFAULT_ENCODING
class QueryDict(dict):
def __init__(self, query_string):
self.update(parse_qs(query_string))
def __getitem__(self, key):
"""
Return the last data value for this key, or [] if it's an empty list;
raise KeyError if not found.
"""
list_ = super().__getitem__(key)
try:
return list_[-1]
except IndexError:
return []
def get(self, key, default=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _get_list(self, key, default=None, force_list=False):
"""
Return a list of values for the key.
Used internally to manipulate values list. If force_list is True,
return a new copy of values.
"""
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def get_list(self, key, default=None):
"""
Return the list of values for the key. If key doesn't exist, return a
default value.
"""
return self._get_list(key, default, force_list=True)
def items(self):
"""
Yield (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self:
yield key, self[key]
def items_list(self):
"""
        Yield (key, value) pairs, where value is the list.
"""
yield from super().items()
# TODO: add slots
class Request:
__slots__ = ["_starlette", "formats", "_headers", "_encoding", "api", "_content"]
def __init__(self, scope, receive, api=None):
self._starlette = StarletteRequest(scope, receive)
self.formats = None
self._encoding = None
self.api = api
self._content = None
headers = CaseInsensitiveDict()
for key, value in self._starlette.headers.items():
headers[key] = value
self._headers = headers
@property
def session(self):
"""The session data, in dict form, from the Request."""
if "Responder-Session" in self.cookies:
data = self.cookies[self.api.session_cookie]
data = self.api._signer.unsign(data)
data = b64decode(data)
return json.loads(data)
return {}
@property
def headers(self):
"""A case-insensitive dictionary, containing all headers sent in the Request."""
return self._headers
@property
def mimetype(self):
return self.headers.get("Content-Type", "")
@property
def method(self):
"""The incoming HTTP method used for the request, lower-cased."""
return self._starlette.method.lower()
@property
def full_url(self):
"""The full URL of the Request, query parameters and all."""
return str(self._starlette.url)
@property
def url(self):
"""The parsed URL of the Request."""
return rfc3986.urlparse(self.full_url)
@property
def cookies(self):
"""The cookies sent in the Request, as a dictionary."""
cookies = RequestsCookieJar()
cookie_header = self.headers.get("Cookie", "")
bc = SimpleCookie(cookie_header)
for k, v in bc.items():
cookies[k] = v
return cookies.get_dict()
@property
def params(self):
"""A dictionary of the parsed query parameters used for the Request."""
try:
return QueryDict(self.url.query)
except AttributeError:
return QueryDict({})
@property
async def encoding(self):
"""The encoding of the Request's body. Can be set, manually. Must be awaited."""
# Use the user-set encoding first.
if self._encoding:
return self._encoding
# Then try what's defined by the Request.
elif await self.declared_encoding:
return self.declared_encoding
# Then, automatically detect the encoding.
else:
return await self.apparent_encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
@property
async def content(self):
"""The Request body, as bytes. Must be awaited."""
if not self._content:
self._content = await self._starlette.body()
return self._content
@property
async def text(self):
"""The Request body, as unicode. Must be awaited."""
return (await self.content).decode(await self.encoding)
@property
async def declared_encoding(self):
if "Encoding" in self.headers:
return self.headers["Encoding"]
@property
async def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library. Must be awaited."""
declared_encoding = await self.declared_encoding
if declared_encoding:
return declared_encoding
else:
return chardet.detect(await self.content)["encoding"]
@property
def is_secure(self):
return self.url.scheme == "https"
def accepts(self, content_type):
"""Returns ``True`` if the incoming Request accepts the given ``content_type``."""
return content_type in self.headers.get("Accept", [])
async def media(self, format=None):
"""Renders incoming json/yaml/form data as Python objects. Must be awaited.
:param format: The name of the format being used. Alternatively accepts a custom callable for the format type.
"""
if format is None:
format = "yaml" if "yaml" in self.mimetype or "" else "json"
format = "form" if "form" in self.mimetype or "" else format
if format in self.formats:
return await self.formats[format](self)
else:
return await format(self)
class Response:
__slots__ = [
"req",
"status_code",
"text",
"content",
"encoding",
"media",
"headers",
"formats",
"cookies",
"session",
]
def __init__(self, req, *, formats):
self.req = req
self.status_code = None #: The HTTP Status Code to use for the Response.
self.text = None #: A unicode representation of the response body.
self.content = None #: A bytes representation of the response body.
self.encoding = DEFAULT_ENCODING
self.media = (
None
) #: A Python object that will be content-negotiated and sent back to the client. Typically, in JSON formatting.
self.headers = (
{}
) #: A Python dictionary of ``{key: value}``, representing the headers of the response.
self.formats = formats
self.cookies = {} #: The cookies set in the Response, as a dictionary
self.session = (
req.session.copy()
) #: The cookie-based session data, in dict form, to add to the Response.
@property
async def body(self):
if self.content:
return (self.content, {})
if self.text:
return (self.text.encode(self.encoding), {"Encoding": self.encoding})
for format in self.formats:
if self.req.accepts(format):
return (await self.formats[format](self, encode=True)), {}
# Default to JSON anyway.
return (
await self.formats["json"](self, encode=True),
{"Content-Type": "application/json"},
)
async def __call__(self, receive, send):
body, headers = await self.body
if self.headers:
headers.update(self.headers)
response = StarletteResponse(
body, status_code=self.status_code, headers=headers
)
await response(receive, send)
| [
[
[
7,
9
]
],
[
[
17,
21
],
[
3266,
3270
]
],
[
[
29,
33
]
],
[
[
53,
62
],
[
3231,
3240
]
],
[
[
88,
100
],
[
4188,
4200
]
],
[
[
110,
117
],
[
5942,
5949
]
],
[
[
125,
132
],
[
3947,
3954
]
],
[
[
140,
148
]
],
[
[
156,
160
]
],
[
[
193,
212
],
[
2807,
2826
]
],
[
[
242,
259
],
[
4099,
4116
]
],
[
[
297,
311
]
],
[
[
343,
370
],
[
2645,
2661
]
],
[
[
403,
432
],
[
8655,
8672
]
],
[
[
459,
467
],
[
625,
633
]
],
[
[
495,
503
]
],
[
[
525,
541
],
[
7385,
7401
]
],
[
[
550,
559
],
[
4459,
4468
],
[
4535,
4544
]
],
[
[
2473,
2480
]
],
[
[
6841,
6849
]
]
] |
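# A short sketch of how the QueryDict class defined above behaves: plain indexing
# returns the last value for a key, while get_list() exposes the full list.
# Assumes QueryDict (and its parse_qs import) is in scope; the query string is made up.
q = QueryDict("page=1&page=2&tag=python")
print(q["page"])                     # '2'  -- last value wins
print(q.get_list("page"))            # ['1', '2']
print(q.get("missing", "fallback"))  # 'fallback'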
# Copyright 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Adversarial autoencoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers, eval
FLAGS = flags.FLAGS
class AAE(train.AE):
def model(self, latent, depth, scales, adversary_lr, disc_layer_sizes):
x = tf.placeholder(tf.float32,
[None, self.height, self.width, self.colors], 'x')
l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
h = tf.placeholder(
tf.float32,
[None, self.height >> scales, self.width >> scales, latent], 'h')
def encoder(x):
return layers.encoder(x, scales, depth, latent, 'ae_enc')
def decoder(h):
return layers.decoder(h, scales, depth, self.colors, 'ae_dec')
def discriminator(h):
with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):
h = tf.layers.flatten(h)
for size in [int(s) for s in disc_layer_sizes.split(',')]:
h = tf.layers.dense(h, size, tf.nn.leaky_relu)
return tf.layers.dense(h, 1)
encode = encoder(x)
decode = decoder(h)
ae = decoder(encode)
loss_ae = tf.losses.mean_squared_error(x, ae)
prior_samples = tf.random_normal(tf.shape(encode), dtype=encode.dtype)
adversary_logit_latent = discriminator(encode)
adversary_logit_prior = discriminator(prior_samples)
adversary_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.zeros_like(adversary_logit_latent)))
adversary_loss_prior = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_prior,
labels=tf.ones_like(adversary_logit_prior)))
autoencoder_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.ones_like(adversary_logit_latent)))
def _accuracy(logits, label):
labels = tf.logical_and(label, tf.ones_like(logits, dtype=bool))
correct = tf.equal(tf.greater(logits, 0), labels)
return tf.reduce_mean(tf.to_float(correct))
latent_accuracy = _accuracy(adversary_logit_latent, False)
prior_accuracy = _accuracy(adversary_logit_prior, True)
adversary_accuracy = (latent_accuracy + prior_accuracy)/2
utils.HookReport.log_tensor(loss_ae, 'loss_ae')
utils.HookReport.log_tensor(adversary_loss_latents, 'loss_adv_latent')
utils.HookReport.log_tensor(adversary_loss_prior, 'loss_adv_prior')
utils.HookReport.log_tensor(autoencoder_loss_latents, 'loss_ae_latent')
utils.HookReport.log_tensor(adversary_accuracy, 'adversary_accuracy')
xops = classifiers.single_layer_classifier(
tf.stop_gradient(encode), l, self.nclass)
xloss = tf.reduce_mean(xops.loss)
utils.HookReport.log_tensor(xloss, 'classify_latent')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
ae_vars = tf.global_variables('ae_')
disc_vars = tf.global_variables('disc')
xl_vars = tf.global_variables('single_layer_classifier')
with tf.control_dependencies(update_ops):
train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
loss_ae + autoencoder_loss_latents, var_list=ae_vars)
train_disc = tf.train.AdamOptimizer(adversary_lr).minimize(
adversary_loss_prior + adversary_loss_latents,
var_list=disc_vars)
train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
xloss, tf.train.get_global_step(), var_list=xl_vars)
ops = train.AEOps(x, h, l, encode, decode, ae,
tf.group(train_ae, train_disc, train_xl),
classify_latent=xops.output)
n_interpolations = 16
n_images_per_interpolation = 16
def gen_images():
return self.make_sample_grid_and_save(
ops, interpolation=n_interpolations,
height=n_images_per_interpolation)
recon, inter, slerp, samples = tf.py_func(
gen_images, [], [tf.float32]*4)
tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
tf.summary.image('interpolation', tf.expand_dims(inter, 0))
tf.summary.image('slerp', tf.expand_dims(slerp, 0))
tf.summary.image('samples', tf.expand_dims(samples, 0))
if FLAGS.dataset == 'lines32':
batched = (n_interpolations, 32, n_images_per_interpolation, 32, 1)
batched_interp = tf.transpose(
tf.reshape(inter, batched), [0, 2, 1, 3, 4])
mean_distance, mean_smoothness = tf.py_func(
eval.line_eval, [batched_interp], [tf.float32, tf.float32])
tf.summary.scalar('mean_distance', mean_distance)
tf.summary.scalar('mean_smoothness', mean_smoothness)
return ops
def main(argv):
del argv # Unused.
batch = FLAGS.batch
dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
model = AAE(
dataset,
FLAGS.train_dir,
latent=FLAGS.latent,
depth=FLAGS.depth,
scales=scales,
adversary_lr=FLAGS.adversary_lr,
disc_layer_sizes=FLAGS.disc_layer_sizes)
model.train()
if __name__ == '__main__':
    flags.DEFINE_integer('depth', 64, 'Depth of the first convolution.')
flags.DEFINE_integer(
'latent', 16,
'Latent space depth, the total latent size is the depth multiplied by '
'latent_width ** 2.')
flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
flags.DEFINE_float('adversary_lr', 1e-4,
'Learning rate for discriminator.')
flags.DEFINE_string('disc_layer_sizes', '100,100',
'Comma-separated list of discriminator layer sizes.')
app.run(main)
| [
[
[
643,
658
]
],
[
[
682,
690
]
],
[
[
714,
728
]
],
[
[
737,
741
],
[
5977,
5981
]
],
[
[
760,
763
],
[
6849,
6852
]
],
[
[
781,
786
],
[
883,
888
],
[
6307,
6312
],
[
6380,
6385
],
[
6538,
6543
],
[
6612,
6617
],
[
6716,
6721
]
],
[
[
795,
811
],
[
1007,
1009
],
[
1022,
1024
],
[
1124,
1126
],
[
1139,
1141
],
[
1193,
1195
],
[
1221,
1223
],
[
1934,
1936
],
[
1995,
1997
],
[
2012,
2014
],
[
2199,
2201
],
[
2227,
2229
],
[
2338,
2340
],
[
2409,
2411
],
[
2437,
2439
],
[
2547,
2549
],
[
2620,
2622
],
[
2648,
2650
],
[
2759,
2761
],
[
3664,
3666
],
[
3722,
3724
],
[
3832,
3834
],
[
3850,
3852
],
[
3893,
3895
],
[
3940,
3942
],
[
3986,
3988
],
[
4046,
4048
],
[
4106,
4108
],
[
4244,
4246
],
[
4413,
4415
],
[
4479,
4481
],
[
4606,
4608
],
[
4996,
4998
],
[
5037,
5039
],
[
5060,
5062
],
[
5095,
5097
],
[
5129,
5131
],
[
5163,
5165
],
[
5197,
5199
],
[
5223,
5225
],
[
5257,
5259
],
[
5285,
5287
],
[
5462,
5464
],
[
5492,
5494
],
[
5582,
5584
],
[
5645,
5647
],
[
5657,
5659
],
[
5682,
5684
],
[
5744,
5746
],
[
1554,
1556
],
[
1586,
1588
],
[
1622,
1624
],
[
1742,
1744
],
[
1767,
1769
],
[
1808,
1810
],
[
2858,
2860
],
[
2880,
2882
],
[
2936,
2938
],
[
2945,
2947
],
[
2995,
2997
],
[
3010,
3012
]
],
[
[
828,
832
],
[
5898,
5902
]
],
[
[
834,
840
],
[
1355,
1361
],
[
1450,
1456
]
],
[
[
842,
847
],
[
907,
912
],
[
4539,
4544
]
],
[
[
849,
854
],
[
3238,
3243
],
[
3294,
3299
],
[
3373,
3378
],
[
3449,
3454
],
[
3529,
3534
],
[
3756,
3761
]
],
[
[
856,
867
],
[
3615,
3626
]
],
[
[
869,
873
],
[
5610,
5614
]
],
[
[
875,
880
],
[
4129,
4134
],
[
4436,
4441
],
[
5325,
5330
],
[
5872,
5877
],
[
5915,
5920
],
[
6003,
6008
],
[
6070,
6075
],
[
6102,
6107
],
[
6130,
6135
],
[
6187,
6192
],
[
6232,
6237
]
],
[
[
903,
906
],
[
6040,
6043
]
],
[
[
5824,
5828
],
[
6857,
6861
]
]
] |
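# The _accuracy helper inside AAE.model above counts how often a logit falls on
# the correct side of zero for the given label. A plain-NumPy restatement for
# illustration only (the original uses TensorFlow ops on graph tensors):
import numpy as np

def accuracy(logits, label):
    labels = np.logical_and(label, np.ones_like(logits, dtype=bool))
    correct = np.equal(np.greater(logits, 0), labels)
    return float(np.mean(correct.astype(np.float32)))

print(accuracy(np.array([1.3, -0.2, 2.1]), False))  # ~0.33: one of three logits is <= 0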
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.one_to_many and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine field. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.opts.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
elif isinstance(field, models.FileField):
return mark_safe('<a href="%s">%s</a>' % (
conditional_escape(value.url),
conditional_escape(value),
))
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a ``limit_choices_to`` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| [
[
[
23,
39
]
],
[
[
48,
56
],
[
13795,
13803
],
[
13911,
13919
],
[
13926,
13934
]
],
[
[
64,
71
],
[
14030,
14037
]
],
[
[
96,
107
],
[
5399,
5410
]
],
[
[
141,
164
],
[
4426,
4449
]
],
[
[
200,
217
],
[
8685,
8702
],
[
9736,
9753
],
[
10494,
10511
],
[
12059,
12076
]
],
[
[
255,
269
],
[
4230,
4244
]
],
[
[
271,
278
],
[
3946,
3953
]
],
[
[
301,
307
],
[
6158,
6164
],
[
7623,
7629
],
[
7637,
7643
],
[
7712,
7718
],
[
8309,
8315
],
[
12632,
12638
],
[
12674,
12680
],
[
12825,
12831
],
[
12944,
12950
],
[
12962,
12968
],
[
13048,
13054
],
[
13163,
13169
],
[
13254,
13260
],
[
16799,
16805
],
[
16903,
16909
],
[
16986,
16992
]
],
[
[
347,
357
],
[
14651,
14661
],
[
15354,
15364
],
[
15777,
15787
]
],
[
[
396,
405
],
[
5168,
5177
]
],
[
[
437,
448
],
[
11759,
11770
],
[
11828,
11839
]
],
[
[
474,
481
],
[
12863,
12870
],
[
12997,
13004
],
[
13085,
13092
],
[
13198,
13205
],
[
13830,
13837
],
[
13958,
13965
],
[
14071,
14078
]
],
[
[
483,
486
],
[
1966,
1969
],
[
10623,
10626
],
[
14009,
14012
]
],
[
[
488,
496
],
[
12880,
12888
],
[
13847,
13855
]
],
[
[
531,
540
],
[
10689,
10698
]
],
[
[
542,
552
],
[
7833,
7843
],
[
7895,
7905
],
[
10567,
10577
],
[
3861,
3871
]
],
[
[
554,
564
],
[
12192,
12202
],
[
13442,
13452
],
[
14125,
14135
]
],
[
[
595,
613
],
[
13336,
13354
],
[
13379,
13397
]
],
[
[
615,
626
],
[
4623,
4634
]
],
[
[
663,
672
],
[
13288,
13297
]
],
[
[
703,
711
],
[
3797,
3805
],
[
4693,
4701
]
],
[
[
749,
758
],
[
8516,
8525
]
],
[
[
765,
786
]
],
[
[
1124,
1144
]
],
[
[
1630,
1635
],
[
4183,
4188
]
],
[
[
2181,
2188
]
],
[
[
2667,
2674
],
[
3154,
3161
]
],
[
[
2955,
2972
]
],
[
[
3217,
3236
]
],
[
[
5154,
5167
],
[
3567,
3580
],
[
5235,
5248
],
[
6073,
6086
],
[
6307,
6320
]
],
[
[
7360,
7377
],
[
8415,
8432
]
],
[
[
7944,
7958
]
],
[
[
8558,
8570
]
],
[
[
9420,
9438
],
[
8643,
8661
],
[
10269,
10287
],
[
12010,
12028
]
],
[
[
9779,
9794
]
],
[
[
11932,
11951
]
],
[
[
12220,
12237
]
],
[
[
13466,
13483
]
],
[
[
14151,
14167
],
[
14346,
14362
],
[
14932,
14948
],
[
16183,
16199
]
],
[
[
14195,
14218
],
[
14882,
14905
],
[
15870,
15893
],
[
16136,
16159
]
],
[
[
14369,
14387
]
],
[
[
15392,
15412
],
[
16527,
16547
]
],
[
[
16024,
16050
],
[
16574,
16600
]
],
[
[
16254,
16284
]
]
] |
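# A round-trip sketch for the admin quote()/unquote() helpers defined above:
# problematic characters become '_%02X' escapes and are restored afterwards.
# Assumes both functions are in scope; the primary-key value is made up.
pk = "a/b_c:d"
quoted = quote(pk)
print(quoted)                 # 'a_2Fb_5Fc_3Ad'
assert unquote(quoted) == pk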
# -*- coding: utf-8 -*-
import collections
import functools
import os
import struct
import sys
import types as pytypes
import uuid
import weakref
from copy import deepcopy
from numba import _dispatcher
from numba.core import utils, types, errors, typing, serialize, config, compiler, sigutils
from numba.core.compiler_lock import global_compiler_lock
from numba.core.typeconv.rules import default_type_manager
from numba.core.typing.templates import fold_arguments
from numba.core.typing.typeof import Purpose, typeof
from numba.core.bytecode import get_code_object
from numba.core.caching import NullCache, FunctionCache
from numba.core import entrypoints
class OmittedArg(object):
"""
A placeholder for omitted arguments with a default value.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return "omitted arg(%r)" % (self.value,)
@property
def _numba_type_(self):
return types.Omitted(self.value)
class _FunctionCompiler(object):
def __init__(self, py_func, targetdescr, targetoptions, locals,
pipeline_class):
self.py_func = py_func
self.targetdescr = targetdescr
self.targetoptions = targetoptions
self.locals = locals
self.pysig = utils.pysignature(self.py_func)
self.pipeline_class = pipeline_class
# Remember key=(args, return_type) combinations that will fail
# compilation to avoid compilation attempt on them. The values are
# the exceptions.
self._failed_cache = {}
def fold_argument_types(self, args, kws):
"""
Given positional and named argument types, fold keyword arguments
and resolve defaults by inserting types.Omitted() instances.
A (pysig, argument types) tuple is returned.
"""
def normal_handler(index, param, value):
return value
def default_handler(index, param, default):
return types.Omitted(default)
def stararg_handler(index, param, values):
return types.StarArgTuple(values)
# For now, we take argument values from the @jit function, even
# in the case of generated jit.
args = fold_arguments(self.pysig, args, kws,
normal_handler,
default_handler,
stararg_handler)
return self.pysig, args
def compile(self, args, return_type):
status, retval = self._compile_cached(args, return_type)
if status:
return retval
else:
raise retval
def _compile_cached(self, args, return_type):
key = tuple(args), return_type
try:
return False, self._failed_cache[key]
except KeyError:
pass
try:
retval = self._compile_core(args, return_type)
except errors.TypingError as e:
self._failed_cache[key] = e
return False, e
else:
return True, retval
def _compile_core(self, args, return_type):
flags = compiler.Flags()
self.targetdescr.options.parse_as_flags(flags, self.targetoptions)
flags = self._customize_flags(flags)
impl = self._get_implementation(args, {})
cres = compiler.compile_extra(self.targetdescr.typing_context,
self.targetdescr.target_context,
impl,
args=args, return_type=return_type,
flags=flags, locals=self.locals,
pipeline_class=self.pipeline_class)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
return cres
def get_globals_for_reduction(self):
return serialize._get_function_globals_for_reduction(self.py_func)
def _get_implementation(self, args, kws):
return self.py_func
def _customize_flags(self, flags):
return flags
class _GeneratedFunctionCompiler(_FunctionCompiler):
def __init__(self, py_func, targetdescr, targetoptions, locals,
pipeline_class):
super(_GeneratedFunctionCompiler, self).__init__(
py_func, targetdescr, targetoptions, locals, pipeline_class)
self.impls = set()
def get_globals_for_reduction(self):
# This will recursively get the globals used by any nested
# implementation function.
return serialize._get_function_globals_for_reduction(self.py_func)
def _get_implementation(self, args, kws):
impl = self.py_func(*args, **kws)
# Check the generating function and implementation signatures are
# compatible, otherwise compiling would fail later.
pysig = utils.pysignature(self.py_func)
implsig = utils.pysignature(impl)
ok = len(pysig.parameters) == len(implsig.parameters)
if ok:
for pyparam, implparam in zip(pysig.parameters.values(),
implsig.parameters.values()):
# We allow the implementation to omit default values, but
# if it mentions them, they should have the same value...
if (pyparam.name != implparam.name or
pyparam.kind != implparam.kind or
(implparam.default is not implparam.empty and
implparam.default != pyparam.default)):
ok = False
if not ok:
raise TypeError("generated implementation %s should be compatible "
"with signature '%s', but has signature '%s'"
% (impl, pysig, implsig))
self.impls.add(impl)
return impl
_CompileStats = collections.namedtuple(
'_CompileStats', ('cache_path', 'cache_hits', 'cache_misses'))
class _CompilingCounter(object):
"""
    A simple counter that increments in __enter__ and decrements in __exit__.
"""
def __init__(self):
self.counter = 0
def __enter__(self):
assert self.counter >= 0
self.counter += 1
def __exit__(self, *args, **kwargs):
self.counter -= 1
assert self.counter >= 0
def __bool__(self):
return self.counter > 0
__nonzero__ = __bool__
class _DispatcherBase(_dispatcher.Dispatcher):
"""
Common base class for dispatcher Implementations.
"""
__numba__ = "py_func"
def __init__(self, arg_count, py_func, pysig, can_fallback,
exact_match_required):
self._tm = default_type_manager
# A mapping of signatures to compile results
self.overloads = collections.OrderedDict()
self.py_func = py_func
# other parts of Numba assume the old Python 2 name for code object
self.func_code = get_code_object(py_func)
# but newer python uses a different name
self.__code__ = self.func_code
argnames = tuple(pysig.parameters)
default_values = self.py_func.__defaults__ or ()
defargs = tuple(OmittedArg(val) for val in default_values)
try:
lastarg = list(pysig.parameters.values())[-1]
except IndexError:
has_stararg = False
else:
has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
_dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
arg_count, self._fold_args,
argnames, defargs,
can_fallback,
has_stararg,
exact_match_required)
self.doc = py_func.__doc__
self._compiling_counter = _CompilingCounter()
weakref.finalize(self, self._make_finalizer())
def _compilation_chain_init_hook(self):
"""
This will be called ahead of any part of compilation taking place (this
even includes being ahead of working out the types of the arguments).
This permits activities such as initialising extension entry points so
that the compiler knows about additional externally defined types etc
before it does anything.
"""
entrypoints.init_all()
def _reset_overloads(self):
self._clear()
self.overloads.clear()
def _make_finalizer(self):
"""
Return a finalizer function that will release references to
related compiled functions.
"""
overloads = self.overloads
targetctx = self.targetctx
# Early-bind utils.shutting_down() into the function's local namespace
# (see issue #689)
def finalizer(shutting_down=utils.shutting_down):
# The finalizer may crash at shutdown, skip it (resources
# will be cleared by the process exiting, anyway).
if shutting_down():
return
# This function must *not* hold any reference to self:
# we take care to bind the necessary objects in the closure.
for cres in overloads.values():
try:
targetctx.remove_user_function(cres.entry_point)
except KeyError:
pass
return finalizer
@property
def signatures(self):
"""
Returns a list of compiled function signatures.
"""
return list(self.overloads)
@property
def nopython_signatures(self):
return [cres.signature for cres in self.overloads.values()
if not cres.objectmode and not cres.interpmode]
def disable_compile(self, val=True):
"""Disable the compilation of new signatures at call time.
"""
# If disabling compilation then there must be at least one signature
assert (not val) or len(self.signatures) > 0
self._can_compile = not val
def add_overload(self, cres):
args = tuple(cres.signature.args)
sig = [a._code for a in args]
self._insert(sig, cres.entry_point, cres.objectmode, cres.interpmode)
self.overloads[args] = cres
def fold_argument_types(self, args, kws):
return self._compiler.fold_argument_types(args, kws)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
*args* and *kws* types. This allows to resolve the return type.
A (template, pysig, args, kws) tuple is returned.
"""
# XXX how about a dispatcher template class automating the
# following?
# Fold keyword arguments and resolve default values
pysig, args = self._compiler.fold_argument_types(args, kws)
kws = {}
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
def get_overload(self, sig):
"""
Return the compiled function for the given signature.
"""
args, return_type = sigutils.normalize_signature(sig)
return self.overloads[tuple(args)].entry_point
@property
def is_compiling(self):
"""
Whether a specialization is currently being compiled.
"""
return self._compiling_counter
def _compile_for_args(self, *args, **kws):
"""
For internal use. Compile a specialized version of the function
for the given *args* and *kws*, and return the resulting callable.
"""
assert not kws
# call any initialisation required for the compilation chain (e.g.
# extension point registration).
self._compilation_chain_init_hook()
def error_rewrite(e, issue_type):
"""
Rewrite and raise Exception `e` with help supplied based on the
specified issue_type.
"""
if config.SHOW_HELP:
help_msg = errors.error_extras[issue_type]
e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
if config.FULL_TRACEBACKS:
raise e
else:
raise e.with_traceback(None)
argtypes = []
for a in args:
if isinstance(a, OmittedArg):
argtypes.append(types.Omitted(a.value))
else:
argtypes.append(self.typeof_pyval(a))
try:
return self.compile(tuple(argtypes))
except errors.ForceLiteralArg as e:
# Received request for compiler re-entry with the list of arguments
# indicated by e.requested_args.
# First, check if any of these args are already Literal-ized
already_lit_pos = [i for i in e.requested_args
if isinstance(args[i], types.Literal)]
if already_lit_pos:
# Abort compilation if any argument is already a Literal.
# Letting this continue will cause infinite compilation loop.
m = ("Repeated literal typing request.\n"
"{}.\n"
"This is likely caused by an error in typing. "
"Please see nested and suppressed exceptions.")
info = ', '.join('Arg #{} is {}'.format(i, args[i])
for i in sorted(already_lit_pos))
raise errors.CompilerError(m.format(info))
# Convert requested arguments into a Literal.
args = [(types.literal
if i in e.requested_args
else lambda x: x)(args[i])
for i, v in enumerate(args)]
# Re-enter compilation with the Literal-ized arguments
return self._compile_for_args(*args)
except errors.TypingError as e:
# Intercept typing error that may be due to an argument
# that failed inferencing as a Numba type
failed_args = []
for i, arg in enumerate(args):
val = arg.value if isinstance(arg, OmittedArg) else arg
try:
tp = typeof(val, Purpose.argument)
except ValueError as typeof_exc:
failed_args.append((i, str(typeof_exc)))
else:
if tp is None:
failed_args.append(
(i,
"cannot determine Numba type of value %r" % (val,)))
if failed_args:
# Patch error message to ease debugging
msg = str(e).rstrip() + (
"\n\nThis error may have been caused by the following argument(s):\n%s\n"
% "\n".join("- argument %d: %s" % (i, err)
for i, err in failed_args))
e.patch_message(msg)
error_rewrite(e, 'typing')
except errors.UnsupportedError as e:
# Something unsupported is present in the user code, add help info
error_rewrite(e, 'unsupported_error')
except (errors.NotDefinedError, errors.RedefinedError,
errors.VerificationError) as e:
# These errors are probably from an issue with either the code supplied
# being syntactically or otherwise invalid
error_rewrite(e, 'interpreter')
except errors.ConstantInferenceError as e:
# this is from trying to infer something as constant when it isn't
# or isn't supported as a constant
error_rewrite(e, 'constant_inference')
except Exception as e:
if config.SHOW_HELP:
if hasattr(e, 'patch_message'):
help_msg = errors.error_extras['reportable']
e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
# ignore the FULL_TRACEBACKS config, this needs reporting!
raise e
def inspect_llvm(self, signature=None):
"""Get the LLVM intermediate representation generated by compilation.
Parameters
----------
signature : tuple of numba types, optional
Specify a signature for which to obtain the LLVM IR. If None, the
IR is returned for all available signatures.
Returns
-------
llvm : dict[signature, str] or str
Either the LLVM IR string for the specified signature, or, if no
signature was given, a dictionary mapping signatures to LLVM IR
strings.
"""
if signature is not None:
lib = self.overloads[signature].library
return lib.get_llvm_str()
return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
def inspect_asm(self, signature=None):
"""Get the generated assembly code.
Parameters
----------
signature : tuple of numba types, optional
Specify a signature for which to obtain the assembly code. If
None, the assembly code is returned for all available signatures.
Returns
-------
asm : dict[signature, str] or str
Either the assembly code for the specified signature, or, if no
signature was given, a dictionary mapping signatures to assembly
code.
"""
if signature is not None:
lib = self.overloads[signature].library
return lib.get_asm_str()
return dict((sig, self.inspect_asm(sig)) for sig in self.signatures)
def inspect_types(self, file=None, signature=None,
pretty=False, style='default', **kwargs):
"""Print/return Numba intermediate representation (IR)-annotated code.
Parameters
----------
file : file-like object, optional
File to which to print. Defaults to sys.stdout if None. Must be
None if ``pretty=True``.
signature : tuple of numba types, optional
Print/return the intermediate representation for only the given
signature. If None, the IR is printed for all available signatures.
pretty : bool, optional
If True, an Annotate object will be returned that can render the
IR with color highlighting in Jupyter and IPython. ``file`` must
be None if ``pretty`` is True. Additionally, the ``pygments``
library must be installed for ``pretty=True``.
style : str, optional
Choose a style for rendering. Ignored if ``pretty`` is ``False``.
This is directly consumed by ``pygments`` formatters. To see a
list of available styles, import ``pygments`` and run
``list(pygments.styles.get_all_styles())``.
Returns
-------
annotated : Annotate object, optional
Only returned if ``pretty=True``, otherwise this function is only
used for its printing side effect. If ``pretty=True``, an Annotate
object is returned that can render itself in Jupyter and IPython.
"""
overloads = self.overloads
if signature is not None:
overloads = {signature: self.overloads[signature]}
if not pretty:
if file is None:
file = sys.stdout
for ver, res in overloads.items():
print("%s %s" % (self.py_func.__name__, ver), file=file)
print('-' * 80, file=file)
print(res.type_annotation, file=file)
print('=' * 80, file=file)
else:
if file is not None:
raise ValueError("`file` must be None if `pretty=True`")
from numba.core.annotations.pretty_annotate import Annotate
return Annotate(self, signature=signature, style=style)
def inspect_cfg(self, signature=None, show_wrapper=None):
"""
For inspecting the CFG of the function.
By default the CFG of the user function is shown. The *show_wrapper*
option can be set to "python" or "cfunc" to show the python wrapper
function or the *cfunc* wrapper function, respectively.
"""
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
if show_wrapper == 'python':
fname = cres.fndesc.llvm_cpython_wrapper_name
elif show_wrapper == 'cfunc':
fname = cres.fndesc.llvm_cfunc_wrapper_name
else:
fname = cres.fndesc.mangled_name
return lib.get_function_cfg(fname)
return dict((sig, self.inspect_cfg(sig, show_wrapper=show_wrapper))
for sig in self.signatures)
def inspect_disasm_cfg(self, signature=None):
"""
For inspecting the CFG of the disassembly of the function.
Requires python package: r2pipe
Requires radare2 binary on $PATH.
Notebook rendering requires python package: graphviz
signature : tuple of Numba types, optional
Print/return the disassembly CFG for only the given signatures.
If None, the IR is printed for all available signatures.
"""
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
return lib.get_disasm_cfg()
return dict((sig, self.inspect_disasm_cfg(sig))
for sig in self.signatures)
def get_annotation_info(self, signature=None):
"""
Gets the annotation information for the function specified by
signature. If no signature is supplied a dictionary of signature to
annotation information is returned.
"""
signatures = self.signatures if signature is None else [signature]
out = collections.OrderedDict()
for sig in signatures:
cres = self.overloads[sig]
ta = cres.type_annotation
key = (ta.func_id.filename + ':' + str(ta.func_id.firstlineno + 1),
ta.signature)
out[key] = ta.annotate_raw()[key]
return out
def _explain_ambiguous(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
"""
assert not kws, "kwargs not handled"
args = tuple([self.typeof_pyval(a) for a in args])
# The order here must be deterministic for testing purposes, which
# is ensured by the OrderedDict.
sigs = self.nopython_signatures
# This will raise
self.typingctx.resolve_overload(self.py_func, sigs, args, kws,
allow_ambiguous=False)
def _explain_matching_error(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
"""
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
msg = ("No matching definition for argument type(s) %s"
% ', '.join(map(str, args)))
raise TypeError(msg)
def _search_new_conversions(self, *args, **kws):
"""
Callback for the C _Dispatcher object.
Search for approximately matching signatures for the given arguments,
and ensure the corresponding conversions are registered in the C++
type manager.
"""
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
found = False
for sig in self.nopython_signatures:
conv = self.typingctx.install_possible_conversions(args, sig.args)
if conv:
found = True
return found
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.py_func)
def typeof_pyval(self, val):
"""
Resolve the Numba type of Python value *val*.
This is called from numba._dispatcher as a fallback if the native code
cannot decide the type.
"""
# Not going through the resolve_argument_type() indirection
# can save a couple µs.
try:
tp = typeof(val, Purpose.argument)
except ValueError:
tp = types.pyobject
else:
if tp is None:
tp = types.pyobject
return tp
class _MemoMixin:
__uuid = None
# A {uuid -> instance} mapping, for deserialization
_memo = weakref.WeakValueDictionary()
# hold refs to last N functions deserialized, retaining them in _memo
# regardless of whether there is another reference
_recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
@property
def _uuid(self):
"""
An instance-specific UUID, to avoid multiple deserializations of
a given instance.
Note: this is lazily-generated, for performance reasons.
"""
u = self.__uuid
if u is None:
u = str(uuid.uuid1())
self._set_uuid(u)
return u
def _set_uuid(self, u):
assert self.__uuid is None
self.__uuid = u
self._memo[u] = self
self._recent.append(self)
class Dispatcher(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
"""
Implementation of user-facing dispatcher objects (i.e. created using
the @jit decorator).
This is an abstract base class. Subclasses should define the targetdescr
class attribute.
"""
_fold_args = True
_impl_kinds = {
'direct': _FunctionCompiler,
'generated': _GeneratedFunctionCompiler,
}
__numba__ = 'py_func'
def __init__(self, py_func, locals={}, targetoptions={},
impl_kind='direct', pipeline_class=compiler.Compiler):
"""
Parameters
----------
py_func: function object to be compiled
locals: dict, optional
Mapping of local variable names to Numba types. Used to override
the types deduced by the type inference engine.
targetoptions: dict, optional
Target-specific config options.
impl_kind: str
Select the compiler mode for `@jit` and `@generated_jit`
pipeline_class: type numba.compiler.CompilerBase
The compiler pipeline type.
"""
self.typingctx = self.targetdescr.typing_context
self.targetctx = self.targetdescr.target_context
pysig = utils.pysignature(py_func)
arg_count = len(pysig.parameters)
can_fallback = not targetoptions.get('nopython', False)
_DispatcherBase.__init__(self, arg_count, py_func, pysig, can_fallback,
exact_match_required=False)
functools.update_wrapper(self, py_func)
self.targetoptions = targetoptions
self.locals = locals
self._cache = NullCache()
compiler_class = self._impl_kinds[impl_kind]
self._impl_kind = impl_kind
self._compiler = compiler_class(py_func, self.targetdescr,
targetoptions, locals, pipeline_class)
self._cache_hits = collections.Counter()
self._cache_misses = collections.Counter()
self._type = types.Dispatcher(self)
self.typingctx.insert_global(self, self._type)
def dump(self, tab=''):
print(f'{tab}DUMP {type(self).__name__}[{self.py_func.__name__}, type code={self._type._code}]')
for cres in self.overloads.values():
cres.dump(tab = tab + ' ')
print(f'{tab}END DUMP {type(self).__name__}[{self.py_func.__name__}]')
@property
def _numba_type_(self):
return types.Dispatcher(self)
def enable_caching(self):
self._cache = FunctionCache(self.py_func)
def __get__(self, obj, objtype=None):
'''Allow a JIT function to be bound as a method to an object'''
if obj is None: # Unbound method
return self
else: # Bound method
return pytypes.MethodType(self, obj)
def _reduce_states(self):
"""
Reduce the instance for pickling. This will serialize
the original function as well the compilation options and
compiled signatures, but not the compiled code itself.
NOTE: part of ReduceMixin protocol
"""
if self._can_compile:
sigs = []
else:
sigs = [cr.signature for cr in self.overloads.values()]
return dict(
uuid=str(self._uuid),
py_func=self.py_func,
locals=self.locals,
targetoptions=self.targetoptions,
impl_kind=self._impl_kind,
can_compile=self._can_compile,
sigs=sigs,
)
@classmethod
def _rebuild(cls, uuid, py_func, locals, targetoptions, impl_kind,
can_compile, sigs):
"""
        Rebuild a Dispatcher instance after it was __reduce__'d.
NOTE: part of ReduceMixin protocol
"""
try:
return cls._memo[uuid]
except KeyError:
pass
self = cls(py_func, locals, targetoptions, impl_kind)
# Make sure this deserialization will be merged with subsequent ones
self._set_uuid(uuid)
for sig in sigs:
self.compile(sig)
self._can_compile = can_compile
return self
@global_compiler_lock
def compile(self, sig):
if not self._can_compile:
raise RuntimeError("compilation disabled")
# Use counter to track recursion compilation depth
with self._compiling_counter:
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exists
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
# Try to load from disk cache
cres = self._cache.load_overload(sig, self.targetctx)
if cres is not None:
self._cache_hits[sig] += 1
# XXX fold this in add_overload()? (also see compiler.py)
if not cres.objectmode and not cres.interpmode:
self.targetctx.insert_user_function(cres.entry_point,
cres.fndesc, [cres.library])
self.add_overload(cres)
return cres.entry_point
self._cache_misses[sig] += 1
try:
cres = self._compiler.compile(args, return_type)
except errors.ForceLiteralArg as e:
def folded(args, kws):
return self._compiler.fold_argument_types(args, kws)[1]
raise e.bind_fold_arguments(folded)
self.add_overload(cres)
self._cache.save_overload(sig, cres)
return cres.entry_point
def get_compile_result(self, sig):
"""Compile (if needed) and return the compilation result with the
given signature.
"""
atypes = tuple(sig.args)
if atypes not in self.overloads:
self.compile(atypes)
return self.overloads[atypes]
def recompile(self):
"""
Recompile all signatures afresh.
"""
sigs = list(self.overloads)
old_can_compile = self._can_compile
# Ensure the old overloads are disposed of, including compiled functions.
self._make_finalizer()()
self._reset_overloads()
self._cache.flush()
self._can_compile = True
try:
for sig in sigs:
self.compile(sig)
finally:
self._can_compile = old_can_compile
@property
def stats(self):
return _CompileStats(
cache_path=self._cache.cache_path,
cache_hits=self._cache_hits,
cache_misses=self._cache_misses,
)
def parallel_diagnostics(self, signature=None, level=1):
"""
Print parallel diagnostic information for the given signature. If no
signature is present it is printed for all known signatures. level is
used to adjust the verbosity, level=1 (default) is minimal verbosity,
and 2, 3, and 4 provide increasing levels of verbosity.
"""
def dump(sig):
ol = self.overloads[sig]
pfdiag = ol.metadata.get('parfor_diagnostics', None)
if pfdiag is None:
msg = "No parfors diagnostic available, is 'parallel=True' set?"
raise ValueError(msg)
pfdiag.dump(level)
if signature is not None:
dump(signature)
else:
[dump(sig) for sig in self.signatures]
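# Illustrative use of the diagnostics hook defined above -- a hedged sketch
# that assumes numba and numpy are installed; `_total` is a made-up function.
import numpy as _np
from numba import njit as _njit, prange as _prange

@_njit(parallel=True)
def _total(a):
    acc = 0.0
    for i in _prange(a.size):    # parallel region recorded by the parfor pass
        acc += a[i]
    return acc

_total(_np.ones(16))                    # first call triggers compilation
_total.parallel_diagnostics(level=2)    # print parfor info for known signatures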
def get_metadata(self, signature=None):
"""
Obtain the compilation metadata for a given signature.
"""
if signature is not None:
return self.overloads[signature].metadata
else:
return dict((sig, self.overloads[sig].metadata) for sig in self.signatures)
def get_function_type(self):
"""Return unique function type of dispatcher when possible, otherwise
return None.
A Dispatcher instance has unique function type when it
contains exactly one compilation result and its compilation
has been disabled (via its disable_compile method).
"""
if not self._can_compile and len(self.overloads) == 1:
cres = tuple(self.overloads.values())[0]
return types.FunctionType(cres.signature)
class LiftedCode(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
"""
Implementation of the hidden dispatcher objects used for lifted code
(a lifted loop is really compiled as a separate function).
"""
_fold_args = False
can_cache = False
def __init__(self, func_ir, typingctx, targetctx, flags, locals):
self.func_ir = func_ir
self.lifted_from = None
self.typingctx = typingctx
self.targetctx = targetctx
self.flags = flags
self.locals = locals
_DispatcherBase.__init__(self, self.func_ir.arg_count,
self.func_ir.func_id.func,
self.func_ir.func_id.pysig,
can_fallback=True,
exact_match_required=False)
def _reduce_states(self):
"""
Reduce the instance for pickling. This will serialize
the original function as well as the compilation options and
compiled signatures, but not the compiled code itself.
NOTE: part of ReduceMixin protocol
"""
return dict(
uuid=self._uuid, func_ir=self.func_ir, flags=self.flags,
locals=self.locals, extras=self._reduce_extras(),
)
def _reduce_extras(self):
"""
NOTE: sub-class can override to add extra states
"""
return {}
@classmethod
def _rebuild(cls, uuid, func_ir, flags, locals, extras):
"""
Rebuild a Dispatcher instance after it was __reduce__'d.
NOTE: part of ReduceMixin protocol
"""
try:
return cls._memo[uuid]
except KeyError:
pass
# NOTE: We are assuming that this must be cpu_target, which is true
# for now.
# TODO: refactor this to not assume `cpu_target`
from numba.core import registry
typingctx = registry.cpu_target.typing_context
targetctx = registry.cpu_target.target_context
self = cls(func_ir, typingctx, targetctx, flags, locals, **extras)
self._set_uuid(uuid)
return self
def get_source_location(self):
"""Return the starting line number of the loop.
"""
return self.func_ir.loc.line
def _pre_compile(self, args, return_type, flags):
"""Pre-compile actions
"""
pass
@global_compiler_lock
def compile(self, sig):
# Use counter to track recursion compilation depth
with self._compiling_counter:
# XXX this is mostly duplicated from Dispatcher.
flags = self.flags
args, return_type = sigutils.normalize_signature(sig)
# Don't recompile if signature already exists
# (e.g. if another thread compiled it before we got the lock)
existing = self.overloads.get(tuple(args))
if existing is not None:
return existing.entry_point
self._pre_compile(args, return_type, flags)
# Clone IR to avoid (some of the) mutation in the rewrite pass
cloned_func_ir = self.func_ir.copy()
cres = compiler.compile_ir(typingctx=self.typingctx,
targetctx=self.targetctx,
func_ir=cloned_func_ir,
args=args, return_type=return_type,
flags=flags, locals=self.locals,
lifted=(),
lifted_from=self.lifted_from,
is_lifted_loop=True,)
# Check typing error if object mode is used
if cres.typing_error is not None and not flags.enable_pyobject:
raise cres.typing_error
self.add_overload(cres)
return cres.entry_point
class LiftedLoop(LiftedCode):
def _pre_compile(self, args, return_type, flags):
assert not flags.enable_looplift, "Enable looplift flags is on"
class LiftedWith(LiftedCode):
can_cache = True
def _reduce_extras(self):
return dict(output_types=self.output_types)
@property
def _numba_type_(self):
return types.Dispatcher(self)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
*args* and *kws* types. This enables the resolving of the return type.
A (template, pysig, args, kws) tuple is returned.
"""
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
pysig = None
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
class ObjModeLiftedWith(LiftedWith):
def __init__(self, *args, **kwargs):
self.output_types = kwargs.pop('output_types', None)
super(LiftedWith, self).__init__(*args, **kwargs)
if not self.flags.force_pyobject:
raise ValueError("expecting `flags.force_pyobject`")
if self.output_types is None:
raise TypeError('`output_types` must be provided')
@property
def _numba_type_(self):
return types.ObjModeDispatcher(self)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
*args* and *kws* types. This enables the resolving of the return type.
A (template, pysig, args, kws) tuple is returned.
"""
assert not kws
self._legalize_arg_types(args)
# Coerce to object mode
args = [types.ffi_forced_object] * len(args)
if self._can_compile:
self.compile(tuple(args))
signatures = [typing.signature(self.output_types, *args)]
pysig = None
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
call_template = typing.make_concrete_template(
name, key=func_name, signatures=signatures)
return call_template, pysig, args, kws
def _legalize_arg_types(self, args):
for i, a in enumerate(args, start=1):
if isinstance(a, types.List):
msg = (
'Does not support list type inputs into '
'with-context for arg {}'
)
raise errors.TypingError(msg.format(i))
elif isinstance(a, types.Dispatcher):
msg = (
'Does not support function type inputs into '
'with-context for arg {}'
)
raise errors.TypingError(msg.format(i))
# Initialize typeof machinery
_dispatcher.typeof_init(
OmittedArg,
dict((str(t), t._code) for t in types.number_domain))
| [
[
[
33,
44
],
[
5914,
5925
],
[
25140,
25151
],
[
6823,
6834
],
[
22412,
22423
],
[
27653,
27664
],
[
27704,
27715
]
],
[
[
52,
61
],
[
27244,
27253
]
],
[
[
69,
71
]
],
[
[
79,
85
]
],
[
[
93,
96
],
[
19879,
19882
]
],
[
[
104,
120
],
[
28516,
28523
]
],
[
[
128,
132
],
[
25484,
25488
]
],
[
[
140,
147
],
[
24967,
24974
],
[
7936,
7943
]
],
[
[
165,
173
]
],
[
[
193,
204
],
[
6477,
6488
],
[
41191,
41202
],
[
7480,
7491
]
],
[
[
228,
233
],
[
1283,
1288
],
[
4914,
4919
],
[
4964,
4969
],
[
8890,
8895
],
[
26960,
26965
]
],
[
[
235,
240
],
[
41268,
41273
],
[
957,
962
],
[
12866,
12871
],
[
13379,
13384
],
[
14078,
14083
],
[
24828,
24833
],
[
24751,
24756
],
[
27748,
27753
],
[
28182,
28187
],
[
34048,
34053
],
[
38370,
38375
],
[
39695,
39700
],
[
40115,
40120
],
[
40680,
40685
],
[
40930,
40935
],
[
1978,
1983
],
[
2071,
2076
]
],
[
[
242,
248
],
[
2910,
2916
],
[
13039,
13045
],
[
13962,
13968
],
[
14367,
14373
],
[
15494,
15500
],
[
15669,
15675
],
[
15693,
15699
],
[
15732,
15738
],
[
15962,
15968
],
[
16318,
16324
],
[
31092,
31098
],
[
40865,
40871
],
[
41125,
41131
],
[
12516,
12522
]
],
[
[
250,
256
],
[
11315,
11321
],
[
39082,
39088
],
[
40244,
40250
],
[
40428,
40434
]
],
[
[
258,
267
],
[
25715,
25724
],
[
34102,
34111
],
[
3944,
3953
],
[
4615,
4624
]
],
[
[
269,
275
],
[
25165,
25171
],
[
16221,
16227
],
[
12471,
12477
],
[
12635,
12641
]
],
[
[
277,
285
],
[
26259,
26267
],
[
3114,
3122
],
[
3317,
3325
],
[
37270,
37278
]
],
[
[
287,
295
],
[
11611,
11619
],
[
30160,
30168
],
[
36766,
36774
]
],
[
[
333,
353
],
[
29893,
29913
],
[
36496,
36516
]
],
[
[
392,
412
],
[
6723,
6743
]
],
[
[
453,
467
],
[
2225,
2239
]
],
[
[
505,
512
],
[
14716,
14723
],
[
24689,
24696
]
],
[
[
514,
520
],
[
14704,
14710
],
[
24677,
24683
]
],
[
[
553,
568
],
[
6982,
6997
]
],
[
[
600,
609
],
[
27379,
27388
]
],
[
[
611,
624
],
[
28258,
28271
]
],
[
[
648,
659
],
[
8408,
8419
]
],
[
[
668,
678
],
[
41220,
41230
],
[
7220,
7230
],
[
12821,
12831
],
[
14637,
14647
]
],
[
[
991,
1008
],
[
4175,
4192
],
[
26040,
26057
]
],
[
[
4148,
4174
],
[
26080,
26106
],
[
4312,
4338
]
],
[
[
5898,
5911
],
[
32276,
32289
]
],
[
[
6013,
6030
],
[
7908,
7925
]
],
[
[
6461,
6476
],
[
25750,
25765
],
[
34137,
34152
],
[
27102,
27117
],
[
34622,
34637
]
],
[
[
24869,
24879
],
[
25738,
25748
],
[
34125,
34135
]
],
[
[
25704,
25714
]
],
[
[
34091,
34101
],
[
38035,
38045
],
[
38193,
38203
]
],
[
[
38024,
38034
]
],
[
[
38182,
38192
],
[
39256,
39266
],
[
39385,
39395
]
],
[
[
39238,
39255
]
]
] |
#!/bin/env python
import sys
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from textwrap import dedent
ats_mock = Mock()
with patch.dict('sys.modules',
{'ats' : ats_mock}, autospec=True):
import genie.parsergen
from genie.parsergen import oper_fill
from genie.parsergen import oper_check
from genie.parsergen import oper_fill_tabular
from genie.parsergen.examples.parsergen.pyAts import parsergen_demo_mkpg
import xml.etree.ElementTree as ET
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxe.show_interface import ShowInterfacesSwitchport,\
ShowIpInterfaceBriefPipeVlan,\
ShowInterfaces, ShowIpInterface,\
ShowIpv6Interface, \
ShowInterfacesTrunk, \
ShowInterfacesCounters, \
ShowInterfacesAccounting, \
ShowIpInterfaceBriefPipeIp
class test_show_interface_parsergen(unittest.TestCase):
def test_tabular_parser(self):
self.showCommandOutput='''
R1#show ip interface brief
Interface IP-Address OK? Method Status Protocol
GigabitEthernet0/0 10.1.10.20 YES NVRAM up up
GigabitEthernet1/0/1 unassigned YES unset up up
GigabitEthernet1/0/10 unassigned YES unset down down
'''
self.outputDict = {'GigabitEthernet0/0': {'IP-Address': '10.1.10.20',
'Interface': 'GigabitEthernet0/0',
'Method': 'NVRAM',
'OK?': 'YES',
'Protocol': 'up',
'Status': 'up'},
'GigabitEthernet1/0/1': {'IP-Address': 'unassigned',
'Interface': 'GigabitEthernet1/0/1',
'Method': 'unset',
'OK?': 'YES',
'Protocol': 'up',
'Status': 'up'},
'GigabitEthernet1/0/10': {'IP-Address': 'unassigned',
'Interface': 'GigabitEthernet1/0/10',
'Method': 'unset',
'OK?': 'YES',
'Protocol': 'down',
'Status': 'down'}}
# Define how device stub will behave when accessed by production parser.
device_kwargs = {'is_connected.return_value':True,
'execute.return_value':dedent(self.showCommandOutput)}
device1 = Mock(**device_kwargs)
device1.name='router3'
result = genie.parsergen.oper_fill_tabular(device=device1,
show_command="show ip interface brief",
refresh_cache=True,
header_fields=
[ "Interface",
"IP-Address",
"OK\?",
"Method",
"Status",
"Protocol" ],
label_fields=
[ "Interface",
"IP-Address",
"OK?",
"Method",
"Status",
"Protocol" ],
index=[0])
self.assertEqual(result.entries, self.outputDict)
args, kwargs = device1.execute.call_args
self.assertTrue('show ip interface brief' in args,
msg='The expected command was not sent to the router')
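# A self-contained sketch of the Mock-stubbing pattern used above: dotted
# keyword names configure child mocks so the code under test sees canned
# device output. `_fake_parser` is a hypothetical consumer, not a genie API.
from textwrap import dedent as _dedent
from unittest.mock import Mock as _Mock

_canned = _dedent('''
    Interface      IP-Address    Status
    Gi0/0          10.1.10.20    up
''')

_device = _Mock(**{'is_connected.return_value': True,
                   'execute.return_value': _canned})

def _fake_parser(dev, command):
    # a production parser would call dev.execute(command) and parse the text
    return dev.execute(command).strip().splitlines()

_rows = _fake_parser(_device, 'show ip interface brief')
_device.execute.assert_called_once_with('show ip interface brief')
assert _rows[0].startswith('Interface')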
#############################################################################
# Unit test for ShowIpInterfaceBriefPipeIp
#############################################################################
class test_show_ip_interfaces_brief_pipe_ip(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {'interface':
{'GigabitEthernet0/0': {'interface_ok': 'YES',
'interface_status': 'up',
'ip_address': '10.1.18.80',
'method': 'manual',
'protocol_status': 'up'}}}
golden_output = {'execute.return_value': '''
R1#sh ip int brief | i 10.1.18.80
GigabitEthernet0/0 10.1.18.80 YES manual up up
'''}
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowIpInterfaceBriefPipeIp(device=self.device)
parsed_output = obj.parse(ip='10.1.18.80')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowIpInterfaceBriefPipeIp(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(ip='10.1.18.80')
# Commented out due to old version of yang; will enhance it later
# class test_show_interface_brief_pipe_vlan_yang(unittest.TestCase):
# device = Device(name='aDevice')
# device1 = Device(name='bDevice')
# golden_parsed_output = {'interface': {'Vlan1': {'vlan_id': {'1': {'ip_address': 'unassigned'}}},
# 'Vlan100': {'vlan_id': {'100': {'ip_address': '201.0.12.1'}}}}}
# class etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# <name>1</name>
# <ip>
# <no-address>
# <address>false</address>
# </no-address>
# </ip>
# <shutdown/>
# </Vlan>
# <Vlan>
# <name>100</name>
# <ip>
# <address>
# <primary>
# <address>201.0.12.1</address>
# <mask>255.255.255.0</mask>
# </primary>
# </address>
# </ip>
# <ipv6>
# <address>
# <prefix-list>
# <prefix>2001::12:30/128</prefix>
# </prefix-list>
# </address>
# </ipv6>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# golden_output = {'get.return_value': etree_holder()}
# def test_golden(self):
# self.device = Mock(**self.golden_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.golden_parsed_output)
# empty_parsed_output = {'interface': {}}
# class empty_etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# empty_output = {'get.return_value': empty_etree_holder()}
# def test_empty(self):
# self.device1 = Mock(**self.empty_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device1)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.empty_parsed_output)
#############################################################################
# Unit test for Show Interfaces switchport
#############################################################################
class test_show_interfaces_switchport(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"GigabitEthernet1/0/4": {
"switchport_mode": "trunk",
"pruning_vlans": "2-1001",
'operational_mode': 'trunk',
"switchport_enable": True,
"trunk_vlans": "200-211",
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q"
},
"access_vlan": "1",
"unknown_unicast_blocked": False,
"native_vlan_tagging": True,
"unknown_multicast_blocked": False,
"protected": False,
"negotiation_of_trunk": True,
"capture_vlans": "all",
"encapsulation": {
"operational_encapsulation": "dot1q",
"native_vlan": "1",
"administrative_encapsulation": "dot1q"
}
},
"GigabitEthernet1/0/2": {
"pruning_vlans": "2-1001",
"switchport_enable": True,
"unknown_multicast_blocked": False,
"trunk_vlans": "100-110",
"port_channel": {
"port_channel_int": "Port-channel12",
"port_channel_member": True
},
"access_vlan": "1",
"operational_mode": "trunk",
"unknown_unicast_blocked": False,
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q",
"operational": "10 (VLAN0010) 100 (VLAN0100)",
"trunk_mappings": "10 (VLAN0010) 100 (VLAN0100)"
},
"encapsulation": {
"operational_encapsulation": "dot1q",
"native_vlan": "1",
"administrative_encapsulation": "dot1q"
},
"protected": False,
"native_vlan_tagging": True,
"negotiation_of_trunk": True,
"capture_vlans": "all",
"switchport_mode": "trunk"
},
"GigabitEthernet1/0/5": {
"switchport_mode": "static access",
"pruning_vlans": "2-1001",
"switchport_enable": True,
"trunk_vlans": "all",
'operational_mode': 'down',
"capture_mode": False,
"private_vlan": {
"native_vlan_tagging": True,
"encapsulation": "dot1q"
},
"access_vlan": "1",
"unknown_unicast_blocked": False,
"native_vlan_tagging": True,
"unknown_multicast_blocked": False,
"protected": False,
"negotiation_of_trunk": False,
"capture_vlans": "all",
"encapsulation": {
"native_vlan": "1",
"administrative_encapsulation": "dot1q"
}
},
"Port-channel12": {
"switchport_enable": True,
"private_vlan": {
"encapsulation": "dot1q",
"native_vlan_tagging": True
},
"native_vlan_tagging": False,
"negotiation_of_trunk": True,
"unknown_unicast_blocked": False,
"protected": False,
"encapsulation": {
"administrative_encapsulation": "dot1q",
"native_vlan": "0"
},
"switchport_mode": "trunk",
"unknown_multicast_blocked": False,
"trunk_vlans": "100-110",
"operational_mode": "down",
"pruning_vlans": "2-1001",
"port_channel": {
"port_channel_member": True,
"port_channel_member_intfs": [
"GigabitEthernet1/0/2"
]
}
}
}
golden_output = {'execute.return_value': '''
Name: Gi1/0/2
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk (member of bundle Po12)
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings:
10 (VLAN0010) 100 (VLAN0100)
Operational private-vlan:
10 (VLAN0010) 100 (VLAN0100)
Trunking VLANs Enabled: 100-110
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: false
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Gi1/0/4
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: trunk
Administrative Trunking Encapsulation: dot1q
Operational Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: 200-211
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: false
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Gi1/0/5
Switchport: Enabled
Administrative Mode: static access
Operational Mode: down
Administrative Trunking Encapsulation: dot1q
Negotiation of Trunking: Off
Access Mode VLAN: 1 (default)
Trunking Native Mode VLAN: 1 (default)
Administrative Native VLAN tagging: enabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: ALL
Pruning VLANs Enabled: 2-1001
Capture Mode Disabled
Capture VLANs Allowed: ALL
Protected: false
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
Name: Po12
Switchport: Enabled
Administrative Mode: trunk
Operational Mode: down
Administrative Trunking Encapsulation: dot1q
Negotiation of Trunking: On
Access Mode VLAN: unassigned
Trunking Native Mode VLAN: 0 (Inactive)
Administrative Native VLAN tagging: disabled
Voice VLAN: none
Administrative private-vlan host-association: none
Administrative private-vlan mapping: none
Administrative private-vlan trunk native VLAN: none
Administrative private-vlan trunk Native VLAN tagging: enabled
Administrative private-vlan trunk encapsulation: dot1q
Administrative private-vlan trunk normal VLANs: none
Administrative private-vlan trunk associations: none
Administrative private-vlan trunk mappings: none
Operational private-vlan: none
Trunking VLANs Enabled: 100-110
Pruning VLANs Enabled: 2-1001
Protected: false
Unknown unicast blocked: disabled
Unknown multicast blocked: disabled
Appliance trust: none
'''}
def test_golden(self):
self.device = Mock(**self.golden_output)
intf_obj = ShowInterfacesSwitchport(device=self.device)
parsed_output = intf_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
intf_obj = ShowInterfacesSwitchport(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = intf_obj.parse()
#############################################################################
# Unit test for Show Interfaces
#############################################################################
class test_show_interfaces(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"Port-channel12": {
"flow_control": {
"send": False,
"receive": False
},
"type": "EtherChannel",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d23h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 2000,
"in_rate_pkts": 2
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 961622,
"in_multicast_pkts": 4286699522,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 72614643,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 944788,
"out_pkts": 39281,
"out_late_collision": 0,
"out_octets": 6235318,
"in_overrun": 0,
"out_babble": 0
},
"auto_negotiate": True,
"phys_address": "0057.d228.1a02",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "full",
"link_type": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo"
},
"encapsulations": {
"encapsulation": "qinq virtual lan",
"first_dot1q": "10",
"second_dot1q": "20",
},
"last_input": "never",
"last_output": "1d22h",
"line_protocol": "up",
"mac_address": "0057.d228.1a02",
"connected": True,
"port_channel": {
"port_channel_member": True,
"port_channel_member_intfs": ['GigabitEthernet1/0/2'],
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255"
},
"GigabitEthernet1/0/1": {
"flow_control": {
"send": False,
"receive": False
},
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 30,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 12127,
"in_multicast_pkts": 4171,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 2297417,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 0,
"out_pkts": 12229,
"out_late_collision": 0,
"out_octets": 2321107,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "0057.d228.1a64",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"description": "desc",
"oper_status": "down",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"ipv4": {
"10.1.1.1/24": {
"prefix_length": "24",
"ip": "10.1.1.1"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "04:39:18",
"line_protocol": "down",
"mac_address": "0057.d228.1a64",
"connected": False,
"port_channel": {
"port_channel_member": False
},
"media_type": "10/100/1000BaseTX",
"bandwidth": 768,
"port_speed": "1000",
"enabled": False,
"arp_timeout": "04:00:00",
"mtu": 1500,
"delay": 3330,
"reliability": "255/255"
},
"GigabitEthernet3": {
"flow_control": {
"send": False,
"receive": False
},
"type": "CSR vNIC",
'auto_negotiate': True,
'duplex_mode': 'full',
'link_type': 'auto',
'media_type': 'RJ45',
'port_speed': '1000',
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "never",
"out_interface_resets": 1,
"in_mac_pause_frames": 0,
"out_collision": 0,
"in_crc_errors": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 6,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 480,
"out_unknown_protocl_drops": 0,
"out_no_carrier": 0,
"out_lost_carrier": 0,
"in_broadcast_pkts": 0,
"out_pkts": 28,
"out_late_collision": 0,
"out_octets": 7820,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "5254.0072.9b0c",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
},
"unnumbered": {
"interface_ref": "Loopback0"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "00:00:27",
"line_protocol": "up",
"mac_address": "5254.0072.9b0c",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"Loopback0": {
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 75,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo"
},
"mtu": 1514,
"encapsulations": {
"encapsulation": "loopback"
},
"last_output": "never",
"type": "Loopback",
"line_protocol": "up",
"oper_status": "up",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 0,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 0,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5760,
"in_overrun": 0,
"in_abort": 0
},
"reliability": "255/255",
"bandwidth": 8000000,
"port_channel": {
"port_channel_member": False
},
"enabled": True,
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
}
},
"rxload": "1/255",
"delay": 5000,
"last_input": "1d02h"
},
"Vlan100": {
"type": "Ethernet SVI",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 50790,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 3657594,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5526,
"in_overrun": 0
},
"phys_address": "0057.d228.1a51",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"output_hang": "never",
"ipv4": {
"201.0.12.1/24": {
"prefix_length": "24",
"ip": "201.0.12.1"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "1d03h",
"line_protocol": "up",
"mac_address": "0057.d228.1a51",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"GigabitEthernet1/0/2": {
"flow_control": {
"send": False,
"receive": False
},
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 5,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 3000,
"in_rate_pkts": 5
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 545526,
"in_multicast_pkts": 535961,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 41210298,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 535961,
"out_pkts": 23376,
"out_late_collision": 0,
"out_octets": 3642296,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "0057.d228.1a02",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"media_type": "10/100/1000BaseTX",
"rxload": "1/255",
"duplex_mode": "full",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "00:00:02",
"line_protocol": "up",
"mac_address": "0057.d228.1a02",
"connected": True,
"port_channel": {
"port_channel_member": True,
'port_channel_int': 'Port-channel12',
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255"
},
"GigabitEthernet0/0/4": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"bandwidth": 1000000,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0
}
},
"delay": 10,
"enabled": False,
"encapsulations": {
"encapsulation": "arpa"
},
"flow_control": {
"receive": False, "send": False
},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"mac_address": "380e.4d6c.7006",
"phys_address": "380e.4d6c.7006",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"port_channel": {
"port_channel_member": False
},
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-2T+6X1GE"
}
}
golden_output = {'execute.return_value': '''
GigabitEthernet1/0/1 is administratively down, line protocol is down (disabled)
Hardware is Gigabit Ethernet, address is 0057.d228.1a64 (bia 0057.d228.1a64)
Description: desc
Internet address is 10.1.1.1/24
MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 04:39:18, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
30 second input rate 0 bits/sec, 0 packets/sec
30 second output rate 0 bits/sec, 0 packets/sec
12127 packets input, 2297417 bytes, 0 no buffer
Received 4173 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4171 multicast, 0 pause input
0 input packets with dribble condition detected
12229 packets output, 2321107 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet1/0/2 is up, line protocol is up (connected)
Hardware is Gigabit Ethernet, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:02, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 3000 bits/sec, 5 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
545526 packets input, 41210298 bytes, 0 no buffer
Received 535996 broadcasts (535961 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 535961 multicast, 0 pause input
0 input packets with dribble condition detected
23376 packets output, 3642296 bytes, 0 underruns
0 output errors, 0 collisions, 5 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet3 is up, line protocol is up
Hardware is CSR vNIC, address is 5254.0072.9b0c (bia 5254.0072.9b0c)
Interface is unnumbered. Using address of Loopback0 (200.2.1.1)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is RJ45
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:27, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
6 packets input, 480 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
28 packets output, 7820 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
Loopback0 is up, line protocol is up
Hardware is Loopback
Internet address is 200.2.1.1/24
MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation LOOPBACK, loopback not set
Keepalive set (10 sec)
Last input 1d02h, output never, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
72 packets output, 5760 bytes, 0 underruns
0 output errors, 0 collisions, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Vlan100 is up, line protocol is up
Hardware is Ethernet SVI, address is 0057.d228.1a51 (bia 0057.d228.1a51)
Internet address is 201.0.12.1/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d03h, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
50790 packets input, 3657594 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
72 packets output, 5526 bytes, 0 underruns
0 output errors, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Port-channel12 is up, line protocol is up (connected)
Hardware is EtherChannel, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, link type is auto, media type is
input flow-control is off, output flow-control is unsupported
Members in this channel: Gi1/0/2
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d22h, output hang never
Last clearing of "show interface" counters 1d23h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 2000 bits/sec, 2 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
961622 packets input, 72614643 bytes, 0 no buffer
Received 944818 broadcasts (944788 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4286699522 multicast, 0 pause input
0 input packets with dribble condition detected
39281 packets output, 6235318 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet0/0/4 is administratively down, line protocol is down
Hardware is BUILT-IN-2T+6X1GE, address is 380e.4d6c.7006 (bia 380e.4d6c.7006)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
Full Duplex, 1000Mbps, link type is auto, media type is unknown media type
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output never, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
0 packets output, 0 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
'''}
golden_interface_output = {'execute.return_value': '''
CE1#show interfaces GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Hardware is CSR vNIC, address is 5e00.0001.0000 (bia 5e00.0001.0000)
Internet address is 172.16.1.243/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is Virtual
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input 00:00:02, output 00:00:25, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 32000 bits/sec, 28 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
7658 packets input, 1125842 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
44 packets output, 4324 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
'''
}
golden_parsed_interface_output={
"GigabitEthernet1": {
"rxload": "1/255",
"phys_address": "5e00.0001.0000",
"flow_control": {
"send": False,
"receive": False
},
"arp_type": "arpa",
"type": "CSR vNIC",
"enabled": True,
"media_type": "Virtual",
"last_input": "00:00:02",
"link_type": "auto",
"last_output": "00:00:25",
"counters": {
"in_errors": 0,
"in_frame": 0,
"in_watchdog": 0,
"out_babble": 0,
"in_overrun": 0,
"out_collision": 0,
"out_buffer_failure": 0,
"out_no_carrier": 0,
"in_runts": 0,
"out_late_collision": 0,
"in_mac_pause_frames": 0,
"out_underruns": 0,
"out_pkts": 44,
"in_ignored": 0,
"in_pkts": 7658,
"out_buffers_swapped": 0,
"out_interface_resets": 1,
"rate": {
"out_rate": 0,
"load_interval": 300,
"in_rate_pkts": 28,
"out_rate_pkts": 0,
"in_rate": 32000
},
"out_mac_pause_frames": 0,
"in_broadcast_pkts": 0,
"in_no_buffer": 0,
"out_deferred": 0,
"in_crc_errors": 0,
"out_octets": 4324,
"out_lost_carrier": 0,
"in_octets": 1125842,
"out_unknown_protocl_drops": 0,
"last_clear": "never",
"in_throttles": 0,
"in_multicast_pkts": 0,
"out_errors": 0,
"in_giants": 0
},
"keepalive": 10,
"mtu": 1500,
"delay": 10,
"encapsulations": {
"encapsulation": "arpa"
},
"ipv4": {
"172.16.1.243/24": {
"ip": "172.16.1.243",
"prefix_length": "24"
}
},
"queues": {
"output_queue_size": 0,
"input_queue_size": 0,
"input_queue_flushes": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
"output_queue_max": 40,
"input_queue_drops": 0,
"input_queue_max": 375
},
"auto_negotiate": True,
"line_protocol": "up",
"oper_status": "up",
"duplex_mode": "full",
"bandwidth": 1000000,
"arp_timeout": "04:00:00",
"port_speed": "1000",
"port_channel": {
"port_channel_member": False
},
"output_hang": "never",
"txload": "1/255",
"mac_address": "5e00.0001.0000",
"reliability": "255/255"
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfaces(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfaces(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_show_interfaces(self):
self.device = Mock(**self.golden_interface_output)
interface_obj = ShowInterfaces(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1')
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_interface_output)
#############################################################################
# Unit test for Show ip interface
#############################################################################
class test_show_ip_interface(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"Vlan211": {
"sevurity_level": "default",
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": True,
"oper_status": "up",
"address_determined_by": "configuration file",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"201.11.14.1/24": {
"prefix_length": "24",
"ip": "201.11.14.1",
"secondary": False,
"broadcase_address": "255.255.255.255"
}
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"input_features": ["MCI Check"],
"directed_broadcast_forwarding": False,
"ip_flow_switching": False
},
"GigabitEthernet0/0": {
"sevurity_level": "default",
'address_determined_by': 'setup command',
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": True,
"oper_status": "up",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"vrf": "Mgmt-vrf",
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"10.1.8.134/24": {
"prefix_length": "24",
"ip": "10.1.8.134",
"secondary": False,
"broadcase_address": "255.255.255.255"
}
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"input_features": ["MCI Check"],
"directed_broadcast_forwarding": False,
"ip_flow_switching": False
},
"GigabitEthernet2": {
"enabled": False,
"oper_status": "down"
},
"GigabitEthernet1/0/1": {
"sevurity_level": "default",
'address_determined_by': 'setup command',
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"enabled": False,
"oper_status": "down",
"router_discovery": False,
"ip_multicast_fast_switching": False,
"split_horizon": True,
"bgp_policy_mapping": False,
"ip_output_packet_accounting": False,
"mtu": 1500,
"policy_routing": False,
"local_proxy_arp": False,
"proxy_arp": True,
"network_address_translation": False,
"ip_cef_switching_turbo_vector": True,
"icmp": {
"redirects": "always sent",
"mask_replies": "never sent",
"unreachables": "always sent",
},
"ipv4": {
"10.1.1.1/24": {
"prefix_length": "24",
"ip": "10.1.1.1",
"secondary": False,
"broadcase_address": "255.255.255.255"
},
"10.2.2.2/24": {
"prefix_length": "24",
"ip": "10.2.2.2",
"secondary": True
},
},
"ip_access_violation_accounting": False,
"ip_cef_switching": True,
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
},
},
'wccp': {
'redirect_outbound': False,
'redirect_inbound': False,
'redirect_exclude': False,
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"directed_broadcast_forwarding": False,
"ip_flow_switching": False,
"input_features": ["MCI Check", "QoS Classification", "QoS Marking"],
}
}
golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
Internet address is 201.11.14.1/24
Broadcast address is 255.255.255.255
Address determined by configuration file
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet0/0 is up, line protocol is up
Internet address is 10.1.8.134/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "Mgmt-vrf"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet1/0/1 is administratively down, line protocol is down
Internet address is 10.1.1.1/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Secondary address 10.2.2.2/24
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: QoS Classification, QoS Marking, MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
GigabitEthernet2 is administratively down, line protocol is down
Internet protocol processing disabled
'''}
golden_interface_output = {'execute.return_value':'''
CE1#show ip interface GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Internet address is 172.16.1.243/24
Broadcast address is 255.255.255.255
Address determined by DHCP
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
}
golden_parsed_interface_output = {
"GigabitEthernet1": {
"ip_multicast_fast_switching": True,
"oper_status": "up",
"ip_output_packet_accounting": False,
"address_determined_by": "DHCP",
"rtp_ip_header_compression": False,
"ip_multicast_distributed_fast_switching": False,
"wccp": {
"redirect_exclude": False,
"redirect_outbound": False,
"redirect_inbound": False
},
"unicast_routing_topologies": {
"topology": {
"base": {
"status": "up"
}
}
},
"router_discovery": False,
"tcp_ip_header_compression": False,
"probe_proxy_name_replies": False,
"local_proxy_arp": False,
"policy_routing": False,
"mtu": 1500,
"icmp": {
"mask_replies": "never sent",
"unreachables": "always sent",
"redirects": "always sent"
},
"enabled": True,
"ip_route_cache_flags": [
"CEF",
"Fast"
],
"ip_cef_switching": True,
"ip_fast_switching": True,
"sevurity_level": "default",
"directed_broadcast_forwarding": False,
"proxy_arp": True,
"ip_null_turbo_vector": True,
"network_address_translation": False,
"input_features": [
"MCI Check"
],
"bgp_policy_mapping": False,
"split_horizon": True,
"ip_access_violation_accounting": False,
"ip_cef_switching_turbo_vector": True,
"ipv4": {
"172.16.1.243/24": {
"ip": "172.16.1.243",
"prefix_length": "24",
"broadcase_address": "255.255.255.255",
"secondary": False
}
},
"ip_flow_switching": False
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowIpInterface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_interface_golden(self):
self.device = Mock(**self.golden_interface_output)
interface_obj = ShowIpInterface(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1')
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_interface_output)
#############################################################################
# unittest for show ipv6 interface
#############################################################################
class test_show_ipv6_interface(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"GigabitEthernet1/0/1": {
"joined_group_addresses": [
"FF02::1"
],
"ipv6": {
"2001:DB8:2:2::2/64": {
"ip": "2001:DB8:2:2::2",
"prefix_length": "64",
"status": "tentative"
},
"2000::1/126": {
"ip": "2000::1",
"prefix_length": "126",
"status": "tentative"
},
"2001:DB8:1:1::1/64": {
"ip": "2001:DB8:1:1::1",
"prefix_length": "64",
"status": "tentative"
},
"2001:DB8:4:4:257:D2FF:FE28:1A64/64": {
"ip": "2001:DB8:4:4:257:D2FF:FE28:1A64",
"prefix_length": "64",
"status": "tentative",
"eui_64": True
},
"2001:DB8:3:3::3/64": {
"ip": "2001:DB8:3:3::3",
"prefix_length": "64",
"status": "tentative",
"anycast": True
},
"FE80::257:D2FF:FE28:1A64": {
"ip": "FE80::257:D2FF:FE28:1A64",
"status": "tentative",
"origin": "link_layer",
},
"enabled": True,
"nd": {
"dad_attempts": 1,
"ns_retransmit_interval": 1000,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000
},
"icmp": {
"error_messages_limited": 100,
"redirects": True,
"unreachables": "sent"
},
},
"oper_status": "down",
"enabled": False,
"mtu": 1500
},
"Vlan211": {
"joined_group_addresses": [
"FF02::1",
"FF02::1:FF14:1",
"FF02::1:FF28:1A71"
],
"ipv6": {
"2001:10::14:1/112": {
"ip": "2001:10::14:1",
"prefix_length": "112",
"status": "valid",
'autoconf': {
'preferred_lifetime': 604711,
'valid_lifetime': 2591911,
},
},
"FE80::257:D2FF:FE28:1A71": {
"ip": "FE80::257:D2FF:FE28:1A71",
"status": "valid",
"origin": "link_layer",
},
"enabled": True,
"nd": {
"dad_attempts": 1,
"ns_retransmit_interval": 1000,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000
},
"icmp": {
"error_messages_limited": 100,
"redirects": True,
"unreachables": "sent"
},
},
"oper_status": "up",
"enabled": True,
"autoconf": True,
"mtu": 1500
},
"GigabitEthernet3": {
"enabled": True,
"joined_group_addresses": [
"FF02::1",
"FF02::1:FF1E:4F2",
"FF02::2"
],
"ipv6": {
"enabled": False,
"FE80::5054:FF:FE1E:4F2": {
"ip": "FE80::5054:FF:FE1E:4F2",
"status": "valid",
"origin": "link_layer",
},
"unnumbered": {
"interface_ref": "Loopback0",
},
"nd": {
"dad_attempts": 1,
"reachable_time": 30000,
"using_time": 30000,
"dad_enabled": True
},
"icmp": {
"unreachables": "sent",
"redirects": True,
"error_messages_limited": 100
},
"nd": {
"dad_attempts": 1,
"dad_enabled": True,
"reachable_time": 30000,
"using_time": 30000,
"advertised_reachable_time": 0,
"advertised_retransmit_interval": 0,
"router_advertisements_interval": 200,
"router_advertisements_live": 1800,
"advertised_default_router_preference": 'Medium',
"advertised_reachable_time_unspecified": True,
"advertised_retransmit_interval_unspecified": True,
},
},
"oper_status": "up",
"mtu": 1500,
"addresses_config_method": 'stateless autoconfig',
}
}
golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::257:D2FF:FE28:1A71
No Virtual link-local address(es):
Stateless address autoconfig enabled
Global unicast address(es):
2001:10::14:1, subnet is 2001:10::14:0/112
valid lifetime 2591911 preferred lifetime 604711
Joined group address(es):
FF02::1
FF02::1:FF14:1
FF02::1:FF28:1A71
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet1/0/1 is administratively down, line protocol is down
IPv6 is tentative, link-local address is FE80::257:D2FF:FE28:1A64 [TEN]
No Virtual link-local address(es):
Description: desc
Global unicast address(es):
2000::1, subnet is 2000::/126 [TEN]
2001:DB8:1:1::1, subnet is 2001:DB8:1:1::/64 [TEN]
2001:DB8:2:2::2, subnet is 2001:DB8:2:2::/64 [TEN]
2001:DB8:3:3::3, subnet is 2001:DB8:3:3::/64 [ANY/TEN]
2001:DB8:4:4:257:D2FF:FE28:1A64, subnet is 2001:DB8:4:4::/64 [EUI/TEN]
Joined group address(es):
FF02::1
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet3 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::5054:FF:FE1E:4F2
No Virtual link-local address(es):
Interface is unnumbered. Using address of Loopback0
No global unicast address is configured
Joined group address(es):
FF02::1
FF02::2
FF02::1:FF1E:4F2
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowIpv6Interface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowIpv6Interface(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unittest for show interfaces trunk
#############################################################################
class test_show_interfaces_trunk(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"interface": {
"GigabitEthernet1/0/4": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/4",
"encapsulation": "802.1q"
},
"GigabitEthernet1/0/23": {
"vlans_allowed_active_in_mgmt_domain": '200-211',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "GigabitEthernet1/0/23",
"encapsulation": "802.1q"
},
"Port-channel12": {
"vlans_allowed_active_in_mgmt_domain": '100-110',
"vlans_allowed_on_trunk": '100-110',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '100-110',
"name": "Port-channel12",
"encapsulation": "802.1q"
},
"Port-channel14": {
"vlans_allowed_active_in_mgmt_domain": '200-211, 300-302',
"vlans_allowed_on_trunk": '200-211',
"mode": "on",
"native_vlan": "1",
"status": "trunking",
"vlans_in_stp_forwarding_not_pruned": '200-211',
"name": "Port-channel14",
"encapsulation": "802.1q"
}
}
}
golden_output = {'execute.return_value': '''
Port Mode Encapsulation Status Native vlan
Gi1/0/4 on 802.1q trunking 1
Gi1/0/23 on 802.1q trunking 1
Po12 on 802.1q trunking 1
Po14 on 802.1q trunking 1
Port Vlans allowed on trunk
Gi1/0/4 200-211
Gi1/0/23 200-211
Po12 100-110
Po14 200-211
Port Vlans allowed and active in management domain
Gi1/0/4 200-211
Gi1/0/23 200-211
Po12 100-110
Po14 200-211, 300-302
Port Vlans in spanning tree forwarding state and not pruned
Gi1/0/4 200-211
Gi1/0/23 200-211
Po12 100-110
Port Vlans in spanning tree forwarding state and not pruned
Po14 200-211
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfacesTrunk(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfacesTrunk(device=self.device)
parsed_output = interface_obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unittest for show interfaces <WORD> counters
#############################################################################
class test_show_interfaces_counters(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"interface": {
"GigabitEthernet1/0/1": {
"out": {
"mcast_pkts": 188396,
"bcast_pkts": 0,
"ucast_pkts": 124435064,
"name": "GigabitEthernet1/0/1",
"octets": 24884341205
},
"in": {
"mcast_pkts": 214513,
"bcast_pkts": 0,
"ucast_pkts": 15716712,
"name": "GigabitEthernet1/0/1",
"octets": 3161931167
}
}
}
}
golden_output = {'execute.return_value': '''
Port InOctets InUcastPkts InMcastPkts InBcastPkts
Gi1/0/1 3161931167 15716712 214513 0
Port OutOctets OutUcastPkts OutMcastPkts OutBcastPkts
Gi1/0/1 24884341205 124435064 188396 0
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfacesCounters(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse(interface='Gi1/0/1')
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfacesCounters(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1/0/1')
self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unittest for show interfaces <interface> accounting
#############################################################################
class test_show_interfaces_accounting(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = \
{
"GigabitEthernet1": {
"accounting": {
"arp": {
"chars_in": 4590030,
"chars_out": 120,
"pkts_in": 109280,
"pkts_out": 2
},
"ip": {
"chars_in": 2173570,
"chars_out": 2167858,
"pkts_in": 22150,
"pkts_out": 22121
},
"ipv6": {
"chars_in": 1944,
"chars_out": 0,
"pkts_in": 24,
"pkts_out": 0
},
"other": {
"chars_in": 5306164,
"chars_out": 120,
"pkts_in": 112674,
"pkts_out": 2
}
}
},
"GigabitEthernet2": {
"accounting": {
"arp": {
"chars_in": 5460,
"chars_out": 5520,
"pkts_in": 91,
"pkts_out": 92
},
"ip": {
"chars_in": 968690,
"chars_out": 1148402,
"pkts_in": 11745,
"pkts_out": 10821
},
"ipv6": {
"chars_in": 70,
"chars_out": 0,
"pkts_in": 1,
"pkts_out": 0
},
"other": {
"chars_in": 741524,
"chars_out": 5520,
"pkts_in": 3483,
"pkts_out": 92
}
}
},
"GigabitEthernet3": {
"accounting": {
"arp": {
"chars_in": 5460,
"chars_out": 5520,
"pkts_in": 91,
"pkts_out": 92
},
"ip": {
"chars_in": 1190691,
"chars_out": 1376253,
"pkts_in": 15271,
"pkts_out": 14382
},
"ipv6": {
"chars_in": 70,
"chars_out": 0,
"pkts_in": 1,
"pkts_out": 0
},
"other": {
"chars_in": 741524,
"chars_out": 5520,
"pkts_in": 3483,
"pkts_out": 92
}
}
}
}
golden_output = {'execute.return_value': '''
show interface accounting
GigabitEthernet1
Protocol Pkts In Chars In Pkts Out Chars Out
Other 112674 5306164 2 120
IP 22150 2173570 22121 2167858
ARP 109280 4590030 2 120
IPv6 24 1944 0 0
GigabitEthernet2
Protocol Pkts In Chars In Pkts Out Chars Out
Other 3483 741524 92 5520
IP 11745 968690 10821 1148402
ARP 91 5460 92 5520
IPv6 1 70 0 0
GigabitEthernet3
Protocol Pkts In Chars In Pkts Out Chars Out
Other 3483 741524 92 5520
IP 15271 1190691 14382 1376253
ARP 91 5460 92 5520
IPv6 1 70 0 0
Loopback0
Protocol Pkts In Chars In Pkts Out Chars Out
No traffic sent or received on this interface.
Loopback1
Protocol Pkts In Chars In Pkts Out Chars Out
No traffic sent or received on this interface.
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowInterfacesAccounting(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowInterfacesAccounting(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
if __name__ == '__main__':
unittest.main() | [
[
[
26,
29
]
],
[
[
37,
45
],
[
1195,
1203
],
[
4950,
4958
],
[
9120,
9128
],
[
18731,
18739
],
[
55547,
55555
],
[
71945,
71953
],
[
80906,
80914
],
[
84529,
84537
],
[
86448,
86456
],
[
90537,
90545
]
],
[
[
72,
76
],
[
149,
153
],
[
3279,
3283
],
[
5641,
5645
],
[
5896,
5900
],
[
18048,
18052
],
[
18324,
18328
],
[
54529,
54533
],
[
54770,
54774
],
[
55055,
55059
],
[
70917,
70921
],
[
71159,
71163
],
[
71446,
71450
],
[
80206,
80210
],
[
80450,
80454
],
[
83840,
83844
],
[
84086,
84090
],
[
85693,
85697
],
[
85961,
85965
],
[
90086,
90090
],
[
90317,
90321
]
],
[
[
103,
108
],
[
161,
166
]
],
[
[
130,
136
],
[
3229,
3235
]
],
[
[
138,
146
],
[
204,
212
]
],
[
[
242,
257
],
[
3350,
3355
]
],
[
[
290,
299
]
],
[
[
332,
342
]
],
[
[
375,
392
]
],
[
[
450,
469
]
],
[
[
478,
505
]
],
[
[
532,
538
],
[
4983,
4989
],
[
9153,
9159
],
[
18764,
18770
],
[
55580,
55586
],
[
71978,
71984
],
[
80939,
80945
],
[
84562,
84568
],
[
86481,
86487
]
],
[
[
585,
607
],
[
6015,
6037
],
[
18446,
18468
],
[
54645,
54667
],
[
71034,
71056
],
[
80325,
80347
],
[
83961,
83983
],
[
85817,
85839
],
[
90202,
90224
]
],
[
[
660,
684
],
[
18094,
18118
],
[
18369,
18393
]
],
[
[
727,
755
]
],
[
[
798,
812
],
[
54579,
54593
],
[
54821,
54835
],
[
55116,
55130
]
],
[
[
814,
829
],
[
70967,
70982
],
[
71210,
71225
],
[
71507,
71522
]
],
[
[
872,
889
],
[
80256,
80273
],
[
80501,
80518
]
],
[
[
933,
952
],
[
83890,
83909
],
[
84137,
84156
]
],
[
[
996,
1018
],
[
85743,
85765
],
[
86012,
86034
]
],
[
[
1062,
1086
],
[
90126,
90150
],
[
90358,
90382
]
],
[
[
1130,
1156
],
[
5682,
5708
],
[
5936,
5962
]
],
[
[
1165,
1194
]
],
[
[
4912,
4949
]
],
[
[
9088,
9119
]
],
[
[
18710,
18730
]
],
[
[
55524,
55546
]
],
[
[
71920,
71944
]
],
[
[
80879,
80905
]
],
[
[
84499,
84528
]
],
[
[
86416,
86447
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Li Yuanming
Email: yli056@e.ntu.edu.sg
Date: 1/27/2021
ML model structure definitions.
"""
import abc
import inspect
from enum import Enum
from typing import Optional, Union, Tuple, Dict, OrderedDict
from pydantic import BaseModel, PositiveInt, conint, PositiveFloat, Field, validator
from typing_extensions import Literal
class Operation(Enum):
"""
Operation enum applied to a layer or connection. There are four kinds of operations: ``'A'`` to add the specific
layer / connection, ``'D'`` to delete the specific layer / connection, ``'M'`` to modify the layer /
connection, and ``'E'`` for no operation.
"""
ADD = 'A'
DELETE = 'D'
MODIFY = 'M'
EMPTY = 'E'
class LayerType(Enum):
"""
Enum of the supported layer types. This hints which layer class the provided data should be converted to.
"""
LINEAR = 'torch.nn.Linear'
CONV_1D = 'torch.nn.Conv1d'
CONV_2D = 'torch.nn.Conv2d'
RELU = 'torch.nn.ReLU'
TANH = 'torch.nn.Tanh'
BN_1D = 'torch.nn.BatchNorm1d'
BN_2D = 'torch.nn.BatchNorm2d'
MP_1D = 'torch.nn.MaxPool1d'
MP_2D = 'torch.nn.MaxPool2d'
AAP_1D = 'torch.nn.AdaptiveAvgPool1d'
AAP_2D = 'torch.nn.AdaptiveAvgPool2d'
class ModelLayer(BaseModel, abc.ABC):
# noinspection PyUnresolvedReferences
"""
Layer of the model structure.
For layer attributes that need to be set to :code:`None`, use :code:`'null'` instead. This is because
updated parameters with the value :code:`None` are treated as not set, so we take special care with a
desired :code:`None` by replacing it with :code:`'null'`.
Attributes:
op_ (Operation): Operation to the layer.
type_ (LayerType): Indicates the type of this layer. This field also provides a hint for :class:`pydantic`
model conversion.
__required_type__ (LayerType): By overriding this attribute, we can use :meth:`check_layer_type` to
validate subclasses.
"""
op_: Operation
type_: LayerType
__required_type__: LayerType
@classmethod
def parse_layer_obj(cls, layer_obj):
"""
Parse from an ML layer object.
This function will inspect the parameters required to build the layer, and try to obtain their
values from the layer object. The default parameter parser is Python's built-in
:code:`getattr`, which assumes we can get the value from the same-named attribute of the
layer object.
For a parameter that cannot be parsed with the default parser, set a function with the format:
:code:`__{parameter_name}_parser__(layer_obj: Any) -> Any`.
Has the following signature:
Input Arguments:
* layer_obj : Any
The layer object to be parsed.
Return Arguments:
* Any
The parsed value of the given parameter.
TODO:
Signature checking for __{parameter_name}_parser__
"""
kwargs = {'op_': Operation.EMPTY, 'type_': cls.__required_type__}
signature = inspect.signature(layer_obj.__init__)
for param in signature.parameters:
parser = getattr(cls, f'__{param}_parser__', lambda obj: getattr(obj, param))
kwargs[param] = parser(layer_obj)
return cls(**kwargs)
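# Illustrative sketch (comments only, not part of the original module): how `parse_layer_obj`
# resolves the `__{parameter_name}_parser__` hook. Assuming a torch build whose
# `torch.nn.Linear.__init__` signature is `(in_features, out_features, bias)`, the `Linear`
# subclass defined below can be built straight from a layer object:
#
#   import torch.nn
#   parsed = Linear.parse_layer_obj(torch.nn.Linear(1024, 10, bias=False))
#   # in_features / out_features come from the default getattr parser;
#   # bias is resolved via Linear.__bias_parser__  ->  False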
@validator('type_')
def check_layer_type(cls, layer_type: LayerType) -> LayerType: # noqa
"""
Checks that the provided layer type value is the same as the required value.
This generates a validator to check the :code:`layer_type` field of subclasses of :class:`ModelLayer`.
"""
if layer_type != cls.__required_type__:
raise ValueError(f'Expected {cls.__required_type__} but got {layer_type}')
return layer_type
class Linear(ModelLayer):
in_features: Optional[PositiveInt]
out_features: Optional[PositiveInt]
bias: Optional[bool]
__required_type__ = LayerType.LINEAR
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class _ConvNd(ModelLayer, abc.ABC):
in_channels: Optional[PositiveInt]
out_channels: Optional[PositiveInt]
kernel_size: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
padding: Optional[Union[conint(ge=0), Tuple[conint(ge=0), ...]]]
dilation: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
groups: PositiveInt
bias: bool
padding_mode: Literal['zeros', 'reflect', 'replicate', 'circular']
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class Conv1d(_ConvNd):
__required_type__ = LayerType.CONV_1D
class Conv2d(_ConvNd):
__required_type__ = LayerType.CONV_2D
class ReLU(ModelLayer):
inplace: Optional[bool]
__required_type__ = LayerType.RELU
class Tanh(ModelLayer):
__required_type__ = LayerType.TANH
class _BatchNorm(ModelLayer, abc.ABC):
num_features: Optional[PositiveInt]
eps: Optional[PositiveFloat]
momentum: Optional[Union[PositiveFloat, Literal['null']]]
affine: Optional[bool]
track_running_stats: Optional[bool]
class BatchNorm1d(_BatchNorm):
__required_type__ = LayerType.BN_1D
class BatchNorm2d(_BatchNorm):
__required_type__ = LayerType.BN_2D
class _MaxPool(ModelLayer, abc.ABC):
kernel_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]] = None
padding: Union[conint(ge=0), Tuple[conint(ge=0), ...]] = 0
dilation: Union[PositiveInt, Tuple[PositiveInt, ...]] = 1
return_indices: bool = False
ceil_mode: bool = False
class MaxPool1d(_MaxPool):
__required_type__ = LayerType.MP_1D
class MaxPool2d(_MaxPool):
__required_type__ = LayerType.MP_2D
class _AdaptiveAvgPool(ModelLayer, abc.ABC):
output_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
class AdaptiveAvgPool1d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_1D
class AdaptiveAvgPool2d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_2D
_LayerType = Union[Linear, Conv1d, Conv2d, ReLU, Tanh, BatchNorm1d, BatchNorm2d, MaxPool1d, MaxPool2d,
AdaptiveAvgPool1d, AdaptiveAvgPool2d]
class Structure(BaseModel):
# noinspection PyUnresolvedReferences
"""
Indicate a ML model structure using a graph data structure.
:attr:`layer` is the graph node, representing a layer of the model. :attr:`connection` is the graph edge,
representing which two layers are connected, and the directions of tensor pass.
Attributes:
layer (OrderedDict[str, _LayerType]): Layer mapping, the key is layer name, and the value is layer
attributes. See :class:`ModelLayer` for reference.
connection (Optional[Dict[str, Dict[str, Operation]]]): The connection (:attr:`connection`) maps
the starting layer name, to the ending layer name with a connection operation.
Examples::
>>> from collections import OrderedDict
>>> # add a nn.Linear layer named 'fc1' with in_features=1024, out_features=10
>>> layer_mapping = OrderedDict({
... 'fc1': LinearLayer(in_features=1024, out_features=10, type_=LayerType.LINEAR, op_=Operation.ADD),
... })
>>> # connection example for add connection from 'conv1' to 'fc1'
>>> connection_mapping = {'conv1': {'fc1': Operation.ADD}}
>>> struct = Structure(layer=layer_mapping, connection=connection_mapping)
>>> print(struct)
layer={'fc1': LinearLayer(in_features=1024, out_features=10, bias=None)}
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}}
>>> # Other than using the model object, we can pass in a plain dictionary,
... # and utilize `Structure.parse_obj`.
>>> structure_data = {
... 'layer': {'fc': {'in_features': 1024, 'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'A'}},
... 'connection': {'conv1': {'fc1': 'A'}}
... }
>>> Structure.parse_obj(structure_data)
Structure(layer={'fc': LinearLayer(in_features=1024, out_features=10, bias=None)},
connection={'conv1': {'fc1': <Operation.ADD: 'A'>}})
"""
layer: OrderedDict[str, _LayerType] = Field(
default_factory=OrderedDict,
example={'fc': {'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'M'}}
)
connection: Optional[Dict[str, Dict[str, Operation]]] = Field(
default_factory=dict,
example={'conv1': {'fc1': 'A'}}
)
| [
[
[
157,
160
],
[
1301,
1304
],
[
4152,
4155
],
[
5046,
5049
],
[
5433,
5436
],
[
5938,
5941
]
],
[
[
168,
175
],
[
3134,
3141
]
],
[
[
193,
197
],
[
401,
405
],
[
768,
772
]
],
[
[
217,
225
],
[
3898,
3906
],
[
3938,
3946
],
[
3970,
3978
],
[
4179,
4187
],
[
4219,
4227
],
[
4258,
4266
],
[
4324,
4332
],
[
4391,
4399
],
[
4461,
4469
],
[
4895,
4903
],
[
5074,
5082
],
[
5105,
5113
],
[
5143,
5151
],
[
5203,
5211
],
[
5243,
5251
],
[
5516,
5524
],
[
8528,
8536
]
],
[
[
227,
232
],
[
4267,
4272
],
[
4333,
4338
],
[
4400,
4405
],
[
4470,
4475
],
[
5152,
5157
],
[
5460,
5465
],
[
5525,
5530
],
[
5590,
5595
],
[
5654,
5659
],
[
5965,
5970
],
[
6196,
6201
]
],
[
[
234,
239
],
[
4286,
4291
],
[
4352,
4357
],
[
4420,
4425
],
[
4489,
4494
],
[
5479,
5484
],
[
5544,
5549
],
[
5610,
5615
],
[
5673,
5678
],
[
5984,
5989
]
],
[
[
241,
245
],
[
8537,
8541
],
[
8547,
8551
]
],
[
[
247,
258
],
[
8408,
8419
],
[
8346,
8357
]
],
[
[
281,
290
],
[
1290,
1299
],
[
6361,
6370
]
],
[
[
292,
303
],
[
3907,
3918
],
[
3947,
3958
],
[
4188,
4199
],
[
4228,
4239
],
[
4273,
4284
],
[
4292,
4303
],
[
4339,
4350
],
[
4358,
4369
],
[
4476,
4487
],
[
4495,
4506
],
[
4527,
4538
],
[
5083,
5094
],
[
5466,
5477
],
[
5485,
5496
],
[
5531,
5542
],
[
5550,
5561
],
[
5660,
5671
],
[
5679,
5690
],
[
5971,
5982
],
[
5990,
6001
]
],
[
[
305,
311
],
[
4406,
4412
],
[
4426,
4432
],
[
5596,
5602
],
[
5616,
5622
]
],
[
[
313,
326
],
[
5114,
5127
],
[
5158,
5171
]
],
[
[
328,
333
],
[
8377,
8382
],
[
8572,
8577
]
],
[
[
335,
344
],
[
3387,
3396
]
],
[
[
375,
382
],
[
4572,
4579
],
[
5173,
5180
]
],
[
[
391,
400
],
[
2056,
2065
],
[
8557,
8566
],
[
3065,
3074
]
],
[
[
758,
767
],
[
2077,
2086
],
[
2111,
2120
],
[
4010,
4019
],
[
4771,
4780
],
[
4838,
4847
],
[
4935,
4944
],
[
5000,
5009
],
[
5315,
5324
],
[
5388,
5397
],
[
5816,
5825
],
[
5885,
5894
],
[
6078,
6087
],
[
6164,
6173
],
[
3462,
3471
],
[
3448,
3457
]
],
[
[
1279,
1289
],
[
3868,
3878
],
[
4140,
4150
],
[
4869,
4879
],
[
4963,
4973
],
[
5034,
5044
],
[
5421,
5431
],
[
5926,
5936
]
],
[
[
3861,
3867
],
[
6202,
6208
]
],
[
[
4132,
4139
],
[
4737,
4744
],
[
4804,
4811
]
],
[
[
4730,
4736
],
[
6210,
6216
]
],
[
[
4797,
4803
],
[
6218,
6224
]
],
[
[
4864,
4868
],
[
6226,
6230
]
],
[
[
4958,
4962
],
[
6232,
6236
]
],
[
[
5023,
5033
],
[
5278,
5288
],
[
5351,
5361
]
],
[
[
5266,
5277
],
[
6238,
6249
]
],
[
[
5339,
5350
],
[
6251,
6262
]
],
[
[
5412,
5420
],
[
5781,
5789
],
[
5850,
5858
]
],
[
[
5771,
5780
],
[
6264,
6273
]
],
[
[
5840,
5849
],
[
6275,
6284
]
],
[
[
5909,
5925
],
[
6035,
6051
],
[
6121,
6137
]
],
[
[
6017,
6034
],
[
6305,
6322
]
],
[
[
6103,
6120
],
[
6324,
6341
]
],
[
[
6183,
6193
],
[
8363,
8373
]
],
[
[
6351,
6360
]
]
] |
"""
Author: Andrew Harris
Python 3.8
"""
import logging
import os
import pandas as pd
from ete3 import Tree
from tqdm import tqdm
############################### Set up logger #################################
def set_logger_level(WORKING_DIR, LOG_LEVEL):
logger = logging.getLogger(__name__)
# Remove existing log file if present
if os.path.exists(WORKING_DIR / 'logs/topobin.log'):
os.remove(WORKING_DIR / 'logs/topobin.log')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(WORKING_DIR / 'logs/topobin.log')
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(LOG_LEVEL)
return logger
############################## Helper Functions ###############################
def remove_heterotachy_info(l):
"""Remove any information in bracketsete3
does not support this format of newick"""
if ("[" not in l) and ("]" not in l):
return l
open_brackets = [i for i, x in enumerate(l) if x == "["]
close_brackets = [i for i, x in enumerate(l) if x == "]"]
final_string = f'{l[:open_brackets[0]]}'
for ob, cb in zip(open_brackets[1:], close_brackets[:-1]):
final_string += l[cb+1:ob]
final_string += l[close_brackets[-1]+1:]
return final_string
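# Illustrative example (comment only, not part of the pipeline): bracketed heterotachy
# annotations are stripped so ete3 can parse the newick string.
#   remove_heterotachy_info("((A:1[0.5],B:1[0.5]):1,C:2);") -> "((A:1,B:1):1,C:2);"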
def tv_header_validation(df):
"""Return False if first four required column headers are not valid"""
required_cols = list(df.columns[:4])
try:
assert required_cols == ["Chromosome", "Window", "NewickTree", "TopologyID"]
return True
except AssertionError:
return False
############################### Main Function ################################
def topobinner(TREEVIEWER_FN, UPDATED_TV_FILENAME, TOPOBIN_ROOTED, WORKING_DIR, MULTIPROCESS, LOG_LEVEL):
logger = set_logger_level(WORKING_DIR, LOG_LEVEL) # Setup log file level
# Load in Tree Viewer excel file
df = pd.read_excel(TREEVIEWER_FN, engine='openpyxl')
df = df.reset_index(drop=True)
# Validate headers
header_check = tv_header_validation(df)
if not header_check:
raise AssertionError("Input file headers are not valid, please ensure required headers are correct.")
df['TopologyID'] = ['NULL']*len(df)
trees = df['NewickTree']
topologies = dict()
logger.info(f"{len(trees):,} trees to run")
# Set root boolean value
if TOPOBIN_ROOTED == "Y":
TOPOBIN_ROOTED = False
else:
TOPOBIN_ROOTED = True
# Bin Trees
tqdm_text = "#" + "{}".format("run1").zfill(3)
with tqdm(total=len(trees), desc=tqdm_text, ascii=True) as pbar:
for n, t in enumerate(trees):
# Check to see if tree is NoTree
if t == "NoTree":
pbar.update(1)
continue
# Set first tree in collection dictionary +
# move to next tree
if len(topologies.keys()) == 0:
topologies[n] = {'count': 1, 'idx': [n]}
pbar.update(1)
continue
else:
# Iterate through topology list
# add new topology if no rf == 0
# increase count if rf == 0 with topology
new_topology = True
for idx in topologies.keys():
if df.at[idx, 'NewickTree'] == "NoTree":
continue
t1 = Tree(remove_heterotachy_info(t))
t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))
comparison = t1.compare(t2, unrooted=TOPOBIN_ROOTED)
rf = comparison['rf']
if rf == 0:
topologies[idx]['count'] += 1
topologies[idx]['idx'].append(n)
new_topology = False
break
else:
continue
if new_topology:
topologies[n] = {'count': 1, 'idx': [n]}
pbar.update(1)
continue
else:
pbar.update(1)
continue
# Sort topologies dictionary by 'count'
topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}
num_topologies = len(topologies.keys())
# Set zfill number
if num_topologies < 100:
zfillnum = 3
elif 100 < num_topologies < 1000:
zfillnum = 4
else:
zfillnum = 5
# Update DataFrame TopologyID column with results
overview_df = pd.DataFrame(
{
"TopologyID": [("Tree" + "{}".format(str(i)).zfill(zfillnum)) for i in range(1, len(topologies.keys())+1)],
"Count": [topologies[i]["count"] for i in topologies.keys()],
"Rank": [i for i in range(1, len(topologies.keys())+1)],
}
)
topoCount = 1
for topo in topologies.keys():
idx = topologies[topo]['idx']
topoName = "Tree" + "{}".format(topoCount).zfill(zfillnum)
for i in idx:
df.at[i, 'TopologyID'] = topoName
continue
topoCount += 1
# Output updated Tree Viewer file
df.to_excel(UPDATED_TV_FILENAME, index=False, engine='openpyxl')
logger.info(f"{overview_df}")
return
| [
[
[
48,
55
],
[
270,
277
],
[
465,
472
],
[
547,
554
],
[
663,
670
]
],
[
[
63,
65
],
[
347,
349
],
[
405,
407
]
],
[
[
74,
86
],
[
2037,
2039
],
[
4704,
4706
]
],
[
[
104,
108
],
[
3519,
3523
],
[
3577,
3581
]
],
[
[
126,
130
],
[
2669,
2673
]
],
[
[
215,
231
],
[
1921,
1937
]
],
[
[
895,
918
],
[
3524,
3547
],
[
3582,
3605
]
],
[
[
1418,
1438
],
[
2162,
2182
]
],
[
[
1806,
1816
]
]
] |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#####################################################################################
"""
Program description:
1. Compute the distribution of reads or peaks relative to TTS, TSS, STARTCODON and STOPCODON.
Design notes:
The statistics are computed with the gffutils and HTSeq packages.
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
from matplotlib import pyplot
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../../")
from ablib.utils.tools import *
from ablib.utils.distribution import *
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-g', '--gff', dest='gff', action='store',
type='string', help='gff file,do not have to provide it if db is exited')
p.add_option(
'-d', '--db', dest='db', default='gffdb', action='store',
type='string', help='the gff database file to create or use')
p.add_option(
'-b', '--bamorbed', dest='bamorbed', action='store',
type='string', help='bam or bed file, Important: the bamfile\'s suffix must be ".bam"')
p.add_option(
'-w', '--halfwinwidth', dest='halfwinwidth', default=1000, action='store',
type='int', help='halfwinwidth,default is 1000')
p.add_option(
'-p', '--postype', dest='postype', action='store',
type='string', help='gene position type:tss,tts,startcodon,stopcodon,intronstart,intronend')
p.add_option(
'-o', '--outfile', dest='outfile', default="distance2xxx_reads_density.txt", action='store',
type='string', help='gene expression file')
p.add_option(
'-n', '--samplename', dest='samplename', default='', action='store',
type='string', help='sample name,default is ""')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
type='string', help='email address, if you want get a email when this job is finished,default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=True, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if not opt.postype:
opt_parser.error('Option -p must be assigned.\n')
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
sample = ""
if opt.samplename != "":
sample = opt.samplename + '_'
if opt.outfile == 'distance2xxx_reads_density.txt':
opt.outfile = sample + 'distance2' + opt.postype + '_reads_density.txt'
intype = "bam"
match = re.search(r'\.bam$', opt.bamorbed)
if not match:
intype = "bed"
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = "/".join(scriptPath.split("/")[0:-2]) # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
#os.mkdir(outPath) if not os.path.isdir(outPath) else None
os.system('mkdir -p ' + outPath)
logPath = os.path.abspath(opt.logDir)
#os.mkdir(logPath) if not os.path.isdir(logPath) else None
os.system('mkdir -p ' + logPath)
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if opt.gff:
db = gffutils.create_db(opt.gff, opt.db, merge_strategy="create_unique", verbose=False, force=True)
db = gffutils.FeatureDB(opt.db)
Watcher()
pool = multiprocessing.Pool(processes=15)
server = multiprocessing.Manager()
dis = server.dict()
for chr in db.seqids():
# if chr != "chr1":
# continue
if intype == "bam":
chr_dict = readBamHeader(opt.bamorbed)
if not chr in chr_dict:
continue
# print(chr)
dis[chr] = [0 for x in range(2 * opt.halfwinwidth)]
pool.apply_async(distributionToOnePointByChr,
args=(chr, opt.bamorbed, opt.db, opt.outfile, opt.postype, opt.halfwinwidth, dis))
pool.close()
pool.join()
d = dict(dis).copy()
server.shutdown()
profile = numpy.zeros(2 * opt.halfwinwidth, dtype='i')
for chr in sorted(d.keys()):
wincvg = numpy.fromiter(d[chr], dtype='i', count=2 * opt.halfwinwidth)
profile += wincvg
# pyplot.plot( numpy.arange( -opt.halfwinwidth, opt.halfwinwidth ), profile )
# pyplot.show()
os.chdir(opt.outDir)
fout = open(opt.outfile, 'w')
fout.writelines(
"+distance\tdensity\n")
n = 0
for i in range(-opt.halfwinwidth, opt.halfwinwidth):
fout.writelines(str(i) + '\t' + str(profile[n]) + '\n')
n += 1
fout.close()
#cmd = "cd " + outPath + "&& R --slave < /users/ablife/ablife-R/Line_single_ggplot2.r --args " + opt.outfile + " " + sample + 'distance2' + opt.postype + '_reads_density ./ \n'
cmd = "cd " + outPath + "&& Rscript " + binPath + "/plot/Line_single_ggplot2.r -f " + opt.outfile + " -t " + sample + 'distance2' + opt.postype + '_reads_density -n ' + sample + 'distance2' + opt.postype + '_reads_density -o ./'
os.system(cmd)
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
| [
[
[
697,
699
],
[
4531,
4533
]
],
[
[
701,
703
],
[
977,
979
],
[
991,
993
],
[
4962,
4964
],
[
4978,
4980
],
[
5109,
5111
],
[
5220,
5222
],
[
5263,
5265
],
[
5350,
5352
],
[
10821,
10823
],
[
9694,
9696
],
[
10385,
10387
]
],
[
[
705,
708
],
[
789,
792
],
[
794,
797
],
[
958,
961
],
[
1112,
1115
],
[
1199,
1202
],
[
6861,
6864
],
[
7348,
7351
],
[
11998,
12001
],
[
3898,
3901
],
[
3945,
3948
]
],
[
[
710,
717
],
[
6847,
6854
],
[
7240,
7247
],
[
7288,
7295
],
[
7395,
7402
],
[
10857,
10864
],
[
11247,
11254
],
[
11378,
11385
],
[
11431,
11438
],
[
11484,
11491
],
[
12089,
12096
],
[
6016,
6023
],
[
6051,
6058
],
[
6334,
6341
],
[
6383,
6390
],
[
6473,
6480
],
[
6656,
6663
]
],
[
[
719,
723
]
],
[
[
725,
733
],
[
6705,
6713
],
[
7371,
7379
],
[
11292,
11300
]
],
[
[
755,
767
],
[
1504,
1516
]
],
[
[
769,
780
],
[
2696,
2707
]
],
[
[
834,
844
]
],
[
[
852,
861
]
],
[
[
869,
877
],
[
8588,
8596
],
[
8693,
8701
]
],
[
[
885,
890
]
],
[
[
898,
903
],
[
9404,
9409
],
[
9499,
9504
]
],
[
[
911,
926
],
[
8747,
8762
],
[
8795,
8810
]
],
[
[
950,
956
]
],
[
[
1065,
1066
]
],
[
[
1104,
1105
],
[
12012,
12021
],
[
8726,
8733
],
[
8976,
8989
],
[
9171,
9198
]
],
[
[
1214,
1222
],
[
7278,
7286
]
],
[
[
1411,
1420
],
[
4157,
4166
]
],
[
[
4020,
4032
],
[
7335,
7347
],
[
11985,
11997
]
],
[
[
4133,
4143
],
[
4194,
4204
]
],
[
[
4145,
4148
],
[
4177,
4180
],
[
4248,
4251
],
[
4283,
4286
],
[
4270,
4273
],
[
4320,
4323
],
[
4355,
4358
],
[
4380,
4383
],
[
4470,
4473
],
[
4433,
4436
],
[
4552,
4555
],
[
5125,
5128
],
[
5279,
5282
],
[
6755,
6758
],
[
10803,
10806
],
[
11950,
11953
],
[
12022,
12025
],
[
12131,
12134
],
[
6223,
6226
],
[
8566,
8569
],
[
8607,
8610
],
[
8616,
8619
],
[
8712,
8715
],
[
8990,
8993
],
[
9127,
9130
],
[
9236,
9239
],
[
9250,
9253
],
[
9258,
9261
],
[
9271,
9274
],
[
9284,
9287
],
[
9420,
9423
],
[
9543,
9546
],
[
9703,
9706
],
[
9731,
9734
],
[
9833,
9836
],
[
9851,
9854
],
[
10238,
10241
],
[
10284,
10287
],
[
10344,
10347
]
],
[
[
4150,
4154
]
],
[
[
4305,
4311
],
[
4447,
4453
],
[
10261,
10267
],
[
10321,
10327
]
],
[
[
4346,
4352
],
[
4447,
4453
],
[
10261,
10267
],
[
10321,
10327
]
],
[
[
4508,
4514
],
[
8936,
8942
]
],
[
[
4523,
4528
],
[
4573,
4578
]
],
[
[
4584,
4590
],
[
8936,
8942
]
],
[
[
4949,
4959
],
[
5048,
5058
]
],
[
[
5029,
5036
],
[
10192,
10199
]
],
[
[
5099,
5106
],
[
5244,
5251
],
[
5394,
5401
],
[
5509,
5516
],
[
12076,
12083
],
[
10166,
10173
]
],
[
[
5253,
5260
],
[
5374,
5381
],
[
6739,
6746
]
],
[
[
5383,
5391
],
[
10843,
10851
]
],
[
[
5496,
5506
]
],
[
[
5954,
5965
],
[
6826,
6837
]
],
[
[
6700,
6702
],
[
6784,
6786
]
],
[
[
6729,
6736
],
[
6838,
6845
]
],
[
[
7359,
7368
],
[
7445,
7454
],
[
11345,
11354
],
[
11428,
11437
],
[
12037,
12046
]
],
[
[
8511,
8515
],
[
10440,
10444
]
],
[
[
11278,
11289
],
[
11331,
11342
],
[
11479,
11490
],
[
12053,
12064
]
],
[
[
11316,
11327
],
[
11542,
11553
],
[
11563,
11574
],
[
11589,
11600
]
],
[
[
11975,
11982
],
[
12067,
12074
]
]
] |
# this file structure follows http://flask.pocoo.org/docs/1.0/patterns/appfactories/
# initializing db in api.models.base instead of in api.__init__.py
# to prevent circular dependencies
from .base import db
from .Email import Email
from .Person import Person
from .VideoInfo import VideoInfo
__all__ = ["db", "Email", "Person", "VideoInfo"]
# You must import all of the new Models you create to this page
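# For example (hypothetical model): after creating api/models/Comment.py that defines a
# Comment model, import it here and list it in __all__:
#   from .Comment import Comment
#   __all__ = ["db", "Email", "Person", "VideoInfo", "Comment"]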
| [
[
[
205,
207
]
],
[
[
227,
232
]
],
[
[
253,
259
]
],
[
[
283,
292
]
],
[
[
294,
301
]
]
] |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any, Union
from interactions.base.interaction import Interaction
from interactions.context import InteractionContext
from protocolbuffers.Localization_pb2 import LocalizedString
from sims.sim import Sim
from sims4.utils import flexmethod
from sims4communitylib.logging.has_class_log import HasClassLog
from sims4communitylib.mod_support.mod_identity import CommonModIdentity
class CommonInteractionOverrideName(HasClassLog):
"""CommonInteractionOverrideName()
An inheritable class that provides a way to override the :func:`~get_name` function of :class:`.CommonInteraction`.
.. warning:: This class is to be used in conjunction with :class:`.CommonInteraction`. Inheriting from this class will do nothing for a class that does not also inherit from :class:`.CommonInteraction`.
"""
# noinspection PyMissingOrEmptyDocstring
@classmethod
def get_mod_identity(cls) -> Union[CommonModIdentity, None]:
return None
def __init__(self) -> None:
super().__init__()
HasClassLog.__init__(self)
# noinspection PyMethodParameters,PyMissingOrEmptyDocstring
@flexmethod
def get_name(cls, inst: Interaction, target: Any=None, context: InteractionContext=None, **interaction_parameters) -> Union[LocalizedString, None]:
inst_or_cls = inst or cls
try:
context_inst_or_cls = context or inst_or_cls
interaction_sim = context_inst_or_cls.sim
interaction_target = target or context_inst_or_cls.target
cls.get_verbose_log().format_with_message(
'Creating display name.',
class_name=cls.__name__,
interaction_sim=interaction_sim,
interaction_target=interaction_target,
interaction=inst,
interaction_context=context
)
override_name = cls._create_display_name(
interaction_sim,
interaction_target,
interaction=inst,
interaction_context=context,
**interaction_parameters
)
if override_name is not None:
return override_name
except Exception as ex:
cls.get_log().error('An error occurred while running get_name of interaction {}'.format(cls.__name__), exception=ex)
return super(Interaction, inst_or_cls).get_name(target=target, context=context, **interaction_parameters)
# noinspection PyUnusedLocal
@classmethod
def _create_display_name(cls, interaction_sim: Sim, interaction_target: Any, interaction: Union[Interaction, None]=None, interaction_context: Union[InteractionContext, None]=None, **interaction_parameters) -> Union[LocalizedString, None]:
"""_create_display_name(interaction_sim, interaction_target, interaction=None, interaction_context=None, **interaction_parameters)
A hook that allows using a custom display name for an Interaction.
:param interaction_sim: The source Sim of the interaction.
:type interaction_sim: Sim
:param interaction_target: The target Object of the interaction.
:type interaction_target: Any
:param interaction: An instance of an interaction or None if no instance of the interaction is available. Default is None.
:type interaction: Union[Interaction, None], optional
:param interaction_context: The context of the interaction or None if no interaction context is available. Default is None.
:type interaction_context: Union[InteractionContext, None], optional
:param interaction_parameters: Extra interaction parameters.
:type interaction_parameters: Any
:return: A Localized String to display for the interaction or None if the original display name should be used.
:rtype: Union[LocalizedString, None]
"""
raise NotImplementedError()
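# Illustrative sketch (comments only, not part of the library): a subclass overriding the
# display-name hook. `ExampleNameOverride` is hypothetical, and building the LocalizedString
# is left abstract because it depends on the game's localization utilities.
#
# class ExampleNameOverride(CommonInteractionOverrideName):
#     @classmethod
#     def _create_display_name(cls, interaction_sim, interaction_target, interaction=None,
#                              interaction_context=None, **interaction_parameters) -> Union[LocalizedString, None]:
#         custom_name: LocalizedString = ...  # build a LocalizedString here (game-specific)
#         return custom_name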
| [
[
[
280,
283
],
[
1455,
1458
],
[
2859,
2862
]
],
[
[
285,
290
],
[
1178,
1183
],
[
1528,
1533
],
[
2996,
3001
],
[
2877,
2882
],
[
2929,
2934
]
],
[
[
334,
345
],
[
1434,
1445
],
[
2639,
2650
],
[
2883,
2894
]
],
[
[
379,
397
],
[
1474,
1492
],
[
2935,
2953
]
],
[
[
443,
458
],
[
1534,
1549
],
[
3002,
3017
]
],
[
[
480,
483
],
[
2834,
2837
]
],
[
[
508,
518
],
[
1395,
1405
]
],
[
[
571,
582
],
[
694,
705
],
[
1298,
1309
]
],
[
[
638,
655
],
[
1184,
1201
]
],
[
[
664,
693
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
from tensorflow.python.layers.maxout import maxout
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
__all__ = ['avg_pool2d',
'avg_pool3d',
'batch_norm',
'bias_add',
'conv2d',
'conv3d',
'conv2d_in_plane',
'conv2d_transpose',
'conv3d_transpose',
'convolution',
'convolution2d',
'convolution2d_in_plane',
'convolution2d_transpose',
'convolution3d',
'convolution3d_transpose',
'dropout',
'elu',
'flatten',
'fully_connected',
'GDN',
'gdn',
'layer_norm',
'linear',
'pool',
'max_pool2d',
'max_pool3d',
'one_hot_encoding',
'relu',
'relu6',
'repeat',
'scale_gradient',
'separable_conv2d',
'separable_convolution2d',
'softmax',
'spatial_softmax',
'stack',
'unit_norm',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'maxout']
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
_FUSED_DEFAULT = os.getenv('TF_DEFAULT_USES_FUSED_BATCH_NORM',
'').lower() in ('true', 't', '1')
@add_arg_scope
def avg_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D average pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.AveragePooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
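# Usage sketch (illustrative, not part of the upstream file): average-pool a batch of NHWC
# images with the function defined above; `images` is a hypothetical 4-D float tensor.
#   net = avg_pool2d(images, kernel_size=[2, 2], stride=2, padding='VALID')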
@add_arg_scope
def avg_pool3d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NDHWC,
outputs_collections=None,
scope=None):
"""Adds a 3D average pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]` if
`data_format` is `NDHWC`, and `[batch_size, channels, depth, height, width]` if
`data_format` is `NCDHW`.
kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 3: [stride_depth, stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
"""
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
with ops.name_scope(scope, 'AvgPool3D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.AveragePooling3D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _fused_batch_norm(
inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
    ValueError: If the rank of `inputs` is neither 2 nor 4.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
original_shape = inputs.get_shape()
original_inputs = inputs
original_rank = original_shape.ndims
if original_rank is None:
raise ValueError('Inputs %s has undefined rank' % inputs.name)
elif original_rank not in [2, 4]:
raise ValueError('Inputs %s has unsupported rank.'
' Expected 2 or 4 but got %d' % (
inputs.name, original_rank))
if original_rank == 2:
channels = inputs.get_shape()[-1].value
if channels is None:
raise ValueError('`C` dimension must be known but is None')
new_shape = [-1, 1, 1, channels]
if data_format == DATA_FORMAT_NCHW:
new_shape = [-1, channels, 1, 1]
inputs = array_ops.reshape(inputs, new_shape)
inputs_shape = inputs.get_shape()
dtype = inputs.dtype.base_dtype
if data_format == DATA_FORMAT_NHWC:
params_shape = inputs_shape[-1:]
else:
params_shape = inputs_shape[1:2]
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined `C` dimension %s.' %
(inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
trainable_beta = trainable and center
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
if not param_initializers:
param_initializers = {}
if center:
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable_beta)
else:
beta = array_ops.constant(0.0, shape=params_shape)
if scale:
gamma_collections = utils.get_variable_collections(
variables_collections, 'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
else:
gamma = array_ops.constant(1.0, shape=params_shape)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
def _fused_batch_norm_training():
return nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon, data_format=data_format)
def _fused_batch_norm_inference():
return nn.fused_batch_norm(
inputs,
gamma,
beta,
mean=moving_mean,
variance=moving_variance,
epsilon=epsilon,
is_training=False,
data_format=data_format)
outputs, mean, variance = utils.smart_cond(is_training,
_fused_batch_norm_training,
_fused_batch_norm_inference)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_updates` will be true.
is_training_value = utils.constant_value(is_training)
need_updates = is_training_value is None or is_training_value
if need_updates:
if updates_collections is None:
no_updates = lambda: outputs
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies(
[update_moving_mean, update_moving_variance]):
return array_ops.identity(outputs)
outputs = utils.smart_cond(is_training, _force_updates, no_updates)
else:
moving_vars_fn = lambda: (moving_mean, moving_variance)
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
outputs.set_shape(inputs_shape)
if original_shape.ndims == 2:
outputs = array_ops.reshape(outputs, array_ops.shape(original_inputs))
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
activation_fn=None,
param_initializers=None,
param_regularizers=None,
updates_collections=ops.GraphKeys.UPDATE_OPS,
is_training=True,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
batch_weights=None,
fused=None,
data_format=DATA_FORMAT_NHWC,
zero_debias_moving_mean=False,
scope=None,
renorm=False,
renorm_clipping=None,
renorm_decay=0.99):
"""Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Can be used as a normalizer function for conv2d and fully_connected.
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be added as a dependency to the `train_op`. For example:
```python
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
```
One can set updates_collections=None to force the updates in place, but that
can have a speed penalty, especially in distributed settings.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`. The normalization is over all but the last dimension if
`data_format` is `NHWC` and the second dimension if `data_format` is
`NCHW`.
decay: Decay for the moving average. Reasonable values for `decay` are close
to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9, etc.
Lower `decay` value (recommend trying `decay`=0.9) if model experiences
reasonably good training performance but poor validation and/or test
performance. Try zero_debias_moving_mean=True for improved stability.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
epsilon: Small float added to variance to avoid dividing by zero.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
param_initializers: Optional initializers for beta, gamma, moving mean and
moving variance.
param_regularizers: Optional regularizer for beta and gamma.
updates_collections: Collections to collect the update ops for computation.
The updates_ops need to be executed with the train_op.
If None, a control dependency would be added to make sure the updates are
computed in place.
is_training: Whether or not the layer is in training mode. In training mode
it would accumulate the statistics of the moments into `moving_mean` and
`moving_variance` using an exponential moving average with the given
`decay`. When it is not in training mode then it would use the values of
the `moving_mean` and the `moving_variance`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
batch_weights: An optional tensor of shape `[batch_size]`,
containing a frequency weight for each batch item. If present,
then the batch normalization uses weighted mean and
variance. (This can be used to correct for bias in training
example selection.)
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
scope: Optional scope for `variable_scope`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_decay: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `decay` is still applied
to get the means and variances for inference.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or channels dimension of `inputs` is undefined.
"""
# This environment variable is only used during the testing period of fused
# batch norm and will be removed after that.
if fused is None:
fused = _FUSED_DEFAULT
# Only use _fused_batch_norm if all of the following three
# conditions are true:
# (1) fused is set True;
# (2) it is possible to use (currently it doesn't support batch weights,
# renorm, and the case when rank is neither 2 nor 4);
# (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,
# or non-default updates_collections (not implemented in
# normalization_layers.BatchNormalization yet); otherwise use the fused
# implementation in normalization_layers.BatchNormalization.
inputs = ops.convert_to_tensor(inputs)
rank = inputs.get_shape().ndims
possible_to_fuse = batch_weights is None and not renorm and rank in [2, 4]
if fused and possible_to_fuse and (
zero_debias_moving_mean or rank == 2 or
updates_collections is not ops.GraphKeys.UPDATE_OPS):
return _fused_batch_norm(
inputs,
decay=decay,
center=center,
scale=scale,
epsilon=epsilon,
activation_fn=activation_fn,
param_initializers=param_initializers,
updates_collections=updates_collections,
is_training=is_training,
reuse=reuse,
variables_collections=variables_collections,
outputs_collections=outputs_collections,
trainable=trainable,
data_format=data_format,
zero_debias_moving_mean=zero_debias_moving_mean,
scope=scope)
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter()
with variable_scope.variable_scope(
scope, 'BatchNorm', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
# Determine whether we can use the core layer class.
if (batch_weights is None and
updates_collections is ops.GraphKeys.UPDATE_OPS and
not zero_debias_moving_mean):
# Use the core layer class.
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
if not param_initializers:
param_initializers = {}
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
if not param_regularizers:
param_regularizers = {}
beta_regularizer = param_regularizers.get('beta')
gamma_regularizer = param_regularizers.get('gamma')
layer = normalization_layers.BatchNormalization(
axis=axis,
momentum=decay,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_decay,
name=sc.name,
_scope=sc,
_reuse=reuse,
fused=fused)
outputs = layer.apply(inputs, training=is_training)
# Add variables to collections.
_add_variable_to_collections(
layer.moving_mean, variables_collections, 'moving_mean')
_add_variable_to_collections(
layer.moving_variance, variables_collections, 'moving_variance')
if layer.beta is not None:
_add_variable_to_collections(layer.beta, variables_collections, 'beta')
if layer.gamma is not None:
_add_variable_to_collections(
layer.gamma, variables_collections, 'gamma')
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
# Not supported by layer class: batch_weights argument,
# and custom updates_collections. In that case, use the legacy BN
# implementation.
# Custom updates collections are not supported because the update logic
# is different in this case, in particular w.r.t. "forced updates" and
# update op reuse.
if renorm:
raise ValueError('renorm is not supported with batch_weights, '
'updates_collections or zero_debias_moving_mean')
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if batch_weights is not None:
batch_weights = ops.convert_to_tensor(batch_weights)
inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
# Reshape batch weight values so they broadcast across inputs.
nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
batch_weights = array_ops.reshape(batch_weights, nshape)
if data_format == DATA_FORMAT_NCHW:
moments_axes = [0] + list(range(2, inputs_rank))
params_shape = inputs_shape[1:2]
# For NCHW format, rather than relying on implicit broadcasting, we
# explicitly reshape the params to params_shape_broadcast when computing
# the moments and the batch normalization.
params_shape_broadcast = list(
[1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
else:
moments_axes = list(range(inputs_rank - 1))
params_shape = inputs_shape[-1:]
params_shape_broadcast = None
if not params_shape.is_fully_defined():
raise ValueError('Inputs %s has undefined channels dimension %s.' % (
inputs.name, params_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if not param_initializers:
param_initializers = {}
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta_initializer = param_initializers.get('beta',
init_ops.zeros_initializer())
beta = variables.model_variable('beta',
shape=params_shape,
dtype=dtype,
initializer=beta_initializer,
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma_initializer = param_initializers.get('gamma',
init_ops.ones_initializer())
gamma = variables.model_variable('gamma',
shape=params_shape,
dtype=dtype,
initializer=gamma_initializer,
collections=gamma_collections,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
# appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables.
partitioner = variable_scope.get_variable_scope().partitioner
try:
variable_scope.get_variable_scope().set_partitioner(None)
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
'moving_mean', init_ops.zeros_initializer())
moving_mean = variables.model_variable(
'moving_mean',
shape=params_shape,
dtype=dtype,
initializer=moving_mean_initializer,
trainable=False,
collections=moving_mean_collections)
moving_variance_collections = utils.get_variable_collections(
variables_collections, 'moving_variance')
moving_variance_initializer = param_initializers.get(
'moving_variance', init_ops.ones_initializer())
moving_variance = variables.model_variable(
'moving_variance',
shape=params_shape,
dtype=dtype,
initializer=moving_variance_initializer,
trainable=False,
collections=moving_variance_collections)
finally:
variable_scope.get_variable_scope().set_partitioner(partitioner)
# If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
    # `need_moments` will be true.
is_training_value = utils.constant_value(is_training)
need_moments = is_training_value is None or is_training_value
if need_moments:
# Calculate the moments based on the individual batch.
if batch_weights is None:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.moments(inputs, moments_axes)
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights, keep_dims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
mean, variance = nn.weighted_moments(inputs, moments_axes,
batch_weights)
moving_vars_fn = lambda: (moving_mean, moving_variance)
if updates_collections is None:
def _force_updates():
"""Internal function forces updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
with ops.control_dependencies([update_moving_mean,
update_moving_variance]):
return array_ops.identity(mean), array_ops.identity(variance)
mean, variance = utils.smart_cond(is_training,
_force_updates,
moving_vars_fn)
else:
def _delay_updates():
"""Internal function that delay updates moving_vars if is_training."""
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay, zero_debias=False)
return update_moving_mean, update_moving_variance
update_mean, update_variance = utils.smart_cond(is_training,
_delay_updates,
moving_vars_fn)
ops.add_to_collections(updates_collections, update_mean)
ops.add_to_collections(updates_collections, update_variance)
# Use computed moments during training and moving_vars otherwise.
vars_fn = lambda: (mean, variance)
mean, variance = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
else:
mean, variance = moving_mean, moving_variance
if data_format == DATA_FORMAT_NCHW:
mean = array_ops.reshape(mean, params_shape_broadcast)
variance = array_ops.reshape(variance, params_shape_broadcast)
if beta is not None:
beta = array_ops.reshape(beta, params_shape_broadcast)
if gamma is not None:
gamma = array_ops.reshape(gamma, params_shape_broadcast)
# Compute batch_normalization.
outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
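# A minimal usage sketch for `batch_norm`, assuming it is exposed as
# `tf.contrib.layers.batch_norm` (TF 1.x graph mode):
#
#   import tensorflow as tf
#   x = tf.random_normal([32, 28, 28, 64])                # NHWC feature map
#   y = tf.contrib.layers.batch_norm(x, decay=0.9, center=True, scale=True,
#                                    is_training=True, fused=True)
#   # As the docstring notes, run tf.GraphKeys.UPDATE_OPS together with the
#   # train op so the moving mean/variance are updated.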
@add_arg_scope
def bias_add(inputs,
activation_fn=None,
initializer=init_ops.zeros_initializer(),
regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format=DATA_FORMAT_NHWC,
scope=None):
"""Adds a bias to the inputs.
Can be used as a normalizer function for conv2d and fully_connected.
Args:
    inputs: A tensor with at least rank 2 and a static value for the last dimension,
e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
initializer: An initializer for the bias, defaults to 0.
regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
data_format: A string. 'NHWC' and 'NCHW' are supported.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the result of adding biases to the inputs.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.
ValueError: If the rank of `inputs` is undefined.
ValueError: If rank or `C` dimension of `inputs` is undefined.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
dtype = inputs.dtype.base_dtype
inputs_shape = inputs.get_shape()
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Dims of shape must be known but is None')
elif inputs_rank != 4 and data_format == DATA_FORMAT_NCHW:
raise ValueError('Data format NCHW only supports 4D Tensor')
axis = 1 if data_format == DATA_FORMAT_NCHW else -1
num_features = inputs_shape[axis].value
if num_features is None:
raise ValueError('`C` dimension must be known but is None')
biases_collections = utils.get_variable_collections(variables_collections,
'biases')
biases = variables.model_variable('biases',
shape=[num_features,],
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(inputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
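# A minimal usage sketch for `bias_add`, assuming it is exposed as
# `tf.contrib.layers.bias_add`:
#
#   import tensorflow as tf
#   x = tf.random_normal([32, 10])
#   y = tf.contrib.layers.bias_add(x)   # adds a trainable bias of shape [10]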
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = convolutional_layers.Convolution2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
raise ValueError('Convolution not supported for input with rank',
input_rank)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = layer_class(filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
convolution2d = convolution
convolution3d = convolution
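# A minimal usage sketch for the N-D `convolution` wrapper, assuming the 2-D
# alias is exposed as `tf.contrib.layers.conv2d`:
#
#   import tensorflow as tf
#   images = tf.random_normal([8, 224, 224, 3])
#   net = tf.contrib.layers.conv2d(
#       images, num_outputs=32, kernel_size=3, stride=2,
#       normalizer_fn=tf.contrib.layers.batch_norm,
#       normalizer_params={'is_training': True})
#   # With the default 'SAME' padding, `net` has shape [8, 112, 112, 32].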
@add_arg_scope
def convolution2d_in_plane(
inputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Performs the same in-plane convolution to each channel independently.
This is useful for performing various simple channel-independent convolution
operations such as image gradients:
image = tf.constant(..., shape=(16, 240, 320, 3))
vert_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[2, 1])
horz_gradients = layers.conv2d_in_plane(image,
kernel=[1, -1],
kernel_size=[1, 2])
Args:
inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: A list of length 2 `[stride_height, stride_width]`.
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding type to use, either 'SAME' or 'VALID'.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
weights_shape = [kernel_h, kernel_w, 1, 1]
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
weights = variables.model_variable('weights',
shape=weights_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
collections=weights_collections,
trainable=trainable)
depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
[1, stride_h, stride_w, 1], padding)
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_filters_in,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
collections=biases_collections,
trainable=trainable)
outputs = nn.bias_add(outputs, biases)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def convolution2d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution2d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 2.
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv2d_transpose', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = convolutional_layers.Convolution2DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
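# A minimal usage sketch for `convolution2d_transpose`, assuming the alias
# `tf.contrib.layers.conv2d_transpose`:
#
#   import tensorflow as tf
#   feat = tf.random_normal([8, 14, 14, 128])
#   up = tf.contrib.layers.conv2d_transpose(feat, num_outputs=64,
#                                           kernel_size=4, stride=2)
#   # With the default 'SAME' padding, `up` has shape [8, 28, 28, 64].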
@add_arg_scope
def convolution3d_transpose(
inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NDHWC,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a convolution3d_transpose with an optional batch normalization layer.
The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
second variable called 'biases' is added to the result of the operation.
Args:
inputs: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]` for `NDHWC` data format or
`[batch, in_channels, depth, height, width]` for `NCDHW` data format.
num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 3 holding the [kernel_depth, kernel_height, kernel_width]
      of the filters. Can be an int if all three values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width].
      Can be an int if all strides are the same. Note that presently
      all strides must have the same value.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If 'kernel_size' is not a list of length 3.
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
ValueError: If `C` dimension of `inputs` is None.
"""
layer_variable_getter = _build_variable_getter(
{'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'Conv3d_transpose', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = convolutional_layers.Convolution3DTranspose(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
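# A minimal usage sketch for `convolution3d_transpose`, assuming the alias
# `tf.contrib.layers.conv3d_transpose` is available in this TF version:
#
#   import tensorflow as tf
#   vol = tf.random_normal([4, 8, 16, 16, 64])            # NDHWC volume features
#   up = tf.contrib.layers.conv3d_transpose(vol, num_outputs=32,
#                                           kernel_size=4, stride=2)
#   # With the default 'SAME' padding, `up` has shape [4, 16, 32, 32, 32].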
@add_arg_scope
def dropout(inputs,
keep_prob=0.5,
noise_shape=None,
is_training=True,
outputs_collections=None,
scope=None):
"""Returns a dropout op applied to the input.
With probability `keep_prob`, outputs the input element scaled up by
`1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
sum is unchanged.
Args:
inputs: The tensor to pass to the nn.dropout op.
keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, inputs is returned.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A tensor representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dropout(rate=1 - keep_prob,
noise_shape=noise_shape,
name=sc.name,
_scope=sc)
outputs = layer.apply(inputs, training=is_training)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
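# A minimal usage sketch for `dropout`, assuming it is exposed as
# `tf.contrib.layers.dropout`; `is_training` can be a Python bool or a bool
# `Tensor`:
#
#   import tensorflow as tf
#   is_training = tf.placeholder(tf.bool, shape=[])
#   x = tf.random_normal([32, 128])
#   y = tf.contrib.layers.dropout(x, keep_prob=0.8, is_training=is_training)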
@add_arg_scope
def flatten(inputs,
outputs_collections=None,
scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: A tensor of size [batch_size, ...].
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
A flattened tensor with shape [batch_size, k].
Raises:
ValueError: If inputs rank is unknown or less than 2.
"""
with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_rank = inputs.get_shape().ndims
if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
inputs_shape = array_ops.shape(inputs)
batch_dim = array_ops.slice(inputs_shape, [0], [1])
spatial_dims = array_ops.slice(inputs_shape, [1], [inputs_rank - 1])
flat_spatial_dim = math_ops.reduce_prod(spatial_dims)
flat_spatial_dim = array_ops.expand_dims(flat_spatial_dim, 0)
flat_shape = array_ops.concat([batch_dim, flat_spatial_dim], 0)
outputs = array_ops.reshape(inputs, flat_shape)
# Attempt to propagate shape information, if it is defined.
input_shape = inputs.get_shape().as_list()
batch_dim, spatial_dims = input_shape[0], input_shape[1:]
if all(spatial_dims):
outputs.set_shape([batch_dim,
functools.reduce(lambda x, y: x * y, spatial_dims)])
else:
outputs.set_shape([batch_dim, None])
return utils.collect_named_outputs(outputs_collections, sc, outputs)
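# A minimal usage sketch for `flatten`, assuming it is exposed as
# `tf.contrib.layers.flatten`:
#
#   import tensorflow as tf
#   x = tf.random_normal([32, 7, 7, 256])
#   flat = tf.contrib.layers.flatten(x)   # shape [32, 12544]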
def _sparse_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
inputs_rank = inputs.dense_shape.get_shape().as_list()[0]
if inputs_rank < new_rank:
raise ValueError(
'Inputs has rank less than new_rank. {} must have rank at least'
' {}. Received rank {}, shape {}'.format(inputs, new_rank, inputs_rank,
inputs.get_shape()))
outer_dimensions = inputs.dense_shape[:new_rank - 1]
inner_dimensions = inputs.dense_shape[new_rank - 1:]
new_shape = array_ops.concat((outer_dimensions,
[math_ops.reduce_prod(inner_dimensions)]), 0)
flattened = sparse_ops.sparse_reshape(inputs, new_shape)
return flattened
def _dense_inner_flatten(inputs, new_rank):
"""Helper function for `inner_flatten`."""
rank_assertion = check_ops.assert_rank_at_least(
inputs, new_rank, message='inputs has rank less than new_rank')
with ops.control_dependencies([rank_assertion]):
outer_dimensions = array_ops.strided_slice(
array_ops.shape(inputs), [0], [new_rank - 1])
new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
reshaped = array_ops.reshape(inputs, new_shape)
# if `new_rank` is an integer, try to calculate new shape.
if isinstance(new_rank, six.integer_types):
static_shape = inputs.get_shape()
if static_shape is not None and static_shape.dims is not None:
static_shape = static_shape.as_list()
static_outer_dims = static_shape[:new_rank - 1]
static_inner_dims = static_shape[new_rank - 1:]
flattened_dimension = 1
for inner_dim in static_inner_dims:
if inner_dim is None:
flattened_dimension = None
break
flattened_dimension *= inner_dim
reshaped.set_shape(static_outer_dims + [flattened_dimension])
return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
"""Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.
For example:
'''
x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
y = _inner_flatten(x, 4)
assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
'''
This layer will fail at run time if `new_rank` is greater than the current
rank of `inputs`.
Args:
inputs: A `Tensor` or `SparseTensor`.
new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: Collection to which the outputs will be added.
scope: Optional scope for `name_scope`.
Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
flattened = _sparse_inner_flatten(inputs, new_rank)
else:
inputs = ops.convert_to_tensor(inputs)
flattened = _dense_inner_flatten(inputs, new_rank)
return utils.collect_named_outputs(output_collections, sc, flattened)
def _model_variable_getter(getter, name, shape=None, dtype=None,
initializer=None, regularizer=None, trainable=True,
collections=None, caching_device=None,
partitioner=None, rename=None, use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, collections=collections, trainable=trainable,
caching_device=caching_device, partitioner=partitioner,
custom_getter=getter, use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
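# How the renaming getter is wired in (a sketch of the pattern already used by
# the layer functions in this file, not additional API surface): the returned
# getter is passed as `custom_getter` so core layers create their variables as
# 'weights'/'biases' model variables.
#
#   layer_variable_getter = _build_variable_getter({'bias': 'biases',
#                                                   'kernel': 'weights'})
#   with variable_scope.variable_scope(
#       scope, 'Conv', [inputs], reuse=reuse,
#       custom_getter=layer_variable_getter) as sc:
#     ...  # core layer variables are routed through variables.model_variable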
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(
collections_set, collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
    inputs: A tensor of at least rank 2 with a static value for the last dimension,
      e.g. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
      Default is None, meaning no normalizer function is applied.
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError(
'num_outputs should be int or long, got %s.' % (num_outputs,))
layer_variable_getter = _build_variable_getter({'bias': 'biases',
'kernel': 'weights'})
with variable_scope.variable_scope(
scope, 'fully_connected', [inputs],
reuse=reuse, custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core_layers.Dense(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(
outputs_collections, sc.original_name_scope, outputs)
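# A minimal usage sketch for `fully_connected`, assuming it is exposed as
# `tf.contrib.layers.fully_connected`:
#
#   import tensorflow as tf
#   flat = tf.random_normal([32, 12544])
#   logits = tf.contrib.layers.fully_connected(flat, num_outputs=10,
#                                              activation_fn=None)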
class GDN(base.Layer):
"""Generalized divisive normalization layer.
Based on the papers:
"Density Modeling of Images using a Generalized Normalization
Transformation"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1511.06281
"End-to-end Optimized Image Compression"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1611.01704
Implements an activation function that is essentially a multivariate
generalization of a particular sigmoid-type function:
```
  y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
```
where `i` and `j` run over channels. This implementation never sums across
spatial dimensions. It is similar to local response normalization, but much
more flexible, as `beta` and `gamma` are trainable parameters.
Arguments:
inverse: If `False` (default), compute GDN response. If `True`, compute IGDN
response (one step of fixed point iteration to invert GDN; the division
is replaced by multiplication).
beta_min: Lower bound for beta, to prevent numerical error from causing
square root of zero or negative values.
gamma_init: The gamma matrix will be initialized as the identity matrix
multiplied with this value. If set to zero, the layer is effectively
initialized to the identity operation, since beta is initialized as one.
A good default setting is somewhere between 0 and 0.5.
reparam_offset: Offset added to the reparameterization of beta and gamma.
The reparameterization of beta and gamma as their square roots lets the
training slow down when their values are close to zero, which is desirable
as small values in the denominator can lead to a situation where gradient
noise on beta/gamma leads to extreme amounts of noise in the GDN
activations. However, without the offset, we would get zero gradients if
any elements of beta or gamma were exactly zero, and thus the training
could get stuck. To prevent this, we add this small constant. The default
value was empirically determined as a good starting point. Making it
    bigger potentially leads to more gradient noise on the activations, while
    making it too small may lead to numerical precision issues.
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True`, also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require `reuse=True` in such
cases.
Properties:
    inverse: Boolean, whether IGDN is computed (`True`) or GDN (`False`).
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
beta: The beta parameter as defined above (1D `Tensor`).
gamma: The gamma parameter as defined above (2D `Tensor`).
"""
def __init__(self,
inverse=False,
beta_min=1e-6,
gamma_init=.1,
reparam_offset=2 ** -18,
data_format='channels_last',
activity_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(GDN, self).__init__(trainable=trainable, name=name, **kwargs)
self.inverse = inverse
self._beta_min = beta_min
self._gamma_init = gamma_init
self._reparam_offset = reparam_offset
self.data_format = data_format
self.activity_regularizer = activity_regularizer
self._channel_axis() # trigger ValueError early
self.input_spec = base.InputSpec(min_ndim=3, max_ndim=5)
def _channel_axis(self):
try:
return {'channels_first': 1, 'channels_last': -1}[self.data_format]
except KeyError:
raise ValueError('Unsupported `data_format` for GDN layer: {}.'.format(
self.data_format))
@staticmethod
def _lower_bound(inputs, bound, name=None):
"""Same as tf.maximum, but with helpful gradient for inputs < bound.
The gradient is overwritten so that it is passed through if the input is not
hitting the bound. If it is, only gradients that push `inputs` higher than
the bound are passed through. No gradients are passed through to the bound.
Args:
inputs: input tensor
bound: lower bound for the input tensor
name: name for this op
Returns:
tf.maximum(inputs, bound)
"""
with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
inputs = ops.convert_to_tensor(inputs, name='inputs')
bound = ops.convert_to_tensor(bound, name='bound')
with ops.get_default_graph().gradient_override_map(
{'Maximum': 'GDNLowerBound'}):
return math_ops.maximum(inputs, bound, name=scope)
@staticmethod
def _lower_bound_grad(op, grad):
"""Gradient for `_lower_bound`.
Args:
op: the tensorflow op for which to calculate a gradient
grad: gradient with respect to the output of the op
Returns:
gradients with respect to the inputs of the op
"""
inputs = op.inputs[0]
bound = op.inputs[1]
pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]
def build(self, input_shape):
channel_axis = self._channel_axis()
input_shape = tensor_shape.TensorShape(input_shape)
num_channels = input_shape[channel_axis].value
if num_channels is None:
raise ValueError('The channel dimension of the inputs to `GDN` '
'must be defined.')
self._input_rank = input_shape.ndims
self.input_spec = base.InputSpec(ndim=input_shape.ndims,
axes={channel_axis: num_channels})
pedestal = array_ops.constant(self._reparam_offset ** 2, dtype=self.dtype)
beta_bound = array_ops.constant(
(self._beta_min + self._reparam_offset ** 2) ** .5, dtype=self.dtype)
gamma_bound = array_ops.constant(self._reparam_offset, dtype=self.dtype)
def beta_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
return math_ops.sqrt(array_ops.ones(shape, dtype=dtype) + pedestal)
def gamma_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
assert len(shape) == 2
assert shape[0] == shape[1]
eye = linalg_ops.eye(shape[0], dtype=dtype)
return math_ops.sqrt(self._gamma_init * eye + pedestal)
beta = self.add_variable('reparam_beta',
shape=[num_channels],
initializer=beta_initializer,
dtype=self.dtype,
trainable=True)
beta = self._lower_bound(beta, beta_bound)
self.beta = math_ops.square(beta) - pedestal
gamma = self.add_variable('reparam_gamma',
shape=[num_channels, num_channels],
initializer=gamma_initializer,
dtype=self.dtype,
trainable=True)
gamma = self._lower_bound(gamma, gamma_bound)
self.gamma = math_ops.square(gamma) - pedestal
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
ndim = self._input_rank
shape = self.gamma.get_shape().as_list()
gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
# Compute normalization pool.
if self.data_format == 'channels_first':
norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID',
data_format='NC' + 'DHW'[-(ndim - 2):])
if ndim == 3:
norm_pool = array_ops.expand_dims(norm_pool, 2)
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
norm_pool = array_ops.squeeze(norm_pool, [2])
elif ndim == 5:
shape = array_ops.shape(norm_pool)
norm_pool = array_ops.reshape(norm_pool, shape[:3] + [-1])
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
norm_pool = array_ops.reshape(norm_pool, shape)
else: # ndim == 4
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
else: # channels_last
norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
norm_pool = math_ops.sqrt(norm_pool)
if self.inverse:
outputs = inputs * norm_pool
else:
outputs = inputs / norm_pool
outputs.set_shape(inputs.get_shape())
return outputs
def _compute_output_shape(self, input_shape):
channel_axis = self._channel_axis()
input_shape = tensor_shape.TensorShape(input_shape)
if not 3 <= input_shape.ndims <= 5:
raise ValueError('`input_shape` must be of rank 3 to 5, inclusive.')
if input_shape[channel_axis].value is None:
raise ValueError(
'The channel dimension of `input_shape` must be defined.')
return input_shape
ops.RegisterGradient('GDNLowerBound')(GDN._lower_bound_grad) # pylint:disable=protected-access
def gdn(inputs,
inverse=False,
beta_min=1e-6,
gamma_init=.1,
reparam_offset=2 ** -18,
data_format='channels_last',
activity_regularizer=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for GDN layer.
Based on the papers:
"Density Modeling of Images using a Generalized Normalization
Transformation"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1511.06281
"End-to-end Optimized Image Compression"
Johannes Ballé, Valero Laparra, Eero P. Simoncelli
https://arxiv.org/abs/1611.01704
Implements an activation function that is essentially a multivariate
generalization of a particular sigmoid-type function:
```
y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
```
where `i` and `j` run over channels. This implementation never sums across
spatial dimensions. It is similar to local response normalization, but much
more flexible, as `beta` and `gamma` are trainable parameters.
Args:
inputs: Tensor input.
inverse: If `False` (default), compute GDN response. If `True`, compute IGDN
response (one step of fixed point iteration to invert GDN; the division
is replaced by multiplication).
beta_min: Lower bound for beta, to prevent numerical error from causing
square root of zero or negative values.
gamma_init: The gamma matrix will be initialized as the identity matrix
multiplied with this value. If set to zero, the layer is effectively
initialized to the identity operation, since beta is initialized as one.
A good default setting is somewhere between 0 and 0.5.
reparam_offset: Offset added to the reparameterization of beta and gamma.
The reparameterization of beta and gamma as their square roots lets the
training slow down when their values are close to zero, which is desirable
as small values in the denominator can lead to a situation where gradient
noise on beta/gamma leads to extreme amounts of noise in the GDN
activations. However, without the offset, we would get zero gradients if
any elements of beta or gamma were exactly zero, and thus the training
could get stuck. To prevent this, we add this small constant. The default
value was empirically determined as a good starting point. Making it
bigger potentially leads to more gradient noise on the activations, making
it too small may lead to numerical precision issues.
data_format: Format of input tensor. Currently supports `'channels_first'`
and `'channels_last'`.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True`, also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require `reuse=True` in such
cases.
reuse: Boolean, whether to reuse the weights of a previous layer by the same
name.
Returns:
Output tensor.
"""
layer = GDN(inverse=inverse,
beta_min=beta_min,
gamma_init=gamma_init,
reparam_offset=reparam_offset,
data_format=data_format,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
@add_arg_scope
def layer_norm(inputs,
center=True,
scale=True,
activation_fn=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
begin_norm_axis=1,
begin_params_axis=-1,
scope=None):
"""Adds a Layer Normalization layer.
Based on the paper:
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
https://arxiv.org/abs/1607.06450.
Can be used as a normalizer function for conv2d and fully_connected.
Given a tensor `inputs` of rank `R`, moments are calculated and normalization
is performed over axes `begin_norm_axis ... R - 1`. Scaling and centering,
if requested, is performed over axes `begin_shift_axis .. R - 1`.
By default, `begin_norm_axis = 1` and `begin_params_axis = -1`,
meaning that normalization is performed over all but the first axis
(the `HWC` if `inputs` is `NHWC`), while the `beta` and `gamma` trainable
parameters are calculated for the rightmost axis (the `C` if `inputs` is
`NHWC`). Scaling and recentering is performed via broadcast of the
`beta` and `gamma` parameters with the normalized tensor.
The shapes of `beta` and `gamma` are `inputs.shape[begin_params_axis:]`,
and this part of the inputs' shape must be fully defined.
Args:
inputs: A tensor having rank `R`. The normalization is performed over
axes `begin_norm_axis ... R - 1` and centering and scaling parameters
are calculated over `begin_params_axis ... R - 1`.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional collections for the variables.
outputs_collections: Collections to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
begin_norm_axis: The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
scope: Optional scope for `variable_scope`.
Returns:
A `Tensor` representing the output of the operation, having the same
shape and dtype as `inputs`.
Raises:
ValueError: If the rank of `inputs` is not known at graph build time,
or if `inputs.shape[begin_params_axis:]` is not fully defined at
graph build time.
"""
with variable_scope.variable_scope(scope, 'LayerNorm', [inputs],
reuse=reuse) as sc:
inputs = ops.convert_to_tensor(inputs)
inputs_shape = inputs.shape
inputs_rank = inputs_shape.ndims
if inputs_rank is None:
raise ValueError('Inputs %s has undefined rank.' % inputs.name)
dtype = inputs.dtype.base_dtype
if begin_norm_axis < 0:
begin_norm_axis = inputs_rank + begin_norm_axis
if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:
raise ValueError(
'begin_params_axis (%d) and begin_norm_axis (%d) '
'must be < rank(inputs) (%d)'
% (begin_params_axis, begin_norm_axis, inputs_rank))
params_shape = inputs_shape[begin_params_axis:]
if not params_shape.is_fully_defined():
raise ValueError(
'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % (
inputs.name, begin_params_axis, inputs_shape))
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta_collections = utils.get_variable_collections(variables_collections,
'beta')
beta = variables.model_variable(
'beta',
shape=params_shape,
dtype=dtype,
initializer=init_ops.zeros_initializer(),
collections=beta_collections,
trainable=trainable)
if scale:
gamma_collections = utils.get_variable_collections(variables_collections,
'gamma')
gamma = variables.model_variable(
'gamma',
shape=params_shape,
dtype=dtype,
initializer=init_ops.ones_initializer(),
collections=gamma_collections,
trainable=trainable)
# Calculate the moments on the last axis (layer activations).
norm_axes = list(range(begin_norm_axis, inputs_rank))
mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
variance_epsilon = 1e-12
outputs = nn.batch_normalization(
inputs, mean, variance, offset=beta, scale=gamma,
variance_epsilon=variance_epsilon)
outputs.set_shape(inputs_shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
outputs)
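# --- Illustrative sketch (added by the editor, not part of the original
# module): a minimal NumPy reference of the normalization performed above
# for a rank-2 input with the default begin_norm_axis=1, begin_params_axis=-1.
def _layer_norm_reference_sketch():
  import numpy as np
  x = np.random.randn(2, 5)                       # [batch, features]
  beta, gamma = np.zeros(5), np.ones(5)           # per-feature shift / scale
  mean = x.mean(axis=1, keepdims=True)            # moments over the norm axes
  var = x.var(axis=1, keepdims=True)
  return gamma * (x - mean) / np.sqrt(var + 1e-12) + beta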
@add_arg_scope
def max_pool2d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NHWC,
outputs_collections=None,
scope=None):
"""Adds a 2D Max Pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
`data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
`data_format` is `NCHW`.
kernel_size: A list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: A list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
ValueError: If 'kernel_size' is not a 2-D list
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.MaxPooling2D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def max_pool3d(inputs,
kernel_size,
stride=2,
padding='VALID',
data_format=DATA_FORMAT_NDHWC,
outputs_collections=None,
scope=None):
"""Adds a 3D Max Pooling op.
It is assumed that the pooling is done per image but not in batch or channels.
Args:
inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]` if
`data_format` is `NDHWC`, and `[batch_size, channels, depth, height, width]` if
`data_format` is `NCDHW`.
kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if all three
values are the same.
stride: A list of length 3: [stride_depth, stride_height, stride_width].
Can be an int if all strides are the same. Note that presently
all strides must have the same value.
padding: The padding method, either 'VALID' or 'SAME'.
data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
ValueError: If 'kernel_size' is not a 3-D list
"""
if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
raise ValueError('data_format has to be either NCDHW or NDHWC.')
with ops.name_scope(scope, 'MaxPool3D', [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
layer = pooling_layers.MaxPooling3D(pool_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
_scope=sc)
outputs = layer.apply(inputs)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
@add_arg_scope
def pool(inputs,
kernel_size,
pooling_type,
padding='VALID',
data_format=None,
dilation_rate=1,
stride=1,
outputs_collections=None,
scope=None):
# pylint: disable=line-too-long
"""Adds a pooling op.
Args:
inputs: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
kernel_size: Sequence of N ints >= 1. Can also be a single integer to
specify the same value for all spatial dimensions.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
to [1]*N. Can also be a single integer to specify the same value for all
spatial dimensions. If any value of dilation_rate is > 1, then all values
of stride must be 1.
stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
a single integer to specify the same value for all spatial dimensions. If
any value of stride is > 1, then all values of dilation_rate must be 1.
outputs_collections: The collections to which the outputs are added.
scope: Optional scope for name_scope.
Returns:
A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: If arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(scope, '%s_pool' %
(pooling_type.lower()), [inputs]) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank is None:
raise ValueError('Rank of inputs must be known')
if input_rank < 3:
raise ValueError('Rank of inputs must be >= 3')
num_spatial_dims = input_rank - 2
output = nn.pool(
input=inputs,
window_shape=utils.n_positive_integers(num_spatial_dims, kernel_size),
pooling_type=pooling_type,
padding=padding,
data_format=data_format,
dilation_rate=utils.n_positive_integers(num_spatial_dims,
dilation_rate),
strides=utils.n_positive_integers(num_spatial_dims, stride),
name=sc)
return utils.collect_named_outputs(outputs_collections, sc, output)
@add_arg_scope
def one_hot_encoding(labels,
num_classes,
on_value=1.0,
off_value=0.0,
outputs_collections=None,
scope=None):
"""Transform numeric labels into onehot_labels using `tf.one_hot`.
Args:
labels: [batch_size] target labels.
num_classes: Total number of classes.
on_value: A scalar defining the on-value.
off_value: A scalar defining the off-value.
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
Returns:
One-hot encoding of the labels.
"""
with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
labels = ops.convert_to_tensor(labels)
if labels.dtype == dtypes.int32:
labels = standard_ops.to_int64(labels)
outputs = standard_ops.one_hot(labels,
num_classes,
on_value=on_value,
off_value=off_value)
return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _apply_activation(y, activation_fn, output_collections):
if activation_fn is not None:
y = activation_fn(y)
ops.add_to_collections(list(output_collections or []) +
[ops.GraphKeys.ACTIVATIONS], y)
return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
"""Applies the same layer with the same arguments repeatedly.
```python
y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
# It is equivalent to:
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='repeat'`.
Args:
inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
**kwargs: Extra kwargs for the layer.
Returns:
A tensor result of applying the layer, repetitions times.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'repeat'
outputs = inputs
for i in range(repetitions):
kwargs['scope'] = scope + '_' + str(i+1)
outputs = layer(outputs, *args, **kwargs)
return outputs
def _scale_gradient_shape(op):
"""Shape helper function for scale_gradient function below."""
return [op.inputs[0].shape]
def _scale_gradient_grad(op, grad):
"""Python gradient helper function for scale_gradient function below."""
return [grad * op.inputs[1], None]
@function.Defun(python_grad_func=_scale_gradient_grad,
shape_func=_scale_gradient_shape)
def scale_gradient(inputs, gradient_multiplier):
"""Identity operation, but with the gradient multiplied by a tensor.
The TensorFlow gradient system will compute the gradient with respect to
`inputs` as the product of the gradient with respect to the `output`
multiplied by a specified `gradient_multiplier` tensor. If
`gradient_multiplier` is equal to 1, then this results in the true gradient.
Otherwise, it results in a scaled gradient.
This can be useful for adjusting the relative learning rate of different
parameter tensors when performing gradient descent, and because this rescaling
can be inserted at arbitrary locations within a graph, is often more
convenient to apply than simply rescaling the final computed gradients.
Args:
inputs: Tensor to be output.
gradient_multiplier: Tensor by which to multiply the gradient with respect
to `output` to compute the gradient with respect to `inputs`. Its shape
must be broadcastable to the shape of `inputs`.
Returns:
output Tensor, equal to `inputs`.
"""
# gradient_multiplier is implicitly saved by decorator, and only used for
# gradient computation.
del gradient_multiplier
return inputs
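# --- Illustrative sketch (added by the editor, not part of the original
# module): what scale_gradient does conceptually -- the forward value is the
# identity, while the gradient flowing back to `inputs` is multiplied by the
# supplied factor.
def _scale_gradient_reference_sketch(inputs, upstream_grad, gradient_multiplier):
  output = inputs                                        # forward pass: identity
  grad_wrt_inputs = upstream_grad * gradient_multiplier  # scaled backward pass
  return output, grad_wrt_inputs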
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `normalizer_fn` is None,
it adds bias to the result, creating a variable called 'biases', otherwise,
the `normalizer_fn` is applied. It finally applies an activation function
to produce the end result.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
num_outputs: The number of pointwise convolution output filters. If it is
None, then we skip the pointwise convolution stage.
kernel_size: A list of length 2: [kernel_height, kernel_width] of
the filters. Can be an int if both values are the same.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
rate: A list of length 2: [rate_height, rate_width], specifying the dilation
rates for atrous convolution. Can be an int if both rates are the same.
If any value is larger than one, then both stride values need to be one.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: Whether or not the variables should be trainable or not.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter(
{'bias': 'biases',
'depthwise_kernel': 'depthwise_weights',
'pointwise_kernel': 'pointwise_weights'})
with variable_scope.variable_scope(
scope, 'SeparableConv2d', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first' if data_format and data_format.startswith('NC')
else 'channels_last')
if num_outputs is not None:
# Apply separable conv using the SeparableConvolution2D layer.
layer = convolutional_layers.SeparableConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=utils.two_element_tuple(rate),
activation=None,
depth_multiplier=depth_multiplier,
use_bias=not normalizer_fn and biases_initializer,
depthwise_initializer=weights_initializer,
pointwise_initializer=weights_initializer,
bias_initializer=biases_initializer,
depthwise_regularizer=weights_regularizer,
pointwise_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.depthwise_kernel,
variables_collections, 'weights')
_add_variable_to_collections(layer.pointwise_kernel,
variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias,
variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
# Actually apply depthwise conv instead of separable conv.
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.channel_dimension(
inputs.get_shape(), df, min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w,
num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
strides = [1, stride_h, stride_w, 1]
outputs = nn.depthwise_conv2d(inputs, depthwise_weights, strides, padding,
rate=utils.two_element_tuple(rate),
data_format=data_format)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable('biases',
shape=[num_outputs,],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
trainable=trainable,
collections=biases_collections)
outputs = nn.bias_add(outputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
@add_arg_scope
def softmax(logits, scope=None):
"""Performs softmax on Nth dimension of N-dimensional logit tensor.
For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
needs to have a specified number of elements (number of classes).
Args:
logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
logits_2d = array_ops.reshape(logits, [-1, num_logits])
predictions = nn.softmax(logits_2d)
predictions = array_ops.reshape(predictions, array_ops.shape(logits))
predictions.set_shape(logits.get_shape())
return predictions
@add_arg_scope
def spatial_softmax(features,
temperature=None,
name=None,
variables_collections=None,
trainable=True,
data_format='NHWC'):
"""Computes the spatial softmax of a convolutional feature map.
First computes the softmax over the spatial extent of each channel of a
convolutional feature map. Then computes the expected 2D position of the
points of maximal activation for each channel, resulting in a set of
feature keypoints [x1, y1, ... xN, yN] for all N channels.
Read more here:
"Learning visual feature spaces for robotic manipulation with
deep spatial autoencoders." Finn et. al, http://arxiv.org/abs/1509.06113.
Args:
features: A `Tensor` of size [batch_size, W, H, num_channels]; the
convolutional feature map.
temperature: Softmax temperature (optional). If None, a learnable
temperature is created.
name: A name for this operation (optional).
variables_collections: Collections for the temperature variable.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
data_format: A string. `NHWC` (default) and `NCHW` are supported.
Returns:
feature_keypoints: A `Tensor` with size [batch_size, num_channels * 2];
the expected 2D locations of each channel's feature keypoint (normalized
to the range (-1,1)). The inner dimension is arranged as
[x1, y1, ... xN, yN].
Raises:
ValueError: If unexpected data_format specified.
ValueError: If num_channels dimension is unspecified.
"""
shape = array_ops.shape(features)
static_shape = features.shape
if data_format == DATA_FORMAT_NHWC:
height, width, num_channels = shape[1], shape[2], static_shape[3]
elif data_format == DATA_FORMAT_NCHW:
num_channels, height, width = static_shape[1], shape[2], shape[3]
else:
raise ValueError('data_format has to be either NCHW or NHWC.')
if num_channels.value is None:
raise ValueError('The num_channels dimension of the inputs to '
'`spatial_softmax` should be defined. Found `None`.')
with ops.name_scope(name, 'spatial_softmax', [features]) as name:
# Create tensors for x and y coordinate values, scaled to range [-1, 1].
pos_x, pos_y = array_ops.meshgrid(math_ops.lin_space(-1., 1., num=height),
math_ops.lin_space(-1., 1., num=width),
indexing='ij')
pos_x = array_ops.reshape(pos_x, [height * width])
pos_y = array_ops.reshape(pos_y, [height * width])
if temperature is None:
temperature_collections = utils.get_variable_collections(
variables_collections, 'temperature')
temperature = variables.model_variable(
'temperature',
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer(),
collections=temperature_collections,
trainable=trainable)
if data_format == 'NCHW':
features = array_ops.reshape(features, [-1, height * width])
else:
features = array_ops.reshape(
array_ops.transpose(features, [0, 3, 1, 2]), [-1, height * width])
softmax_attention = nn.softmax(features/temperature)
expected_x = math_ops.reduce_sum(
pos_x * softmax_attention, [1], keep_dims=True)
expected_y = math_ops.reduce_sum(
pos_y * softmax_attention, [1], keep_dims=True)
expected_xy = array_ops.concat([expected_x, expected_y], 1)
feature_keypoints = array_ops.reshape(
expected_xy, [-1, num_channels.value * 2])
feature_keypoints.set_shape([None, num_channels.value * 2])
return feature_keypoints
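# --- Illustrative sketch (added by the editor, not part of the original
# module): the expected-coordinate computation above in plain NumPy for a
# single H x W channel.
def _spatial_softmax_reference_sketch():
  import numpy as np
  features = np.random.randn(8, 8)                      # one channel
  pos_x, pos_y = np.meshgrid(np.linspace(-1., 1., 8),
                             np.linspace(-1., 1., 8), indexing='ij')
  e = np.exp(features - features.max())                 # softmax over space
  attention = e / e.sum()
  expected_x = (pos_x * attention).sum()                # keypoint x in [-1, 1]
  expected_y = (pos_y * attention).sum()                # keypoint y in [-1, 1]
  return expected_x, expected_y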
def stack(inputs, layer, stack_args, **kwargs):
"""Builds a stack of layers by applying layer repeatedly using stack_args.
`stack` allows you to repeatedly apply the same operation with different
arguments `stack_args[i]`. For each application of the layer, `stack` creates
a new scope appended with an increasing number. For example:
```python
y = stack(x, fully_connected, [32, 64, 128], scope='fc')
# It is equivalent to:
x = fully_connected(x, 32, scope='fc/fc_1')
x = fully_connected(x, 64, scope='fc/fc_2')
y = fully_connected(x, 128, scope='fc/fc_3')
```
If the `scope` argument is not given in `kwargs`, it is set to
`layer.__name__`, or `layer.func.__name__` (for `functools.partial`
objects). If neither `__name__` nor `func.__name__` is available, the
layers are called with `scope='stack'`.
Args:
inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
A `Tensor` result of applying the stacked layers.
Raises:
ValueError: If the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
if not isinstance(stack_args, (list, tuple)):
raise ValueError('stack_args need to be a list or tuple')
with variable_scope.variable_scope(scope, 'Stack', [inputs]):
inputs = ops.convert_to_tensor(inputs)
if scope is None:
if hasattr(layer, '__name__'):
scope = layer.__name__
elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
scope = layer.func.__name__ # In case layer is a functools.partial.
else:
scope = 'stack'
outputs = inputs
for i in range(len(stack_args)):
kwargs['scope'] = scope + '_' + str(i+1)
layer_args = stack_args[i]
if not isinstance(layer_args, (list, tuple)):
layer_args = [layer_args]
outputs = layer(outputs, *layer_args, **kwargs)
return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
"""Normalizes the given input across the specified dimension to unit length.
Note that the rank of `input` must be known.
Args:
inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
The normalized `Tensor`.
Raises:
ValueError: If dim is smaller than the number of dimensions in 'inputs'.
"""
with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
if not inputs.get_shape():
raise ValueError('The input rank must be known.')
input_rank = len(inputs.get_shape().as_list())
if dim < 0 or dim >= input_rank:
raise ValueError(
'dim must be positive but smaller than the input rank.')
lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
math_ops.square(inputs), dim, True))
multiples = []
if dim > 0:
multiples.append(array_ops.ones([dim], dtypes.int32))
multiples.append(
array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
if dim < (input_rank - 1):
multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
multiples = array_ops.concat(multiples, 0)
return math_ops.div(inputs, array_ops.tile(lengths, multiples))
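# --- Illustrative sketch (added by the editor, not part of the original
# module): unit normalization along one dimension in NumPy (here dim=1).
def _unit_norm_reference_sketch():
  import numpy as np
  x = np.random.randn(3, 4)
  lengths = np.sqrt(1e-7 + np.sum(np.square(x), axis=1, keepdims=True))
  return x / lengths                                     # rows have unit length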
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
"""Project into the Poincare ball with norm <= 1.0 - epsilon.
https://en.wikipedia.org/wiki/Poincare_ball_model
Used in
Poincare Embeddings for Learning Hierarchical Representations
Maximilian Nickel, Douwe Kiela
https://arxiv.org/pdf/1705.08039.pdf
For a 1-D tensor with `axis = 0`, computes
(x * (1 - epsilon)) / ||x|| if ||x|| > 1 - epsilon
output =
x otherwise
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `axis`.
Args:
x: A `Tensor`.
axis: Axis along which to normalize. A scalar or a vector of
integers.
epsilon: A small deviation from the edge of the unit sphere for numerical
stability.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.name_scope(name, 'poincare_normalize', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keep_dims=True)
x_inv_norm = math_ops.rsqrt(square_sum)
x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
return math_ops.multiply(x, x_inv_norm, name=name)
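# --- Illustrative sketch (added by the editor, not part of the original
# module): the Poincare-ball projection above for a single 1-D vector.
def _poincare_normalize_reference_sketch():
  import numpy as np
  x = np.array([3.0, 4.0])
  epsilon = 1e-5
  norm = np.sqrt(np.sum(np.square(x)))
  scale = min((1. - epsilon) / norm, 1.)    # shrink only if ||x|| > 1 - epsilon
  return x * scale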
def legacy_fully_connected(x,
num_output_units,
activation_fn=None,
weight_init=initializers.xavier_initializer(),
bias_init=init_ops.zeros_initializer(),
name=None,
weight_collections=(ops.GraphKeys.WEIGHTS,),
bias_collections=(ops.GraphKeys.BIASES,),
output_collections=(ops.GraphKeys.ACTIVATIONS,),
trainable=True,
weight_regularizer=None,
bias_regularizer=None):
# pylint: disable=anomalous-backslash-in-string
r"""Adds the parameters for a fully connected layer and returns the output.
A fully connected layer is generally defined as a matrix multiply:
`y = f(w * x + b)` where `f` is given by `activation_fn`. If
`activation_fn` is `None`, the result of `y = w * x + b` is
returned.
If `x` has shape [\\\(\\text{dim}_0, \\text{dim}_1, ..., \\text{dim}_n\\\)]
with more than 2 dimensions (\\\(n > 1\\\)), then we repeat the matrix
multiply along the first dimensions. The result r is a tensor of shape
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`],
where \\\( r_{i_0, ..., i_{n-1}, k} =
\\sum_{0 \\leq j < \\text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\\).
This is accomplished by reshaping `x` to 2-D
[\\\(\\text{dim}_0 \\cdot ... \\cdot \\text{dim}_{n-1}, \\text{dim}_n\\\)]
before the matrix multiply and afterwards reshaping it to
[\\\(\\text{dim}_0, ..., \\text{dim}_{n-1},\\\) `num_output_units`].
This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
`bias_init` to `None`.
The variable creation is compatible with `tf.variable_scope` and so can be
reused with `tf.variable_scope` or `tf.make_template`.
Most of the details of variable creation can be controlled by specifying the
initializers (`weight_init` and `bias_init`) and in which collections to place
the created variables (`weight_collections` and `bias_collections`; note that
the variables are always added to the `VARIABLES` collection). The output of
the layer can be placed in custom collections using `output_collections`.
The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
respectively.
A per layer regularization can be specified by setting `weight_regularizer`
and `bias_regularizer`, which are applied to the weights and biases
respectively, and whose output is added to the `REGULARIZATION_LOSSES`
collection.
Args:
x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: Activation function, default set to None to skip it and
maintain a linear activation.
weight_init: An optional weight initialization, defaults to
`xavier_initializer`.
bias_init: An initializer for the bias, defaults to 0. Set to `None` in
order to disable bias.
name: The name for this operation is used to name operations and to find
variables. If specified it must be unique for this scope, otherwise a
unique name starting with "fully_connected" will be created. See
`tf.variable_scope` for details.
weight_collections: List of graph collections to which weights are added.
bias_collections: List of graph collections to which biases are added.
output_collections: List of graph collections to which outputs are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
weight_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for weights.
bias_regularizer: A regularizer like the result of
`l1_regularizer` or `l2_regularizer`. Used for biases.
Returns:
The output of the fully connected layer.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
with variable_scope.variable_scope(name, 'fully_connected', [x]):
x = ops.convert_to_tensor(x)
dims = x.get_shape().dims
if dims is None:
raise ValueError('dims of x must be known but is None')
if len(dims) < 2:
raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
num_input_units = dims[-1].value
if num_input_units is None:
raise ValueError('last dimension of x must be known but is None')
dtype = x.dtype.base_dtype
weight_collections = set(list(weight_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
w = variable_scope.get_variable('weights',
shape=[num_input_units, num_output_units],
dtype=dtype,
initializer=weight_init,
collections=weight_collections,
regularizer=weight_regularizer,
trainable=trainable)
x_2_dim = x if len(dims) <= 2 else array_ops.reshape(x,
[-1, num_input_units])
y = standard_ops.matmul(x_2_dim, w)
if bias_init is not None:
bias_collections = set(list(bias_collections or []) +
[ops.GraphKeys.GLOBAL_VARIABLES])
b = variable_scope.get_variable('bias',
shape=[num_output_units],
dtype=dtype,
initializer=bias_init,
collections=bias_collections,
regularizer=bias_regularizer,
trainable=trainable)
y = nn.bias_add(y, b)
if len(dims) > 2:
out_shape = array_ops.unstack(array_ops.shape(x))
out_shape[-1] = num_output_units
y = array_ops.reshape(y, array_ops.stack(out_shape))
static_shape = x.get_shape().as_list()
static_shape[-1] = num_output_units
y.set_shape(static_shape)
return _apply_activation(y, activation_fn, output_collections)
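# --- Illustrative sketch (added by the editor, not part of the original
# module): the reshape-matmul-reshape pattern described in the docstring
# above, for an input with more than 2 dimensions, in NumPy.
def _legacy_fully_connected_reference_sketch():
  import numpy as np
  x = np.random.randn(2, 3, 4)                     # dims = [2, 3, 4]
  w = np.random.randn(4, 6)                        # num_output_units = 6
  return x.reshape(-1, 4).dot(w).reshape(2, 3, 6)  # matmul over leading dims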
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which remove the activation_fn parameter.
elu = functools.partial(fully_connected, activation_fn=nn.elu)
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv3d = convolution3d
conv2d_transpose = convolution2d_transpose
conv3d_transpose = convolution3d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| [
[
[
830,
845
]
],
[
[
869,
877
]
],
[
[
901,
915
]
],
[
[
924,
933
],
[
129028,
129037
],
[
129099,
129108
],
[
129180,
129189
],
[
129249,
129258
],
[
129315,
129324
],
[
129383,
129392
],
[
65275,
65284
]
],
[
[
941,
943
],
[
3660,
3662
]
],
[
[
951,
954
],
[
66765,
66768
],
[
73385,
73388
]
],
[
[
1008,
1021
],
[
3770,
3783
],
[
5824,
5837
],
[
18466,
18479
],
[
37228,
37241
],
[
40700,
40713
],
[
47745,
47758
],
[
52752,
52765
],
[
57484,
57497
],
[
62270,
62283
],
[
63828,
63841
],
[
67327,
67340
],
[
70381,
70394
],
[
87851,
87864
],
[
93567,
93580
],
[
95648,
95661
],
[
97782,
97795
],
[
100794,
100807
],
[
105347,
105360
],
[
113278,
113291
],
[
114178,
114191
],
[
119975,
119988
]
],
[
[
1074,
1083
],
[
13706,
13715
],
[
14251,
14260
],
[
14870,
14879
],
[
15335,
15344
],
[
30918,
30927
],
[
31565,
31574
],
[
32543,
32552
],
[
33030,
33039
],
[
39928,
39937
],
[
51105,
51114
],
[
52054,
52063
],
[
69211,
69220
],
[
92236,
92245
],
[
92630,
92639
],
[
111624,
111633
],
[
112544,
112553
],
[
116997,
117006
]
],
[
[
1136,
1148
],
[
41063,
41075
],
[
47953,
47965
],
[
53012,
53024
],
[
57745,
57757
],
[
70623,
70635
],
[
105641,
105653
],
[
122836,
122848
]
],
[
[
1201,
1206
],
[
5759,
5764
],
[
7866,
7871
],
[
13367,
13372
],
[
14027,
14032
],
[
14667,
14672
],
[
15117,
15122
],
[
16015,
16020
],
[
16412,
16417
],
[
17170,
17175
],
[
17802,
17807
],
[
18342,
18347
],
[
28532,
28537
],
[
30653,
30658
],
[
31295,
31300
],
[
32332,
32337
],
[
32804,
32809
],
[
33568,
33573
],
[
35251,
35256
],
[
35908,
35913
],
[
36358,
36363
],
[
37104,
37109
],
[
39795,
39800
],
[
40481,
40486
],
[
47564,
47569
],
[
50776,
50781
],
[
50838,
50843
],
[
50891,
50896
],
[
51017,
51022
],
[
51960,
51965
],
[
52628,
52633
],
[
57360,
57365
],
[
62146,
62151
],
[
63734,
63739
],
[
65393,
65398
],
[
68548,
68553
],
[
70002,
70007
],
[
74868,
74873
],
[
92105,
92110
],
[
92496,
92501
],
[
93404,
93409
],
[
95583,
95588
],
[
97717,
97722
],
[
100352,
100357
],
[
100525,
100530
],
[
100649,
100654
],
[
100730,
100735
],
[
101843,
101848
],
[
109789,
109794
],
[
111199,
111204
],
[
111263,
111268
],
[
111318,
111323
],
[
111417,
111422
],
[
112063,
112068
],
[
112446,
112451
],
[
113154,
113159
],
[
113879,
113884
],
[
116897,
116902
]
],
[
[
1247,
1253
],
[
101572,
101578
],
[
117084,
117090
],
[
121055,
121061
],
[
121261,
121267
]
],
[
[
1294,
1302
],
[
104029,
104037
]
],
[
[
1343,
1346
],
[
8112,
8115
],
[
18767,
18770
],
[
84181,
84184
],
[
123023,
123026
],
[
123093,
123096
],
[
123164,
123167
],
[
5209,
5212
],
[
5274,
5277
],
[
7316,
7319
],
[
7381,
7384
],
[
12074,
12077
],
[
17984,
17987
],
[
18049,
18052
],
[
24933,
24936
],
[
25191,
25194
],
[
26115,
26118
],
[
26268,
26271
],
[
29405,
29408
],
[
36090,
36093
],
[
36155,
36158
],
[
39210,
39213
],
[
45653,
45656
],
[
56113,
56116
],
[
60899,
60902
],
[
63439,
63442
],
[
64342,
64345
],
[
64405,
64408
],
[
66417,
66420
],
[
68249,
68252
],
[
68452,
68455
],
[
70299,
70302
],
[
70339,
70342
],
[
73796,
73799
],
[
79574,
79577
],
[
79654,
79657
],
[
79713,
79716
],
[
79767,
79770
],
[
82397,
82400
],
[
91135,
91138
],
[
95053,
95056
],
[
95118,
95121
],
[
97187,
97190
],
[
97252,
97255
],
[
99908,
99911
],
[
100018,
100021
],
[
101436,
101439
],
[
101519,
101522
],
[
102027,
102030
],
[
102109,
102112
],
[
103275,
103278
],
[
109322,
109325
],
[
116379,
116382
],
[
119371,
119374
],
[
122328,
122331
],
[
122393,
122396
],
[
126786,
126789
],
[
127286,
127289
],
[
128049,
128052
],
[
17018,
17021
],
[
35039,
35042
]
],
[
[
1387,
1400
],
[
68340,
68353
]
],
[
[
1441,
1453
],
[
80488,
80500
],
[
83863,
83875
]
],
[
[
1491,
1495
],
[
74971,
74975
],
[
78743,
78747
],
[
80783,
80787
]
],
[
[
1533,
1570
],
[
45770,
45790
],
[
45851,
45871
],
[
45932,
45952
],
[
56263,
56283
],
[
61049,
61069
],
[
109575,
109595
]
],
[
[
1608,
1627
],
[
63481,
63492
],
[
73838,
73849
]
],
[
[
1665,
1702
],
[
27184,
27204
]
],
[
[
1740,
1765
],
[
5423,
5437
],
[
7530,
7544
],
[
95267,
95281
],
[
97401,
97415
]
],
[
[
1800,
1809
],
[
12826,
12835
],
[
13942,
13951
],
[
14486,
14495
],
[
18197,
18206
],
[
18224,
18233
],
[
29668,
29677
],
[
33919,
33928
],
[
33970,
33979
],
[
34298,
34307
],
[
34349,
34358
],
[
36528,
36537
],
[
36593,
36602
],
[
36687,
36696
],
[
36779,
36788
],
[
51553,
51562
],
[
64614,
64623
],
[
64655,
64664
],
[
64714,
64723
],
[
64850,
64859
],
[
64910,
64919
],
[
64976,
64985
],
[
66006,
66015
],
[
66484,
66493
],
[
66517,
66526
],
[
66579,
66588
],
[
66640,
66649
],
[
80910,
80919
],
[
80991,
81000
],
[
81107,
81116
],
[
82531,
82540
],
[
82854,
82863
],
[
82984,
82993
],
[
83056,
83065
],
[
83103,
83112
],
[
83244,
83253
],
[
113948,
113957
],
[
114050,
114059
],
[
114081,
114090
],
[
115844,
115853
],
[
116536,
116545
],
[
116739,
116748
],
[
116794,
116803
],
[
117276,
117285
],
[
117353,
117362
],
[
117382,
117391
],
[
117713,
117722
],
[
117783,
117792
],
[
121033,
121042
],
[
121100,
121109
],
[
121124,
121133
],
[
121222,
121231
],
[
121292,
121301
],
[
121355,
121364
],
[
127787,
127796
],
[
128569,
128578
],
[
128587,
128596
],
[
128657,
128666
],
[
128678,
128687
],
[
17124,
17133
],
[
35171,
35180
],
[
35197,
35206
],
[
81295,
81304
]
],
[
[
1844,
1853
],
[
66308,
66317
]
],
[
[
1888,
1896
],
[
37321,
37329
],
[
41175,
41183
],
[
48041,
48049
],
[
53100,
53108
],
[
57833,
57841
],
[
70743,
70751
],
[
105729,
105737
],
[
122908,
122916
],
[
13663,
13671
],
[
14208,
14216
],
[
14822,
14830
],
[
15284,
15292
],
[
26596,
26604
],
[
26733,
26741
],
[
26843,
26851
],
[
26962,
26970
],
[
30875,
30883
],
[
31522,
31530
],
[
32493,
32501
],
[
32977,
32985
],
[
92355,
92363
],
[
92750,
92758
],
[
117122,
117130
]
],
[
[
1931,
1941
],
[
81520,
81530
]
],
[
[
1976,
1984
],
[
64792,
64800
],
[
66075,
66083
],
[
79870,
79878
],
[
80281,
80289
],
[
80340,
80348
],
[
81931,
81939
],
[
82300,
82308
],
[
82700,
82708
],
[
83439,
83447
],
[
83568,
83576
],
[
116555,
116563
],
[
116634,
116642
],
[
117524,
117532
],
[
117618,
117626
],
[
120885,
120893
],
[
120909,
120917
],
[
120938,
120946
],
[
121334,
121342
],
[
122445,
122453
],
[
122465,
122473
],
[
122524,
122532
],
[
122568,
122576
],
[
122629,
122637
],
[
81281,
81289
],
[
81571,
81579
]
],
[
[
2019,
2021
],
[
40942,
40944
],
[
47868,
47870
],
[
52927,
52929
],
[
57660,
57662
],
[
70490,
70492
],
[
105556,
105558
],
[
129077,
129079
],
[
129155,
129157
],
[
129298,
129300
],
[
129364,
129366
],
[
33853,
33855
],
[
34045,
34047
],
[
34161,
34163
],
[
34424,
34426
],
[
36878,
36880
],
[
40344,
40346
],
[
51618,
51620
],
[
52514,
52516
],
[
82685,
82687
],
[
82910,
82912
],
[
83170,
83172
],
[
83325,
83327
],
[
83424,
83426
],
[
83498,
83500
],
[
92996,
92998
],
[
93159,
93161
],
[
100300,
100302
],
[
111957,
111959
],
[
113015,
113017
],
[
114010,
114012
],
[
117474,
117476
],
[
128510,
128512
],
[
15612,
15614
],
[
15758,
15760
]
],
[
[
2056,
2066
],
[
66134,
66144
]
],
[
[
2101,
2113
],
[
101601,
101613
],
[
101645,
101657
],
[
127896,
127908
]
],
[
[
2148,
2162
],
[
11974,
11988
],
[
25972,
25986
],
[
32179,
32193
],
[
32242,
32256
],
[
33286,
33300
],
[
39082,
39096
],
[
45515,
45529
],
[
50626,
50640
],
[
55829,
55843
],
[
60611,
60625
],
[
63316,
63330
],
[
73647,
73661
],
[
91005,
91019
],
[
103204,
103218
],
[
109173,
109187
],
[
113803,
113817
],
[
119301,
119315
],
[
120544,
120558
],
[
126717,
126731
],
[
127327,
127341
],
[
128092,
128106
]
],
[
[
2197,
2222
],
[
70137,
70149
]
],
[
[
2262,
2277
],
[
16746,
16761
],
[
16897,
16912
],
[
17446,
17461
],
[
17597,
17612
],
[
34767,
34782
],
[
34918,
34933
],
[
35551,
35566
],
[
35702,
35717
]
],
[
[
2322,
2328
]
],
[
[
2467,
2474
]
],
[
[
3535,
3551
],
[
5098,
5114
],
[
11863,
11879
],
[
12752,
12768
],
[
25809,
25825
],
[
26402,
26418
],
[
29732,
29748
],
[
33808,
33824
],
[
34116,
34132
],
[
36497,
36513
],
[
38971,
38987
],
[
39490,
39506
],
[
39606,
39622
],
[
55993,
56009
],
[
94942,
94958
],
[
108889,
108905
],
[
116032,
116048
]
],
[
[
3561,
3577
],
[
3919,
3935
],
[
8274,
8290
],
[
19057,
19073
],
[
37541,
37557
],
[
52891,
52907
],
[
93716,
93732
],
[
105508,
105524
],
[
5116,
5132
],
[
11881,
11897
],
[
12959,
12975
],
[
25827,
25843
],
[
38989,
39005
],
[
56011,
56027
],
[
94960,
94976
],
[
108907,
108923
],
[
115922,
115938
]
],
[
[
3587,
3604
],
[
7201,
7218
],
[
60775,
60792
],
[
97072,
97089
]
],
[
[
3615,
3632
],
[
5973,
5990
],
[
57623,
57640
],
[
95797,
95814
],
[
7220,
7237
],
[
60794,
60811
],
[
97091,
97108
]
],
[
[
3643,
3657
],
[
24380,
24394
]
],
[
[
3788,
3798
]
],
[
[
5842,
5852
]
],
[
[
7934,
7951
],
[
25229,
25246
]
],
[
[
18484,
18494
]
],
[
[
37246,
37254
]
],
[
[
40718,
40729
],
[
47702,
47713
],
[
47730,
47741
]
],
[
[
47686,
47699
],
[
129464,
129477
]
],
[
[
47714,
47727
],
[
129487,
129500
]
],
[
[
47763,
47785
],
[
129605,
129627
]
],
[
[
52770,
52793
],
[
129520,
129543
]
],
[
[
57502,
57525
],
[
129563,
129586
]
],
[
[
62288,
62295
]
],
[
[
63846,
63853
]
],
[
[
65461,
65482
],
[
68387,
68408
]
],
[
[
66204,
66224
],
[
68500,
68520
]
],
[
[
67345,
67359
]
],
[
[
68617,
68639
],
[
63395,
63417
],
[
69750,
69772
]
],
[
[
69500,
69522
],
[
25940,
25962
],
[
45436,
45458
],
[
55750,
55772
],
[
60532,
60554
],
[
73525,
73547
],
[
109019,
109041
]
],
[
[
69835,
69863
],
[
27991,
28019
],
[
28094,
28122
],
[
28240,
28268
],
[
28354,
28382
],
[
47154,
47182
],
[
47260,
47288
],
[
56954,
56982
],
[
57056,
57084
],
[
61740,
61768
],
[
61842,
61870
],
[
74395,
74423
],
[
74509,
74537
],
[
110518,
110546
],
[
110646,
110674
],
[
110797,
110825
]
],
[
[
70399,
70414
],
[
129046,
129061
],
[
129267,
129282
],
[
129333,
129348
],
[
129401,
129416
]
],
[
[
74967,
74970
],
[
84219,
84222
],
[
78385,
78388
],
[
87427,
87430
]
],
[
[
84283,
84286
]
],
[
[
87869,
87879
]
],
[
[
93585,
93595
]
],
[
[
95666,
95676
]
],
[
[
97800,
97804
]
],
[
[
100812,
100828
]
],
[
[
101911,
101928
],
[
128838,
128855
]
],
[
[
102157,
102163
]
],
[
[
103754,
103775
],
[
104110,
104131
]
],
[
[
103882,
103902
],
[
104061,
104081
]
],
[
[
104137,
104151
]
],
[
[
105365,
105388
],
[
129647,
129670
]
],
[
[
113296,
113303
]
],
[
[
114196,
114211
]
],
[
[
117952,
117957
]
],
[
[
119993,
120002
]
],
[
[
121397,
121415
]
],
[
[
122679,
122701
],
[
129117,
129139
],
[
129198,
129220
]
],
[
[
129022,
129025
]
],
[
[
129085,
129096
]
],
[
[
129164,
129177
]
],
[
[
129242,
129246
]
],
[
[
129307,
129312
]
],
[
[
129374,
129380
]
],
[
[
129455,
129461
]
],
[
[
129478,
129484
]
],
[
[
129501,
129517
]
],
[
[
129544,
129560
]
],
[
[
129587,
129602
]
],
[
[
129628,
129644
]
]
] |
#!/usr/bin/env python
""" Maps point charges obtained by GPAW and HORTON on the original'
' GROMACS topology initially modified by insertHbyList.py """
## jlh 2018/04/02
import ast
import h5py
import ase.io
from ase.io.cube import read_cube_data
import parmed as pmd
from parmed import gromacs
from insertHbyList import insertHbyList
import argparse
def main():
parser = argparse.ArgumentParser(\
description='Converts an all-atom cube file into united-atom'
' representation based on certain replacement rules')
#parser.add_argument('-c', '--charge',metavar='INTEGER_CHARGE',
# type=int,nargs='?', const=1, default=0)
#parser.add_argument('infile', nargs='?')
parser.add_argument('infile_pdb', nargs='?', metavar='infile.pdb',
default='system.pdb',
help="Original .pdb file, before insertion of implicit hydrogen.")
parser.add_argument('infile_top', nargs='?', metavar='infile.top',
default='system.top', help="Original GROMACS .top file")
parser.add_argument('infile_cube', nargs='?', metavar='infile.cube',
default='esp.cube',
help="ESP descrition (or other scalar field) in all-atom cube file.")
parser.add_argument('outfile_cube', nargs='?', metavar='outfile.cube',
default='esp_fitted_system.cube',
help="Output cube file, truncated to the atoms present in the "
"united-atom description.")
parser.add_argument('-i','--insertion-rules',
default="{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}",
help="A string representation of a python dictionary, describing how "
"many implicit hydrogens have been inserted at which atom. Example: "
"{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}")
args = parser.parse_args()
#implicitHbondingPartners={'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}
print('Using replacement rules "{}"...'.format(args.insertion_rules))
implicitHbondingPartners = ast.literal_eval(args.insertion_rules)
aa2ua_cube(args.infile_pdb, args.infile_top, args.infile_cube,
args.outfile_cube,implicitHbondingPartners=implicitHbondingPartners)
def aa2ua_cube(infile_pdb, infile_top, infile_cube,
outfile_cube,implicitHbondingPartners=
{'CD4':1,'CD3':1,'CA2':2,'CA3':2,'CA4':2,'CB2':2,'CB3':2}):
#infile_pdb = args.infile_pdb
#infile_top = args.infile_top
#infile_cube = args.infile_cube
#outfile_cube = args.outfile_cube
ase_struct=ase.io.read(infile_pdb)
pmd_struct = pmd.load_file(infile_pdb)
pmd_top = gromacs.GromacsTopologyFile(infile_top,parametrize=False)
# throws some warnings on angle types, does not matter for bonding info
pmd_top.strip(':SOL,CL') # strip water and electrolyte from system
pmd_top.box = pmd_struct.box # Needed because .prmtop contains box info
pmd_top.positions = pmd_struct.positions
new_ase_struct, new_pmd_struct, names, residues = insertHbyList(
ase_struct,pmd_top,implicitHbondingPartners,1.0)
surplus_atoms = len(new_ase_struct) - len(ase_struct)
print("{} atoms are going to be truncated from file"
"{}...".format(surplus_atoms,infile_cube))
# hdf5 = h5py.File(infile_h5,'r')
cube_data, cube_atoms = read_cube_data(infile_cube)
ase.io.write(outfile_cube, cube_atoms[0:len(ase_struct)], data=cube_data)
# ATTENTION: this script just truncates atoms based on total count difference
# in UA and AA representations
if __name__ == '__main__':
main()
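# Hedged usage sketch (illustrative only; the script name and file names below
# are assumptions, not part of the original source):
#
#   python aa2ua_cube.py system.pdb system.top esp.cube esp_ua.cube \
#       -i "{'CD4':1,'CD3':1}"
#
# ast.literal_eval turns the -i string into a plain dict mapping atom names to
# the number of implicit hydrogens inserted there, which the script hands to
# insertHbyList to rebuild the all-atom structure and work out how many
# surplus atoms to strip from the cube file.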
| [
[
[
189,
192
],
[
2030,
2033
]
],
[
[
200,
204
]
],
[
[
212,
218
],
[
2564,
2567
],
[
3366,
3369
]
],
[
[
243,
257
],
[
3334,
3348
]
],
[
[
265,
278
],
[
2605,
2608
]
],
[
[
298,
305
],
[
2645,
2652
]
],
[
[
332,
345
],
[
3026,
3039
]
],
[
[
354,
362
],
[
389,
397
]
],
[
[
368,
372
],
[
3589,
3593
]
],
[
[
2227,
2237
],
[
2073,
2083
]
]
] |
"""
This module contains the formulas for comparing Lab values with matrices
and vectors. The benefit of using NumPy's matrix capabilities is speed. These
calls can be used to efficiently compare large volumes of Lab colors.
"""
import numpy
def delta_e_cie1976(lab_color_vector, lab_color_matrix):
"""
Calculates the Delta E (CIE1976) between `lab_color_vector` and all
colors in `lab_color_matrix`.
"""
return numpy.sqrt(
numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
# noinspection PyPep8Naming
def delta_e_cie1994(lab_color_vector, lab_color_matrix,
K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
"""
Calculates the Delta E (CIE1994) of two colors.
    K_1:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles
"""
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
S_L = 1
S_C = 1 + K_1 * C_1
S_H = 1 + K_2 * C_1
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0))
# noinspection PyPep8Naming
def delta_e_cmc(lab_color_vector, lab_color_matrix, pl=2, pc=1):
"""
    Calculates the Delta E (CMC) of two colors.
    CMC values
      Acceptability: pl=2, pc=1
      Perceptibility: pl=1, pc=1
"""
L, a, b = lab_color_vector
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
H_1 = numpy.degrees(numpy.arctan2(b, a))
if H_1 < 0:
H_1 += 360
F = numpy.sqrt(numpy.power(C_1, 4) / (numpy.power(C_1, 4) + 1900.0))
# noinspection PyChainedComparisons
if 164 <= H_1 and H_1 <= 345:
T = 0.56 + abs(0.2 * numpy.cos(numpy.radians(H_1 + 168)))
else:
T = 0.36 + abs(0.4 * numpy.cos(numpy.radians(H_1 + 35)))
if L < 16:
S_L = 0.511
else:
S_L = (0.040975 * L) / (1 + 0.01765 * L)
S_C = ((0.0638 * C_1) / (1 + 0.0131 * C_1)) + 0.638
S_H = S_C * (F * T + 1 - F)
delta_C = C_1 - C_2
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[pl * S_L], [pc * S_C], [S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0))
# noinspection PyPep8Naming
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):
"""
Calculates the Delta E (CIE2000) of two colors.
"""
L, a, b = lab_color_vector
avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
avg_C1_C2 = (C1 + C2) / 2.0
G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))
a1p = (1.0 + G) * a
a2p = (1.0 + G) * lab_color_matrix[:, 1]
C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
avg_C1p_C2p = (C1p + C2p) / 2.0
h1p = numpy.degrees(numpy.arctan2(b, a1p))
h1p += (h1p < 0) * 360
h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
h2p += (h2p < 0) * 360
avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \
0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \
0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \
0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
diff_h2p_h1p = h2p - h1p
delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
delta_hp -= (h2p > h1p) * 720
delta_Lp = lab_color_matrix[:, 0] - L
delta_Cp = C2p - C1p
delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))
S_C = 1 + 0.045 * avg_C1p_C2p
S_H = 1 + 0.015 * avg_C1p_C2p * T
delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))
R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
return numpy.sqrt(
numpy.power(delta_Lp / (S_L * Kl), 2) +
numpy.power(delta_Cp / (S_C * Kc), 2) +
numpy.power(delta_Hp / (S_H * Kh), 2) +
R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh)))
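# A minimal usage sketch (added for illustration, not part of the original
# module): compare one Lab colour against a matrix of candidate colours, as the
# module docstring describes. The sample values are made up; only the numpy
# import above is assumed.
if __name__ == "__main__":
    _lab_ref = numpy.array([50.0, 2.5, -3.0])          # reference colour (L, a, b)
    _lab_matrix = numpy.array([[50.0, 2.5, -3.0],
                               [55.0, 1.0, 4.0]])      # one candidate per row
    # Each function returns one Delta E value per row of the matrix.
    print(delta_e_cie1976(_lab_ref, _lab_matrix))
    print(delta_e_cie2000(_lab_ref, _lab_matrix))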
| [
[
[
237,
242
],
[
435,
440
],
[
455,
460
],
[
465,
470
],
[
908,
913
],
[
919,
924
],
[
929,
934
],
[
978,
983
],
[
989,
994
],
[
999,
1004
],
[
1212,
1217
],
[
1222,
1227
],
[
1250,
1255
],
[
1331,
1336
],
[
1438,
1443
],
[
1493,
1498
],
[
1558,
1563
],
[
1569,
1574
],
[
1579,
1584
],
[
1904,
1909
],
[
1915,
1920
],
[
1925,
1930
],
[
1974,
1979
],
[
1985,
1990
],
[
1995,
2000
],
[
2201,
2206
],
[
2215,
2220
],
[
2281,
2286
],
[
2292,
2297
],
[
2315,
2320
],
[
2450,
2455
],
[
2460,
2465
],
[
2526,
2531
],
[
2536,
2541
],
[
2789,
2794
],
[
2799,
2804
],
[
2827,
2832
],
[
2908,
2913
],
[
2954,
2959
],
[
3009,
3014
],
[
3066,
3071
],
[
3077,
3082
],
[
3087,
3092
],
[
3389,
3394
],
[
3400,
3405
],
[
3410,
3415
],
[
3458,
3463
],
[
3469,
3474
],
[
3479,
3484
],
[
3582,
3587
],
[
3593,
3598
],
[
3624,
3629
],
[
3654,
3659
],
[
3761,
3766
],
[
3772,
3777
],
[
3794,
3799
],
[
3823,
3828
],
[
3834,
3839
],
[
3856,
3861
],
[
3944,
3949
],
[
3958,
3963
],
[
4019,
4024
],
[
4033,
4038
],
[
4121,
4126
],
[
4196,
4201
],
[
4206,
4211
],
[
4253,
4258
],
[
4263,
4268
],
[
4309,
4314
],
[
4319,
4324
],
[
4368,
4373
],
[
4378,
4383
],
[
4471,
4476
],
[
4630,
4635
],
[
4654,
4659
],
[
4664,
4669
],
[
4720,
4725
],
[
4751,
4756
],
[
4767,
4772
],
[
4892,
4897
],
[
4904,
4909
],
[
4956,
4961
],
[
4968,
4973
],
[
5002,
5007
],
[
5034,
5039
],
[
5080,
5085
],
[
5094,
5099
],
[
5131,
5136
],
[
5151,
5156
],
[
5199,
5204
],
[
5247,
5252
]
],
[
[
249,
264
]
],
[
[
561,
576
]
],
[
[
1652,
1663
]
],
[
[
3160,
3175
]
]
] |
import os
import webbrowser
import time
import random
import requests
from bs4 import BeautifulSoup
from prettytable import PrettyTable
from time import sleep
cont = 1
print("Welcome to Qp Bank !")
sleep(1)
print("Crafted with love by Mathan.S")
sleep(1)
print("Ensure your connectivity to Amrita Wifi for smooth experience :)")
# Captcha Disabled
"""a=["M234x","Ad34T","Fr45C","J234r","PKa67"]
z=random.randint(0,4)
print(a[z])
captcha=input("Enter the captcha")
while(captcha!=a[z]):
print("Enter the correct captcha..")
a=["M234x","Ad34T","Fr45C","J234r","PKa67"]
z=random.randint(0,4)
print(a[z])
captcha=input("Enter the captcha")"""
while(cont==1):
url="http://dspace.amritanet.edu:8080/xmlui/handle/123456789/150"
page=requests.get(url)
soup=BeautifulSoup(page.content,'html.parser')
div=soup.div
main_div=soup.find(id="aspect_artifactbrowser_CommunityViewer_div_community-view")
t=PrettyTable(["S.No","Attribute"])
main_list_item=main_div.ul
items=main_list_item.findAll("li")
for i in range(len(items)):
t.add_row([i+1,items[i].a.text.strip()])
print(t)
ch=int(input("Enter your Semester "))
while((ch>len(items)) or (ch<0)):
ch=int(input("Enter your Semester "))
url="http://dspace.amritanet.edu:8080"
url+=items[ch-1].a["href"]
print("Give me just a minute...")
sec_page=requests.get(url)
sec_soup=BeautifulSoup(sec_page.content,'html.parser')
u=sec_soup.findAll("ul")
if ch<=6:
sec_li=u[3].findAll("li")
else:
sec_li=u[2].findAll("li")
p=PrettyTable(["S.No","Title"])
for j in range(len(sec_li)):
p.add_row([j+1,sec_li[j].a.text.strip()])
print(p)
ch3=int(input("Enter your choice "))
while((ch3>len(sec_li)) or (ch3<0)):
ch3=int(input("Enter your choice "))
url="http://dspace.amritanet.edu:8080/"
url+=sec_li[ch3-1].a["href"]
third_page=requests.get(url)
third_soup=BeautifulSoup(third_page.content,'html.parser')
u3_div=third_soup.findAll("div",class_="ds-static-div secondary recent-submission")
third_div=u3_div[0].a.text.strip()
third_li=u3_div[0].findAll("li")
m=PrettyTable(["S.No","Title"])
m.add_row([1,third_div])
print(m)
ch4=int(input("Enter your choice "))
while((ch4>len(third_li)) or (ch4<0)):
ch4=int(input("Enter your choice "))
url="http://dspace.amritanet.edu:8080/"
url+=third_li[ch4-1].a["href"]
fourth_page=requests.get(url)
fourth_soup=BeautifulSoup(fourth_page.content,'html.parser')
u4_div=fourth_soup.findAll("div",class_="file-metadata")
v=PrettyTable(["S.No","Subjects"])
u4_temp=[]
mod_u4_temp=[]
for i in range(len(u4_div)):
u4_temp.append((u4_div[i].findAll("span")[1].text))
for j in range(len(u4_temp)):
mod_u4_temp=u4_temp[j].split(",")
v.add_row([j+1,mod_u4_temp[0]])
print(v)
last_ch=int(input("Enter your choice "))
last_div=fourth_soup.findAll("div",class_="file-link")
while((last_ch>len(last_div)) or (last_ch<0)):
last_ch=int(input("Enter your choice "))
url_last="http://dspace.amritanet.edu:8080"
url_last+=last_div[last_ch-1].a["href"]
print("All the very best for your exams :)")
sleep(1)
download=int(input("Enter 1 to download or 0 to open in browser "))
while(download!=1 and download!=0):
download=int(input("Enter 1 to download or 0 to open in browser "))
print("Give me just a minute :)")
if(download==1):
response = requests.get(url_last)
spliturl=url_last.split("/")
namewithext=spliturl[-1]
name_reduced=namewithext.split("?")
save=name_reduced[0]
#save+=".pdf"
with open(save,'wb') as f:
f.write(response.content)
print("The Qp is waiting for you at "+os.getcwd())
else:
print("The Qp is waiting for you :)")
webbrowser.open_new(url_last)
cont=int(input("Enter 1 to view another Qp or 0 to exit "))
| [
[
[
7,
9
],
[
3666,
3668
]
],
[
[
18,
28
],
[
3731,
3741
]
],
[
[
37,
41
]
],
[
[
50,
56
]
],
[
[
65,
73
],
[
772,
780
],
[
1366,
1374
],
[
1877,
1885
],
[
2399,
2407
],
[
3402,
3410
]
],
[
[
91,
104
],
[
797,
810
],
[
1395,
1408
],
[
1908,
1921
],
[
2431,
2444
]
],
[
[
130,
141
],
[
943,
954
],
[
1554,
1565
],
[
2120,
2131
],
[
2545,
2556
]
],
[
[
160,
165
],
[
212,
217
],
[
262,
267
],
[
3145,
3150
]
],
[
[
169,
173
],
[
683,
687
]
],
[
[
699,
702
],
[
785,
788
]
],
[
[
767,
771
],
[
811,
815
]
],
[
[
792,
796
],
[
845,
849
],
[
865,
869
]
],
[
[
841,
844
]
],
[
[
856,
864
],
[
994,
1002
]
],
[
[
941,
942
],
[
1076,
1077
],
[
1125,
1126
]
],
[
[
979,
993
],
[
1014,
1028
]
],
[
[
1008,
1013
],
[
1064,
1069
],
[
1091,
1096
],
[
1184,
1189
],
[
1293,
1298
]
],
[
[
1049,
1050
],
[
1087,
1088
],
[
1097,
1098
]
],
[
[
1130,
1132
],
[
1177,
1179
],
[
1196,
1198
],
[
1299,
1301
],
[
1475,
1477
]
],
[
[
1207,
1209
],
[
1177,
1179
],
[
1196,
1198
],
[
1299,
1301
],
[
1475,
1477
]
],
[
[
1247,
1250
],
[
1288,
1291
]
],
[
[
1357,
1365
],
[
1409,
1417
]
],
[
[
1386,
1394
],
[
1445,
1453
]
],
[
[
1443,
1444
],
[
1492,
1493
],
[
1529,
1530
]
],
[
[
1485,
1491
],
[
1605,
1611
],
[
1633,
1639
],
[
1729,
1735
],
[
1840,
1846
]
],
[
[
1522,
1528
],
[
1605,
1611
],
[
1633,
1639
],
[
1729,
1735
],
[
1840,
1846
]
],
[
[
1552,
1553
],
[
1618,
1619
],
[
1668,
1669
]
],
[
[
1590,
1591
],
[
1629,
1630
],
[
1640,
1641
]
],
[
[
1675,
1678
],
[
1721,
1724
],
[
1742,
1745
],
[
1847,
1850
]
],
[
[
1754,
1757
],
[
1721,
1724
],
[
1742,
1745
],
[
1847,
1850
]
],
[
[
1793,
1796
],
[
1835,
1838
]
],
[
[
1866,
1876
],
[
1922,
1932
]
],
[
[
1897,
1907
],
[
1965,
1975
]
],
[
[
1958,
1964
],
[
2054,
2060
],
[
2090,
2096
]
],
[
[
2044,
2053
],
[
2165,
2174
]
],
[
[
2081,
2089
],
[
2246,
2254
],
[
2359,
2367
]
],
[
[
2118,
2119
],
[
2152,
2153
],
[
2185,
2186
]
],
[
[
2192,
2195
],
[
2238,
2241
],
[
2261,
2264
],
[
2368,
2371
]
],
[
[
2273,
2276
],
[
2238,
2241
],
[
2261,
2264
],
[
2368,
2371
]
],
[
[
2312,
2315
],
[
2354,
2357
]
],
[
[
2387,
2398
],
[
2445,
2456
]
],
[
[
2419,
2430
],
[
2489,
2500
],
[
2867,
2878
]
],
[
[
2482,
2488
],
[
2629,
2635
],
[
2658,
2664
]
],
[
[
2543,
2544
],
[
2766,
2767
],
[
2808,
2809
]
],
[
[
2580,
2587
],
[
2642,
2649
],
[
2715,
2722
],
[
2741,
2748
]
],
[
[
2593,
2604
]
],
[
[
2614,
2615
],
[
2665,
2666
]
],
[
[
2700,
2701
],
[
2749,
2750
],
[
2777,
2778
]
],
[
[
2729,
2740
],
[
2781,
2792
]
],
[
[
2815,
2822
],
[
2922,
2929
],
[
2949,
2956
],
[
3073,
3080
]
],
[
[
2858,
2866
],
[
2934,
2942
],
[
3064,
3072
]
],
[
[
2965,
2972
],
[
2922,
2929
],
[
2949,
2956
],
[
3073,
3080
]
],
[
[
3008,
3016
],
[
3054,
3062
]
],
[
[
3156,
3164
],
[
3232,
3240
],
[
3248,
3256
],
[
3374,
3382
]
],
[
[
3265,
3273
],
[
3232,
3240
],
[
3248,
3256
],
[
3374,
3382
]
],
[
[
3391,
3399
],
[
3607,
3615
]
],
[
[
3428,
3436
],
[
3472,
3480
]
],
[
[
3460,
3471
],
[
3501,
3512
]
],
[
[
3488,
3500
],
[
3532,
3544
]
],
[
[
3527,
3531
],
[
3578,
3582
]
],
[
[
3592,
3593
],
[
3599,
3600
]
],
[
[
3763,
3767
],
[
683,
687
]
]
] |
from __future__ import annotations
from .data_structures import Stack
from .operation import Operation
class HistoryManager:
def __init__(self):
self.undo_stack: Stack[Operation] = Stack()
self.redo_stack: Stack[Operation] = Stack()
def add_operation(self, operation_instance: Operation):
self.undo_stack.append(operation_instance)
def undo(self) -> Operation:
operation_to_undo = self.undo_stack.pop()
self.redo_stack.append(operation_to_undo)
return operation_to_undo
def redo(self) -> Operation:
operation_to_redo = self.redo_stack.pop()
self.undo_stack.append(operation_to_redo)
return operation_to_redo
def __contains__(self, item):
if isinstance(item, Operation):
return item in self.undo_stack
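# Hedged usage sketch (illustrative only; SomeOperation is a hypothetical
# Operation subclass defined elsewhere in the package):
#
#   history = HistoryManager()
#   op = SomeOperation()
#   history.add_operation(op)   # pushed onto the undo stack
#   history.undo()              # popped from undo, pushed onto redo
#   history.redo()              # moved back onto the undo stack
#   assert op in history        # __contains__ checks the undo stack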
| [
[
[
23,
34
]
],
[
[
65,
70
],
[
196,
201
],
[
177,
182
],
[
248,
253
],
[
229,
234
]
],
[
[
94,
103
],
[
183,
192
],
[
235,
244
],
[
305,
314
],
[
391,
400
],
[
558,
567
],
[
765,
774
]
],
[
[
112,
126
]
]
] |
import plotly.graph_objects as go
from mainapp.app_settings import cell_length_meter
def getLineChart(
data,
timestamp,
coordinates,
colorScale,
timeline,
color_range,
dragmode=False,
quick_select_range=True,
calibration_time=None,
show_legend=False):
if len(data) < 1: return {
'data': [],
'layout': go.Layout(title=go.layout.Title(text='No data found'))
}
x = data.iloc[:, 0].values
linechart_fig = go.Figure()
means = data.iloc[:, 1:].transpose().mean().transpose()
var = data.iloc[:, 1:].transpose().std().transpose()
# Add continuous error bars to the plot
'''error_colors = ['#d9d9d9', '#bdbdbd', '#969696']
for i in reversed(range(1, 4)):
fill_color = error_colors[i-1]
if data.shape[1] > 2:
linechart_fig.add_trace(go.Scatter(
x=x,
y=means - i * var,
mode='lines',
line=dict(width=1, color='black'),
showlegend=False
))
linechart_fig.add_trace(go.Scatter(
name='{} sigma'.format(i),
x=x,
y=means + i * var,
mode='lines',
marker=dict(color="#444"),
line=dict(width=1, color='black'),
fillcolor=fill_color,
fill='tonexty'))'''
# Add individual traces to the plot
ys = data.shape[1]
for y in range(1, ys):
coord = coordinates[y-1]
y = data.iloc[:, y].values
linechart_fig.add_trace(go.Scatter(
name='[{:2d},{:2d}]'.format(coord['x'], coord['y']),
x=x,
y=y,
mode='lines+markers',
line=dict(
width=1,
color='#292929'),
marker=dict(
size=2,
color='#292929'),
showlegend=show_legend
))
# Add central values to the plot
'''if data.shape[1] > 1:
if data.shape[1] == 2:
trace_name = '[{:d},{:d}]'.format(coordinates[0]['x'], coordinates[0]['y'])
else:
trace_name = 'Average'
linechart_fig.add_trace(go.Scatter(
name=trace_name,
x=x,
y=means,
mode='lines+markers',
line=dict(
color='#292929',
width=1,
),
marker=dict(
color='#292929',
size=3,
),
showlegend=True,
))'''
# Add vertical line representing selected timestamp
linechart_fig.add_shape(
# Line Vertical
dict(
name='selected timestamp',
type="line",
yref='paper',
x0=timestamp,
y0=0,
x1=timestamp,
y1=1,
line=dict(
color="black",
width=5
),
))
# Add vertical line representing selected calibration
if calibration_time is not None:
linechart_fig.add_shape(
# Line Vertical
dict(
name='calibration time',
type="line",
yref='paper',
x0=calibration_time,
y0=0,
x1=calibration_time,
y1=1,
line=dict(
color="green",
width=5
),
))
#Add colorbar to plot
if color_range['min'] is not None and color_range['max'] is not None:
min = color_range['min']
max = color_range['max']
width_of_line = (color_range['max'] - color_range['min']) / len(colorScale)
for i in range(len(colorScale)):
linechart_fig.add_shape(
dict(
type="rect",
xref="paper",
yref="y",
x0=0,
y0= min + i*width_of_line, #if i > 0 else 0 if min <= max else 12000,
x1=1,
y1=min + (i+1)*width_of_line, #if i < len(colorScale)-1 else 12000 if min <= max else 0,
fillcolor=colorScale[i][1],
opacity=0.6,
layer="below",
line_width=0,
)
)
range_selector = None
if quick_select_range:
range_selector = dict(
buttons=list([
dict(count=1, label="1m", step="minute", stepmode="backward"),
dict(count=1, label="1h", step="hour", stepmode="backward"),
dict(count=1, label="1d", step="day", stepmode="backward"),
dict(count=7, label="1w", step="day", stepmode="backward")
])
)
linechart_fig.update_layout(
xaxis=dict(
range=[timeline['start'], timeline['end']],
type="date",
linecolor='black',
gridcolor='LightGrey',
rangeselector=range_selector
),
yaxis=dict(
title='Resistivity (Ohm)',
rangemode='tozero',
linecolor='black',
gridcolor='LightGrey',
fixedrange=True
),
margin=dict(
l=15,
r=0,
t=30,
b=5,
pad=0
),
plot_bgcolor='white',
dragmode=dragmode,
height=250,
)
return linechart_fig
| [
[
[
7,
33
],
[
404,
406
],
[
420,
422
],
[
518,
520
],
[
1624,
1626
]
],
[
[
68,
85
]
],
[
[
92,
104
]
]
] |
from __future__ import absolute_import
from sentry.testutils import TestCase
from sentry.api.fields.actor import Actor
from sentry.models import ProjectOwnership, User, Team
from sentry.models.projectownership import resolve_actors
from sentry.ownership.grammar import Rule, Owner, Matcher, dump_schema
class ProjectOwnershipTestCase(TestCase):
def assert_ownership_equals(self, o1, o2):
assert (
sorted(o1[0]) == sorted(o2[0]) and
sorted(o1[1]) == sorted(o2[1])
)
def test_get_owners_default(self):
assert ProjectOwnership.get_owners(self.project.id, {}) == (ProjectOwnership.Everyone, None)
def test_get_owners_basic(self):
rule_a = Rule(
Matcher('path', '*.py'), [
Owner('team', self.team.slug),
])
rule_b = Rule(
Matcher('path', 'src/*'), [
Owner('user', self.user.email),
])
ProjectOwnership.objects.create(
project_id=self.project.id,
schema=dump_schema([rule_a, rule_b]),
fallthrough=True,
)
# No data matches
assert ProjectOwnership.get_owners(self.project.id, {}) == (ProjectOwnership.Everyone, None)
# Match only rule_a
self.assert_ownership_equals(ProjectOwnership.get_owners(
self.project.id, {
'sentry.interfaces.Stacktrace': {
'frames': [{
'filename': 'foo.py',
}]
}
}
), ([Actor(self.team.id, Team)], [rule_a]))
# Match only rule_b
self.assert_ownership_equals(ProjectOwnership.get_owners(
self.project.id, {
'sentry.interfaces.Stacktrace': {
'frames': [{
'filename': 'src/thing.txt',
}]
}
}
), ([Actor(self.user.id, User)], [rule_b]))
# Matches both rule_a and rule_b
self.assert_ownership_equals(ProjectOwnership.get_owners(
self.project.id, {
'sentry.interfaces.Stacktrace': {
'frames': [{
'filename': 'src/foo.py',
}]
}
}
), ([Actor(self.user.id, User), Actor(self.team.id, Team)], [rule_a, rule_b]))
assert ProjectOwnership.get_owners(
self.project.id, {
'sentry.interfaces.Stacktrace': {
'frames': [{
'filename': 'xxxx',
}]
}
}
) == (ProjectOwnership.Everyone, None)
# When fallthrough = False, we don't implicitly assign to Everyone
ProjectOwnership.objects.filter(
project_id=self.project.id,
).update(fallthrough=False)
assert ProjectOwnership.get_owners(
self.project.id, {
'sentry.interfaces.Stacktrace': {
'frames': [{
'filename': 'xxxx',
}]
}
}
) == ([], None)
class ResolveActorsTestCase(TestCase):
def test_no_actors(self):
assert resolve_actors([], self.project.id) == {}
def test_basic(self):
owners = [
Owner('user', self.user.email),
Owner('team', self.team.slug),
]
assert resolve_actors(owners, self.project.id) == {
owners[0]: Actor(self.user.id, User),
owners[1]: Actor(self.team.id, Team),
}
def test_teams(self):
# Normal team
owner1 = Owner('team', self.team.slug)
actor1 = Actor(self.team.id, Team)
# Team that doesn't exist
owner2 = Owner('team', 'nope')
actor2 = None
# A team that's not ours
otherteam = Team.objects.exclude(projectteam__project_id=self.project.id)[0]
owner3 = Owner('team', otherteam.slug)
actor3 = None
assert resolve_actors([owner1, owner2, owner3], self.project.id) == {
owner1: actor1,
owner2: actor2,
owner3: actor3,
}
def test_users(self):
# Normal user
owner1 = Owner('user', self.user.email)
actor1 = Actor(self.user.id, User)
# An extra secondary email
email1 = self.create_useremail(self.user, None, is_verified=True).email
owner2 = Owner('user', email1)
actor2 = actor1 # They map to the same user since it's just a secondary email
# Another secondary email, that isn't verified
email2 = self.create_useremail(self.user, None, is_verified=False).email
owner3 = Owner('user', email2)
# Intentionally allow unverified emails
# actor3 = None
actor3 = actor1
# An entirely unknown user
owner4 = Owner('user', 'nope')
actor4 = None
# A user that doesn't belong with us
otheruser = self.create_user()
owner5 = Owner('user', otheruser.email)
actor5 = None
# Case-insensitive for user
owner6 = Owner('user', self.user.email.upper())
actor6 = actor1
assert resolve_actors([owner1, owner2, owner3, owner4, owner5, owner6], self.project.id) == {
owner1: actor1,
owner2: actor2,
owner3: actor3,
owner4: actor4,
owner5: actor5,
owner6: actor6,
}
| [
[
[
23,
38
]
],
[
[
69,
77
],
[
337,
345
],
[
3197,
3205
]
],
[
[
114,
119
],
[
1565,
1570
],
[
1934,
1939
],
[
2313,
2318
],
[
2340,
2345
],
[
3521,
3526
],
[
3571,
3576
],
[
3721,
3726
],
[
4318,
4323
]
],
[
[
146,
162
],
[
567,
583
],
[
620,
636
],
[
951,
967
],
[
1156,
1172
],
[
1209,
1225
],
[
1308,
1324
],
[
1670,
1686
],
[
2052,
2068
],
[
2403,
2419
],
[
2659,
2675
],
[
2776,
2792
],
[
2901,
2917
]
],
[
[
164,
168
],
[
1954,
1958
],
[
2333,
2337
],
[
3541,
3545
],
[
4338,
4342
]
],
[
[
170,
174
],
[
1585,
1589
],
[
2360,
2364
],
[
3591,
3595
],
[
3741,
3745
],
[
3897,
3901
]
],
[
[
218,
232
],
[
3253,
3267
],
[
3453,
3467
],
[
4047,
4061
],
[
5243,
5257
]
],
[
[
270,
274
],
[
708,
712
],
[
833,
837
]
],
[
[
276,
281
],
[
769,
774
],
[
895,
900
],
[
3353,
3358
],
[
3397,
3402
],
[
3674,
3679
],
[
3799,
3804
],
[
3979,
3984
],
[
4270,
4275
],
[
4477,
4482
],
[
4740,
4745
],
[
4911,
4916
],
[
5057,
5062
],
[
5164,
5169
]
],
[
[
283,
290
],
[
726,
733
],
[
851,
858
]
],
[
[
292,
303
],
[
1043,
1054
]
],
[
[
312,
336
]
],
[
[
3175,
3196
]
]
] |
import os
import time
# os.system("adb shell monkey -p com.xingin.xhs -c android.intent.category.LAUNCHER 1")
# os.system("sleep 4")
# os.system("adb shell input tap 1000 150")
# os.system("sleep 2")
# os.system("adb shell input text PUCO")
# os.system("sleep 2")
# os.system("adb shell input tap 1000 150")
# os.system("adb shell input swipe 340 800 340 500 1000")
for i in range(0,50):
os.system("adb shell input swipe 340 1200 340 400 1000")
os.system("adb shell input swipe 340 600 340 1800 1000")
time.sleep(1)
os.system("adb shell input swipe 340 1400 340 200 1000")
time.sleep(2)
| [
[
[
7,
9
],
[
394,
396
],
[
455,
457
],
[
534,
536
]
],
[
[
17,
21
],
[
516,
520
],
[
595,
599
]
],
[
[
372,
373
]
]
] |
import os
import helpers
import numpy
import pytest
import toughio
write_read = lambda output, writer_kws, reader_kws: helpers.write_read(
"output",
output,
toughio.write_output,
toughio.read_output,
writer_kws=writer_kws,
reader_kws=reader_kws,
)
@pytest.mark.parametrize(
"filename, data_ref",
[
(
"FOFT_A1912.csv",
{
"TIME": 4.393722000e9,
"PRES": 1.8740899675005e8,
"TEMP": 720.0,
"SAT_G": 0.0,
"SAT_L": 24.0,
},
),
(
"FOFT_A1912_T2.csv",
{
"TIME": 3.06639400e9,
"PRES": 1.83000721e8,
"TEMP": 660.0,
"SAT_G": 0.0,
"SAT_L": 22.0,
},
),
(
"GOFT_A1162.csv",
{"TIME": 4.393722000e9, "GEN": -30.0, "ENTG": 1.528048035348e7, "PWB": 0.0},
),
(
"GOFT_A1162_T2.csv",
{"TIME": 3.06639400e9, "GEN": -27.5, "ENTG": 1.40141971e7, "PWB": 0.0},
),
],
)
def test_history(filename, data_ref):
this_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(this_dir, "support_files", "outputs", filename)
data = toughio.read_history(filename)
for k, v in data_ref.items():
assert numpy.allclose(v, data[k].sum())
@pytest.mark.parametrize(
"filename, filename_ref",
[
("OUTPUT_ELEME.csv", "SAVE.out"),
("OUTPUT_ELEME.tec", "SAVE.out"),
("OUTPUT_ELEME_PETRASIM.csv", "SAVE.out"),
("OUTPUT.out", "SAVE.out"),
("OUTPUT_6.out", "SAVE_6.out"),
],
)
def test_output_eleme(filename, filename_ref):
this_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(this_dir, "support_files", "outputs", filename)
outputs = toughio.read_output(filename)
filename = os.path.join(this_dir, "support_files", "outputs", filename_ref)
save = toughio.read_output(filename)
assert len(outputs) == 5
times_ref = [
0.2592000e08,
0.3155800e08,
0.1577900e09,
0.3155800e09,
0.7889400e09,
]
keys_ref = ["POR", "PRES", "SAT_G", "TEMP", "X", "Y", "Z"]
for output, time_ref in zip(outputs, times_ref):
assert time_ref == output.time
assert (
save.labels.tolist() == output.labels.tolist()
if output.format in {"csv", "petrasim", "tough"}
else output.labels == None
)
if output.format != "tough":
assert keys_ref == sorted(list(output.data.keys()))
assert numpy.allclose(save.data["X1"], outputs[-1].data["PRES"])
assert numpy.allclose(save.data["X2"], outputs[-1].data["TEMP"], atol=0.1)
@pytest.mark.parametrize(
"filename",
["OUTPUT_CONNE.csv", "OUTPUT.out", "OUTPUT_6.out"],
)
def test_output_conne(filename):
this_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(this_dir, "support_files", "outputs", filename)
outputs = toughio.read_output(filename, connection=True)
times_ref = [
0.2592000e08,
0.3155800e08,
0.1577900e09,
0.3155800e09,
0.7889400e09,
]
data_ref = [
52542.0,
52475.0,
51146.0,
49600.0,
45623.0,
]
for output, time_ref, data in zip(outputs, times_ref, data_ref):
assert time_ref == output.time
assert (
len(set("".join(labels) for labels in output.labels))
== output.data["HEAT"].size
)
assert numpy.allclose(data, numpy.abs(output.data["HEAT"]).mean(), atol=1.0)
@pytest.mark.parametrize(
"output_ref, file_format",
[
(helpers.output_eleme, "csv"),
(helpers.output_eleme[0], "csv"),
(helpers.output_eleme, "petrasim"),
(helpers.output_eleme[0], "petrasim"),
(helpers.output_eleme, "tecplot"),
(helpers.output_eleme[0], "tecplot"),
(helpers.output_conne, "csv"),
(helpers.output_conne[0], "csv"),
],
)
def test_output(output_ref, file_format):
output = write_read(
output=output_ref,
writer_kws={"file_format": file_format},
reader_kws={},
)
output_ref = output_ref if isinstance(output_ref, list) else [output_ref]
for out_ref, out in zip(output_ref, output):
helpers.allclose_output(out_ref, out)
def test_save():
this_dir = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(this_dir, "support_files", "outputs", "SAVE.out")
save = toughio.read_output(filename)
x_ref = [6.35804123e05, 1.42894499e02, 9.91868799e-01]
assert numpy.allclose(
x_ref, numpy.mean([save.data["X1"], save.data["X2"], save.data["X3"]], axis=1)
)
assert numpy.allclose(0.01, save.data["porosity"].mean())
assert "userx" not in save.data.keys()
| [
[
[
7,
9
],
[
1180,
1182
],
[
1196,
1198
],
[
1238,
1240
],
[
1770,
1772
],
[
1786,
1788
],
[
1828,
1830
],
[
1949,
1951
],
[
2961,
2963
],
[
2977,
2979
],
[
3019,
3021
],
[
4506,
4508
],
[
4522,
4524
],
[
4564,
4566
]
],
[
[
18,
25
],
[
3784,
3791
],
[
3823,
3830
],
[
3865,
3872
],
[
3909,
3916
],
[
3956,
3963
],
[
3999,
4006
],
[
4045,
4052
],
[
4084,
4091
],
[
122,
129
],
[
4434,
4441
]
],
[
[
33,
38
],
[
1391,
1396
],
[
2674,
2679
],
[
2743,
2748
],
[
3640,
3645
],
[
3661,
3666
],
[
4739,
4744
],
[
4770,
4775
],
[
4860,
4865
]
],
[
[
46,
52
],
[
278,
284
],
[
1427,
1433
],
[
2814,
2820
],
[
3713,
3719
]
],
[
[
61,
68
],
[
172,
179
],
[
198,
205
],
[
1310,
1317
],
[
1903,
1910
],
[
2025,
2032
],
[
3094,
3101
],
[
4638,
4645
]
],
[
[
70,
80
],
[
4181,
4191
]
],
[
[
1131,
1143
]
],
[
[
1712,
1729
]
],
[
[
2917,
2934
]
],
[
[
4130,
4141
]
],
[
[
4478,
4487
]
]
] |
#Question Link
#https://www.codechef.com/problems/XORAGN
t=int(input())
for a0 in range(t):
n=int(input())
a=list(map(int,input().split()))
res=0
for i in a:
res=res^i #xorring all the values present
print(2*res) #doubling the result obtained
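# Worked example (illustrative, not part of the submission): for a single test
# case with n=2 and a=[1, 2], the loop leaves res = 1 ^ 2 = 3 and the program
# prints 2 * 3 = 6.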
| [
[
[
57,
58
],
[
88,
89
]
],
[
[
76,
78
]
],
[
[
96,
97
]
],
[
[
115,
116
],
[
171,
172
]
],
[
[
152,
155
],
[
186,
189
],
[
236,
239
]
],
[
[
166,
167
],
[
190,
191
]
],
[
[
182,
185
],
[
186,
189
],
[
236,
239
]
]
] |
import control as ctl
import numpy as np
def damp(sys,display=False):
pole_list = []
m_list = []
wn_list = []
for pole in sys.pole():
pole = pole.astype(complex) # WTF: the python control "damp" function is buggy due to this missing cast !
if ctl.isctime(sys):
pole_continuous = pole
else:
pole_continuous = np.log(pole)/sys.dt
wn = np.abs(pole_continuous)
m = -np.real(pole_continuous)/wn
pole_list.append(pole)
wn_list.append(wn)
m_list.append(m)
if display:
print("pole {:.3f} : wn={:.3f} rad/s, m= {:.3f}".format(pole, wn, m))
return wn_list, m_list, pole_list
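# Hedged usage sketch (illustrative; the transfer function below is made up):
#
#   sys = ctl.tf([1.0], [1.0, 2.0, 10.0])      # continuous second-order system
#   wn_list, m_list, pole_list = damp(sys, display=True)
#
# For discrete-time systems the poles are first mapped back to the s-plane via
# log(z)/dt before wn = |s| and m = -Re(s)/wn are read off, which is what the
# branch on ctl.isctime(sys) above implements.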
| [
[
[
7,
21
],
[
278,
281
]
],
[
[
29,
40
],
[
375,
377
],
[
417,
419
],
[
454,
456
]
],
[
[
46,
50
]
]
] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because ``partition`` supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:type table: str
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client ``get_partitions_by_filter`` method,
and apparently supports SQL like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
:type partition: str
:param metastore_conn_id: reference to the metastore thrift service
connection id
:type metastore_conn_id: str
"""
template_fields = ('schema', 'table', 'partition',)
ui_color = '#C5CAE9'
@apply_defaults
def __init__(self,
table, partition="ds='{{ ds }}'",
metastore_conn_id='metastore_default',
schema='default',
poke_interval=60 * 3,
*args,
**kwargs):
super().__init__(
poke_interval=poke_interval, *args, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context):
if '.' in self.table:
self.schema, self.table = self.table.split('.')
self.log.info(
'Poking for table %s.%s, partition %s', self.schema, self.table, self.partition
)
if not hasattr(self, 'hook'):
hook = HiveMetastoreHook(
metastore_conn_id=self.metastore_conn_id)
return hook.check_for_partition(
self.schema, self.table, self.partition)
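# Hedged usage sketch (illustrative only; the task id and table are made up,
# and the sensor is assumed to be declared inside a DAG as usual):
#
#   wait_for_partition = HivePartitionSensor(
#       task_id="wait_for_ds_partition",
#       table="my_database.my_table",
#       partition="ds='{{ ds }}' AND type='value'",
#       metastore_conn_id="metastore_default",
#   )
#
# The partition clause is handed verbatim to the metastore's
# get_partitions_by_filter, as the class docstring explains.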
| [
[
[
841,
858
],
[
2843,
2860
]
],
[
[
908,
926
],
[
1007,
1025
]
],
[
[
964,
978
],
[
1978,
1992
]
],
[
[
987,
1006
]
]
] |
import logging
from abc import abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type
from urllib.parse import quote_plus
import pydantic
from sqlalchemy import create_engine, inspect
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql import sqltypes as types
from datahub.configuration.common import AllowDenyPattern, ConfigModel
from datahub.emitter.mce_builder import DEFAULT_ENV
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import DatasetSnapshot
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.com.linkedin.pegasus2avro.schema import (
ArrayTypeClass,
BooleanTypeClass,
BytesTypeClass,
DateTypeClass,
EnumTypeClass,
MySqlDDL,
NullTypeClass,
NumberTypeClass,
RecordTypeClass,
SchemaField,
SchemaFieldDataType,
SchemaMetadata,
StringTypeClass,
TimeTypeClass,
)
from datahub.metadata.schema_classes import DatasetPropertiesClass
logger: logging.Logger = logging.getLogger(__name__)
def make_sqlalchemy_uri(
scheme: str,
username: Optional[str],
password: Optional[str],
at: Optional[str],
db: Optional[str],
uri_opts: Optional[Dict[str, Any]] = None,
) -> str:
url = f"{scheme}://"
if username is not None:
url += f"{quote_plus(username)}"
if password is not None:
url += f":{quote_plus(password)}"
url += "@"
if at is not None:
url += f"{at}"
if db is not None:
url += f"/{db}"
if uri_opts is not None:
if db is None:
url += "/"
params = "&".join(
f"{key}={quote_plus(value)}" for (key, value) in uri_opts.items() if value
)
url = f"{url}?{params}"
return url
@dataclass
class SQLSourceReport(SourceReport):
tables_scanned: int = 0
views_scanned: int = 0
filtered: List[str] = field(default_factory=list)
def report_entity_scanned(self, name: str, ent_type: str = "table") -> None:
"""
Entity could be a view or a table
"""
if ent_type == "table":
self.tables_scanned += 1
elif ent_type == "view":
self.views_scanned += 1
else:
raise KeyError(f"Unknown entity {ent_type}.")
def report_dropped(self, ent_name: str) -> None:
self.filtered.append(ent_name)
class SQLAlchemyConfig(ConfigModel):
env: str = DEFAULT_ENV
options: dict = {}
# Although the 'table_pattern' enables you to skip everything from certain schemas,
# having another option to allow/deny on schema level is an optimization for the case when there is a large number
# of schemas that one wants to skip and you want to avoid the time to needlessly fetch those tables only to filter
# them out afterwards via the table_pattern.
schema_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
table_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
view_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
include_views: Optional[bool] = True
include_tables: Optional[bool] = True
@abstractmethod
def get_sql_alchemy_url(self):
pass
def get_identifier(self, schema: str, table: str) -> str:
return f"{schema}.{table}"
def standardize_schema_table_names(
self, schema: str, entity: str
) -> Tuple[str, str]:
# Some SQLAlchemy dialects need a standardization step to clean the schema
# and table names. See BigQuery for an example of when this is useful.
return schema, entity
class BasicSQLAlchemyConfig(SQLAlchemyConfig):
username: Optional[str] = None
password: Optional[pydantic.SecretStr] = None
host_port: str
database: Optional[str] = None
database_alias: Optional[str] = None
scheme: str
def get_sql_alchemy_url(self, uri_opts=None):
return make_sqlalchemy_uri(
self.scheme,
self.username,
self.password.get_secret_value() if self.password else None,
self.host_port,
self.database,
uri_opts=uri_opts,
)
@dataclass
class SqlWorkUnit(MetadataWorkUnit):
pass
_field_type_mapping: Dict[Type[types.TypeEngine], Type] = {
types.Integer: NumberTypeClass,
types.Numeric: NumberTypeClass,
types.Boolean: BooleanTypeClass,
types.Enum: EnumTypeClass,
types._Binary: BytesTypeClass,
types.LargeBinary: BytesTypeClass,
types.PickleType: BytesTypeClass,
types.ARRAY: ArrayTypeClass,
types.String: StringTypeClass,
types.Date: DateTypeClass,
types.DATE: DateTypeClass,
types.Time: TimeTypeClass,
types.DateTime: TimeTypeClass,
types.DATETIME: TimeTypeClass,
types.TIMESTAMP: TimeTypeClass,
types.JSON: RecordTypeClass,
    # When SQLAlchemy is unable to map a type into its internal hierarchy, it
# assigns the NullType by default. We want to carry this warning through.
types.NullType: NullTypeClass,
}
_known_unknown_field_types: Set[Type[types.TypeEngine]] = {
types.Interval,
types.CLOB,
}
def register_custom_type(
tp: Type[types.TypeEngine], output: Optional[Type] = None
) -> None:
if output:
_field_type_mapping[tp] = output
else:
_known_unknown_field_types.add(tp)
class _CustomSQLAlchemyDummyType(types.TypeDecorator):
impl = types.LargeBinary
def make_sqlalchemy_type(name: str) -> Type[types.TypeEngine]:
# This usage of type() dynamically constructs a class.
# See https://stackoverflow.com/a/15247202/5004662 and
# https://docs.python.org/3/library/functions.html#type.
sqlalchemy_type: Type[types.TypeEngine] = type(
name,
(_CustomSQLAlchemyDummyType,),
{
"__repr__": lambda self: f"{name}()",
},
)
return sqlalchemy_type
def get_column_type(
sql_report: SQLSourceReport, dataset_name: str, column_type: Any
) -> SchemaFieldDataType:
"""
Maps SQLAlchemy types (https://docs.sqlalchemy.org/en/13/core/type_basics.html) to corresponding schema types
"""
TypeClass: Optional[Type] = None
for sql_type in _field_type_mapping.keys():
if isinstance(column_type, sql_type):
TypeClass = _field_type_mapping[sql_type]
break
if TypeClass is None:
for sql_type in _known_unknown_field_types:
if isinstance(column_type, sql_type):
TypeClass = NullTypeClass
break
if TypeClass is None:
sql_report.report_warning(
dataset_name, f"unable to map type {column_type!r} to metadata schema"
)
TypeClass = NullTypeClass
return SchemaFieldDataType(type=TypeClass())
def get_schema_metadata(
sql_report: SQLSourceReport, dataset_name: str, platform: str, columns: List[dict]
) -> SchemaMetadata:
canonical_schema: List[SchemaField] = []
for column in columns:
field = SchemaField(
fieldPath=column["name"],
type=get_column_type(sql_report, dataset_name, column["type"]),
nativeDataType=column.get("full_type", repr(column["type"])),
description=column.get("comment", None),
nullable=column["nullable"],
recursive=False,
)
canonical_schema.append(field)
schema_metadata = SchemaMetadata(
schemaName=dataset_name,
platform=f"urn:li:dataPlatform:{platform}",
version=0,
hash="",
platformSchema=MySqlDDL(tableSchema=""),
fields=canonical_schema,
)
return schema_metadata
class SQLAlchemySource(Source):
"""A Base class for all SQL Sources that use SQLAlchemy to extend"""
def __init__(self, config: SQLAlchemyConfig, ctx: PipelineContext, platform: str):
super().__init__(ctx)
self.config = config
self.platform = platform
self.report = SQLSourceReport()
def get_inspectors(self) -> Iterable[Inspector]:
# This method can be overridden in the case that you want to dynamically
# run on multiple databases.
url = self.config.get_sql_alchemy_url()
logger.debug(f"sql_alchemy_url={url}")
engine = create_engine(url, **self.config.options)
inspector = inspect(engine)
yield inspector
def get_workunits(self) -> Iterable[SqlWorkUnit]:
sql_config = self.config
if logger.isEnabledFor(logging.DEBUG):
# If debug logging is enabled, we also want to echo each SQL query issued.
sql_config.options["echo"] = True
for inspector in self.get_inspectors():
for schema in inspector.get_schema_names():
if not sql_config.schema_pattern.allowed(schema):
self.report.report_dropped(f"{schema}.*")
continue
if sql_config.include_tables:
yield from self.loop_tables(inspector, schema, sql_config)
if sql_config.include_views:
yield from self.loop_views(inspector, schema, sql_config)
def loop_tables(
self,
inspector: Inspector,
schema: str,
sql_config: SQLAlchemyConfig,
) -> Iterable[SqlWorkUnit]:
for table in inspector.get_table_names(schema):
schema, table = sql_config.standardize_schema_table_names(schema, table)
dataset_name = sql_config.get_identifier(schema, table)
self.report.report_entity_scanned(dataset_name, ent_type="table")
if not sql_config.table_pattern.allowed(dataset_name):
self.report.report_dropped(dataset_name)
continue
columns = inspector.get_columns(table, schema)
if len(columns) == 0:
self.report.report_warning(dataset_name, "missing column information")
try:
                # SQLAlchemy stubs are incomplete and missing this method.
# PR: https://github.com/dropbox/sqlalchemy-stubs/pull/223.
table_info: dict = inspector.get_table_comment(table, schema) # type: ignore
except NotImplementedError:
description: Optional[str] = None
properties: Dict[str, str] = {}
else:
description = table_info["text"]
# The "properties" field is a non-standard addition to SQLAlchemy's interface.
properties = table_info.get("properties", {})
# TODO: capture inspector.get_pk_constraint
# TODO: capture inspector.get_sorted_table_and_fkc_names
dataset_snapshot = DatasetSnapshot(
urn=f"urn:li:dataset:(urn:li:dataPlatform:{self.platform},{dataset_name},{self.config.env})",
aspects=[],
)
if description is not None or properties:
dataset_properties = DatasetPropertiesClass(
description=description,
customProperties=properties,
)
dataset_snapshot.aspects.append(dataset_properties)
schema_metadata = get_schema_metadata(
self.report, dataset_name, self.platform, columns
)
dataset_snapshot.aspects.append(schema_metadata)
mce = MetadataChangeEvent(proposedSnapshot=dataset_snapshot)
wu = SqlWorkUnit(id=dataset_name, mce=mce)
self.report.report_workunit(wu)
yield wu
def loop_views(
self,
inspector: Inspector,
schema: str,
sql_config: SQLAlchemyConfig,
) -> Iterable[SqlWorkUnit]:
for view in inspector.get_view_names(schema):
schema, view = sql_config.standardize_schema_table_names(schema, view)
dataset_name = sql_config.get_identifier(schema, view)
self.report.report_entity_scanned(dataset_name, ent_type="view")
if not sql_config.view_pattern.allowed(dataset_name):
self.report.report_dropped(dataset_name)
continue
try:
columns = inspector.get_columns(view, schema)
except KeyError:
# For certain types of views, we are unable to fetch the list of columns.
self.report.report_warning(
dataset_name, "unable to get schema for this view"
)
schema_metadata = None
else:
schema_metadata = get_schema_metadata(
self.report, dataset_name, self.platform, columns
)
try:
                # SQLAlchemy stubs are incomplete and missing this method.
# PR: https://github.com/dropbox/sqlalchemy-stubs/pull/223.
view_info: dict = inspector.get_table_comment(view, schema) # type: ignore
except NotImplementedError:
description: Optional[str] = None
properties: Dict[str, str] = {}
else:
description = view_info["text"]
# The "properties" field is a non-standard addition to SQLAlchemy's interface.
properties = view_info.get("properties", {})
try:
view_definition = inspector.get_view_definition(view, schema)
if view_definition is None:
view_definition = ""
else:
# Some dialects return a TextClause instead of a raw string,
# so we need to convert them to a string.
view_definition = str(view_definition)
except NotImplementedError:
view_definition = ""
properties["view_definition"] = view_definition
properties["is_view"] = "True"
dataset_snapshot = DatasetSnapshot(
urn=f"urn:li:dataset:(urn:li:dataPlatform:{self.platform},{dataset_name},{self.config.env})",
aspects=[],
)
if description is not None or properties:
dataset_properties = DatasetPropertiesClass(
description=description,
customProperties=properties,
# uri=dataset_name,
)
dataset_snapshot.aspects.append(dataset_properties)
if schema_metadata:
dataset_snapshot.aspects.append(schema_metadata)
mce = MetadataChangeEvent(proposedSnapshot=dataset_snapshot)
wu = SqlWorkUnit(id=dataset_name, mce=mce)
self.report.report_workunit(wu)
yield wu
def get_report(self):
return self.report
def close(self):
pass
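# Hedged usage sketch (illustrative; scheme, credentials and host are made up):
#
#   make_sqlalchemy_uri("postgresql+psycopg2", "alice", "s3cret",
#                       "localhost:5432", "analytics")
#   -> "postgresql+psycopg2://alice:s3cret@localhost:5432/analytics"
#
# BasicSQLAlchemyConfig.get_sql_alchemy_url builds its engine URL through this
# helper, URL-quoting the username and password via quote_plus.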
| [
[
[
7,
14
],
[
1262,
1269
],
[
1245,
1252
],
[
8706,
8713
]
],
[
[
31,
45
],
[
3392,
3406
]
],
[
[
70,
79
],
[
2030,
2039
],
[
4407,
4416
]
],
[
[
81,
86
],
[
2158,
2163
]
],
[
[
106,
109
],
[
1471,
1474
],
[
6207,
6210
]
],
[
[
111,
115
],
[
4486,
4490
],
[
1461,
1465
],
[
10523,
10527
],
[
13273,
13277
]
],
[
[
117,
125
],
[
8233,
8241
],
[
8619,
8627
],
[
9501,
9509
],
[
11909,
11917
]
],
[
[
127,
131
],
[
2146,
2150
],
[
7105,
7109
],
[
7159,
7163
]
],
[
[
133,
141
],
[
3322,
3330
],
[
3364,
3372
],
[
3914,
3922
],
[
3949,
3957
],
[
4018,
4026
],
[
4059,
4067
],
[
1348,
1356
],
[
1377,
1385
],
[
1400,
1408
],
[
1423,
1431
],
[
1452,
1460
],
[
5438,
5446
],
[
6383,
6391
],
[
10474,
10482
],
[
13224,
13232
]
],
[
[
143,
146
],
[
5300,
5303
]
],
[
[
148,
153
],
[
3642,
3647
]
],
[
[
155,
159
],
[
4491,
4495
],
[
4515,
4519
],
[
5304,
5308
],
[
5406,
5410
],
[
5447,
5451
],
[
5707,
5711
],
[
5931,
5935
],
[
6392,
6396
]
],
[
[
185,
195
],
[
1567,
1577
],
[
1646,
1656
],
[
1904,
1914
]
],
[
[
204,
212
],
[
3958,
3966
]
],
[
[
236,
249
],
[
8485,
8498
]
],
[
[
251,
258
],
[
8547,
8554
]
],
[
[
300,
309
],
[
8242,
8251
],
[
9422,
9431
],
[
11830,
11839
]
],
[
[
337,
354
],
[
4529,
4534
],
[
4565,
4570
],
[
4601,
4606
],
[
4638,
4643
],
[
4669,
4674
],
[
4704,
4709
],
[
4743,
4748
],
[
4781,
4786
],
[
4814,
4819
],
[
4849,
4854
],
[
4880,
4885
],
[
4911,
4916
],
[
4942,
4947
],
[
4977,
4982
],
[
5012,
5017
],
[
5048,
5053
],
[
5239,
5244
],
[
4496,
4501
],
[
5336,
5341
],
[
5356,
5361
],
[
5309,
5314
],
[
5615,
5620
],
[
5648,
5653
],
[
5411,
5416
],
[
5712,
5717
],
[
5936,
5941
]
],
[
[
397,
413
],
[
3140,
3156
],
[
3121,
3137
],
[
3207,
3223
],
[
3188,
3204
],
[
3273,
3289
],
[
3254,
3270
]
],
[
[
415,
426
],
[
2662,
2673
]
],
[
[
467,
478
],
[
2691,
2702
]
],
[
[
520,
535
],
[
8035,
8050
]
],
[
[
577,
583
],
[
7898,
7904
]
],
[
[
585,
597
],
[
2062,
2074
]
],
[
[
641,
657
],
[
4435,
4451
]
],
[
[
731,
746
],
[
10926,
10941
],
[
14133,
14148
]
],
[
[
806,
825
],
[
11601,
11620
],
[
14754,
14773
]
],
[
[
894,
908
],
[
4794,
4808
]
],
[
[
914,
930
],
[
4616,
4632
]
],
[
[
936,
950
],
[
4684,
4698
],
[
4723,
4737
],
[
4761,
4775
]
],
[
[
956,
969
],
[
4861,
4874
],
[
4892,
4905
]
],
[
[
975,
988
],
[
4650,
4663
]
],
[
[
994,
1002
],
[
7781,
7789
]
],
[
[
1008,
1021
],
[
5255,
5268
],
[
6727,
6740
],
[
6938,
6951
]
],
[
[
1027,
1042
],
[
4544,
4559
],
[
4580,
4595
]
],
[
[
1048,
1063
],
[
5060,
5075
]
],
[
[
1069,
1080
],
[
7164,
7175
],
[
7225,
7236
]
],
[
[
1086,
1105
],
[
6216,
6235
],
[
6964,
6983
]
],
[
[
1111,
1125
],
[
7121,
7135
],
[
7621,
7635
]
],
[
[
1131,
1146
],
[
4828,
4843
]
],
[
[
1152,
1165
],
[
4923,
4936
],
[
4958,
4971
],
[
4993,
5006
],
[
5029,
5042
]
],
[
[
1213,
1235
],
[
11186,
11208
],
[
14393,
14415
]
],
[
[
1237,
1243
],
[
1245,
1259
],
[
8429,
8435
],
[
8686,
8692
]
],
[
[
1296,
1315
],
[
4162,
4181
]
],
[
[
2046,
2061
],
[
6158,
6173
],
[
7045,
7060
],
[
8182,
8197
]
],
[
[
2645,
2661
],
[
3881,
3897
],
[
8012,
8028
],
[
9474,
9490
],
[
11882,
11898
]
],
[
[
3859,
3880
]
],
[
[
4423,
4434
],
[
8628,
8639
],
[
9510,
9521
],
[
11673,
11684
],
[
11918,
11929
],
[
14826,
14837
]
],
[
[
4465,
4484
],
[
4486,
4520
],
[
5494,
5513
],
[
6425,
6444
],
[
6523,
6542
]
],
[
[
5272,
5298
],
[
5300,
5327
],
[
5545,
5571
],
[
6621,
6647
]
],
[
[
5376,
5396
]
],
[
[
5588,
5614
],
[
5985,
6011
]
],
[
[
5672,
5692
]
],
[
[
6125,
6140
],
[
7293,
7308
]
],
[
[
7008,
7027
],
[
11420,
11439
],
[
12785,
12804
]
],
[
[
7881,
7897
]
]
] |
""" Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import _string
import codecs
import itertools
import operator
import struct
import string
import sys
import unittest
import warnings
from test import support, string_tests
# Error handling (bad decoder return)
def search_function(encoding):
def decode1(input, errors="strict"):
return 42 # not a tuple
def encode1(input, errors="strict"):
return 42 # not a tuple
def encode2(input, errors="strict"):
return (42, 42) # no unicode
def decode2(input, errors="strict"):
return (42, 42) # no unicode
if encoding=="test.unicode1":
return (encode1, decode1, None, None)
elif encoding=="test.unicode2":
return (encode2, decode2, None, None)
else:
return None
codecs.register(search_function)
def duplicate_string(text):
"""
Try to get a fresh clone of the specified text:
new object with a reference count of 1.
This is a best-effort: latin1 single letters and the empty
string ('') are singletons and cannot be cloned.
"""
return text.encode().decode()
class StrSubclass(str):
pass
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
unittest.TestCase):
type2test = str
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(str):
def __repr__(self):
return 'usub(%r)' % str.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assertTrue(object is not realresult)
def test_literals(self):
self.assertEqual('\xff', '\u00ff')
self.assertEqual('\uffff', '\U0000ffff')
self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
# raw strings should not have unicode escapes
self.assertNotEqual(r"\u0020", " ")
def test_ascii(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(ascii('abc'), "'abc'")
self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
self.assertEqual(ascii('ab\\'), "'ab\\\\'")
self.assertEqual(ascii('\\c'), "'\\\\c'")
self.assertEqual(ascii('\\'), "'\\\\'")
self.assertEqual(ascii('\n'), "'\\n'")
self.assertEqual(ascii('\r'), "'\\r'")
self.assertEqual(ascii('\t'), "'\\t'")
self.assertEqual(ascii('\b'), "'\\x08'")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'\""), """'\\'"'""")
self.assertEqual(ascii("'"), '''"'"''')
self.assertEqual(ascii('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = ascii(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test ascii works on wide unicode escapes without overflow.
self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
ascii("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, ascii, WrongRepr())
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr('abc'), "'abc'")
self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
self.assertEqual(repr('ab\\'), "'ab\\\\'")
self.assertEqual(repr('\\c'), "'\\\\c'")
self.assertEqual(repr('\\'), "'\\\\'")
self.assertEqual(repr('\n'), "'\\n'")
self.assertEqual(repr('\r'), "'\\r'")
self.assertEqual(repr('\t'), "'\\t'")
self.assertEqual(repr('\b'), "'\\x08'")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'\""), """'\\'"'""")
self.assertEqual(repr("'"), '''"'"''')
self.assertEqual(repr('"'), """'"'""")
latin1repr = (
"'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
"\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
"\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
"\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
"\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
"\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
"\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
"\xfe\xff'")
testrepr = repr(''.join(map(chr, range(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
repr("\U00010000" * 39 + "\uffff" * 4096))
class WrongRepr:
def __repr__(self):
return b'byte-repr'
self.assertRaises(TypeError, repr, WrongRepr())
def test_iterators(self):
# Make sure unicode objects have an __iter__ method
it = "\u1111\u2222\u3333".__iter__()
self.assertEqual(next(it), "\u1111")
self.assertEqual(next(it), "\u2222")
self.assertEqual(next(it), "\u3333")
self.assertRaises(StopIteration, next, it)
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(3, 'aaa', 'count', 'a')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(0, 'aaa', 'count', 'b')
self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)
# test mixed kinds
self.checkequal(10, '\u0102' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + 'a' * 10, 'count', 'a')
self.checkequal(10, '\U00100304' + '\u0102' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\u0102')
self.checkequal(0, 'a' * 10, 'count', '\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\U00100304')
self.checkequal(10, '\u0102' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + 'a_' * 10, 'count', 'a_')
self.checkequal(10, '\U00100304' + '\u0102_' * 10, 'count', '\u0102_')
self.checkequal(0, 'a' * 10, 'count', 'a\u0102')
self.checkequal(0, 'a' * 10, 'count', 'a\U00100304')
self.checkequal(0, '\u0102' * 10, 'count', '\u0102\U00100304')
def test_find(self):
string_tests.CommonTest.test_find(self)
# test implementation details of the memchr fast path
self.checkequal(100, 'a' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0201')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0120')
self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0220')
self.checkequal(100, 'a' * 100 + '\U00100304', 'find', '\U00100304')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00100204')
self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00102004')
# check mixed argument types
self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)
self.assertRaises(TypeError, 'hello'.find)
self.assertRaises(TypeError, 'hello'.find, 42)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'find', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\u0102')
self.checkequal(-1, 'a' * 100, 'find', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'find', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'find', '\u0102_')
self.checkequal(-1, 'a' * 100, 'find', 'a\u0102')
self.checkequal(-1, 'a' * 100, 'find', 'a\U00100304')
self.checkequal(-1, '\u0102' * 100, 'find', '\u0102\U00100304')
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# test implementation details of the memrchr fast path
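        # Same idea as the memchr note in test_find above, but scanning
        # backwards from the end of the string.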
self.checkequal(0, '\u0102' + 'a' * 100 , 'rfind', '\u0102')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0201')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0120')
self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0220')
self.checkequal(0, '\U00100304' + 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00100204')
self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00102004')
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rfind', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rfind', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rfind', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rfind', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rfind', '_\u0102')
self.checkequal(-1, 'a' * 100, 'rfind', '\u0102a')
self.checkequal(-1, 'a' * 100, 'rfind', '\U00100304a')
self.checkequal(-1, '\u0102' * 100, 'rfind', '\U00100304\u0102')
def test_index(self):
string_tests.CommonTest.test_index(self)
self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)
# test mixed kinds
self.checkequal(100, '\u0102' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + 'a', 'index', 'a')
self.checkequal(100, '\U00100304' * 100 + '\u0102', 'index', '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\u0102')
self.assertRaises(ValueError, ('a' * 100).index, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\U00100304')
self.checkequal(100, '\u0102' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + 'a_', 'index', 'a_')
self.checkequal(100, '\U00100304' * 100 + '\u0102_', 'index', '\u0102_')
self.assertRaises(ValueError, ('a' * 100).index, 'a\u0102')
self.assertRaises(ValueError, ('a' * 100).index, 'a\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).index, '\u0102\U00100304')
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
# test mixed kinds
self.checkequal(0, 'a' + '\u0102' * 100, 'rindex', 'a')
self.checkequal(0, 'a' + '\U00100304' * 100, 'rindex', 'a')
self.checkequal(0, '\u0102' + '\U00100304' * 100, 'rindex', '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304')
self.checkequal(0, '_a' + '\u0102' * 100, 'rindex', '_a')
self.checkequal(0, '_a' + '\U00100304' * 100, 'rindex', '_a')
self.checkequal(0, '_\u0102' + '\U00100304' * 100, 'rindex', '_\u0102')
self.assertRaises(ValueError, ('a' * 100).rindex, '\u0102a')
self.assertRaises(ValueError, ('a' * 100).rindex, '\U00100304a')
self.assertRaises(ValueError, ('\u0102' * 100).rindex, '\U00100304\u0102')
def test_maketrans_translate(self):
# these work with plain translate()
self.checkequalnofix('bbbc', 'abababc', 'translate',
{ord('a'): None})
self.checkequalnofix('iiic', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i')})
self.checkequalnofix('iiix', 'abababc', 'translate',
{ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
self.checkequalnofix('c', 'abababc', 'translate',
{ord('a'): None, ord('b'): ''})
self.checkequalnofix('xyyx', 'xzx', 'translate',
{ord('z'): 'yy'})
# this needs maketrans()
self.checkequalnofix('abababc', 'abababc', 'translate',
{'b': '<i>'})
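        # translate() looks characters up by code point (int); the str key 'b'
        # above can never match, so the input comes back unchanged. maketrans()
        # is what converts character keys into ordinals.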
tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
# test alternative way of calling maketrans()
tbl = self.type2test.maketrans('abc', 'xyz', 'd')
self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)
# various tests switching from ASCII to latin1 or the opposite;
# same length, remove a letter, or replace with a longer string.
self.assertEqual("[a]".translate(str.maketrans('a', 'X')),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'X'})),
"[X]")
self.assertEqual("[a]".translate(str.maketrans({'a': None})),
"[]")
self.assertEqual("[a]".translate(str.maketrans({'a': 'XXX'})),
"[XXX]")
self.assertEqual("[a]".translate(str.maketrans({'a': '\xe9'})),
"[\xe9]")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '123'})),
"x123")
self.assertEqual('axb'.translate(str.maketrans({'a': None, 'b': '\xe9'})),
"x\xe9")
# test non-ASCII (don't take the fast-path)
self.assertEqual("[a]".translate(str.maketrans({'a': '<\xe9>'})),
"[<\xe9>]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': 'a'})),
"[a]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': None})),
"[]")
self.assertEqual("[\xe9]".translate(str.maketrans({'\xe9': '123'})),
"[123]")
self.assertEqual("[a\xe9]".translate(str.maketrans({'a': '<\u20ac>'})),
"[<\u20ac>\xe9]")
# invalid Unicode characters
invalid_char = 0x10ffff+1
for before in "a\xe9\u20ac\U0010ffff":
mapping = str.maketrans({before: invalid_char})
text = "[%s]" % before
self.assertRaises(ValueError, text.translate, mapping)
# errors
self.assertRaises(TypeError, self.type2test.maketrans)
self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})
self.assertRaises(TypeError, 'hello'.translate)
self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')
def test_split(self):
string_tests.CommonTest.test_split(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'split', delim)
self.checkequal([left, right],
left + delim + right, 'split', delim)
self.checkequal([left + right],
left + right, 'split', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'split', delim *2)
def test_rsplit(self):
string_tests.CommonTest.test_rsplit(self)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal([left + right],
left + right, 'rsplit', delim)
self.checkequal([left, right],
left + delim + right, 'rsplit', delim)
self.checkequal([left + right],
left + right, 'rsplit', delim * 2)
self.checkequal([left, right],
left + delim * 2 + right, 'rsplit', delim *2)
def test_partition(self):
string_tests.MixinStrUnicodeUserStringTest.test_partition(self)
# test mixed kinds
self.checkequal(('ABCDEFGH', '', ''), 'ABCDEFGH', 'partition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal((left + right, '', ''),
left + right, 'partition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'partition', delim)
self.checkequal((left + right, '', ''),
left + right, 'partition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'partition', delim * 2)
def test_rpartition(self):
string_tests.MixinStrUnicodeUserStringTest.test_rpartition(self)
# test mixed kinds
self.checkequal(('', '', 'ABCDEFGH'), 'ABCDEFGH', 'rpartition', '\u4200')
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim)
self.checkequal((left, delim, right),
left + delim + right, 'rpartition', delim)
self.checkequal(('', '', left + right),
left + right, 'rpartition', delim * 2)
self.checkequal((left, delim * 2, right),
left + delim * 2 + right, 'rpartition', delim * 2)
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
class MyWrapper:
def __init__(self, sval): self.sval = sval
def __str__(self): return self.sval
# mixed arguments
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
@unittest.skipIf(sys.maxsize > 2**32,
'needs too much memory on a 64-bit platform')
def test_join_overflow(self):
size = int(sys.maxsize**0.5) + 1
seq = ('A' * size,) * size
self.assertRaises(OverflowError, ''.join, seq)
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.assertRaises(TypeError, 'replace'.replace, "r", 42)
# test mixed kinds
for left, right in ('ba', '\u0101\u0100', '\U00010301\U00010300'):
left *= 9
right *= 9
for delim in ('c', '\u0102', '\U00010302'):
for repl in ('d', '\u0103', '\U00010303'):
self.checkequal(left + right,
left + right, 'replace', delim, repl)
self.checkequal(left + repl + right,
left + delim + right,
'replace', delim, repl)
self.checkequal(left + right,
left + right, 'replace', delim * 2, repl)
self.checkequal(left + repl + right,
left + delim * 2 + right,
'replace', delim * 2, repl)
@support.cpython_only
def test_replace_id(self):
pattern = 'abc'
text = 'abc def'
self.assertIs(text.replace(pattern, pattern), text)
def test_bytes_comparison(self):
with support.check_warnings():
warnings.simplefilter('ignore', BytesWarning)
self.assertEqual('abc' == b'abc', False)
self.assertEqual('abc' != b'abc', True)
self.assertEqual('abc' == bytearray(b'abc'), False)
self.assertEqual('abc' != bytearray(b'abc'), True)
def test_comparison(self):
# Comparisons:
self.assertEqual('abc', 'abc')
self.assertTrue('abcd' > 'abc')
self.assertTrue('abc' < 'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assertTrue('\u0061' < '\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assertTrue('\u0061' < '\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assertTrue(s < s2)
def test_fixup(s):
s2 = '\ud800\udc01'
test_lecmp(s, s2)
s2 = '\ud900\udc01'
test_lecmp(s, s2)
s2 = '\uda00\udc01'
test_lecmp(s, s2)
s2 = '\udb00\udc01'
test_lecmp(s, s2)
s2 = '\ud800\udd01'
test_lecmp(s, s2)
s2 = '\ud900\udd01'
test_lecmp(s, s2)
s2 = '\uda00\udd01'
test_lecmp(s, s2)
s2 = '\udb00\udd01'
test_lecmp(s, s2)
s2 = '\ud800\ude01'
test_lecmp(s, s2)
s2 = '\ud900\ude01'
test_lecmp(s, s2)
s2 = '\uda00\ude01'
test_lecmp(s, s2)
s2 = '\udb00\ude01'
test_lecmp(s, s2)
s2 = '\ud800\udfff'
test_lecmp(s, s2)
s2 = '\ud900\udfff'
test_lecmp(s, s2)
s2 = '\uda00\udfff'
test_lecmp(s, s2)
s2 = '\udb00\udfff'
test_lecmp(s, s2)
test_fixup('\ue000')
test_fixup('\uff61')
# Surrogates on both sides, no fixup required
self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
def test_islower(self):
super().test_islower()
self.checkequalnofix(False, '\u1FFc', 'islower')
self.assertFalse('\u2167'.islower())
self.assertTrue('\u2177'.islower())
# non-BMP, uppercase
self.assertFalse('\U00010401'.islower())
self.assertFalse('\U00010427'.islower())
# non-BMP, lowercase
self.assertTrue('\U00010429'.islower())
self.assertTrue('\U0001044E'.islower())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.islower())
self.assertFalse('\U0001F46F'.islower())
def test_isupper(self):
super().test_isupper()
if not sys.platform.startswith('java'):
self.checkequalnofix(False, '\u1FFc', 'isupper')
self.assertTrue('\u2167'.isupper())
self.assertFalse('\u2177'.isupper())
# non-BMP, uppercase
self.assertTrue('\U00010401'.isupper())
self.assertTrue('\U00010427'.isupper())
# non-BMP, lowercase
self.assertFalse('\U00010429'.isupper())
self.assertFalse('\U0001044E'.isupper())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isupper())
self.assertFalse('\U0001F46F'.isupper())
def test_istitle(self):
super().test_istitle()
self.checkequalnofix(True, '\u1FFc', 'istitle')
self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')
# non-BMP, uppercase + lowercase
self.assertTrue('\U00010401\U00010429'.istitle())
self.assertTrue('\U00010427\U0001044E'.istitle())
# apparently there are no titlecased (Lt) non-BMP chars in Unicode 6
for ch in ['\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.istitle(), '{!a} is not title'.format(ch))
def test_isspace(self):
super().test_isspace()
self.checkequalnofix(True, '\u2000', 'isspace')
self.checkequalnofix(True, '\u200a', 'isspace')
self.checkequalnofix(False, '\u2014', 'isspace')
        # apparently there are no non-BMP space characters in Unicode 6
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isspace(), '{!a} is not space.'.format(ch))
def test_isalnum(self):
super().test_isalnum()
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isalnum(), '{!a} is alnum.'.format(ch))
def test_isalpha(self):
super().test_isalpha()
self.checkequalnofix(True, '\u1FFc', 'isalpha')
# non-BMP, cased
self.assertTrue('\U00010401'.isalpha())
self.assertTrue('\U00010427'.isalpha())
self.assertTrue('\U00010429'.isalpha())
self.assertTrue('\U0001044E'.isalpha())
# non-BMP, non-cased
self.assertFalse('\U0001F40D'.isalpha())
self.assertFalse('\U0001F46F'.isalpha())
def test_isdecimal(self):
self.checkequalnofix(False, '', 'isdecimal')
self.checkequalnofix(False, 'a', 'isdecimal')
self.checkequalnofix(True, '0', 'isdecimal')
self.checkequalnofix(False, '\u2460', 'isdecimal') # CIRCLED DIGIT ONE
self.checkequalnofix(False, '\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
self.checkequalnofix(True, '\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
self.checkequalnofix(True, '0123456789', 'isdecimal')
self.checkequalnofix(False, '0123456789a', 'isdecimal')
self.checkraises(TypeError, 'abc', 'isdecimal', 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065', '\U0001F107']:
self.assertFalse(ch.isdecimal(), '{!a} is not decimal.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0']:
self.assertTrue(ch.isdecimal(), '{!a} is decimal.'.format(ch))
def test_isdigit(self):
super().test_isdigit()
self.checkequalnofix(True, '\u2460', 'isdigit')
self.checkequalnofix(False, '\xbc', 'isdigit')
self.checkequalnofix(True, '\u0660', 'isdigit')
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F', '\U00011065']:
self.assertFalse(ch.isdigit(), '{!a} is not a digit.'.format(ch))
for ch in ['\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
self.assertTrue(ch.isdigit(), '{!a} is a digit.'.format(ch))
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isnumeric(), '{!a} is not numeric.'.format(ch))
for ch in ['\U00011065', '\U0001D7F6', '\U00011066',
'\U000104A0', '\U0001F107']:
self.assertTrue(ch.isnumeric(), '{!a} is numeric.'.format(ch))
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
self.assertTrue('\U0001F46F'.isprintable())
self.assertFalse('\U000E0020'.isprintable())
def test_surrogates(self):
for s in ('a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertTrue(s.islower())
self.assertFalse(s.isupper())
self.assertFalse(s.istitle())
for s in ('A\uD800B\uDFFF', 'A\uDFFFB\uD800',
'A\uD800B\uDFFFA', 'A\uDFFFB\uD800A'):
self.assertFalse(s.islower())
self.assertTrue(s.isupper())
self.assertTrue(s.istitle())
for meth_name in ('islower', 'isupper', 'istitle'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
for meth_name in ('isalpha', 'isalnum', 'isdigit', 'isspace',
'isdecimal', 'isnumeric',
'isidentifier', 'isprintable'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
def test_lower(self):
string_tests.CommonTest.test_lower(self)
self.assertEqual('\U00010427'.lower(), '\U0001044F')
self.assertEqual('\U00010427\U00010427'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('\U00010427\U0001044F'.lower(),
'\U0001044F\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.lower(),
'x\U0001044Fx\U0001044F')
self.assertEqual('fi'.lower(), 'fi')
self.assertEqual('\u0130'.lower(), '\u0069\u0307')
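        # U+0130 LATIN CAPITAL LETTER I WITH DOT ABOVE is one of the few
        # characters whose lowercase mapping expands to two code points:
        # 'i' followed by U+0307 COMBINING DOT ABOVE.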
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
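        # U+03A3 takes the word-final form U+03C2 only when it follows a cased
        # letter and is not followed by one; otherwise it lowercases to the
        # ordinary U+03C3. The assertions below cover both contexts.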
self.assertEqual('\u03a3'.lower(), '\u03c3')
self.assertEqual('\u0345\u03a3'.lower(), '\u0345\u03c3')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u0345\u03a3a'.lower(), 'a\u0345\u03c3a')
self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
self.assertEqual('A\u03a3\u0345'.lower(), 'a\u03c2\u0345')
self.assertEqual('\u03a3\u0345 '.lower(), '\u03c3\u0345 ')
self.assertEqual('\U0008fffe'.lower(), '\U0008fffe')
self.assertEqual('\u2177'.lower(), '\u2177')
def test_casefold(self):
self.assertEqual('hello'.casefold(), 'hello')
self.assertEqual('hELlo'.casefold(), 'hello')
self.assertEqual('ß'.casefold(), 'ss')
self.assertEqual('fi'.casefold(), 'fi')
self.assertEqual('\u03a3'.casefold(), '\u03c3')
self.assertEqual('A\u0345\u03a3'.casefold(), 'a\u03b9\u03c3')
self.assertEqual('\u00b5'.casefold(), '\u03bc')
def test_upper(self):
string_tests.CommonTest.test_upper(self)
self.assertEqual('\U0001044F'.upper(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('X\U00010427x\U0001044F'.upper(),
'X\U00010427X\U00010427')
self.assertEqual('fi'.upper(), 'FI')
self.assertEqual('\u0130'.upper(), '\u0130')
self.assertEqual('\u03a3'.upper(), '\u03a3')
self.assertEqual('ß'.upper(), 'SS')
self.assertEqual('\u1fd2'.upper(), '\u0399\u0308\u0300')
self.assertEqual('\U0008fffe'.upper(), '\U0008fffe')
self.assertEqual('\u2177'.upper(), '\u2167')
def test_capitalize(self):
string_tests.CommonTest.test_capitalize(self)
self.assertEqual('\U0001044F'.capitalize(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.capitalize(),
'X\U0001044Fx\U0001044F')
self.assertEqual('h\u0130'.capitalize(), 'H\u0069\u0307')
exp = '\u0399\u0308\u0300\u0069\u0307'
self.assertEqual('\u1fd2\u0130'.capitalize(), exp)
self.assertEqual('finnish'.capitalize(), 'FInnish')
self.assertEqual('A\u0345\u03a3'.capitalize(), 'A\u0345\u03c2')
def test_title(self):
super().test_title()
self.assertEqual('\U0001044F'.title(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.title(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U0001044F \U0001044F\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F \U00010427\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427 \U0001044F\U00010427'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F X\U00010427x\U0001044F'.title(),
'X\U0001044Fx\U0001044F X\U0001044Fx\U0001044F')
self.assertEqual('fiNNISH'.title(), 'Finnish')
self.assertEqual('A\u03a3 \u1fa1xy'.title(), 'A\u03c2 \u1fa9xy')
self.assertEqual('A\u03a3A'.title(), 'A\u03c3a')
def test_swapcase(self):
string_tests.CommonTest.test_swapcase(self)
self.assertEqual('\U0001044F'.swapcase(), '\U00010427')
self.assertEqual('\U00010427'.swapcase(), '\U0001044F')
self.assertEqual('\U0001044F\U0001044F'.swapcase(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.swapcase(),
'\U0001044F\U00010427')
self.assertEqual('\U0001044F\U00010427'.swapcase(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.swapcase(),
'x\U0001044FX\U00010427')
self.assertEqual('fi'.swapcase(), 'FI')
self.assertEqual('\u0130'.swapcase(), '\u0069\u0307')
# Special case for GREEK CAPITAL LETTER SIGMA U+03A3
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('\u0345\u03a3'.swapcase(), '\u0399\u03c3')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u0345\u03a3a'.swapcase(), 'a\u0399\u03c3A')
self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
self.assertEqual('A\u03a3\u0345'.swapcase(), 'a\u03c2\u0399')
self.assertEqual('\u03a3\u0345 '.swapcase(), '\u03c3\u0399 ')
self.assertEqual('\u03a3'.swapcase(), '\u03c3')
self.assertEqual('ß'.swapcase(), 'SS')
self.assertEqual('\u1fd2'.swapcase(), '\u0399\u0308\u0300')
def test_center(self):
string_tests.CommonTest.test_center(self)
self.assertEqual('x'.center(2, '\U0010FFFF'),
'x\U0010FFFF')
self.assertEqual('x'.center(3, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF')
self.assertEqual('x'.center(4, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF\U0010FFFF')
@unittest.skipUnless(sys.maxsize == 2**31 - 1, "requires 32-bit system")
@support.cpython_only
def test_case_operation_overflow(self):
# Issue #22643
size = 2**32//12 + 1
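        # (The divisor 12 is presumably 4 bytes per UCS-4 character times the
        # maximum case-mapping expansion of 3 characters, so size * 12 just
        # exceeds 2**32 and the buffer size computed by upper() must overflow
        # on a 32-bit build.)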
try:
s = "ü" * size
except MemoryError:
            self.skipTest('not enough memory (%.0f MiB required)' % (size / 2**20))
try:
self.assertRaises(OverflowError, s.upper)
finally:
del s
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
# test mixed kinds
for fill in ('a', '\u0100', '\U00010300'):
fill *= 9
for delim in ('c', '\u0102', '\U00010302'):
self.assertNotIn(delim, fill)
self.assertIn(delim, fill + delim)
self.assertNotIn(delim * 2, fill)
self.assertIn(delim * 2, fill + delim * 2)
def test_issue18183(self):
'\U00010000\U00100000'.lower()
'\U00010000\U00100000'.casefold()
'\U00010000\U00100000'.upper()
'\U00010000\U00100000'.capitalize()
'\U00010000\U00100000'.title()
'\U00010000\U00100000'.swapcase()
'\U00100000'.center(3, '\U00010000')
'\U00100000'.ljust(3, '\U00010000')
'\U00100000'.rjust(3, '\U00010000')
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
        # examples from PEP 3101 (Advanced String Formatting):
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_spec's
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
class M:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'M(' + self.x + ')'
__str__ = None
class N:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'N(' + self.x + ')'
__format__ = None
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
# self.assertEqual('{ 0 }'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# issue 12546: use \x00 as a fill character
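        # (The regression tracked there was that a NUL fill character was not
        # handled correctly; the \x01 and default-space variants below are
        # included for contrast.)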
self.assertEqual('{0:\x00<6s}'.format('foo'), 'foo\x00\x00\x00')
self.assertEqual('{0:\x01<6s}'.format('foo'), 'foo\x01\x01\x01')
self.assertEqual('{0:\x00^6s}'.format('foo'), '\x00foo\x00\x00')
self.assertEqual('{0:^6s}'.format('foo'), ' foo ')
self.assertEqual('{0:\x00<6}'.format(3), '3\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3), '3\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
self.assertEqual('{0:<6}'.format(3), '3 ')
self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
self.assertEqual('{0:\x00^12}'.format(3+2.0j), '\x00\x00\x00(3+2j)\x00\x00\x00')
self.assertEqual('{0:^12}'.format(3+2.0j), ' (3+2j) ')
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r, !s and !a coercions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'") # printable
self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
self.assertRaises(TypeError, '{0:^10}'.format, E('data'))
self.assertRaises(TypeError, '{0:^10s}'.format, E('data'))
self.assertRaises(TypeError, '{0:>15s}'.format, G('data'))
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(ValueError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(IndexError, "{:}".format)
self.assertRaises(IndexError, "{:s}".format)
self.assertRaises(IndexError, "{}".format)
big = "23098475029384702983476098230754973209482573"
self.assertRaises(ValueError, ("{" + big + "}").format)
self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
# issue 6089
self.assertRaises(ValueError, "{0[0]x}".format, [None])
self.assertRaises(ValueError, "{0[0](10)}".format, [None])
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
self.assertRaises(ValueError, "{0:-s}".format, '')
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
# Non-ASCII
self.assertEqual("{0:s}{1:s}".format("ABC", "\u0410\u0411\u0412"),
'ABC\u0410\u0411\u0412')
self.assertEqual("{0:.3s}".format("ABC\u0410\u0411\u0412"),
'ABC')
self.assertEqual("{0:.0s}".format("ABC\u0410\u0411\u0412"),
'')
self.assertEqual("{[{}]}".format({"{}": 5}), "5")
self.assertEqual("{[{}]}".format({"{}" : "a"}), "a")
self.assertEqual("{[{]}".format({"{" : "a"}), "a")
self.assertEqual("{[}]}".format({"}" : "a"}), "a")
self.assertEqual("{[[]}".format({"[" : "a"}), "a")
self.assertEqual("{[!]}".format({"!" : "a"}), "a")
self.assertRaises(ValueError, "{a{}b}".format, 42)
self.assertRaises(ValueError, "{a{b}".format, 42)
self.assertRaises(ValueError, "{[}".format, 42)
self.assertEqual("0x{:0{:d}X}".format(0x0,16), "0x0000000000000000")
# Blocking fallback
m = M('data')
self.assertEqual("{!r}".format(m), 'M(data)')
self.assertRaises(TypeError, "{!s}".format, m)
self.assertRaises(TypeError, "{}".format, m)
n = N('data')
self.assertEqual("{!r}".format(n), 'N(data)')
self.assertEqual("{!s}".format(n), 'N(data)')
self.assertRaises(TypeError, "{}".format, n)
def test_format_map(self):
self.assertEqual(''.format_map({}), '')
self.assertEqual('a'.format_map({}), 'a')
self.assertEqual('ab'.format_map({}), 'ab')
self.assertEqual('a{{'.format_map({}), 'a{')
self.assertEqual('a}}'.format_map({}), 'a}')
self.assertEqual('{{b'.format_map({}), '{b')
self.assertEqual('}}b'.format_map({}), '}b')
self.assertEqual('a{{b'.format_map({}), 'a{b')
# using mappings
class Mapping(dict):
def __missing__(self, key):
return key
self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
class InternalMapping:
def __init__(self):
self.mapping = {'a': 'hello'}
def __getitem__(self, key):
return self.mapping[key]
self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
# test various errors
self.assertRaises(TypeError, ''.format_map)
self.assertRaises(TypeError, 'a'.format_map)
self.assertRaises(ValueError, '{'.format_map, {})
self.assertRaises(ValueError, '}'.format_map, {})
self.assertRaises(ValueError, 'a{'.format_map, {})
self.assertRaises(ValueError, 'a}'.format_map, {})
self.assertRaises(ValueError, '{a'.format_map, {})
self.assertRaises(ValueError, '}a'.format_map, {})
# issue #12579: can't supply positional params to format_map
self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
self.assertRaises(ValueError, '{}'.format_map, 'a')
self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
def test_format_huge_precision(self):
format_string = ".{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_width(self):
format_string = "{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_item_number(self):
format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string.format(2.34)
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000, 3.50')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000, 3.57')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
self.assertEqual("%r" % ("\u1234",), "'\u1234'")
self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
self.assertEqual('%c' % 0x1234, '\u1234')
self.assertEqual('%c' % 0x21483, '\U00021483')
self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
self.assertEqual('%c' % '\U00021483', '\U00021483')
self.assertRaises(TypeError, "%c".__mod__, "aa")
self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
self.assertRaises(TypeError, "%i".__mod__, "aa")
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % "abc", '...abc...')
self.assertEqual('%*s' % (5,'abc',), ' abc')
self.assertEqual('%*s' % (-5,'abc',), 'abc ')
self.assertEqual('%*.*s' % (5,2,'abc',), ' ab')
self.assertEqual('%*.*s' % (5,3,'abc',), ' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103 abc')
self.assertEqual('%c' % 'a', 'a')
class Wrapper:
def __str__(self):
return '\u1234'
self.assertEqual('%s' % Wrapper(), '\u1234')
# issue 3382
NAN = float('nan')
INF = float('inf')
self.assertEqual('%f' % NAN, 'nan')
self.assertEqual('%F' % NAN, 'NAN')
self.assertEqual('%f' % INF, 'inf')
self.assertEqual('%F' % INF, 'INF')
# PEP 393
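        # Precision on %s counts code points, not bytes, so truncating the
        # mixed-width string 'a\xe9\u20ac' keeps whole characters.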
self.assertEqual('%.1s' % "a\xe9\u20ac", 'a')
self.assertEqual('%.2s' % "a\xe9\u20ac", 'a\xe9')
#issue 19995
class PseudoInt:
def __init__(self, value):
self.value = int(value)
def __int__(self):
return self.value
def __index__(self):
return self.value
class PseudoFloat:
def __init__(self, value):
self.value = float(value)
def __int__(self):
return int(self.value)
pi = PseudoFloat(3.1415)
letter_m = PseudoInt(109)
self.assertEqual('%x' % 42, '2a')
self.assertEqual('%X' % 15, 'F')
self.assertEqual('%o' % 9, '11')
self.assertEqual('%c' % 109, 'm')
self.assertEqual('%x' % letter_m, '6d')
self.assertEqual('%X' % letter_m, '6D')
self.assertEqual('%o' % letter_m, '155')
self.assertEqual('%c' % letter_m, 'm')
self.assertRaisesRegex(TypeError, '%x format: an integer is required, not float', operator.mod, '%x', 3.14),
self.assertRaisesRegex(TypeError, '%X format: an integer is required, not float', operator.mod, '%X', 2.11),
self.assertRaisesRegex(TypeError, '%o format: an integer is required, not float', operator.mod, '%o', 1.79),
self.assertRaisesRegex(TypeError, '%x format: an integer is required, not PseudoFloat', operator.mod, '%x', pi),
self.assertRaises(TypeError, operator.mod, '%c', pi),
def test_formatting_with_enum(self):
# issue18780
import enum
class Float(float, enum.Enum):
PI = 3.1415926
class Int(enum.IntEnum):
IDES = 15
class Str(str, enum.Enum):
ABC = 'abc'
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % (Str.ABC, Str.ABC),
'Str.ABC, Str.ABC')
self.assertEqual("%s, %s, %d, %i, %u, %f, %5.2f" %
(Str.ABC, Str.ABC,
Int.IDES, Int.IDES, Int.IDES,
Float.PI, Float.PI),
'Str.ABC, Str.ABC, 15, 15, 15, 3.141593, 3.14')
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':Str.ABC},
'...Str.ABC...')
self.assertEqual('...%(foo)s...' % {'foo':Int.IDES},
'...Int.IDES...')
self.assertEqual('...%(foo)i...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)d...' % {'foo':Int.IDES},
'...15...')
self.assertEqual('...%(foo)u...' % {'foo':Int.IDES, 'def':Float.PI},
'...15...')
self.assertEqual('...%(foo)f...' % {'foo':Float.PI,'def':123},
'...3.141593...')
def test_formatting_huge_precision(self):
format_string = "%.{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_issue28598_strsubclass_rhs(self):
# A subclass of str with an __rmod__ method should be able to hook
# into the % operator
class SubclassedStr(str):
def __rmod__(self, other):
return 'Success, self.__rmod__({!r}) was called'.format(other)
self.assertEqual('lhs %% %r' % SubclassedStr('rhs'),
"Success, self.__rmod__('lhs %% %r') was called")
@support.cpython_only
def test_formatting_huge_precision_c_limits(self):
from _testcapi import INT_MAX
format_string = "%.{}f".format(INT_MAX + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_formatting_huge_width(self):
format_string = "%{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_startswith_endswith_errors(self):
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
@support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_format_float(self):
        # %-formatting of floats is locale-independent: even under de_DE or
        # fr_FR it must use the C locale's '.' decimal point, not a comma
self.assertEqual('1.0', '%.1f' % 1.0)
def test_constructor(self):
# unicode(obj) tests (this maps to PyObject_Unicode() at C level)
self.assertEqual(
str('unicode remains unicode'),
'unicode remains unicode'
)
for text in ('ascii', '\xe9', '\u20ac', '\U0010FFFF'):
subclass = StrSubclass(text)
self.assertEqual(str(subclass), text)
self.assertEqual(len(subclass), len(text))
if text == 'ascii':
self.assertEqual(subclass.encode('ascii'), b'ascii')
self.assertEqual(subclass.encode('utf-8'), b'ascii')
self.assertEqual(
str('strings are converted to unicode'),
'strings are converted to unicode'
)
class StringCompat:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
self.assertEqual(
str(StringCompat('__str__ compatible objects are recognized')),
'__str__ compatible objects are recognized'
)
# unicode(obj) is compatible to str():
o = StringCompat('unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
for obj in (123, 123.45, 123):
self.assertEqual(str(obj), str(str(obj)))
# unicode(obj, encoding, error) tests (this maps to
# PyUnicode_FromEncodedObject() at C level)
if not sys.platform.startswith('java'):
self.assertRaises(
TypeError,
str,
'decoding unicode is not supported',
'utf-8',
'strict'
)
self.assertEqual(
str(b'strings are decoded to unicode', 'utf-8', 'strict'),
'strings are decoded to unicode'
)
if not sys.platform.startswith('java'):
self.assertEqual(
str(
memoryview(b'character buffers are decoded to unicode'),
'utf-8',
'strict'
),
'character buffers are decoded to unicode'
)
self.assertRaises(TypeError, str, 42, 42, 42)
def test_constructor_keyword_args(self):
"""Pass various keyword argument combinations to the constructor."""
# The object argument can be passed as a keyword.
self.assertEqual(str(object='foo'), 'foo')
self.assertEqual(str(object=b'foo', encoding='utf-8'), 'foo')
# The errors argument without encoding triggers "decode" mode.
self.assertEqual(str(b'foo', errors='strict'), 'foo') # not "b'foo'"
self.assertEqual(str(object=b'foo', errors='strict'), 'foo')
def test_constructor_defaults(self):
"""Check the constructor argument defaults."""
# The object argument defaults to '' or b''.
self.assertEqual(str(), '')
self.assertEqual(str(errors='strict'), '')
utf8_cent = '¢'.encode('utf-8')
# The encoding argument defaults to utf-8.
self.assertEqual(str(utf8_cent, errors='strict'), '¢')
# The errors argument defaults to strict.
self.assertRaises(UnicodeDecodeError, str, utf8_cent, encoding='ascii')
def test_codecs_utf7(self):
utfTests = [
('A\u2262\u0391.', b'A+ImIDkQ.'), # RFC2152 example
('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'), # RFC2152 example
('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'), # RFC2152 example
('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
('+', b'+-'),
('+-', b'+--'),
('+?', b'+-?'),
(r'\?', b'+AFw?'),
('+?', b'+-?'),
(r'\\?', b'+AFwAXA?'),
(r'\\\?', b'+AFwAXABc?'),
(r'++--', b'+-+---'),
('\U000abcde', b'+2m/c3g-'), # surrogate pairs
('/', b'/'),
]
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# Unpaired surrogates are passed through
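        # UTF-7 simply base64-encodes the UTF-16 code units, so a lone
        # surrogate such as U+D801 round-trips: 0xD801 -> bytes D8 01 ->
        # modified base64 '2AE' -> b'+2AE-'.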
self.assertEqual('\uD801'.encode('utf-7'), b'+2AE-')
self.assertEqual('\uD801x'.encode('utf-7'), b'+2AE-x')
self.assertEqual('\uDC01'.encode('utf-7'), b'+3AE-')
self.assertEqual('\uDC01x'.encode('utf-7'), b'+3AE-x')
self.assertEqual(b'+2AE-'.decode('utf-7'), '\uD801')
self.assertEqual(b'+2AE-x'.decode('utf-7'), '\uD801x')
self.assertEqual(b'+3AE-'.decode('utf-7'), '\uDC01')
self.assertEqual(b'+3AE-x'.decode('utf-7'), '\uDC01x')
self.assertEqual('\uD801\U000abcde'.encode('utf-7'), b'+2AHab9ze-')
self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde')
# Issue #2242: crash on some Windows/MSVC versions
self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '')
# Direct encoded characters
set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
# Optional direct characters
set_o = '!"#$%&*;<=>@[]^_`{|}'
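        # set_d and set_o correspond to RFC 2152's "Set D" (directly encoded)
        # and "Set O" (optional direct) characters: the encoder is expected to
        # pass Set D through unchanged, while for Set O only decoding is
        # checked.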
for c in set_d:
self.assertEqual(c.encode('utf7'), c.encode('ascii'))
self.assertEqual(c.encode('ascii').decode('utf7'), c)
for c in set_o:
self.assertEqual(c.encode('ascii').decode('utf7'), c)
def test_codecs_utf8(self):
self.assertEqual(''.encode('utf-8'), b'')
self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
self.assertEqual('\U00010002'.encode('utf-8'), b'\xf0\x90\x80\x82')
self.assertEqual('\U00023456'.encode('utf-8'), b'\xf0\xa3\x91\x96')
self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
self.assertEqual(('\U00010002'*10).encode('utf-8'),
b'\xf0\x90\x80\x82'*10)
self.assertEqual(
'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
' Nunstuck git und'.encode('utf-8'),
b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
b'\xe3\x80\x8cWenn ist das Nunstuck git und'
)
# UTF-8 specific decoding tests
self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
# Other possible utf-8 test cases:
# * strict decoding testing for all of the
# UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
(b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
# 2 bytes
(b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
# 3 bytes
(b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
(b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
# 4 bytes
(b'\xF0\x90\x80\x80', '\U00010000'),
(b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
]
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
def test_utf8_decode_invalid_sequences(self):
# continuation bytes in a sequence of 2, 3, or 4 bytes
continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
        # start bytes of a 2-byte sequence equivalent to code points <= 0x7F (overlong encodings)
invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
# start bytes of a 4-byte sequence equivalent to code points > 0x10FFFF
invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
invalid_start_bytes = (
continuation_bytes + invalid_2B_seq_start_bytes +
invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
)
for byte in invalid_start_bytes:
self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
for sb in invalid_2B_seq_start_bytes:
for cb in continuation_bytes:
self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
for sb in invalid_4B_seq_start_bytes:
for cb1 in continuation_bytes[:3]:
for cb3 in continuation_bytes[:3]:
self.assertRaises(UnicodeDecodeError,
(sb+cb1+b'\x80'+cb3).decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xE0'+cb+b'\xBF').decode, 'utf-8')
# surrogates
for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xED'+cb+b'\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x80, 0x90)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
self.assertRaises(UnicodeDecodeError,
(b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
def test_issue8271(self):
# Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
# only the start byte and the continuation byte(s) are now considered
# invalid, instead of the number of bytes specified by the start byte.
# See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
# table 3-8, Row 2) for more information about the algorithm used.
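        # In other words, each maximal subpart of an ill-formed sequence is
        # replaced by a single U+FFFD and decoding resumes at the next byte;
        # that is what the expected values below spell out.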
FFFD = '\ufffd'
sequences = [
# invalid start bytes
(b'\x80', FFFD), # continuation byte
(b'\x80\x80', FFFD*2), # 2 continuation bytes
(b'\xc0', FFFD),
(b'\xc0\xc0', FFFD*2),
(b'\xc1', FFFD),
(b'\xc1\xc0', FFFD*2),
(b'\xc0\xc1', FFFD*2),
# with start byte of a 2-byte sequence
(b'\xc2', FFFD), # only the start byte
(b'\xc2\xc2', FFFD*2), # 2 start bytes
(b'\xc2\xc2\xc2', FFFD*3), # 3 start bytes
(b'\xc2\x41', FFFD+'A'), # invalid continuation byte
# with start byte of a 3-byte sequence
(b'\xe1', FFFD), # only the start byte
(b'\xe1\xe1', FFFD*2), # 2 start bytes
(b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
(b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
(b'\xe1\x80', FFFD), # only 1 continuation byte
(b'\xe1\x41', FFFD+'A'), # invalid continuation byte
(b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
(b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
(b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
(b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
(b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
# with start byte of a 4-byte sequence
(b'\xf1', FFFD), # only the start byte
(b'\xf1\xf1', FFFD*2), # 2 start bytes
(b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
(b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
(b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
            (b'\xf1\x80', FFFD), # only 1 continuation byte
(b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
(b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
(b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
            (b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cb and 1 valid
(b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
(b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
(b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
(b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
(b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
(b'\xf1\xf1\x80\x41', FFFD*2+'A'),
(b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
# with invalid start byte of a 4-byte sequence (rfc2279)
(b'\xf5', FFFD), # only the start byte
(b'\xf5\xf5', FFFD*2), # 2 start bytes
(b'\xf5\x80', FFFD*2), # only 1 continuation byte
(b'\xf5\x80\x80', FFFD*3), # only 2 continuation byte
(b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
(b'\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
(b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
(b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
# with invalid start byte of a 5-byte sequence (rfc2279)
(b'\xf8', FFFD), # only the start byte
(b'\xf8\xf8', FFFD*2), # 2 start bytes
(b'\xf8\x80', FFFD*2), # only one continuation byte
(b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
(b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
# with invalid start byte of a 6-byte sequence (rfc2279)
(b'\xfc', FFFD), # only the start byte
(b'\xfc\xfc', FFFD*2), # 2 start bytes
(b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
(b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
# invalid start byte
(b'\xfe', FFFD),
(b'\xfe\x80\x80', FFFD*3),
# other sequences
(b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
(b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
(b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
(b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
'\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
]
for n, (seq, res) in enumerate(sequences):
self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
self.assertEqual(seq.decode('utf-8', 'ignore'),
res.replace('\uFFFD', ''))
def assertCorrectUTF8Decoding(self, seq, res, err):
"""
Check that an invalid UTF-8 sequence raises a UnicodeDecodeError when
        'strict' is used, returns res when 'replace' is used, and returns res
        with the U+FFFD characters removed when 'ignore' is used.
"""
with self.assertRaises(UnicodeDecodeError) as cm:
seq.decode('utf-8')
exc = cm.exception
self.assertIn(err, str(exc))
self.assertEqual(seq.decode('utf-8', 'replace'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'replace'),
'aaaa' + res + 'bbbb')
res = res.replace('\ufffd', '')
self.assertEqual(seq.decode('utf-8', 'ignore'), res)
self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'ignore'),
'aaaa' + res + 'bbbb')
def test_invalid_start_byte(self):
"""
Test that an 'invalid start byte' error is raised when the first byte
is not in the ASCII range or is not a valid start byte of a 2-, 3-, or
4-bytes sequence. The invalid start byte is replaced with a single
U+FFFD when errors='replace'.
E.g. <80> is a continuation byte and can appear only after a start byte.
"""
FFFD = '\ufffd'
for byte in b'\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF':
self.assertCorrectUTF8Decoding(bytes([byte]), '\ufffd',
'invalid start byte')
def test_unexpected_end_of_data(self):
"""
Test that an 'unexpected end of data' error is raised when the string
ends after a start byte of a 2-, 3-, or 4-bytes sequence without having
enough continuation bytes. The incomplete sequence is replaced with a
single U+FFFD when errors='replace'.
E.g. in the sequence <F3 80 80>, F3 is the start byte of a 4-bytes
sequence, but it's followed by only 2 valid continuation bytes and the
        last continuation byte is missing.
        Note: the continuation bytes must all be valid; if one of them is
        invalid, another error will be raised.
"""
sequences = [
'C2', 'DF',
'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF',
'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF',
'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF',
'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF',
'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF',
'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF'
]
FFFD = '\ufffd'
for seq in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), '\ufffd',
'unexpected end of data')
def test_invalid_cb_for_2bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte of a 2-bytes sequence is invalid. The start byte
is replaced by a single U+FFFD and the second byte is handled
separately when errors='replace'.
E.g. in the sequence <C2 41>, C2 is the start byte of a 2-bytes
sequence, but 41 is not a valid continuation byte because it's the
ASCII letter 'A'.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('C2 00', FFFD+'\x00'), ('C2 7F', FFFD+'\x7f'),
('C2 C0', FFFDx2), ('C2 FF', FFFDx2),
('DF 00', FFFD+'\x00'), ('DF 7F', FFFD+'\x7f'),
('DF C0', FFFDx2), ('DF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_invalid_cb_for_3bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 3-bytes sequence are invalid. When
errors='replace', if the first continuation byte is valid, the first
two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the
third byte is handled separately, otherwise only the start byte is
replaced with a U+FFFD and the other continuation bytes are handled
separately.
E.g. in the sequence <E1 80 41>, E1 is the start byte of a 3-bytes
sequence, 80 is a valid continuation byte, but 41 is not a valid cb
because it's the ASCII letter 'A'.
Note: when the start byte is E0 or ED, the valid ranges for the first
continuation byte are limited to A0..BF and 80..9F respectively.
Python 2 used to consider all the bytes in range 80..BF valid when the
start byte was ED. This is fixed in Python 3.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('E0 00', FFFD+'\x00'), ('E0 7F', FFFD+'\x7f'), ('E0 80', FFFDx2),
('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2),
('E0 A0 00', FFFD+'\x00'), ('E0 A0 7F', FFFD+'\x7f'),
('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2),
('E0 BF 00', FFFD+'\x00'), ('E0 BF 7F', FFFD+'\x7f'),
('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+'\x00'),
('E1 7F', FFFD+'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2),
('E1 80 00', FFFD+'\x00'), ('E1 80 7F', FFFD+'\x7f'),
('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2),
('E1 BF 00', FFFD+'\x00'), ('E1 BF 7F', FFFD+'\x7f'),
('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+'\x00'),
('EC 7F', FFFD+'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2),
('EC 80 00', FFFD+'\x00'), ('EC 80 7F', FFFD+'\x7f'),
('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2),
('EC BF 00', FFFD+'\x00'), ('EC BF 7F', FFFD+'\x7f'),
('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+'\x00'),
('ED 7F', FFFD+'\x7f'),
('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^
('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+'\x00'),
('ED 80 7F', FFFD+'\x7f'), ('ED 80 C0', FFFDx2),
('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+'\x00'),
('ED 9F 7F', FFFD+'\x7f'), ('ED 9F C0', FFFDx2),
('ED 9F FF', FFFDx2), ('EE 00', FFFD+'\x00'),
('EE 7F', FFFD+'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2),
('EE 80 00', FFFD+'\x00'), ('EE 80 7F', FFFD+'\x7f'),
('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2),
('EE BF 00', FFFD+'\x00'), ('EE BF 7F', FFFD+'\x7f'),
('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+'\x00'),
('EF 7F', FFFD+'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2),
('EF 80 00', FFFD+'\x00'), ('EF 80 7F', FFFD+'\x7f'),
('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2),
('EF BF 00', FFFD+'\x00'), ('EF BF 7F', FFFD+'\x7f'),
('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_invalid_cb_for_4bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte(s) of a 4-bytes sequence are invalid. When
        errors='replace', the start byte and all the following valid
        continuation bytes are replaced with a single U+FFFD, and all the bytes
        starting from the first invalid continuation byte (included) are
handled separately.
        E.g. in the sequence <F1 80 41>, F1 is the start byte of a 4-bytes
sequence, 80 is a valid continuation byte, but 41 is not a valid cb
because it's the ASCII letter 'A'.
        Note: when the start byte is F0 or F4, the valid ranges for the first
        continuation byte are limited to 90..BF and 80..8F respectively
        (e.g. <F0 80> or <F4 90> already count as an invalid continuation
        byte even though 80 and 90 lie in the general 80..BF range).
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('F0 00', FFFD+'\x00'), ('F0 7F', FFFD+'\x7f'), ('F0 80', FFFDx2),
('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2),
('F0 90 00', FFFD+'\x00'), ('F0 90 7F', FFFD+'\x7f'),
('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2),
('F0 BF 00', FFFD+'\x00'), ('F0 BF 7F', FFFD+'\x7f'),
('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2),
('F0 90 80 00', FFFD+'\x00'), ('F0 90 80 7F', FFFD+'\x7f'),
('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2),
('F0 90 BF 00', FFFD+'\x00'), ('F0 90 BF 7F', FFFD+'\x7f'),
('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2),
('F0 BF 80 00', FFFD+'\x00'), ('F0 BF 80 7F', FFFD+'\x7f'),
('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2),
('F0 BF BF 00', FFFD+'\x00'), ('F0 BF BF 7F', FFFD+'\x7f'),
('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2),
('F1 00', FFFD+'\x00'), ('F1 7F', FFFD+'\x7f'), ('F1 C0', FFFDx2),
('F1 FF', FFFDx2), ('F1 80 00', FFFD+'\x00'),
('F1 80 7F', FFFD+'\x7f'), ('F1 80 C0', FFFDx2),
('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+'\x00'),
('F1 BF 7F', FFFD+'\x7f'), ('F1 BF C0', FFFDx2),
('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+'\x00'),
('F1 80 80 7F', FFFD+'\x7f'), ('F1 80 80 C0', FFFDx2),
('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+'\x00'),
('F1 80 BF 7F', FFFD+'\x7f'), ('F1 80 BF C0', FFFDx2),
('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+'\x00'),
('F1 BF 80 7F', FFFD+'\x7f'), ('F1 BF 80 C0', FFFDx2),
('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+'\x00'),
('F1 BF BF 7F', FFFD+'\x7f'), ('F1 BF BF C0', FFFDx2),
('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+'\x00'),
('F3 7F', FFFD+'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2),
('F3 80 00', FFFD+'\x00'), ('F3 80 7F', FFFD+'\x7f'),
('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2),
('F3 BF 00', FFFD+'\x00'), ('F3 BF 7F', FFFD+'\x7f'),
('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2),
('F3 80 80 00', FFFD+'\x00'), ('F3 80 80 7F', FFFD+'\x7f'),
('F3 80 80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2),
('F3 80 BF 00', FFFD+'\x00'), ('F3 80 BF 7F', FFFD+'\x7f'),
('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2),
('F3 BF 80 00', FFFD+'\x00'), ('F3 BF 80 7F', FFFD+'\x7f'),
('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2),
('F3 BF BF 00', FFFD+'\x00'), ('F3 BF BF 7F', FFFD+'\x7f'),
('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2),
('F4 00', FFFD+'\x00'), ('F4 7F', FFFD+'\x7f'), ('F4 90', FFFDx2),
('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2),
('F4 80 00', FFFD+'\x00'), ('F4 80 7F', FFFD+'\x7f'),
('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2),
('F4 8F 00', FFFD+'\x00'), ('F4 8F 7F', FFFD+'\x7f'),
('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2),
('F4 80 80 00', FFFD+'\x00'), ('F4 80 80 7F', FFFD+'\x7f'),
('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2),
('F4 80 BF 00', FFFD+'\x00'), ('F4 80 BF 7F', FFFD+'\x7f'),
('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2),
('F4 8F 80 00', FFFD+'\x00'), ('F4 8F 80 7F', FFFD+'\x7f'),
('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2),
('F4 8F BF 00', FFFD+'\x00'), ('F4 8F BF 7F', FFFD+'\x7f'),
('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2)
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(bytes.fromhex(seq), res,
'invalid continuation byte')
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
def test_codecs_errors(self):
# Error handling (encoding)
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
'Andr\202 x'.encode('ascii', errors='replace'))
self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
# Error handling (decoding)
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x')
# Error handling (unknown character names)
self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
# Error handling (truncated escape sequence)
self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
self.assertRaises(TypeError, str, b"hello", "test.unicode2")
self.assertRaises(TypeError, "hello".encode, "test.unicode1")
self.assertRaises(TypeError, "hello".encode, "test.unicode2")
# Error handling (wrong arguments)
self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
# Error handling (lone surrogate in PyUnicode_TransformDecimalToASCII())
self.assertRaises(UnicodeError, float, "\ud800")
self.assertRaises(UnicodeError, float, "\udf00")
self.assertRaises(UnicodeError, complex, "\ud800")
self.assertRaises(UnicodeError, complex, "\udf00")
def test_codecs(self):
# Encoding
self.assertEqual('hello'.encode('ascii'), b'hello')
self.assertEqual('hello'.encode('utf-7'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-8'), b'hello')
self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
self.assertEqual('hello'.encode('latin-1'), b'hello')
# Default encoding is utf-8
self.assertEqual('\u2603'.encode(), b'\xe2\x98\x83')
# Roundtrip safety for BMP (just the first 1024 chars)
for c in range(1024):
u = chr(c)
for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
'utf-16-be', 'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
with warnings.catch_warnings():
# unicode-internal has been deprecated
warnings.simplefilter("ignore", DeprecationWarning)
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 256 chars)
for c in range(256):
u = chr(c)
for encoding in ('latin-1',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for BMP (just the first 128 chars)
for c in range(128):
u = chr(c)
for encoding in ('ascii',):
self.assertEqual(str(u.encode(encoding),encoding), u)
# Roundtrip safety for non-BMP (just a few chars)
with warnings.catch_warnings():
# unicode-internal has been deprecated
warnings.simplefilter("ignore", DeprecationWarning)
u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
'raw_unicode_escape',
'unicode_escape', 'unicode_internal'):
self.assertEqual(str(u.encode(encoding),encoding), u)
# UTF-8 must be roundtrip safe for all code points
# (except surrogates, which are forbidden).
u = ''.join(map(chr, list(range(0, 0xd800)) +
list(range(0xe000, 0x110000))))
for encoding in ('utf-8',):
self.assertEqual(str(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
# 0-127
s = bytes(range(128))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_9',
'koi8_r', 'koi8_t', 'koi8_u', 'kz1048', 'latin_1',
'mac_cyrillic', 'mac_latin2',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
'cp1256', 'cp1257', 'cp1258',
'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
'cp1006', 'iso8859_8',
### These have undefined mappings:
#'cp424',
### These fail the round-trip:
#'cp875'
):
self.assertEqual(str(s, encoding).encode(encoding), s)
# 128-255
s = bytes(range(128, 256))
for encoding in (
'cp037', 'cp1026', 'cp273',
'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp1125',
'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
'iso8859_2', 'iso8859_4', 'iso8859_5',
'iso8859_9', 'koi8_r', 'koi8_u', 'latin_1',
'mac_cyrillic', 'mac_latin2',
### These have undefined mappings:
#'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
#'cp1256', 'cp1257', 'cp1258',
#'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
#'iso8859_3', 'iso8859_6', 'iso8859_7', 'koi8_t', 'kz1048',
#'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
### These fail the round-trip:
#'cp1006', 'cp875', 'iso8859_8',
):
self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print('abc', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc\n', file=out)
print('abc\n', end=' ', file=out)
print('abc\n', end=' ', file=out)
print('def\n', file=out)
print('def\n', file=out)
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
# Make sure __str__() works properly
class ObjectToStr:
def __str__(self):
return "foo"
class StrSubclassToStr(str):
def __str__(self):
return "foo"
class StrSubclassToStrSubclass(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
self.assertEqual(str(ObjectToStr()), "foo")
self.assertEqual(str(StrSubclassToStr("bar")), "foo")
s = str(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclassToStrSubclass)
s = StrSubclass(StrSubclassToStrSubclass("foo"))
self.assertEqual(s, "foofoo")
self.assertIs(type(s), StrSubclass)
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
@unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
@support.cpython_only
def test_expandtabs_optimization(self):
s = 'abc'
self.assertIs(s.expandtabs(), s)
def test_raiseMemError(self):
if struct.calcsize('P') == 8:
# 64 bits pointers
ascii_struct_size = 48
compact_struct_size = 72
else:
# 32 bits pointers
ascii_struct_size = 24
compact_struct_size = 36
for char in ('a', '\xe9', '\u20ac', '\U0010ffff'):
code = ord(char)
if code < 0x100:
char_size = 1 # sizeof(Py_UCS1)
struct_size = ascii_struct_size
elif code < 0x10000:
char_size = 2 # sizeof(Py_UCS2)
struct_size = compact_struct_size
else:
char_size = 4 # sizeof(Py_UCS4)
struct_size = compact_struct_size
# Note: sys.maxsize is half of the actual max allocation because of
# the signedness of Py_ssize_t. Strings of maxlen-1 should in principle
# be allocatable, given enough memory.
maxlen = ((sys.maxsize - struct_size) // char_size)
alloc = lambda: char * maxlen
self.assertRaises(MemoryError, alloc)
self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
def test_subclass_add(self):
class S(str):
def __add__(self, o):
return "3"
self.assertEqual(S("4") + S("5"), "3")
class S(str):
def __iadd__(self, o):
return "3"
s = S("1")
s += "4"
self.assertEqual(s, "3")
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
def test_resize(self):
for length in range(1, 100, 7):
# generate a fresh string (refcount=1)
text = 'a' * length + 'b'
with support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
# fill wstr internal field
abc = text.encode('unicode_internal')
self.assertEqual(abc.decode('unicode_internal'), text)
# resize text: wstr field must be cleared and then recomputed
text += 'c'
abcdef = text.encode('unicode_internal')
self.assertNotEqual(abc, abcdef)
self.assertEqual(abcdef.decode('unicode_internal'), text)
def test_compare(self):
# Issue #17615
N = 10
ascii = 'a' * N
ascii2 = 'z' * N
latin = '\x80' * N
latin2 = '\xff' * N
bmp = '\u0100' * N
bmp2 = '\uffff' * N
astral = '\U00100000' * N
astral2 = '\U0010ffff' * N
strings = (
ascii, ascii2,
latin, latin2,
bmp, bmp2,
astral, astral2)
for text1, text2 in itertools.combinations(strings, 2):
equal = (text1 is text2)
self.assertEqual(text1 == text2, equal)
self.assertEqual(text1 != text2, not equal)
if equal:
self.assertTrue(text1 <= text2)
self.assertTrue(text1 >= text2)
# text1 is text2: duplicate strings to skip the "str1 == str2"
# optimization in unicode_compare_eq() and really compare
# character per character
copy1 = duplicate_string(text1)
copy2 = duplicate_string(text2)
self.assertIsNot(copy1, copy2)
self.assertTrue(copy1 == copy2)
self.assertFalse(copy1 != copy2)
self.assertTrue(copy1 <= copy2)
self.assertTrue(copy2 >= copy2)
self.assertTrue(ascii < ascii2)
self.assertTrue(ascii < latin)
self.assertTrue(ascii < bmp)
self.assertTrue(ascii < astral)
self.assertFalse(ascii >= ascii2)
self.assertFalse(ascii >= latin)
self.assertFalse(ascii >= bmp)
self.assertFalse(ascii >= astral)
self.assertFalse(latin < ascii)
self.assertTrue(latin < latin2)
self.assertTrue(latin < bmp)
self.assertTrue(latin < astral)
self.assertTrue(latin >= ascii)
self.assertFalse(latin >= latin2)
self.assertFalse(latin >= bmp)
self.assertFalse(latin >= astral)
self.assertFalse(bmp < ascii)
self.assertFalse(bmp < latin)
self.assertTrue(bmp < bmp2)
self.assertTrue(bmp < astral)
self.assertTrue(bmp >= ascii)
self.assertTrue(bmp >= latin)
self.assertFalse(bmp >= bmp2)
self.assertFalse(bmp >= astral)
self.assertFalse(astral < ascii)
self.assertFalse(astral < latin)
self.assertFalse(astral < bmp2)
self.assertTrue(astral < astral2)
self.assertTrue(astral >= ascii)
self.assertTrue(astral >= latin)
self.assertTrue(astral >= bmp2)
self.assertFalse(astral >= astral2)
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, str)
support.check_free_after_iterating(self, reversed, str)
class CAPITest(unittest.TestCase):
# Test PyUnicode_FromFormat()
def test_from_format(self):
support.import_module('ctypes')
from ctypes import (
pythonapi, py_object, sizeof,
c_int, c_long, c_longlong, c_ssize_t,
c_uint, c_ulong, c_ulonglong, c_size_t, c_void_p)
name = "PyUnicode_FromFormat"
_PyUnicode_FromFormat = getattr(pythonapi, name)
_PyUnicode_FromFormat.restype = py_object
def PyUnicode_FromFormat(format, *args):
cargs = tuple(
py_object(arg) if isinstance(arg, str) else arg
for arg in args)
return _PyUnicode_FromFormat(format, *cargs)
def check_format(expected, format, *args):
text = PyUnicode_FromFormat(format, *args)
self.assertEqual(expected, text)
# ascii format, non-ascii argument
check_format('ascii\x7f=unicode\xe9',
b'ascii\x7f=%U', 'unicode\xe9')
# non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
# raises an error
self.assertRaisesRegex(ValueError,
r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
'string, got a non-ASCII byte: 0xe9$',
PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')
# test "%c"
check_format('\uabcd',
b'%c', c_int(0xabcd))
check_format('\U0010ffff',
b'%c', c_int(0x10ffff))
with self.assertRaises(OverflowError):
PyUnicode_FromFormat(b'%c', c_int(0x110000))
# Issue #18183
check_format('\U00010000\U00100000',
b'%c%c', c_int(0x10000), c_int(0x100000))
# test "%"
check_format('%',
b'%')
check_format('%',
b'%%')
check_format('%s',
b'%%s')
check_format('[%]',
b'[%%]')
check_format('%abc',
b'%%%s', b'abc')
# truncated string
check_format('abc',
b'%.3s', b'abcdef')
check_format('abc[\ufffd',
b'%.5s', 'abc[\u20ac]'.encode('utf8'))
check_format("'\\u20acABC'",
b'%A', '\u20acABC')
check_format("'\\u20",
b'%.5A', '\u20acABCDEF')
check_format("'\u20acABC'",
b'%R', '\u20acABC')
check_format("'\u20acA",
b'%.3R', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3S', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3U', '\u20acABCDEF')
check_format('\u20acAB',
b'%.3V', '\u20acABCDEF', None)
check_format('abc[\ufffd',
b'%.5V', None, 'abc[\u20ac]'.encode('utf8'))
        # the following tests come from #7330
# test width modifier and precision modifier with %S
check_format("repr= abc",
b'repr=%5S', 'abc')
check_format("repr=ab",
b'repr=%.2S', 'abc')
check_format("repr= ab",
b'repr=%5.2S', 'abc')
# test width modifier and precision modifier with %R
check_format("repr= 'abc'",
b'repr=%8R', 'abc')
check_format("repr='ab",
b'repr=%.3R', 'abc')
check_format("repr= 'ab",
b'repr=%5.3R', 'abc')
# test width modifier and precision modifier with %A
check_format("repr= 'abc'",
b'repr=%8A', 'abc')
check_format("repr='ab",
b'repr=%.3A', 'abc')
check_format("repr= 'ab",
b'repr=%5.3A', 'abc')
# test width modifier and precision modifier with %s
check_format("repr= abc",
b'repr=%5s', b'abc')
check_format("repr=ab",
b'repr=%.2s', b'abc')
check_format("repr= ab",
b'repr=%5.2s', b'abc')
# test width modifier and precision modifier with %U
check_format("repr= abc",
b'repr=%5U', 'abc')
check_format("repr=ab",
b'repr=%.2U', 'abc')
check_format("repr= ab",
b'repr=%5.2U', 'abc')
# test width modifier and precision modifier with %V
check_format("repr= abc",
b'repr=%5V', 'abc', b'123')
check_format("repr=ab",
b'repr=%.2V', 'abc', b'123')
check_format("repr= ab",
b'repr=%5.2V', 'abc', b'123')
check_format("repr= 123",
b'repr=%5V', None, b'123')
check_format("repr=12",
b'repr=%.2V', None, b'123')
check_format("repr= 12",
b'repr=%5.2V', None, b'123')
# test integer formats (%i, %d, %u)
check_format('010',
b'%03i', c_int(10))
check_format('0010',
b'%0.4i', c_int(10))
check_format('-123',
b'%i', c_int(-123))
check_format('-123',
b'%li', c_long(-123))
check_format('-123',
b'%lli', c_longlong(-123))
check_format('-123',
b'%zi', c_ssize_t(-123))
check_format('-123',
b'%d', c_int(-123))
check_format('-123',
b'%ld', c_long(-123))
check_format('-123',
b'%lld', c_longlong(-123))
check_format('-123',
b'%zd', c_ssize_t(-123))
check_format('123',
b'%u', c_uint(123))
check_format('123',
b'%lu', c_ulong(123))
check_format('123',
b'%llu', c_ulonglong(123))
check_format('123',
b'%zu', c_size_t(123))
# test long output
min_longlong = -(2 ** (8 * sizeof(c_longlong) - 1))
max_longlong = -min_longlong - 1
check_format(str(min_longlong),
b'%lld', c_longlong(min_longlong))
check_format(str(max_longlong),
b'%lld', c_longlong(max_longlong))
max_ulonglong = 2 ** (8 * sizeof(c_ulonglong)) - 1
check_format(str(max_ulonglong),
b'%llu', c_ulonglong(max_ulonglong))
PyUnicode_FromFormat(b'%p', c_void_p(-1))
# test padding (width and/or precision)
check_format('123'.rjust(10, '0'),
b'%010i', c_int(123))
check_format('123'.rjust(100),
b'%100i', c_int(123))
check_format('123'.rjust(100, '0'),
b'%.100i', c_int(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80i', c_int(123))
check_format('123'.rjust(10, '0'),
b'%010u', c_uint(123))
check_format('123'.rjust(100),
b'%100u', c_uint(123))
check_format('123'.rjust(100, '0'),
b'%.100u', c_uint(123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80u', c_uint(123))
check_format('123'.rjust(10, '0'),
b'%010x', c_int(0x123))
check_format('123'.rjust(100),
b'%100x', c_int(0x123))
check_format('123'.rjust(100, '0'),
b'%.100x', c_int(0x123))
check_format('123'.rjust(80, '0').rjust(100),
b'%100.80x', c_int(0x123))
# test %A
check_format(r"%A:'abc\xe9\uabcd\U0010ffff'",
b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')
# test %V
check_format('repr=abc',
b'repr=%V', 'abc', b'xyz')
# Test string decode from parameter of %s using utf-8.
# b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
# '\u4eba\u6c11'
check_format('repr=\u4eba\u6c11',
b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')
#Test replace error handler.
check_format('repr=abc\ufffd',
b'repr=%V', None, b'abc\xff')
# not supported: copy the raw format string. these tests are just here
# to check for crashes and should not be considered as specifications
check_format('%s',
b'%1%s', b'abc')
check_format('%1abc',
b'%1abc')
check_format('%+i',
b'%+i', c_int(10))
check_format('%.%s',
b'%.%s', b'abc')
# Test PyUnicode_AsWideChar()
@support.cpython_only
def test_aswidechar(self):
from _testcapi import unicode_aswidechar
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidechar('abcdef', 2)
self.assertEqual(size, 2)
self.assertEqual(wchar, 'ab')
wchar, size = unicode_aswidechar('abc', 3)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc')
wchar, size = unicode_aswidechar('abc', 4)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc', 10)
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidechar('abc\0def', 20)
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
buflen = 3
nchar = 2
else: # sizeof(c_wchar) == 4
buflen = 2
nchar = 1
wchar, size = unicode_aswidechar(nonbmp, buflen)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
@support.cpython_only
def test_aswidecharstring(self):
from _testcapi import unicode_aswidecharstring
support.import_module('ctypes')
from ctypes import c_wchar, sizeof
wchar, size = unicode_aswidecharstring('abc')
self.assertEqual(size, 3)
self.assertEqual(wchar, 'abc\0')
wchar, size = unicode_aswidecharstring('abc\0def')
self.assertEqual(size, 7)
self.assertEqual(wchar, 'abc\0def\0')
nonbmp = chr(0x10ffff)
if sizeof(c_wchar) == 2:
nchar = 2
else: # sizeof(c_wchar) == 4
nchar = 1
wchar, size = unicode_aswidecharstring(nonbmp)
self.assertEqual(size, nchar)
self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsUCS4()
@support.cpython_only
def test_asucs4(self):
from _testcapi import unicode_asucs4
for s in ['abc', '\xa1\xa2', '\u4f60\u597d', 'a\U0001f600',
'a\ud800b\udfffc', '\ud834\udd1e']:
l = len(s)
self.assertEqual(unicode_asucs4(s, l, 1), s+'\0')
self.assertEqual(unicode_asucs4(s, l, 0), s+'\uffff')
self.assertEqual(unicode_asucs4(s, l+1, 1), s+'\0\uffff')
self.assertEqual(unicode_asucs4(s, l+1, 0), s+'\0\uffff')
self.assertRaises(SystemError, unicode_asucs4, s, l-1, 1)
self.assertRaises(SystemError, unicode_asucs4, s, l-2, 0)
s = '\0'.join([s, s])
self.assertEqual(unicode_asucs4(s, len(s), 1), s+'\0')
self.assertEqual(unicode_asucs4(s, len(s), 0), s+'\uffff')
# Test PyUnicode_FindChar()
@support.cpython_only
def test_findchar(self):
from _testcapi import unicode_findchar
for str in "\xa1", "\u8000\u8080", "\ud800\udc02", "\U0001f100\U0001f1f1":
for i, ch in enumerate(str):
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), 1), i)
self.assertEqual(unicode_findchar(str, ord(ch), 0, len(str), -1), i)
str = "!>_<!"
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), 1), -1)
self.assertEqual(unicode_findchar(str, 0x110000, 0, len(str), -1), -1)
# start < end
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, 1), 4)
self.assertEqual(unicode_findchar(str, ord('!'), 1, len(str)+1, -1), 4)
# start >= end
self.assertEqual(unicode_findchar(str, ord('!'), 0, 0, 1), -1)
self.assertEqual(unicode_findchar(str, ord('!'), len(str), 0, 1), -1)
# negative
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, 1), 0)
self.assertEqual(unicode_findchar(str, ord('!'), -len(str), -1, -1), 0)
# Test PyUnicode_CopyCharacters()
@support.cpython_only
def test_copycharacters(self):
from _testcapi import unicode_copycharacters
strings = [
'abcde', '\xa1\xa2\xa3\xa4\xa5',
'\u4f60\u597d\u4e16\u754c\uff01',
'\U0001f600\U0001f601\U0001f602\U0001f603\U0001f604'
]
for idx, from_ in enumerate(strings):
# wide -> narrow: exceed maxchar limitation
for to in strings[:idx]:
self.assertRaises(
SystemError,
unicode_copycharacters, to, 0, from_, 0, 5
)
# same kind
for from_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, 0, from_, from_start, 5),
(from_[from_start:from_start+5].ljust(5, '\0'),
5-from_start)
)
for to_start in range(5):
self.assertEqual(
unicode_copycharacters(from_, to_start, from_, to_start, 5),
(from_[to_start:to_start+5].rjust(5, '\0'),
5-to_start)
)
# narrow -> wide
# Tests omitted since this creates invalid strings.
s = strings[0]
self.assertRaises(IndexError, unicode_copycharacters, s, 6, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, -1, s, 0, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, 6, 5)
self.assertRaises(IndexError, unicode_copycharacters, s, 0, s, -1, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 1, s, 0, 5)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, s, 0, -1)
self.assertRaises(SystemError, unicode_copycharacters, s, 0, b'', 0, 0)
@support.cpython_only
def test_encode_decimal(self):
from _testcapi import unicode_encodedecimal
self.assertEqual(unicode_encodedecimal('123'),
b'123')
self.assertEqual(unicode_encodedecimal('\u0663.\u0661\u0664'),
b'3.14')
self.assertEqual(unicode_encodedecimal("\N{EM SPACE}3.14\N{EN SPACE}"),
b' 3.14 ')
self.assertRaises(UnicodeEncodeError,
unicode_encodedecimal, "123\u20ac", "strict")
self.assertRaisesRegex(
ValueError,
"^'decimal' codec can't encode character",
unicode_encodedecimal, "123\u20ac", "replace")
@support.cpython_only
def test_transform_decimal(self):
from _testcapi import unicode_transformdecimaltoascii as transform_decimal
self.assertEqual(transform_decimal('123'),
'123')
self.assertEqual(transform_decimal('\u0663.\u0661\u0664'),
'3.14')
self.assertEqual(transform_decimal("\N{EM SPACE}3.14\N{EN SPACE}"),
"\N{EM SPACE}3.14\N{EN SPACE}")
self.assertEqual(transform_decimal('123\u20ac'),
'123\u20ac')
@support.cpython_only
def test_pep393_utf8_caching_bug(self):
# Issue #25709: Problem with string concatenation and utf-8 cache
from _testcapi import getargs_s_hash
for k in 0x24, 0xa4, 0x20ac, 0x1f40d:
s = ''
for i in range(5):
# Due to CPython specific optimization the 's' string can be
# resized in-place.
s += chr(k)
# Parsing with the "s#" format code calls indirectly
# PyUnicode_AsUTF8AndSize() which creates the UTF-8
# encoded string cached in the Unicode object.
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
# Check that the second call returns the same result
self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1))
class StringModuleTest(unittest.TestCase):
def test_formatter_parser(self):
def parse(format):
return list(_string.formatter_parser(format))
formatter = parse("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}")
self.assertEqual(formatter, [
('prefix ', '2', '', 's'),
('xxx', '0', '^+10.3f', None),
('', 'obj.attr', '', 's'),
(' ', 'z[0]', '10', 's'),
])
formatter = parse("prefix {} suffix")
self.assertEqual(formatter, [
('prefix ', '', '', None),
(' suffix', None, None, None),
])
formatter = parse("str")
self.assertEqual(formatter, [
('str', None, None, None),
])
formatter = parse("")
self.assertEqual(formatter, [])
formatter = parse("{0}")
self.assertEqual(formatter, [
('', '0', '', None),
])
self.assertRaises(TypeError, _string.formatter_parser, 1)
def test_formatter_field_name_split(self):
def split(name):
items = list(_string.formatter_field_name_split(name))
items[1] = list(items[1])
return items
self.assertEqual(split("obj"), ["obj", []])
self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
self.assertEqual(split("obj.arg[key1][key2]"), [
"obj",
[(True, 'arg'),
(False, 'key1'),
(False, 'key2'),
]])
self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
if __name__ == "__main__":
unittest.main()
| [
[
[
165,
172
],
[
131486,
131493
],
[
132127,
132134
],
[
130638,
130645
],
[
131613,
131620
]
],
[
[
180,
186
],
[
894,
900
]
],
[
[
194,
203
],
[
111512,
111521
]
],
[
[
211,
219
],
[
66775,
66783
],
[
66892,
66900
],
[
67009,
67017
],
[
67132,
67140
],
[
67194,
67202
]
],
[
[
227,
233
],
[
107977,
107983
],
[
108364,
108370
]
],
[
[
241,
247
]
],
[
[
255,
258
],
[
23861,
23864
],
[
42053,
42056
],
[
107950,
107953
],
[
2633,
2636
],
[
5138,
5141
],
[
23989,
23992
],
[
28508,
28511
],
[
60668,
60671
],
[
60854,
60857
],
[
61053,
61056
],
[
63302,
63305
],
[
68693,
68696
],
[
69576,
69579
],
[
71726,
71729
],
[
72124,
72127
],
[
108175,
108178
],
[
109313,
109316
]
],
[
[
266,
274
],
[
1399,
1407
],
[
23845,
23853
],
[
42033,
42041
],
[
107934,
107942
],
[
113807,
113815
],
[
130530,
130538
],
[
132199,
132207
]
],
[
[
282,
290
],
[
25548,
25556
],
[
101182,
101190
],
[
101288,
101296
],
[
101939,
101947
],
[
102029,
102037
]
],
[
[
308,
315
],
[
25298,
25305
],
[
42110,
42117
],
[
69243,
69250
],
[
69984,
69991
],
[
108194,
108201
],
[
122571,
122578
],
[
123780,
123787
],
[
124568,
124575
],
[
125424,
125431
],
[
126570,
126577
],
[
128388,
128395
],
[
129099,
129106
],
[
129658,
129665
],
[
25510,
25517
],
[
110472,
110479
],
[
113674,
113681
],
[
113734,
113741
],
[
113902,
113909
],
[
122680,
122687
],
[
123901,
123908
]
],
[
[
317,
329
],
[
1272,
1284
],
[
1305,
1317
],
[
1357,
1369
],
[
7847,
7859
],
[
9286,
9298
],
[
11096,
11108
],
[
12802,
12814
],
[
14308,
14320
],
[
19504,
19516
],
[
20250,
20262
],
[
21003,
21015
],
[
21907,
21919
],
[
22811,
22823
],
[
23225,
23237
],
[
23530,
23542
],
[
24138,
24150
],
[
35858,
35870
],
[
37463,
37475
],
[
38300,
38312
],
[
40208,
40220
],
[
41673,
41685
],
[
62543,
62555
]
],
[
[
373,
388
],
[
910,
925
]
],
[
[
932,
948
],
[
112032,
112048
],
[
112080,
112096
]
],
[
[
1226,
1237
],
[
70495,
70506
],
[
107097,
107108
],
[
107211,
107222
]
],
[
[
1260,
1271
]
],
[
[
113798,
113806
]
],
[
[
130513,
130529
]
]
] |
#!/usr/bin/env python
# Copyright 2016 Tesora, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import json
from collections import OrderedDict
import io
import os
import re
import sys
from pylint import lint
from pylint.reporters import text
DEFAULT_CONFIG_FILE = "tools/trove-pylint.config"
DEFAULT_IGNORED_FILES = ['trove/tests']
DEFAULT_IGNORED_CODES = []
DEFAULT_IGNORED_MESSAGES = []
DEFAULT_ALWAYS_ERROR = [
"Undefined variable '_'",
"Undefined variable '_LE'",
"Undefined variable '_LI'",
"Undefined variable '_LW'",
"Undefined variable '_LC'"]
MODE_CHECK = "check"
MODE_REBUILD = "rebuild"
class Config(object):
def __init__(self, filename=DEFAULT_CONFIG_FILE):
self.default_config = {
"include": ["*.py"],
"folder": "trove",
"options": ["--rcfile=./pylintrc", "-E"],
"ignored_files": DEFAULT_IGNORED_FILES,
"ignored_codes": DEFAULT_IGNORED_CODES,
"ignored_messages": DEFAULT_IGNORED_MESSAGES,
"ignored_file_codes": [],
"ignored_file_messages": [],
"ignored_file_code_messages": [],
"always_error_messages": DEFAULT_ALWAYS_ERROR
}
self.config = self.default_config
def sort_config(self):
sorted_config = OrderedDict()
for key in sorted(self.config.keys()):
value = self.get(key)
if isinstance(value, list) and not isinstance(value,str):
sorted_config[key] = sorted(value)
else:
sorted_config[key] = value
return sorted_config
def save(self, filename=DEFAULT_CONFIG_FILE):
if os.path.isfile(filename):
os.rename(filename, "%s~" % filename)
with open(filename, 'w') as fp:
json.dump(self.sort_config(), fp, encoding="utf-8",
indent=2, separators=(',', ': '))
def load(self, filename=DEFAULT_CONFIG_FILE):
with open(filename) as fp:
self.config = json.load(fp, encoding="utf-8")
def get(self, attribute):
return self.config[attribute]
def is_file_ignored(self, f):
if any(f.startswith(i)
for i in self.config['ignored_files']):
return True
return False
def is_file_included(self, f):
if any(fnmatch.fnmatch(f, wc) for wc in self.config['include']):
return True
return False
def is_always_error(self, message):
if message in self.config['always_error_messages']:
return True
return False
def ignore(self, filename, code, codename, message):
# the high priority checks
if self.is_file_ignored(filename):
return True
        # always-error messages are never ignored
if self.is_always_error(message):
return False
if code in self.config['ignored_codes']:
return True
if codename in self.config['ignored_codes']:
return True
if message and any(message.startswith(ignore_message)
for ignore_message
in self.config['ignored_messages']):
return True
if filename and message and (
[filename, message] in self.config['ignored_file_messages']):
return True
if filename and code and (
[filename, code] in self.config['ignored_file_codes']):
return True
if filename and codename and (
[filename, codename] in self.config['ignored_file_codes']):
return True
for fcm in self.config['ignored_file_code_messages']:
if filename != fcm[0]:
# This ignore rule is for a different file.
continue
if fcm[1] not in (code, codename):
# This ignore rule is for a different code or codename.
continue
if message.startswith(fcm[2]):
return True
return False
def ignore_code(self, c):
_c = set(self.config['ignored_codes'])
_c.add(c)
self.config['ignored_codes'] = list(_c)
def ignore_files(self, f):
_c = set(self.config['ignored_files'])
_c.add(f)
self.config['ignored_files'] = list(_c)
def ignore_message(self, m):
_c = set(self.config['ignored_messages'])
_c.add(m)
self.config['ignored_messages'] = list(_c)
def ignore_file_code(self, f, c):
_c = set(self.config['ignored_file_codes'])
_c.add((f, c))
self.config['ignored_file_codes'] = list(_c)
def ignore_file_message(self, f, m):
_c = set(self.config['ignored_file_messages'])
_c.add((f, m))
self.config['ignored_file_messages'] = list(_c)
def ignore_file_code_message(self, f, c, m, fn):
_c = set(self.config['ignored_file_code_messages'])
_c.add((f, c, m, fn))
self.config['ignored_file_code_messages'] = list(_c)
def main():
if len(sys.argv) == 1 or sys.argv[1] == "check":
return check()
elif sys.argv[1] == "rebuild":
return rebuild()
elif sys.argv[1] == "initialize":
return initialize()
else:
return usage()
def usage():
print("Usage: %s [check|rebuild]" % sys.argv[0])
print("\tUse this tool to perform a lint check of the trove project.")
print("\t check: perform the lint check.")
print("\t rebuild: rebuild the list of exceptions to ignore.")
return 0
class ParseableTextReporter(text.TextReporter):
name = 'parseable'
line_format = '{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'
# that's it folks
class LintRunner(object):
def __init__(self):
self.config = Config()
self.idline = re.compile("^[*]* Module .*")
self.detail = re.compile(r"(\S+):(\d+): \[(\S+)\((\S+)\),"
r" (\S+)?] (.*)")
def dolint(self, filename):
exceptions = set()
buffer = io.StringIO()
reporter = ParseableTextReporter(output=buffer)
options = list(self.config.get('options'))
options.append(filename)
lint.Run(options, reporter=reporter, exit=False)
output = buffer.getvalue()
buffer.close()
for line in output.splitlines():
if self.idline.match(line):
continue
if self.detail.match(line):
mo = self.detail.search(line)
tokens = mo.groups()
fn = tokens[0]
ln = tokens[1]
code = tokens[2]
codename = tokens[3]
func = tokens[4]
message = tokens[5]
if not self.config.ignore(fn, code, codename, message):
exceptions.add((fn, ln, code, codename, func, message))
return exceptions
def process(self, mode=MODE_CHECK):
files_processed = 0
files_with_errors = 0
errors_recorded = 0
exceptions_recorded = 0
all_exceptions = []
for (root, dirs, files) in os.walk(self.config.get('folder')):
# if we shouldn't even bother about this part of the
# directory structure, we can punt quietly
if self.config.is_file_ignored(root):
continue
# since we are walking top down, let's clean up the dirs
# that we will walk by eliminating any dirs that will
# end up getting ignored
            # iterate over a copy: dirs is mutated below, and removing from a
            # list while iterating over it would skip entries
            for d in list(dirs):
p = os.path.join(root, d)
if self.config.is_file_ignored(p):
dirs.remove(d)
# check if we can ignore the file and process if not
for f in files:
p = os.path.join(root, f)
if self.config.is_file_ignored(p):
continue
if not self.config.is_file_included(f):
continue
files_processed += 1
exceptions = self.dolint(p)
file_had_errors = 0
for e in exceptions:
                    # what we do with this exception depends on the
# kind of exception, and the mode
if self.config.is_always_error(e[5]):
all_exceptions.append(e)
errors_recorded += 1
file_had_errors += 1
elif mode == MODE_REBUILD:
# parameters to ignore_file_code_message are
# filename, code, message and function
self.config.ignore_file_code_message(e[0], e[2], e[-1], e[4])
self.config.ignore_file_code_message(e[0], e[3], e[-1], e[4])
exceptions_recorded += 1
elif mode == MODE_CHECK:
all_exceptions.append(e)
errors_recorded += 1
file_had_errors += 1
if file_had_errors:
files_with_errors += 1
for e in sorted(all_exceptions):
print("ERROR: %s %s: %s %s, %s: %s" %
(e[0], e[1], e[2], e[3], e[4], e[5]))
return (files_processed, files_with_errors, errors_recorded,
exceptions_recorded)
def rebuild(self):
self.initialize()
(files_processed,
files_with_errors,
errors_recorded,
exceptions_recorded) = self.process(mode=MODE_REBUILD)
if files_with_errors > 0:
print("Rebuild failed. %s files processed, %s had errors, "
"%s errors recorded." % (
files_processed, files_with_errors, errors_recorded))
return 1
self.config.save()
print("Rebuild completed. %s files processed, %s exceptions recorded." %
(files_processed, exceptions_recorded))
return 0
def check(self):
self.config.load()
(files_processed,
files_with_errors,
errors_recorded,
exceptions_recorded) = self.process(mode=MODE_CHECK)
if files_with_errors > 0:
print("Check failed. %s files processed, %s had errors, "
"%s errors recorded." % (
files_processed, files_with_errors, errors_recorded))
return 1
print("Check succeeded. %s files processed" % files_processed)
return 0
def initialize(self):
self.config.save()
return 0
def check():
exit(LintRunner().check())
def rebuild():
exit(LintRunner().rebuild())
def initialize():
exit(LintRunner().initialize())
if __name__ == "__main__":
main()
| [
[
[
608,
615
],
[
2854,
2861
]
],
[
[
623,
627
],
[
2318,
2322
],
[
2538,
2542
]
],
[
[
652,
663
],
[
1820,
1831
]
],
[
[
671,
673
],
[
6553,
6555
]
],
[
[
681,
683
],
[
2189,
2191
],
[
2227,
2229
],
[
7654,
7656
],
[
8105,
8107
],
[
8327,
8329
]
],
[
[
691,
693
],
[
6327,
6329
],
[
6379,
6381
]
],
[
[
701,
704
],
[
5561,
5564
],
[
5579,
5582
],
[
5635,
5638
],
[
5695,
5698
],
[
5839,
5842
]
],
[
[
725,
729
],
[
6715,
6719
]
],
[
[
759,
763
],
[
6087,
6091
]
],
[
[
765,
784
],
[
1197,
1216
],
[
2156,
2175
],
[
2455,
2474
]
],
[
[
815,
836
],
[
1399,
1420
]
],
[
[
855,
876
],
[
1451,
1472
]
],
[
[
882,
906
],
[
1506,
1530
]
],
[
[
912,
932
],
[
1694,
1714
]
],
[
[
1096,
1106
],
[
7459,
7469
],
[
9423,
9433
],
[
10711,
10721
]
],
[
[
1117,
1129
],
[
9023,
9035
],
[
10089,
10101
]
],
[
[
1149,
1155
],
[
6296,
6302
]
],
[
[
5542,
5546
],
[
11310,
11314
]
],
[
[
5790,
5795
],
[
5777,
5782
]
],
[
[
6065,
6086
],
[
6586,
6607
]
],
[
[
6230,
6240
],
[
11152,
11162
],
[
11199,
11209
],
[
11251,
11261
]
],
[
[
11134,
11139
],
[
5618,
5623
]
],
[
[
11179,
11186
],
[
5676,
5683
]
],
[
[
11228,
11238
],
[
5739,
5749
]
]
] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from retrograph.modeling import modeling_adapter as modeling
from retrograph.modeling import optimization_adapter as optimization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
# and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=20,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| [
[
[
706,
721
]
],
[
[
745,
753
]
],
[
[
777,
791
]
],
[
[
800,
802
],
[
18290,
18292
]
],
[
[
835,
863
],
[
9512,
9520
],
[
9590,
9598
],
[
9686,
9694
],
[
11337,
11345
],
[
12086,
12094
],
[
15745,
15753
],
[
4663,
4671
],
[
5554,
5562
]
],
[
[
896,
932
],
[
6281,
6293
]
],
[
[
940,
956
],
[
966,
968
],
[
2898,
2900
],
[
3106,
3108
],
[
3318,
3320
],
[
3537,
3539
],
[
18778,
18780
],
[
9184,
9186
],
[
9356,
9358
],
[
9409,
9411
],
[
9857,
9859
],
[
9957,
9959
],
[
9994,
9996
],
[
10065,
10067
],
[
10117,
10119
],
[
10169,
10171
],
[
10217,
10219
],
[
10271,
10273
],
[
10338,
10340
],
[
10626,
10628
],
[
10695,
10697
],
[
10761,
10763
],
[
11166,
11168
],
[
11230,
11232
],
[
11415,
11417
],
[
11478,
11480
],
[
11516,
11518
],
[
11587,
11589
],
[
11639,
11641
],
[
11687,
11689
],
[
11733,
11735
],
[
11767,
11769
],
[
11803,
11805
],
[
11865,
11867
],
[
12256,
12258
],
[
12274,
12276
],
[
12304,
12306
],
[
12356,
12358
],
[
12424,
12426
],
[
12540,
12542
],
[
15234,
15236
],
[
15468,
15470
],
[
15488,
15490
],
[
15560,
15562
],
[
15585,
15587
],
[
15807,
15809
],
[
15938,
15940
],
[
15971,
15973
],
[
16047,
16049
],
[
16181,
16183
],
[
16318,
16320
],
[
16380,
16382
],
[
16609,
16611
],
[
17203,
17205
],
[
17429,
17431
],
[
17481,
17483
],
[
17851,
17853
],
[
17905,
17907
],
[
18350,
18352
],
[
18405,
18407
],
[
18497,
18499
],
[
4097,
4099
],
[
4180,
4182
],
[
4621,
4623
],
[
5385,
5387
],
[
5835,
5837
],
[
5903,
5905
],
[
6095,
6097
],
[
6235,
6237
],
[
6414,
6416
],
[
6576,
6578
],
[
8651,
8653
],
[
13032,
13034
],
[
13069,
13071
],
[
13114,
13116
],
[
13151,
13153
],
[
13197,
13199
],
[
13234,
13236
],
[
13288,
13290
],
[
13334,
13336
],
[
13382,
13384
],
[
13428,
13430
],
[
13480,
13482
],
[
13526,
13528
],
[
13583,
13585
],
[
13607,
13609
],
[
13798,
13800
],
[
13833,
13835
],
[
14220,
14222
],
[
14271,
14273
],
[
14429,
14431
],
[
14873,
14875
],
[
5679,
5681
],
[
5759,
5761
],
[
6909,
6911
],
[
7051,
7053
],
[
7116,
7118
],
[
7159,
7161
],
[
7224,
7226
],
[
7284,
7286
],
[
7349,
7351
],
[
7520,
7522
],
[
7642,
7644
],
[
7768,
7770
],
[
7837,
7839
],
[
7878,
7880
],
[
7950,
7952
],
[
8085,
8087
]
],
[
[
958,
963
],
[
984,
989
],
[
1020,
1025
],
[
1191,
1196
],
[
1303,
1308
],
[
1442,
1447
],
[
1560,
1565
],
[
1823,
1828
],
[
1977,
1982
],
[
2043,
2048
],
[
2119,
2124
],
[
2199,
2204
],
[
2273,
2278
],
[
2355,
2360
],
[
2433,
2438
],
[
2509,
2514
],
[
2627,
2632
],
[
2749,
2754
],
[
2827,
2832
],
[
3614,
3619
],
[
18640,
18645
],
[
18684,
18689
],
[
18734,
18739
]
],
[
[
976,
981
],
[
15612,
15617
],
[
15635,
15640
],
[
15780,
15785
],
[
15825,
15830
],
[
15886,
15891
],
[
16120,
16125
],
[
16138,
16143
],
[
16237,
16242
],
[
16258,
16263
],
[
16282,
16287
],
[
16455,
16460
],
[
16485,
16490
],
[
16532,
16537
],
[
16665,
16670
],
[
16713,
16718
],
[
16871,
16876
],
[
16914,
16919
],
[
16957,
16962
],
[
17003,
17008
],
[
17041,
17046
],
[
17085,
17090
],
[
17246,
17251
],
[
17334,
17339
],
[
17380,
17385
],
[
17409,
17414
],
[
17518,
17523
],
[
17637,
17642
],
[
17691,
17696
],
[
17803,
17808
],
[
17832,
17837
],
[
17942,
17947
],
[
18060,
18065
],
[
18114,
18119
],
[
18244,
18249
],
[
18303,
18308
]
],
[
[
3739,
3755
],
[
16800,
16816
]
],
[
[
8942,
8962
],
[
4969,
4989
]
],
[
[
10884,
10908
],
[
5221,
5245
]
],
[
[
11951,
11965
],
[
9136,
9150
]
],
[
[
12617,
12633
],
[
17563,
17579
],
[
17986,
18002
]
],
[
[
15130,
15144
],
[
14931,
14945
]
],
[
[
15549,
15553
]
]
] |
def lend_money(debts, person, amount):
value = debts.get(person, 0)
quantity = [amount]
if value != 0:
debts[person] = value + quantity
else:
debts[person] = quantity
print(debts)
def amount_owed_by(debts, person):
value = debts.get(person, [0])
out = sum(value)
return out
def total_amount_owed(debts):
my_money = 0
for values in debts.values():
for numbers in values:
my_money += numbers
return my_money | [
[
[
4,
14
]
],
[
[
236,
250
]
],
[
[
348,
365
]
]
] |
from __future__ import print_function
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
from scratchML.supervised_learning import LDA
from scratchML.utils import calculate_covariance_matrix, accuracy_score
from scratchML.utils import normalize, standardize, train_test_split, Plot
from scratchML.unsupervised_learning import PCA
def main():
# Load the dataset
data = datasets.load_iris()
X = data.data
y = data.target
# Three -> two classes
X = X[y != 2]
y = y[y != 2]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# Fit and predict using LDA
lda = LDA()
lda.fit(X_train, y_train)
y_pred = lda.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy:", accuracy)
Plot().plot_in_2d(X_test, y_pred, title="LDA", accuracy=accuracy)
if __name__ == "__main__":
main()
| [
[
[
23,
37
]
],
[
[
58,
66
],
[
408,
416
]
],
[
[
74,
98
]
],
[
[
106,
117
]
],
[
[
161,
164
],
[
653,
656
]
],
[
[
193,
220
]
],
[
[
222,
236
],
[
738,
752
]
],
[
[
265,
274
]
],
[
[
276,
287
]
],
[
[
289,
305
],
[
571,
587
]
],
[
[
307,
311
],
[
808,
812
]
],
[
[
356,
359
]
],
[
[
366,
370
],
[
907,
911
]
]
] |
from setuptools import find_packages, setup
def get_version():
version = {}
with open("dagster_papertrail/version.py") as fp:
exec(fp.read(), version) # pylint: disable=W0122
return version["__version__"]
if __name__ == "__main__":
setup(
name="dagster-papertrail",
version=get_version(),
author="Elementl",
author_email="hello@elementl.com",
license="Apache-2.0",
description="Package for papertrail Dagster framework components.",
url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-papertrail",
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["test"]),
install_requires=["dagster"],
zip_safe=False,
)
| [
[
[
23,
36
],
[
945,
958
]
],
[
[
38,
43
],
[
262,
267
]
],
[
[
50,
61
],
[
320,
331
]
]
] |
# -*- coding: utf-8 -*-
#
# Django Ratelimit documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 4 15:55:31 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Ratelimit'
copyright = u'2014, James Socol'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoRatelimitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoRatelimit.tex', u'Django Ratelimit Documentation',
u'James Socol', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoratelimit', u'Django Ratelimit Documentation',
[u'James Socol'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoRatelimit', u'Django Ratelimit Documentation',
u'James Socol', 'DjangoRatelimit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
[
[
434,
437
]
],
[
[
439,
441
]
],
[
[
1046,
1056
]
],
[
[
1136,
1150
]
],
[
[
1203,
1216
]
],
[
[
1322,
1332
]
],
[
[
1385,
1392
]
],
[
[
1415,
1424
]
],
[
[
1650,
1657
]
],
[
[
1716,
1723
]
],
[
[
2203,
2219
]
],
[
[
2777,
2791
]
],
[
[
2803,
2821
]
],
[
[
3106,
3116
]
],
[
[
4190,
4206
]
],
[
[
5616,
5633
]
],
[
[
5741,
5755
]
],
[
[
6110,
6125
]
],
[
[
6975,
6984
]
],
[
[
7401,
7418
]
]
] |
import sys, os, re
from setuptools import setup, Command, find_packages
from setuptools.command.test import test
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./*.pyc ./*.egg-info')
def run_tests(*args):
from assessment.tests import run_tests
errors = run_tests()
if errors:
sys.exit(1)
else:
sys.exit(0)
test.run_tests = run_tests
NAME = "django-assess"
# get version without importing
with open("assessment/__init__.py", "rb") as f:
VERSION = str(re.search('__version__ = "(.+?)"', f.read().decode()).group(1))
# pull requirements
with open('requirements.txt', "r") as f:
INSTALL_REQUIREMENTS = f.read().splitlines()
setup(
name=NAME,
version=VERSION,
packages=find_packages(include=['assessment', 'assessment.*']),
python_requires='>=3.5, <4',
install_requires = INSTALL_REQUIREMENTS + [
'setuptools-git', # apparently needed to handle include_package_data from git repo?
],
license="MIT",
include_package_data=True, # declarations in MANIFEST.in
description=("Basic custom assessments as a reusable django app."),
long_description=open("README.rst").read(),
long_description_content_type="text/x-rst",
author="powderflask",
author_email="powderflask@gmail.com",
maintainer="powderflask",
maintainer_email="powderflask@gmail.com",
url="https://github.com/powderflask/django_assess",
download_url="https://github.com/powderflask/django_assess/archive/v{}.tar.gz".format(VERSION),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Framework :: Django',
],
cmdclass={
'clean' : CleanCommand,
},
test_suite="dummy",
)
| [
[
[
7,
10
],
[
502,
505
],
[
532,
535
]
],
[
[
12,
14
],
[
345,
347
]
],
[
[
16,
18
],
[
696,
698
]
],
[
[
43,
48
],
[
872,
877
]
],
[
[
50,
57
],
[
134,
141
]
],
[
[
59,
72
],
[
928,
941
]
],
[
[
109,
113
],
[
546,
550
]
],
[
[
121,
133
],
[
2074,
2086
]
],
[
[
393,
402
],
[
563,
572
]
],
[
[
574,
578
],
[
888,
892
]
],
[
[
675,
676
],
[
731,
732
]
],
[
[
682,
689
],
[
906,
913
],
[
1705,
1712
]
],
[
[
819,
820
],
[
849,
850
]
],
[
[
826,
846
],
[
1039,
1059
]
]
] |
import logging
import re
import pytest
import yaml
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.platform_api import chassis
from platform_api_test_base import PlatformApiTestBase
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
pytest.mark.topology('any')
]
REGEX_MAC_ADDRESS = r'^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$'
REGEX_SERIAL_NUMBER = r'^[A-Za-z0-9]+$'
# Valid OCP ONIE TlvInfo EEPROM type codes as defined here:
# https://opencomputeproject.github.io/onie/design-spec/hw_requirements.html
ONIE_TLVINFO_TYPE_CODE_PRODUCT_NAME = '0x21' # Product Name
ONIE_TLVINFO_TYPE_CODE_PART_NUMBER = '0x22' # Part Number
ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER = '0x23' # Serial Number
ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR = '0x24' # Base MAC Address
ONIE_TLVINFO_TYPE_CODE_MFR_DATE = '0x25' # Manufacture Date
ONIE_TLVINFO_TYPE_CODE_DEVICE_VERSION = '0x26' # Device Version
ONIE_TLVINFO_TYPE_CODE_LABEL_REVISION = '0x27' # Label Revision
ONIE_TLVINFO_TYPE_CODE_PLATFORM_NAME = '0x28' # Platform Name
ONIE_TLVINFO_TYPE_CODE_ONIE_VERSION = '0x29' # ONIE Version
ONIE_TLVINFO_TYPE_CODE_NUM_MACS = '0x2A' # Number of MAC Addresses
ONIE_TLVINFO_TYPE_CODE_MANUFACTURER = '0x2B' # Manufacturer
ONIE_TLVINFO_TYPE_CODE_COUNTRY_CODE = '0x2C' # Country Code
ONIE_TLVINFO_TYPE_CODE_VENDOR = '0x2D' # Vendor
ONIE_TLVINFO_TYPE_CODE_DIAG_VERSION = '0x2E' # Diag Version
ONIE_TLVINFO_TYPE_CODE_SERVICE_TAG = '0x2F' # Service Tag
ONIE_TLVINFO_TYPE_CODE_VENDOR_EXT = '0xFD' # Vendor Extension
ONIE_TLVINFO_TYPE_CODE_CRC32 = '0xFE' # CRC-32
class TestChassisApi(PlatformApiTestBase):
''' Platform API test cases for the Chassis class'''
#
# Functions to test methods inherited from DeviceBase class
#
def test_get_name(self, duthost, localhost, platform_api_conn):
name = chassis.get_name(platform_api_conn)
pytest_assert(name is not None, "Unable to retrieve chassis name")
pytest_assert(isinstance(name, str), "Chassis name appears incorrect")
def test_get_presence(self, duthost, localhost, platform_api_conn):
presence = chassis.get_presence(platform_api_conn)
pytest_assert(presence is not None, "Unable to retrieve chassis presence")
pytest_assert(isinstance(presence, bool), "Chassis presence appears incorrect")
# Chassis should always be present
pytest_assert(presence is True, "Chassis is not present")
def test_get_model(self, duthost, localhost, platform_api_conn):
model = chassis.get_model(platform_api_conn)
pytest_assert(model is not None, "Unable to retrieve chassis model")
pytest_assert(isinstance(model, str), "Chassis model appears incorrect")
def test_get_serial(self, duthost, localhost, platform_api_conn):
serial = chassis.get_serial(platform_api_conn)
pytest_assert(serial is not None, "Unable to retrieve chassis serial number")
pytest_assert(isinstance(serial, str), "Chassis serial number appears incorrect")
def test_get_status(self, duthost, localhost, platform_api_conn):
status = chassis.get_status(platform_api_conn)
pytest_assert(status is not None, "Unable to retrieve chassis status")
pytest_assert(isinstance(status, bool), "Chassis status appears incorrect")
#
# Functions to test methods defined in ChassisBase class
#
def test_get_base_mac(self, duthost, localhost, platform_api_conn):
# Ensure the base MAC address is sane
base_mac = chassis.get_base_mac(platform_api_conn)
pytest_assert(base_mac is not None, "Failed to retrieve base MAC address")
pytest_assert(re.match(REGEX_MAC_ADDRESS, base_mac), "Base MAC address appears to be incorrect")
if 'base_mac' in duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars:
expected_base_mac = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['base_mac']
pytest_assert(base_mac == expected_base_mac, "Base MAC address is incorrect")
else:
logger.warning('Inventory file does not contain base MAC address for {}'.format(duthost.hostname))
def test_get_serial_number(self, duthost, localhost, platform_api_conn):
# Ensure the serial number is sane
# Note: It appears that when retrieving some variable-length fields,
# the value is padded with trailing '\x00' bytes because the field
# length is longer than the actual value, so we strip those bytes
# here before comparing. We may want to change the EEPROM parsing
# logic to ensure that trailing '\x00' bytes are removed when retrieving
# a variable-length value.
serial = chassis.get_serial_number(platform_api_conn).rstrip('\x00')
pytest_assert(serial is not None, "Failed to retrieve serial number")
pytest_assert(re.match(REGEX_SERIAL_NUMBER, serial), "Serial number appears to be incorrect")
if 'serial' in duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars:
expected_serial = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['serial']
pytest_assert(serial == expected_serial, "Serial number is incorrect")
else:
logger.warning('Inventory file does not contain serial number for {}'.format(duthost.hostname))
def test_get_system_eeprom_info(self, duthost, localhost, platform_api_conn):
''' Test that we can retrieve sane system EEPROM info from the DUT via the platform API
'''
# OCP ONIE TlvInfo EEPROM type codes defined here: https://opencomputeproject.github.io/onie/design-spec/hw_requirements.html
VALID_ONIE_TLVINFO_TYPE_CODES_LIST = [
ONIE_TLVINFO_TYPE_CODE_PRODUCT_NAME,
ONIE_TLVINFO_TYPE_CODE_PART_NUMBER,
ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER,
ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR,
ONIE_TLVINFO_TYPE_CODE_MFR_DATE,
ONIE_TLVINFO_TYPE_CODE_DEVICE_VERSION,
ONIE_TLVINFO_TYPE_CODE_LABEL_REVISION,
ONIE_TLVINFO_TYPE_CODE_PLATFORM_NAME,
ONIE_TLVINFO_TYPE_CODE_ONIE_VERSION,
ONIE_TLVINFO_TYPE_CODE_NUM_MACS,
ONIE_TLVINFO_TYPE_CODE_MANUFACTURER,
ONIE_TLVINFO_TYPE_CODE_COUNTRY_CODE,
ONIE_TLVINFO_TYPE_CODE_VENDOR,
ONIE_TLVINFO_TYPE_CODE_DIAG_VERSION,
ONIE_TLVINFO_TYPE_CODE_SERVICE_TAG,
ONIE_TLVINFO_TYPE_CODE_VENDOR_EXT,
ONIE_TLVINFO_TYPE_CODE_CRC32
]
MINIMUM_REQUIRED_TYPE_CODES_LIST = [
ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER,
ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR,
ONIE_TLVINFO_TYPE_CODE_CRC32
]
syseeprom_info_dict = chassis.get_system_eeprom_info(platform_api_conn)
pytest_assert(syseeprom_info_dict is not None, "Failed to retrieve system EEPROM data")
pytest_assert(isinstance(syseeprom_info_dict, dict), "System EEPROM data is not in the expected format")
syseeprom_type_codes_list = syseeprom_info_dict.keys()
# Ensure that all keys in the resulting dictionary are valid ONIE TlvInfo type codes
pytest_assert(set(syseeprom_type_codes_list) <= set(VALID_ONIE_TLVINFO_TYPE_CODES_LIST), "Invalid TlvInfo type code found")
# Ensure that we were able to obtain the minimum required type codes
pytest_assert(set(MINIMUM_REQUIRED_TYPE_CODES_LIST) <= set(syseeprom_type_codes_list), "Minimum required TlvInfo type codes not provided")
# Ensure the base MAC address is sane
base_mac = syseeprom_info_dict[ONIE_TLVINFO_TYPE_CODE_BASE_MAC_ADDR]
pytest_assert(base_mac is not None, "Failed to retrieve base MAC address")
pytest_assert(re.match(REGEX_MAC_ADDRESS, base_mac), "Base MAC address appears to be incorrect")
# Ensure the serial number is sane
serial = syseeprom_info_dict[ONIE_TLVINFO_TYPE_CODE_SERIAL_NUMBER]
pytest_assert(serial is not None, "Failed to retrieve serial number")
pytest_assert(re.match(REGEX_SERIAL_NUMBER, serial), "Serial number appears to be incorrect")
if 'syseeprom_info' in duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars:
expected_syseeprom_info_dict = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['syseeprom_info']
pytest_assert(syseeprom_info_dict == expected_syseeprom_info_dict, "System EEPROM info is incorrect")
else:
logger.warning('Inventory file does not contain system EEPROM info for {}'.format(duthost.hostname))
def test_get_reboot_cause(self, duthost, localhost, platform_api_conn):
# TODO: Compare return values to potential combinations
reboot_cause = chassis.get_reboot_cause(platform_api_conn)
# Actual return value is a tuple, but since we're using the HTTP server
# to make the call and it uses JSON, the tuple is changed to a list
pytest_assert(reboot_cause is not None, "Failed to retrieve reboot cause")
pytest_assert(isinstance(reboot_cause, list) and len(reboot_cause) == 2, "Reboot cause appears to be incorrect")
def test_components(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of components and that the returned list is correct for this platform
try:
num_components = int(chassis.get_num_components(platform_api_conn))
except:
pytest.fail("num_components is not an integer")
component_list = chassis.get_all_components(platform_api_conn)
pytest_assert(component_list is not None, "Failed to retrieve components")
pytest_assert(isinstance(component_list, list) and len(component_list) == num_components, "Components appear to be incorrect")
for i in range(num_components):
component = chassis.get_component(platform_api_conn, i)
self.expect(component and component == component_list[i], "Component {} is incorrect".format(i))
self.assert_expectations()
def test_modules(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of modules and that the returned list is correct for this platform
try:
num_modules = int(chassis.get_num_modules(platform_api_conn))
except:
pytest.fail("num_modules is not an integer")
module_list = chassis.get_all_modules(platform_api_conn)
pytest_assert(module_list is not None, "Failed to retrieve modules")
pytest_assert(isinstance(module_list, list) and len(module_list) == num_modules, "Modules appear to be incorrect")
for i in range(num_modules):
module = chassis.get_module(platform_api_conn, i)
self.expect(module and module == module_list[i], "Module {} is incorrect".format(i))
self.assert_expectations()
def test_fans(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of fans and that the returned list is correct for this platform
try:
num_fans = int(chassis.get_num_fans(platform_api_conn))
except:
pytest.fail("num_fans is not an integer")
fan_list = chassis.get_all_fans(platform_api_conn)
pytest_assert(fan_list is not None, "Failed to retrieve fans")
pytest_assert(isinstance(fan_list, list) and len(fan_list) == num_fans, "Fans appear to be incorrect")
for i in range(num_fans):
fan = chassis.get_fan(platform_api_conn, i)
self.expect(fan and fan == fan_list[i], "Fan {} is incorrect".format(i))
self.assert_expectations()
def test_fan_drawers(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of fan drawers and that the returned list is correct for this platform
try:
num_fan_drawers = int(chassis.get_num_fan_drawers(platform_api_conn))
except:
pytest.fail("num_fan_drawers is not an integer")
fan_drawer_list = chassis.get_all_fan_drawers(platform_api_conn)
pytest_assert(fan_drawer_list is not None, "Failed to retrieve fan drawers")
pytest_assert(isinstance(fan_drawer_list, list) and len(fan_drawer_list) == num_fan_drawers, "Fan drawers appear to be incorrect")
for i in range(num_fan_drawers):
fan_drawer = chassis.get_fan_drawer(platform_api_conn, i)
self.expect(fan_drawer and fan_drawer == fan_drawer_list[i], "Fan drawer {} is incorrect".format(i))
self.assert_expectations()
def test_psus(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of PSUs and that the returned list is correct for this platform
try:
num_psus = int(chassis.get_num_psus(platform_api_conn))
except:
pytest.fail("num_psus is not an integer")
psu_list = chassis.get_all_psus(platform_api_conn)
pytest_assert(psu_list is not None, "Failed to retrieve PSUs")
pytest_assert(isinstance(psu_list, list) and len(psu_list) == num_psus, "PSUs appear to be incorrect")
for i in range(num_psus):
psu = chassis.get_psu(platform_api_conn, i)
self.expect(psu and psu == psu_list[i], "PSU {} is incorrect".format(i))
self.assert_expectations()
def test_thermals(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of thermals and that the returned list is correct for this platform
try:
num_thermals = int(chassis.get_num_thermals(platform_api_conn))
except:
pytest.fail("num_thermals is not an integer")
thermal_list = chassis.get_all_thermals(platform_api_conn)
pytest_assert(thermal_list is not None, "Failed to retrieve thermals")
pytest_assert(isinstance(thermal_list, list) and len(thermal_list) == num_thermals, "Thermals appear to be incorrect")
for i in range(num_thermals):
thermal = chassis.get_thermal(platform_api_conn, i)
self.expect(thermal and thermal == thermal_list[i], "Thermal {} is incorrect".format(i))
self.assert_expectations()
def test_sfps(self, duthost, localhost, platform_api_conn):
# TODO: Ensure the number of SFPs and that the returned list is correct for this platform
try:
num_sfps = int(chassis.get_num_sfps(platform_api_conn))
except:
pytest.fail("num_sfps is not an integer")
sfp_list = chassis.get_all_sfps(platform_api_conn)
pytest_assert(sfp_list is not None, "Failed to retrieve SFPs")
pytest_assert(isinstance(sfp_list, list) and len(sfp_list) == num_sfps, "SFPs appear to be incorrect")
for i in range(num_sfps):
sfp = chassis.get_sfp(platform_api_conn, i)
self.expect(sfp and sfp == sfp_list[i], "SFP {} is incorrect".format(i))
self.assert_expectations()
def test_status_led(self, duthost, localhost, platform_api_conn):
# TODO: Get a platform-specific list of available colors for the status LED
LED_COLOR_LIST = [
"off",
"red",
"amber",
"green",
]
for color in LED_COLOR_LIST:
result = chassis.set_status_led(platform_api_conn, color)
if self.expect(result is not None, "Failed to perform set_status_led"):
self.expect(result is True, "Failed to set status_led to {}".format(color))
color_actual = chassis.get_status_led(platform_api_conn)
if self.expect(color_actual is not None, "Failed to retrieve status_led"):
if self.expect(isinstance(color_actual, str), "Status LED color appears incorrect"):
self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {})".format(color, color_actual))
self.assert_expectations()
def test_get_thermal_manager(self, duthost, localhost, platform_api_conn):
thermal_mgr = chassis.get_thermal_manager(platform_api_conn)
pytest_assert(thermal_mgr is not None, "Failed to retrieve thermal manager")
def test_get_watchdog(self, duthost, localhost, platform_api_conn):
watchdog = chassis.get_watchdog(platform_api_conn)
pytest_assert(watchdog is not None, "Failed to retrieve watchdog")
def test_get_eeprom(self, duthost, localhost, platform_api_conn):
eeprom = chassis.get_eeprom(platform_api_conn)
pytest_assert(eeprom is not None, "Failed to retrieve system EEPROM")
| [
[
[
7,
14
],
[
231,
238
]
],
[
[
22,
24
],
[
3800,
3802
],
[
5031,
5033
],
[
7956,
7958
],
[
8258,
8260
]
],
[
[
33,
39
],
[
279,
285
],
[
349,
355
],
[
9686,
9692
],
[
10561,
10567
],
[
11376,
11382
],
[
12171,
12177
],
[
13051,
13057
],
[
13834,
13840
],
[
14665,
14671
]
],
[
[
47,
51
]
],
[
[
97,
110
],
[
2013,
2026
],
[
2088,
2101
],
[
2299,
2312
],
[
2382,
2395
],
[
2513,
2526
],
[
2702,
2715
],
[
2779,
2792
],
[
2986,
2999
],
[
3072,
3085
],
[
3288,
3301
],
[
3367,
3380
],
[
3703,
3716
],
[
3786,
3799
],
[
4114,
4127
],
[
4939,
4952
],
[
5017,
5030
],
[
5336,
5349
],
[
7011,
7024
],
[
7107,
7120
],
[
7378,
7391
],
[
7588,
7601
],
[
7859,
7872
],
[
7942,
7955
],
[
8166,
8179
],
[
8244,
8257
],
[
8592,
8605
],
[
9194,
9207
],
[
9277,
9290
],
[
9814,
9827
],
[
9897,
9910
],
[
10680,
10693
],
[
10757,
10770
],
[
11486,
11499
],
[
11557,
11570
],
[
12302,
12315
],
[
12387,
12400
],
[
13161,
13174
],
[
13232,
13245
],
[
13956,
13969
],
[
14035,
14048
],
[
14775,
14788
],
[
14846,
14859
],
[
16306,
16319
],
[
16523,
16536
],
[
16724,
16737
]
],
[
[
157,
164
],
[
1969,
1976
],
[
2251,
2258
],
[
2657,
2664
],
[
2940,
2947
],
[
3242,
3249
],
[
3655,
3662
],
[
4871,
4878
],
[
6953,
6960
],
[
8985,
8992
],
[
9611,
9618
],
[
9760,
9767
],
[
10089,
10096
],
[
10489,
10496
],
[
10629,
10636
],
[
10931,
10938
],
[
11307,
11314
],
[
11438,
11445
],
[
11713,
11720
],
[
12095,
12102
],
[
12247,
12254
],
[
12586,
12593
],
[
12982,
12989
],
[
13113,
13120
],
[
13388,
13395
],
[
13761,
13768
],
[
13904,
13911
],
[
14215,
14222
],
[
14596,
14603
],
[
14727,
14734
],
[
15002,
15009
],
[
15491,
15498
],
[
15744,
15751
],
[
16251,
16258
],
[
16475,
16482
],
[
16678,
16685
]
],
[
[
201,
220
],
[
1729,
1748
]
],
[
[
222,
228
],
[
4218,
4224
],
[
5433,
5439
],
[
8720,
8726
]
],
[
[
260,
270
]
],
[
[
380,
397
],
[
3809,
3826
],
[
7965,
7982
]
],
[
[
442,
461
],
[
5040,
5059
],
[
8267,
8286
]
],
[
[
620,
655
],
[
5913,
5948
]
],
[
[
683,
717
],
[
5962,
5996
]
],
[
[
745,
781
],
[
6010,
6046
],
[
6783,
6819
],
[
8120,
8156
]
],
[
[
809,
845
],
[
6060,
6096
],
[
6833,
6869
],
[
7813,
7849
]
],
[
[
876,
907
],
[
6110,
6141
]
],
[
[
943,
980
],
[
6155,
6192
]
],
[
[
1008,
1045
],
[
6206,
6243
]
],
[
[
1073,
1109
],
[
6257,
6293
]
],
[
[
1137,
1172
],
[
6307,
6342
]
],
[
[
1200,
1231
],
[
6356,
6387
]
],
[
[
1274,
1309
],
[
6401,
6436
]
],
[
[
1337,
1372
],
[
6450,
6485
]
],
[
[
1400,
1429
],
[
6499,
6528
]
],
[
[
1457,
1492
],
[
6542,
6577
]
],
[
[
1520,
1554
],
[
6591,
6625
]
],
[
[
1582,
1615
],
[
6639,
6672
]
],
[
[
1649,
1677
],
[
6686,
6714
],
[
6883,
6911
]
],
[
[
1714,
1728
]
]
] |