hexsha (string, 40) | size (int64, 2-1.05M) | ext (string, 9 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4-193) | max_stars_repo_name (string, 6-109) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (sequence) | max_stars_count (int64, 1-36.6k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 4-193) | max_issues_repo_name (string, 6-109) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (sequence) | max_issues_count (int64, 1-29.8k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 4-193) | max_forks_repo_name (string, 6-109) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (sequence) | max_forks_count (int64, 1-11.2k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 2-1.05M) | avg_line_length (float64, 1-404k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70001f658d4dfaa72dd4f0d1b3176492f6658bb | 6,442 | py | Python | spider/openwrt.py | CNDB/CNDB | 2e3a41111f604cf2f4f22a7c9370bb3f753e3e88 | ["BSD-3-Clause"] | null | null | null | spider/openwrt.py | CNDB/CNDB | 2e3a41111f604cf2f4f22a7c9370bb3f753e3e88 | ["BSD-3-Clause"] | null | null | null | spider/openwrt.py | CNDB/CNDB | 2e3a41111f604cf2f4f22a7c9370bb3f753e3e88 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# #*** <License> ************************************************************#
# This module is part of the repository CNDB.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
from _TFL.pyk import pyk
from rsclib.HTML_Parse import tag, Page_Tree
from rsclib.autosuper import autosuper
from spider.common import Interface, Inet4, Inet6, unroutable
from spider.common import WLAN_Config
from spider.luci import Version_Mixin
class Status (Page_Tree, Version_Mixin) :
url = 'cgi-bin/luci/freifunk/status/status'
retries = 2
timeout = 10
html_charset = 'utf-8' # force utf-8 encoding
wl_names = dict \
( ssid = 'ssid'
, _bsiid = 'bssid'
, channel = 'channel'
, mode = 'mode'
)
def parse (self) :
root = self.tree.getroot ()
self.wlans = []
self.routes = {}
for div in root.findall (".//%s" % tag ("div")) :
id = div.get ('id')
if id == 'cbi-wireless' :
wlan_div = div
elif id == 'cbi-routes' :
route_div = div
self.try_get_version (div)
for d in self.tbl_iter (wlan_div) :
for k, newkey in pyk.iteritems (self.wl_names) :
if k in d :
d [newkey] = d [k]
wl = WLAN_Config (** d)
self.wlans.append (wl)
for d in self.tbl_iter (route_div) :
iface = d.get ('iface')
gw = d.get ('gateway')
if iface and gw :
self.routes [iface] = gw
self.set_version (root)
# end def parse
def tbl_iter (self, div) :
tbl = div.find (".//%s" % tag ("table"))
assert tbl.get ('class') == 'cbi-section-table'
d = {}
for tr in tbl :
if 'cbi-section-table-row' not in tr.get ('class').split () :
continue
for input in tr.findall (".//%s" % tag ('input')) :
name = input.get ('id').split ('.') [-1]
val = input.get ('value')
d [name] = val
if not d :
continue
yield d
# end def tbl_iter
# end class Status
class Table_Iter (Page_Tree) :
def table_iter (self) :
root = self.tree.getroot ()
for div in root.findall (".//%s" % tag ("div")) :
if div.get ('id') == 'maincontent' :
break
tbl = div.find (".//%s" % tag ("table"))
if tbl is None :
return
for tr in tbl :
if tr [0].tag == tag ('th') :
continue
yield (self.tree.get_text (x) for x in tr)
# end def table_iter
# end class Table_Iter
class OLSR_Connections (Table_Iter) :
url = 'cgi-bin/luci/freifunk/olsr/'
retries = 2
timeout = 10
html_charset = 'utf-8' # force utf-8 encoding
def parse (self) :
self.neighbors = {}
for l in self.table_iter () :
neighbor, ip, lq, nlq, etx = l
lq, nlq, etx = (float (x) for x in (lq, nlq, etx))
self.neighbors [neighbor] = [ip, lq, nlq, etx]
# end def parse
# end class OLSR_Connections
class OLSR_Routes (Table_Iter) :
url = 'cgi-bin/luci/freifunk/olsr/routes'
retries = 2
timeout = 10
html_charset = 'utf-8' # force utf-8 encoding
def parse (self) :
self.iface_by_gw = {}
for l in self.table_iter () :
announced, gw, iface, metric, etx = l
if gw in self.iface_by_gw :
assert iface == self.iface_by_gw [gw]
else :
self.iface_by_gw [gw] = iface
# end def parse
# end class OLSR_Routes
class OpenWRT (autosuper) :
def __init__ (self, site, request) :
self.site = site
self.request = request
if 'interfaces' in self.request or 'ips' in self.request :
st = Status (site = site)
conn = OLSR_Connections (site = site)
route = OLSR_Routes (site = site)
self.version = st.version
assert len (st.wlans) <= 1
interfaces = {}
ips = {}
count = 0
for gw, ifname in pyk.iteritems (route.iface_by_gw) :
ip, lq, nlq, etx = conn.neighbors [gw]
i4 = Inet4 (ip, None, None, iface = ifname)
ips [i4] = 1
is_wlan = True
if lq == nlq == etx == 1.0 :
is_wlan = False
if ifname in interfaces :
iface = interfaces [ifname]
if not iface.is_wlan and is_wlan :
iface.is_wlan = True
iface.wlan_info = st.wlans [0]
else :
iface = Interface (count, ifname, None)
iface.is_wlan = is_wlan
if is_wlan :
iface.wlan_info = st.wlans [0]
count += 1
interfaces [ifname] = iface
if i4 not in iface.inet4 :
iface.append_inet4 (i4)
wl_if = None
for iface in pyk.itervalues (interfaces) :
if iface.is_wlan :
if wl_if :
m = "Duplicate wlan: %s/%s" % (iface.name, wl_if.name)
raise ValueError (m)
wl_if = iface
# check own ip
n = 'unknown'
i4 = Inet4 (self.request ['ip'], None, None, iface = n)
if i4 not in ips :
assert n not in interfaces
iface = interfaces [n] = Interface (count, n, None)
iface.append_inet4 (i4)
iface.is_wlan = False
if not wl_if and st.wlans :
iface.is_wlan = True
iface.wlan_info = st.wlans [0]
ips [i4] = True
self.request ['ips'] = ips
self.request ['interfaces'] = interfaces
self.request ['version'] = st.version
# end def __init__
# end class OpenWRT
| 34.449198 | 78 | 0.472369 |
f7001ad17b839c3551d7b4c8edcc8b1d1d322b6f | 6,412 | py | Python | asv/plugins/conda.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | 2 | 2019-08-18T11:05:25.000Z | 2019-11-17T02:07:18.000Z | asv/plugins/conda.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | 1 | 2019-02-19T17:11:38.000Z | 2019-02-19T17:11:38.000Z | asv/plugins/conda.py | prisae/asv | 57c386d7cc27f91ecd8daf1ad2e0413f2efdd39c | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import re
import os
import tempfile
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
def _find_conda():
"""Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
"""
if 'CONDA_EXE' in os.environ:
conda = os.environ['CONDA_EXE']
else:
conda = util.which('conda')
return conda
class Conda(environment.Environment):
"""
Manage an environment using conda.
Dependencies are installed using ``conda``. The benchmarked
project is installed using ``pip`` (since ``conda`` doesn't have a
method to install from an arbitrary ``setup.py``).
"""
tool_name = "conda"
_matches_cache = {}
def __init__(self, conf, python, requirements):
"""
Parameters
----------
conf : Config instance
python : str
Version of Python. Must be of the form "MAJOR.MINOR".
requirements : dict
Dictionary mapping a PyPI package name to a version
identifier string.
"""
self._python = python
self._requirements = requirements
self._conda_channels = conf.conda_channels
super(Conda, self).__init__(conf, python, requirements)
@classmethod
def matches(cls, python):
# Calling conda can take a long time, so remember the result
if python not in cls._matches_cache:
cls._matches_cache[python] = cls._matches(python)
return cls._matches_cache[python]
@classmethod
def _matches(cls, python):
if not re.match(r'^[0-9].*$', python):
# The python name should be a version number
return False
try:
conda = _find_conda()
except IOError:
return False
else:
# This directory never gets created, since we're just
# doing a dry run below. All it needs to be is something
# that doesn't already exist.
path = os.path.join(tempfile.gettempdir(), 'check')
# Check that the version number is valid
try:
util.check_call([
conda,
'create',
'--yes',
'-p',
path,
'python={0}'.format(python),
'--dry-run'], display_error=False, dots=False)
except util.ProcessError:
return False
else:
return True
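# Illustrative sketch: the dry-run check above is equivalent to calling the
# conda CLI directly (the temp path assumes a Linux tempdir; adjust as needed):
#
#   conda create --yes -p /tmp/check python=3.8 --dry-run
#
# A non-zero exit status raises util.ProcessError, so _matches() returns False
# and that Python version is treated as unavailable.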
def _setup(self):
try:
conda = _find_conda()
except IOError as e:
raise util.UserError(str(e))
log.info("Creating conda environment for {0}".format(self.name))
# create a temporary environment.yml file
# and use that to generate the env for benchmarking
env_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".yml")
try:
env_file.write('name: {0}\n'
'channels:\n'.format(self.name))
env_file.writelines((' - %s\n' % ch for ch in self._conda_channels))
env_file.write('dependencies:\n'
' - python={0}\n'
' - wheel\n'
' - pip\n'.format(self._python))
# categorize & write dependencies based on pip vs. conda
conda_args, pip_args = self._get_requirements(conda)
env_file.writelines((' - %s\n' % s for s in conda_args))
if pip_args:
# and now specify the packages that are to be installed in
# the pip subsection
env_file.write(' - pip:\n')
env_file.writelines((' - %s\n' % s for s in pip_args))
env_file.close()
util.check_output([conda] + ['env', 'create', '-f', env_file.name,
'-p', self._path, '--force'])
except Exception as exc:
if os.path.isfile(env_file.name):
with open(env_file.name, 'r') as f:
text = f.read()
log.info("conda env create failed: in {} with:\n{}".format(self._path, text))
raise
finally:
os.unlink(env_file.name)
def _get_requirements(self, conda):
if self._requirements:
# retrieve and return all conda / pip dependencies
conda_args = []
pip_args = []
for key, val in six.iteritems(self._requirements):
if key.startswith('pip+'):
if val:
pip_args.append("{0}=={1}".format(key[4:], val))
else:
pip_args.append(key[4:])
else:
if val:
conda_args.append("{0}={1}".format(key, val))
else:
conda_args.append(key)
return conda_args, pip_args
else:
return [], []
def run(self, args, **kwargs):
log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
return self.run_executable('python', args, **kwargs)
def run_executable(self, executable, args, **kwargs):
# Conda doesn't guarantee that user site directories are excluded
kwargs["env"] = dict(kwargs.pop("env", os.environ),
PYTHONNOUSERSITE=str("True"))
return super(Conda, self).run_executable(executable, args, **kwargs)
| 33.570681 | 93 | 0.547723 |
f700701e51582a6f314450ea9547949094b4db62 | 3,429 | py | Python | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | ["Apache-2.0"] | 7 | 2019-03-11T16:17:33.000Z | 2020-10-22T21:57:51.000Z | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | ["Apache-2.0"] | 3 | 2019-11-05T20:22:16.000Z | 2019-12-11T17:09:04.000Z | fineract/objects/group.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | ["Apache-2.0"] | 2 | 2020-11-19T16:00:36.000Z | 2021-11-19T09:36:13.000Z |
from fineract.objects.fineract_object import DataFineractObject
from fineract.objects.types import Type
class Group(DataFineractObject):
"""
This class represents a Group.
"""
def __repr__(self):
return self.get__repr__({'group_id': self.id})
def _init_attributes(self):
self.id = None
self.account_no = None
self.external_id = None
self.name = None
self.status = None
self.active = None
self.activation_date = None
self.office_id = None
self.office_name = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.account_no = attributes.get('accountNo', None)
self.external_id = attributes.get('externalId', None)
self.name = attributes.get('name', None)
self.status = self._make_fineract_object(GroupStatus, attributes.get('status', None))
self.active = attributes.get('active', None)
self.activation_date = self._make_date_object(attributes.get('activationDate', None))
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.hierarchy = attributes.get('hierarchy', None)
def add_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=associateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
def remove_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=disassociateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
@classmethod
def create(cls, request_handler, name, office_id, active=True, activation_date=None):
"""Create a group
:param request_handler:
:param name:
:param office_id:
:param active:
:param activation_date:
:rtype: :class:`fineract.objects.group.Group`
"""
data = {
'name': name,
'officeId': office_id,
'active': active,
'activationDate': activation_date or cls._get_current_date()
}
res = request_handler.make_request(
'POST',
'/groups',
json=data
)
group_id = res['groupId']
return cls(request_handler,
request_handler.make_request(
'GET',
'/groups/{}'.format(group_id)
), False)
@classmethod
def get_group_by_name(cls, request_handler, name):
"""Get a group by name
:param request_handler:
:param name:
:rtype: :class:`fineract.objects.group.Group`
"""
data = request_handler.make_request(
'GET',
'/groups'
)
if data:
for item in data:
if item['name'] == name:
print(item)
return cls(request_handler, item, False)
return None
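# Illustrative sketch of the calls above; `request_handler` is assumed to come
# from an already-authenticated py-fineract client, and the name, office id
# and client ids are placeholders:
#
#   group = Group.create(request_handler, name='Farmers', office_id=1)
#   group.add_members([12, 15])
#   same_group = Group.get_group_by_name(request_handler, 'Farmers')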
class GroupStatus(Type):
"""
This class represents a Group status.
"""
pass
| 29.307692 | 93 | 0.567221 |
f7007e7d6cadbb4707818ec05e6fcbc50ba52dfb | 2,656 | py | Python | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | ["Apache-2.0"] | null | null | null | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | ["Apache-2.0"] | null | null | null | sysinv/sysinv/sysinv/sysinv/common/service.py | starlingx-staging/stx-config | ccbf0392d1941e7cad6673f6351bd905a5a5d419 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from sysinv.openstack.common import context
from sysinv.openstack.common import log
from sysinv.openstack.common import periodic_task
from sysinv.openstack.common import rpc
from sysinv.openstack.common.rpc import service as rpc_service
from oslo_service import service
cfg.CONF.register_opts([
cfg.IntOpt('periodic_interval',
default=60,
help='seconds between running periodic tasks'),
cfg.StrOpt('host',
default=socket.getfqdn(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
])
CONF = cfg.CONF
class PeriodicService(rpc_service.Service, periodic_task.PeriodicTasks):
def start(self):
super(PeriodicService, self).start()
admin_context = context.RequestContext('admin', 'admin', is_admin=True)
self.tg.add_timer(cfg.CONF.periodic_interval,
self.manager.periodic_tasks,
context=admin_context)
def prepare_service(argv=None):
if argv is None:
argv = []
rpc.set_defaults(control_exchange='sysinv')
cfg.set_defaults(log.log_opts,
default_log_levels=['amqplib=WARN',
'qpid.messaging=INFO',
'sqlalchemy=WARN',
'keystoneclient=INFO',
'stevedore=INFO',
'eventlet.wsgi.server=WARN'
])
cfg.CONF(argv[1:], project='sysinv')
log.setup('sysinv')
def process_launcher():
return service.ProcessLauncher(CONF)
| 34.947368 | 79 | 0.622364 |
f7008bf87b5a1c0780d7272b314fa3142ffb3ef3 | 18,984 | py | Python | netbox_agent/server.py | freberkivra/netbox-agent | 5f0aae6c011cd43f7d9e6d322f90a6b0f5195c61 | ["Apache-2.0"] | 24 | 2019-08-05T15:14:20.000Z | 2020-02-02T11:05:45.000Z | netbox_agent/server.py | freberkivra/netbox-agent | 5f0aae6c011cd43f7d9e6d322f90a6b0f5195c61 | ["Apache-2.0"] | 39 | 2019-08-04T18:12:07.000Z | 2020-01-30T21:42:38.000Z | netbox_agent/server.py | freberkivra/netbox-agent | 5f0aae6c011cd43f7d9e6d322f90a6b0f5195c61 | ["Apache-2.0"] | 8 | 2019-09-03T20:51:22.000Z | 2020-01-15T06:00:23.000Z |
import netbox_agent.dmidecode as dmidecode
from netbox_agent.config import config
from netbox_agent.config import netbox_instance as nb
from netbox_agent.inventory import Inventory
from netbox_agent.location import Datacenter, Rack, Tenant
from netbox_agent.misc import create_netbox_tags, get_device_role, get_device_type, get_device_platform
from netbox_agent.network import ServerNetwork
from netbox_agent.power import PowerSupply
from pprint import pprint
import subprocess
import logging
import socket
import sys
class ServerBase():
def __init__(self, dmi=None):
if dmi:
self.dmi = dmi
else:
self.dmi = dmidecode.parse()
self.baseboard = dmidecode.get_by_type(self.dmi, 'Baseboard')
self.bios = dmidecode.get_by_type(self.dmi, 'BIOS')
self.chassis = dmidecode.get_by_type(self.dmi, 'Chassis')
self.system = dmidecode.get_by_type(self.dmi, 'System')
self.device_platform = get_device_platform(config.device.platform)
self.network = None
self.tags = list(set([
x.strip() for x in config.device.tags.split(',') if x.strip()
])) if config.device.tags else []
self.nb_tags = list(create_netbox_tags(self.tags))
config_cf = set([
f.strip() for f in config.device.custom_fields.split(",")
if f.strip()
])
self.custom_fields = {}
self.custom_fields.update(dict([
(k.strip(), v.strip()) for k, v in
[f.split("=", 1) for f in config_cf]
]))
def get_tenant(self):
tenant = Tenant()
return tenant.get()
def get_netbox_tenant(self):
tenant = self.get_tenant()
if tenant is None:
return None
nb_tenant = nb.tenancy.tenants.get(
slug=self.get_tenant()
)
return nb_tenant
def get_datacenter(self):
dc = Datacenter()
return dc.get()
def get_netbox_datacenter(self):
dc = self.get_datacenter()
if dc is None:
logging.error("Specificing a datacenter (Site) is mandatory in Netbox")
sys.exit(1)
nb_dc = nb.dcim.sites.get(
slug=dc,
)
if nb_dc is None:
logging.error("Site (slug: {}) has not been found".format(dc))
sys.exit(1)
return nb_dc
def update_netbox_location(self, server):
dc = self.get_datacenter()
nb_rack = self.get_netbox_rack()
nb_dc = self.get_netbox_datacenter()
update = False
if dc and server.site and server.site.slug != nb_dc.slug:
logging.info('Datacenter location has changed from {} to {}, updating'.format(
server.site.slug,
nb_dc.slug,
))
update = True
server.site = nb_dc.id
if (
server.rack
and nb_rack
and server.rack.id != nb_rack.id
):
logging.info('Rack location has changed from {} to {}, updating'.format(
server.rack,
nb_rack,
))
update = True
server.rack = nb_rack
if nb_rack is None:
server.face = None
server.position = None
return update, server
def update_netbox_expansion_location(self, server, expansion):
update = False
if expansion.tenant != server.tenant:
expansion.tenant = server.tenant
update = True
if expansion.site != server.site:
expansion.site = server.site
update = True
if expansion.rack != server.rack:
expansion.rack = server.rack
update = True
return update
def get_rack(self):
rack = Rack()
return rack.get()
def get_netbox_rack(self):
rack = self.get_rack()
datacenter = self.get_netbox_datacenter()
if not rack:
return None
if rack and not datacenter:
logging.error("Can't get rack if no datacenter is configured or found")
sys.exit(1)
return nb.dcim.racks.get(
name=rack,
site_id=datacenter.id,
)
def get_product_name(self):
"""
Return the Chassis Name from dmidecode info
"""
return self.system[0]['Product Name'].strip()
def get_service_tag(self):
"""
Return the Service Tag from dmidecode info
"""
return self.system[0]['Serial Number'].strip()
def get_expansion_service_tag(self):
"""
Return the virtual Service Tag from dmidecode info host
with 'expansion'
"""
return self.system[0]['Serial Number'].strip() + " expansion"
def get_hostname(self):
if config.hostname_cmd is None:
return '{}'.format(socket.gethostname())
return subprocess.getoutput(config.hostname_cmd)
def is_blade(self):
raise NotImplementedError
def get_blade_slot(self):
raise NotImplementedError
def get_chassis(self):
raise NotImplementedError
def get_chassis_name(self):
raise NotImplementedError
def get_chassis_service_tag(self):
raise NotImplementedError
def get_bios_version(self):
raise NotImplementedError
def get_bios_version_attr(self):
raise NotImplementedError
def get_bios_release_date(self):
raise NotImplementedError
def get_power_consumption(self):
raise NotImplementedError
def get_expansion_product(self):
raise NotImplementedError
def _netbox_create_chassis(self, datacenter, tenant, rack):
device_type = get_device_type(self.get_chassis())
device_role = get_device_role(config.device.chassis_role)
serial = self.get_chassis_service_tag()
logging.info('Creating chassis blade (serial: {serial})'.format(
serial=serial))
new_chassis = nb.dcim.devices.create(
name=self.get_chassis_name(),
device_type=device_type.id,
serial=serial,
device_role=device_role.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_chassis
def _netbox_create_blade(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_product_name())
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info(
'Creating blade (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
custom_fields=self.custom_fields,
)
return new_blade
def _netbox_create_blade_expansion(self, chassis, datacenter, tenant, rack):
device_role = get_device_role(config.device.blade_role)
device_type = get_device_type(self.get_expansion_product())
serial = self.get_expansion_service_tag()
hostname = self.get_hostname() + " expansion"
logging.info(
'Creating expansion (serial: {serial}) {hostname} on chassis {chassis_serial}'.format(
serial=serial, hostname=hostname, chassis_serial=chassis.serial
))
new_blade = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
parent_device=chassis.id,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_blade
def _netbox_deduplicate_server(self):
serial = self.get_service_tag()
hostname = self.get_hostname()
server = nb.dcim.devices.get(name=hostname)
if server and server.serial != serial:
server.delete()
def _netbox_create_server(self, datacenter, tenant, rack):
device_role = get_device_role(config.device.server_role)
device_type = get_device_type(self.get_product_name())
if not device_type:
raise Exception('Chassis "{}" doesn\'t exist'.format(self.get_chassis()))
serial = self.get_service_tag()
hostname = self.get_hostname()
logging.info('Creating server (serial: {serial}) {hostname}'.format(
serial=serial, hostname=hostname))
new_server = nb.dcim.devices.create(
name=hostname,
serial=serial,
device_role=device_role.id,
device_type=device_type.id,
platform=self.device_platform,
site=datacenter.id if datacenter else None,
tenant=tenant.id if tenant else None,
rack=rack.id if rack else None,
tags=[{'name': x} for x in self.tags],
)
return new_server
def get_netbox_server(self, expansion=False):
if expansion is False:
return nb.dcim.devices.get(serial=self.get_service_tag())
else:
return nb.dcim.devices.get(serial=self.get_expansion_service_tag())
def _netbox_set_or_update_blade_slot(self, server, chassis, datacenter):
# before everything check if right chassis
actual_device_bay = server.parent_device.device_bay \
if server.parent_device else None
actual_chassis = actual_device_bay.device \
if actual_device_bay else None
slot = self.get_blade_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if real_device_bays:
logging.info(
'Setting device ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=server.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
# Forces the evaluation of the installed_device attribute to
# workaround a bug probably due to lazy loading optimization
# that prevents the value change detection
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
# setup new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = server
real_device_bay.save()
else:
logging.error('Could not find slot {slot} for chassis'.format(
slot=slot
))
def _netbox_set_or_update_blade_expansion_slot(self, expansion, chassis, datacenter):
# before everything check if right chassis
actual_device_bay = expansion.parent_device.device_bay if expansion.parent_device else None
actual_chassis = actual_device_bay.device if actual_device_bay else None
slot = self.get_blade_expansion_slot()
if actual_chassis and \
actual_chassis.serial == chassis.serial and \
actual_device_bay.name == slot:
return
real_device_bays = nb.dcim.device_bays.filter(
device_id=chassis.id,
name=slot,
)
if not real_device_bays:
logging.error('Could not find slot {slot} expansion for chassis'.format(
slot=slot
))
return
logging.info(
'Setting device expansion ({serial}) new slot on {slot} '
'(Chassis {chassis_serial})..'.format(
serial=expansion.serial, slot=slot, chassis_serial=chassis.serial
))
# reset actual device bay if set
if actual_device_bay:
# Forces the evaluation of the installed_device attribute to
# workaround a bug probably due to lazy loading optimization
# that prevents the value change detection
actual_device_bay.installed_device
actual_device_bay.installed_device = None
actual_device_bay.save()
# setup new device bay
real_device_bay = next(real_device_bays)
real_device_bay.installed_device = expansion
real_device_bay.save()
def netbox_create_or_update(self, config):
"""
Netbox method to create or update info about our server/blade
Handle:
* new chassis for a blade
* new slot for a blade
* hostname update
* Network infos
* Inventory management
* PSU management
"""
datacenter = self.get_netbox_datacenter()
rack = self.get_netbox_rack()
tenant = self.get_netbox_tenant()
if config.purge_old_devices:
self._netbox_deduplicate_server()
if self.is_blade():
chassis = nb.dcim.devices.get(
serial=self.get_chassis_service_tag()
)
# Chassis does not exist
if not chassis:
chassis = self._netbox_create_chassis(datacenter, tenant, rack)
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_blade(chassis, datacenter, tenant, rack)
# Set slot for blade
self._netbox_set_or_update_blade_slot(server, chassis, datacenter)
else:
server = nb.dcim.devices.get(serial=self.get_service_tag())
if not server:
server = self._netbox_create_server(datacenter, tenant, rack)
logging.debug('Updating Server...')
# check network cards
if config.register or config.update_all or config.update_network:
self.network = ServerNetwork(server=self)
self.network.create_or_update_netbox_network_cards()
update_inventory = config.inventory and (config.register or
config.update_all or config.update_inventory)
# update inventory if feature is enabled
self.inventory = Inventory(server=self)
if update_inventory:
self.inventory.create_or_update()
# update psu
if config.register or config.update_all or config.update_psu:
self.power = PowerSupply(server=self)
self.power.create_or_update_power_supply()
self.power.report_power_consumption()
expansion = nb.dcim.devices.get(serial=self.get_expansion_service_tag())
if self.own_expansion_slot() and config.expansion_as_device:
logging.debug('Update Server expansion...')
if not expansion:
expansion = self._netbox_create_blade_expansion(chassis, datacenter, tenant, rack)
# set slot for blade expansion
self._netbox_set_or_update_blade_expansion_slot(expansion, chassis, datacenter)
if update_inventory:
# Updates expansion inventory
inventory = Inventory(server=self, update_expansion=True)
inventory.create_or_update()
elif self.own_expansion_slot() and expansion:
expansion.delete()
expansion = None
update = 0
# for every other specs
# check hostname
if server.name != self.get_hostname():
server.name = self.get_hostname()
update += 1
server_tags = sorted(set([x.name for x in server.tags]))
tags = sorted(set(self.tags))
if server_tags != tags:
new_tags_ids = [x.id for x in self.nb_tags]
if not config.preserve_tags:
server.tags = new_tags_ids
else:
server_tags_ids = [x.id for x in server.tags]
server.tags = sorted(set(new_tags_ids + server_tags_ids))
update += 1
if server.custom_fields != self.custom_fields:
server.custom_fields = self.custom_fields
update += 1
if config.update_all or config.update_location:
ret, server = self.update_netbox_location(server)
update += ret
if server.platform != self.device_platform:
server.platform = self.device_platform
update += 1
if update:
server.save()
if expansion:
update = 0
expansion_name = server.name + ' expansion'
if expansion.name != expansion_name:
expansion.name = expansion_name
update += 1
if self.update_netbox_expansion_location(server, expansion):
update += 1
if update:
expansion.save()
logging.debug('Finished updating Server!')
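# Illustrative sketch: ServerBase leaves is_blade(), get_chassis() and friends
# unimplemented, so a vendor-specific subclass is what actually gets driven.
# `SomeVendorServer` is a placeholder name and `config` is the parsed
# netbox_agent configuration assumed to be available at the call site:
#
#   server = SomeVendorServer()            # dmi defaults to dmidecode.parse()
#   server.netbox_create_or_update(config)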
def print_debug(self):
self.network = ServerNetwork(server=self)
print('Datacenter:', self.get_datacenter())
print('Netbox Datacenter:', self.get_netbox_datacenter())
print('Rack:', self.get_rack())
print('Netbox Rack:', self.get_netbox_rack())
print('Is blade:', self.is_blade())
print('Got expansion:', self.own_expansion_slot())
print('Product Name:', self.get_product_name())
print('Platform:', self.device_platform)
print('Chassis:', self.get_chassis())
print('Chassis service tag:', self.get_chassis_service_tag())
print('Service tag:', self.get_service_tag())
print('NIC:',)
pprint(self.network.get_network_cards())
pass
def own_expansion_slot(self):
"""
Indicates if the device hosts an expansion card
"""
return False
def own_gpu_expansion_slot(self):
"""
Indicates if the device hosts a GPU expansion card
"""
return False
def own_drive_expansion_slot(self):
"""
Indicates if the device hosts a drive expansion bay
"""
return False
| 36.43762 | 103 | 0.608038 |
f700bd3e668d5f4fe3f075fecf18bb44137fc1c9 | 11,470 | py | Python | tools/azure-sdk-tools/packaging_tools/swaggertosdk/SwaggerToSdkCore.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | tools/azure-sdk-tools/packaging_tools/swaggertosdk/SwaggerToSdkCore.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | tools/azure-sdk-tools/packaging_tools/swaggertosdk/SwaggerToSdkCore.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
"""SwaggerToSdk core tools.
"""
from enum import Enum, unique
import json
import logging
import os
import re
import tempfile
from pathlib import Path
import requests
from github import Github, UnknownObjectException
from .autorest_tools import (
autorest_latest_version_finder,
autorest_bootstrap_version_finder,
autorest_swagger_to_sdk_conf,
)
from azure_devtools.ci_tools.github_tools import get_files, GithubLink
_LOGGER = logging.getLogger(__name__)
CONFIG_FILE = "swagger_to_sdk_config_autorest.json"
CONFIG_FILE_DPG = "swagger_to_sdk_config_dpg.json"
DEFAULT_COMMIT_MESSAGE = "Generated from {hexsha}"
def build_file_content():
autorest_version = autorest_latest_version_finder()
autorest_bootstrap_version = autorest_bootstrap_version_finder()
return {
"autorest": autorest_version,
"autorest_bootstrap": autorest_bootstrap_version,
}
def get_repo_tag_meta(meta_conf):
repotag = meta_conf.get("repotag")
if repotag:
return repotag
# Guess for now, "repotag" should be added everywhere
if "go" in meta_conf["autorest_options"]:
return "azure-sdk-for-go"
if "ruby" in meta_conf["autorest_options"]:
return "azure-sdk-for-ruby"
if "java" in meta_conf["autorest_options"]:
return "azure-sdk-for-java"
if "nodejs" in meta_conf["autorest_options"]:
return "azure-sdk-for-node"
if "typescript" in meta_conf["autorest_options"]:
return "azure-sdk-for-js"
raise ValueError("No repotag found or infered")
@unique
class Language(str, Enum):
GOLANG = "go"
RUBY = "ruby"
JAVA = "java"
NODEJS = "nodejs"
CSHARP = "csharp"
PYTHON = "python"
TYPESCRIPT = "typescript"
def get_language_from_conf(meta_conf):
"""Detect the language based on the default Autorest options.
Assuming all language use --mylanguage in the config file.
If I don't find anything, well just say I don't know...
This is based on autorest language flags.
:rtype: Language
"""
autorest_options_lang = set(meta_conf["autorest_options"].keys())
languages = set()
for value in Language:
if value in autorest_options_lang:
languages.add(value)
if not languages:
_LOGGER.warning("No detected language from this conf")
return None # I don't what this conf is about?
language = languages.pop()
if languages:
_LOGGER.warning("This SwaggerToSdk conf seems to generate too much language in one call, assume we don't know")
return None
return language
def get_context_tag_from_git_object(git_object):
files_list = [file.filename for file in get_files(git_object)]
return get_context_tag_from_file_list(files_list)
def get_context_tag_from_file_list(files_list):
context_tags = set()
for filename in files_list:
filepath = Path(filename)
filename = filepath.as_posix()
if "/examples/" in filename:
# Do not compute context for example that are not used in SDK
continue
# Match if RP name
match = re.match(r"specification/(.*)/Microsoft.\w*/(stable|preview)/", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# Match if stable/preview but not RP like ARM (i.e. Cognitive Services)
match = re.match(r"specification/(.*)/(stable|preview)/", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# Match Readme
# Do it last step, because if some weird Readme for ServiceFabric...
match = re.match(r"specification/(.*)/readme.\w*.?md", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# No context-tags
return context_tags
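# Illustrative example with a made-up spec path:
#
#   get_context_tag_from_file_list(
#       ["specification/compute/Microsoft.Compute/stable/2020-06-01/compute.json"]
#   )
#   # -> {"compute"}
#
# Files under an "examples" directory are skipped, and a plain
# "specification/<rp>/readme.md" also contributes "<rp>" as a context tag.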
def this_conf_will_generate_for_this_pr(git_object, config):
"""Try to guess if this PR has a chance to generate something for this conf.
Right now, just match the language in the conf with the presence
of ONLY "readme.language.md" files.
"""
lang = get_language_from_conf(config)
filenames = [file.filename.lower() for file in get_files(git_object)]
readme_lang = [name for name in filenames if re.match(r"(.*)readme.\w+.md", name)]
if len(readme_lang) != len(filenames):
return True # This means there is files that are not language specific readme
return bool([name for name in readme_lang if name.endswith("readme.{}.md".format(lang))])
def get_readme_files_from_git_object(git_object, base_dir=Path(".")):
files_list = [file.filename for file in get_files(git_object)]
return get_readme_files_from_file_list(files_list, base_dir)
def get_readme_files_from_file_list(files_list, base_dir=Path(".")):
"""Get readme files from this PR.
Algo is to look for context, and then search for Readme inside this context.
"""
readme_files = set()
context_tags = get_context_tag_from_file_list(files_list)
for context_tag in context_tags:
expected_folder = Path(base_dir) / Path("specification/{}".format(context_tag))
if not expected_folder.is_dir():
_LOGGER.warning("From context {} I didn't find folder {}".format(context_tag, expected_folder))
continue
for expected_readme in [l for l in expected_folder.iterdir() if l.is_file()]:
# Need to do a case-insensitive test.
match = re.match(r"readme.\w*.?md", expected_readme.name, re.I)
if match:
readme_files.add(expected_readme.relative_to(Path(base_dir)))
return readme_files
def read_config(sdk_git_folder, config_file):
"""Read the configuration file and return JSON"""
config_path = os.path.join(sdk_git_folder, config_file)
with open(config_path, "r") as config_fd:
return json.loads(config_fd.read())
def read_config_from_github(sdk_id, branch="main", gh_token=None):
raw_link = str(get_configuration_github_path(sdk_id, branch))
_LOGGER.debug("Will try to download: %s", raw_link)
_LOGGER.debug("Token is defined: %s", gh_token is not None)
headers = {"Authorization": "token {}".format(gh_token)} if gh_token else {}
response = requests.get(raw_link, headers=headers)
if response.status_code != 200:
raise ValueError(
"Unable to download conf file for SDK {} branch {}: status code {}".format(
sdk_id, branch, response.status_code
)
)
return json.loads(response.text)
def extract_conf_from_readmes(swagger_files_in_pr, restapi_git_folder, sdk_git_id, config, force_generation=False):
readme_files_in_pr = {
readme for readme in swagger_files_in_pr if getattr(readme, "name", readme).lower().endswith("readme.md")
}
for readme_file in readme_files_in_pr:
build_swaggertosdk_conf_from_json_readme(
readme_file, sdk_git_id, config, base_folder=restapi_git_folder, force_generation=force_generation
)
def get_readme_path(readme_file, base_folder="."):
"""Get a readable Readme path.
If start with http, assume online, ignore base_folder and convert to raw link if necessary.
If base_folder is not None, assume relative to base_folder.
"""
if not isinstance(readme_file, Path) and readme_file.startswith("http"):
return GithubLink.from_string(readme_file).as_raw_link()
else:
if base_folder is None:
base_folder = "."
return str(Path(base_folder) / Path(readme_file))
def build_swaggertosdk_conf_from_json_readme(readme_file, sdk_git_id, config, base_folder=".", force_generation=False):
"""Get the JSON conf of this README, and create SwaggerToSdk conf.
Readme path can be any readme syntax accepted by autorest.
readme_file will be project key as-is.
:param str readme_file: A path that Autorest accepts. Raw GH link or absolute path.
:param str sdk_dit_id: Repo ID. IF org/login is provided, will be stripped.
:param dict config: Config where to update the "projects" key.
:param bool force_generation: If no Swagger to SDK section is found, force once with the Readme as input
"""
readme_full_path = get_readme_path(readme_file, base_folder)
with tempfile.TemporaryDirectory() as temp_dir:
readme_as_conf = autorest_swagger_to_sdk_conf(readme_full_path, temp_dir, config)
generated_config = {
"markdown": readme_full_path,
}
sdk_git_short_id = sdk_git_id.split("/")[-1].lower()
_LOGGER.info("Looking for tag {} in readme {}".format(sdk_git_short_id, readme_file))
for swagger_to_sdk_conf in readme_as_conf:
if not isinstance(swagger_to_sdk_conf, dict):
continue
repo = swagger_to_sdk_conf.get("repo", "")
repo = repo.split("/")[-1].lower() # Be sure there is no org/login part
if repo == sdk_git_short_id:
_LOGGER.info("This Readme contains a swagger-to-sdk section for repo {}".format(repo))
generated_config.update(
{
"autorest_options": swagger_to_sdk_conf.get("autorest_options", {}),
"after_scripts": swagger_to_sdk_conf.get("after_scripts", []),
}
)
config.setdefault("projects", {})[str(readme_file)] = generated_config
return generated_config
else:
_LOGGER.info("Skip mismatch {} from {}".format(repo, sdk_git_short_id))
if not force_generation:
_LOGGER.info(
"Didn't find tag {} in readme {}. Did you forget to update the SwaggerToSdk section?".format(
sdk_git_short_id, readme_file
)
)
else:
_LOGGER.info("Didn't find tag {} in readme {}. Forcing it.".format(sdk_git_short_id, readme_file))
config.setdefault("projects", {})[str(readme_file)] = generated_config
def get_input_paths(global_conf, local_conf):
"""Returns a 2-tuple:
- Markdown Path or None
- Input-file Paths or empty list
"""
del global_conf # Unused
relative_markdown_path = None # Markdown is optional
input_files = [] # Input file could be empty
if "markdown" in local_conf:
relative_markdown_path = Path(local_conf["markdown"])
input_files = local_conf.get("autorest_options", {}).get("input-file", [])
if input_files and not isinstance(input_files, list):
input_files = [input_files]
input_files = [Path(input_file) for input_file in input_files]
if not relative_markdown_path and not input_files:
raise ValueError("No input file found")
return (relative_markdown_path, input_files)
def solve_relative_path(autorest_options, sdk_root):
"""Solve relative path in conf.
If a key is prefixed by "sdkrel:", it's solved against SDK root.
"""
SDKRELKEY = "sdkrel:"
solved_autorest_options = {}
for key, value in autorest_options.items():
if key.startswith(SDKRELKEY):
_LOGGER.debug("Found a sdkrel pair: %s/%s", key, value)
subkey = key[len(SDKRELKEY) :]
solved_value = Path(sdk_root, value).resolve()
solved_autorest_options[subkey] = str(solved_value)
else:
solved_autorest_options[key] = value
return solved_autorest_options
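# Illustrative example with a made-up SDK root:
#
#   solve_relative_path(
#       {"sdkrel:output-folder": "sdk/storage", "namespace": "azure.storage"},
#       "/work/azure-sdk-for-python")
#   # -> {"output-folder": "/work/azure-sdk-for-python/sdk/storage",
#   #     "namespace": "azure.storage"}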
def get_configuration_github_path(sdk_id, branch="master"):
return GithubLink(sdk_id, "raw", branch, CONFIG_FILE)
| 38.233333 | 119 | 0.682476 |
f700c767ff92c13aef1a23a878df02eea4e86053 | 3,656 | py | Python | src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2018-04-02T15:38:10.000Z | 2018-04-02T15:38:10.000Z | src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/pymodules_old/circuits/core/values.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z | # Package: values
# Date: 11th April 2010
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Values
This defines the Value object used by components and events.
"""
from types import ListType
from itertools import imap
from events import Event
class ValueChanged(Event):
"""Value Changed Event
This Event is triggered when the return Value of an Event Handler has
changed it's value.
"""
def __init__(self, value):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(ValueChanged, self).__init__(value)
class Value(object):
"""Create a new future Value Object
Creates a new future Value Object which is used by Event Objects and the
Manager to store the result(s) of an Event Handler's exeuction of some
Event in the system.
:param event: The Event this Value is associated with.
:type event: Event instance
:param manager: The Manager/Component used to trigger notifications.
:type manager: A Manager/Component instance.
:param onSet: The channel used when triggering ValueChagned events.
:type onSet: A (channel, target) tuple.
:ivar result: True if this value has been changed.
:ivar errors: True if while setting this value an exception occured.
This is a Future/Promise implementation.
"""
def __init__(self, event=None, manager=None, onSet=None):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
self.event = event
self.manager = manager
self.onSet = onSet
self.result = False
self.errors = False
self._parent = self
self._value = None
def __getstate__(self):
keys = ("event", "onSet", "result", "errors", "_value")
return dict([(k, getattr(self, k, None)) for k in keys])
def __contains__(self, y):
value = self.value
return y in value if type(value) is ListType else y == value
def __getitem__(self, y):
v = self.value[y]
if isinstance(v, Value):
return v.value
else:
return v
def __iter__(self):
return imap(lambda v: v.value if isinstance(v, Value) else v,
self.value)
def __repr__(self):
"x.__repr__() <==> repr(x)"
value = ""
if self.result:
value = repr(self.value)
format = "<Value (%s) result: %r errors: %r for %r"
return format % (value, self.result, self.errors, self.event)
def __str__(self):
"x.__str__() <==> str(x)"
return str(self.value)
def getValue(self):
value = self._value
while isinstance(value, Value):
value = value._value
return value
def setValue(self, value):
if isinstance(value, Value):
value._parent = self
if self.result and type(self._value) is ListType:
self._value.append(value)
elif self.result:
self._value = [self._value]
self._value.append(value)
else:
self._value = value
def notify(o, v):
if not isinstance(v, Value) and v is not None:
o.result = True
if o.manager is not None and o.onSet is not None:
o.manager.fireEvent(ValueChanged(o), *o.onSet)
elif isinstance(v, Value):
o.errors = v.errors
o.result = v.result
if not o._parent == o:
notify(o._parent, v)
notify(self, value)
value = property(getValue, setValue, None, "Value of this Value")
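# Illustrative sketch of the future/promise behaviour described in the class
# docstring (no manager/onSet here, so no ValueChanged event is fired):
#
#   v = Value()
#   v.result        # False - no result yet
#   v.value = 42    # setValue() stores the result and flips v.result to True
#   str(v)          # "42"
#
# Assigning another Value instead of a plain object chains the two, and the
# parent's result/errors flags are updated when the child's value arrives.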
| 28.341085 | 78 | 0.602298 |
f700e68836d56c80b1eb23849bcf903eda4dfa6c | 5,105 | py | Python | nova/virt/hyperv/imagecache.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/virt/hyperv/imagecache.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null | nova/virt/hyperv/imagecache.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import nova.conf
from nova import exception
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt import images
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class ImageCache(object):
def __init__(self):
self._pathutils = pathutils.PathUtils()
self._vhdutils = utilsfactory.get_vhdutils()
def _get_root_vhd_size_gb(self, instance):
if instance.old_flavor:
return instance.old_flavor.root_gb
else:
return instance.root_gb
def _resize_and_cache_vhd(self, instance, vhd_path):
vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * units.Gi
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
if root_vhd_internal_size < vhd_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=root_vhd_size, image_size=vhd_size)
if root_vhd_internal_size > vhd_size:
path_parts = os.path.splitext(vhd_path)
resized_vhd_path = '%s_%s%s' % (path_parts[0],
root_vhd_size_gb,
path_parts[1])
@utils.synchronized(resized_vhd_path)
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
self._pathutils.remove(resized_vhd_path)
copy_and_resize_vhd()
return resized_vhd_path
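# Illustrative note: for a cached image "C:\base\image.vhd" and a 40 GB flavor
# root disk, the resized copy above is cached as "C:\base\image_40.vhd".
# If the flavor's disk would be smaller than the image's virtual size,
# FlavorDiskSmallerThanImage is raised instead of resizing.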
def get_cached_image(self, context, instance):
image_id = instance.image_ref
base_vhd_dir = self._pathutils.get_base_vhd_dir()
base_vhd_path = os.path.join(base_vhd_dir, image_id)
@utils.synchronized(base_vhd_path)
def fetch_image_if_not_existing():
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = base_vhd_path + '.' + format_ext
if self._pathutils.exists(test_path):
vhd_path = test_path
break
if not vhd_path:
try:
images.fetch(context, image_id, base_vhd_path)
format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
vhd_path = base_vhd_path + '.' + format_ext.lower()
self._pathutils.rename(base_vhd_path, vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_path):
self._pathutils.remove(base_vhd_path)
return vhd_path
vhd_path = fetch_image_if_not_existing()
if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd':
# Resize the base VHD image as it's not possible to resize a
# differencing VHD. This does not apply to VHDX images.
resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
if resized_vhd_path:
return resized_vhd_path
return vhd_path
| 39.573643 | 78 | 0.591773 |
f700f20444454593e2536cb9e2591f4eae5a213c | 7,178 | py | Python | src/config.py | volovodenko/English | 860ae0f971909b9aa299c193ea7d0161c88d0b22 | ["Apache-2.0"] | null | null | null | src/config.py | volovodenko/English | 860ae0f971909b9aa299c193ea7d0161c88d0b22 | ["Apache-2.0"] | null | null | null | src/config.py | volovodenko/English | 860ae0f971909b9aa299c193ea7d0161c88d0b22 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re
import json
import os.path
import unittest
reg_cmnt = re.compile(r"/\*.*?\*/", re.DOTALL)
class Config:
"Работа с конфигурационным файлом"
def __init__(self, main_path=None, user_path=None):
if main_path is None:
self._main_path = "config.json5"
else:
self._main_path = main_path
if user_path is None:
self._user_path = "config_user.json5"
else:
self._user_path = user_path
self._cfg_dict = {}
def __getitem__(self, key):
return self._cfg_dict[key]
def __len__(self):
return len(self._cfg_dict)
def _load_json(self, path):
data = {}
if os.path.exists(path):
txt = open(path).read()
txt = reg_cmnt.sub("", txt) # remove comments
data = json.loads(txt)
return data
def _set_default(self, cfg):
cfg["path_to_dict"] = cfg.get("path_to_dict", "dict.json")
cfg["path_to_stat"] = cfg.get("path_to_stat", "statistic.json")
cfg["words_per_lesson"] = int(cfg.get("words_per_lesson", 5))
cfg["CntStudyWords"] = int(cfg.get("CntStudyWords", 50))
cfg["MinPercent"] = float(cfg.get("MinPercent", 97.0))
cfg["MinSuccessCnt"] = int(cfg.get("MinSuccessCnt", 10))
cfg["retry_time"] = int(cfg.get("retry_time", 1800))
cfg["hide_transcription"] = cfg.get("hide_transcription", "no")
cfg["start_time_delay"] = int(cfg.get("start_time_delay", 1))
cfg["stat_count_row"] = int(cfg.get("stat_count_row", 200))
cfg["right_answer_percent"] = float(cfg.get("right_answer_percent", 10.0))
cfg["wrong_answer_percent"] = float(cfg.get("wrong_answer_percent", 40.0))
cfg["empty_answer_is_error"] = cfg.get("empty_answer_is_error", "no")
cfg["internet_dictionary_url"] = cfg.get("internet_dictionary_url",
{"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"})
def create_default_user_config(self):
if not os.path.isfile(self._user_path):
txt = "{\n /*\n User config\n */\n\n}"
open(self._user_path, "wt").write(txt)
def reload(self):
self._cfg_dict = {}
self._cfg_dict.update(self._load_json(self._main_path))
self._cfg_dict.update(self._load_json(self._user_path))
self._set_default(self._cfg_dict)
return self._cfg_dict
def get_dict(self):
return self._cfg_dict
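# Illustrative sketch: the user file only needs the keys it overrides; anything
# missing falls back to the main file and then to the defaults applied in
# _set_default().
#
#   cfg = Config("config.json5", "config_user.json5")
#   cfg.reload()
#   cfg["words_per_lesson"]   # 5 unless overridden in config_user.json5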
class ConfigTestCase(unittest.TestCase):
"Набор тестов для класса Config"
def setUp(self):
if os.path.isfile("test_config_user.json"):
os.remove("test_config_user.json")
def tearDown(self):
if os.path.isfile("test_config_user.json"):
os.remove("test_config_user.json")
def equal_cfg(self, cfg, test_dict):
for key, val in test_dict.items():
self.assertEqual(cfg[key], val)
self.assertEqual(len(cfg), 14)
def test_main(self):
"Тестирование загрузки основного файла с конфигурацией"
test_dict = {
"path_to_dict": "dict.json",
"path_to_stat": "statistic.json",
"words_per_lesson": 5,
"CntStudyWords": 50,
"MinPercent": 97.0,
"MinSuccessCnt": 10,
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no",
"internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
cfg = Config("config.json5", "fake_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_user(self):
"Тестирование загрузки пользовательского файла с конфигурацией"
test_dict = {
"path_to_dict": "dict1.json",
"path_to_stat": "statistic1.json",
"words_per_lesson": 6,
"CntStudyWords": 60,
"MinPercent": 98.0,
"MinSuccessCnt": 11,
"retry_time": 1801,
"hide_transcription": "yes",
"start_time_delay": 2,
"stat_count_row": 300,
"right_answer_percent": 20.0,
"wrong_answer_percent": 50.0,
"empty_answer_is_error": "yes",
"internet_dictionary_url": {"EN_RU": "http1://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http1://slovari.yandex.ru/{word}/en/#lingvo/"}}
json.dump(test_dict, open("test_config_user.json", "w"))
cfg = Config("config.json5", "test_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_user_part(self):
"Тестирование загрузки пользовательского файла с конфигурацией, который перекрывает только часть настроек"
test_dict = {
"path_to_dict": "dict1.json",
"path_to_stat": "statistic1.json",
"words_per_lesson": 6,
"CntStudyWords": 60,
"MinPercent": 98.0,
"MinSuccessCnt": 11}
json.dump(test_dict, open("test_config_user.json", "w"))
test_dict.update({
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no"})
cfg = Config("config.json5", "test_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_not_exists(self):
"Тестирование выставления дефолтных настроек"
test_dict = {
"path_to_dict": "dict.json",
"path_to_stat": "statistic.json",
"words_per_lesson": 5,
"CntStudyWords": 50,
"MinPercent": 97.0,
"MinSuccessCnt": 10,
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no",
"internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
cfg = Config("config.json5", "fake_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
cfg = Config("fake_config.json", "fake_config_user.json")
cfg.reload()
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
suite = unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
f70101d2e677dfa1c95b8d12717565b56481d031 | 11,171 | py | Python | server/server/organizations/models.py | connectiveproject/connective | ["MIT"]
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from taggit.managers import TaggableManager
from server.connective_tags.models import ConnectiveTaggedItem
from server.schools.models import School
from server.utils.db_utils import get_base_model
from server.utils.model_fields import random_slug
class SchoolActivityGroupManager(models.Manager):
def get_activity_container_only_group(self, activity_group):
container_only_groups = self.filter(
activity_order=activity_group.activity_order,
group_type=SchoolActivityGroup.GroupTypes.CONTAINER_ONLY,
)
if container_only_groups.exists():
return container_only_groups[0]
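# Hedged usage sketch (not part of the original models): the custom manager is
# meant to locate the container-only group that shares an activity order with a
# given group, e.g.
#
#   container = SchoolActivityGroup.objects.get_activity_container_only_group(group)
#   if container is not None:
#       container.consumers.add(*group.consumers.all())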
class ImportedOrganization(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
organization_number = models.CharField(max_length=10, unique=True)
email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=4096, null=True, blank=True)
website_url = models.URLField(null=True, blank=True)
name = models.CharField(max_length=256, null=True, blank=True)
goal = models.CharField(max_length=4096, null=True, blank=True)
year_founded = models.CharField(max_length=128, null=True, blank=True)
status = models.CharField(max_length=50, null=True, blank=True)
target_audience = models.JSONField(null=True, blank=True)
number_of_employees = models.PositiveIntegerField(null=True, blank=True)
number_of_members = models.PositiveIntegerField(null=True, blank=True)
number_of_volunteers = models.PositiveIntegerField(null=True, blank=True)
location_lon = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
location_lat = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
address_city = models.CharField(max_length=256, null=True, blank=True)
address_street = models.CharField(max_length=256, null=True, blank=True)
address_house_num = models.CharField(max_length=30, null=True, blank=True)
address_zipcode = models.CharField(max_length=9, null=True, blank=True)
cities = models.JSONField(null=True, blank=True)
districts = models.JSONField(null=True, blank=True)
union_type = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return f"{self.name} | {self.organization_number} | {self.slug}"
class Organization(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
organization_number = models.CharField(max_length=10, unique=True, null=True)
email = models.EmailField()
description = models.CharField(max_length=300)
website_url = models.URLField(null=True, blank=True)
name = models.CharField(max_length=100)
goal = models.CharField(max_length=300, null=True, blank=True)
year_founded = models.CharField(max_length=4, null=True, blank=True)
status = models.CharField(max_length=50, null=True, blank=True)
target_audience = models.JSONField(null=True, blank=True)
number_of_employees = models.PositiveIntegerField(null=True, blank=True)
number_of_members = models.PositiveIntegerField(null=True, blank=True)
number_of_volunteers = models.PositiveIntegerField(null=True, blank=True)
location_lon = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
location_lat = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
address_city = models.CharField(max_length=150, null=True, blank=True)
address_street = models.CharField(max_length=150, null=True, blank=True)
address_house_num = models.CharField(max_length=20, null=True, blank=True)
address_zipcode = models.CharField(max_length=9, null=True, blank=True)
cities = models.JSONField(null=True, blank=True)
districts = models.JSONField(null=True, blank=True)
union_type = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return f"{self.name} | {self.organization_number} | {self.slug}"
class Activity(get_base_model()):
class Domain(models.TextChoices):
SCIENCE_AND_TECH = "SCIENCE_AND_TECH", "Science And Tech"
EXTREME_SPORTS = "EXTREME_SPORTS", "Extreme Sports"
FIELD = "FIELD", "Field"
OTHER = "OTHER", "Other"
tags = TaggableManager(blank=True, through=ConnectiveTaggedItem)
slug = models.CharField(max_length=40, default=random_slug, unique=True)
name = models.CharField(max_length=35)
target_audience = models.JSONField()
domain = models.CharField(max_length=55, null=True, choices=Domain.choices)
originization = models.ForeignKey(
Organization,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="activities",
)
activity_website_url = models.URLField(max_length=750, null=True, blank=True)
activity_email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=550, default="")
contact_name = models.CharField(max_length=60, default="")
logo = models.ImageField(blank=True, null=True)
phone_number = models.CharField(
blank=True,
max_length=15,
validators=[
RegexValidator(
regex=r"^\d{9,15}$",
message=_("phone number must be between 9-15 digits"),
)
],
)
def __str__(self):
try:
return f"{self.name} | {self.slug} | {self.originization.name}"
except AttributeError:
return f"{self.name} | {self.slug}"
class ImportedActivity(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
activity_code = models.IntegerField()
name = models.CharField(max_length=550)
raw_name = models.CharField(max_length=550)
target_audience = models.JSONField()
organization_number = models.IntegerField()
organization_name = models.CharField(max_length=1550, default="")
target_gender = models.JSONField()
target_population = models.JSONField()
target_time = models.JSONField()
target_size = models.JSONField()
target_migzar = models.JSONField()
target_pikuah = models.JSONField()
profession = models.JSONField()
goal = models.CharField(max_length=1550, default="")
is_active = models.BooleanField()
activity_website_url = models.URLField(max_length=750, null=True, blank=True)
activity_email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=1550, default="")
contact_name = models.CharField(max_length=100, default="")
phone_number = models.CharField(
blank=True,
max_length=15,
validators=[
RegexValidator(
regex=r"^\d{9,15}$",
message=_("phone number must be between 9-15 digits"),
)
],
)
def __str__(self):
return f"{self.name} | {self.slug} | {self.activity_code}"
class ActivityMedia(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
name = models.CharField(max_length=40, null=True, blank=True)
image_url = models.ImageField(blank=True, null=True)
video_url = models.URLField(blank=True, null=True)
activity = models.ForeignKey(
Activity,
on_delete=models.CASCADE,
related_name="rich_media",
)
def __str__(self):
return f"{self.name} | {self.slug} | {self.activity.name}"
class OrganizationMember(get_base_model()):
user = models.OneToOneField(
"users.User", on_delete=models.CASCADE, related_name="organization_member"
)
organization = models.ForeignKey(
Organization,
on_delete=models.CASCADE,
related_name="organization_member",
)
def __str__(self):
return f"{self.user.email} | {self.organization.name}"
class SchoolActivityOrder(get_base_model()):
class Meta:
constraints = [
models.UniqueConstraint(fields=["school", "activity"], name="unique_order")
]
class Status(models.TextChoices):
CANCELLED = "CANCELLED", "Cancelled"
PENDING_ADMIN_APPROVAL = "PENDING_ADMIN_APPROVAL", "Pending Admin Approval"
APPROVED = "APPROVED", "Approved"
DENIED = "DENIED", "Denied"
base_status = Status.PENDING_ADMIN_APPROVAL
slug = models.CharField(max_length=40, default=random_slug, unique=True)
requested_by = models.ForeignKey(
"users.User",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="requested_orders",
)
last_updated_by = models.ForeignKey(
"users.User",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="last_updated_by_me_orders",
)
school = models.ForeignKey(
School, on_delete=models.CASCADE, related_name="school_activity_orders"
)
activity = models.ForeignKey(
Activity, on_delete=models.CASCADE, related_name="school_activity_orders"
)
status = models.CharField(
_("status"), max_length=50, choices=Status.choices, default=base_status
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status_reason = models.CharField(
max_length=250,
blank=True,
)
def __str__(self):
return f"{self.activity} | {self.school} | {self.status} | {self.pk}"
class SchoolActivityGroup(get_base_model()):
class GroupTypes(models.TextChoices):
CONTAINER_ONLY = "CONTAINER_ONLY", "Container Only"
DISABLED_CONSUMERS = "DISABLED_CONSUMERS", "Disabled Consumers"
NO_REGISTRATION = "NO_REGISTRATION", "No Registration"
DEFAULT = "DEFAULT", "Default"
objects = SchoolActivityGroupManager()
slug = models.CharField(max_length=40, default=random_slug, unique=True)
activity_order = models.ForeignKey(
SchoolActivityOrder, on_delete=models.CASCADE, related_name="activity_groups"
)
name = models.CharField(_("name"), max_length=50)
description = models.CharField(_("description"), max_length=550)
consumers = models.ManyToManyField(
"users.Consumer",
related_name="activity_groups",
blank=True,
)
group_type = models.CharField(
_("group type"),
max_length=50,
choices=GroupTypes.choices,
default=GroupTypes.DEFAULT,
)
instructor = models.ForeignKey(
"users.Instructor",
on_delete=models.SET_NULL,
related_name="managed_activity_groups",
null=True,
blank=True,
)
def __str__(self):
return f"""
{self.name} : {self.group_type} : {self.slug} :
{self.activity_order.activity.name} : {self.activity_order.school.name}
"""
f70153728cb260c3c86bc652b2c6fedfd73c3c53 | 4,548 | py | Python | core/assembly_system.py | YifanQie/Deep_Learning_for_Manufacturing | ["MIT"]
import numpy as np
import pandas as pd
from sqlalchemy import create_engine  # used by PartType.get_nominal_cop_database
""" Contains core classes and methods for initializing a Assembly System, the inputs are provided in assemblyconfig file in utilities"""
class AssemblySystem:
"""Assembly System Class
:param assembly_type: Type of assembly Single-Station/Multi-Station
:type assembly_system: str (required)
:param assembly_kccs: Number of KCCs for the assembly
:type assembly_kccs: int (required)
:param assembly_kpis: Number of Kpis for the assembly
:type assembly_kpis: int (required)
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis):
self.assembly_type=assembly_type
self.assembly_kccs=assembly_kccs
self.assembly_kpis=assembly_kpis
class PartType(AssemblySystem):
"""Part System Class, inherits the Assembly System Class, additional parameters for this class include
:param voxel_dim: Dimension of the voxel grid
:type voxel_dim: int (required)
:param voxel_channels: Number of voxel channels, single channel output - 1 or multi channel - 2,3 (use 1 for deviations in one direction, 2 or 3 if data for multiple deviation directions are present)
:type voxel_channels: int (required)
:param point_dim: Number of points in the nominal cloud-of-point
:type point_dim: int (required)
The class contains two functions - get_nominal_cop and get_nominal_cop_database
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim):
super().__init__(assembly_type,assembly_kccs,assembly_kpis)
self.part_name=part_name
self.part_type=part_type
self.voxel_dim=voxel_dim
self.voxel_channels=voxel_channels
self.point_dim=point_dim
def get_nominal_cop(self,file_name):
"""Import nominal cloud-of-point of the assembly from a text/csv file
:param file_name: Name of the input file
:type file_name: str (required)
:returns: numpy array of nominal COP
:rtype: numpy.array [point_dim,3]
"""
df=pd.read_csv(file_name, sep=',',header=None)
nominal_cop=df.values
return nominal_cop
def get_nominal_cop_database(self,conn_str,table_name):
"""Import nominal cloud-of-point of the assembly from a SQL database assumes the table only contains three columns of the nominal COPs in order of the Node IDs
:param conn_str: Connection String for Database
:type conn_str: str (required)
:param table_name: Name of table in the database
:type table_name: str (required)
:returns: numpy array of dim points * 3
:rtype: numpy.array [point_dim,3]
"""
engine = create_engine(conn_str)
squery ='select * from '+table_name
df_nom = pd.read_sql_query(squery,con=engine)
df_nom = df_nom.values
return df_nom
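def _example_part_type_usage():
    # Hedged sketch (not in the original module): describe a part and load its
    # nominal cloud-of-point from CSV; every argument value and the file name
    # are illustrative assumptions only.
    part = PartType(
        assembly_type="Single-Station", assembly_kccs=6, assembly_kpis=1,
        part_name="inner_rf", part_type="sheet_metal",
        voxel_dim=64, voxel_channels=1, point_dim=8047,
    )
    nominal_cop = part.get_nominal_cop("nominal_cop.csv")  # numpy array [point_dim, 3]
    return nominal_cop.shape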
class VRMSimulationModel(PartType):
"""VRM Simulation Model class inherits the part type class, additional parameters of this class include
:param noise_level: The level of artificial noise to be added to simulated data, typically set to 0.1 mm from the measurement system class depending on the scanner
:type noise_level: float (required)
:param noise_type: The type of noise to be added, can be Gaussian or uniform , for Gaussian noise_level is set as standard deviation and mean as zero for uniform the min and max are set -noise_level and +noise_level respectively
:type noise_type: str (optional)
:param convergency_flag: Flag to denote if the simulation model had converged while simulating, is set to 1 by default
:type convergency_flag: int (optional)
The class contains one function kpi_calculator that needs to be defined by the user depending on the assembly output
"""
def __init__(self,assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,noise_level,noise_type='uniform',convergency_flag=1):
super().__init__(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim)
self.noise_level=noise_level
self.noise_type=noise_type
self.convergency_flag=convergency_flag
def kpi_calculator(self,cop_data,kpi_params=[]):
""" User defined function to calculate KPI from Cloud of Point Data [KPI]=f(Cop)
:param cop_data: CoP data for a given sample
:type cop_data: np_array [point_dim,3] (required)
:param kpi_params: Various parameters required to calculate the KPI, can be blank if no parameters are required to calculate KPI from CoP
:type kpi_params: list (optional)
:returns: list of multivariate KPIs for the given CoP
:rtype: list
"""
kpi=[None]*self.assembly_kpis
#define function here
return kpi
f701a87736fbc584f7e9ffd3e6d8d63f457be0ba | 2,204 | py | Python | lhotse/manipulation.py | freewym/lhotse | ["Apache-2.0"]
from functools import reduce
from itertools import chain
from operator import add
from typing import Iterable, Optional, TypeVar
from lhotse.audio import Recording, RecordingSet
from lhotse.cut import Cut, CutSet, MixedCut
from lhotse.features import FeatureSet, Features
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, load_yaml
ManifestItem = TypeVar('ManifestItem', Recording, SupervisionSegment, Features, Cut, MixedCut)
Manifest = TypeVar('Manifest', RecordingSet, SupervisionSet, FeatureSet, CutSet)
def combine(*manifests: Manifest) -> Manifest:
"""Combine multiple manifests of the same type into one."""
return reduce(add, manifests)
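def _example_combine_usage():
    # Hedged sketch (not part of the original module): merge two manifests of
    # the same type that were previously written to disk; the file names are
    # illustrative assumptions only.
    recordings_a = load_manifest("recordings-a.yml")
    recordings_b = load_manifest("recordings-b.yml")
    return combine(recordings_a, recordings_b)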
def to_manifest(items: Iterable[ManifestItem]) -> Optional[Manifest]:
"""
Take an iterable of data types in Lhotse such as Recording, SupervisonSegment or Cut, and create the manifest of the
corresponding type. When the iterable is empty, returns None.
"""
items = iter(items)
try:
first_item = next(items)
except StopIteration:
return None
items = chain([first_item], items)
if isinstance(first_item, Recording):
return RecordingSet.from_recordings(items)
if isinstance(first_item, SupervisionSegment):
return SupervisionSet.from_segments(items)
if isinstance(first_item, (Cut, MixedCut)):
return CutSet.from_cuts(items)
if isinstance(first_item, Features):
raise ValueError("FeatureSet generic construction from iterable is not possible, as the config information "
"would have been lost. Call FeatureSet.from_features() directly instead.")
raise ValueError(f"Unknown type of manifest item: {first_item}")
def load_manifest(path: Pathlike) -> Manifest:
"""Generic utility for reading an arbitrary manifest."""
raw_data = load_yaml(path)
data_set = None
for manifest_type in [RecordingSet, SupervisionSet, FeatureSet, CutSet]:
try:
data_set = manifest_type.from_dicts(raw_data)
except Exception:
pass
if data_set is None:
raise ValueError(f'Unknown type of manifest: {path}')
return data_set
f701ad039addc3139e0d9bb52293365f52a99e55 | 5,544 | py | Python | tests/unit/modules/brew_test.py | skrobul/salt | ["Apache-2.0"]
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@satlstack.com>`
'''
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.mock import MagicMock, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import brew
# Global Variables
brew.__context__ = {}
brew.__salt__ = {}
TAPS_STRING = 'homebrew/dupes\nhomebrew/science\nhomebrew/x11'
TAPS_LIST = ['homebrew/dupes', 'homebrew/science', 'homebrew/x11']
HOMEBREW_BIN = '/usr/local/bin/brew'
class BrewTestCase(TestCase):
'''
TestCase for salt.modules.brew module
'''
# '_list_taps' function tests: 1
def test_list_taps(self):
'''
Tests the return of the list of taps
'''
mock_taps = MagicMock(return_value=TAPS_STRING)
with patch.dict(brew.__salt__, {'cmd.run': mock_taps}):
self.assertEqual(brew._list_taps(), TAPS_LIST)
# '_tap' function tests: 3
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap_installed(self):
'''
Tests if tap argument is already installed or not
'''
self.assertTrue(brew._tap('homebrew/science'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value={}))
def test_tap_failure(self):
'''
Tests if the tap installation failed
'''
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_failure}):
self.assertFalse(brew._tap('homebrew/test'))
@patch('salt.modules.brew._list_taps', MagicMock(return_value=TAPS_LIST))
def test_tap(self):
'''
Tests adding unofficial Github repos to the list of brew taps
'''
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'cmd.retcode': mock_success}):
self.assertTrue(brew._tap('homebrew/test'))
# '_homebrew_bin' function tests: 1
def test_homebrew_bin(self):
'''
Tests the path to the homebrew binary
'''
mock_path = MagicMock(return_value='/usr/local')
with patch.dict(brew.__salt__, {'cmd.run': mock_path}):
self.assertEqual(brew._homebrew_bin(), '/usr/local/bin/brew')
# 'list_pkgs' function tests: 2
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_list_pkgs_removed(self):
'''
Tests removed implementation
'''
self.assertEqual(brew.list_pkgs(removed=True), {})
def test_list_pkgs_versions_true(self):
'''
Tests if pkg.list_pkgs is already in context and is a list
'''
mock_context = {'foo': ['bar']}
with patch.dict(brew.__context__, {'pkg.list_pkgs': mock_context}):
self.assertEqual(brew.list_pkgs(versions_as_list=True),
mock_context)
# 'version' function tests: 1
def test_version(self):
'''
Tests version name returned
'''
mock_version = MagicMock(return_value='0.1.5')
with patch.dict(brew.__salt__, {'pkg_resource.version': mock_version}):
self.assertEqual(brew.version('foo'), '0.1.5')
# 'latest_version' function tests: 0
# It has not been fully implemented
# 'remove' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
@patch('salt.modules.brew.list_pkgs',
MagicMock(return_value={'test': '0.1.5'}))
def test_remove(self):
'''
Tests if package to be removed exists
'''
mock_params = MagicMock(return_value=({'foo': None}, 'repository'))
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.remove('foo'), {})
# 'refresh_db' function tests: 2
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db_failure(self):
'''
Tests an update of homebrew package repository failure
'''
mock_user = MagicMock(return_value='foo')
mock_failure = MagicMock(return_value=1)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_failure}):
self.assertFalse(brew.refresh_db())
@patch('salt.modules.brew._homebrew_bin',
MagicMock(return_value=HOMEBREW_BIN))
def test_refresh_db(self):
'''
Tests a successful update of homebrew package repository
'''
mock_user = MagicMock(return_value='foo')
mock_success = MagicMock(return_value=0)
with patch.dict(brew.__salt__, {'file.get_user': mock_user,
'cmd.retcode': mock_success}):
self.assertTrue(brew.refresh_db())
# 'install' function tests: 1
# Only tested a few basics
# Full functionality should be tested in integration phase
def test_install(self):
'''
Tests if package to be installed exists
'''
mock_params = MagicMock(return_value=[None, None])
with patch.dict(brew.__salt__,
{'pkg_resource.parse_targets': mock_params}):
self.assertEqual(brew.install('name=foo'), {})
if __name__ == '__main__':
from integration import run_tests
run_tests(BrewTestCase, needs_daemon=False)
f701dbb60581a894fa82d654ad38824ba276b7a5 | 4,113 | py | Python | model/seg_models/pspnet.py | AceCoooool/segmentation | ["MIT"]
"""Pyramid Scene Parsing Network"""
import os
import torch
from torch import nn
import torch.nn.functional as F
from model.seg_models.segbase import SegBaseModel
from model.module.basic import _FCNHead
__all__ = ['PSPNet', 'get_psp',
'get_psp_resnet101_voc',
'get_psp_resnet101_citys']
# head
def _PSP1x1Conv(in_channels, out_channels):
return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
class _PyramidPooling(nn.Module):
def __init__(self, in_channels):
super(_PyramidPooling, self).__init__()
out_channels = in_channels // 4
self.conv1 = _PSP1x1Conv(in_channels, out_channels)
self.conv2 = _PSP1x1Conv(in_channels, out_channels)
self.conv3 = _PSP1x1Conv(in_channels, out_channels)
self.conv4 = _PSP1x1Conv(in_channels, out_channels)
@staticmethod
def pool(x, size):
return F.adaptive_avg_pool2d(x, output_size=size)
@staticmethod
def upsample(x, h, w):
return F.interpolate(x, (h, w), mode='bilinear', align_corners=True)
def forward(self, x):
_, _, h, w = x.shape
feat1 = self.upsample(self.conv1(self.pool(x, 1)), h, w)
feat2 = self.upsample(self.conv2(self.pool(x, 2)), h, w)
feat3 = self.upsample(self.conv3(self.pool(x, 3)), h, w)
feat4 = self.upsample(self.conv4(self.pool(x, 4)), h, w)
return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
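def _example_pyramid_pooling():
    # Hedged sketch (not in the original module): pyramid pooling keeps the
    # spatial size and grows the channels from 2048 to 2048 + 4 * 512 = 4096,
    # which is why _PSPHead expects 4096 input channels.
    psp = _PyramidPooling(2048)
    x = torch.randn(1, 2048, 60, 60)
    return psp(x).shape  # torch.Size([1, 4096, 60, 60])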
class _PSPHead(nn.Module):
def __init__(self, nclass, **kwargs):
super(_PSPHead, self).__init__(**kwargs)
self.psp = _PyramidPooling(2048)
self.block = list()
self.block.append(nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False))
self.block.append(nn.BatchNorm2d(512))
self.block.append(nn.ReLU(inplace=True))
self.block.append(nn.Dropout(0.1))
self.block.append(nn.Conv2d(512, nclass, kernel_size=1))
self.block = nn.Sequential(*self.block)
def forward(self, x):
x = self.psp(x)
return self.block(x)
class PSPNet(SegBaseModel):
def __init__(self, nclass, backbone='resnet50', aux=True, dilated=True, jpu=False,
pretrained_base=True, base_size=520, crop_size=480, **kwargs):
super(PSPNet, self).__init__(nclass, aux, backbone, base_size=base_size, dilated=dilated, jpu=jpu,
crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)
self.head = _PSPHead(nclass, **kwargs)
if self.aux:
self.auxlayer = _FCNHead(1024, nclass, **kwargs)
self.__setattr__('others', ['head', 'auxlayer'] if self.aux else ['head'])
def forward(self, x):
c3, c4 = self.base_forward(x)
outputs = []
x = self.head(c4)
x = F.interpolate(x, self._up_kwargs, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(c3)
auxout = F.interpolate(auxout, self._up_kwargs, mode='bilinear', align_corners=True)
outputs.append(auxout)
return tuple(outputs)
def get_psp(dataset='pascal_voc', backbone='resnet101', pretrained=False, pretrained_base=True,
jpu=False, root=os.path.expanduser('~/.torch/models'), **kwargs):
acronyms = {
'pascal_voc': 'voc',
'citys': 'citys',
}
from data import datasets
# infer number of classes
model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone,
pretrained_base=pretrained_base, jpu=jpu, **kwargs)
if pretrained:
from model.model_store import get_model_file
name = 'psp_%s_%s' % (backbone, acronyms[dataset])
name = name + '_jpu' if jpu else name
model.load_state_dict(torch.load(get_model_file(name, root=root)))
return model
def get_psp_resnet101_voc(**kwargs):
return get_psp('pascal_voc', 'resnet101', **kwargs)
def get_psp_resnet101_citys(**kwargs):
return get_psp('citys', 'resnet101', **kwargs)
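def _example_get_psp():
    # Hedged sketch (not part of the original module): build an untrained
    # PSPNet for Pascal VOC without fetching any checkpoint weights.
    return get_psp("pascal_voc", "resnet101", pretrained=False, pretrained_base=False)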
f701f97e1f188d4e04e78e513ce8208e4d9f71ef | 1,360 | py | Python | deploy.py | blockchainhelppro/CelvinRost | ["MIT"]
import itertools
import toposort
from populus.utils.contracts import (
compute_direct_dependency_graph,
compute_recursive_contract_dependencies,
)
def compute_deploy_order(dependency_graph):
"""
Given a dictionary that maps contract to their dependencies,
determine the overall dependency ordering for that set of contracts.
"""
return toposort.toposort_flatten(dict(dependency_graph))
def get_deploy_order(contracts_to_deploy, compiled_contracts):
# Extract and dependencies that exist due to library linking.
dependency_graph = compute_direct_dependency_graph(compiled_contracts.values())
global_deploy_order = compute_deploy_order(dependency_graph)
# Compute the full set of dependencies needed to deploy the desired
# contracts.
all_deploy_dependencies = set(itertools.chain.from_iterable(
compute_recursive_contract_dependencies(contract_name, dependency_graph)
for contract_name in contracts_to_deploy
))
all_contracts_to_deploy = all_deploy_dependencies.union(contracts_to_deploy)
# Now compute the order that the contracts should be deployed based on
# their dependencies.
deploy_order = tuple(
contract_name
for contract_name
in global_deploy_order
if contract_name in all_contracts_to_deploy
)
return deploy_order
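def _example_deploy_order():
    # Hedged sketch (not in the original module): with a toy dependency graph,
    # a library contract is ordered before the contract that links against it.
    dependency_graph = {"Token": {"SafeMath"}, "SafeMath": set()}
    return compute_deploy_order(dependency_graph)  # ["SafeMath", "Token"]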
f7020126c0821383f6a8544cd6c1e7094992bb87 | 25 | py | Python | btd6_memory_info/generated/Unity/Collections/LowLevel/Unsafe/UnsafeUtility/unsafe_utility.py | 56kyle/bloons_auto | ["MIT"]
class UnsafeUtility: pass
f702024c3c01565b670bab7999a264ce4f0d7f8d | 260 | py | Python | slack_app/tasks.py | webscopeio/django-slack-app | ["MIT"]
from celery import shared_task
from .signals import slack_event_received
@shared_task
def receive_slack_signal_task(sender, event_type, event_data, **data):
slack_event_received.send(sender=sender, event_type=event_type, event_data=event_data, **data)
f70218f2b4f389dac4b6b4a28a071cb1c97475d0 | 5,869 | py | Python | office365/sharepoint/tenant/administration/tenant.py | wreiner/Office365-REST-Python-Client | ["MIT"]
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.tenant.administration.hubSiteProperties import HubSiteProperties
from office365.sharepoint.tenant.administration.secondary_administrators_fields_data import \
SecondaryAdministratorsFieldsData
from office365.sharepoint.tenant.administration.secondary_administrators_info import SecondaryAdministratorsInfo
from office365.sharepoint.tenant.administration.site_properties import SiteProperties
from office365.sharepoint.tenant.administration.site_properties_collection import SitePropertiesCollection
from office365.sharepoint.tenant.administration.sitePropertiesEnumerableFilter import SitePropertiesEnumerableFilter
from office365.sharepoint.tenant.administration.spo_operation import SpoOperation
class Tenant(BaseEntity):
def __init__(self, context):
super().__init__(context, ResourcePath("Microsoft.Online.SharePoint.TenantAdministration.Tenant"),
"Microsoft.Online.SharePoint.TenantAdministration")
def get_site_secondary_administrators(self, site_id):
"""
Gets site collection administrators
:type site_id: str
"""
return_type = ClientValueCollection(SecondaryAdministratorsInfo)
payload = SecondaryAdministratorsFieldsData(site_id)
qry = ServiceOperationQuery(self, "GetSiteSecondaryAdministrators", None, payload,
"secondaryAdministratorsFieldsData", return_type)
self.context.add_query(qry)
return return_type
def set_site_secondary_administrators(self, site_id, emails, names=None):
"""
Sets site collection administrators
:type names: list[str] or None
:type emails: list[str]
:type site_id: str
"""
payload = SecondaryAdministratorsFieldsData(site_id, emails, names)
qry = ServiceOperationQuery(self, "SetSiteSecondaryAdministrators", None, payload,
"secondaryAdministratorsFieldsData", None)
self.context.add_query(qry)
return self
def register_hub_site(self, site_url):
"""
Registers an existing site as a hub site.
:param str site_url:
:return:
"""
return_type = HubSiteProperties(self.context)
params = {"siteUrl": site_url}
qry = ServiceOperationQuery(self, "RegisterHubSite", None, params, None, return_type)
self.context.add_query(qry)
return return_type
def unregister_hub_site(self, siteUrl):
"""
Unregisters a hub site so that it is no longer a hub site.
:param str siteUrl:
:return:
"""
params = {"siteUrl": siteUrl}
qry = ServiceOperationQuery(self, "UnregisterHubSite", None, params, None, None)
self.context.add_query(qry)
return self
def create_site(self, site_create_props):
"""Queues a site collection for creation with the specified properties.
:param SiteCreationProperties site_create_props:
A SiteCreationProperties object that contains the initial properties
of the new site collection.
"""
result = SpoOperation(self.context)
qry = ServiceOperationQuery(self, "CreateSite", None, site_create_props, "siteCreationProperties", result)
self.context.add_query(qry)
return result
def remove_site(self, site_url):
"""Deletes the site with the specified URL
:param str site_url: A string representing the URL of the site.
"""
result = SpoOperation(self.context)
qry = ServiceOperationQuery(self, "removeSite", [site_url], None, None, result)
self.context.add_query(qry)
return result
def remove_deleted_site(self, site_url):
pass
def restore_deleted_site(self, site_url):
pass
def get_site_properties_by_url(self, url, include_detail):
"""
:param str url: A string that represents the site URL.
:param bool include_detail: A Boolean value that indicates whether to include all of the SPSite properties.
"""
site_props = SiteProperties(self.context)
self._sites.add_child(site_props)
payload = {
'url': url,
'includeDetail': include_detail
}
qry = ServiceOperationQuery(self, "getSitePropertiesByUrl", None, payload, None, site_props)
self.context.add_query(qry)
return site_props
def get_site_properties_from_sharepoint_by_filters(self, _filter, start_index=0, include_detail=False):
"""
:param bool include_detail:
:param int start_index:
:param str _filter:
"""
site_props_col = SitePropertiesCollection(self.context)
qry = ServiceOperationQuery(self, "getSitePropertiesFromSharePointByFilters",
None,
SitePropertiesEnumerableFilter(_filter, start_index, include_detail),
"speFilter",
site_props_col)
self.context.add_query(qry)
return site_props_col
@property
def root_site_url(self):
"""
:rtype: str or None
"""
return self.properties.get('RootSiteUrl', None)
@property
def _sites(self):
"""Gets a collection of sites."""
if self.is_property_available('sites'):
return self.properties['sites']
else:
return SitePropertiesCollection(self.context, ResourcePath("sites", self.resource_path))
| 39.655405 | 116 | 0.679332 |
f7022b106191f7e769f494a9e9e6e19c38892823 | 1,472 | py | Python | qnarre/doc/justifier.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/doc/justifier.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/doc/justifier.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | # Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
class Justifier:
def __init__(self, **kw):
super().__init__(**kw)
self.justs = [0] * 9
self.offsets = [(0, 0, 0, 1, 1, 1, 1, 1, 1),
(0, -1, -2, 0, 0, 0, 1, 1, 1),
(0, -1, -2, 0, -1, -2, 0, 0, 0)]
def init_justs(self, justs):
for i in justs:
i = i // 3
os = self.offsets[i]
if os:
self.justs = [sum(x) for x in zip(self.justs, os)]
self.offsets[i] = None
def calc_just(self, justs):
for i in justs:
i = self.justs[i] + (i % 3)
if i == 1:
return 'justify-content-center'
elif i > 1:
return 'justify-content-end'
return 'justify-content-start'
| 35.902439 | 79 | 0.536005 |
f7022f7075bdd6537b307688382d872a3f7fd177 | 53 | py | Python | Interfaces/__init__.py | ahmadryan/TurbAn | b8866d103a2ca2f5fbad73bcd4416f19299f22b2 | [
"BSD-2-Clause-Patent"
] | null | null | null | Interfaces/__init__.py | ahmadryan/TurbAn | b8866d103a2ca2f5fbad73bcd4416f19299f22b2 | [
"BSD-2-Clause-Patent"
] | null | null | null | Interfaces/__init__.py | ahmadryan/TurbAn | b8866d103a2ca2f5fbad73bcd4416f19299f22b2 | [
"BSD-2-Clause-Patent"
] | 10 | 2019-03-22T15:30:12.000Z | 2021-02-10T02:55:50.000Z | from . import Simulations
from . import Spacecraft
| 17.666667 | 26 | 0.773585 |
f7023d3f50a4bcdd656f0e33b9e318facfcd714f | 892 | py | Python | kubi_ecs_logger/models/fields/destination.py | kumina/kubi_ecs_logger | 64d9519e0759a24253a4edc53e0c024675033d1c | [
"BSD-3-Clause"
] | 6 | 2019-12-15T12:47:06.000Z | 2022-01-11T08:54:58.000Z | kubi_ecs_logger/models/fields/destination.py | kumina/kubi_ecs_logger | 64d9519e0759a24253a4edc53e0c024675033d1c | [
"BSD-3-Clause"
] | null | null | null | kubi_ecs_logger/models/fields/destination.py | kumina/kubi_ecs_logger | 64d9519e0759a24253a4edc53e0c024675033d1c | [
"BSD-3-Clause"
] | null | null | null | from marshmallow import fields
from .field_set import FieldSet, FieldSetSchema
class Destination(FieldSet):
def __init__(self,
address: str = None,
bytes: int = None,
domain: str = None,
ip: str = None,
mac: str = None,
packets: int = None,
port: int = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.address = address
self.bytes = bytes
self.domain = domain
self.ip = ip
self.mac = mac
self.packets = packets
self.port = port
class DestinationSchema(FieldSetSchema):
address = fields.String()
bytes = fields.Integer()
domain = fields.String()
ip = fields.String()
mac = fields.String()
packets = fields.Integer()
port = fields.Integer()
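# Hedged usage sketch (not part of the original module): serializing a
# Destination with its marshmallow schema; field values are illustrative
# assumptions only.
#
#   dest = Destination(address="10.0.0.5", ip="10.0.0.5", port=443)
#   payload = DestinationSchema().dump(dest)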
f7025167168843760aa99b53b10d6a7a0fc912e1 | 2,035 | py | Python | .eggs/boto-2.48.0-py2.7.egg/boto/sdb/db/key.py | MQQ/git-bigstore | ["Apache-2.0"]
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Key(object):
@classmethod
def from_path(cls, *args, **kwds):
raise NotImplementedError("Paths are not currently supported")
def __init__(self, encoded=None, obj=None):
self.name = None
if obj:
self.id = obj.id
self.kind = obj.kind()
else:
self.id = None
self.kind = None
def app(self):
raise NotImplementedError("Applications are not currently supported")
def kind(self):
return self.kind
def id(self):
return self.id
def name(self):
raise NotImplementedError("Key Names are not currently supported")
def id_or_name(self):
return self.id
def has_id_or_name(self):
return self.id is not None
def parent(self):
raise NotImplementedError("Key parents are not currently supported")
def __str__(self):
return self.id_or_name()
f7025258811b22755058146106a8a59727a8d6a1 | 14,181 | py | Python | lib/geomet/wkt.py | davasqueza/eriskco_conector_CloudSQL | ["Apache-2.0"]
# Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tokenize
try:
import StringIO
except ImportError:
import io
StringIO = io
INVALID_WKT_FMT = 'Invalid WKT: `%s`'
def dump(obj, dest_file):
"""
Dump GeoJSON-like `dict` to WKT and write it to the `dest_file`.
:param dict obj:
A GeoJSON-like dictionary. It must at least the keys 'type' and
'coordinates'.
:param dest_file:
Open and writable file-like object.
"""
dest_file.write(dumps(obj))
def load(source_file):
"""
Load a GeoJSON `dict` object from a ``source_file`` containing WKT.
:param source_file:
Open and readable file-like object.
:returns:
A GeoJSON `dict` representing the geometry read from the file.
"""
return loads(source_file.read())
def dumps(obj, decimals=16):
"""
Dump a GeoJSON-like `dict` to a WKT string.
"""
geom_type = obj['type']
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
fmt = '%%.%df' % decimals
return exporter(obj, fmt)
def loads(string):
"""
Construct a GeoJSON `dict` from WKT (`string`).
"""
sio = StringIO.StringIO(string)
# NOTE: This is not the intended purpose of `tokenize`, but it works.
tokens = (x[1] for x in tokenize.generate_tokens(sio.readline))
tokens = _tokenize_wkt(tokens)
geom_type = next(tokens)
importer = _loads_registry.get(geom_type)
if importer is None:
_unsupported_geom_type(geom_type)
return importer(tokens, string)
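def _example_wkt_round_trip():
    # Hedged sketch (not part of the original module): a GeoJSON-like dict is
    # dumped to WKT text and parsed back into an equivalent dict.
    point = {'type': 'Point', 'coordinates': [0.0, 1.0]}
    wkt_text = dumps(point, decimals=1)  # 'POINT (0.0 1.0)'
    return loads(wkt_text)               # {'type': 'Point', 'coordinates': [0.0, 1.0]}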
def _tokenize_wkt(tokens):
"""
Since the tokenizer treats "-" and numeric strings as separate values,
combine them and yield them as a single token. This utility encapsulates
parsing of negative numeric values from WKT can be used generically in all
parsers.
"""
negative = False
for t in tokens:
if t == '-':
negative = True
continue
else:
if negative:
yield '-%s' % t
else:
yield t
negative = False
def _unsupported_geom_type(geom_type):
raise ValueError("Unsupported geometry type '%s'" % geom_type)
def _dump_point(obj, fmt):
"""
Dump a GeoJSON-like Point object to WKT.
:param dict obj:
A GeoJSON-like `dict` representing a Point.
:param str fmt:
Format string which indicates the number of digits to display after the
decimal point when formatting coordinates.
:returns:
WKT representation of the input GeoJSON Point ``obj``.
"""
coords = obj['coordinates']
pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords)
return pt
def _dump_linestring(obj, fmt):
"""
Dump a GeoJSON-like LineString object to WKT.
Input parameters and return value are the LINESTRING equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
ls = 'LINESTRING (%s)'
ls %= ', '.join(' '.join(fmt % c for c in pt) for pt in coords)
return ls
def _dump_polygon(obj, fmt):
"""
Dump a GeoJSON-like Polygon object to WKT.
Input parameters and return value are the POLYGON equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
poly = 'POLYGON (%s)'
rings = (', '.join(' '.join(fmt % c for c in pt) for pt in ring)
for ring in coords)
rings = ('(%s)' % r for r in rings)
poly %= ', '.join(rings)
return poly
def _dump_multipoint(obj, fmt):
"""
Dump a GeoJSON-like MultiPoint object to WKT.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOINT (%s)'
points = (' '.join(fmt % c for c in pt) for pt in coords)
# Add parens around each point.
points = ('(%s)' % pt for pt in points)
mp %= ', '.join(points)
return mp
def _dump_multilinestring(obj, fmt):
"""
Dump a GeoJSON-like MultiLineString object to WKT.
Input parameters and return value are the MULTILINESTRING equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mlls = 'MULTILINESTRING (%s)'
linestrs = ('(%s)' % ', '.join(' '.join(fmt % c for c in pt)
for pt in linestr) for linestr in coords)
mlls %= ', '.join(ls for ls in linestrs)
return mlls
def _dump_multipolygon(obj, fmt):
"""
Dump a GeoJSON-like MultiPolygon object to WKT.
Input parameters and return value are the MULTIPOLYGON equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOLYGON (%s)'
polys = (
# join the polygons in the multipolygon
', '.join(
# join the rings in a polygon,
# and wrap in parens
'(%s)' % ', '.join(
# join the points in a ring,
# and wrap in parens
'(%s)' % ', '.join(
# join coordinate values of a vertex
' '.join(fmt % c for c in pt)
for pt in ring)
for ring in poly)
for poly in coords)
)
mp %= polys
return mp
def _dump_geometrycollection(obj, fmt):
"""
Dump a GeoJSON-like GeometryCollection object to WKT.
Input parameters and return value are the GEOMETRYCOLLECTION equivalent to
:func:`_dump_point`.
The WKT conversions for each geometry in the collection are delegated to
their respective functions.
"""
gc = 'GEOMETRYCOLLECTION (%s)'
geoms = obj['geometries']
geoms_wkt = []
for geom in geoms:
geom_type = geom['type']
geoms_wkt.append(_dumps_registry.get(geom_type)(geom, fmt))
gc %= ','.join(geoms_wkt)
return gc
def _load_point(tokens, string):
"""
:param tokens:
A generator of string tokens for the input WKT, begining just after the
geometry type. The geometry type is consumed before we get to here. For
example, if :func:`loads` is called with the input 'POINT(0.0 1.0)',
``tokens`` would generate the following values:
.. code-block:: python
['(', '0.0', '1.0', ')']
:param str string:
The original WKT string.
:returns:
A GeoJSON `dict` Point representation of the WKT ``string``.
"""
if not next(tokens) == '(':
raise ValueError(INVALID_WKT_FMT % string)
coords = []
try:
for t in tokens:
if t == ')':
break
else:
coords.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='Point', coordinates=coords)
def _load_linestring(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling LINESTRING geometry.
:returns:
A GeoJSON `dict` LineString representation of the WKT ``string``.
"""
if not next(tokens) == '(':
raise ValueError(INVALID_WKT_FMT % string)
# a list of lists
# each member list represents a point
coords = []
try:
pt = []
for t in tokens:
if t == ')':
coords.append(pt)
break
elif t == ',':
# it's the end of the point
coords.append(pt)
pt = []
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='LineString', coordinates=coords)
def _load_polygon(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling POLYGON geometry.
:returns:
A GeoJSON `dict` Polygon representation of the WKT ``string``.
"""
open_parens = next(tokens), next(tokens)
if not open_parens == ('(', '('):
raise ValueError(INVALID_WKT_FMT % string)
# coords contains a list of rings
# each ring contains a list of points
# each point is a list of 2-4 values
coords = []
ring = []
on_ring = True
try:
pt = []
for t in tokens:
if t == ')' and on_ring:
# The ring is finished
ring.append(pt)
coords.append(ring)
on_ring = False
elif t == ')' and not on_ring:
# it's the end of the polygon
break
elif t == '(':
# it's a new ring
ring = []
pt = []
on_ring = True
elif t == ',' and on_ring:
# it's the end of a point
ring.append(pt)
pt = []
elif t == ',' and not on_ring:
# there's another ring.
# do nothing
pass
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='Polygon', coordinates=coords)
def _load_multipoint(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling MULTIPOINT geometry.
:returns:
A GeoJSON `dict` MultiPoint representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
coords = []
pt = []
paren_depth = 1
try:
for t in tokens:
if t == '(':
paren_depth += 1
elif t == ')':
paren_depth -= 1
if paren_depth == 0:
break
elif t == '':
pass
elif t == ',':
# the point is done
coords.append(pt)
pt = []
else:
pt.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
# Given the way we're parsing, we'll probably have to deal with the last
# point after the loop
if len(pt) > 0:
coords.append(pt)
return dict(type='MultiPoint', coordinates=coords)
def _load_multipolygon(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling MULTIPOLYGON geometry.
:returns:
A GeoJSON `dict` MultiPolygon representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
polygons = []
while True:
try:
poly = _load_polygon(tokens, string)
polygons.append(poly['coordinates'])
t = next(tokens)
if t == ')':
# we're done; no more polygons.
break
except StopIteration:
# If we reach this, the WKT is not valid.
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='MultiPolygon', coordinates=polygons)
def _load_multilinestring(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling MULTILINESTRING geometry.
:returns:
A GeoJSON `dict` MultiLineString representation of the WKT ``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
linestrs = []
while True:
try:
linestr = _load_linestring(tokens, string)
linestrs.append(linestr['coordinates'])
t = next(tokens)
if t == ')':
# we're done; no more linestrings.
break
except StopIteration:
# If we reach this, the WKT is not valid.
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='MultiLineString', coordinates=linestrs)
def _load_geometrycollection(tokens, string):
"""
Has similar inputs and return value to to :func:`_load_point`, except is
for handling GEOMETRYCOLLECTIONs.
Delegates parsing to the parsers for the individual geometry types.
:returns:
A GeoJSON `dict` GeometryCollection representation of the WKT
``string``.
"""
open_paren = next(tokens)
if not open_paren == '(':
raise ValueError(INVALID_WKT_FMT % string)
geoms = []
result = dict(type='GeometryCollection', geometries=geoms)
while True:
try:
t = next(tokens)
if t == ')':
break
elif t == ',':
# another geometry still
continue
else:
geom_type = t
load_func = _loads_registry.get(geom_type)
geom = load_func(tokens, string)
geoms.append(geom)
except StopIteration:
raise ValueError(INVALID_WKT_FMT % string)
return result
_dumps_registry = {
'Point': _dump_point,
'LineString': _dump_linestring,
'Polygon': _dump_polygon,
'MultiPoint': _dump_multipoint,
'MultiLineString': _dump_multilinestring,
'MultiPolygon': _dump_multipolygon,
'GeometryCollection': _dump_geometrycollection,
}
_loads_registry = {
'POINT': _load_point,
'LINESTRING': _load_linestring,
'POLYGON': _load_polygon,
'MULTIPOINT': _load_multipoint,
'MULTILINESTRING': _load_multilinestring,
'MULTIPOLYGON': _load_multipolygon,
'GEOMETRYCOLLECTION': _load_geometrycollection,
}
f70271444a8a7d243bda48a6efd9534b633a6c2b | 1,169 | py | Python | server/openapi_server/controllers/text_date_annotation_controller.py | cascadianblue/phi-annotator | ["Apache-2.0"]
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_date_annotation_request import \
TextDateAnnotationRequest # noqa: E501
from openapi_server.models.text_date_annotation_response import \
TextDateAnnotationResponse # noqa: E501
def create_text_date_annotations(): # noqa: E501
"""Annotate dates in a clinical note
Return the date annotations found in a clinical note # noqa: E501
:rtype: TextDateAnnotationResponse
"""
res = None
status = None
if connexion.request.is_json:
try:
annotation_request = TextDateAnnotationRequest.from_dict(
connexion.request.get_json()) # noqa: E501
note = annotation_request.note
annotations = get_annotations(note, phi_type=PhiType.DATE)
res = TextDateAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
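# Hedged usage sketch (not part of the original module): clients POST a JSON
# body matching TextDateAnnotationRequest (a clinical note under the "note"
# key; the exact sub-fields come from the generated OpenAPI models, not shown
# here) and receive a TextDateAnnotationResponse with the detected date
# annotations and HTTP 200, or an Error payload with HTTP 500 on failure.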
f702747b82118bbd64d8fc67a01e1f638cbb45dd | 26,042 | py | Python | src/transformersX/models/cutoffbert/modeling_cutoffbert.py | stevezheng23/fewshot_nlp_pt | ["Apache-2.0"]
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
avg_line_length: 44.9 | max_line_length: 213 | alphanum_fraction: 0.672183
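A minimal usage sketch for the sequence-classification head defined above. It assumes the transformersX fork exposes the config and model under the import path below and that CutoffBertConfig carries workable defaults for the extra cutoff fields (cls/sep/mask token ids, masking probability, temperature, loss weights); the checkpoint name, label count, and token ids are illustrative, not prescribed by the file above.

import torch
from transformersX.models.cutoffbert import CutoffBertConfig, CutoffBertForSequenceClassification

# Assumed import path and config defaults; adjust to the fork's actual layout.
config = CutoffBertConfig.from_pretrained("bert-base-uncased", num_labels=2)
model = CutoffBertForSequenceClassification(config)

input_ids = torch.tensor([[101, 7592, 2088, 102]])   # [CLS] hello world [SEP] (bert-base-uncased ids)
attention_mask = torch.ones_like(input_ids)
labels = torch.tensor([1])

# With labels present, forward() builds a masked "cutoff" view of the batch and
# adds the optional masking / Jensen-Shannon consistency terms to the loss.
outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
print(outputs.loss, outputs.logits.shape)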
f7028f059677a83cf6bbecfd7df23260f585b48f | 785 | py | Python | sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/media/azure-mgmt-media/azure/mgmt/media/aio/__init__.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._azure_media_services import AzureMediaServices
__all__ = ['AzureMediaServices']
# `._patch.py` is used for handwritten extensions to the generated code
# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
from ._patch import patch_sdk
patch_sdk()
avg_line_length: 49.0625 | max_line_length: 114 | alphanum_fraction: 0.636943
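The module above only re-exports the async client. A hypothetical instantiation following the usual Azure mgmt-plane convention is sketched below; the constructor arguments and context-manager usage are assumptions based on that convention rather than on anything shown in the file, and the subscription id is a placeholder.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.media.aio import AzureMediaServices

async def main():
    # (credential, subscription_id) is the conventional mgmt-client signature;
    # verify against this package's generated client before relying on it.
    async with DefaultAzureCredential() as credential:
        async with AzureMediaServices(credential, "<subscription-id>") as client:
            print(type(client).__name__)

asyncio.run(main())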
hexsha: f702939c992f164058c986345c72844ea2c3df0a | size: 2,852 | ext: py | lang: Python
max_stars_repo_path: tests/data_tests/writer_tests/json_writer_test.py | max_stars_repo_name: alueschow/polymatheia | max_stars_repo_head_hexsha: e46a38b3686139bbab3a2fcfaa914d4ca938654e | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2020-09-15T15:15:34.000Z | max_stars_repo_stars_event_max_datetime: 2021-06-15T10:35:07.000Z
max_issues_repo_path: tests/data_tests/writer_tests/json_writer_test.py | max_issues_repo_name: alueschow/polymatheia | max_issues_repo_head_hexsha: e46a38b3686139bbab3a2fcfaa914d4ca938654e | max_issues_repo_licenses: ["MIT"] | max_issues_count: 7 | max_issues_repo_issues_event_min_datetime: 2020-09-03T12:53:34.000Z | max_issues_repo_issues_event_max_datetime: 2020-10-05T09:14:29.000Z
max_forks_repo_path: tests/data_tests/writer_tests/json_writer_test.py | max_forks_repo_name: alueschow/polymatheia | max_forks_repo_head_hexsha: e46a38b3686139bbab3a2fcfaa914d4ca938654e | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-10-13T09:12:21.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-15T14:19:06.000Z
content:
"""Tests for the :mod:`~polymatheia.data.writer` package."""
import json
import os
from shutil import rmtree
from polymatheia.data import NavigableDict
from polymatheia.data.writer import JSONWriter
DOCUMENTS = [NavigableDict(r) for r in [
{
'id': '1',
'name': {
'first': 'A',
'last': 'Person'
},
'age': 32,
'special tags': 'The first'
},
{
'id': '2',
'name': {
'first': ['Another', {'abbr': 'Nameless'}],
'last': 'Parrot'
},
'age': 23,
},
{
'id': '3',
'name': {
'first': 'The',
'last': 'Last'
},
'age': 65,
},
]]
def test_local_json_writing():
"""Test writing to the local filesystem."""
rmtree('tmp/json_writer_test', ignore_errors=True)
writer = JSONWriter('tmp/json_writer_test', 'id')
writer.write(DOCUMENTS)
count = 0
for basepath, _, filenames in os.walk('tmp/json_writer_test'):
for filename in filenames:
if filename.endswith('.json'):
count = count + len(filenames)
with open(os.path.join(basepath, filename)) as in_f:
doc = json.load(in_f)
assert 'id' in doc
assert 'name' in doc
if doc['id'] == '2':
assert 'first' in doc['name']
assert len(doc['name']['first']) == 2
else:
assert 'first' in doc['name']
assert 'last' in doc['name']
assert 'age' in doc
if doc['id'] == '1':
assert 'special tags' in doc
assert count == 3
def test_local_json_writing_pre_split_id_path():
"""Test writing to the local filesystem."""
rmtree('tmp/json_writer_test', ignore_errors=True)
writer = JSONWriter('tmp/json_writer_test', ['id'])
writer.write(DOCUMENTS)
count = 0
for basepath, _, filenames in os.walk('tmp/json_writer_test'):
for filename in filenames:
if filename.endswith('.json'):
count = count + len(filenames)
with open(os.path.join(basepath, filename)) as in_f:
doc = json.load(in_f)
assert 'id' in doc
assert 'name' in doc
if doc['id'] == '2':
assert 'first' in doc['name']
assert len(doc['name']['first']) == 2
else:
assert 'first' in doc['name']
assert 'last' in doc['name']
assert 'age' in doc
if doc['id'] == '1':
assert 'special tags' in doc
assert count == 3
avg_line_length: 31.688889 | max_line_length: 68 | alphanum_fraction: 0.471599
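A small companion sketch, assuming it runs inside the test module above (so DOCUMENTS, JSONWriter, NavigableDict, json, os and rmtree are already in scope): it writes the same records and loads every generated JSON file back into NavigableDict objects, mirroring what the assertions check.

def read_back_documents(path='tmp/json_writer_test'):
    """Load every written JSON file back as a NavigableDict."""
    records = []
    for basepath, _, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith('.json'):
                with open(os.path.join(basepath, filename)) as in_f:
                    records.append(NavigableDict(json.load(in_f)))
    return records

def test_local_json_round_trip():
    """Sanity-check sketch: write the documents and read them all back."""
    rmtree('tmp/json_writer_test', ignore_errors=True)
    JSONWriter('tmp/json_writer_test', 'id').write(DOCUMENTS)
    records = read_back_documents()
    assert len(records) == 3
    assert {r['id'] for r in records} == {'1', '2', '3'}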