#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationsamlidppolicy_binding(base_resource):
""" Binding class showing the resources that can be bound to authenticationsamlidppolicy_binding.
"""
def __init__(self) :
self._name = None
self.authenticationsamlidppolicy_vpnvserver_binding = []
self.authenticationsamlidppolicy_authenticationvserver_binding = []
@property
def name(self) :
r"""Name of the SAML IdentityProvider (IdP) policy for which to display detailed information.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the SAML IdentityProvider (IdP) policy for which to display detailed information.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def authenticationsamlidppolicy_authenticationvserver_bindings(self) :
r"""authenticationvserver that can be bound to authenticationsamlidppolicy.
"""
try :
return self._authenticationsamlidppolicy_authenticationvserver_binding
except Exception as e:
raise e
@property
def authenticationsamlidppolicy_vpnvserver_bindings(self) :
r"""vpnvserver that can be bound to authenticationsamlidppolicy.
"""
try :
return self._authenticationsamlidppolicy_vpnvserver_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationsamlidppolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationsamlidppolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name="", option_="") :
r""" Use this API to fetch authenticationsamlidppolicy_binding resource.
"""
try :
if not name :
obj = authenticationsamlidppolicy_binding()
response = obj.get_resources(service, option_)
elif type(name) is not list :
obj = authenticationsamlidppolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
    if name and len(name) > 0 :
        obj = [authenticationsamlidppolicy_binding() for _ in range(len(name))]
        response = [None] * len(name)
        for i in range(len(name)) :
            obj[i].name = name[i]
            response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class authenticationsamlidppolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationsamlidppolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationsamlidppolicy_binding = [authenticationsamlidppolicy_binding() for _ in range(length)]
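# Usage sketch (illustrative only, not part of the generated SDK): fetch the
# bindings of a SAML IdP policy by name. The NSIP address, credentials and the
# policy name "samlidp_pol1" below are placeholders for a real NetScaler setup.
if __name__ == '__main__':
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("192.0.2.10", "http")
    client.login("nsroot", "nsroot")
    bindings = authenticationsamlidppolicy_binding.get(client, "samlidp_pol1")
    print(bindings)
    client.logout()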
|
import math
import numpy
import pytest
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField, ParameterList
from simtk import unit
@pytest.fixture()
def buckingham_water_force_field() -> ForceField:
"""Create a buckingham water model Forcefield object."""
force_field = ForceField(load_plugins=True)
# Add in a constraint handler to ensure the correct H-O-H geometry.
constraint_handler = force_field.get_parameter_handler("Constraints")
# Keep the H-O bond length fixed at 0.9572 angstroms.
constraint_handler.add_parameter(
{"smirks": "[#1:1]-[#8X2H2+0:2]-[#1]", "distance": 0.9572 * unit.angstrom}
)
# Keep the H-O-H angle fixed at 104.52 degrees by constraining the H-H distance.
constraint_handler.add_parameter(
{"smirks": "[#1:1]-[#8X2H2+0]-[#1:2]", "distance": 1.5139 * unit.angstrom}
)
# Add a default vdW handler which is currently required by the OFF TK.
vdw_handler = force_field.get_parameter_handler("vdW")
vdw_handler.add_parameter(
{
"smirks": "[#1:1]-[#8X2H2+0]-[#1]",
"epsilon": 0.0 * unit.kilojoule_per_mole,
"sigma": 1.0 * unit.angstrom,
}
)
vdw_handler.add_parameter(
{
"smirks": "[#1]-[#8X2H2+0:1]-[#1]",
"epsilon": 0.0 * unit.kilojoules_per_mole,
"sigma": 0.0 * unit.nanometers,
# "epsilon": 0.680946 * unit.kilojoules_per_mole,
# "sigma": 0.316435 * unit.nanometers,
}
)
# Add a charge handler to zero the charges on water. The charges will be
# applied by the virtual site handler instead.
force_field.get_parameter_handler("Electrostatics")
force_field.get_parameter_handler(
"ChargeIncrementModel",
{"version": "0.3", "partial_charge_method": "formal_charge"},
)
# Add a virtual site handler to add the virtual charge site.
virtual_site_handler = force_field.get_parameter_handler("VirtualSites")
virtual_site_handler.add_parameter(
{
"smirks": "[#1:1]-[#8X2H2+0:2]-[#1:3]",
"type": "DivalentLonePair",
"distance": -0.0106 * unit.nanometers,
"outOfPlaneAngle": 0.0 * unit.degrees,
"match": "once",
"charge_increment1": 1.0552 * 0.5 * unit.elementary_charge,
"charge_increment2": 0.0 * unit.elementary_charge,
"charge_increment3": 1.0552 * 0.5 * unit.elementary_charge,
}
)
virtual_site_handler._parameters = ParameterList(virtual_site_handler._parameters)
# Finally add the custom DampedBuckingham68 handler.
buckingham_handler = force_field.get_parameter_handler("DampedBuckingham68")
buckingham_handler.add_parameter(
{
"smirks": "[#1:1]-[#8X2H2+0]-[#1]",
"a": 0.0 * unit.kilojoule_per_mole,
"b": 0.0 / unit.nanometer,
"c6": 0.0 * unit.kilojoule_per_mole * unit.nanometer ** 6,
"c8": 0.0 * unit.kilojoule_per_mole * unit.nanometer ** 8,
}
)
buckingham_handler.add_parameter(
{
"smirks": "[#1]-[#8X2H2+0:1]-[#1]",
"a": 1600000.0 * unit.kilojoule_per_mole,
"b": 42.00 / unit.nanometer,
"c6": 0.003 * unit.kilojoule_per_mole * unit.nanometer ** 6,
"c8": 0.00003 * unit.kilojoule_per_mole * unit.nanometer ** 8,
}
)
return force_field
@pytest.fixture()
def water_box_topology() -> Topology:
mol = Molecule.from_smiles("O")
mol.generate_conformers()
n_molecules = 256
topology: Topology = Topology.from_molecules([mol] * n_molecules)
# Create some coordinates (without the v-sites) and estimate box vectors.
topology.box_vectors = (
numpy.eye(3) * math.ceil(n_molecules ** (1 / 3) + 2) * 2.5 * unit.angstrom
)
return topology
@pytest.fixture()
def ideal_water_force_field() -> ForceField:
"""Returns a force field that will assign constraints, a vdW handler and
a library charge handler to a three site water molecule with all LJ
``epsilon=0.0`` and all ``q=0.0``.
"""
ff = ForceField(load_plugins=True)
constraint_handler = ff.get_parameter_handler("Constraints")
constraint_handler.add_parameter(
{"smirks": "[#1:1]-[#8X2H2+0:2]-[#1]", "distance": 0.9572 * unit.angstrom}
)
constraint_handler.add_parameter(
{"smirks": "[#1:1]-[#8X2H2+0]-[#1:2]", "distance": 1.5139 * unit.angstrom}
)
# add a dummy vdW term
vdw_handler = ff.get_parameter_handler("vdW")
vdw_handler.add_parameter(
{
"smirks": "[#1:1]-[#8X2H2+0]-[#1]",
"epsilon": 0.0 * unit.kilojoule_per_mole,
"sigma": 1.0 * unit.angstrom,
}
)
vdw_handler.add_parameter(
{
"smirks": "[#1]-[#8X2H2+0:1]-[#1]",
"epsilon": 0.0 * unit.kilojoules_per_mole,
"sigma": 0.0 * unit.nanometers,
}
)
# add the library charges
library_charge = ff.get_parameter_handler("LibraryCharges")
library_charge.add_parameter(
{"smirks": "[#1]-[#8X2H2+0:1]-[#1]", "charge1": 0 * unit.elementary_charge}
)
library_charge.add_parameter(
{"smirks": "[#1:1]-[#8X2H2+0]-[#1]", "charge1": 0 * unit.elementary_charge}
)
return ff
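# A hedged sketch of a test that combines the fixtures above; it assumes the
# standard OpenFF Toolkit API (ForceField.create_openmm_system) and that any
# required plugin handlers (e.g. DampedBuckingham68) are installed.
def test_buckingham_water_system_builds(
    buckingham_water_force_field, water_box_topology
):
    """The custom water force field should parameterize the water box."""
    system = buckingham_water_force_field.create_openmm_system(water_box_topology)
    # 256 waters x 3 atoms, plus any virtual sites added by the VirtualSites
    # handler (particle counts may differ between toolkit versions).
    assert system.getNumParticles() >= 3 * 256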
|
#!/usr/bin/env python3
#################################################################################################
# Ev3TrackedExplor3r #
# Version 1.0 #
# #
# Happily shared under the MIT License (MIT) #
# #
# Copyright(c) 2017 SmallRobots.it #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR #
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE #
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, #
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE #
# OR OTHER DEALINGS IN THE SOFTWARE. #
# #
# Visit http://www.smallrobots.it for tutorials and videos #
# #
# Credits #
# The Ev3TrackedExplor3r is built with Lego Mindstorms Ev3 and Lego Technic Parts #
#################################################################################################
import socket
import os
from ev3_remoted import *
import ev3te
from ev3te.ev3_tracked_explor3r import Ev3TrackedExplor3r
class Launcher(object):
"""Main class used to launcher the Ev3TrackedExplor3r with remote control"""
@staticmethod
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
socket_name = s.getsockname()[0]
s.close()
return socket_name
def __init__(self):
""" Default constructor """
self.local_ip_address = self.get_ip_address()
self.local_ip_port = "15999"
self.robot_model = Ev3TrackedExplor3r()
self.server = Ev3Server(host_name = self.local_ip_address, host_port = self.local_ip_port,
robot_model = self.robot_model)
def start(self):
""" Starting point for this application """
# Check whether the operating system is Windows based or Unix based
os.system('cls' if os.name == 'nt' else 'clear')
# Present splash screen
print("**********************************************************************")
print("* Ev3 Tracked Explor3r *")
print("* Smallrobots.it *")
print("* *")
print("* Local Ev3 host IP Address: " + self.local_ip_address + " *")
print("* Local Ev3 host IP Port: " + self.local_ip_port + " *")
print("* *")
print("* Connect to the IP address and port above with the the remote app. *")
print("* *")
print("* Press ctrl-c to stop the Local Ev3 host server *")
print("**********************************************************************")
self.server.start()
#self.tracked_explor3r.start()
# Main code
a_launcher = Launcher()
a_launcher.start()
|
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various neural networks and related utilities.
"""
from texar.torch.modules.networks.network_base import FeedForwardNetworkBase
from texar.torch.utils.utils import get_output_size
__all__ = [
"FeedForwardNetwork",
]
class FeedForwardNetwork(FeedForwardNetworkBase):
r"""Feed-forward neural network that consists of a sequence of layers.
Args:
layers (list, optional): A list of :torch_nn:`Linear`
instances composing the network. If not given, layers are created
according to :attr:`hparams`.
hparams (dict, optional): Network hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
See :meth:`forward` for the inputs and outputs.
Example:
.. code-block:: python
hparams = { # Builds a two-layer dense NN
"layers": [
{ "type": "Dense", "kwargs": { "units": 256 },
{ "type": "Dense", "kwargs": { "units": 10 }
]
}
nn = FeedForwardNetwork(hparams=hparams)
inputs = torch.randn([64, 100])
outputs = nn(inputs)
# outputs == Tensor of shape [64, 10]
"""
def __init__(self, layers=None, hparams=None):
super().__init__(hparams=hparams)
self._build_layers(layers=layers, layer_hparams=self._hparams.layers)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
"layers": [],
"name": "NN"
}
Here:
`"layers"`: list
A list of layer hyperparameters. See
:func:`~texar.torch.core.get_layer` for details on layer
hyperparameters.
`"name"`: str
Name of the network.
"""
return {
"layers": [],
"name": "NN"
}
@property
def output_size(self) -> int:
r"""The feature size of network layers output. If output size is
only determined by input, the feature size is equal to ``-1``.
"""
for i, layer in enumerate(reversed(self._layers)):
size = get_output_size(layer)
size_ext = getattr(layer, 'output_size', None)
if size_ext is not None:
size = size_ext
if size is None:
break
elif size > 0:
return size
elif i == len(self._layers) - 1:
return -1
raise ValueError("'output_size' can not be calculated because "
"'FeedForwardNetwork' contains submodule "
"whose output size cannot be determined.")
|
# vim: set fileencoding=utf-8 :
# Copyright 2012 Alexander Else <aelse@else.id.au>.
#
# This file is part of the python-crowd library.
#
# python-crowd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-crowd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-crowd. If not, see <http://www.gnu.org/licenses/>.
import re, sys
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler # Py27
except ImportError:
from http.server import HTTPServer, BaseHTTPRequestHandler # Py3k
try:
from urllib2 import urlparse # Py27
except ImportError:
from urllib import parse as urlparse # Py3k
import json
httpd = None
app_auth = {}
user_auth = {}
user_attributes = {}
group_auth = {}
session_auth = {}
def add_app(app_name, app_pass):
global app_auth
app_auth[app_name] = app_pass
def remove_app(app_name):
global app_auth
try:
del app_auth[app_name]
except KeyError: pass
def check_app_auth(headers):
"""Authenticate an application from Authorization HTTP header"""
import base64
try:
auth_header = headers["Authorization"]
except KeyError:
return False
# Only handle HTTP Basic authentication
m = re.match(r"Basic ([A-Za-z0-9+/=]+)", auth_header)
if not m:
    return False
encoded = m.groups()[0].encode('ascii')
decoded = base64.b64decode(encoded).decode('ascii')
m = re.match("([^:]+):(.+)", decoded)
if not m:
# Invalid authorization format
return False
app_user, app_pass = m.groups()
global app_auth
try:
if app_auth[app_user] == app_pass:
return True
except KeyError:
# No such user, fall through
pass
return False
def add_user_to_group(username, group):
global group_auth
if username not in group_auth:
group_auth[username] = []
if group not in group_auth[username]:
group_auth[username].append(group)
def remove_user_from_group(username, group):
global group_auth
try:
group_auth[username] = list(
filter(lambda x: x != group, group_auth[username])
)
except KeyError: pass
def user_exists_in_group(username, group):
"""Check that user exists in a group"""
global group_auth
try:
return group in group_auth[username]
except KeyError:
pass
return False
def get_user_group_membership(username):
"""List of groups user is in"""
global group_auth
try:
return group_auth[username]
except KeyError:
pass
return []
def get_group_users(groupname):
"""List of users in the group"""
global group_auth
users = []
for username, groups in group_auth.items():
try:
if groupname in groups:
users.append(username)
except:
pass
return users
def add_user(username, password, attributes=None):
global user_auth
global user_attributes
user_auth[username] = password
if attributes:
user_attributes[username] = attributes
def remove_user(username):
global user_auth
try:
del user_auth[username]
except KeyError: pass
def get_user_attributes(username):
try:
attributes = user_attributes[username]
except KeyError:
attributes = {}
return attributes
def user_exists(username):
"""Check that user exists"""
global user_auth
return (username in user_auth)
def check_user_auth(username, password):
"""Authenticate an application from Authorization HTTP header"""
global user_auth
try:
if user_auth[username] == password:
return True
except KeyError:
# No such user, fall through
pass
return False
def create_session(username, remote):
"""Create a user session for an authenticated user"""
import hashlib
global session_auth
token = hashlib.md5((username + remote).encode('utf-8')).hexdigest()[:24]
session_auth[token] = { "username": username, "remote": remote, }
return token
def validate_session(token, remote):
"""Validate a user session"""
global session_auth
session = None
try: session = session_auth[token]
except KeyError: pass
# Unknown session token
if not session:
return None
# Check any validation factors (just remote now)
if session["remote"] != remote:
return None
# User has authenticated, return a session object
response = {
"token": token,
"user": build_user_dict(session["username"]),
}
return response
def delete_session(token):
global session_auth
del session_auth[token]
def build_user_dict(username):
user_dict = {
"name": username, "first-name": username,
"last-name": username, "display-name": username,
"email": u'%s@does.not.ëxist' % username, "active": True,
}
return user_dict
class CrowdServerStub(BaseHTTPRequestHandler):
# Disable logging of fulfilled requests
def log_request(self, format, *args):
return
def _default_handler(self):
self.send_response(404)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("Sorry, location does not exist\n".encode('ascii'))
def _do_app_failed_auth(self):
self.send_response(401)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("Application failed to authenticate\n".encode('ascii'))
def _do_user_failed_auth(self, bad_user=False, bad_pass=False):
response = {}
if bad_user:
response["reason"] = "USER_NOT_FOUND"
response["message"] = "User <whatever> does not exist"
if bad_pass:
response["reason"] = "INVALID_USER_AUTHENTICATION"
response["message"] = "Failed to authenticate principal, password was invalid"
self.send_response(400)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _do_terminate(self):
# Mark server object for termination
self.server.keep_running = False
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write("Terminating\n".encode('ascii'))
def _auth_user(self):
username = self.get_params['username'][0]
password = self.json_data['value']
user_authenticated = check_user_auth(username, password)
response = {}
response_code = 0
# Either user may authenticate, used an invalid password,
# or user does not exist.
if user_authenticated:
response_code = 200
response = build_user_dict(username)
elif user_exists(username):
response_code = 400
response = {
"reason": "INVALID_USER_AUTHENTICATION",
"message": "Failed to authenticate principal, password was invalid",
}
else:
response_code = 400
response = {
"reason": "USER_NOT_FOUND",
"message": 'User <%s> does not exist' % username
}
self.send_response(response_code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _get_session(self):
username = self.json_data['username']
password = self.json_data['password']
v_factor = self.json_data['validation-factors']['validationFactors']
remote = ''
for f in v_factor:
if f['name'] == 'remote_address':
remote = f['value']
user_authenticated = check_user_auth(username, password)
response = {}
response_code = 0
# Either user may authenticate, used an invalid password,
# or user does not exist.
if user_authenticated:
response_code = 200
token = create_session(username, remote)
response = {
"token": token,
"user": build_user_dict(username),
}
elif user_exists(username):
response_code = 400
response = {
"reason": "INVALID_USER_AUTHENTICATION",
"message": "Failed to authenticate principal, password was invalid",
}
else:
response_code = 400
response = {
"reason": "USER_NOT_FOUND",
"message": 'User <%s> does not exist' % username
}
self.send_response(response_code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _validate_session(self):
v_factor = self.json_data['validationFactors']
remote = ''
for f in v_factor:
if f['name'] == 'remote_address':
remote = f['value']
token = None
m = re.search(r'/([A-Za-z\d]{24})', self.path)
if m:
token = m.groups()[0]
session = validate_session(token, remote)
else:
session = None
response = {}
response_code = 0
if session:
response_code = 200
response = session
else:
response_code = 404
response = {
"reason": "INVALID_SSO_TOKEN",
"message":"Token does not validate."
}
self.send_response(response_code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _delete_session(self):
m = re.search(r'/([A-Za-z\d]{24})', self.path)
if m:
token = m.groups()[0]
response = {}
response_code = 0
if token:
try:
delete_session(token)
response_code = 204
except KeyError:
response_code = 404
response = {
"reason": "INVALID_SSO_TOKEN",
"message": "Token does not exist."
}
self.send_response(response_code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _get_groups(self):
username = self.get_params['username'][0]
groups = get_user_group_membership(username)
response = {u'groups': [{u'name': x} for x in groups]}
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _get_group_users(self):
groupname = self.get_params['groupname'][0]
users = get_group_users(groupname)
response = {u'users': [{u'name': x} for x in users]}
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _get_user(self):
username = self.get_params['username'][0]
if user_exists(username):
response = {u'user': {u'name': username}}
try:
if self.get_params['expand'][0] == 'attributes':
response['attributes'] = get_user_attributes(username)
except (KeyError, IndexError): pass
self.send_response(200)
else:
response = {}
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _add_user(self):
username = self.json_data['name']
password = self.json_data['password']
if not user_exists(username):
add_user(username, password, attributes=self.json_data)
self.send_response(201)
else:
response = {u'reason': u'INVALID_USER',
u'message': u'User already exists'}
self.send_response(400)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(response).encode('ascii'))
def _do_COMMON(self, data=None):
handlers = [
{
"url": r"/terminate",
"action": self._do_terminate,
"require_auth": False,
},
{
"url": r"/rest/usermanagement/1/authentication$",
"action": self._auth_user,
"require_auth": True,
"method": "POST",
},
{
"url": r"/rest/usermanagement/1/session$",
"action": self._get_session,
"require_auth": True,
"method": "POST",
},
{
"url": r"/rest/usermanagement/1/session/[A-Za-z0-9]{24}$",
"action": self._validate_session,
"require_auth": True,
"method": "POST",
},
{
"url": r"/rest/usermanagement/1/session/[A-Za-z0-9]{24}$",
"action": self._delete_session,
"require_auth": True,
"method": "DELETE",
},
{
"url": r"/rest/usermanagement/1/user/group/direct$",
"action": self._get_groups,
"require_auth": True,
"method": "GET",
},
{
"url": r"/rest/usermanagement/1/user/group/nested$",
"action": self._get_groups,
"require_auth": True,
"method": "GET",
},
{
"url": r"/rest/usermanagement/1/group/user/nested$",
"action": self._get_group_users,
"require_auth": True,
"method": "GET",
},
{
"url": r"/rest/usermanagement/1/user$",
"action": self._get_user,
"require_auth": True,
"method": "GET",
},
{
"url": r"/rest/usermanagement/1/user$",
"action": self._add_user,
"require_auth": True,
"method": "POST",
},
# Default handler for unmatched requests
{
"url": r".*",
"action": self._default_handler,
"require_auth": True,
},
]
p = urlparse.urlparse(self.path)
self.json_data = data if data is not None else {}
self.get_params = urlparse.parse_qs(p.query)
for handler in handlers:
method = handler.get('method')
if (re.match(handler['url'], p.path)
and (not method or method == self.command)):
# Authenticate application if required
require_auth = handler.get('require_auth')
if require_auth and not check_app_auth(self.headers):
self._do_app_failed_auth()
return
# Run the handler's action
handler['action']()
return
# An unhandled path was encountered.
self.send_response(500)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write('Oops, should not be here for {}'.format(self.path).encode('ascii'))
def do_GET(self):
self._do_COMMON()
def do_POST(self):
ct = self.headers.get('Content-Type')
if ct != 'application/json':
print("Received unwanted Content-Type (%s) in POST" % ct)
cl = int(self.headers.get('Content-Length', 0))
if cl > 0:
data = self.rfile.read(cl).decode('utf-8')
else:
data = ""
jdata = json.loads(data) if data else {}
self._do_COMMON(data=jdata)
def do_PUT(self):
self._do_COMMON()
def do_DELETE(self):
self._do_COMMON()
def init_server(port):
global httpd
httpd = HTTPServer(("", port), CrowdServerStub)
return httpd
def run_server(port):
if not httpd:
init_server(port)
httpd.keep_running = True
while httpd.keep_running:
httpd.handle_request()
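def example_setup(port=8001):
    """A hedged usage sketch (not exercised by the test suite): register one
    application and one user with placeholder names, then serve requests in a
    background thread until a client hits the /terminate endpoint."""
    import threading
    add_app("testapp", "testpass")            # credentials the REST client must present
    add_user("alice", "secret", attributes={"mail": "alice@example.com"})
    add_user_to_group("alice", "crowd-users")
    thread = threading.Thread(target=run_server, args=(port,), daemon=True)
    thread.start()
    return thread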
if __name__ == "__main__":
run_server(8001)
|
# This app listens for new blocks, reads the extrinsics and stores the transactions in a MySQL/MariaDB database.
# The database must already exist; the app will create the tables and indexes it uses.
# import libraries
# system packages
import sys
import os
import json
# Substrate module
from substrateinterface import SubstrateInterface, Keypair,ExtrinsicReceipt
from substrateinterface.exceptions import SubstrateRequestException
# base64 encoder/decoder
import base64
# base58 encoder/decoder
import base58
#import scale library to load data types
import scalecodec
# import mysql connector
import mysql.connector
currentime=""
# read environment variables
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PWD=os.environ['DB_PWD']
DB_HOST=os.environ['DB_HOST']
NODE=os.environ['NODE']
except KeyError:
    print("Environment variables have not been set")
    exit(1)
# function to load data types registry
def load_type_registry_file(file_path: str) -> dict:
with open(os.path.abspath(file_path), 'r') as fp:
data = fp.read()
return json.loads(data)
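# A hedged sketch of how the registry loaded above can be passed when opening the
# node connection (the file name "custom-types.json" is a placeholder; the actual
# entry point of this app may differ):
#
#     custom_type_registry = load_type_registry_file("custom-types.json")
#     substrate = SubstrateInterface(
#         url=NODE,
#         ss58_format=42,
#         type_registry=custom_type_registry,
#     )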
# function to create tables required
def create_tables():
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor()
# use database
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
print(err)
exit(1)
# create tables
createtx="CREATE TABLE `transactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,`txhash` VARCHAR(66) NOT NULL, \
`sender` VARCHAR(64) NOT NULL, `recipient` VARCHAR(64) NOT NULL, \
`amount` numeric(32,0) NOT NULL, \
`gasfees` numeric(32,0) NOT NULL, \
`dtblockchain` DATETIME NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table TRANSACTIONS...")
cursor.execute(createtx)
except mysql.connector.Error as err:
if(err.msg!="Table 'transactions' already exists"):
print(err.msg)
else:
print("OK")
# create indexes
createidxtx="CREATE INDEX txhash on transactions(txhash)"
try:
print("Creating index TXHASH on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'txhash'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX sender on transactions(sender)"
try:
print("Creating index SENDER on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'sender'"):
print(err.msg)
else:
print("OK")
createidxtx="CREATE INDEX recipient on transactions(recipient)"
try:
print("Creating index RECIPIENT on TRANSACTIONS...")
cursor.execute(createidxtx)
except mysql.connector.Error as err:
if(err.msg!="Duplicate key name 'recipient'"):
print(err.msg)
else:
print("OK")
# creating sync table to keep synchronisation info
createsync="CREATE TABLE `sync` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`lastblocknumberverified` INT(11) NOT NULL, \
`lastapprovalrequestprocessed` int(11) default 0 not null,\
PRIMARY KEY (id))"
try:
print("Creating table SYNC...")
cursor.execute(createsync)
except mysql.connector.Error as err:
if(err.msg!="Table 'sync' already exists"):
print(err.msg)
else:
print("OK")
# creating categories table for impact actions
createcategories="CREATE TABLE `impactactionscategories` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(64) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), PRIMARY KEY (id))"
try:
print("Creating table impactactionscategories...")
cursor.execute(createcategories)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionscategories' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactions table for impact actions
createactions="CREATE TABLE `impactactions` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`category` INT(11) NOT NULL,`auditors` INT(11) NOT NULL,`blockstart` INT(11) NOT NULL,\
`blockend` INT(11) NOT NULL, `rewardstoken` INT(11) NOT NULL, `rewardsamount` INT(32) NOT NULL,\
`rewardsoracle` INT(32) NOT NULL,`rewardauditors` INT(32) NOT NULL,\
`slashingsauditors` INT(32) NOT NULL,`maxerrorsauditor` INT(11) NOT NULL,\
`fields` varchar(8192) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash), \
PRIMARY KEY (id))"
try:
print("Creating table impactactions...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactions' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsoracles table for impact actions
createactions="CREATE TABLE `impactactionsoracles` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsoracles...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsoracles' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsauditors table for impact actions
createactions="CREATE TABLE `impactactionsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`description` VARCHAR(128) NOT NULL,\
`account` VARCHAR(48) NOT NULL,`categories` VARCHAR(128) NOT NULL,\
`area` VARCHAR(64) NOT NULL,`otherinfo` VARCHAR(66) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table impactactionsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsproxy table for impact actions
createactions="CREATE TABLE `impactactionsproxy` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`account` VARCHAR(48) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsproxy...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsproxy' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequests table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequests` (`id` MEDIUMINT NOT NULL,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`info` VARCHAR(8192) NOT NULL,\
`dtapproved` DATETIME,\
`dtrefused` DATETIME,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequests...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequests' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestsauditors table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestsauditors` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`auditor` VARCHAR(48) NOT NULL,\
`maxdays` INT(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestsauditors...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestsauditors' already exists"):
print(err.msg)
else:
print("OK")
# creating impactactionsapprovalrequestvotes table for impact actions
createactions="CREATE TABLE `impactactionsapprovalrequestauditorvotes` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`approvalrequestid` int(11) NOT NULL,\
`vote` VARCHAR(1) NOT NULL,\
`otherinfo` VARCHAR(66) NOT NULL,\
`dtrewards` DATETIME NOT NULL,\
CONSTRAINT txhash_unique UNIQUE (txhash),PRIMARY KEY (id))"
try:
print("Creating table impactactionsapprovalrequestauditorvotes...")
cursor.execute(createactions)
except mysql.connector.Error as err:
if(err.msg!="Table 'impactactionsapprovalrequestauditorvotes' already exists"):
print(err.msg)
else:
print("OK")
# creating assets table for FT
createassets="CREATE TABLE `ftassets` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`assetid` int(11) NOT NULL,\
`owner` VARCHAR(48) NOT NULL,\
`maxzombies` int(11) NOT NULL,\
`minbalance` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table ftassets...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'ftassets' already exists"):
print(err.msg)
else:
print("OK")
# creating transaction for fungible tokens
createassets="CREATE TABLE `fttransactions` (`id` MEDIUMINT NOT NULL AUTO_INCREMENT,\
`blocknumber` INT(11) NOT NULL,\
`txhash` VARCHAR(66) NOT NULL,\
`dtblockchain` DATETIME NOT NULL,\
`signer` VARCHAR(48) NOT NULL,\
`sender` VARCHAR(48) NOT NULL,\
`category` VARCHAR(20) NOT NULL,\
`assetid` int(11) NOT NULL,\
`recipient` VARCHAR(48) NOT NULL,\
`amount` int(11) NOT NULL, CONSTRAINT txhash_unique UNIQUE (txhash),\
PRIMARY KEY (id))"
try:
print("Creating table fttransactions...")
cursor.execute(createassets)
except mysql.connector.Error as err:
if(err.msg!="Table 'fttransactions' already exists"):
print(err.msg)
else:
print("OK")
#closing database
cursor.close()
cnx.close()
# function to synchronise with the blockchain, reading old blocks not yet loaded
def sync_blockchain(substrate):
# we get the last block number from the blockchain
r=substrate.rpc_request(method='chain_getHeader',params=[],result_handler=None)
rs=r.get('result')
lastblockhex=rs.get('number')
lastblocknumber=int(lastblockhex,16)
print("[Info] Last Block: ",lastblocknumber)
# we check the last block reconcilied
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
cursor = cnx.cursor(dictionary=True)
lastblocknumberverified=0
query="select * from sync limit 1"
try:
cursor.execute(query)
for row in cursor:
lastblocknumberverified=row['lastblocknumberverified']
#lastblocknumberverified=row.get('lastblocknumberverified')
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=0
print("[INFO] Last block number verified:",lastblocknumberverified)
# loop over the block numbers to find gaps and fill them if necessary
x=lastblocknumberverified+1
cursor.close()
cursorb = cnx.cursor()
print("[INFO] Syncing previous blocks...")
while x<=lastblocknumber:
# get block data
print("Syncing block # ",x)
# process the block of data
process_block(x)
# update sync
sqlst=""
if(lastblocknumberverified==0):
sqlst="insert into sync set lastblocknumberverified="+str(x)
else:
sqlst="update sync set lastblocknumberverified="+str(x)
try:
cursorb.execute(sqlst)
cnx.commit()
except mysql.connector.Error as err:
print(err.msg)
lastblocknumberverified=x
# increase block number
x=x+1
#end while loop
cursorb.close()
cnx.close()
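# A hedged sketch of how the helpers above could be wired together at start-up
# (the actual entry point of this app may differ; subscribe_block_headers is the
# standard py-substrate-interface call for reacting to new blocks):
#
#     substrate = SubstrateInterface(url=NODE)
#     create_tables()
#     sync_blockchain(substrate)
#     def block_handler(obj, update_nr, subscription_id):
#         process_block(obj['header']['number'])
#     substrate.subscribe_block_headers(block_handler)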
# function to store a new transaction
def store_transaction(blocknumber,txhash,sender,recipient,amount,currenttime,gasfees):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Transaction")
print("TxHash: ",txhash)
print("Current time: ",currentime)
print("Sender: ",sender)
print("Recipient: ",recipient)
print("Amount: ",amount)
print("`Gas fees`: ",gasfees)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into transactions set blocknumber=%s,txhash=%s,sender=%s,recipient=%s,amount=%s,gasfees=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,sender,recipient,amount,gasfees,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print(err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Impact Action
def impactactions_newimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idimpactaction)
print("Data: ",data)
print("Category: ",j['category'])
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactions set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,category=%s,auditors=%s,blockstart=%s,blockend=%s,rewardstoken=%s,rewardsamount=%s,rewardsoracle=%s"
addtx=addtx+",rewardauditors=%s,slashingsauditors=%s,maxerrorsauditor=%s,fields=%s"
if 'fields' in j:
f=j['fields']
else:
f={}
datatx=(blocknumber,txhash,signer,dtblockchain,idimpactaction,j['description'],j['category'],j['auditors'],j['blockstart'],j['blockend'],j['rewardstoken'],j['rewardsamount'],j['rewardsoracle'],j['rewardsauditors'],j['slashingsauditors'],j['maxerrorsauditor'],json.dumps(f))
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Impact Actions
def impactactions_destroyimpactaction(blocknumber,txhash,signer,currenttime,idimpactaction):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Impact Action")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Impact Action: ",idimpactaction)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactions where id=%s"
datatx=(idimpactaction,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Oracle
def impactactions_neworacle(blocknumber,txhash,signer,currenttime,idoracle,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",idoracle)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsoracles set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s"
addtx=addtx+",description=%s,account=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,idoracle,j['description'],j['account'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Oracle
def impactactions_destroyoracle(blocknumber,txhash,signer,currenttime,idoracle):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Oracle")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Oracle: ",idoracle)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsoracles where id=%s"
datatx=(idoracle,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Approval Request
def impactactions_newapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,info):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing New Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id: ",approvalrequestid)
print("Info: ",info)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequests set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,info=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,info)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Vote Approval Request
def impactactions_voteapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
j=json.loads(data)
vote=j['vote']
otherinfo=j['otherinfo']
print("Storing Vote of an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id Approval: ",approvalrequestid)
print("Vote: ",vote)
print("Other Info: ",otherinfo)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestauditorvotes set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,vote=%s,otherinfo=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,vote,otherinfo)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Assign Auditor to Approval Request
def impactactions_assignauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor,maxdays):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
print("Storing Assigned Auditor for an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request Id: ",approvalrequestid)
print("Auditor: ",auditor)
print("Max days: ",maxdays)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsapprovalrequestsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,approvalrequestid=%s,auditor=%s,maxdays=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,approvalrequestid,auditor,maxdays)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destory_assignedauditorapprovalrequest(blocknumber,txhash,signer,currenttime,approvalrequestid,auditor):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Assigned Auditor to an Approval Request")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Approval Request id: ",approvalrequestid)
print("Auditor: ",auditor)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsapprovalrequestsauditors where approvalrequestid=%s and auditor=%s"
datatx=(approvalrequestid,auditor)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Auditor
def impactactions_newauditor(blocknumber,txhash,signer,currenttime,account,data):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
#decode json structure
j=json.loads(data)
print("Storing New Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
print("Data: ",data)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsauditors set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",description=%s,account=%s,categories=%s,area=%s,otherinfo=%s"
if 'otherinfo' in j:
o=j['otherinfo']
else:
o=''
datatx=(blocknumber,txhash,signer,dtblockchain,j['description'],account,json.dumps(j['categories']),j['area'],o)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Auditor
def impactactions_destroyauditor(blocknumber,txhash,signer,currenttime,account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Auditor")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsauditors where account=%s"
datatx=(account,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Proxy
def impactactions_newproxy(blocknumber,txhash,signer,currenttime,idproxy, account):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Account: ",account)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionsproxy set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s"
addtx=addtx+",id=%s,account=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idproxy,account)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Proxy
def impactactions_destroyproxy(blocknumber,txhash,signer,currenttime,idproxy):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Proxy")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("id Proxy: ",idproxy)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionsproxy where id=%s"
datatx=(idproxy,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - New Category
def impactactions_newcategory(blocknumber,txhash,signer,currenttime,idcategory,description):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Storing New Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
print("Description: ",description)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into impactactionscategories set blocknumber=%s,txhash=%s,signer=%s,dtblockchain=%s,id=%s,description=%s"
datatx=(blocknumber,txhash,signer,dtblockchain,idcategory,description)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to store Impact Actions - Destroy Category
def impactactions_destroycategory(blocknumber,txhash,signer,currenttime,idcategory):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Category")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Id category: ",idcategory)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from impactactionscategories where id=%s"
datatx=(idcategory,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to create new asset from Sudo
def assets_force_create(blocknumber,txhash,signer,currenttime,assetid,owner,maxzombies,minbalance):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Create Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Owner : ",owner)
print("Max Zombies : ",maxzombies)
print("Min Balance : ",minbalance)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into ftassets set blocknumber=%s,txhash=%s,signer=%s,assetid=%s,owner=%s,maxzombies=%s,minbalance=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,assetid,owner,maxzombies,minbalance,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to mint assets in favor of an account
def assets_mint(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Minted"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to burn assets, decreasing the balance of an account
def assets_burn(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Burned"
print("Burn Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to transfer assets in favor of an account
def assets_transfer(blocknumber,txhash,signer,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
datatx=(blocknumber,txhash,signer,signer,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to force transfer assets in favor of an account
def assets_forcetransfer(blocknumber,txhash,signer,sender,currenttime,assetid,recipient,amount):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
category="Transfer"
print("Mint Assets (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id : ",assetid)
print("Recipient : ",recipient)
print("Amount : ",amount)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
addtx="insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s"
    datatx=(blocknumber,txhash,signer,sender,category,assetid,recipient,amount,dtblockchain)
try:
cursor.execute(addtx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
# function to destroy an asset (Fungible Tokens), used for both destroy and the Sudo force_destroy
def assets_force_destroy(blocknumber,txhash,signer,currenttime,assetid,witnesszombies):
cnx = mysql.connector.connect(user=DB_USER, password=DB_PWD,host=DB_HOST,database=DB_NAME)
print("Destroy Asset (Fungible Tokens)")
print("BlockNumber: ",blocknumber)
print("TxHash: ",txhash)
print("Current time: ",currenttime)
print("Signer: ",signer)
print("Asset Id: ",assetid)
print("Witnesses Zombies: ",witnesszombies)
cursor = cnx.cursor()
dtblockchain=currenttime.replace("T"," ")
dtblockchain=dtblockchain[0:19]
deltx="delete from ftassets where assetid=%s"
datatx=(assetid,)
try:
cursor.execute(deltx,datatx)
except mysql.connector.Error as err:
print("[Error] ",err.msg)
cnx.commit()
cursor.close()
cnx.close()
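# Hedged sketch (an addition, not part of the original script): the parameterized-insert
# pattern used by the asset functions above, isolated into one helper. The table and
# column names are the ones used in this script; everything else is illustrative.
import mysql.connector  # already imported at the top of this script

def store_ft_transaction(cnx, row):
    # row must match the column order of the SET clause below
    addtx = ("insert into fttransactions set blocknumber=%s,txhash=%s,signer=%s,"
             "sender=%s,category=%s,assetid=%s,recipient=%s,amount=%s,dtblockchain=%s")
    cursor = cnx.cursor()
    try:
        # %s placeholders let the driver escape the values safely
        cursor.execute(addtx, row)
        cnx.commit()
    except mysql.connector.Error as err:
        print("[Error] ", err.msg)
    finally:
        cursor.close()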
# function to process a block of data
def process_block(blocknumber):
# Retrieve extrinsics in block
print("Processing Block # ",blocknumber)
result = substrate.get_block(block_number=blocknumber)
print ("##########################")
print(result)
print("Block Hash: ",result['header']['hash'])
blockhash=result['header']['hash']
print ("##########################")
events=substrate.get_events(result['header']['hash'])
print ("#######EVENTS##############")
print(events)
print ("##########################")
# retrieve receipt
cnt=0
for extrinsic in result['extrinsics']:
if extrinsic.address:
signed_by_address = extrinsic.address.value
else:
signed_by_address = None
print('\nPallet: {}\nCall: {}\nSigned by: {}'.format(
extrinsic.call_module.name,
extrinsic.call.name,
signed_by_address
))
        # check whether the extrinsic succeeded or failed
try:
error=events[cnt].params[0]['value'].get('Error')
except:
error=None
if events[cnt].event.name=="ExtrinsicFailed" or error!=None :
print("Extrinsic has failed")
cnt=cnt+1
continue
else:
print("Extrinsic succeded: ",events[cnt].event.name)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("extrinsic: ",extrinsic)
print("blockhash: ",blockhash)
gasfees=0
if (extrinsic.extrinsic_hash!=None):
            # get receipt of the extrinsic
receipt = ExtrinsicReceipt(
substrate=substrate,
extrinsic_hash=extrinsic.extrinsic_hash,
block_hash=blockhash
)
print("************RECEIPT**************")
print("blockhash: ",blockhash)
print("extrinsic.extrinsic_hash: ",extrinsic.extrinsic_hash)
print("receipt.total_fee_amount: ",receipt.total_fee_amount)
print(receipt.is_success)
print(receipt.extrinsic.call_module.name)
print(receipt.extrinsic.call.name)
print(receipt.weight)
print("*********************************")
gasfees=receipt.total_fee_amount
#for TimeStamp call we set the time of the following transactions
if extrinsic.call_module.name=="Timestamp" and extrinsic.call.name=="set":
currentime=extrinsic.params[0]['value']
#Balance Transfer we update the transactions
if extrinsic.call_module.name=="Balances" and ( extrinsic.call.name=="transfer" or extrinsic.call.name=="transfer_keep_alive"):
## store the transaction in the database
store_transaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,extrinsic.params[0]['value'],extrinsic.params[1]['value'],currentime,gasfees)
#Impact Actions - Vote Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="vote_approval_request":
impactactions_voteapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
        #Impact Actions - New Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="request_approval":
impactactions_newapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Impact Actions - Assign Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="assign_auditor":
impactactions_assignauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Impact Actions - Remove Assigned Auditor to Approval Request
if extrinsic.call_module.name=="ImpactActions" and extrinsic.call.name=="destroy_assigned_auditor":
impactactions_destory_assignedauditorapprovalrequest(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Create new asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="create":
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'],extrinsic.params[3]['value'])
#Assets - Destroy asset as regular user
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="destroy":
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'])
#Assets - Mint assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="mint":
assets_mint(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Burn assets decreasing the balance of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="burn":
assets_burn(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
#Assets - Transfer assets in favor of an account
if extrinsic.call_module.name=="Assets" and extrinsic.call.name=="transfer":
assets_transfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,extrinsic.params[0]['value'],extrinsic.params[1]['value'],extrinsic.params[2]['value'])
# Sudo Calls
if extrinsic.call_module.name=="Sudo" and extrinsic.call.name=="sudo":
print(extrinsic.params[0].get('value'))
c=extrinsic.params[0].get('value')
# new impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='create_impact_action':
print("Impact Actions - Create New Impact Action")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy impact action
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_impact_action':
print("Impact Actions - Destroy Impact Action")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyimpactaction(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='create_oracle':
print("Impact Actions - Create New Oracle")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_neworacle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy oracle
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_oracle':
print("Impact Actions - Destroy Oracle")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyoracle(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='create_auditor':
print("Impact Actions - Create New Auditor")
print("id: ",c['call_args'][0]['value'])
print("data: ",c['call_args'][1]['value'])
impactactions_newauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy auditor
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_auditor':
print("Impact Actions - Destroy Auditor")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyauditor(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new proxy account
if c['call_module']== 'ImpactActions' and c['call_function']=='create_proxy':
print("Impact Actions - Create New Proxy")
print("id: ",c['call_args'][0]['value'])
print("account: ",c['call_args'][1]['value'])
impactactions_newproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy proxy
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_proxy':
print("Impact Actions - Destroy Proxy")
print("id: ",c['call_args'][0]['value'])
impactactions_destroyproxy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# new category
if c['call_module']== 'ImpactActions' and c['call_function']=='create_category':
print("Impact Actions - Create New Category")
print("id: ",c['call_args'][0]['value'])
print("description: ",c['call_args'][1]['value'])
impactactions_newcategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# destroy category
if c['call_module']== 'ImpactActions' and c['call_function']=='destroy_category':
print("Impact Actions - Destroy Category")
print("id: ",c['call_args'][0]['value'])
impactactions_destroycategory(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'])
# Force Create Asset
if c['call_module']== 'Assets' and c['call_function']=='force_create':
print("Fungibile Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Owner: ",c['call_args'][1]['value'])
print("Max Zombies: ",c['call_args'][2]['value'])
print("Minimum Deposit: ",c['call_args'][3]['value'])
assets_force_create(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force transfer Assets
if c['call_module']== 'Assets' and c['call_function']=='force_transfer':
print("Fungible Tokens - Force Transfer")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_forcetransfer(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,c['call_args'][1]['value'],currentime,c['call_args'][0]['value'],c['call_args'][2]['value'],c['call_args'][3]['value'])
# Force Destroy Asset
if c['call_module']== 'Assets' and c['call_function']=='force_destroy':
print("Fungible Tokens - Create Asset")
print("id: ",c['call_args'][0]['value'])
print("Witnesses Zombies: ",c['call_args'][1]['value'])
assets_force_destroy(blocknumber,'0x'+extrinsic.extrinsic_hash,extrinsic.address.value,currentime,c['call_args'][0]['value'],c['call_args'][1]['value'])
# Loop through call params
for param in extrinsic.params:
if param['type'] == 'Compact<Balance>':
param['value'] = '{} {}'.format(param['value'] / 10 ** substrate.token_decimals, substrate.token_symbol)
print("Param '{}': {}".format(param['name'], param['value']))
cnt=cnt+1
# subscription handler for new blocks written
def subscription_handler(obj, update_nr, subscription_id):
print(f"New block #{obj['header']['number']} produced by {obj['author']} hash: {obj['header']['hash']}")
# call the block management function
process_block(obj['header']['number'])
## MAIN
# load custom data types
custom_type_registry = load_type_registry_file("../assets/types.json")
# define connection parameters
substrate = SubstrateInterface(
url=NODE,
ss58_format=42,
type_registry_preset='default',
type_registry=custom_type_registry
)
# create database tables
create_tables()
# synchronise the blockchain
if(len(sys.argv)>1):
if (sys.argv[1]== '--sync' or sys.argv[1]=="-s"):
sync_blockchain(substrate)
# subscribe to new block writing and process them in real time
result = substrate.subscribe_block_headers(subscription_handler, include_author=True)
print(result)
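# Usage note (an addition, not in the original script): pass "--sync" or "-s" to
# backfill historical blocks via sync_blockchain() before subscribing; otherwise only
# newly produced blocks are processed through subscription_handler. For example
# (the script name is an assumption): python3 cache-engine.py --sync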
|
#!/bin/env python
from app import create_app, socketio
app = create_app(debug=True)
if __name__ == '__main__':
socketio.run(app, host="0.0.0.0")
|
'''OpenGL extension EXT.pixel_transform
This module customises the behaviour of the
OpenGL.raw.GL.EXT.pixel_transform to provide a more
Python-friendly API
Overview (from the spec)
This extension provides support for scaling, rotation, translation and
shearing of two-dimensional pixel rectangles in the pixel rasterizer.
The transformation is defined via a 4x4 matrix, where only those entries
which apply as a 2D affine transformation will be accepted and used.
These matrices can be manipulated using the same functions as the other
OpenGL matrix stacks.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/pixel_transform.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.pixel_transform import *
from OpenGL.raw.GL.EXT.pixel_transform import _EXTENSION_NAME
def glInitPixelTransformEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glPixelTransformParameterivEXT=wrapper.wrapper(glPixelTransformParameterivEXT).setInputArraySize(
'params', 1
)
glPixelTransformParameterfvEXT=wrapper.wrapper(glPixelTransformParameterfvEXT).setInputArraySize(
'params', 1
)
glGetPixelTransformParameterivEXT=wrapper.wrapper(glGetPixelTransformParameterivEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetPixelTransformParameterfvEXT=wrapper.wrapper(glGetPixelTransformParameterfvEXT).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
|
# Generated by Django 3.2.9 on 2021-11-25 06:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('meetups', '0004_remove_meetup_location'),
]
operations = [
migrations.AddField(
model_name='meetup',
name='location',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='meetups.location'),
),
]
|
import torch
import torch.nn as nn
from collections import OrderedDict
from .utils import load_state_dict_from_url
from .backbone_utils import darknet_backbone
from .transform import YOLOTransform
from .loss import YOLOLoss
__all__ = [
"YOLOv3", "yolov3_darknet53",
]
class YOLOv3(nn.Module):
def __init__(self,
backbone, num_classes,
input_size=(416, 416),
conf_thresh=0.05,
nms_thresh=0.1,
max_detections=100,
anchors=[[[116, 90], [156, 198], [373, 326]],
[[30, 61], [62, 45], [59, 119]],
[[10, 13], [16, 30], [33, 23]]]):
super(YOLOv3, self).__init__()
self.num_classes = num_classes
self.input_size = input_size
self.conf_thresh = conf_thresh
self.nms_thresh = nms_thresh
self.max_detections = max_detections
self.anchors = anchors
# transform
self.transform = YOLOTransform(self.input_size, self.conf_thresh, self.nms_thresh, self.max_detections)
# backbone
self.backbone = backbone
_out_filters = self.backbone.layers_out_filters
# embedding0
final_out_filter0 = len(self.anchors[0]) * (4 + self.num_classes)
self.embedding0 = self._make_embedding([512, 1024], _out_filters[-1], final_out_filter0)
# embedding1
final_out_filter1 = len(self.anchors[1]) * (4 + self.num_classes)
self.embedding1_cbl = self._make_cbl(512, 256, 1)
self.embedding1_upsample = nn.Upsample(scale_factor=2, mode="nearest")
self.embedding1 = self._make_embedding([256, 512], _out_filters[-2] + 256, final_out_filter1)
# embedding2
final_out_filter2 = len(self.anchors[2]) * (4 + self.num_classes)
self.embedding2_cbl = self._make_cbl(256, 128, 1)
self.embedding2_upsample = nn.Upsample(scale_factor=2, mode="nearest")
self.embedding2 = self._make_embedding([128, 256], _out_filters[-3] + 128, final_out_filter2)
# losses
self.losses = nn.ModuleList([YOLOLoss(self.num_classes, self.input_size, self.anchors[i]) for i in range(3)])
def _make_cbl(self, _in, _out, ks):
''' cbl = conv + batch_norm + leaky_relu
'''
pad = (ks - 1) // 2 if ks else 0
return nn.Sequential(OrderedDict([
("conv", nn.Conv2d(_in, _out, kernel_size=ks, stride=1, padding=pad, bias=False)),
("bn", nn.BatchNorm2d(_out)),
("relu", nn.LeakyReLU(0.1)),
]))
def _make_embedding(self, filters_list, in_filters, out_filter):
m = nn.ModuleList([
self._make_cbl(in_filters, filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3),
self._make_cbl(filters_list[1], filters_list[0], 1),
self._make_cbl(filters_list[0], filters_list[1], 3)])
m.add_module("conv_out", nn.Conv2d(filters_list[1], out_filter, kernel_size=1,
stride=1, padding=0, bias=True))
return m
def forward(self, images, targets=None):
x, gt = self.transform(images, targets)
def _branch(_embedding, _in):
for i, e in enumerate(_embedding):
_in = e(_in)
if i == 4:
out_branch = _in
return _in, out_branch
# backbone
x2, x1, x0 = self.backbone(x)
# yolo branch 0
out0, out0_branch = _branch(self.embedding0, x0)
# yolo branch 1
x1_in = self.embedding1_cbl(out0_branch)
x1_in = self.embedding1_upsample(x1_in)
x1_in = torch.cat([x1_in, x1], 1)
out1, out1_branch = _branch(self.embedding1, x1_in)
# yolo branch 2
x2_in = self.embedding2_cbl(out1_branch)
x2_in = self.embedding2_upsample(x2_in)
x2_in = torch.cat([x2_in, x2], 1)
out2, out2_branch = _branch(self.embedding2, x2_in)
outputs = [out0, out1, out2]
pred = [loss(outputs[i], gt) for i, loss in enumerate(self.losses)]
if self.training:
losses = [sum(loss) for loss in zip(*pred)]
loss_dict = {
"loss_box_x": losses[0],
"loss_box_y": losses[1],
"loss_box_width": losses[2],
"loss_box_height": losses[3],
"loss_objectness": losses[4],
"loss_classifier": losses[5]
}
return loss_dict
else:
img_sizes = [img.shape[1:] for img in images]
return self.transform.postprocess(pred, img_sizes)
model_urls = {
"yolov3_darknet53_coco": "https://media.githubusercontent.com/media/danilopeixoto/pretrained-weights/master/yolov3_darknet53_coco.pth"
}
def yolov3_darknet53(num_classes=81, pretrained=False, pretrained_backbone=True,
progress=True, **kwargs):
if pretrained:
# no need to download the backbone if pretrained is set
pretrained_backbone = False
backbone = darknet_backbone("darknet53", pretrained_backbone, progress)
model = YOLOv3(backbone, num_classes, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls["yolov3_darknet53_coco"],
progress=progress)
model.load_state_dict(state_dict)
return model
|
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from io import BytesIO
from twisted.internet import defer
from synapse.api.errors import Codes, SynapseError
from synapse.crypto.keyring import ServerKeyFetcher
from synapse.http.server import (
DirectServeResource,
respond_with_json_bytes,
wrap_json_request_handler,
)
from synapse.http.servlet import parse_integer, parse_json_object_from_request
logger = logging.getLogger(__name__)
class RemoteKey(DirectServeResource):
"""HTTP resource for retreiving the TLS certificate and NACL signature
verification keys for a collection of servers. Checks that the reported
X.509 TLS certificate matches the one used in the HTTPS connection. Checks
that the NACL signature for the remote server is valid. Returns a dict of
JSON signed by both the remote server and by this server.
Supports individual GET APIs and a bulk query POST API.
    Requests:
GET /_matrix/key/v2/query/remote.server.example.com HTTP/1.1
GET /_matrix/key/v2/query/remote.server.example.com/a.key.id HTTP/1.1
POST /_matrix/v2/query HTTP/1.1
Content-Type: application/json
{
"server_keys": {
"remote.server.example.com": {
"a.key.id": {
"minimum_valid_until_ts": 1234567890123
}
}
}
}
Response:
HTTP/1.1 200 OK
Content-Type: application/json
{
"server_keys": [
{
"server_name": "remote.server.example.com"
"valid_until_ts": # posix timestamp
"verify_keys": {
"a.key.id": { # The identifier for a key.
key: "" # base64 encoded verification key.
}
}
"old_verify_keys": {
"an.old.key.id": { # The identifier for an old key.
key: "", # base64 encoded key
"expired_ts": 0, # when the key stop being used.
}
}
"tls_fingerprints": [
{ "sha256": # fingerprint }
]
"signatures": {
"remote.server.example.com": {...}
"this.server.example.com": {...}
}
}
]
}
"""
isLeaf = True
def __init__(self, hs):
self.fetcher = ServerKeyFetcher(hs)
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
@wrap_json_request_handler
async def _async_render_GET(self, request):
if len(request.postpath) == 1:
server, = request.postpath
query = {server.decode("ascii"): {}}
elif len(request.postpath) == 2:
server, key_id = request.postpath
minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
arguments = {}
if minimum_valid_until_ts is not None:
arguments["minimum_valid_until_ts"] = minimum_valid_until_ts
query = {server.decode("ascii"): {key_id.decode("ascii"): arguments}}
else:
raise SynapseError(404, "Not found %r" % request.postpath, Codes.NOT_FOUND)
await self.query_keys(request, query, query_remote_on_cache_miss=True)
@wrap_json_request_handler
async def _async_render_POST(self, request):
content = parse_json_object_from_request(request)
query = content["server_keys"]
await self.query_keys(request, query, query_remote_on_cache_miss=True)
@defer.inlineCallbacks
def query_keys(self, request, query, query_remote_on_cache_miss=False):
logger.info("Handling query for keys %r", query)
store_queries = []
for server_name, key_ids in query.items():
if (
self.federation_domain_whitelist is not None
and server_name not in self.federation_domain_whitelist
):
logger.debug("Federation denied with %s", server_name)
continue
if not key_ids:
key_ids = (None,)
for key_id in key_ids:
store_queries.append((server_name, key_id, None))
cached = yield self.store.get_server_keys_json(store_queries)
json_results = set()
time_now_ms = self.clock.time_msec()
cache_misses = dict()
for (server_name, key_id, from_server), results in cached.items():
results = [(result["ts_added_ms"], result) for result in results]
if not results and key_id is not None:
cache_misses.setdefault(server_name, set()).add(key_id)
continue
if key_id is not None:
ts_added_ms, most_recent_result = max(results)
ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
req_key = query.get(server_name, {}).get(key_id, {})
req_valid_until = req_key.get("minimum_valid_until_ts")
miss = False
if req_valid_until is not None:
if ts_valid_until_ms < req_valid_until:
logger.debug(
"Cached response for %r/%r is older than requested"
": valid_until (%r) < minimum_valid_until (%r)",
server_name,
key_id,
ts_valid_until_ms,
req_valid_until,
)
miss = True
else:
logger.debug(
"Cached response for %r/%r is newer than requested"
": valid_until (%r) >= minimum_valid_until (%r)",
server_name,
key_id,
ts_valid_until_ms,
req_valid_until,
)
elif (ts_added_ms + ts_valid_until_ms) / 2 < time_now_ms:
logger.debug(
"Cached response for %r/%r is too old"
": (added (%r) + valid_until (%r)) / 2 < now (%r)",
server_name,
key_id,
ts_added_ms,
ts_valid_until_ms,
time_now_ms,
)
                    # We are more than half way through the lifetime of the
# response. We should fetch a fresh copy.
miss = True
else:
logger.debug(
"Cached response for %r/%r is still valid"
": (added (%r) + valid_until (%r)) / 2 < now (%r)",
server_name,
key_id,
ts_added_ms,
ts_valid_until_ms,
time_now_ms,
)
if miss:
cache_misses.setdefault(server_name, set()).add(key_id)
json_results.add(bytes(most_recent_result["key_json"]))
else:
for ts_added, result in results:
json_results.add(bytes(result["key_json"]))
if cache_misses and query_remote_on_cache_miss:
yield self.fetcher.get_keys(cache_misses)
yield self.query_keys(request, query, query_remote_on_cache_miss=False)
else:
result_io = BytesIO()
result_io.write(b'{"server_keys":')
sep = b"["
for json_bytes in json_results:
result_io.write(sep)
result_io.write(json_bytes)
sep = b","
if sep == b"[":
result_io.write(sep)
result_io.write(b"]}")
respond_with_json_bytes(request, 200, result_io.getvalue())
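# Hedged client-side sketch (an addition, not part of Synapse): querying the
# single-server GET endpoint documented in the RemoteKey docstring above.
# The homeserver base URL is a placeholder.
import requests

def fetch_remote_server_keys(homeserver_base_url, server_name):
    # e.g. fetch_remote_server_keys("https://this.server.example.com",
    #                               "remote.server.example.com")
    resp = requests.get(
        "%s/_matrix/key/v2/query/%s" % (homeserver_base_url, server_name)
    )
    resp.raise_for_status()
    # the response body carries the signed keys under "server_keys"
    return resp.json()["server_keys"]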
|
import csv
from pathlib import Path
from piecash import open_book
fields = [
"DATE",
"TRANSACTION VALUE",
"DEBIT/CREDIT INDICATOR",
"ACCOUNT",
"ACCOUNT CODE",
"CONTRA ACCOUNT",
"CONTRA ACCOUNT CODE",
"ENTRY TEXT",
]
GNUCASH_BOOK = "../gnucash_books/simple_sample.gnucash"
CSV_EXPORT = "export.csv"
REPORTING_YEAR = 2019
# open the book and the export file
with open_book(GNUCASH_BOOK, readonly=True, open_if_lock=True) as mybook, Path(CSV_EXPORT).open(
"w", newline=""
) as f:
# initialise the CSV writer
csv_writer = csv.DictWriter(f, fieldnames=fields)
csv_writer.writeheader()
# iterate on all the transactions in the book
for transaction in mybook.transactions:
# filter transactions not in REPORTING_YEAR
if transaction.post_date.year != REPORTING_YEAR:
continue
# handle only transactions with 2 splits
if len(transaction.splits) != 2:
print(
f"skipping transaction {transaction} as it has more"
f" than 2 splits in the transaction, dunno what to export to CSV"
)
continue
# assign the two splits of the transaction
split_one, split_two = transaction.splits
# build the dictionary with the data of the transaction
data = dict(
zip(
fields,
[
transaction.post_date,
split_one.value,
split_one.is_debit,
split_one.account.name,
split_one.account.code,
split_two.account.name,
split_two.account.code,
transaction.description,
],
)
)
# write the transaction to the CSV
csv_writer.writerow(data)
|
def get_info(path2file):
to_return = dict()
to_return["train_acc"] = []
to_return["train_loss"] = []
to_return["val_acc"] = []
to_return["val_loss"] = []
with open(path2file) as f:
for line in f:
if "train" in line:
to_return["train_loss"].append(float(line.split()[2]))
to_return["train_acc"].append(float(line.split()[4]))
elif "val" in line and not "Best" in line: #avoid last line
to_return["val_loss"].append(float(line.split()[2]))
to_return["val_acc"].append(float(line.split()[4]))
return(to_return)
d = get_info("TL_2fam1.out")
with open("../../Analyse/your_results/train_accuracy_TL2f1.csv", "a") as csv:
csv.write("\nacc_TL2f1,")
for i in range(len(d["train_acc"])):
if i < len(d["train_acc"])-1:
csv.write(str(d["train_acc"][i])+ ",")
else:
csv.write(str(d["train_acc"][i]))
with open("../../Analyse/your_results/train_accuracy_TL2f1.csv", "a") as csv:
csv.write("\nloss_TL2f1,")
for i in range(len(d["train_loss"])):
if i < len(d["train_loss"])-1:
csv.write(str(d["train_loss"][i])+ ",")
else:
csv.write(str(d["train_loss"][i]))
with open("../../Analyse/your_results/accuracies.csv", "a") as csv:
csv.write("\nacc_T2f1,")
for i in range(len(d["val_acc"])):
if i < len(d["val_acc"])-1:
csv.write(str(d["val_acc"][i])+ ",")
else:
csv.write(str(d["val_acc"][i]))
with open("../../Analyse/your_results/accuracies.csv", "a") as csv:
csv.write("\nloss_T2f1,")
for i in range(len(d["val_loss"])):
if i < len(d["val_loss"])-1:
csv.write(str(d["val_loss"][i])+ ",")
else:
csv.write(str(d["val_loss"][i]))
|
"""
pythonbible-api is an API wrapper for the pythonbible library using FastAPI.
"""
__version__ = "0.0.2"
|
# adding a label to gui
import tkinter as tk
from tkinter import ttk #ttk = themed tk
win = tk.Tk() # win is short for windows #constructor
win.title("Python GUI with label")
ttk.Label(win, text="A Label").grid(column=0, row=0) #set label text name an grid coordinates
win.mainloop()
|
import struct
import pytest
from puslib import get_policy
from puslib.ident import PusIdent
from puslib.packet import PusTcPacket, AckFlag
from puslib.parameter import UInt32Parameter, Int16Parameter, Real64Parameter
from puslib.services import RequestVerification, PusService20
from puslib.streams.buffer import QueuedOutput
@pytest.fixture(name="service_20_setup")
def fixture_service_20_setup():
ident = PusIdent(apid=10)
tm_stream = QueuedOutput()
pus_service_1 = RequestVerification(ident, tm_stream)
params = {
0: UInt32Parameter(1),
3: Int16Parameter(-2),
5: Real64Parameter(3.0)
}
pus_service_20 = PusService20(ident, pus_service_1, tm_stream, params)
return pus_service_20, ident, tm_stream, params
def test_report_parameter_values(service_20_setup):
pus_service_20, ident, tm_stream, _ = service_20_setup
app_data = get_policy().function_management.count_type(1).to_bytes() + get_policy().common.param_id_type(0).to_bytes()
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.NONE, service_type=20, service_subtype=1, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 20
assert report.subservice == 2
assert report.source_data is not None
fmt = '>' + (get_policy().function_management.count_type().format + get_policy().common.param_id_type().format + UInt32Parameter().format).replace('>', '')
num_values, param_id, param_value = struct.unpack(fmt, report.source_data)
assert num_values == 1
assert param_id == 0
assert param_value == 1
app_data = get_policy().function_management.count_type(2).to_bytes() + get_policy().common.param_id_type(0).to_bytes() + get_policy().common.param_id_type(3).to_bytes()
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.NONE, service_type=20, service_subtype=1, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 20
assert report.subservice == 2
assert report.source_data is not None
fmt = '>' + (get_policy().function_management.count_type().format + get_policy().common.param_id_type().format + UInt32Parameter().format + get_policy().common.param_id_type().format + Int16Parameter().format).replace('>', '')
num_values, param1_id, param1_value, param2_id, param2_value = struct.unpack(fmt, report.source_data)
assert num_values == 2
assert param1_id == 0
assert param1_value == 1
assert param2_id == 3
assert param2_value == -2
    app_data = get_policy().function_management.count_type(1).to_bytes() + get_policy().common.param_id_type(1).to_bytes() # non-existent parameter ID
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.ACCEPTANCE, service_type=20, service_subtype=1, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 1
assert report.subservice == 2
    app_data = get_policy().function_management.count_type(2).to_bytes() + get_policy().common.param_id_type(0).to_bytes() + get_policy().common.param_id_type(1).to_bytes() # non-existent parameter ID
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.ACCEPTANCE, service_type=20, service_subtype=1, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 1
assert report.subservice == 2
app_data = get_policy().function_management.count_type(3).to_bytes() + get_policy().common.param_id_type(0).to_bytes() + get_policy().common.param_id_type(1).to_bytes() # mismatch between N and number of parameter IDs
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.ACCEPTANCE, service_type=20, service_subtype=1, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 1
assert report.subservice == 2
def test_set_parameter_values(service_20_setup):
pus_service_20, ident, tm_stream, params = service_20_setup
app_data = get_policy().function_management.count_type(1).to_bytes() + get_policy().common.param_id_type(0).to_bytes() + UInt32Parameter(11).to_bytes()
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.NONE, service_type=20, service_subtype=3, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 0
assert params[0].value == 11
assert params[3].value == -2
assert params[5].value == 3.0
app_data = get_policy().function_management.count_type(2).to_bytes() + get_policy().common.param_id_type(3).to_bytes() + Int16Parameter(-12).to_bytes() + get_policy().common.param_id_type(5).to_bytes() + Real64Parameter(13.0).to_bytes()
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.NONE, service_type=20, service_subtype=3, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 0
assert params[0].value == 11
assert params[3].value == -12
assert params[5].value == 13.0
    app_data = get_policy().function_management.count_type(1).to_bytes() + get_policy().common.param_id_type(1).to_bytes() + UInt32Parameter(11).to_bytes() # non-existent parameter ID
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.ACCEPTANCE, service_type=20, service_subtype=3, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 1
assert report.subservice == 2
assert params[0].value == 11
assert params[3].value == -12
assert params[5].value == 13.0
    app_data = get_policy().function_management.count_type(1).to_bytes() + get_policy().common.param_id_type(1).to_bytes() + UInt32Parameter(1).to_bytes() + get_policy().common.param_id_type(2).to_bytes() + UInt32Parameter(666).to_bytes() # non-existent parameter ID
packet = PusTcPacket.create(apid=ident.apid, name=0, ack_flags=AckFlag.ACCEPTANCE, service_type=20, service_subtype=3, data=app_data)
pus_service_20.enqueue(packet)
pus_service_20.process()
assert tm_stream.size == 1
report = tm_stream.get()
assert report.service == 1
assert report.subservice == 2
assert params[0].value == 11
assert params[3].value == -12
assert params[5].value == 13.0
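# Hedged helper sketch (an addition, not part of the original tests): how the
# big-endian struct format strings used above can be assembled for a TM[20,2]
# report carrying an arbitrary list of parameter types.
def report_format(*param_types):
    # one count field, then (param id, param value) per reported parameter
    fmt = get_policy().function_management.count_type().format
    for param_type in param_types:
        fmt += get_policy().common.param_id_type().format + param_type().format
    return '>' + fmt.replace('>', '')

# e.g. report_format(UInt32Parameter, Int16Parameter) reproduces the format
# used for the two-parameter report in test_report_parameter_values above.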
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL',
'amqp://guest:guest@localhost:5672//')
CELERY_PROJECT_NAME = os.getenv('CELERY_PROJECT_NAME', 'dcifeeder')
API_URL = os.getenv('API_URL', 'http://127.0.0.1:5000')
API_DEBUG = os.getenv('API_DEBUG', False)
LOGLEVEL = logging.DEBUG
|
import computer
import numpy as np
import time
Position = (0, 0)
Canvas = np.full([200, 200], -1, dtype=int)
Canvas[0, 0] = 1
Corners = [(0, 0), (0, 0)]
TileCount = 0
Direction = 0
def AddVectors(vec1, vec2):
if(len(vec1) != len(vec2)):
return None
out = []
for v in range(len(vec1)):
out += [vec1[v] + vec2[v]]
return tuple(out)
def SendInput():
global Canvas
global Position
if(Canvas[Position] == 1):
return 1
else:
return 0
def MoveRobot():
global Direction
global Position
global Corners
if(Direction == 0):
Position = AddVectors(Position, (0, 1))
elif(Direction == 1):
Position = AddVectors(Position, (1, 0))
elif(Direction == 2):
Position = AddVectors(Position, (0, -1))
elif(Direction == 3):
Position = AddVectors(Position, (-1, 0))
print(Position)
if(Position[0] < Corners[0][0] or Position[1] < Corners[0][1]):
Corners[0] = Position
elif(Position[0] > Corners[1][0] or Position[1] > Corners[1][1]):
Corners[1] = Position
Turning = False
def RecieveOutput(out):
global Turning
global Direction
global Canvas
global Position
global TileCount
if(not Turning):
if(Canvas[Position] == -1):
TileCount += 1
Canvas[Position] = out
else:
if(out == 0):
Direction -= 1
else:
Direction += 1
if(Direction < 0):
Direction += 4
elif(Direction > 3):
Direction -= 4
MoveRobot()
Turning = not Turning
computer.Run(RecieveOutput, SendInput)
blackChar = u"\u25A0"
whiteChar = u"\u25A1"
for x in range(Corners[0][0] - 1, Corners[1][0] + 2):
out = ""
for y in range(Corners[0][1] - 1, Corners[1][1] + 2):
if(Canvas[x, y] == 1):
out += whiteChar
else:
out += blackChar
print(out)
time.sleep(0.2)
|
import os
import argparse
import json
import yaml  # used by yaml.safe_dump in main()
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
# from asteroid import TransMask
from asteroid import DPTrans
# from asteroid.engine import schedulers
# from asteroid.data.wham_dataset import WhamDataset
from asteroid.data import LibriMix
from asteroid.engine.optimizers import make_optimizer
from asteroid.engine.system import System
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
# Keys which are not in the conf.yml file can be added here.
# In the hierarchical dictionary created when parsing, the key `key` can be
# found at dic['main_args'][key]
# By default train.py will use all available GPUs. The `id` option in run.sh
# will limit the number of available GPUs for train.py .
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", default="exp/tmp", help="Full path to save best validation model")
parser.add_argument("--config", default="local/conf.yml", help="config yaml file")
def main(conf):
# train_set = WhamDataset(
# conf["data"]["train_dir"],
# conf["data"]["task"],
# sample_rate=conf["data"]["sample_rate"],
# segment=conf["data"]["segment"],
# nondefault_nsrc=conf["data"]["nondefault_nsrc"],
# )
# val_set = WhamDataset(
# conf["data"]["valid_dir"],
# conf["data"]["task"],
# sample_rate=conf["data"]["sample_rate"],
# nondefault_nsrc=conf["data"]["nondefault_nsrc"],
# )
train_set = LibriMix(
csv_dir=conf["data"]["train_dir"],
task=conf["data"]["task"],
sample_rate=conf["data"]["sample_rate"],
n_src=conf["masknet"]["n_src"],
segment=conf["data"]["segment"],
)
print(conf["data"]["train_dir"])
val_set = LibriMix(
csv_dir=conf["data"]["valid_dir"],
task=conf["data"]["task"],
sample_rate=conf["data"]["sample_rate"],
n_src=conf["masknet"]["n_src"],
segment=conf["data"]["segment"],
)
train_loader = DataLoader(
train_set,
shuffle=True,
batch_size=conf["training"]["batch_size"],
num_workers=conf["training"]["num_workers"],
drop_last=True,
)
val_loader = DataLoader(
val_set,
shuffle=False,
batch_size=conf["training"]["batch_size"],
num_workers=conf["training"]["num_workers"],
drop_last=True,
)
# Update number of source values (It depends on the task)
conf["masknet"].update({"n_src": train_set.n_src})
# TODO params
# model = TransMask(**conf["filterbank"], **conf["masknet"])
model = DPTrans(**conf["filterbank"], **conf["masknet"], sample_rate=conf['data']['sample_rate'])
# from torchsummary import summary
# model.cuda()
# summary(model, (24000,))
# import pdb
# pdb.set_trace()
optimizer = make_optimizer(model.parameters(), **conf["optim"])
# Define scheduler
scheduler = None
if conf["training"]["half_lr"]:
scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
# # TODO warmup for transformer
# from asteroid.engine.schedulers import DPTNetScheduler
# schedulers = {
# "scheduler": DPTNetScheduler(
# # optimizer, len(train_loader) // conf["training"]["batch_size"], 64
# # optimizer, len(train_loader), 64,
# optimizer, len(train_loader), 128,
# stride=2,
# # exp_max=0.0004 * 16,
# # warmup_steps=1000
# ),
# "interval": "batch",
# }
# from torch.optim.lr_scheduler import ReduceLROnPlateau
# if conf["training"]["half_lr"]:
# print('Use ReduceLROnPlateau halflr...........')
# schedulers = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
# Just after instantiating, save the args. Easy loading in the future.
exp_dir = conf["main_args"]["exp_dir"]
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, "conf.yml")
with open(conf_path, "w") as outfile:
yaml.safe_dump(conf, outfile)
# Define Loss function.
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
system = System(
model=model,
loss_func=loss_func,
optimizer=optimizer,
train_loader=train_loader,
val_loader=val_loader,
scheduler=scheduler,
config=conf,
)
# Define callbacks
callbacks = []
checkpoint_dir = os.path.join(exp_dir, "checkpoints/")
# checkpoint_dir = os.path.join(exp_dir)
checkpoint = ModelCheckpoint(
checkpoint_dir, monitor="val_loss", mode="min", save_top_k=5, verbose=True
)
callbacks.append(checkpoint)
if conf["training"]["early_stop"]:
callbacks.append(EarlyStopping(monitor="val_loss", mode="min", patience=30, verbose=True))
    # Don't ask for GPUs if they are not available.
gpus = -1 if torch.cuda.is_available() else None
distributed_backend = "ddp" if torch.cuda.is_available() else None
if conf["training"]["cont"]:
from glob import glob
ckpts = glob('%s/*.ckpt' % checkpoint_dir)
ckpts.sort()
latest_ckpt = ckpts[-1]
trainer = pl.Trainer(
max_epochs=conf["training"]["epochs"],
callbacks=callbacks,
default_root_dir=exp_dir,
gpus=gpus,
distributed_backend=distributed_backend,
limit_train_batches=1.0, # Useful for fast experiment
gradient_clip_val=conf["training"]["gradient_clipping"],
resume_from_checkpoint=latest_ckpt
)
else:
trainer = pl.Trainer(
max_epochs=conf["training"]["epochs"],
callbacks=callbacks,
default_root_dir=exp_dir,
gpus=gpus,
distributed_backend=distributed_backend,
limit_train_batches=1.0, # Useful for fast experiment
gradient_clip_val=conf["training"]["gradient_clipping"],
)
trainer.fit(system)
best_k = {k: v.item() for k, v in checkpoint.best_k_models.items()}
with open(os.path.join(exp_dir, "best_k_models.json"), "w") as f:
json.dump(best_k, f, indent=0)
state_dict = torch.load(checkpoint.best_model_path)
# state_dict = torch.load('exp/train_transmask_rnn_acous_gelu_6layer_peconv_stride2_batch6/_ckpt_epoch_208.ckpt')
system.load_state_dict(state_dict=state_dict["state_dict"])
system.cpu()
to_save = system.model.serialize()
to_save.update(train_set.get_infos())
torch.save(to_save, os.path.join(exp_dir, "best_model.pth"))
if __name__ == "__main__":
import yaml
from pprint import pprint as print
from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict
# We start with opening the config file conf.yml as a dictionary from
# which we can create parsers. Each top level key in the dictionary defined
# by the YAML file creates a group in the parser.
with open(parser.parse_args().config) as f:
def_conf = yaml.safe_load(f)
parser = prepare_parser_from_dict(def_conf, parser=parser)
# Arguments are then parsed into a hierarchical dictionary (instead of
# flat, as returned by argparse) to facilitate calls to the different
# asteroid methods (see in main).
# plain_args is the direct output of parser.parse_args() and contains all
# the attributes in an non-hierarchical structure. It can be useful to also
# have it so we included it here but it is not used.
arg_dic, plain_args = parse_args_as_dict(parser, return_plain_args=True)
print(arg_dic)
main(arg_dic)
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plain_base_29138.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
def f(x):
    # First operation
respuesta = 0
    # Second operation. Regardless of x, this loop runs 1000 times.
for i in range(1000):
respuesta += 1
    # Third operation. This loop runs x times.
for i in range(x):
respuesta += x
    # Fourth operation. Two nested loops run here, contributing 2x² operations.
for i in range(x):
for j in range(x):
respuesta += 1
respuesta += 1
    # Fifth operation.
return respuesta
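# Summing the pieces described in the comments above (a note added here, not in the
# original): roughly 1 + 1000 + x + 2*x**2 primitive operations in total, so f grows
# as O(x**2); the constant and linear terms are dominated by the nested loop.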
if __name__ == '__main__':
print(f(2))
|
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Parameter Class for variable parameters.
"""
class Parameter():
"""Parameter Class for variable parameters"""
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
|
from xdist.plugin import (
is_xdist_worker,
is_xdist_master,
get_xdist_worker_id,
is_xdist_controller,
)
from xdist._version import version as __version__
__all__ = [
"__version__",
"is_xdist_worker",
"is_xdist_master",
"is_xdist_controller",
"get_xdist_worker_id",
]
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="len", parent_name="volume.colorbar", **kwargs):
super(LenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
# predict functions for predict.py
# Danny Olesh 22.11.2021
# Resources used:
# Study learning notes and code from the course
# https://pytorch.org/
# Udacity deeplearning pytorch help
# Self study and experimentation using ATOM in an Anaconda3 environment
# Edited code snippets for certain Network definitions https://github.com/silviomori/udacity-deeplearning-pytorch-challenge-lab
#################################################
# imports #
#################################################
import sys
import os
from os.path import exists
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision.transforms.functional as TF
from torch.utils.data.dataset import Subset
from collections import OrderedDict
from PIL import Image
#################################################
# Get args and process #
#################################################
#get all the options required (after the first 3: predict.py /path/to/image /path/to/checkpoint)
def getOptions(args=sys.argv[3:]):
parser = argparse.ArgumentParser(description="Parses command.")
parser.add_argument("--top_k", "--top_k", type=int, help="Return top KK most likely classes", default=3)
parser.add_argument("--catagory_names", "--catagory_names", help="Use a mapping of categories to real names(json file).", default="cat_to_name.json")
parser.add_argument("--gpu", dest='gpu',action='store_false', help="GPU mode True or False.", default=True )
options = parser.parse_args(args)
if options.gpu: #assign GPU if true
print("GPU mode is ON if available")
else:
print("GPU mode is OFF")
    if exists(options.catagory_names): #check the category names file exists
        print('Category names :', options.catagory_names)
    else:
        print('Category names file cannot be found, please enter a valid name')
        sys.exit(2)
    if 1 <= options.top_k <= 100: #check top_k is in the valid range
print('The number of classes to compare :', options.top_k)
else:
print('top K needs to be a number between 1 and 100')
sys.exit(2)
return options
#################################################
# Process an image #
#################################################
def process_image(image, normalize_mean, normalize_std):
# Process a PIL image for use in a PyTorch model
image = TF.resize(image, 256)
upper_pixel = (image.height - 224) // 2
left_pixel = (image.width - 224) // 2
image = TF.crop(image, upper_pixel, left_pixel, 224, 224)
image = TF.to_tensor(image)
image = TF.normalize(image, normalize_mean, normalize_std)
return image
#################################################
# Predictions #
#################################################
def predict(image_path, model_ft, topk,normalize_mean, normalize_std, device):
# Implement the code to predict the class from an image file
image = Image.open(image_path)
image = process_image(image, normalize_mean, normalize_std)
with torch.no_grad():
model_ft.eval()
image = image.view(1,3,224,224)
image = image.to(device)
predictions = model_ft.forward(image)
predictions = torch.exp(predictions)
top_ps, top_class = predictions.topk(topk, dim=1)
return top_ps, top_class
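# Hedged usage sketch (an addition, not in the original helpers): a thin wrapper that
# calls predict() above with the ImageNet normalisation statistics commonly used for
# torchvision models; whether the loaded checkpoint was trained with these exact
# values is an assumption.
def example_predict(image_path, model_ft, device, topk=3):
    normalize_mean = [0.485, 0.456, 0.406]
    normalize_std = [0.229, 0.224, 0.225]
    return predict(image_path, model_ft, topk, normalize_mean, normalize_std, device)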
|
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import pkgutil
from typing import List
import requests
from airbyte_protocol import AirbyteCatalog, ConnectorSpecification
from base_python_test import StandardSourceTestIface
class GoogleAnalyticsStandardSourceTest(StandardSourceTestIface):
def get_spec(self) -> ConnectorSpecification:
raw_spec = pkgutil.get_data(self.__class__.__module__.split(".")[0], "spec.json")
return ConnectorSpecification.parse_obj(json.loads(raw_spec))
def get_config(self) -> object:
return json.loads(pkgutil.get_data(self.__class__.__module__.split(".")[0], "config.json"))
def get_catalog(self) -> AirbyteCatalog:
raw_spec = pkgutil.get_data(self.__class__.__module__.split(".")[0], "test_catalog.json")
return AirbyteCatalog.parse_obj(json.loads(raw_spec))
# send a page view to GA using a URL constructed with
# the documentation from https://developers.google.com/analytics/devguides/collection/protocol/v1/devguide#page
# and the hit builder at https://ga-dev-tools.appspot.com/hit-builder/
# and converted into Python
def setup(self) -> None:
tracker = pkgutil.get_data(self.__class__.__module__.split(".")[0], "tracker.txt").strip()
url = "https://www.google-analytics.com/collect"
payload = {"v": "1", "t": "pageview", "tid": tracker, "cid": "555", "dh": "mydemo.com", "dp": "/home5", "dt": "homepage"}
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"origin": "https://ga-dev-tools.appspot.com",
"referer": "https://ga-dev-tools.appspot.com/",
}
requests.post(url, data=payload, headers=headers)
def get_regex_tests(self) -> List[str]:
return [
"(.*)RECORD(.*)website_overview(.*)",
"(.*)RECORD(.*)traffic_sources(.*)",
"(.*)RECORD(.*)pages(.*)",
"(.*)RECORD(.*)locations(.*)",
"(.*)RECORD(.*)monthly_active_users(.*)",
"(.*)RECORD(.*)four_weekly_active_users(.*)",
"(.*)RECORD(.*)two_weekly_active_users(.*)",
"(.*)RECORD(.*)weekly_active_users(.*)",
"(.*)RECORD(.*)daily_active_users(.*)",
"(.*)RECORD(.*)devices(.*)",
]
def teardown(self) -> None:
pass
# todo: add regexes that must match and regexes that must not match
|
#The is_positive function should return True if the number received is positive and False if it isn't.
#Can you fill in the gaps to make that happen?
def is_positive(number):
if number > 0:
return True
else:
return False
|
import os
class Base(object):
"""
Base object designed for providing generic functionality.
The Base object has the functionality to set title,
set working directories and make directories.
Attributes
----------
title : str
Title of the object.
cwdir : str
Current working directory.
"""
def __init__(self, title="Unnamed"):
""" Create a new Base object."""
self._title = title
self._cwdir = os.path.join(os.getcwd(), '')
def set_title(self, title):
"""
Set a new title for a curve.
Parameters
-------
title : str
Title of the object.
"""
self._title = str(title).strip()
return
def get_title(self):
"""
Returns a title of a curve.
Returns
-------
out : str
Title of the object.
"""
return self._title
def set_cwdir(self, path):
"""
Set path for current working directory.
Parameters
----------
path : str
Path of current working directory.
"""
self._cwdir = os.path.normpath(os.path.join(os.getcwd(), path, ''))
return
def get_cwdir(self):
"""
Get path for current working directory.
Returns
-------
out : str
Path of current working directory.
"""
return self._cwdir
@staticmethod
def _mkdir(dir_name):
"""
Create a directory.
Parameters
----------
dir_name : str
Name of the directory.
"""
if os.path.isdir(dir_name):
print("Such folder already exists: {name}".format(name=dir_name))
return
else:
os.mkdir(dir_name)
return
|
# Copyright 2020-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
def main():
settings = get_settings_from_env()
server = server_factory(**settings)
server.serve_forever()
def get_settings_from_env(controller_port=None,
visualization_server_image=None, frontend_image=None,
visualization_server_tag=None, frontend_tag=None, disable_istio_sidecar=None,
minio_access_key=None, minio_secret_key=None, kfp_default_pipeline_root=None):
"""
Returns a dict of settings from environment variables relevant to the controller
Environment settings can be overridden by passing them here as arguments.
Settings are pulled from the all-caps version of the setting name. The
following defaults are used if those environment variables are not set
to enable backwards compatibility with previous versions of this script:
visualization_server_image: gcr.io/ml-pipeline/visualization-server
visualization_server_tag: value of KFP_VERSION environment variable
frontend_image: gcr.io/ml-pipeline/frontend
frontend_tag: value of KFP_VERSION environment variable
disable_istio_sidecar: Required (no default)
minio_access_key: Required (no default)
minio_secret_key: Required (no default)
"""
settings = dict()
settings["controller_port"] = \
controller_port or \
os.environ.get("CONTROLLER_PORT", "8080")
settings["visualization_server_image"] = \
visualization_server_image or \
os.environ.get("VISUALIZATION_SERVER_IMAGE", "gcr.io/ml-pipeline/visualization-server")
settings["frontend_image"] = \
frontend_image or \
os.environ.get("FRONTEND_IMAGE", "gcr.io/ml-pipeline/frontend")
# Look for specific tags for each image first, falling back to
# previously used KFP_VERSION environment variable for backwards
# compatibility
settings["visualization_server_tag"] = \
visualization_server_tag or \
os.environ.get("VISUALIZATION_SERVER_TAG") or \
os.environ["KFP_VERSION"]
settings["frontend_tag"] = \
frontend_tag or \
os.environ.get("FRONTEND_TAG") or \
os.environ["KFP_VERSION"]
settings["disable_istio_sidecar"] = \
disable_istio_sidecar if disable_istio_sidecar is not None \
else os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
settings["minio_access_key"] = \
minio_access_key or \
base64.b64encode(bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
settings["minio_secret_key"] = \
minio_secret_key or \
base64.b64encode(bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
# KFP_DEFAULT_PIPELINE_ROOT is optional
settings["kfp_default_pipeline_root"] = \
kfp_default_pipeline_root or \
os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
return settings
def server_factory(visualization_server_image,
visualization_server_tag, frontend_image, frontend_tag,
disable_istio_sidecar, minio_access_key,
minio_secret_key, kfp_default_pipeline_root=None,
url="", controller_port=8080):
"""
Returns an HTTPServer populated with Handler with customized settings
"""
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
desired_configmap_count = 1
desired_resources = []
if kfp_default_pipeline_root:
desired_configmap_count = 2
desired_resources += [{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "kfp-launcher",
"namespace": namespace,
},
"data": {
"defaultPipelineRoot": kfp_default_pipeline_root,
},
}]
# Compute status based on observed state.
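# Note: the `and ... or` chain below yields the string "True" only when every
# expected child resource has been observed, and "False" otherwise.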
desired_status = {
"kubeflow-pipelines-ready":
len(children["Secret.v1"]) == 1 and
len(children["ConfigMap.v1"]) == desired_configmap_count and
len(children["Deployment.apps/v1"]) == 2 and
len(children["Service.v1"]) == 2 and
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and
len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and
"True" or "False"
}
# Generate the desired child object(s).
desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image": f"{visualization_server_image}:{visualization_server_tag}",
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
"resources": {
"requests": {
"cpu": "50m",
"memory": "200Mi"
},
"limits": {
"cpu": "500m",
"memory": "1Gi"
},
}
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "security.istio.io/v1beta1",
"kind": "AuthorizationPolicy",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
}
},
"rules": [{
"from": [{
"source": {
"principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
}
}]
}]
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image": f"{frontend_image}:{frontend_tag}",
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}],
"env": [
{
"name": "MINIO_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "accesskey",
"name": "mlpipeline-minio-artifact"
}
}
},
{
"name": "MINIO_SECRET_KEY",
"valueFrom": {
"secretKeyRef": {
"key": "secretkey",
"name": "mlpipeline-minio-artifact"
}
}
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "70Mi"
},
"limits": {
"cpu": "100m",
"memory": "500Mi"
},
}
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:\n', json.dumps(parent, sort_keys=True))
print('Desired resources except secrets:\n', json.dumps(desired_resources, sort_keys=True))
# The Secret is appended after the prints above so that sensitive data is never logged.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": minio_access_key,
"secretkey": minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
return HTTPServer((url, int(controller_port)), Controller)
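# Illustrative local-run sketch (not shipped with the controller). All values below are
# placeholders, not Kubeflow defaults; when the MinIO keys are passed as arguments they
# must already be base64-encoded, because the encoding step only applies to env vars.
def run_local_example():  # pragma: no cover
    settings = get_settings_from_env(
        visualization_server_tag='x.y.z',
        frontend_tag='x.y.z',
        disable_istio_sidecar=True,
        minio_access_key='bWluaW8=',
        minio_secret_key='bWluaW8xMjM=',
    )
    # Returns an HTTPServer that serves sync() as a JSON webhook on POST /.
    return server_factory(**settings)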
if __name__ == "__main__":
main()
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
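# Minimal sketch (illustrative, run outside Django): the *_INPUT_FORMATS above are plain
# strptime patterns, so a Welsh-style date string can be parsed with the standard library.
if __name__ == '__main__':
    from datetime import datetime
    for fmt in DATE_INPUT_FORMATS:
        try:
            print(datetime.strptime('25/10/2006', fmt).date())  # -> 2006-10-25
            break
        except ValueError:
            continue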
|
'''
A custom Keras layer to generate anchor boxes.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.layers import InputSpec,Layer
from bounding_box_utils.bounding_box_utils import convert_coordinates
class AnchorBoxes(Layer):
'''
A Keras layer to create an output tensor containing anchor box coordinates
and variances based on the input tensor and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of
the input tensor. The number of anchor boxes created per unit depends on the arguments
`aspect_ratios` and `two_boxes_for_ar1`; in the default case it is 4. The boxes
are parameterized by the coordinate tuple `(xmin, xmax, ymin, ymax)`.
The logic implemented by this layer is identical to the logic in the module
`ssd_box_encode_decode_utils.py`.
The purpose of having this layer in the network is to make the model self-sufficient
at inference time. Since the model is predicting offsets to the anchor boxes
(rather than predicting absolute box coordinates directly), one needs to know the anchor
box coordinates in order to construct the final prediction boxes from the predicted offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary
information to convert the predicted offsets back to absolute coordinates would be missing
in the model output. The reason why it is necessary to predict offsets to the anchor boxes
rather than to predict absolute box coordinates directly is explained in `README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
5D tensor of shape `(batch, height, width, n_boxes, 8)`. The last axis contains
the four anchor box coordinates and the four variance values for each box.
'''
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=[0.5, 1.0, 2.0],
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
coords='centroids',
normalize_coords=False,
**kwargs):
'''
All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is undefined.
Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes
as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
aspect_ratios (list, optional): The list of aspect ratios for which default boxes are to be
generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
the geometric mean of that scaling factor and the next bigger scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
its respective variance value.
coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the input format
of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height),
'corners' for the format `(xmin, ymin, xmax, ymax)`, or 'minmax' for the format `(xmin, xmax, ymin, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
'''
if K.backend() != 'tensorflow':
raise TypeError("This layer only supports TensorFlow at the moment, but you are using the {} backend.".format(K.backend()))
if (this_scale < 0) or (next_scale < 0) or (this_scale > 1):
raise ValueError("`this_scale` must be in [0, 1] and `next_scale` must be >0, but `this_scale` == {}, `next_scale` == {}".format(this_scale, next_scale))
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
self.img_height = img_height
self.img_width = img_width
self.this_scale = this_scale
self.next_scale = next_scale
self.aspect_ratios = aspect_ratios
self.two_boxes_for_ar1 = two_boxes_for_ar1
self.this_steps = this_steps
self.this_offsets = this_offsets
self.clip_boxes = clip_boxes
self.variances = variances
self.coords = coords
self.normalize_coords = normalize_coords
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios) + 1
else:
self.n_boxes = len(aspect_ratios)
super(AnchorBoxes, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
super(AnchorBoxes, self).build(input_shape)
def call(self, x, mask=None):
'''
Return an anchor box tensor based on the shape of the input tensor.
The logic implemented here is identical to the logic in the module `ssd_box_encode_decode_utils.py`.
Note that this tensor does not participate in any graph computations at runtime. It is being created
as a constant once during graph creation and is just being output along with the rest of the model output
during runtime. Because of this, all logic is implemented as Numpy array operations and it is sufficient
to convert the resulting Numpy array into a Keras tensor at the very end before outputting it.
Arguments:
x (tensor): 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. The input for this
layer must be the output of the localization predictor layer.
'''
# Compute box width and height for each aspect ratio
# The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in self.aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = self.this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale value and the next.
box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size
wh_list.append((box_width, box_height))
else:
box_height = self.this_scale * size / np.sqrt(ar)
box_width = self.this_scale * size * np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
# We need the shape of the input tensor
if K.image_data_format() == 'channels_last':
batch_size, feature_map_height, feature_map_width, feature_map_channels = x.shape
else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future
batch_size, feature_map_channels, feature_map_height, feature_map_width = x.shape
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes, i.e. how far apart the anchor box center points will be vertically and horizontally.
if (self.this_steps is None):
step_height = self.img_height / feature_map_height
step_width = self.img_width / feature_map_width
else:
if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2):
step_height = self.this_steps[0]
step_width = self.this_steps[1]
elif isinstance(self.this_steps, (int, float)):
step_height = self.this_steps
step_width = self.this_steps
# Compute the offsets, i.e. at what pixel values the first anchor box center point will be from the top and from the left of the image.
if (self.this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2):
offset_height = self.this_offsets[0]
offset_width = self.this_offsets[1]
elif isinstance(self.this_offsets, (int, float)):
offset_height = self.this_offsets
offset_width = self.this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height, (offset_height + feature_map_height - 1) * step_height, feature_map_height)
cx = np.linspace(offset_width * step_width, (offset_width + feature_map_width - 1) * step_width, feature_map_width)
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, xmax, ymin, ymax)`
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:,:,:,[0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:,:,:,[0, 2]] = x_coords
y_coords = boxes_tensor[:,:,:,[1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:,:,:,[1, 3]] = y_coords
# If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth.
if self.coords == 'centroids':
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids', border_pixels='half')
elif self.coords == 'minmax':
# Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax', border_pixels='half')
# Create a tensor to contain the variances and append it to `boxes_tensor`. This tensor has the same shape
# as `boxes_tensor` and simply contains the same 4 variance values for every position in the last axis.
variances_tensor = np.zeros_like(boxes_tensor) # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)`
variances_tensor += self.variances # Long live broadcasting
# Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)`
boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)
# Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it along the batch dimension.
# The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)`
boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
boxes_tensor = K.tile(K.constant(boxes_tensor, dtype='float32'), (K.shape(x)[0], 1, 1, 1, 1))
return boxes_tensor
def compute_output_shape(self, input_shape):
if K.image_data_format() == 'channels_last':
batch_size, feature_map_height, feature_map_width, feature_map_channels = input_shape
else: # Not yet relevant since TensorFlow is the only supported backend right now, but it can't harm to have this in here for the future
batch_size, feature_map_channels, feature_map_height, feature_map_width = input_shape
return (batch_size, feature_map_height, feature_map_width, self.n_boxes, 8)
def get_config(self):
config = {
'img_height': self.img_height,
'img_width': self.img_width,
'this_scale': self.this_scale,
'next_scale': self.next_scale,
'aspect_ratios': list(self.aspect_ratios),
'two_boxes_for_ar1': self.two_boxes_for_ar1,
'clip_boxes': self.clip_boxes,
'variances': list(self.variances),
'coords': self.coords,
'normalize_coords': self.normalize_coords
}
base_config = super(AnchorBoxes, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
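# Minimal numeric sketch (not part of the layer) of the per-aspect-ratio box sizing done
# in `call()` above; the image size and scales below are assumed example values.
if __name__ == '__main__':
    img_height, img_width = 300, 300
    this_scale, next_scale = 0.2, 0.37
    size = min(img_height, img_width)
    wh_list = []
    for ar in [0.5, 1.0, 2.0]:
        if ar == 1.0:
            wh_list.append((this_scale * size, this_scale * size))
            # Second box for aspect ratio 1: geometric mean of this scale and the next.
            s = np.sqrt(this_scale * next_scale) * size
            wh_list.append((s, s))
        else:
            wh_list.append((this_scale * size * np.sqrt(ar), this_scale * size / np.sqrt(ar)))
    print(np.array(wh_list))  # four (w, h) pairs, matching n_boxes == 4 in the default case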
|
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateRuleResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'rule_id': 'str',
'name': 'str',
'description': 'str',
'condition_group': 'ConditionGroup',
'actions': 'list[RuleAction]',
'rule_type': 'str',
'status': 'str',
'app_id': 'str',
'edge_node_ids': 'list[str]',
'last_update_time': 'str'
}
attribute_map = {
'rule_id': 'rule_id',
'name': 'name',
'description': 'description',
'condition_group': 'condition_group',
'actions': 'actions',
'rule_type': 'rule_type',
'status': 'status',
'app_id': 'app_id',
'edge_node_ids': 'edge_node_ids',
'last_update_time': 'last_update_time'
}
def __init__(self, rule_id=None, name=None, description=None, condition_group=None, actions=None, rule_type=None, status=None, app_id=None, edge_node_ids=None, last_update_time=None):
"""UpdateRuleResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._rule_id = None
self._name = None
self._description = None
self._condition_group = None
self._actions = None
self._rule_type = None
self._status = None
self._app_id = None
self._edge_node_ids = None
self._last_update_time = None
self.discriminator = None
if rule_id is not None:
self.rule_id = rule_id
if name is not None:
self.name = name
if description is not None:
self.description = description
if condition_group is not None:
self.condition_group = condition_group
if actions is not None:
self.actions = actions
if rule_type is not None:
self.rule_type = rule_type
if status is not None:
self.status = status
if app_id is not None:
self.app_id = app_id
if edge_node_ids is not None:
self.edge_node_ids = edge_node_ids
if last_update_time is not None:
self.last_update_time = last_update_time
@property
def rule_id(self):
"""Gets the rule_id of this UpdateRuleResponse.
Rule ID.
:return: The rule_id of this UpdateRuleResponse.
:rtype: str
"""
return self._rule_id
@rule_id.setter
def rule_id(self, rule_id):
"""Sets the rule_id of this UpdateRuleResponse.
Rule ID.
:param rule_id: The rule_id of this UpdateRuleResponse.
:type: str
"""
self._rule_id = rule_id
@property
def name(self):
"""Gets the name of this UpdateRuleResponse.
Rule name.
:return: The name of this UpdateRuleResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this UpdateRuleResponse.
Rule name.
:param name: The name of this UpdateRuleResponse.
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this UpdateRuleResponse.
Description of the rule.
:return: The description of this UpdateRuleResponse.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this UpdateRuleResponse.
Description of the rule.
:param description: The description of this UpdateRuleResponse.
:type: str
"""
self._description = description
@property
def condition_group(self):
"""Gets the condition_group of this UpdateRuleResponse.
:return: The condition_group of this UpdateRuleResponse.
:rtype: ConditionGroup
"""
return self._condition_group
@condition_group.setter
def condition_group(self, condition_group):
"""Sets the condition_group of this UpdateRuleResponse.
:param condition_group: The condition_group of this UpdateRuleResponse.
:type: ConditionGroup
"""
self._condition_group = condition_group
@property
def actions(self):
"""Gets the actions of this UpdateRuleResponse.
List of actions for the rule. A single rule supports at most 10 actions.
:return: The actions of this UpdateRuleResponse.
:rtype: list[RuleAction]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this UpdateRuleResponse.
List of actions for the rule. A single rule supports at most 10 actions.
:param actions: The actions of this UpdateRuleResponse.
:type: list[RuleAction]
"""
self._actions = actions
@property
def rule_type(self):
"""Gets the rule_type of this UpdateRuleResponse.
Rule type. - DEVICE_LINKAGE: device linkage. - DATA_FORWARDING: data forwarding. - EDGE: edge-side rule.
:return: The rule_type of this UpdateRuleResponse.
:rtype: str
"""
return self._rule_type
@rule_type.setter
def rule_type(self, rule_type):
"""Sets the rule_type of this UpdateRuleResponse.
Rule type. - DEVICE_LINKAGE: device linkage. - DATA_FORWARDING: data forwarding. - EDGE: edge-side rule.
:param rule_type: The rule_type of this UpdateRuleResponse.
:type: str
"""
self._rule_type = rule_type
@property
def status(self):
"""Gets the status of this UpdateRuleResponse.
Status of the rule. Default: active. - active: activated. - inactive: not activated.
:return: The status of this UpdateRuleResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this UpdateRuleResponse.
Status of the rule. Default: active. - active: activated. - inactive: not activated.
:param status: The status of this UpdateRuleResponse.
:type: str
"""
self._status = status
@property
def app_id(self):
"""Gets the app_id of this UpdateRuleResponse.
Resource space ID. This parameter is optional. Users with multiple resource spaces are advised to pass it to specify which resource space the created rule belongs to; otherwise the rule is assigned to the [default resource space](https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0).
:return: The app_id of this UpdateRuleResponse.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this UpdateRuleResponse.
Resource space ID. This parameter is optional. Users with multiple resource spaces are advised to pass it to specify which resource space the created rule belongs to; otherwise the rule is assigned to the [default resource space](https://support.huaweicloud.com/usermanual-iothub/iot_01_0006.html#section0).
:param app_id: The app_id of this UpdateRuleResponse.
:type: str
"""
self._app_id = app_id
@property
def edge_node_ids(self):
"""Gets the edge_node_ids of this UpdateRuleResponse.
List of IDs of the edge node devices the rule belongs to.
:return: The edge_node_ids of this UpdateRuleResponse.
:rtype: list[str]
"""
return self._edge_node_ids
@edge_node_ids.setter
def edge_node_ids(self, edge_node_ids):
"""Sets the edge_node_ids of this UpdateRuleResponse.
List of IDs of the edge node devices the rule belongs to.
:param edge_node_ids: The edge_node_ids of this UpdateRuleResponse.
:type: list[str]
"""
self._edge_node_ids = edge_node_ids
@property
def last_update_time(self):
"""Gets the last_update_time of this UpdateRuleResponse.
Time when the rule was last updated, in the UTC time zone, in the format yyyyMMdd'T'HHmmss'Z'.
:return: The last_update_time of this UpdateRuleResponse.
:rtype: str
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""Sets the last_update_time of this UpdateRuleResponse.
Time when the rule was last updated, in the UTC time zone, in the format yyyyMMdd'T'HHmmss'Z'.
:param last_update_time: The last_update_time of this UpdateRuleResponse.
:type: str
"""
self._last_update_time = last_update_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateRuleResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
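# Illustrative sketch only: real instances are deserialized by the SDK from the HTTP
# response, but the model can also be built by hand, e.g. for tests. Field values below
# are made up.
if __name__ == '__main__':
    resp = UpdateRuleResponse(rule_id='rule-001', name='demo-rule', status='active')
    print(resp.to_str())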
|
""" Some useful functions to port a model from lasagne to tensorflow.
* Lasagne uses the format BCHW, while tensorflow uses BHWC
(B = batch_size, C = channels, H = height, W = width)
* By default, lasagne uses convolution, while tensorflow implements
cross-correlation (convolution is equivalent to cross-correlation with flipped filters)
Here we define some functions to change the filters from one format to the other
"""
import numpy as np
class copy_initializer:
def __init__(self, value_to_copy):
self.value_to_copy = value_to_copy
def __call__(self, shape, **kwargs):
expected_shape = list(shape)
actual_shape = list(self.value_to_copy.shape)
assert actual_shape == expected_shape, 'Invalid shape for initializer. Expected: %s. Given: %s.' % (expected_shape, actual_shape)
return self.value_to_copy
class flipping_copy_initializer (copy_initializer):
def __init__(self, value_to_copy):
v = np.transpose(value_to_copy, [2,3,1,0])
v = v[::-1, ::-1, :, :]  # flip the spatial dimensions (convolution vs. cross-correlation filters)
self.value_to_copy = v
class transpose_copy_initializer (copy_initializer):
def __init__(self, value_to_copy):
v = np.transpose(value_to_copy, [2,3,1,0])
self.value_to_copy = v
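# Minimal sketch: convert an (assumed) Lasagne-style filter bank of shape
# (out_channels, in_channels, height, width) into the TensorFlow layout
# (height, width, in_channels, out_channels), with and without filter flipping.
if __name__ == '__main__':
    lasagne_filters = np.arange(2 * 3 * 5 * 5, dtype=np.float32).reshape(2, 3, 5, 5)
    tf_shape = (5, 5, 3, 2)
    flipped = flipping_copy_initializer(lasagne_filters)(tf_shape)
    transposed = transpose_copy_initializer(lasagne_filters)(tf_shape)
    print(flipped.shape, transposed.shape)  # both (5, 5, 3, 2)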
|
# -*- coding: utf-8 -*-
"""Test atomic values from expression Parser"""
from inspect import isclass
import locale
import math
from json import JSONDecodeError
import pytest
import lark.exceptions
from lark_expr import Expression
from methods import list_methods
# pylint: disable=attribute-defined-outside-init
class exactly(object):
"""An identity match with type"""
def __init__(self, value):
"""init"""
self.value = value
def __eq__(self, other):
"""exact comparision"""
if self.value != other:
return False
if self.value.__class__ != other.__class__:
return False
return True
def __repr__(self):
"""repr"""
return repr(self.value)
def __str__(self):
"""str"""
return str(self.value)
class nearly(object):
"""An identity match with type"""
def __init__(self, value):
"""init"""
self.value = value
def __eq__(self, other):
"""exact comparision"""
if isinstance(self.value, float) and isinstance(other, float):
if not math.isclose(self.value, other):
return False
elif self.value != other:
return False
if self.value.__class__ != other.__class__:
return False
return True
def __repr__(self):
"""repr"""
return repr(self.value)
def __str__(self):
"""str"""
return str(self.value)
class sametype(object):
"""Match any value of same type"""
def __init__(self, value):
"""init"""
self.value = value
def __eq__(self, other):
"""exact comparision"""
if type(other) == type(self.value): # pylint: disable=C0123
return True
return False
def __repr__(self):
"""repr"""
return repr(self.value)
def __str__(self):
"""str"""
return str(self.value)
class SkipIf(object):
"""Skip test if subexpression fails"""
def __init__(self, value, skip_condition):
"""init"""
self.value = value
self.skip_condition = skip_condition
def check_skip(self):
"""check_skip"""
if callable(self.skip_condition):
try:
self.skip_condition()
except Exception:
pytest.skip('Unsupported test')
return self.value
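# Quick illustration (not used by pytest) of the matcher semantics defined above.
if __name__ == '__main__':
    assert exactly(1.0) != 1            # same value but different type -> no match
    assert nearly(0.3) == 0.1 + 0.2     # floats are compared with math.isclose
    assert sametype('x') == 'anything'  # only the type has to agree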
# pylint: disable=line-too-long
FUNCTION_TESTS = [
('join(", ", "one", "two", "three")', 'one, two, three'),
('split("one,two,three", ",")', ['one', 'two', 'three']),
('copysign(1,-5)', -1),
('printf("%5.3f", 0.2)', '0.200'),
('abs(-5)', 5),
('factorial(5)', 120),
('len(1,2,3)', TypeError),
('len([1,2,3])', 3),
('len(name)', 4),
('ceil(.5)', 1),
('ceil(".5")', 1),
('sum(.1, .1, .1, .1, .1, .1, .1, .1, .1, .1)', exactly(1.0)),
('sum([.1, .1, .1, .1, .1, .1, .1, .1, .1, .1])', exactly(1.0)),
('sum(".1", ".1", ".1", ".1", ".1", ".1", ".1", ".1", ".1", ".1")', exactly(1.0)),
('gcd(24, 36)', 12),
('gcd(24, "a")', TypeError),
(
'locale_currency(5000.99, grouping=True)',
SkipIf('$5,000.99', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'asdf',
NameError(
'asdf',
),
),
('acos(1)', 0.0),
('acos(0)', 1.5707963267948966),
(
"acox('f')",
ValueError(
'Function acox not found',
),
),
(
"acos('f')",
TypeError(
'must be real number, not literal',
),
),
(
'acosh(0)',
ValueError(
'math domain error',
),
),
('acosh(1)', 0.0),
('acosh(2)', 1.3169578969248166),
(
"acosh('f')",
TypeError(
'must be real number, not literal',
),
),
('asin(0)', 0.0),
('asin(1)', 1.5707963267948966),
(
"asin('f')",
TypeError(
'must be real number, not literal',
),
),
(
'asinh(0))',
SyntaxError(
"Unexpected token Token(RPAR, ')') at line 1, column 9.",
),
),
('asinh(0)', 0.0),
('asinh(1)', 0.881373587019543),
('asinh(pi/2)', 1.233403117511217),
(
"asinh('f')",
TypeError(
'must be real number, not literal',
),
),
('atan(0)', 0.0),
('atan(1)', 0.7853981633974483),
('atan(pi)', 1.2626272556789115),
(
"atan('f')",
TypeError(
'must be real number, not literal',
),
),
('atanh(0)', 0.0),
(
'atanh(1)',
ValueError(
'math domain error',
),
),
(
"atanh('f')",
TypeError(
'must be real number, not literal',
),
),
(
'atanh(pi)',
ValueError(
'math domain error',
),
),
(
'b64encode(1)',
TypeError(
"a bytes-like object is required, not 'int'",
),
),
("b64encode('foo')", 'Zm9v'),
("b64decode('Zm9v')", 'foo'),
(
'b64decode()',
TypeError(
"f_b64decode() missing 1 required positional argument: 's'",
),
),
(
'b64decode(1)',
TypeError(
"argument should be a bytes-like object or ASCII string, not 'int'",
),
),
("bytes('a')", b'a'),
("bytes('a','ascii')", b'a'),
("center('a', 20)", ' a '),
("choice(False, 'True Choice', 'False Choice')", 'False Choice'),
("choice(True, 'True Choice', 'False Choice')", 'True Choice'),
("choice(True, 'True Choice')", 'True Choice'),
('choice(True)', None),
(
'choice()',
TypeError(
"f_choice() missing 1 required positional argument: 'condition'",
),
),
('chr(65)', 'A'),
(
"chr('x')",
TypeError(
'an integer is required (got type literal)',
),
),
(
'chr(-1)',
ValueError(
'chr() arg not in range(0x110000)',
),
),
('chr(257)', 'ā'),
('cos(0)', 1.0),
('cos(1)', 0.5403023058681398),
('cos(-1)', 0.5403023058681398),
(
"cos('x')",
TypeError(
'must be real number, not literal',
),
),
('cosh(0)', 1.0),
('cosh(1)', 1.5430806348152437),
(
"cosh('x')",
TypeError(
'must be real number, not literal',
),
),
("datetime('July 4, 1776')", '1776-07-04T00:00:00'),
('datetime(157900000)', '1975-01-02T13:06:40+00:00'),
(
"datetime('foo')",
RuntimeError(
'Could not format input (foo) to datetime string.',
),
),
('degrees(2*pi)', 360.0),
(
"degrees('f')",
TypeError(
'must be real number, not literal',
),
),
('erf(1)', 0.842700792949715),
(
"erf('x')",
TypeError(
'must be real number, not literal',
),
),
('erfc(1)', 0.157299207050285),
(
"erfc('x')",
TypeError(
'must be real number, not literal',
),
),
('exp(1)', 2.718281828459045),
('exp(2)', 7.38905609893065),
(
"exp('x')",
TypeError(
'must be real number, not literal',
),
),
('expm1(1)', 1.7182818284590453),
('expm1(0)', 0.0),
(
"expm1('x')",
TypeError(
'must be real number, not literal',
),
),
(
'find()',
TypeError(
"f_find() missing 2 required positional arguments: 'ob' and 'value'",
),
),
("find('fool', 'foo')", 0),
("find('I pity the foo', 'foo')", 11),
("find('I pity the foo', 'Foo')", -1),
(
"find('I pity the foo', 1)",
TypeError(
'must be str, not int',
),
),
(
'find(1, 1)',
AttributeError(
"'int' object has no attribute 'index'",
),
),
('flatten({})', {}),
("flatten({'a': 1'})", lark.exceptions.UnexpectedCharacters),
("flatten({'a': 1})", {'a': '1'}),
("flatten({'a': 1, 'b': {'erp': 0, 'ulp': 5}})", {'a': '1', 'b.erp': '0', 'b.ulp': '5'}),
('float(1)', 1.0),
('float(.1)', 0.1),
(
"float('x')",
ValueError(
"could not convert string to float: 'x'",
),
),
(
'format()',
TypeError(
"f_format() missing 1 required positional argument: 's'",
),
),
("format('a')", 'a'),
("format('{pi}')", '3.141592653589793'),
("format('{pi} / 2 = {f}', f=pi/2)", '3.141592653589793 / 2 = 1.5707963267948966'),
(
'gamma(0)',
ValueError(
'math domain error',
),
),
('gamma(1)', 1.0),
('gamma(e)', 1.5674682557740531),
(
"gamma('x')",
TypeError(
'must be real number, not literal',
),
),
(
'hypot(0)',
TypeError(
"missing a required argument: 'y'",
),
),
('hypot(0,1)', 1.0),
("index('I pity the foo', 'foo')", 11),
(
"index('I pity the foo', 'Foo')",
ValueError(
'substring not found',
),
),
('index((1,2,3), 2)', 1),
(
'index((1,2,3), 2, 4)',
ValueError(
'tuple.index(x): x not in tuple',
),
),
('int(1)', 1),
('int(1.1)', 1),
(
"int('1.1')",
ValueError(
"invalid literal for int() with base 10: '1.1'",
),
),
("int('1')", 1),
(
"int('foo')",
ValueError(
"invalid literal for int() with base 10: 'foo'",
),
),
(
'items()',
TypeError(
"f_items() missing 1 required positional argument: 'ob'",
),
),
(
"items('1')",
AttributeError(
"'literal' object has no attribute 'items'",
),
),
(
'items([1,2,3])',
AttributeError(
"'frozen_list' object has no attribute 'items'",
),
),
(
"items({'a'",
SyntaxError(
"Unexpected token Token($END, '') at line 1, column 8.",
),
),
("items({'a': 1, 'b': 1})", [('a', 1), ('b', 1)]),
(
"jmespath({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]})",
TypeError(
"f_jmespath() missing 1 required positional argument: 'ob'",
),
),
("jmespath('a', { 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", 1),
("jmespath('c', { 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", None),
("jmespath('b', { 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", [{'i': 0}, {'i': 1}]),
("jmespath('*.i', { 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", []),
("jmespath('b[].i', { 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", [0, 1]),
(
"json({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]})",
'{\n "a": 1,\n "b": [\n {\n "i": 0\n },\n {\n "i": 1\n }\n ]\n}',
),
(
'json()',
TypeError(
"f_json() missing 1 required positional argument: 'ob'",
),
),
("json('x')", '"x"'),
(
"json_dump({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]})",
'{\n "a": 1,\n "b": [\n {\n "i": 0\n },\n {\n "i": 1\n }\n ]\n}',
),
(
"json_dump({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]},indent=0)",
'{\n"a": 1,\n"b": [\n{\n"i": 0\n},\n{\n"i": 1\n}\n]\n}',
),
(
"json_load({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]})",
TypeError(
"the JSON object must be str, bytes or bytearray, not 'dict'",
),
),
('json_load("{ \'a\': 1, \'b\': [{\'i\': 0}, {\'i\': 1}]}")', JSONDecodeError),
(
'json_load("{ \'a\': 1, \'b\': [{\'i\': 0}, {\'i\': 1}]})',
lark.exceptions.UnexpectedCharacters,
),
('json_load(\'{ "a": 1, "b": [{"i": 0}, {"i": 1}]}\')', {'a': 1, 'b': [{'i': 0}, {'i': 1}]}),
("keys({ 'a': 1, 'b': [{'i': 0}, {'i': 1}]})", ['a', 'b']),
(
'lgamma(0)',
ValueError(
'math domain error',
),
),
('lgamma(1)', 0.0),
('lgamma(e)', 0.4494617418200675),
(
"locale_format('%5.3f', pi)",
SkipIf('3.142', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
"locale_format('%5.3f', pi*1000)",
SkipIf('3141.593', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
"locale_format('%5.3f', pi*1000, grouping=True)",
SkipIf('3,141.593', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
"locale_format('%5.3f', pi*1000, grouping=True, monetary=True)",
SkipIf('3,141.593', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'locale_currency(pi*1000)',
SkipIf('$3141.59', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'locale_currency(pi*1000, grouping=True)',
SkipIf('$3,141.59', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'locale_currency(pi*1000, symbol=False, grouping=True)',
SkipIf('3,141.59', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'locale_currency(pi*1000, grouping=True, international=True)',
SkipIf('USD 3,141.59', lambda: locale.setlocale(locale.LC_ALL, 'EN_us')),
),
(
'locale_currency(pi*1000, grouping=True, locale="ES_es")',
SkipIf('3.141,59 Eu', lambda: locale.setlocale(locale.LC_ALL, 'ES_es')),
),
(
'locale_currency(pi*1000, grouping=True, locale="EN_gb")',
SkipIf('£3,141.59', lambda: locale.setlocale(locale.LC_ALL, 'EN_gb')),
),
('log(10)', 2.302585092994046),
('log(10,2)', 3.3219280948873626),
('log(16,2)', 4.0),
('log10(100)', 2.0),
('log10(1000)', 3.0),
('log10(5)', 0.6989700043360189),
(
"log10('x')",
TypeError(
'must be real number, not literal',
),
),
('log1p(10)', 2.3978952727983707),
('log2(16)', 4.0),
('log2(10)', 3.321928094887362),
("lower('UPPER')", 'upper'),
("lstrip('foo')", 'foo'),
("lstrip(' foo')", 'foo'),
("lstrip(' foo ')", 'foo '),
(
'namevallist()',
TypeError(
"f_namevallist() missing 1 required positional argument: 'ob'",
),
),
(
'namevallist(pi)',
AttributeError(
"'float' object has no attribute 'items'",
),
),
("namevallist({'a': 1, 'b': 2})", [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]),
("ord('A')", 65),
(
'ord()',
TypeError(
"f_ord() missing 1 required positional argument: 'char'",
),
),
(
"ord('')",
TypeError(
'ord() expected a character, but string of length 0 found',
),
),
(
"ord('AB')",
TypeError(
'ord() expected a character, but string of length 2 found',
),
),
(
'pow(0)',
TypeError(
"missing a required argument: 'y'",
),
),
('pow(0,1)', 0.0),
('radians(360)', 6.283185307179586),
('radians(180)', 3.141592653589793),
(
"radians('x')",
TypeError(
'must be real number, not literal',
),
),
('range(0)', []),
(
'range()',
TypeError(
"f_range() missing 1 required positional argument: 'start_or_stop'",
),
),
('range(5)', [0, 1, 2, 3, 4]),
('range(5,1)', []),
('range(1, 5)', [1, 2, 3, 4]),
('range(5, 0, -1)', [5, 4, 3, 2, 1]),
("replace('foo', 'o', 'e')", 'fee'),
(
"replace('foo')",
TypeError(
"missing a required argument: 'source'",
),
),
("replace(5, 'o', 'e')", '5'),
("rstrip('foo')", 'foo'),
("rstrip(' foo ')", ' foo'),
("rstrip(' foo ')", ' foo'),
('sin(0)', 0.0),
('sin(1)', 0.8414709848078965),
('sin(pi)', 1.2246467991473532e-16),
('sinh(0)', 0.0),
('sinh(1)', 1.1752011936438014),
(
"sinh('x')",
TypeError(
'must be real number, not literal',
),
),
(
'sinh(1,2)',
TypeError(
'too many positional arguments',
),
),
('sort((1,2,3))', [1, 2, 3]),
('sort((3,2,1))', [1, 2, 3]),
("sort('abc')", ['abc']),
('sort(1,2,3)', [1, 2, 3]),
('sort(3,2,1)', [1, 2, 3]),
('sqrt(2)', 1.4142135623730951),
(
"sqrt('')",
TypeError(
'must be real number, not literal',
),
),
('str(pi)', '3.141592653589793'),
("strip(' foo ')", 'foo'),
('tan(0)', 0.0),
('tan(1)', 1.557407724654902),
(
"tan('x')",
TypeError(
'must be real number, not literal',
),
),
('tanh(0)', 0.0),
('tanh(1)', 0.7615941559557649),
(
"tanh('x')",
TypeError(
'must be real number, not literal',
),
),
(
"timedelta('July 4, 1776', 'July 4, 2020')",
{
'datetime_1': '1776-07-04T00:00:00',
'datetime_2': '2020-07-04T00:00:00',
'years': -244,
'months': 0,
'weeks': 0,
'days': 0,
'hours': 0,
'minutes': 0,
'seconds': 0,
'microseconds': 0,
'total_months': -2928,
'total_weeks': -24400,
'total_days': -89119,
'total_hours': -2138856,
'total_minutes': -128331360,
'total_seconds': -7699881600,
'total_microseconds': -7699881600000,
},
),
("title('foo')", 'Foo'),
("title('foo the first')", 'Foo The First'),
('trunc(1.1)', 1),
("upper('foo')", 'FOO'),
("values({'a': 1, 'b': 2})", [1, 2]),
(
"values('a')",
AttributeError(
"'literal' object has no attribute 'values'",
),
),
(
'values(1)',
AttributeError(
"'int' object has no attribute 'values'",
),
),
(
'values([1])',
AttributeError(
"'int' object has no attribute 'values'",
),
),
('min(1,2,3)', 1),
('min(3,2,1)', 1),
('min([3,2,1])', 1),
(
'min()3,2,1))',
SyntaxError(
"Unexpected token Token(INT, '3') at line 1, column 6.",
),
),
('min((3,2,1))', 1),
("min('wxyza')", 'a'),
("max('wxyza')", 'z'),
('max(1,2,3)', 3),
('max(3,2,1)', 3),
("min({'a': 'foo', 'b': 'bla'})", 'a'),
("max({'a': 'foo', 'b': 'bla'})", 'b'),
("pad('foo', 7)", 'foo '),
("pad('foo', 7, '*')", 'foo****'),
(
"pad('foo', 7, '**')",
ValueError(
'pad value must be string of length one',
),
),
("pad('foofoofoo', 7)", 'foofoofoo'),
("pad('foofoof', 7)", 'foofoof'),
("pad('foofoo', 7)", 'foofoo '),
('pad([1,2,3], 7)', [1, 2, 3, None, None, None, None]),
('pad([1,2,3,4,5,6,7], 7)', [1, 2, 3, 4, 5, 6, 7]),
('pad([1,2,3,4,5,6,7,8], 7)', [1, 2, 3, 4, 5, 6, 7, 8]),
(
"pad([1,2,3,4,5,6,7,8], 'a')",
TypeError(
'length must be integer',
),
),
("research('\\d{4}-\\d{2}-\\d{2}', '2017-07-01T16:18:19')", '2017-07-01'),
("rematch('\\d{4}-\\d{2}-\\d{2}', '2017-07-01T16:18:19')", '2017-07-01'),
('pformat(1)', '1'),
("pformat('foo')", "'foo'"),
("pformat({'a': 1, 'b': {'c': 2}})", "{'a': 1, 'b': {'c': 2}}"),
('csvread("a,b,c,1,2,3.5")', ['a', 'b', 'c', 1, 2, 3.5]),
(
'csvread("a,b,c,1,2,3.5\\na,b,c",convert=False)',
[['a', 'b', 'c', '1', '2', '3.5'], ['a', 'b', 'c']],
),
('csvread("a,b,c,1,2,3.5\\na,b,c",rows=1)', ['a', 'b', 'c', 1, 2, 3.5]),
('csvread("a,b,c,1,2,3.5\\na,b,c",rows=1,columns=3)', ['a', 'b', 'c']),
('csvread("a,b,c,1,2,3.5\\nx,y,z",rows=1,columns=3,header=True)', ['x', 'y', 'z']),
(
'csvwrite(["Mary had a little lamb, whose fleece was white as snow", 1, 2, 3.5])',
'"Mary had a little lamb, whose fleece was white as snow",1,2,3.5\r\n',
),
(
'csvwrite(["Mary had a little lamb, whose fleece was white as snow", 1, 2, 3.5], '
'delimiter=\'|\')',
'Mary had a little lamb, whose fleece was white as snow|1|2|3.5\r\n',
),
("md5('foo')", 'acbd18db4cc2f85cedef654fccc4a4d8'),
("md5(bytes('foo', 'utf-8'))", 'acbd18db4cc2f85cedef654fccc4a4d8'),
("md5(['foo', 'bla'])", 'fff5d68e6fd4f50ab7d7668481c534aa'),
("sha1('foo')", '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'),
("sha256('foo')", '2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'),
('sha256(None)', 'dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91'),
("sha256('')", 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'),
("fuzzydist(spamsum('foo '*512 + ' bla'), spamsum('foo '*513))", 2),
("fuzzyhash('foo bla blerg woot growl bark')", 'NQFu2URFUKSJ9Ee'),
("fuzzyhash('foo bla blerg woot Growl bark')", 'NQFu2URF0Ge'),
("fuzzydist('NQFu2URFUKSJ9Ee', 'NQFu2URF0Ge')", 8),
(
"refindall(urlre, 'this contains www.foo.com https://www.foo.com/bla/oog as a url"
" and foo.bla.org/path?param=f&z=y and 1.2.3.4 and matt@domain.com')",
['www.foo.com', 'https://www.foo.com/bla/oog', 'foo.bla.org/path?param=f&z=y'],
),
(
"urlparse('https://foo.com/path/to/html?query=None#fragment')",
('https', 'foo.com', '/path/to/html', '', 'query=None', 'fragment'),
),
("urlparse_qs('foo=bla&bla=oog').foo[0]", 'bla'),
('unique((1,2,3,4,5,2,1,{"foo": "bla"}))', [1, 2, 3, 4, 5, {'foo': 'bla'}]),
("fuzzymatch('bla'+'foo'*63, 'foo'*63+'bla')", 98.17708333333333),
("fuzzymatch('foo bla zoid', 'foo bla21 blerg')", 74.07407407407408),
("fuzzymatch('foo bla zoid', 'foo bla21 blerg')>75", False),
('unnest((1,2,3,(4,5,6,(7,8,9))))', [1, 2, 3, 4, 5, 6, 7, 8, 9]),
("unnest((1,2,3,(4,5,6,(7,8,9),{'foo':'bla'})))", [1, 2, 3, 4, 5, 6, 7, 8, 9, {'foo': 'bla'}]),
("unnest('abcd')", 'abcd'),
('unnest(1)', 1),
(
'unnest(1,2,3)',
TypeError(
'f_unnest() takes 1 positional argument but 3 were given',
),
),
('twoscompliment(160)', 160),
('twoscompliment(-160)', -4294967456),
('twoscompliment(-160,64)', -18446744073709551776),
('hex(-4294967456)', '-1000000a0'),
('hex(-4294967456, False)', '1000000a0'),
("int('100000a0', 16)", 268435616),
("int('1000000a0', 16)", 4294967456),
("int('-1000000a0', 16)", -4294967456),
('bin(2)', '10'),
('bin(3)', '11'),
('bin(-3)', '-11'),
('bin(5)', '101'),
('bin(-5)', '-101'),
("binary('foo')", b'foo'),
("bytes('foo')", b'foo'),
("str(bytes('foo'))", 'foo'),
("conform([{'a': 1}, {'b': 2}])", [{'a': 1, 'b': None}, {'b': 2, 'a': None}]),
("conform([{'a': 1}, {'b': 2}], missing_value='')", [{'a': 1, 'b': ''}, {'b': 2, 'a': ''}]),
("prune(conform([{'a': 1}, {'b': 2}], missing_value=''))", [{'a': 1}, {'b': 2}]),
('structure(12345)', 'int'),
("structure('12345')", 'int'),
("structure(datetime('now'))", 'iso8601'),
("structure(md5('now'))", 'md5'),
("structure(sha1('now'))", 'sha1'),
("structure(sha256('now'))", 'sha256'),
("update({'a': 1}, {'b': 2})", {'a': 1, 'b': 2}),
(
'rexxparse(\'Mary had a little lamb\', \'name . "had a" . thing\')',
{'name': 'Mary', 'thing': 'lamb'},
),
(
'rexxparse(\'Mary had a little lamb\', \'name . "had a " thing\')',
{'name': 'Mary', 'thing': 'little lamb'},
),
(
'rexxparse(\'Mary had a little lamb\', \'name . "had a " size thing\')',
{'name': 'Mary', 'size': 'little', 'thing': 'lamb'},
),
(
'rexxparse(\'Mary had a little lamb\', \'name . "had a " size thing 1 phrase\')',
{'name': 'Mary', 'size': 'little', 'thing': 'lamb', 'phrase': 'Mary had a little lamb'},
),
(
'rexxparse(\'Mary had a little lamb\', \'name . "had a " size thing 1 phrase 1 name +4\')',
{'name': 'Mary', 'size': 'little', 'thing': 'lamb', 'phrase': 'Mary had a little lamb'},
),
(
(
'rexxparse(\'127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
'"GET /apache_pb.gif '
'HTTP/1.0" 200 2326\', "ip id user \' [\' date \'] \\"\'method url '
'protocol\'\\" \' status size")'
),
{
'ip': '127.0.0.1',
'id': '-',
'user': 'frank',
'date': '10/Oct/2000:13:55:36 -0700',
'method': 'GET',
'url': '/apache_pb.gif',
'protocol': 'HTTP/1.0',
'status': '200',
'size': '2326',
},
),
(
'rexxparse(\'Mary had a little lamb\', \'"little " chop +2 -2 animal +3\')',
{'chop': 'li', 'animal': 'lit'},
),
(
'rexxparse(\'Mary had a little lamb\', \'"little " chop +2 -2 animal +3 1 phrase\' )',
{'chop': 'li', 'animal': 'lit', 'phrase': 'Mary had a little lamb'},
),
("uuid3('dns', 'mtu.edu')", '93ea5ad7-ae2d-3509-bbbc-958b90bfe336'),
("uuid5('dns', 'mtu.edu')", 'b796a2f3-fcde-53a1-9123-e11e6c8f3216'),
('uuid4()', sametype('b796a2f3-fcde-53a1-9123-e11e6c8f3216')),
("structure(uuid4()) == 'guid'", True),
(
"xmlread('<people><person><name>Matt</name><job>Developer</job></person></people>',"
'compact=True)',
{'people': {'person': {'Matt': 'Developer'}}},
),
(
"xmlwrite({'people': {'person': {'Matt': 'Developer'}}})",
'<people><person><Matt>Developer</Matt></person></people>',
),
(
"xmlread('<people><person><name>Matt</name><job>Developer</job></person></people>', "
'compact=False)',
{'people': [{'person': [{'name': 'Matt'}, {'job': 'Developer'}]}]},
),
(
"xmlwrite({'people': [{'person': [{'name': 'Matt'}, {'job': 'Developer'}]}]})",
'<people><person><name>Matt</name><job>Developer</job></person></people>',
),
('chardet(kosme).encoding', 'utf-8'),
(
'indicator_patterns()',
RuntimeError(
'TCEX not initialized, cannot retrieve patterns',
),
),
("fang('user@threatconnect.com')", 'user@threatconnect.com'),
("defang('user@threatconnect.com')", 'user(at)threatconnect[.]com'),
("fang('user(at)threatconnect[.]com')", 'user@threatconnect.com'),
(
"extract_indicators('ASN1721 is whack but ASN1271 is not')",
RuntimeError(
'TCEX not initialized, cannot retrieve patterns',
),
),
(
"fetch_indicators('ASN1271', default_type='ASN')",
RuntimeError(
'TCEX not initialized, cannot retrieve indicators',
),
),
(
'indicator_types()',
RuntimeError(
'TCEX not initialized, cannot retrieve types',
),
),
(
"pivot(('a', 'b', 'c'), (1,2,3), (1.0, 2.0, 3.0))",
TypeError(
'f_pivot() takes from 1 to 2 positional arguments but 3 were given',
),
),
(
"pivot((('a', 'b', 'c'), (1,2,3), (1.0, 2.0, 3.0)))",
[['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]],
),
(
"pivot((('a', 'b', 'c'), (1,2,3), (1.0, 2.0)))",
[['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, None]],
),
(
"pivot((('a', 'b', 'c'), (1,), (1.0, 2.0)))",
[['a', 1, 1.0], ['b', None, 2.0], ['c', None, None]],
),
(
"pivot((('a', 'b', 'c'), [], (1.0, 2.0)))",
[['a', None, 1.0], ['b', None, 2.0], ['c', None, None]],
),
(
"pivot((('a', 'b', 'c'), [], (1.0, 2.0)), pad='')",
[['a', '', 1.0], ['b', '', 2.0], ['c', '', '']],
),
(
"build((1,2,3), ('a', 'b', 'c'), keys=('number', 'letter'))",
[{'number': 1, 'letter': 'a'}, {'number': 2, 'letter': 'b'}, {'number': 3, 'letter': 'c'}],
),
(
"build((1,2,3), ('a', 'b', 'c'), keys=('number', 'letter', 'extra'))",
[{'number': 1, 'letter': 'a'}, {'number': 2, 'letter': 'b'}, {'number': 3, 'letter': 'c'}],
),
(
"build((1,2,3), ('a', 'b'), keys=('number', 'letter', 'extra'))",
[{'number': 1, 'letter': 'a'}, {'number': 2, 'letter': 'b'}],
),
(
"build((1,2,3), ('a', 'b', 'c'), keys=('number',))",
[{'number': 1}, {'number': 2}, {'number': 3}],
),
(
"update([{'number': 1}, {'number': 2}, {'number': 3}], {'foo': 'bla'})",
[{'number': 1, 'foo': 'bla'}, {'number': 2, 'foo': 'bla'}, {'number': 3, 'foo': 'bla'}],
),
(
"update([{'number': 1}, {'number': 2}, {'number': 3}], {'foo': 'bla', 'number': 0})",
[{'number': 0, 'foo': 'bla'}, {'number': 0, 'foo': 'bla'}, {'number': 0, 'foo': 'bla'}],
),
(
"update([{'number': 1}, {'number': 2}, {'number': 3}], {'foo': 'bla', 'number': 0}, "
'replace=False)',
[{'number': 1, 'foo': 'bla'}, {'number': 2, 'foo': 'bla'}, {'number': 3, 'foo': 'bla'}],
),
(
"update([{'number': 1}, {'number': 2}, {'number': 3}, 'foo'], {'foo': 'bla'})",
TypeError(
'update must work on dictionaries or lists of dictionaries',
),
),
("merge((1,2,3), ('a','b','c'))", [[1, 'a'], [2, 'b'], [3, 'c']]),
("merge((1,2,3), ('a','b','c', 'd'))", [[1, 'a'], [2, 'b'], [3, 'c']]),
(
"merge(({'a': 1}, {'a': 2}, {'a': 3}), ({'b': 1}, {'b': 2}, {'b': 3}))",
[{'a': 1, 'b': 1}, {'a': 2, 'b': 2}, {'a': 3, 'b': 3}],
),
(
"merge(({'a': 1}, {'a': 2}, {'a': 3}), ({'b': 1}, {'b': 2}, {'b': 3, 'a': 0}))",
[{'a': 1, 'b': 1}, {'a': 2, 'b': 2}, {'a': 3, 'b': 3}],
),
(
"merge(({'a': 1}, {'a': 2}, {'a': 3}), ({'b': 1}, {'b': 2}, {'b': 3, 'a': 0}), "
'replace=True)',
[{'a': 1, 'b': 1}, {'a': 2, 'b': 2}, {'a': 0, 'b': 3}],
),
("alter({}, 'a', 1)", 1),
(
"report( ( ('a', 'b', 'c'), (1, 'bollux', 3), ('foo', 'bla', 'rumplestiltskin') ), header=True, title='Report', width=20)",
'\n\n Report \n ------ \n\nA B C \n--- ------ ---------\n1 bollux 3 \nfoo bla rum- \n plestilt-\n skin ',
),
(
"report( ( ('a', 'b', 'c'), (1, 'bollux', 3), ('foo', 'bla', 'rumplestiltskin') ), header=True, title='Report', width=80)",
'\n\n Report \n ------ \n\nA B C \n--- ------ --------------- \n1 bollux 3 \nfoo bla rumplestiltskin',
),
(
"report( ( ('a', 'b', 'c'), (1, 'bollux', 3), ('foo', 'bla', 'rumplestiltskin') ), header=True, title='Report', width=80, prolog='Report Prolog', epilog='Report Epilog')",
'\n\n Report \n ------ \n\n\nReport Prolog \n\nA B C \n--- ------ --------------- \n1 bollux 3 \nfoo bla rumplestiltskin\n\nReport Epilog \n',
),
('dict(one=1, two=2)', {'one': 1, 'two': 2}),
(
"kvlist( { 'name': 'Foo', 'value': 'Foo Value'}, {'name': 'Bla', 'value': 'Bla Value'})",
TypeError(
'dictlist must be a list of dictionaries',
),
),
(
"kvlist(({ 'key': 'Foo', 'value': 'Foo Value'}, {'key': 'Bla', 'value': 'Bla Value'}))",
{'Foo': 'Foo Value', 'Bla': 'Bla Value'},
),
]
class TestFunctions(object):
"""Test atomic values"""
def setup_class(self):
"""setup"""
self.expr = Expression()
self.expr.set('name', 'Matt')
self.expr.set('one', 1)
self.expr.set('two', 2.0)
self.expr.set('self', self)
self.expr.set('kosme', bytes('κόσμε', 'utf-8'))
true = True
@pytest.mark.parametrize('expression,result', FUNCTION_TESTS)
def test_atom(self, expression, result):
"""test atomic values"""
if isinstance(result, SkipIf):
result = result.check_skip()
if isinstance(result, float):
result = nearly(result)
if isclass(result) and issubclass(result, Exception):
with pytest.raises(result):
value = self.expr.eval(expression)
elif isinstance(result, Exception):
with pytest.raises(result.__class__):
value = self.expr.eval(expression)
result = result.args[0]
assert value == result, f'{expression} == {result}'
else:
value = self.expr.eval(expression)
assert value == result, f'{expression} == {result}'
@staticmethod
def test_list_methods():
"""test_list_methods"""
methods = list_methods()
tested = {}
scanning = False
for method in methods.split('\n'):
if method:
method = method.strip()
if method == '## Functions':
scanning = True
continue
if not scanning:
continue
if not method:
continue
if method.startswith('#'):
continue
if method.startswith('* '):
method = method[3:-1]
else:
continue
name = method.strip().split('(')[0].strip()
tested[name] = False
for test, _ in FUNCTION_TESTS:
method = test.split('(')[0].strip()
if method in tested:
tested[method] = True
missing = []
for method, checked in tested.items():
if checked:
continue
missing.append(method)
missing.sort()
missing_tests = ', '.join(missing)
assert not missing, f'Missing tests for {missing_tests}'
|
"""
Django settings for commerce project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6ps8j!crjgrxt34cqbqn7x&b3y%(fny8k8nh21+qa)%ws3fh!q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'auctions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'commerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'auctions.context_processors.get_watchlist',
],
},
},
]
WSGI_APPLICATION = 'commerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_USER_MODEL = 'auctions.User'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
|
from pathlib import Path
from aiohttp import web
from aiohttp_apiset import SwaggerRouter
def test_app(loop, swagger_router):
app = web.Application(loop=loop)
swagger_router.setup(app)
def test_search_dirs():
d = Path(__file__).parent
r = SwaggerRouter(d / 'data/include.yaml')
r.add_search_dir(d)
def test_merge_spec():
d = Path(__file__).parent
r = SwaggerRouter(d / 'data/include.yaml', search_dirs=[d])
r.include('data/file.yaml', basePath='/inc')
def test_routes(swagger_router: SwaggerRouter):
paths = [route.url_for().human_repr()
for route in swagger_router.routes()]
assert '/api/1/file/image' in paths
def test_route_include(swagger_router: SwaggerRouter):
paths = [route.url_for().human_repr()
for route in swagger_router.routes()]
assert '/api/1/include2/inc/image' in paths, paths
def test_handler(swagger_router: SwaggerRouter):
paths = [(route.method, route.url_for().human_repr())
for route in swagger_router.routes()]
assert ('GET', '/api/1/include/image') in paths
async def test_cbv_handler_get(client, swagger_router):
url = swagger_router['file:simple:view'].url()
res = await client.get(url)
assert (await res.text()) == 'simple handler get'
async def test_cbv_handler_post(client, swagger_router):
url = swagger_router['file:simple:view'].url()
res = await client.post(url)
assert (await res.text()) == 'simple handler post'
def test_override_basePath(loop):
router = SwaggerRouter(search_dirs=['tests'])
web.Application(router=router, loop=loop)
prefix = '/override'
router.include('data/root.yaml', basePath=prefix)
paths = [url for url in [
route.url_for().human_repr()
for route in router.routes()
] if url.startswith(prefix)]
assert prefix in router._swagger_data
assert paths
def test_Path():
base = Path(__file__).parent
router = SwaggerRouter(
search_dirs=[base],
swagger_ui=False,
)
spec = base / 'data/root.yaml'
router.include(spec)
assert router._swagger_data
|
from . import VecEnvWrapper
from baselines.common.running_mean_std import RunningMeanStd
import numpy as np
class VecNormalize(VecEnvWrapper):
"""
A vectorized wrapper that normalizes the observations
and returns from an environment.
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8, training=True):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
self.training = training
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
if self.training:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
if self.training:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def _deobfilt(self, obs):
return obs # For now return it directly
def reset(self):
obs = self.venv.reset()
return self._obfilt(obs)
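# Illustrative sketch (not part of baselines): the observation filter above
# standardizes observations with running statistics and clips the result. The
# helper below restates that arithmetic with plain numpy so the formula is easy
# to sanity-check; the sample numbers in the comment are made up.
def _obfilt_sketch(obs, mean, var, clipob=10., epsilon=1e-8):
    """Mirror of _obfilt: (obs - mean) / sqrt(var + epsilon), clipped to +/- clipob."""
    return np.clip((obs - mean) / np.sqrt(var + epsilon), -clipob, clipob)
# e.g. _obfilt_sketch(np.array([1.0, 2.0]), mean=0.5, var=4.0) ≈ array([0.25, 0.75])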
|
#Imagine you're writing the software for an inventory system for
#a store. Part of the software needs to check to see if inputted
#product codes are valid.
#
#A product code is valid if all of the following conditions are
#true:
#
# - The length of the product code is a multiple of 4. It could
# be 4, 8, 12, 16, 20, etc. characters long.
# - Every character in the product code is either an uppercase
# character or a numeral. No lowercase letters or punctuation
# marks are permitted.
# - The character sequence "A1" appears somewhere in the
# product code.
#
#Write a function called valid_product_code. valid_product_code
#should have one parameter, a string. It should return True if
#the string is a valid product code, and False if it is not.
# capital letter = 65 - 90
# numbers = 48 - 57
# Add your code here!
def valid_product_code( code ):
code_array = list( map( str, code ))
test = 0
if "A1" in code:
if len(code) % 4 == 0:
for i in code_array:
if ord(i) <= 90 and ord(i) >=65:
pass
else:
if ord(i) <= 57 and ord(i) >= 48:
pass
else:
test += 1
else:
return False
else:
return False
if test > 0:
return False
else:
return True
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
# print: True, True, False, False, False
print(valid_product_code("A12B44BP"))
print(valid_product_code("BFDSAUSA98932RWEFOEWA9FEAA1DSFSF"))
print(valid_product_code("A1BBD5"))
print(valid_product_code("BDD5664S"))
print(valid_product_code("66aBSaA1fdsv"))
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow as tf
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
None,
gen_nn_ops.conv2d_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()),
gen_nn_ops.conv2d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
gen_nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()), None,
gen_nn_ops.conv2d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
gen_nn_ops.depthwise_conv2d_native_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")),
gen_nn_ops.depthwise_conv2d_native(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterGrad(op, grad):
return [
gen_nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")), None,
gen_nn_ops.depthwise_conv2d_native(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
None,
nn_ops.conv3d_backprop_filter_v2(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format), None,
nn_ops.conv3d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the softmax
output.
Returns:
gradient w.r.t the input to the softmax
"""
softmax = op.outputs[0]
sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
return (grad_softmax - sum_channels) * softmax
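# Illustrative helper (not part of TensorFlow): a plain-numpy restatement of the
# formula documented above, grad_x = (grad_softmax - sum(grad_softmax * softmax,
# axis=-1)) * softmax. It is only meant for sanity-checking _SoftmaxGrad on a
# small example and is never called by the registered gradient.
def _softmax_grad_reference(softmax, grad_softmax):
  """Numpy sketch of the softmax backward pass for [batch, dim] arrays."""
  import numpy as np  # local import so the module header above stays unchanged
  sum_channels = np.sum(grad_softmax * softmax, axis=-1, keepdims=True)
  return (grad_softmax - sum_channels) * softmax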
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
      log_softmax = input - log(sum(exp(input)))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient summed over the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad,
gen_nn_ops.bias_add_grad(
out_backprop=received_grad, data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:1]), bias_shape,
array_ops.ones_like(shape[2:])
], 0)
tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient summed over the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops.relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
elu_x = op.inputs[1]
return (gen_nn_ops.elu_grad(grad, elu_x),
array_ops.where(
elu_x < 0, grad * op.inputs[0], array_ops.zeros_like(elu_x)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
selu_x = op.inputs[1]
return (gen_nn_ops.selu_grad(grad, selu_x),
array_ops.where(
selu_x < 0., grad * op.inputs[0], array_ops.zeros_like(selu_x)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops.relu6_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6Grad")
def _Relu6GradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu6_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("LeakyRelu")
def _LeakyReluGrad(op, grad):
x = op.inputs[0]
alpha = op.get_attr("alpha")
return gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha)
@ops.RegisterGradient("LeakyReluGrad")
def _LeakyReluGradGrad(op, grad):
x = op.inputs[1]
alpha = op.get_attr("alpha")
return (gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops.elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
return gen_nn_ops.selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return grad * math_ops.sigmoid(op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
# Let:
# y = tf.nn.softplus(x)
# dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
# This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
dy, x = op.inputs
with ops.control_dependencies([grad]):
ddy = gen_nn_ops.softplus_grad(grad, x)
d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops.softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
logits = op.inputs[0]
if (grad_grad is not None and
not getattr(grad_grad, "_is_zeros_tensor", False)):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
# There is no gradient for the labels
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
logits = op.inputs[0]
if (grad_grad is not None and
not getattr(grad_grad, "_is_zeros_tensor", False)):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, None
class _DummyTensorManager:
def __init__(self):
self.dummy_tensor_dict = dict()
self.dummy_tensor_id = 0;
def get_tensor(self, shape):
#if shape not in self.dummy_tensor_dict.keys():
# self.dummy_tensor_dict[shape] \
# = tf.stop_gradient(tf.Variable(tf.zeros_like(shape), name=f"dummy_tensor{self.dummy_tensor_id}"))
# self.dummy_tensor_id += 1;
#return self.dummy_tensor_dict[shape]
self.dummy_tensor_id += 1;
return tf.stop_gradient(tf.Variable(tf.zeros_like(shape), name=f"dummy_tensor{self.dummy_tensor_id}"))
dummy_tensor_manager = _DummyTensorManager()
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
global dummy_tensor_manager
"""Gradient function for Conv2D."""
dilations = op.get_attr("dilations")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
explicit_paddings = op.get_attr("explicit_paddings")
use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
data_format = op.get_attr("data_format")
shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. gen_nn_ops functions take an
  # `explicit_paddings` parameter, but nn_ops functions do not. So if we were
# to use the nn_ops functions, we would have to convert `padding` and
# `explicit_paddings` into a single `padding` parameter, increasing overhead
# in Eager mode.
if ("FWD" in op.name):
dummy_input1 = dummy_tensor_manager.get_tensor(op.inputs[0])
dummy_input2 = dummy_tensor_manager.get_tensor(grad)
weight_grad_op = gen_nn_ops.conv2d_backprop_filter(
dummy_input1,
shape_1,
dummy_input2,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
name="Conv2DBackpropFilter")
input_grad_op = gen_nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
name="Conv2DBackpropInput")
fake_weight_grad_op = gen_nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
name="DummyConv2DBackpropFilter")
    # TODO JY: move zero_variables, dummy_input1 and dummy_input2 out to a separate Python file later
dummy_input3 = dummy_tensor_manager.get_tensor(fake_weight_grad_op)
fake_variable = tf.math.multiply(fake_weight_grad_op, dummy_input3, name="fake_variable")
fake_op = tf.math.add(weight_grad_op, fake_variable, name="fake_op")
return [input_grad_op, fake_op]
else:
return [
gen_nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format),
gen_nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
gen_nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")),
gen_nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [
nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [
gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias,
alpha, beta)
]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops.avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops.max_pool_grad(
op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
ksize = op.inputs[1]
strides = op.inputs[2]
return gen_nn_ops.max_pool_grad_v2(
op.inputs[0],
op.outputs[0],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
del unused_argmax_grad
return gen_nn_ops.max_pool_grad_with_argmax(
op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
include_batch_in_index=op.get_attr("include_batch_in_index"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
ksize = op.inputs[3]
strides = op.inputs[4]
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad_v2(
op.inputs[0],
op.inputs[1],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
return gen_nn_ops.fractional_max_pool_grad(
op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
def _BaseFusedBatchNormGrad(op, version, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
version: Integer indicating which version to use of the fused batch
norm gradient.
*grad: An argument list for tensors of gradients wrt the outputs
with grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
in freeze mode.
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon)) in training mode;
sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
in freeze mode.
grad_offset: gradient for offset, which is sum(grad_y) in training mode;
sum(grad_y) in freeze mode.
"""
x = op.inputs[0]
grad_y = grad[0]
scale = op.inputs[1]
epsilon = op.get_attr("epsilon")
data_format = op.get_attr("data_format")
is_training = op.get_attr("is_training")
if version == 2:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v3
elif version == 1:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v2
else:
grad_fun = gen_nn_ops.fused_batch_norm_grad
if is_training:
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": op.outputs[3],
"reserve_space_2": op.outputs[4],
"epsilon": epsilon,
"data_format": data_format,
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
dx, dscale, doffset, _, _ = grad_fun(**args)
else:
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
if data_format == b"NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
elif data_format == b"NCDHW":
x = array_ops.transpose(x, [0, 2, 3, 4, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 4, 1])
target_data_format = ("NHWC" if data_format in (b"NCHW",
b"NHWC") else "NDHWC")
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": pop_mean,
"reserve_space_2": pop_var,
"epsilon": epsilon,
"data_format": target_data_format,
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
dx, dscale, doffset, _, _ = grad_fun(**args)
if data_format == b"NCHW":
dx = array_ops.transpose(dx, [0, 3, 1, 2])
elif data_format == b"NCDHW":
dx = array_ops.transpose(dx, [0, 4, 1, 2, 3])
return dx, dscale, doffset, None, None
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
return _BaseFusedBatchNormGrad(op, 0, *grad)
@ops.RegisterGradient("FusedBatchNormV2")
def _FusedBatchNormV2Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 1, *grad)
@ops.RegisterGradient("FusedBatchNormV3")
def _FusedBatchNormV3Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 2, *grad)
def _BatchNormGrad(grad_y,
x,
scale,
pop_mean,
pop_var,
epsilon,
data_format,
is_training=True):
"""Returns the gradients for the 3 inputs of BatchNorm.
Args:
grad_y: A `Tensor` of 4 or 5 dimensions for gradient for y.
x: A `Tensor` of 4 or 5 dimensions for x.
scale: A `Tensor` of 1 dimension for scaling.
pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
is_training=False.
pop_var: A `Tensor` of 1 dimension for the population variance. Only used
when is_training=False.
epsilon: A small float number added to the variance of x.
data_format: The data format for input. Either b"NHWC" or b"NCHW".
is_training: A bool value to indicate the operation is for training
(default) or inference.
Returns:
A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
for x, grad_scale the gradient for scale, and grad_offset the gradient
for offset.
"""
x_dtype = x.dtype.base_dtype
if x_dtype == dtypes.float16:
# float16 math is too imprecise, so we do the batch norm gradient
# computations in float32.
x = math_ops.cast(x, dtypes.float32)
grad_y = math_ops.cast(grad_y, dtypes.float32)
if is_training:
if data_format == b"NHWC":
keepdims = False
reduce_axis = [0, 1, 2]
elif data_format == b"NDHWC":
keepdims = False
reduce_axis = [0, 1, 2, 3]
elif data_format == b"NCHW":
keepdims = True
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(scale), 1, 1]
scale = array_ops.reshape(scale, shape)
else:
keepdims = True
reduce_axis = [0, 2, 3, 4]
shape = [1, array_ops.size(scale), 1, 1, 1]
scale = array_ops.reshape(scale, shape)
mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
var_x = math_ops.reduce_mean(
math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
reduce_axis,
keepdims=keepdims)
grad_y_offset = grad_y - mean_grad_y
x_offset = x - mean_x
mean = math_ops.reduce_mean(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
if data_format == b"NCHW" or data_format == b"NCDHW":
grad_scale = array_ops.squeeze(grad_scale)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
else:
if data_format == b"NHWC":
reduce_axis = [0, 1, 2]
elif data_format == b"NDHWC":
reduce_axis = [0, 1, 2, 3]
elif data_format == b"NCHW":
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(pop_mean), 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
else:
reduce_axis = [0, 2, 3, 4]
shape = [1, array_ops.size(pop_mean), 1, 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
grad_scale = math_ops.reduce_sum(
grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
grad_x = grad_y * scale * var_rsqrt
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
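# Illustrative helper (not part of TensorFlow): a compact numpy restatement of
# the training-mode grad_x formula documented in _BatchNormGrad, assuming an
# NHWC-style layout where `scale` broadcasts over the last (channel) axis and
# `reduce_axis` is a tuple such as (0, 1, 2). Useful only for spot-checking.
def _batch_norm_grad_x_reference(grad_y, x, scale, epsilon, reduce_axis):
  """Numpy sketch: scale * rsqrt(var + eps) * (grad_y_offset - mean_prod * x_offset / (var + eps))."""
  import numpy as np  # local import; the module header above stays unchanged
  mean_grad_y = np.mean(grad_y, axis=reduce_axis, keepdims=True)
  mean_x = np.mean(x, axis=reduce_axis, keepdims=True)
  x_offset = x - mean_x
  var_x = np.mean(x_offset ** 2, axis=reduce_axis, keepdims=True)
  mean_prod = np.mean(grad_y * x_offset, axis=reduce_axis, keepdims=True)
  return scale / np.sqrt(var_x + epsilon) * (
      (grad_y - mean_grad_y) - mean_prod / (var_x + epsilon) * x_offset)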
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
"""Returns the gradients for the 3 inputs of FusedBatchNormGrad.
Args:
op: The FusedBatchNormGradOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs with
grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as
grad_grad_offset.
Returns:
A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
is the gradient for grad_y, grad_x the gradient for x, grad_scale the
gradient for scale.
"""
data_format = op.get_attr("data_format")
epsilon = op.get_attr("epsilon")
is_training = op.get_attr("is_training")
grad_y = op.inputs[0]
x = op.inputs[1]
scale = op.inputs[2]
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
grad_grad_x = grad[0]
grad_grad_scale = grad[1]
grad_grad_offset = grad[2]
with backprop.GradientTape() as tape:
tape.watch(grad_y)
tape.watch(x)
tape.watch(scale)
grad_x, grad_scale, grad_offset = _BatchNormGrad(
grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
grad_grad_y, grad_x, grad_scale = tape.gradient(
[grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("FusedBatchNormGradV2")
def _FusedBatchNormGradGradV2(op, *grad):
return _FusedBatchNormGradGrad(op, *grad)
@ops.RegisterGradient("FusedBatchNormGradV3")
def _FusedBatchNormGradGradV3(op, *grad):
grad_grad_y, grad_x, grad_scale, _, _ = _FusedBatchNormGradGrad(op, *grad)
return grad_grad_y, grad_x, grad_scale, None, None, None
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
    A list of two tensors, the first being the gradient w.r.t. the input of
    TopK, and the second being the gradient w.r.t. the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
# int32 is not supported on GPU hence up-casting
ind_lastdim = array_ops.gather(
math_ops.cast(ind_shape, dtypes.int64),
array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(
math_ops.cast(in_shape, dtypes.int64),
array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(
ind_2d + math_ops.cast(
array_ops.expand_dims(
math_ops.range(0,
math_ops.cast(outerdim, dtypes.int64) * in_lastdim,
in_lastdim), -1), dtypes.int32), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [
array_ops.reshape(
array_ops.scatter_nd(
array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]),
[math_ops.reduce_prod(in_shape)]), in_shape),
array_ops.zeros([], dtype=dtypes.int32)
]
@ops.RegisterGradient("NthElement")
def _NthElementGrad(op, grad):
"""Return the gradients for NthElement.
Args:
op: The NthElementOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the NthElementOp
Returns:
A list of two tensors, the first being the gradient w.r.t. the input,
the second being the gradient w.r.t. the N (None).
"""
input = op.inputs[0] # pylint: disable=redefined-builtin
output = op.outputs[0]
# Compute the number of elements which equal to output in each reduction
# dimension. If there are multiple elements then the gradient will be
# divided between them.
indicators = math_ops.cast(
math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
grad = array_ops.expand_dims(grad, -1)
num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
return [math_ops.divide(indicators, num_selected) * grad, None]
def _MeanAggregator(inputs, segments):
"""Replaces each segment with its mean along the last axis.
Specifically, each value in the `inputs` tensor gets replaced by the mean
value computed from the values that belong to the same segment.
Args:
inputs: A 2-tensor. Aggregation is done over dimension 1.
    segments: A 2-tensor, same shape as `inputs`.
Returns:
The result, same shape and type as `inputs`.
"""
result = []
for inputs_i, segments_i in zip(
array_ops.split(inputs, inputs.shape[0]),
array_ops.split(segments, segments.shape[0])):
# Note that we do not use tf.math.segment_mean, as it has no TPU support.
means_i = math_ops.unsorted_segment_mean(
inputs_i, segments_i, num_segments=math_ops.reduce_max(segments_i) + 1)
result.append(
array_ops.reshape(array_ops.gather(means_i, segments_i), [-1]))
return array_ops.stack(result, axis=0)
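# Illustrative sketch (not used by the gradients below): the same "replace each
# value with the mean of its segment" idea for a single 1-D row in plain numpy,
# assuming segment ids are contiguous integers 0..K-1 as produced by the op.
def _mean_aggregator_row_sketch(values, segments):
  import numpy as np  # local import; the module header above stays unchanged
  values = np.asarray(values, dtype=float)
  segments = np.asarray(segments)
  means = np.array([values[segments == s].mean() for s in range(segments.max() + 1)])
  return means[segments]
# e.g. _mean_aggregator_row_sketch([1., 2., 3., 4.], [0, 0, 1, 1]) -> [1.5, 1.5, 3.5, 3.5]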
# We have to register the gradients for these ops so that tensorflow will know
# how to differentiate them.
@ops.RegisterGradient("IsotonicRegression")
def _IsotonicRegressionGrad(op, grad_output, grad_segments):
"""Gradient for the isotonic regression function.
Args:
op: The IsotonicRegression tensorflow op.
grad_output: Tensor of incoming gradients with respect to the output.
grad_segments: Tensor of incoming gradients with respect to the segments.
Returns:
A tensor, same size as `grad_output` with the gradient with respect to
the input.
"""
del grad_segments # Discrete, non-differentiable.
segments = op.outputs[1]
return _MeanAggregator(grad_output, segments)
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Load dependencies
import numpy as np
import pandas as pd
from uncertainties import ufloat
from uncertainties import unumpy
# # Biomass C content estimation
#
# Biomass is presented in the paper on a dry-weight basis. As part of the biomass calculation, we converted biomass from a carbon-weight basis to a dry-weight basis by multiplying by a conversion factor.
#
# ## Conversion factor calculation
#
# The conversion factor was calculated based on C content estimates of the different plant compartments (leaves, stems and roots) of different biomes, from [Tang et al.](https://doi.org/10.1073/pnas.1700295114) (units: (mg/g)).
# In[2]:
# Upload C content data from Tang et al., units [mg/g]
c_content = pd.read_excel("C_content_Tang.xlsx")
c_content
# In[3]:
# Save parameters to unumpy arrays
cleaf = unumpy.uarray(list(c_content['leaf']), list(c_content['leaf std']))
cstem = unumpy.uarray(list(c_content['stem'].fillna(0)), list(c_content['stem std'].fillna(0)))
croot = unumpy.uarray(list(c_content['root']), list(c_content['root std']))
# For each biome, we calculate the weighted average C content according to the mass fraction of each plant compartment. Information on plant compartmental mass composition was obtained from [Poorter et al.](https://nph.onlinelibrary.wiley.com/doi/full/10.1111/j.1469-8137.2011.03952.x).
# In[4]:
# Upload compartmental mass composition, from Poorter et al., classified according to Tang et al. biomes
compart_comp = pd.read_excel("compartment_comp_Poorter.xlsx")
compart_comp
# In[5]:
# Save parameters to unumpy arrays
fleaf = unumpy.uarray(list(compart_comp['leaf']), list(compart_comp['leaf std']))
fstem = unumpy.uarray(list(compart_comp['stem'].fillna(0)), list(compart_comp['stem std'].fillna(0)))
froot = unumpy.uarray(list(compart_comp['root']), list(compart_comp['root std']))
# In[6]:
# Calculate the weighted average for each biome
cbiome = (cleaf*fleaf)+(cstem*fstem)+(croot*froot)
# Next, we calculate the plant conversion factor by weighting each biome according to its mass fraction, using the biome masses derived from [Erb et al.](https://doi.org/10.1038/nature25138).
# In[7]:
# Upload biomes biomass, from Erb et al., classified according to Tang et al. biomes
mbiome = pd.read_excel('biome_mass_Erb.xlsx')
mbiome
# In[8]:
# Save to unumpy array
mbiomes = unumpy.uarray(list(mbiome['biomass [Gt C]']), list(mbiome['biomass std']))
# Calculate the overall conversion factor
cplants_factor = 1000/np.sum((cbiome* (mbiomes/np.sum(mbiomes))))
# In the overall carbon-weight to dry-weight conversion factor, we also accounted for the C content of non-plant biomass, which was based on estimates from [Heldal et al.](https://aem.asm.org/content/50/5/1251.short) and [von Stockar](https://www.sciencedirect.com/science/article/pii/S0005272899000651). We used the current estimate of the non-plant biomass fraction - about 10% of the total biomass, according to [Bar-On et al.](https://doi.org/10.1073/pnas.1711842115) and [updates](https://doi.org/10.1038/s41561-018-0221-6).
# In[9]:
# Upload non plant C content data, units [g/g]
cnon_plant = pd.read_excel('C_content_non_plant.xlsx')
cnon_plant
# In[10]:
# Calculate conversion factors
cnon_plant_factor = ufloat(np.average(cnon_plant['C content']), np.std(cnon_plant['C content'], ddof=1))
cfactor = (cplants_factor*0.9) + (0.1*(1/cnon_plant_factor))
cfactor
print('Our best estimate of the C content conversion factor is: ' + "%.2f" % (cfactor.n) + ', with uncertainty (±1 standard deviation): ' + "%.2f" % (cfactor.s))
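# As a quick illustration of the arithmetic above (made-up numbers, not the
# Tang/Poorter/Erb data): the plant factor is 1000 divided by the biome-mass-
# weighted mean C content in mg/g, and the final factor blends plants (90%)
# with non-plant biomass (10%).
example_c_biome = np.array([450.0, 480.0])     # hypothetical C content per biome [mg/g]
example_biome_mass = np.array([300.0, 150.0])  # hypothetical biome biomass [Gt C]
example_plant_factor = 1000 / np.sum(example_c_biome * (example_biome_mass / np.sum(example_biome_mass)))
example_non_plant_factor = 1 / 0.5             # hypothetical non-plant C content of 0.5 g/g
example_factor = 0.9 * example_plant_factor + 0.1 * example_non_plant_factor  # ~2.2 g dry weight per g C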
|
from core.advbase import *
class Cecile(Adv):
def prerun(self):
self.manachew_gauge = 0
self.manachew_mode = ModeManager(
group="manachew",
buffs=[Selfbuff("manachew_defense", 0.2, -1, "defense", "passive"), Selfbuff("manachew_sd", 0.1, -1, "s", "passive"), Selfbuff("manachew_sp", 0.08, -1, "sp", "passive")],
fs=True,
s1=True,
s2=True,
duration=20,
pause=("s", "dragon"),
)
Event("dragon_end").listener(self.dshift_manachew_gauge)
def a1_update(self, gauge):
if self.manachew_mode.get():
max_add = self.manachew_mode.duration - self.manachew_mode.timeleft()
add_time = min(gauge / 10000 * self.manachew_mode.duration, max_add)
self.manachew_mode.add_time(add_time)
else:
self.manachew_gauge += gauge
            if self.manachew_gauge >= 10000:
                self.manachew_mode.on()
                self.manachew_gauge = 0
def dshift_manachew_gauge(self, e):
self.a1_update(5000)
def hitattr_make(self, name, base, group, aseq, attr, onhit=None, dtype=None):
self.a1_update(attr.get("cp", 0))
super().hitattr_make(name, base, group, aseq, attr, onhit=onhit, dtype=dtype)
def a1_add_manachew_time(self):
self.manachew_mode.add_time(2)
def a3_dodge_gauge_fill(self):
self.charge_p("a3", 0.2, target="s1")
self.charge_p("a3", 0.2, target="s2")
class Cecile_DODGE(Cecile):
def prerun(self):
super().prerun()
Event("dodge").listener(self.a1_a3_dodge)
self.comment = "enable a1/a3 dodge every 15 seconds"
def a1_a3_dodge(self, e):
if not self.is_set_cd("a1_a3", 15):
self.a3_dodge_gauge_fill()
if self.manachew_mode.get():
self.a1_add_manachew_time()
variants = {None: Cecile, "DODGE": Cecile_DODGE}
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum
class Extensions(enum.Enum):
__order__ = ('AUTHORITY_KEY_IDENTIFIER SUBJECT_KEY_IDENTIFIER '
'AUTHORITY_INFORMATION_ACCESS BASIC_CONSTRAINTS '
'CRL_DISTRIBUTION_POINTS CERTIFICATE_POLICIES '
'EXTENDED_KEY_USAGE OCSP_NO_CHECK INHIBIT_ANY_POLICY '
'KEY_USAGE NAME_CONSTRAINTS SUBJECT_ALTERNATIVE_NAME '
'ISSUER_ALTERNATIVE_NAME')
AUTHORITY_KEY_IDENTIFIER = "authorityKeyIdentifier"
SUBJECT_KEY_IDENTIFIER = "subjectKeyIdentifier"
AUTHORITY_INFORMATION_ACCESS = "authorityInfoAccess"
BASIC_CONSTRAINTS = "basicConstraints"
CRL_DISTRIBUTION_POINTS = "cRLDistributionPoints"
CERTIFICATE_POLICIES = "certificatePolicies"
EXTENDED_KEY_USAGE = "extendedKeyUsage"
OCSP_NO_CHECK = "OCSPNoCheck"
INHIBIT_ANY_POLICY = "inhibitAnyPolicy"
KEY_USAGE = "keyUsage"
NAME_CONSTRAINTS = "nameConstraints"
SUBJECT_ALTERNATIVE_NAME = "subjectAltName"
ISSUER_ALTERNATIVE_NAME = "issuerAltName"
class KeyUsages(enum.Enum):
__order__ = ('DIGITAL_SIGNATURE CONTENT_COMMITMENT KEY_ENCIPHERMENT '
'DATA_ENCIPHERMENT KEY_AGREEMENT KEY_CERT_SIGN '
'CRL_SIGN ENCIPHER_ONLY DECIPHER_ONLY')
DIGITAL_SIGNATURE = ("Digital Signature", "digital_signature")
CONTENT_COMMITMENT = ("Non Repudiation", "content_commitment")
KEY_ENCIPHERMENT = ("Key Encipherment", "key_encipherment")
DATA_ENCIPHERMENT = ("Data Encipherment", "data_encipherment")
KEY_AGREEMENT = ("Key Agreement", "key_agreement")
KEY_CERT_SIGN = ("Certificate Sign", "key_cert_sign")
CRL_SIGN = ("CRL Sign", "crl_sign")
ENCIPHER_ONLY = ("Encipher Only", "encipher_only")
DECIPHER_ONLY = ("Decipher Only", "decipher_only")
|
# -*- coding: utf-8 -*-
"""
Based entirely on Django's own ``setup.py``.
"""
import os
import sys
import setuptools
from distutils.command.install import INSTALL_SCHEMES
from distutils.command.install_data import install_data
from setuptools import setup
try:
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = TestCommand.user_options[:] + [
('pytest-args=', 'a', "Arguments to pass into py.test"),
('exitfirst', 'x', "exit instantly on first error or failed test."),
('no-cov', 'C', "Disable coverage report completely"),
]
exitfirst = False
no_cov = False
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = 'tests django_extensions --ds=tests.testapp.settings --cov=django_extensions --cov-report html --cov-report term'
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
if self.exitfirst:
self.pytest_args += " -x"
if self.no_cov:
self.pytest_args += " --no-cov"
def run_tests(self):
import shlex
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
except ImportError:
PyTest = None
class osx_install_data(install_data):
# On MacOS, the platform-specific lib dir is at:
# /System/Library/Framework/Python/.../
# which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific
# fix for this in distutils.command.install_data#306. It fixes install_lib
# but not install_data, which is why we roll our own install_data class.
def finalize_options(self):
# By the time finalize_options is called, install.install_lib is set to
# the fixed directory, so we set the installdir to install_lib. The
# install_data class uses ('install_data', 'install_dir') instead.
self.set_undefined_options('install', ('install_lib', 'install_dir'))
install_data.finalize_options(self)
if sys.platform == "darwin":
cmdclasses = {'install_data': osx_install_data}
else:
cmdclasses = {'install_data': install_data}
if PyTest:
cmdclasses['test'] = PyTest
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
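# Quick illustration (hypothetical path, behaviour unchanged): on a POSIX-style
# path, fullsplit('django_extensions/management/commands') returns
# ['django_extensions', 'management', 'commands'], i.e. the inverse of
# os.path.join. Kept as a comment so nothing extra runs during installation.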
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, package_data = [], {}
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
extensions_dir = 'django_extensions'
for dirpath, dirnames, filenames in os.walk(extensions_dir):
# Ignore PEP 3147 cache dirs and those whose names start with '.'
dirnames[:] = [d for d in dirnames if not d.startswith('.') and d != '__pycache__']
parts = fullsplit(dirpath)
package_name = '.'.join(parts)
if '__init__.py' in filenames:
packages.append(package_name)
elif filenames:
relative_path = []
while '.'.join(parts) not in packages:
relative_path.append(parts.pop())
relative_path.reverse()
path = os.path.join(*relative_path)
package_files = package_data.setdefault('.'.join(parts), [])
package_files.extend([os.path.join(path, f) for f in filenames])
version = __import__('django_extensions').__version__
install_requires = ['six>=1.2']
extras_require = {}
if int(setuptools.__version__.split(".", 1)[0]) < 18:
assert "bdist_wheel" not in sys.argv, "setuptools 18 or later is required for wheels."
if sys.version_info[:2] < (3, 5):
install_requires.append('typing')
elif int(setuptools.__version__.split(".", 1)[0]) >= 36:
install_requires.append('typing;python_version<"3.5"')
else:
extras_require[":python_version<'3.5'"] = ["typing"]
long_description = """django-extensions bundles several useful
additions for Django projects. See the project page for more information:
http://github.com/django-extensions/django-extensions"""
if os.path.isfile("README.rst"):
with open("README.rst") as f:
long_description = f.read()
setup(
name='django-extensions',
version=version,
description="Extensions for Django",
long_description=long_description,
author='Michael Trier',
author_email='mtrier@gmail.com',
maintainer='Bas van Oostveen',
maintainer_email='v.oostveen@gmail.com',
url='http://github.com/django-extensions/django-extensions',
license='MIT License',
platforms=['any'],
packages=packages,
cmdclass=cmdclasses,
package_data=package_data,
install_requires=install_requires,
extras_require=extras_require,
tests_require=[
'Django',
'Werkzeug',
'factory-boy',
'mock',
'pytest',
'pytest-cov',
'pytest-django',
'python-dateutil',
'shortuuid',
'tox',
'vobject',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
)
|
# coding: utf-8
"""
Main BDF class. Defines:
- BDFInputPy
"""
import os
from collections import defaultdict
from itertools import count
from typing import List, Tuple, Optional, Union, Any, cast
from io import StringIO
import numpy as np
from cpylog import get_logger2
from pyNastran.nptyping import NDArrayN2int
from pyNastran.utils import print_bad_path, _filename
from pyNastran.bdf import BULK_DATA_CARDS, CASE_BULK_CARDS
from pyNastran.bdf.errors import MissingDeckSections
from pyNastran.bdf.bdf_interface.utils import _parse_pynastran_header
from pyNastran.bdf.bdf_interface.include_file import get_include_filename
# these allow spaces
FILE_MANAGEMENT = (
'ACQUIRE ', 'ASSIGN ', 'CONNECT ', 'DBCLEAN ', 'DBDICT ', 'DBDIR ',
'DBFIX ', 'DBLOAD ', 'DBLOCATE ', 'DBSETDEL ', 'DBUNLOAD ',
'DBUPDATE ', 'ENDJOB ', 'EXPAND ', 'INCLUDE ', 'INIT ', 'NASTRAN ',
'PROJ ',
)
EXECUTIVE_SPACES = ('ALTER ', 'APP ', 'COMPILE ', 'COMPILER ', 'DIAG ',
'GEOMCHECK ', 'ID ', 'LINK ', 'MALTER ', 'SOL ', 'TIME ')
#CASE_BULK_CARDS = {
## of the form 'LOAD = 5', so 'PARAM,POST,-1' doesn't count
#'LOAD', 'SPC', 'FREQ', 'MPC', # case control + bulk data cards
#'FORCE', 'TRIM', 'DESVAR', 'TSTEP', 'TSTEPNL', 'NSM', 'CLOAD', 'SUPORT1',
#'CSSCHD', 'SDAMPING', 'DLOAD', 'TRIM',
#'SUPORT', # short for SUPORT1
#'ACCEL', # short for ACCELERATION
## 'PARAM', # equals sign is problematic
#}
CASE_CARDS_NO_BULK = (
#'LOAD'
#NX 2019.2
'A2GG', 'ACCE', # ACCELERATION
'ACINTENSITY', 'ACORDCHK', 'ACPOWER', 'ACVELOCITY', 'ADAMSMNF',
'ADAPTERR', 'ADMRECVR', 'AECONFIG', 'AEROF', 'AESYMXY', 'AESYMXZ', 'ALOAD',
'ANALYSIS', 'APRESSURE', 'ATVOUT', 'AUXCASE', 'B2GG', 'B2PP', 'BC', 'BCRESULTS',
'BCSET', 'BGRESULTS', 'BGSET', 'BOLTID', 'BOLTRESULTS', 'BOUTPUT', 'CKGAP',
#CLOAD - in bulk
'CMETHOD', 'CRSTRN', 'CSMSET',
#CSSCHD - in bulk
'CYCFORCES', 'CYCSET', 'CZRESULTS',
'DEFORM', 'DESGLB', 'DESOBJ', 'DESSUB',
'DISP', # DISPLACEMENT
#DIVERG - in bulk
#DLOAD - in bulk
'DMTRCOEF', 'DMTRLOSS', 'DRSPAN', 'DSAPRT', 'DSYM', 'DTEMP', 'EBDSET',
#'ECHO'
'EDE', 'EFLOAD', 'EKE', 'ELAR', 'ELAROUT', 'ELSDCON', 'ELSTRN', 'ELSUM',
'ENTHALPY', 'ERP', 'ESE', 'EXTSEOUT', 'FLSFSEL', 'FLSPOUT', 'FLSTCNT',
'FLUX', 'FLXSLI', 'FLXRESULTS', 'FMETHOD',
#FORCE - in bulk
'FREQU', # 'FREQUENCY',
'FRFIN',
'GCRSTRN', 'GELSTRN', 'GKRESULTS', 'GPFORCE', 'GPKE', 'GPLSTRN', 'GPRSORT',
'GPSDCON', 'GPSTRAIN', 'GPSTRESS', 'GRDCON', 'GROUNDCHECK', 'GSTRAIN', 'GSTRESS',
'GTHSTRN', 'GUST', 'HARMONICS', 'HDOT', 'HOUTPUT', 'IC', 'IMPERF',
#INCLUDE - already handled
'INITS', 'INPOWER', 'JCONSET', 'JRESULTS', 'JINTEG', 'K2GG', 'K2PP', 'LABEL',
'LINE',
#LOAD - in bulk
'LOADSET', 'M2GG', 'M2PP', 'MASTER', 'MAXLINES', 'MAXMIN', 'MBDEXPORT', 'MBDRECVR',
'MEFFMASS', 'METHOD', 'MFLUID', 'MODALE', 'MODCON', 'MODES', 'MODSEL',
    'MODTRAK', 'MONITOR', 'MONVAR',
#MPC - in bulk
'MPCF', # MPCFORCES
'MPRES', 'NLARCL', 'NLCNTL', 'NLLOAD', 'NLPARM', 'NLSTRESS', 'NONLINEAR',
'NOUTPUT',
#NSM - in bulk
'OFREQUENCY', 'OLOAD', 'OMODES', 'OPRESS', 'OSTNINI', 'OTEMP', 'OTIME',
'OTMFORC', 'OUTPUT', 'P2G', 'PANCON',
#PARAM - in bulk
'PARTN', 'PEAKOUT', 'PFRESULTS', 'PLOTID', 'PLSTRN', 'PRESSURE',
'RANDOM', 'RCROSS', 'REPCASE', 'RESVEC', 'RIGID', 'RMAXMIN', 'RMETHOD',
'RSMETHOD',
    'SACCEL',  # SACCELERATION
'SDAMPING',
'SDISP', # SDISPLACEMENT
'SEALL', 'SEDR', 'SEDV',
'SEEXCLUDE', 'SEFINAL',
'SEKREDUCE',
'SELGENERATE', 'SELREDUCE',
'SEMGENERATE', 'SEMREDUCE',
'SEQDEP', 'SERESP',
#SET - in bulk
'SETMC', 'SETMCNAME', 'SETS DEFINITION', 'SHELLTHK', 'SKIP',
'SMETHOD',
# SPC - in bulk
'SPCF', # SPCFORCES
'STATSUB', 'STATVAR', 'STRAIN', 'STRESS', 'STRFIELD', 'SUBCASE', 'SUBCOM',
'SUBSEQ',
'SUBT', # SUBTITLE
#'SUPER',
#SUPORT - in bulk
#SUPORT1 - in bulk
'SVECTOR',
'SVELO', # SVELOCITY
    'SYM', 'SYMCOM', 'SYMSEQ', 'TEMPERATURE', 'TFL', 'THERMAL', 'TITLE',
#TRIM - in bulk
'TRLOSS', 'TRPOWER',
# TSTEP - in bulk
# TSTEPNL - in bulk
'TSTRU', 'VATVOUT',
'VELO', # VELOCITY
'VOLUME', 'WEIGHTCHECK'
)
EXECUTIVE_CASE_SPACES = tuple(
list(FILE_MANAGEMENT) +
list(EXECUTIVE_SPACES) +
['SET ', 'SUBCASE '],
)
class BDFInputPy:
"""BDF reader class that only handles lines and not building cards or parsing cards"""
def __init__(self, read_includes: bool, dumplines: bool,
encoding: str, nastran_format: str='msc',
consider_superelements: bool=True,
log: Any=None, debug: bool=False):
"""
BDF reader class that only handles lines and not building cards or parsing cards
Parameters
----------
read_includes : bool
should include files be read
dumplines : bool
Writes 'pyNastran_dump.bdf' up to some failed line index
encoding : str
the character encoding (e.g., utf8, latin1, cp1252)
nastran_format : str; default='msc'
'zona' has a special read method
{msc, nx, zona}
consider_superelements : bool; default=True
parse 'begin super=2'
log : logger(); default=None
            a logger for printing INCLUDE files that are loaded
debug : bool; default=False
used when testing; for the logger
"""
self.dumplines = dumplines
self.encoding = encoding
self.nastran_format = nastran_format
self.include_dir = ''
self.include_lines = defaultdict(list)
self.read_includes = read_includes
self.active_filenames = []
self.active_filename = None
self.consider_superelements = consider_superelements
self.debug = debug
self.log = get_logger2(log, debug)
def get_lines(self, bdf_filename: Union[str, StringIO],
punch: Optional[bool]=False,
make_ilines: bool=True) -> List[str]:
"""
Opens the bdf and extracts the lines by group
Parameters
----------
bdf_filename : str
the main bdf_filename
punch : bool / None; default=False
is this a punch file
None : guess
True : no executive/case control decks
False : executive/case control decks exist
make_ilines : bool; default=True
flag for bulk_data_ilines
Returns
-------
system_lines : List[str]
the system control lines (typically empty; used for alters)
executive_control_lines : List[str]
the executive control lines (stores SOL 101)
case_control_lines : List[str]
the case control lines (stores subcases)
bulk_data_lines : List[str]
the bulk data lines (stores geometry, boundary conditions, loads, etc.)
bulk_data_ilines : None / (nlines, 2) int ndarray
if make_ilines = True:
the [ifile, iline] pair for each line in the file
if make_ilines = False:
ilines = None
"""
main_lines = self.get_main_lines(bdf_filename)
all_lines, ilines = self.lines_to_deck_lines(main_lines, make_ilines=make_ilines)
out = _lines_to_decks(all_lines, ilines, punch, self.log,
keep_enddata=True,
consider_superelements=self.consider_superelements,
nastran_format=self.nastran_format)
(
system_lines, executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines) = out
if self.nastran_format in {'msc', 'nx', 'optistruct', 'nasa95', 'mystran'}:
pass
elif self.nastran_format == 'zona':
bulk_data_lines, bulk_data_ilines, system_lines = self._get_lines_zona(
system_lines, bulk_data_lines, bulk_data_ilines, punch)
else:
msg = f'nastran_format={self.nastran_format!r} and must be msc, nx, optistruct, nasa95, mystran, or zona'
raise NotImplementedError(msg)
return (system_lines, executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines)
def _get_lines_zona(self, system_lines: List[str], bulk_data_lines: List[str],
bulk_data_ilines: NDArrayN2int,
punch: bool) -> Tuple[List[str], NDArrayN2int, List[str]]:
"""load and update the lines for ZONA"""
system_lines2 = []
for system_line in system_lines:
if system_line.upper().startswith('ASSIGN'):
split_system = system_line.split(',')
header = split_system[0]
header_upper = header.upper()
if header_upper.startswith('ASSIGN FEM'):
unused_fem, filename = header.split('=')
filename = filename.strip('"\'')
self.log.debug('reading %s' % filename)
if filename.lower().endswith('.f06'):
filename = os.path.splitext(filename)[0] + '.bdf'
if not filename.endswith('.bdf'):
raise RuntimeError('filename must end in bdf; %s' % filename)
_main_lines = self.get_main_lines(filename)
make_ilines = bulk_data_ilines is not None
_all_lines, _ilines = self.lines_to_deck_lines(
_main_lines, make_ilines=make_ilines)
_out = _lines_to_decks(_all_lines, _ilines, punch, self.log,
keep_enddata=False,
consider_superelements=self.consider_superelements)
(
_system_lines, _executive_control_lines, _case_control_lines,
bulk_data_lines2, bulk_data_ilines2,
_superelement_lines, _superelement_ilines,
) = _out
bulk_data_lines = bulk_data_lines2 + bulk_data_lines
#print("bulk_data_ilines2 =", bulk_data_ilines2, bulk_data_ilines2.shape)
#print("bulk_data_ilines =", bulk_data_ilines, bulk_data_ilines.shape)
bulk_data_ilines = np.vstack([bulk_data_ilines2, bulk_data_ilines])
continue
elif header_upper.startswith('ASSIGN MATRIX'):
pass
elif header_upper.startswith('ASSIGN OUTPUT4'):
pass
else: # pragma: no cover
raise NotImplementedError(system_line)
system_lines2.append(system_line)
        system_lines = system_lines2
return bulk_data_lines, bulk_data_ilines, system_lines
def get_main_lines(self, bdf_filename: Union[str, StringIO]) -> List[str]:
"""
Opens the bdf and extracts the lines
Parameters
----------
bdf_filename : str / StringIO
the main bdf_filename
Returns
-------
lines : List[str]
all the lines packed into a single line stream
"""
#print('bdf_filename_main =', bdf_filename)
if hasattr(bdf_filename, 'read') and hasattr(bdf_filename, 'write'):
bdf_filename = cast(StringIO, bdf_filename)
lines = bdf_filename.readlines()
if len(lines) == 0:
raise RuntimeError('lines in %s is empty' % bdf_filename)
return lines
bdf_filename = cast(str, bdf_filename)
self.bdf_filename = bdf_filename
# the directory of the 1st BDF (include BDFs are relative to this one)
self.include_dir = os.path.dirname(os.path.abspath(bdf_filename))
with self._open_file(bdf_filename, basename=True) as bdf_file:
try:
lines = bdf_file.readlines()
except UnicodeDecodeError:
_show_bad_file(self, bdf_filename, encoding=self.encoding)
return lines
    def lines_to_deck_lines(self, lines: List[str], make_ilines: bool=True) -> Tuple[List[str], Optional[NDArrayN2int]]:
"""
Merges the includes into the main deck.
Parameters
----------
lines : List[str]
the lines from the main BDF
make_ilines : bool; default=True
flag for ilines
Returns
-------
active_lines : List[str]
all the active lines in the deck
ilines : (nlines, 2) int ndarray
if make_ilines = True:
the [ifile, iline] pair for each line in the file
if make_ilines = False:
ilines = None
"""
nlines = len(lines)
#bdf_filenames = [self.bdf_filename]
ilines = None
if make_ilines:
ilines = _make_ilines(nlines, ifile=0)
i = 0
ifile = 1
while i < nlines:
try:
line = lines[i].rstrip('\r\n\t')
except IndexError:
break
line_upper = line.upper()
if line_upper.startswith('INCLUDE'):
j, include_lines = self._get_include_lines(lines, line, i, nlines)
bdf_filename2 = get_include_filename(include_lines, include_dir=self.include_dir)
#bdf_filenames.append(bdf_filename2)
jfile = ilines[i, 0]
# these are the lines associated with the 1st/2nd include file found
self.include_lines[jfile].append((include_lines, bdf_filename2))
if self.read_includes:
lines, nlines, ilines = self._update_include(
lines, nlines, ilines,
include_lines, bdf_filename2, i, j, ifile, make_ilines=make_ilines)
ifile += 1
else:
lines = lines[:i] + lines[j:]
if make_ilines:
ilines = np.vstack([
ilines[:i, :],
ilines[j:, :],
])
#assert len(ilines[:, 1]) == len(np.unique(ilines[:, 1]))
#self.include_lines[ifile].append(include_lines)
#self.reject_lines.append(write_include(bdf_filename2))
i += 1
if self.dumplines:
self._dump_file('pyNastran_dump.bdf', lines, i)
#print(bdf_filenames)
#if make_ilines:
#nilines = ilines.shape[0]
#assert nlines == ilines.shape[0], 'nlines=%s nilines=%s' % (nlines, nilines)
return lines, ilines
def _update_include(self, lines: List[str], nlines: int, ilines,
include_lines: List[str], bdf_filename2: str, i: int, j: int, ifile: int,
make_ilines: bool=False):
"""incorporates an include file into the lines"""
try:
self._open_file_checks(bdf_filename2)
except IOError:
crash_name = 'pyNastran_crash.bdf'
self._dump_file(crash_name, lines, j)
msg = 'There was an invalid filename found while parsing.\n'
msg += 'Check the end of %r\n' % crash_name
msg += 'bdf_filename2 = %r\n' % bdf_filename2
msg += 'abs_filename2 = %r\n' % os.path.abspath(bdf_filename2)
msg += 'include_lines = %s' % include_lines
#msg += 'len(bdf_filename2) = %s' % len(bdf_filename2)
print(msg)
raise
#raise IOError(msg)
read_again = False
with self._open_file(bdf_filename2, basename=False) as bdf_file:
#print('bdf_file.name = %s' % bdf_file.name)
try:
lines2 = bdf_file.readlines()
except UnicodeDecodeError:
#try:
bdf_file.seek(0)
try:
encoding2 = _check_pynastran_encoding(bdf_filename2, encoding=self.encoding)
except UnicodeDecodeError:
encoding2 = self.encoding
#print('***encoding=%s encoding2=%s' % (self.encoding, encoding2))
if self.encoding != encoding2:
read_again = True
else:
msg = (
'Invalid Encoding: encoding=%r. Fix it by:\n'
' 1. try a different encoding (e.g., latin1, cp1252, utf8)\n'
" 2. call read_bdf(...) with `encoding`'\n"
" 3. Add '$ pyNastran : encoding=latin1"
' (or other encoding) to the top of the main/INCLUDE file\n' % (
self.encoding))
raise RuntimeError(msg)
if read_again:
self.active_filenames.pop()
with self._open_file(bdf_filename2, basename=False, encoding=encoding2) as bdf_file:
#print('bdf_file.name = %s' % bdf_file.name)
try:
lines2 = bdf_file.readlines()
except UnicodeDecodeError:
msg = (
'Incorrect Encoding: encoding=%r. Fix it by:\n'
' 1. try a different encoding (e.g., latin1, cp1252, utf8)\n'
" 2. call read_bdf(...) with `encoding`'\n"
" 3. Add '$ pyNastran : encoding=latin1"
' (or other encoding) to the top of the main/INCLUDE file\n' % encoding2)
raise RuntimeError(msg)
#print('lines2 = %s' % lines2)
#line2 = lines[j].split('$')
#if not line2[0].isalpha():
#print('** %s' % line2)
include_comment = '\n$ INCLUDE processed: %s\n' % bdf_filename2
#for line in lines2:
#print(" ?%s" % line.rstrip())
#for ii, line in enumerate(lines):
#print(' %i %r' % (ii, line))
#for ii in range(i):
#print('i=%i %r' % (ii, lines[ii]))
#print('---------')
#for jj in range(j, len(lines)):
#print('j=%i %r' % (jj, lines[jj]))
#print('include_comment = %r' % include_comment)
nlines2 = len(lines2)
if make_ilines:
ilines2 = _make_ilines(nlines2, ifile)
#n_ilines = ilines.shape[0]
#print(ilines[j:, :])
#assert len(lines[:i]) == ilines[:i+1, :].shape[0] - ifile, 'A: nlines=%s nilines=%s' % (len(lines[:i]), ilines[:i+1, :].shape[0])
#assert len(lines[j:]) == ilines[j:, :].shape[0], 'B: nlines=%s nilines=%s' % (len(lines[j:]), ilines[j:, :].shape[0])
#assert len(lines2) == ilines2.shape[0], 'C: nlines=%s nilines=%s' % (len(lines2), ilines2.shape[0])
#assert nlines == ilines.shape[0], 'B: nlines=%s nilines=%s' % (nlines, nilines)
#assert nlines == ilines.shape[0], 'C: nlines=%s nilines=%s' % (nlines, nilines)
#print(nlines-ifile+1, nlines2)
#print(
#len(lines[:i]),
#len([include_comment]),
#len(lines2),
#len(lines[j:]),
#)
#print(
#ilines[:i+1, :].shape[0],
#ilines2.shape[0],
#ilines[j:, :].shape[0],
#)
#print('ilines:\n%s' % ilines)
#print('ilines2:\n%s' % ilines2)
#print('lines2:\n%s' % ''.join(lines2))
ilines = np.vstack([
ilines[:i+1, :],
ilines2,
ilines[j:, :],
])
#dij = j - i
nlines += nlines2
lines = lines[:i] + [include_comment] + lines2 + lines[j:]
#print('*lines:\n%s' % ''.join(lines))
#for ifile_iline, line in zip(ilines, lines):
#print(ifile_iline, line.rstrip())
#if make_ilines:
#n_ilines = ilines.shape[0]
#ncompare = n_ilines - dij
#assert n_ilines == n_ilines, 'nlines=%s dij=%s n_ilines=%s' % (n_ilines, dij, n_ilines)
#for line in lines:
#print(" *%s" % line.rstrip())
return lines, nlines, ilines
def _get_include_lines(self, lines: List[str], line: str,
i: int, nlines: int) -> Tuple[int, List[str]]:
"""
gets the lines for the include file
INCLUDE 'Satellite_V02_INCLUDE:Satellite_V02_Panneau_Externe.dat'
INCLUDE '../../BULK/COORDS/satellite_V02_Coord.blk'
"""
j = i + 1
line_base = line.split('$')[0]
include_lines = [line_base.strip()]
if "'" not in line_base:
pass
else:
#print('----------------------')
line_base = line_base[8:].strip()
if line_base.startswith("'") and line_base.endswith("'"):
pass
else:
while not line.split('$')[0].endswith("'") and j < nlines:
#print('j=%s nlines=%s less?=%s' % (j, nlines, j < nlines))
try:
line = lines[j].split('$')[0].strip()
except IndexError:
#print('bdf_filename=%r' % bdf_filename)
crash_name = 'pyNastran_crash.bdf'
self._dump_file(crash_name, lines, i+1)
msg = 'There was an invalid filename found while parsing (index).\n'
msg += 'Check the end of %r\n' % crash_name
#msg += 'bdf_filename2 = %r\n' % bdf_filename
msg += 'include_lines = %s' % include_lines
raise IndexError(msg)
#print('endswith_quote=%s; %r' % (
#line.split('$')[0].strip().endswith(""), line.strip()))
include_lines.append(line.strip())
j += 1
#print('j=%s nlines=%s less?=%s' % (j, nlines, j < nlines))
#print('*** %s' % line)
#bdf_filename2 = line[7:].strip(" '")
#include_lines = [line] + lines[i+1:j]
#print(include_lines)
return j, include_lines
def _dump_file(self, bdf_dump_filename: str,
lines: List[str],
i: int) -> None:
"""
Writes a BDF up to some failed line index
Parameters
----------
bdf_dump_filename : str
the bdf filename to dump
lines : List[str]
the entire list of lines
i : int
the last index to write
"""
with open(_filename(bdf_dump_filename),
'w', encoding=self.encoding) as crash_file:
for line in lines[:i]:
crash_file.write(line)
def _open_file_checks(self, bdf_filename: str, basename: bool=False) -> None:
"""
Verifies that the BDF about to be opened:
1. Exists
2. Is Unique
3. Isn't an OP2
4. Is a File
Parameters
----------
bdf_filename : str
the bdf filename to open
basename : bool; default=False
only take the basename of the bdf
"""
if basename:
bdf_filename_inc = os.path.join(self.include_dir, os.path.basename(bdf_filename))
else:
bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)
if not os.path.exists(_filename(bdf_filename_inc)):
msg = 'No such bdf_filename: %r\n' % bdf_filename_inc
msg += 'cwd: %r\n' % os.getcwd()
msg += 'include_dir: %r\n' % self.include_dir
msg += print_bad_path(bdf_filename_inc)
self.log.error(msg)
raise IOError(msg)
elif bdf_filename_inc.endswith('.op2'):
msg = 'Invalid filetype: bdf_filename=%r' % bdf_filename_inc
self.log.error(msg)
raise IOError(msg)
bdf_filename = bdf_filename_inc
if bdf_filename in self.active_filenames:
msg = 'bdf_filename=%s is already active.\nactive_filenames=%s' \
% (bdf_filename, self.active_filenames)
self.log.error(msg)
raise RuntimeError(msg)
elif os.path.isdir(_filename(bdf_filename)):
current_filename = self.active_filename if len(self.active_filenames) > 0 else 'None'
msg = 'Found a directory: bdf_filename=%r\ncurrent_file=%s' % (
bdf_filename_inc, current_filename)
self.log.error(msg)
raise IOError(msg)
elif not os.path.isfile(_filename(bdf_filename)):
msg = 'Not a file: bdf_filename=%r' % bdf_filename
self.log.error(msg)
raise IOError(msg)
def _open_file(self, bdf_filename: Union[str, StringIO],
basename: bool=False, check: bool=True, encoding: Optional[str]=None) -> Any:
"""
Opens a new bdf_filename with the proper encoding and include directory
Parameters
----------
bdf_filename : str
the filename to open
basename : bool (default=False)
should the basename of bdf_filename be appended to the include directory
check : bool; default=True
you can disable the checks
Returns
-------
bdf_file : file
a file object
"""
if encoding is None:
encoding = self.encoding
if basename:
bdf_filename_inc = os.path.join(self.include_dir, os.path.basename(bdf_filename))
else:
bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)
self._validate_open_file(bdf_filename, bdf_filename_inc, check)
self.log.debug('opening %r' % bdf_filename_inc)
self.active_filenames.append(bdf_filename_inc)
#print('ENCODING - _open_file=%r' % self.encoding)
#self._check_pynastran_header(lines)
bdf_file = open(_filename(bdf_filename_inc), 'r', encoding=encoding)
return bdf_file
def _validate_open_file(self, bdf_filename: Union[str, StringIO],
bdf_filename_inc: str,
check: bool=True) -> None:
"""
checks that the file doesn't have obvious errors
- hasn't been used
- not a directory
- is a file
Parameters
----------
bdf_filename : str
the current bdf filename
bdf_filename_inc : str
the next bdf filename
check : bool; default=True
you can disable the checks
Raises
------
RuntimeError : file is active
IOError : Invalid file type
"""
if check:
if not os.path.exists(_filename(bdf_filename_inc)):
msg = 'No such bdf_filename: %r\n' % bdf_filename_inc
msg += 'cwd: %r\n' % os.getcwd()
msg += 'include_dir: %r\n' % self.include_dir
msg += print_bad_path(bdf_filename_inc)
raise IOError(msg)
elif bdf_filename_inc.endswith('.op2'):
raise IOError('Invalid filetype: bdf_filename=%r' % bdf_filename_inc)
bdf_filename = bdf_filename_inc
if bdf_filename in self.active_filenames:
msg = 'bdf_filename=%s is already active.\nactive_filenames=%s' \
% (bdf_filename, self.active_filenames)
raise RuntimeError(msg)
elif os.path.isdir(_filename(bdf_filename)):
current_fname = self.active_filename if len(self.active_filenames) > 0 else 'None'
raise IOError('Found a directory: bdf_filename=%r\ncurrent_file=%s' % (
bdf_filename_inc, current_fname))
elif not os.path.isfile(_filename(bdf_filename)):
raise IOError('Not a file: bdf_filename=%r' % bdf_filename)
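# Hedged usage sketch (not part of the original module): how BDFInputPy is typically
# driven to split a deck into its sections. 'model.bdf' is a hypothetical filename and
# this helper is illustrative only; it is not called anywhere in this module.
def _example_read_deck(bdf_filename: str='model.bdf') -> List[str]:
    reader = BDFInputPy(read_includes=True, dumplines=False, encoding='utf8')
    (unused_system_lines, unused_executive_control_lines, unused_case_control_lines,
     bulk_data_lines, unused_bulk_data_ilines,
     unused_superelement_lines, unused_superelement_ilines) = reader.get_lines(
         bdf_filename, punch=False, make_ilines=True)
    return bulk_data_lines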
def _is_bulk_data_line(text: str) -> bool:
"""
Returns True if there is a Bulk Data Deck
Parameters
----------
text : str
a line in the deck
Returns
-------
is_bulk_line : bool
is this a bulk data line
"""
#
# Stripping the data isn't ideal as a GRID card cannot have a leading space.
#
# We strip the data because we need to support:
# ' SUPORT1 = 10'
# if you use text[0:8].strip('* ').upper()
# we get:
# ' SUPO', which is not a case control card called 'SUPORT1'
#
text2 = text.split('$')[0].rstrip()
#card_name = text2.strip().replace(' ', '')[0:8].split(',')[0].upper().rstrip('*')
card_name = text2.strip()[0:8].split(',')[0].upper().rstrip('*')
#card_name2 = text2.split(',')[0].upper()
#print('card_name =%r' % card_name)
#print('card_name2=%r' % card_name2)
# bulk data cards
if card_name in BULK_DATA_CARDS:
# case control + bulk data cards
if '=' in text2 and card_name in CASE_BULK_CARDS or text2.startswith(' '):
return False
elif card_name == 'PARAM':
# The PARAM card can have a comma or tab, but no equals sign.
# If there is a PARAM card, we have to assume we're not in the
# case control.
return False
return True
return False
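# Hedged illustration (not part of the original module; assumes GRID is registered in
# BULK_DATA_CARDS): typical classifications from _is_bulk_data_line(). Not called
# anywhere in this module.
def _example_is_bulk_data_line() -> None:
    assert _is_bulk_data_line('GRID,1,,0.,0.,0.') is True
    assert _is_bulk_data_line('DISP = ALL') is False  # a case control request
    assert _is_bulk_data_line('SPC = 100') is False   # case control style, not a bulk entry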
def _is_case_control_line(text: str) -> bool:
"""
Returns True if there is a Case Control Deck
Parameters
----------
text : str
a line in the deck
Returns
-------
is_case_line : bool
is this a case control line
"""
line_upper = text.split('$')[0].strip().upper()
if line_upper.startswith(CASE_CARDS_NO_BULK):
return True
return False
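# Hedged illustration (not part of the original module): case control requests start
# with a name from CASE_CARDS_NO_BULK, while bulk entries do not. Not called anywhere
# in this module.
def _example_is_case_control_line() -> None:
    assert _is_case_control_line('DISP = ALL') is True
    assert _is_case_control_line('GRID,1,,0.,0.,0.') is False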
def _check_pynastran_encoding(bdf_filename: Union[str, StringIO], encoding: str) -> str:
"""updates the $pyNastran: key=value variables"""
line = '$pyNastran: punch=False'
#line_temp = u'é à è ê'.encode('utf8').decode('ascii')
skip_keys = [
'version', 'punch', 'nnodes', 'nelements', 'dumplines',
'is_superelements', 'skip_cards', 'units']
with open(bdf_filename, 'rb') as bdf_file:
line = bdf_file.readline()
line_str = line.decode('ascii')
while '$' in line_str:
#if not line.startswith('$'):
#break
key, value = _parse_pynastran_header(line_str)
if not key:
break
# key/value are lowercase
if key == 'encoding':
encoding = value
break
elif key in skip_keys or 'skip ' in key:
pass
else:
raise NotImplementedError(key)
line = bdf_file.readline()
line_str = line.decode('ascii')
return encoding
IGNORE_COMMENTS = (
'$EXECUTIVE CONTROL DECK',
'$CASE CONTROL DECK',
'NODES', 'SPOINTS', 'EPOINTS', 'ELEMENTS',
'PARAMS', 'PROPERTIES', 'ELEMENTS_WITH_PROPERTIES',
'ELEMENTS_WITH_NO_PROPERTIES (PID=0 and unanalyzed properties)',
'UNASSOCIATED_PROPERTIES',
'MATERIALS', 'THERMAL MATERIALS',
'CONSTRAINTS', 'SPCs', 'MPCs', 'RIGID ELEMENTS',
'LOADS', 'AERO', 'STATIC AERO', 'AERO CONTROL SURFACES',
'FLUTTER', 'GUST', 'DYNAMIC', 'OPTIMIZATION',
'COORDS', 'THERMAL', 'TABLES', 'RANDOM TABLES',
'SETS', 'CONTACT', 'REJECTS', 'REJECT_CARDS', 'REJECT_LINES',
'PROPERTIES_MASS', 'MASSES')
def _clean_comment(comment: str) -> Optional[str]:
"""
Removes specific pyNastran comment lines so duplicate lines aren't
created.
Parameters
----------
comment : str
the comment to possibly remove
Returns
-------
updated_comment : str
the comment
"""
if comment == '':
pass
elif comment in IGNORE_COMMENTS:
comment = None
elif 'pynastran' in comment.lower():
csline = comment.lower().split('pynastran', 1)
if csline[1].strip()[0] == ':':
comment = None
#if comment:
#print(comment)
return comment
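# Hedged illustration (not part of the original module): _clean_comment() drops
# pyNastran-generated section comments and keeps user comments unchanged. Not called
# anywhere in this module.
def _example_clean_comment() -> None:
    assert _clean_comment('$CASE CONTROL DECK') is None
    assert _clean_comment('$ user note') == '$ user note'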
def _lines_to_decks(lines: List[str],
ilines: NDArrayN2int,
punch: Optional[bool],
log: Any,
keep_enddata: bool=True,
consider_superelements: bool=False,
nastran_format: str='msc') -> Tuple[
List[str], List[str], List[str], List[str], NDArrayN2int,
List[str], List[str], List[str]]:
"""
Splits the BDF lines into:
- system lines
- executive control deck
- case control deck
- bulk data deck
Parameters
----------
lines : List[str]
all the active lines in the deck
ilines : None / (nlines, 2) int ndarray
None : the old behavior
narray : the [iline, ifile] pair for each line in the file
punch : bool / None
None : guess
True : starts from the bulk data deck
False : read the entire deck
keep_enddata : bool; default=True
True : don't throw away the enddata card
False : throw away the enddata card
Returns
-------
system_lines : List[str]
the system control lines (typically empty; used for alters)
executive_control_lines : List[str]
the executive control lines (stores SOL 101)
case_control_lines : List[str]
the case control lines (stores subcases)
bulk_data_lines : List[str]
the bulk data lines (stores geometry, boundary conditions, loads, etc.)
bulk_data_ilines : None / (nlines, 2) int ndarray
None : the old behavior
narray : the [ifile, iline] pair for each line in the file
superelement_lines : List[str]
???
superelement_ilines : List[str]
???
auxmodel_lines : List[str]
???
"""
if punch: # True
system_lines = []
executive_control_lines = []
case_control_lines = []
bulk_data_lines = lines
bulk_data_ilines = ilines
superelement_lines = {}
superelement_ilines = {}
#auxmodel_lines = {}
return (
system_lines, executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines)
# typical deck
out = _lines_to_decks_main(lines, ilines, log, punch=punch,
keep_enddata=keep_enddata,
consider_superelements=consider_superelements,
nastran_format=nastran_format)
(executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines,
auxmodel_lines, afpm_lines) = out
# break out system commands
system_lines, executive_control_lines = _break_system_lines(executive_control_lines)
for super_id, _lines in superelement_lines.items():
# cqrsee101b2.bdf
if len(_lines) == 0:
raise RuntimeError('lines in superelement %i is empty' % super_id)
#assert len(_lines) == len(superelement_ilines[super_id]), 'superelement %i ilines is the wrong length' % (super_id)
for auxmodel_id, _lines in auxmodel_lines.items():
# C:\MSC.Software\MSC.Nastran2005r3\msc20055\nast\tpl\motion21.dat
# C:\MSC.Software\MSC.Nastran2005r3\msc20055\nast\tpl\d200am1.dat
# C:\MSC.Software\MSC.Nastran2005r3\msc20055\nast\tpl\d200am2.dat
        log.warning('skipping auxmodel=%i' % auxmodel_id)
        if len(_lines) == 0:
            raise RuntimeError('lines in auxmodel %i is empty' % auxmodel_id)
for afpm_id, _lines in afpm_lines.items():
        log.warning('skipping AFPM=%i' % afpm_id)
        if len(_lines) == 0:
            raise RuntimeError('lines in AFPM %i is empty' % afpm_id)
# clean comments
system_lines = [_clean_comment(line) for line in system_lines
if _clean_comment(line) is not None]
executive_control_lines = [_clean_comment(line) for line in executive_control_lines
if _clean_comment(line) is not None]
case_control_lines = [_clean_comment(line) for line in case_control_lines
if _clean_comment(line) is not None]
return (
system_lines, executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines)
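# Hedged sketch (not part of the original module): splitting a minimal, hypothetical
# deck with _lines_to_decks(); _make_ilines() and get_logger2() are defined/imported
# elsewhere in this module. Not called anywhere in this module.
def _example_lines_to_decks() -> None:
    lines = ['SOL 101', 'CEND', 'SUBCASE 1', '  DISP = ALL', 'BEGIN BULK',
             'GRID,1', 'ENDDATA']
    ilines = _make_ilines(len(lines), ifile=0)
    log = get_logger2(None, debug=False)
    (system_lines, executive_control_lines, case_control_lines,
     bulk_data_lines, unused_bulk_data_ilines,
     unused_superelement_lines, unused_superelement_ilines) = _lines_to_decks(
         lines, ilines, False, log)
    assert system_lines == []
    assert executive_control_lines == ['SOL 101', 'CEND']
    assert case_control_lines == ['SUBCASE 1', '  DISP = ALL']
    assert bulk_data_lines == ['GRID,1', 'ENDDATA']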
def _lines_to_decks_main(lines: List[str],
ilines: Any, log: Any,
punch: Optional[bool]=False,
keep_enddata: bool=True,
consider_superelements: bool=False,
nastran_format: str='msc') -> Tuple[
List[str], List[str], List[str], List[str], NDArrayN2int,
List[str], List[str], List[str]]:
"""
Splits the BDF lines into:
- system lines
- executive control deck
- case control deck
- bulk data deck
Parameters
----------
lines : List[str]
all the active lines in the deck
ilines : None / (nlines, 2) int ndarray
None : the old behavior
narray : the [iline, ifile] pair for each line in the file
punch : bool / None; default=False
None : guess
True : punch file (skipped previously, so this can't be True)
False : not a punch file
keep_enddata : bool; default=True
True : don't throw away the enddata card
False : throw away the enddata card
    consider_superelements : bool; default=False
parse 'begin super=2'
Returns
-------
system_executive_control_lines : List[str]
the system control lines (typically empty; used for alters)
and the executive control lines (stores SOL 101)
case_control_lines : List[str]
the case control lines (stores subcases)
bulk_data_lines : List[str]
the bulk data lines (stores geometry, boundary conditions, loads, etc.)
bulk_data_ilines : None / (nlines, 2) int ndarray
None : the old behavior
narray : the [ifile, iline] pair for each line in the file
superelement_lines : List[str]
???
superelement_ilines : List[str]
???
auxmodel_lines : List[str]
???
"""
make_ilines = ilines is not None
guess_deck_sections = punch is None
executive_control_lines = []
case_control_lines = []
bulk_data_lines = []
superelement_lines = defaultdict(list)
superelement_ilines = defaultdict(list)
auxmodel_lines = defaultdict(list)
afpm_lines = defaultdict(list)
auxmodels_found = set()
afpms_found = set()
auxmodels_to_find = []
afpms_to_find = []
is_auxmodel = False
is_afpm = False
is_superelement = False
is_auxmodel_active = False
is_afpm_active = False
auxmodel_id = None
afpm_id = None
#---------------------------------------------
current_lines = executive_control_lines
if nastran_format in ['msc', 'nx', 'nasa95', 'mystran', 'zona']:
flag_word = 'executive'
flag = 1 # start from executive control deck
elif nastran_format == 'optistruct':
flag_word = 'case control'
        flag = 2  # start from the case control deck
else: # pragma: no cover
raise RuntimeError(nastran_format)
#flag = 1
old_flags = []
bulk_data_ilines = []
if ilines is None:
ilines = count()
#guess_deck_sections = True
#print('guess_deck_sections =', guess_deck_sections, punch)
for i, ifile_iline, line in zip(count(), ilines, lines):
#print('%s %-8s %s' % (ifile_iline, flag_word, line.rstrip()))
#print('%s %i %s' % (ifile_iline, flag, line.rstrip()))
line_upper = line.split('$')[0].upper().strip()
if guess_deck_sections and flag == 1 and line_upper.startswith('BEGIN'):
# we're in the executive deck and found the bulk data deck
section_name_map = {
1 : 'executive control',
2 : 'case control',
}
section_name = section_name_map[flag]
if _is_begin_bulk(line_upper):
#old_flags.append(flag)
log.warning(f'currently in {section_name} deck and skipping '
'directly to bulk data section')
flag = 3
current_ilines = bulk_data_ilines
current_lines = bulk_data_lines
bulk_data_ilines = _bulk_data_lines_extract(
lines, ilines, bulk_data_lines, i,
make_ilines=make_ilines, keep_enddata=keep_enddata)
else:
raise RuntimeError(f'currently in {section_name} deck and unexpectedly '
f'found the following line:\n{line}')
break
if guess_deck_sections and flag in [1, 2] and _is_bulk_data_line(line):
# we found the case control deck successfully from the executive deck
# then we found the bulk data deck unexpectedly
section_name_map = {
1 : 'executive control',
2 : 'case control',
}
section_name = section_name_map[flag]
log.warning(f'currently in {section_name} deck and skipping directly '
f'to bulk data section\n{line}')
log.warning(line)
flag = 3
current_ilines = bulk_data_ilines
current_lines = bulk_data_lines
bulk_data_ilines = _bulk_data_lines_extract(
lines, ilines, bulk_data_lines, i-1,
make_ilines=make_ilines, keep_enddata=keep_enddata)
break
elif flag == 1:
# handles ' CEND'
if line_upper.startswith('CEND'):
# case control
old_flags.append(flag)
if flag != 1:
raise RuntimeError('expected a flag of 1 (executive control deck) '
'when going to the case control deck')
flag = 2
flag_word = 'case'
current_lines = case_control_lines
#print('executive: ', line.rstrip())
executive_control_lines.append(line.rstrip())
elif flag == 2 or flag < 0:
# we're in the case control deck right now and looking
# for one of the following:
# - 'BEGIN BULK'
# - 'BEGIN SUPER=1'
# - 'BEGIN BULK AUXMODEL=200'
# - 'BEGIN BULK AFPM=300'
#
# There's a special case for 'BEGIN AUXMODEL=1', so we flag
# AUXCASE/AUXMODEL, and do some extra parsing in flag=3.
#
# flag=2 (BEGIN BULK)
# flag=-1 (BEGIN SUPER=1)
# flag=-2 (BEGIN SUPER=2)
# ...
#
# We haven't yet tried to handle the AFPM special case
# we have to handle the comment because we could incorrectly
# flag the model as flipping to the BULK data section if we
# have BEGIN BULK in a comment
if '$' in line:
line, comment = line.split('$', 1)
current_lines.append('$' + comment.rstrip())
#print('%s: %s' % (flag_word, '$' + comment.rstrip()))
# just reuse the existing one
#line_upper = line.upper().strip()
if line_upper.startswith('BEGIN'):
if _is_begin_bulk(line_upper):
old_flags.append(flag)
#assert flag == 2, flag
# we're about to break because we found begin bulk
flag = 3
current_ilines = bulk_data_ilines
#or not keep_enddata
is_extra_bulk = (is_auxmodel or is_afpm or
is_superelement or consider_superelements)
if not is_extra_bulk:
#print('breaking begin bulk...')
bulk_data_ilines = _bulk_data_lines_extract(
lines, ilines, bulk_data_lines, i,
make_ilines=make_ilines, keep_enddata=keep_enddata)
break
#print('setting lines to bulk---')
current_lines = bulk_data_lines
flag_word = 'bulk'
#print('case: %s' % (line.rstrip()))
case_control_lines.append(line.rstrip())
continue
elif 'SUPER' in line_upper and '=' in line_upper:
super_id = _get_super_id(line, line_upper)
old_flags.append(flag)
flag = -super_id
flag_word = 'SUPER=%s' % super_id
current_lines = superelement_lines[super_id]
current_ilines = superelement_ilines[super_id]
elif ('AUXMODEL' in line_upper or 'AFPM' in line_upper) and '=' in line_upper:
out = _read_bulk_for_auxmodel(
ifile_iline, line, flag, bulk_data_lines,
current_lines, current_ilines,
old_flags,
is_auxmodel, auxmodel_lines, auxmodels_to_find, auxmodels_found,
is_afpm, afpm_lines, afpms_to_find, afpms_found,
superelement_lines, superelement_ilines,
is_auxmodel_active, auxmodel_id,
is_afpm_active, afpm_id,
bulk_data_ilines)
(is_broken,
auxmodel_id, is_auxmodel_active,
afpm_id, is_afpm_active,
flag, current_lines) = out
if is_broken:
break
else:
msg = f'expected "BEGIN BULK" or "BEGIN SUPER=1"\nline = {line}'
raise RuntimeError(msg)
                #print('%s: %s' % (flag_word, line.rstrip()))
current_lines.append(line.rstrip())
elif line_upper.startswith('SUPER'):
# case control line
# SUPER = ALL
#auxmodel_idi = int(line_upper.split('=')[1])
#auxmodels_to_find.append(auxmodel_idi)
if flag != 2:
raise RuntimeError('expected a flag of 2 (case control deck) '
                                       'when going to a SUPER model')
is_superelement = True
elif line_upper.startswith('AUXMODEL'):
# case control line
# AUXMODEL = 10
auxmodel_idi = int(line_upper.split('=')[1])
auxmodels_to_find.append(auxmodel_idi)
if flag != 2:
raise RuntimeError('expected a flag of 2 (case control deck) '
'when going to an AUXMODEL')
is_auxmodel = True
elif line_upper.startswith('AFPM'):
# case control line
# AFPM = 10
afpm_idi = int(line_upper.split('=')[1])
afpms_to_find.append(afpm_idi)
if flag != 2:
raise RuntimeError('expected a flag of 2 (case control deck) '
'when going to an AFPM model')
is_afpm = True
#print('%s: %s' % (flag_word, line.rstrip()))
current_lines.append(line.rstrip())
elif flag == 3:
            if not (is_auxmodel is True or is_superelement is True or consider_superelements):
                raise RuntimeError(f'one must be True: is_auxmodel={is_auxmodel}; '
                                   f'is_superelement={is_superelement}; '
                                   f'consider_superelements={consider_superelements}')
#assert is_auxmodel is True or is_superelement is True or consider_superelements
# we have to handle the comment because we could incorrectly
# flag the model as flipping to the BULK data section if we
# have BEGIN BULK in a comment
if '$' in line:
line, comment = line.split('$', 1)
current_lines.append('$' + comment.rstrip())
if bulk_data_ilines != current_ilines:
raise RuntimeError('bulk_data_ilines != current_ilines')
current_ilines.append(ifile_iline)
#bulk_data_ilines.append(ifile_iline)
out = _read_bulk_for_auxmodel(
ifile_iline, line, flag, bulk_data_lines,
current_lines, current_ilines,
old_flags,
is_auxmodel, auxmodel_lines, auxmodels_to_find, auxmodels_found,
is_afpm, afpm_lines, afpms_to_find, afpms_found,
superelement_lines, superelement_ilines,
is_auxmodel_active, auxmodel_id,
is_afpm_active, afpm_id,
bulk_data_ilines)
(is_broken,
auxmodel_id, is_auxmodel_active,
afpm_id, is_afpm_active,
flag, current_lines) = out
if is_broken:
#print('breaking...')
break
else:
raise RuntimeError(line)
_check_valid_deck(flag, old_flags, nastran_format)
if len(bulk_data_lines) == 0:
raise RuntimeError('no bulk data lines were found')
#print('nbulk=%s nilines=%s' % (len(bulk_data_lines),
#len(bulk_data_ilines)), bulk_data_ilines.shape)
#if bulk_data_ilines is not None and len(bulk_data_lines) != len(bulk_data_ilines):
#raise RuntimeError('nbulk=%s nilines=%s' % (len(bulk_data_lines), len(bulk_data_ilines)))
#print('nbulk=%s nilines=%s' % (
#len(bulk_data_lines), len(bulk_data_ilines)), bulk_data_ilines.shape)
bulk_data_ilines = np.asarray(bulk_data_ilines)
out = (
executive_control_lines, case_control_lines,
bulk_data_lines, bulk_data_ilines,
superelement_lines, superelement_ilines,
auxmodel_lines, afpm_lines,
)
return out
def _bulk_data_lines_extract(lines: List[str],
ilines: Any,
bulk_data_lines: List[str],
i: int,
make_ilines: bool=True,
keep_enddata: bool=True) -> NDArrayN2int:
"""grabs the bulk data lines and ilines when we're breaking"""
if keep_enddata:
for line in lines[i+1:]:
bulk_data_lines.append(line.rstrip())
if make_ilines:
bulk_data_ilines = ilines[i+1:, :]
else:
bulk_data_ilines = None
j = 0
for j, line in enumerate(lines[i+1:]):
rline = line.rstrip()
if rline.upper().startswith('ENDDATA'):
break
bulk_data_lines.append(rline)
if make_ilines:
bulk_data_ilines = ilines[i+1:i+j+1, :]
#if not len(bulk_data_lines) == len(bulk_data_ilines):
#msg = 'len(bulk_data_lines)=%s len(bulk_data_ilines)=%s' % (
#len(bulk_data_lines), len(bulk_data_ilines))
#raise RuntimeError(msg)
return bulk_data_ilines
def _is_begin_bulk(line_upper: str) -> bool:
"""
is this a:
'BEGIN BULK'
but not:
'BEGIN BULK SUPER=2'
'BEGIN BULK AUXMODEL=2'
'BEGIN BULK AFPM=2'
"""
is_begin_bulk = 'BULK' in line_upper and (
'AUXMODEL' not in line_upper and
'AFPM' not in line_upper and
'SUPER' not in line_upper)
return is_begin_bulk
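# Hedged illustration (not part of the original module): only a plain 'BEGIN BULK'
# header counts; the superelement/auxmodel/AFPM variants are handled separately.
# Not called anywhere in this module.
def _example_is_begin_bulk() -> None:
    assert _is_begin_bulk('BEGIN BULK')
    assert not _is_begin_bulk('BEGIN BULK SUPER=2')
    assert not _is_begin_bulk('BEGIN BULK AUXMODEL=2')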
def _read_bulk_for_auxmodel(ifile_iline, line, flag: int, bulk_data_lines: List[str],
current_lines, current_ilines,
old_flags,
unused_is_auxmodel, auxmodel_lines, auxmodels_to_find, auxmodels_found,
unused_is_afpm, afpm_lines, afpm_to_find, afpm_found,
superelement_lines, superelement_ilines,
is_auxmodel_active: bool, auxmodel_id: int,
is_afpm_active: bool, afpm_id: int,
bulk_data_ilines):
"""
Reads a BEGIN BULK section searching for 'BEGIN AUXMODEL=1' and BEGIN SUPER=1'
"""
# we're in the bulk data deck right now and looking
# for a 'BEGIN AUXMODEL=1' or ???.
#
#print(len(bulk_data_lines), len(bulk_data_ilines))
if len(bulk_data_lines) != len(bulk_data_ilines):
raise RuntimeError('len(bulk_data_lines)=%s len(bulk_data_ilines)=%s are not equal' % (
len(bulk_data_lines), len(bulk_data_ilines)))
is_broken = False
#if not is_auxmodel:
#print('breaking B', flag)
#is_broken = True
#return is_broken, auxmodel_id, is_auxmodel_active, flag, current_lines
line_upper = line.upper().strip()
if line_upper.startswith('BEGIN'):
if 'AUXMODEL' in line_upper:
is_auxmodel_active = True
auxmodel_id = _get_auxmodel_id(line, line_upper)
old_flags.append(flag)
flag = -auxmodel_id
current_lines = auxmodel_lines[auxmodel_id]
current_ilines = []
auxmodels_found.add(auxmodel_id)
if len(auxmodels_found) == len(auxmodels_to_find) and len(auxmodels_found):
#print('broken...final', len(bulk_data_lines), len(bulk_data_ilines))
is_broken = True
out = (is_broken,
auxmodel_id, is_auxmodel_active,
afpm_id, is_afpm_active,
flag, current_lines)
return out
elif 'SUPER' in line_upper:
super_id = _get_super_id(line, line_upper)
old_flags.append(flag)
flag = -super_id
current_lines = superelement_lines[super_id]
current_ilines = superelement_ilines[super_id]
elif 'AFPM' in line_upper:
is_afpm_active = True
afpm_id = _get_afpm_id(line, line_upper)
old_flags.append(flag)
flag = -afpm_id
current_lines = afpm_lines[afpm_id]
current_ilines = []
afpm_found.add(afpm_id)
if len(afpm_found) == len(afpm_to_find) and len(afpm_found):
#print('broken...final', len(bulk_data_lines), len(bulk_data_ilines))
is_broken = True
                out = (is_broken,
                       auxmodel_id, is_auxmodel_active,
                       afpm_id, is_afpm_active,
                       flag, current_lines)
                return out
else:
msg = 'expected "BEGIN SUPER=1", "BEGIN AUXMODEL=1" or "BEGIN AFPM=1"\nline = %s' % line
raise RuntimeError(msg)
rline = line.rstrip()
if rline:
#if flag == 3:
#bulk_data_ilines.append(ifile_iline)
current_lines.append(rline)
current_ilines.append(ifile_iline)
out = (
is_broken,
auxmodel_id, is_auxmodel_active,
afpm_id, is_afpm_active,
flag, current_lines)
return out
def _break_system_lines(executive_control_lines: List[str]) -> Tuple[List[str], List[str]]:
"""
Extracts the Nastran system lines.
System lines may be interspersed with executive lines.
Per NX Nastran 10:
Header Description
====== ===========
ACQUIRE Selects NDDL schema and NX Nastran Delivery Database.
ASSIGN Assigns physical files to DBset members or special FORTRAN files.
CONNECT Groups geometry data by evaluator and database.
DBCLEAN Deletes selected database version(s) and/or projects.
DBDICT Prints the database directory in user-defined format.
DBDIR Prints the database directory.
DBFIX Identifies and optionally corrects errors found in the database.
DBLOAD Loads a database previously unloaded by DBUNLOAD.
DBLOCATE Obtains data blocks and parameters from databases.
DBSETDEL Deletes DBsets.
DBUNLOAD Unloads a database for compression, transfer, or archival storage.
DBUPDATE Specifies the time between updates of the database directory.
ENDJOB Terminates a job upon completion of FMS statements.
EXPAND Concatenates additional DBset members to an existing DBset.
ID Flag to name the run.
INCLUDE Inserts an external file in the input file.
INIT Creates a temporary or permanent DBset.
NASTRAN Specifies values for system cells.
PROJ Defines the current or default project identifier.
F:\\Program Files\\Siemens\\NXNastran\\nxn10p1\\nxn10p1\\nast\\tpl\\mdb01.dat
"""
j = None
sol_line = None
isol_line = None
system_lines = []
executive_control_lines2 = []
# add all the lines before and including the file management section
# to the system lines
#
# add the other lines (and the SOL 101) to the executive control lines
for i, line in enumerate(executive_control_lines):
line_upper = line.strip().upper()
if line_upper.startswith('SOL '):
isol_line = i+1
sol_line = line
if line_upper.startswith(FILE_MANAGEMENT):
system_lines += executive_control_lines[j:i+1]
j = i+1
# remove SOL 101 from the system lines if it's there
system_lines2 = [
line for line in system_lines
if not line.upper().strip().startswith('SOL ') and
not line.upper().strip().startswith('CEND')
]
if j is None:
# no system lines
executive_control_lines2 = executive_control_lines
else:
# append SOL 101 to the executive lines if we found it
# inside the system section
append_sol_line = isol_line is not None and j is not None and isol_line < j
if append_sol_line:
executive_control_lines2.append(sol_line)
# add the rest of the executive control cards
for iline in range(j, len(executive_control_lines)):
executive_control_lines2.append(executive_control_lines[iline])
#for line in executive_control_lines:
#print('eline = %r' % line)
#for line in system_lines2:
#print('sline2 = %r' % line)
#for line in executive_control_lines2:
#print('eline2 = %r' % line)
return system_lines2, executive_control_lines2
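# Hedged sketch (not part of the original module): file management statements are
# peeled off into the system deck while SOL/CEND stay executive. The NASTRAN/ASSIGN
# lines below are hypothetical examples. Not called anywhere in this module.
def _example_break_system_lines() -> None:
    executive = ['NASTRAN SYSTEM(151)=1', 'ASSIGN OUTPUT2=model.op2', 'SOL 101', 'CEND']
    system_lines, executive_lines = _break_system_lines(executive)
    assert system_lines == ['NASTRAN SYSTEM(151)=1', 'ASSIGN OUTPUT2=model.op2']
    assert executive_lines == ['SOL 101', 'CEND']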
def _check_valid_deck(flag: int, old_flags: List[int],
nastran_format: str) -> None:
"""Crashes if the flag is set wrong"""
if flag != 3:
if flag == 1:
found = ' - Executive Control Deck\n'
missing = ' - Case Control Deck\n'
missing += ' - Bulk Data Deck\n'
elif flag == 2:
found = ' - Executive Control Deck\n'
found += ' - Case Control Deck\n'
missing = ' - Bulk Data Deck\n'
elif flag < 0:
# superelement/auxmodel
found = str('old_flags=%s' % old_flags)
missing = '???'
return
else:
raise RuntimeError('flag=%r is not [1, 2, 3]' % flag)
msg = f'This is not a valid {nastran_format} BDF (a BDF capable of running Nastran).\n\n'
msg += f'The following sections were found:\n{found}\n'
msg += f'The following sections are missing:\n{missing}\n'
msg += 'If you do not have an Executive Control Deck or a Case Control Deck:\n'
msg += ' 1. call read_bdf(...) with `punch=True`\n'
msg += " 2. Add '$ pyNastran : punch=True' to the top of the main file\n"
msg += ' 3. Name your file *.pch\n\n'
msg += 'You cannot read a deck that has an Executive Control Deck, but\n'
msg += 'not a Case Control Deck (or vice versa), even if you have a Bulk Data Deck.\n'
raise MissingDeckSections(msg)
return
def _show_bad_file(self: Any, bdf_filename: Union[str, StringIO],
encoding: str,
nlines_previous: int=10) -> None:
"""
Prints the 10 lines before the UnicodeDecodeError occurred.
Parameters
----------
bdf_filename : str
the filename to print the lines of
encoding : str
the file encoding
nlines_previous : int; default=10
the number of lines to show
"""
lines = [] # type: List[str]
print('ENCODING - show_bad_file=%r' % encoding)
with open(_filename(bdf_filename), 'r', encoding=encoding) as bdf_file:
iline = 0
nblank = 0
while 1:
try:
line = bdf_file.readline().rstrip()
except UnicodeDecodeError:
iline0 = max([iline - nlines_previous, 0])
self.log.error('filename=%s' % bdf_filename)
for iline1, line in enumerate(lines[iline0:iline]):
self.log.error('lines[%i]=%r' % (iline0 + iline1, line))
msg = "\n%s encoding error on line=%s of %s; not '%s'" % (
encoding, iline, bdf_filename, encoding)
raise RuntimeError(msg)
if line:
nblank = 0
else:
nblank += 1
if nblank == 20:
raise RuntimeError('20 blank lines')
iline += 1
lines.append(line)
def _get_auxmodel_id(line: str, line_upper: str) -> int:
"""
parses the superelement header::
BEGIN AUXMODEL=2
BEGIN BULK AUXMODEL=2
BEGIN BULK AUXMODEL = 2
"""
#if '=' in line_upper:
sline = line_upper.split('=')
#else:
#sline = line_upper.split()
try:
auxmodel_id = int(sline[1])
except (IndexError, ValueError):
msg = 'expected "BEGIN AUXMODEL=1"\nline = %s' % line
raise SyntaxError(msg)
if auxmodel_id < 0:
raise SyntaxError('auxmodel_id=%i must be greater than 0; line=%s' % (
auxmodel_id, line))
return auxmodel_id
def _get_afpm_id(line: str, line_upper: str) -> int:
"""
parses the superelement header::
BEGIN AFPM=2
BEGIN BULK AFPM=2
BEGIN BULK AFPM = 2
"""
sline = line_upper.split('=')
try:
afpm_id = int(sline[1])
except (IndexError, ValueError):
msg = 'expected "BEGIN AFPM=1"\nline = %s' % line
raise SyntaxError(msg)
if afpm_id < 0:
raise SyntaxError('afpm_id=%i must be greater than 0; line=%s' % (
afpm_id, line))
return afpm_id
def _get_super_id(line: str, line_upper: str) -> int:
"""
parses the superelement header::
BEGIN SUPER=2
BEGIN BULK SUPER=2
BEGIN BULK SUPER = 2
BEGIN BULK SUPER 2
"""
if '=' in line_upper:
sline = line_upper.split('=')
super_id_str = sline[1]
if len(sline) != 2:
msg = 'expected "BEGIN SUPER=1"\nline = %s' % line
raise SyntaxError(msg)
else:
sline = line_upper.split()
if len(sline) not in [3, 4]:
msg = 'expected "BEGIN SUPER=1"\nline = %s' % line
raise SyntaxError(msg)
if len(sline) == 3:
# BEGIN SUPER 2
super_id_str = sline[2]
elif len(sline) == 4:
super_id_str = sline[3]
try:
super_id = int(super_id_str)
except (IndexError, ValueError):
msg = 'expected "BEGIN SUPER=1"\nline = %s' % line
raise SyntaxError(msg)
if super_id < 0:
raise SyntaxError('super_id=%i must be greater than 0; line=%s' % (
super_id, line))
return super_id
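# Hedged illustration (not part of the original module): the accepted superelement
# header forms; _get_auxmodel_id() and _get_afpm_id() follow the same '=' based
# pattern. Not called anywhere in this module.
def _example_get_super_id() -> None:
    assert _get_super_id('BEGIN SUPER=2', 'BEGIN SUPER=2') == 2
    assert _get_super_id('BEGIN BULK SUPER = 2', 'BEGIN BULK SUPER = 2') == 2
    assert _get_super_id('BEGIN BULK SUPER 2', 'BEGIN BULK SUPER 2') == 2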
def _clean_comment_bulk(comment: str) -> str:
"""
Removes specific pyNastran comment lines so duplicate lines aren't
created.
Parameters
----------
comment : str
the comment to possibly remove
Returns
-------
updated_comment : str
the comment
"""
if comment == '':
pass
elif comment in IGNORE_COMMENTS:
comment = ''
elif 'pynastran' in comment.lower():
csline = comment.lower().split('pynastran', 1)
if csline[1].strip() == ':':
comment = ''
#if comment:
#print(comment)
return comment
def _make_ilines(nlines: int, ifile: int) -> NDArrayN2int:
"""helper method"""
ilines = np.empty((nlines, 2), dtype='int32')
ilines[:, 0] = ifile
ilines[:, 1] = np.arange(nlines) # 0 to N-1
return ilines
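# Hedged illustration (not part of the original module): ilines maps each line of a
# file to its (ifile, iline) pair. Not called anywhere in this module.
def _example_make_ilines() -> None:
    ilines = _make_ilines(3, ifile=2)
    assert ilines.tolist() == [[2, 0], [2, 1], [2, 2]]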
|
"""
Utilities required for IP packages (storing, removing and checking IPs efficiently).
"""
__version__ = "3.0.0"
__author__ = "rakesht2499"
|
__author__ = 'jonestj1'
import mbuild as mb
class PegMonomer(mb.Compound):
def __init__(self):
super(PegMonomer, self).__init__()
mb.load('peg_monomer.pdb', compound=self, relative_to_module=self.__module__)
self.translate(-self[0].pos)
self.add(mb.Port(anchor=self[0]), 'down')
self['down'].translate([0, -0.07, 0])
self.add(mb.Port(anchor=self[6]), 'up')
self['up'].translate([0, 0.073, 0])
if __name__ == '__main__':
peg = PegMonomer()
peg.save('peg.mol2')
|
####################################################################
# #
# THIS FILE IS PART OF THE pycollada LIBRARY SOURCE CODE. #
# USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS #
# GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE #
# IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. #
# #
# THE pycollada SOURCE CODE IS (C) COPYRIGHT 2011 #
# by Jeff Terrace and contributors #
# #
####################################################################
"""Module containing classes and functions for the <polylist> primitive."""
import numpy
from collada import primitive
from collada import triangleset
from collada.common import E, tag
from collada.common import DaeIncompleteError, DaeBrokenRefError, \
DaeMalformedError, DaeUnsupportedError
from collada.util import toUnitVec, checkSource, xrange
from collada.xmlutil import etree as ElementTree
class Polygon(object):
"""Single polygon representation. Represents a polygon of N points."""
def __init__(self, indices, vertices, normal_indices, normals, texcoord_indices, texcoords, material):
"""A Polygon should not be created manually."""
self.vertices = vertices
"""A (N, 3) float array containing the points in the polygon."""
self.normals = normals
"""A (N, 3) float array with the normals for points in the polygon. Can be None."""
self.texcoords = texcoords
"""A tuple where entries are numpy float arrays of size (N, 2) containing
the texture coordinates for the points in the polygon for each texture
coordinate set. Can be length 0 if there are no texture coordinates."""
self.material = material
"""If coming from an unbound :class:`collada.polylist.Polylist`, contains a
string with the material symbol. If coming from a bound
:class:`collada.polylist.BoundPolylist`, contains the actual
:class:`collada.material.Effect` the line is bound to."""
self.indices = indices
"""A (N,) int array containing the indices for the vertices
of the N points in the polygon."""
self.normal_indices = normal_indices
"""A (N,) int array containing the indices for the normals of
the N points in the polygon"""
self.texcoord_indices = texcoord_indices
"""A (N,2) int array with texture coordinate indexes for the
texcoords of the N points in the polygon"""
def triangles(self):
"""This triangulates the polygon using a simple fanning method.
:rtype: generator of :class:`collada.polylist.Polygon`
"""
npts = len(self.vertices)
for i in range(npts-2):
tri_indices = numpy.array([
self.indices[0], self.indices[i+1], self.indices[i+2]
], dtype=numpy.float32)
tri_vertices = numpy.array([
self.vertices[0], self.vertices[i+1], self.vertices[i+2]
], dtype=numpy.float32)
if self.normals is None:
tri_normals = None
normal_indices = None
else:
tri_normals = numpy.array([
self.normals[0], self.normals[i+1], self.normals[i+2]
], dtype=numpy.float32)
normal_indices = numpy.array([
self.normal_indices[0],
self.normal_indices[i+1],
self.normal_indices[i+2]
], dtype=numpy.float32)
tri_texcoords = []
tri_texcoord_indices = []
for texcoord, texcoord_indices in zip(
self.texcoords, self.texcoord_indices):
tri_texcoords.append(numpy.array([
texcoord[0],
texcoord[i+1],
texcoord[i+2]
], dtype=numpy.float32))
tri_texcoord_indices.append(numpy.array([
texcoord_indices[0],
texcoord_indices[i+1],
texcoord_indices[i+2]
], dtype=numpy.float32))
tri = triangleset.Triangle(
tri_indices, tri_vertices,
normal_indices, tri_normals,
tri_texcoord_indices, tri_texcoords,
self.material)
yield tri
def __repr__(self):
return '<Polygon vertices=%d>' % len(self.vertices)
def __str__(self):
return repr(self)
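# Hedged illustration (not part of pycollada): the local index pattern produced by the
# fan triangulation in Polygon.triangles() above; a 5-point polygon yields the fans
# (0, 1, 2), (0, 2, 3), (0, 3, 4). Not called anywhere in this module.
def _example_fan_indices(npts=5):
    """Return the (first, i+1, i+2) index triples used to fan an N-gon."""
    return [(0, i + 1, i + 2) for i in range(npts - 2)]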
class Polylist(primitive.Primitive):
"""Class containing the data COLLADA puts in a <polylist> tag, a collection of
polygons. The Polylist object is read-only. To modify a Polylist, create a new
instance using :meth:`collada.geometry.Geometry.createPolylist`.
* If ``P`` is an instance of :class:`collada.polylist.Polylist`, then ``len(P)``
returns the number of polygons in the set. ``P[i]`` returns the i\ :sup:`th`
polygon in the set.
"""
def __init__(self, sources, material, index, vcounts, xmlnode=None):
"""A Polylist should not be created manually. Instead, call the
:meth:`collada.geometry.Geometry.createPolylist` method after
creating a geometry instance.
"""
if len(sources) == 0: raise DaeIncompleteError('A polylist set needs at least one input for vertex positions')
if not 'VERTEX' in sources: raise DaeIncompleteError('Polylist requires vertex input')
#find max offset
max_offset = max([ max([input[0] for input in input_type_array])
for input_type_array in sources.values() if len(input_type_array) > 0])
self.material = material
self.index = index
self.indices = self.index
self.nindices = max_offset + 1
self.vcounts = vcounts
self.sources = sources
self.index.shape = (-1, self.nindices)
self.npolygons = len(self.vcounts)
self.nvertices = numpy.sum(self.vcounts) if len(self.index) > 0 else 0
self.polyends = numpy.cumsum(self.vcounts)
self.polystarts = self.polyends - self.vcounts
self.polyindex = numpy.dstack((self.polystarts, self.polyends))[0]
if len(self.index) > 0:
self._vertex = sources['VERTEX'][0][4].data
self._vertex_index = self.index[:,sources['VERTEX'][0][0]]
self.maxvertexindex = numpy.max( self._vertex_index )
checkSource(sources['VERTEX'][0][4], ('X', 'Y', 'Z'), self.maxvertexindex)
else:
self._vertex = None
self._vertex_index = None
self.maxvertexindex = -1
if 'NORMAL' in sources and len(sources['NORMAL']) > 0 and len(self.index) > 0:
self._normal = sources['NORMAL'][0][4].data
self._normal_index = self.index[:,sources['NORMAL'][0][0]]
self.maxnormalindex = numpy.max( self._normal_index )
checkSource(sources['NORMAL'][0][4], ('X', 'Y', 'Z'), self.maxnormalindex)
else:
self._normal = None
self._normal_index = None
self.maxnormalindex = -1
if 'TEXCOORD' in sources and len(sources['TEXCOORD']) > 0 \
and len(self.index) > 0:
self._texcoordset = tuple([texinput[4].data
for texinput in sources['TEXCOORD']])
self._texcoord_indexset = tuple([ self.index[:,sources['TEXCOORD'][i][0]]
for i in xrange(len(sources['TEXCOORD'])) ])
self.maxtexcoordsetindex = [numpy.max(each)
for each in self._texcoord_indexset]
for i, texinput in enumerate(sources['TEXCOORD']):
checkSource(texinput[4], ('S', 'T'), self.maxtexcoordsetindex[i])
else:
self._texcoordset = tuple()
self._texcoord_indexset = tuple()
self.maxtexcoordsetindex = -1
if xmlnode is not None:
self.xmlnode = xmlnode
"""ElementTree representation of the line set."""
else:
txtindices = ' '.join(map(str, self.indices.flatten().tolist()))
acclen = len(self.indices)
self.xmlnode = E.polylist(count=str(self.npolygons),
material=self.material)
all_inputs = []
for semantic_list in self.sources.values():
all_inputs.extend(semantic_list)
for offset, semantic, sourceid, set, src in all_inputs:
inpnode = E.input(offset=str(offset), semantic=semantic,
source=sourceid)
if set is not None:
inpnode.set('set', str(set))
self.xmlnode.append(inpnode)
vcountnode = E.vcount(' '.join(map(str, self.vcounts)))
self.xmlnode.append(vcountnode)
self.xmlnode.append(E.p(txtindices))
def __len__(self):
return self.npolygons
def __getitem__(self, i):
polyrange = self.polyindex[i]
vertindex = self._vertex_index[polyrange[0]:polyrange[1]]
v = self._vertex[vertindex]
normalindex = None
if self.normal is None:
n = None
else:
normalindex = self._normal_index[polyrange[0]:polyrange[1]]
n = self._normal[normalindex]
uvindices = []
uv = []
for j, uvindex in enumerate(self._texcoord_indexset):
uvindices.append( uvindex[polyrange[0]:polyrange[1]] )
uv.append( self._texcoordset[j][ uvindex[polyrange[0]:polyrange[1]] ] )
return Polygon(vertindex, v, normalindex, n, uvindices, uv, self.material)
_triangleset = None
def triangleset(self):
"""This performs a simple triangulation of the polylist using the fanning method.
:rtype: :class:`collada.triangleset.TriangleSet`
"""
if self._triangleset is None:
indexselector = numpy.zeros(self.nvertices) == 0
indexselector[self.polyindex[:,1]-1] = False
indexselector[self.polyindex[:,1]-2] = False
indexselector = numpy.arange(self.nvertices)[indexselector]
firstpolyindex = numpy.arange(self.nvertices)
firstpolyindex = firstpolyindex - numpy.repeat(self.polyends - self.vcounts, self.vcounts)
firstpolyindex = firstpolyindex[indexselector]
if len(self.index) > 0:
triindex = numpy.dstack( (self.index[indexselector-firstpolyindex],
self.index[indexselector+1],
self.index[indexselector+2]) )
triindex = numpy.swapaxes(triindex, 1,2).flatten()
else:
triindex = numpy.array([], dtype=self.index.dtype)
triset = triangleset.TriangleSet(self.sources, self.material, triindex, self.xmlnode)
self._triangleset = triset
return self._triangleset
@staticmethod
def load( collada, localscope, node ):
indexnode = node.find(tag('p'))
if indexnode is None: raise DaeIncompleteError('Missing index in polylist')
vcountnode = node.find(tag('vcount'))
if vcountnode is None: raise DaeIncompleteError('Missing vcount in polylist')
try:
if vcountnode.text is None:
vcounts = numpy.array([], dtype=numpy.int32)
else:
vcounts = numpy.fromstring(vcountnode.text, dtype=numpy.int32, sep=' ')
vcounts[numpy.isnan(vcounts)] = 0
        except ValueError:
raise DaeMalformedError('Corrupted vcounts in polylist')
all_inputs = primitive.Primitive._getInputs(collada, localscope, node.findall(tag('input')))
try:
if indexnode.text is None:
index = numpy.array([], dtype=numpy.int32)
else:
index = numpy.fromstring(indexnode.text, dtype=numpy.int32, sep=' ')
index[numpy.isnan(index)] = 0
        except ValueError:
            raise DaeMalformedError('Corrupted index in polylist')
polylist = Polylist(all_inputs, node.get('material'), index, vcounts, node)
return polylist
def bind(self, matrix, materialnodebysymbol):
"""Create a bound polylist from this polylist, transform and material mapping"""
return BoundPolylist( self, matrix, materialnodebysymbol)
def __str__(self):
return '<Polylist length=%d>' % len(self)
def __repr__(self):
return str(self)
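# --- Usage sketch (illustration only; the .dae path is supplied by the caller
# and pycollada must be importable) --------------------------------------------
def _example_triangulate_polylists(path):
    """Load a COLLADA document and convert every Polylist primitive to a
    TriangleSet via the fanning triangleset() method above. Not part of the
    library; just a sketch of typical consumption."""
    from collada import Collada  # local import: illustration only
    mesh = Collada(path)
    converted = []
    for geom in mesh.geometries:
        for prim in geom.primitives:
            if isinstance(prim, Polylist):
                converted.append(prim.triangleset())
    return converted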
class BoundPolylist(primitive.BoundPrimitive):
"""A polylist bound to a transform matrix and materials mapping.
* If ``P`` is an instance of :class:`collada.polylist.BoundPolylist`, then ``len(P)``
returns the number of polygons in the set. ``P[i]`` returns the i\ :sup:`th`
polygon in the set.
"""
def __init__(self, pl, matrix, materialnodebysymbol):
"""Create a bound polylist from a polylist, transform and material mapping.
This gets created when a polylist is instantiated in a scene. Do not create this manually."""
M = numpy.asmatrix(matrix).transpose()
self._vertex = None if pl._vertex is None else numpy.asarray(pl._vertex * M[:3,:3]) + matrix[:3,3]
self._normal = None if pl._normal is None else numpy.asarray(pl._normal * M[:3,:3])
self._texcoordset = pl._texcoordset
matnode = materialnodebysymbol.get( pl.material )
if matnode:
self.material = matnode.target
self.inputmap = dict([ (sem, (input_sem, set)) for sem, input_sem, set in matnode.inputs ])
else: self.inputmap = self.material = None
self.index = pl.index
self.nvertices = pl.nvertices
self._vertex_index = pl._vertex_index
self._normal_index = pl._normal_index
self._texcoord_indexset = pl._texcoord_indexset
self.polyindex = pl.polyindex
self.npolygons = pl.npolygons
self.matrix = matrix
self.materialnodebysymbol = materialnodebysymbol
self.original = pl
def __len__(self): return self.npolygons
def __getitem__(self, i):
polyrange = self.polyindex[i]
vertindex = self._vertex_index[polyrange[0]:polyrange[1]]
v = self._vertex[vertindex]
normalindex = None
if self.normal is None:
n = None
else:
normalindex = self._normal_index[polyrange[0]:polyrange[1]]
n = self._normal[normalindex]
uvindices = []
uv = []
for j, uvindex in enumerate(self._texcoord_indexset):
uvindices.append( uvindex[polyrange[0]:polyrange[1]] )
uv.append( self._texcoordset[j][ uvindex[polyrange[0]:polyrange[1]] ] )
return Polygon(vertindex, v, normalindex, n, uvindices, uv, self.material)
_triangleset = None
def triangleset(self):
"""This performs a simple triangulation of the polylist using the fanning method.
:rtype: :class:`collada.triangleset.BoundTriangleSet`
"""
if self._triangleset is None:
triset = self.original.triangleset()
boundtriset = triset.bind(self.matrix, self.materialnodebysymbol)
self._triangleset = boundtriset
return self._triangleset
def polygons(self):
"""Iterate through all the polygons contained in the set.
:rtype: generator of :class:`collada.polylist.Polygon`
"""
for i in xrange(self.npolygons): yield self[i]
def shapes(self):
"""Iterate through all the polygons contained in the set.
:rtype: generator of :class:`collada.polylist.Polygon`
"""
return self.polygons()
def __str__(self):
return '<BoundPolylist length=%d>' % len(self)
def __repr__(self):
return str(self)
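# --- Usage sketch (illustration only) -------------------------------------------
# Bound polylists are normally reached through the scene graph rather than
# constructed directly; this sketch walks the default scene of a caller-supplied
# file and yields world-space polygons via BoundPolylist.polygons() above.
def _example_iter_bound_polygons(path):
    from collada import Collada  # local import: illustration only
    mesh = Collada(path)
    for bound_geom in mesh.scene.objects('geometry'):
        for prim in bound_geom.primitives():
            if isinstance(prim, BoundPolylist):
                for poly in prim.polygons():
                    yield poly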
|
# coding: utf-8
"""
Megaputer Text Mining API
Megaputer Text Mining API # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PerOperationLimitPeriodic1Response(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ntu_period1': 'str',
'ntu_in_period1_limit': 'int',
'ntu_in_period1_tokens': 'int',
'ntu_in_period1_keywords': 'int',
'ntu_in_period1_entities': 'int',
'ntu_in_period1_languages': 'int',
'ntu_in_period1_sentiments': 'int',
'ntu_in_period1_facts': 'int'
}
attribute_map = {
'ntu_period1': 'NTUPeriod1',
'ntu_in_period1_limit': 'NTUInPeriod1Limit',
'ntu_in_period1_tokens': 'NTUInPeriod1Tokens',
'ntu_in_period1_keywords': 'NTUInPeriod1Keywords',
'ntu_in_period1_entities': 'NTUInPeriod1Entities',
'ntu_in_period1_languages': 'NTUInPeriod1Languages',
'ntu_in_period1_sentiments': 'NTUInPeriod1Sentiments',
'ntu_in_period1_facts': 'NTUInPeriod1Facts'
}
def __init__(self, ntu_period1=None, ntu_in_period1_limit=None, ntu_in_period1_tokens=None, ntu_in_period1_keywords=None, ntu_in_period1_entities=None, ntu_in_period1_languages=None, ntu_in_period1_sentiments=None, ntu_in_period1_facts=None): # noqa: E501
"""PerOperationLimitPeriodic1Response - a model defined in OpenAPI""" # noqa: E501
self._ntu_period1 = None
self._ntu_in_period1_limit = None
self._ntu_in_period1_tokens = None
self._ntu_in_period1_keywords = None
self._ntu_in_period1_entities = None
self._ntu_in_period1_languages = None
self._ntu_in_period1_sentiments = None
self._ntu_in_period1_facts = None
self.discriminator = None
if ntu_period1 is not None:
self.ntu_period1 = ntu_period1
if ntu_in_period1_limit is not None:
self.ntu_in_period1_limit = ntu_in_period1_limit
if ntu_in_period1_tokens is not None:
self.ntu_in_period1_tokens = ntu_in_period1_tokens
if ntu_in_period1_keywords is not None:
self.ntu_in_period1_keywords = ntu_in_period1_keywords
if ntu_in_period1_entities is not None:
self.ntu_in_period1_entities = ntu_in_period1_entities
if ntu_in_period1_languages is not None:
self.ntu_in_period1_languages = ntu_in_period1_languages
if ntu_in_period1_sentiments is not None:
self.ntu_in_period1_sentiments = ntu_in_period1_sentiments
if ntu_in_period1_facts is not None:
self.ntu_in_period1_facts = ntu_in_period1_facts
@property
def ntu_period1(self):
"""Gets the ntu_period1 of this PerOperationLimitPeriodic1Response. # noqa: E501
A period when limitations apply # noqa: E501
:return: The ntu_period1 of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: str
"""
return self._ntu_period1
@ntu_period1.setter
def ntu_period1(self, ntu_period1):
"""Sets the ntu_period1 of this PerOperationLimitPeriodic1Response.
A period when limitations apply # noqa: E501
:param ntu_period1: The ntu_period1 of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: str
"""
allowed_values = ["Second", "Minute", "Hour", "Day", "Month"] # noqa: E501
if ntu_period1 not in allowed_values:
raise ValueError(
"Invalid value for `ntu_period1` ({0}), must be one of {1}" # noqa: E501
.format(ntu_period1, allowed_values)
)
self._ntu_period1 = ntu_period1
@property
def ntu_in_period1_limit(self):
"""Gets the ntu_in_period1_limit of this PerOperationLimitPeriodic1Response. # noqa: E501
A maximum number of requests in the specified period # noqa: E501
:return: The ntu_in_period1_limit of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_limit
@ntu_in_period1_limit.setter
def ntu_in_period1_limit(self, ntu_in_period1_limit):
"""Sets the ntu_in_period1_limit of this PerOperationLimitPeriodic1Response.
A maximum number of requests in the specified period # noqa: E501
:param ntu_in_period1_limit: The ntu_in_period1_limit of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_limit = ntu_in_period1_limit
@property
def ntu_in_period1_tokens(self):
"""Gets the ntu_in_period1_tokens of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Text parsing # noqa: E501
:return: The ntu_in_period1_tokens of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_tokens
@ntu_in_period1_tokens.setter
def ntu_in_period1_tokens(self, ntu_in_period1_tokens):
"""Sets the ntu_in_period1_tokens of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Text parsing # noqa: E501
:param ntu_in_period1_tokens: The ntu_in_period1_tokens of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_tokens = ntu_in_period1_tokens
@property
def ntu_in_period1_keywords(self):
"""Gets the ntu_in_period1_keywords of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Keywords extraction # noqa: E501
:return: The ntu_in_period1_keywords of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_keywords
@ntu_in_period1_keywords.setter
def ntu_in_period1_keywords(self, ntu_in_period1_keywords):
"""Sets the ntu_in_period1_keywords of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Keywords extraction # noqa: E501
:param ntu_in_period1_keywords: The ntu_in_period1_keywords of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_keywords = ntu_in_period1_keywords
@property
def ntu_in_period1_entities(self):
"""Gets the ntu_in_period1_entities of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Entities extraction # noqa: E501
:return: The ntu_in_period1_entities of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_entities
@ntu_in_period1_entities.setter
def ntu_in_period1_entities(self, ntu_in_period1_entities):
"""Sets the ntu_in_period1_entities of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Entities extraction # noqa: E501
:param ntu_in_period1_entities: The ntu_in_period1_entities of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_entities = ntu_in_period1_entities
@property
def ntu_in_period1_languages(self):
"""Gets the ntu_in_period1_languages of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Language detection # noqa: E501
:return: The ntu_in_period1_languages of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_languages
@ntu_in_period1_languages.setter
def ntu_in_period1_languages(self, ntu_in_period1_languages):
"""Sets the ntu_in_period1_languages of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Language detection # noqa: E501
:param ntu_in_period1_languages: The ntu_in_period1_languages of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_languages = ntu_in_period1_languages
@property
def ntu_in_period1_sentiments(self):
"""Gets the ntu_in_period1_sentiments of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Sentiments analysis # noqa: E501
:return: The ntu_in_period1_sentiments of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_sentiments
@ntu_in_period1_sentiments.setter
def ntu_in_period1_sentiments(self, ntu_in_period1_sentiments):
"""Sets the ntu_in_period1_sentiments of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Sentiments analysis # noqa: E501
:param ntu_in_period1_sentiments: The ntu_in_period1_sentiments of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_sentiments = ntu_in_period1_sentiments
@property
def ntu_in_period1_facts(self):
"""Gets the ntu_in_period1_facts of this PerOperationLimitPeriodic1Response. # noqa: E501
A number of requests that have already been processed for operation Facts extraction # noqa: E501
:return: The ntu_in_period1_facts of this PerOperationLimitPeriodic1Response. # noqa: E501
:rtype: int
"""
return self._ntu_in_period1_facts
@ntu_in_period1_facts.setter
def ntu_in_period1_facts(self, ntu_in_period1_facts):
"""Sets the ntu_in_period1_facts of this PerOperationLimitPeriodic1Response.
A number of requests that have already been processed for operation Facts extraction # noqa: E501
:param ntu_in_period1_facts: The ntu_in_period1_facts of this PerOperationLimitPeriodic1Response. # noqa: E501
:type: int
"""
self._ntu_in_period1_facts = ntu_in_period1_facts
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PerOperationLimitPeriodic1Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
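# --- Usage sketch (illustration only; the values below are made up) ------------
# The generated model behaves like a plain value object: keyword construction,
# validated setters, dict serialization via to_dict(), structural equality.
def _example_per_operation_limit():
    limit = PerOperationLimitPeriodic1Response(
        ntu_period1='Hour',          # must be one of the setter's allowed_values
        ntu_in_period1_limit=1000,
        ntu_in_period1_tokens=42,
    )
    same = PerOperationLimitPeriodic1Response(
        ntu_period1='Hour',
        ntu_in_period1_limit=1000,
        ntu_in_period1_tokens=42,
    )
    assert limit == same
    return limit.to_dict()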
|
import json
import argparse
from websocket import create_connection
from urllib.parse import urlparse
import tornado.ioloop
import tornado.web
import tornado.websocket
from blockchain import Blockchain, Block
from server import Peer
from const import RESPONSE_BLOCKCHAIN, QUERY_LATEST, QUERY_ALL
parser = argparse.ArgumentParser(
description='Pychain is an implementation of NaiveChain in Python3')
parser.add_argument('--http-port',
action='store',
default=8000,
type=int,
help='http port(default:8000)')
args = parser.parse_args()
blockchain = Blockchain()
peers = []
class BlockHandler(tornado.web.RequestHandler):
def get(self):
json_blockchain = blockchain.to_json()
self.write(json_blockchain)
class MineBlockHandler(tornado.web.RequestHandler):
def post(self):
dic = json.loads(self.request.body)
blockchain.add(data=dic["data"])
resp = {
"type": RESPONSE_BLOCKCHAIN,
"data": [blockchain.blocks[-1].to_dict()]
}
print(resp)
broadcast(resp)
return
class PeerHandler(tornado.web.RequestHandler):
def get(self):
json_peers = json.dumps([str(p) for p in peers])
self.write(json_peers)
class AddPeerHandler(tornado.web.RequestHandler):
def post(self):
dic = json.loads(self.request.body)
url = urlparse(dic["peer"])
try:
sock = create_connection("ws://" + url.hostname + ":" + str(url.port) + "/websocket")
# r = requests.get("http://" + url.hostname + ":" + str(url.port) + "/peers")
p = Peer(url.hostname, url.port, sock)
peers.append(p)
resp = {
'type': QUERY_LATEST,
'host': 'localhost:' + str(args.http_port)
}
print(sock)
p.send(data=resp)
except ConnectionRefusedError:
print("socket connection error")
return
class WebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("open websocket connection")
def on_message(self, message):
message = json.loads(message)
if message["type"] == QUERY_LATEST:
resp = {
'type': RESPONSE_BLOCKCHAIN,
'data': [blockchain.blocks[-1].to_dict()],
}
sock = create_connection("ws://" + message["host"] + "/websocket")
sock.send(json.dumps(resp).encode('utf-8'))
if message["type"] == QUERY_ALL:
resp = {
'type': RESPONSE_BLOCKCHAIN,
'data': blockchain.to_dict(),
}
sock = create_connection("ws://" + message["host"] + "/websocket")
sock.send(json.dumps(resp).encode('utf-8'))
if message["type"] == RESPONSE_BLOCKCHAIN:
handle_blockchain_response(message)
def on_close(self):
print("close websocket connection")
def handle_blockchain_response(message):
received_blocks = sorted(message["data"], key=lambda k: k["index"])
latest_block = received_blocks[-1]
my_latest_block = blockchain.get_latest_block()
if latest_block["index"] > my_latest_block.index:
if(my_latest_block.hash == latest_block["previous_hash"]):
block = Block.make_from_dict(latest_block)
blockchain.add(block=block)
resp = {
'type': RESPONSE_BLOCKCHAIN,
'data': [latest_block]
}
broadcast(resp)
elif(len(received_blocks) == 1):
resp = {
'type': QUERY_ALL,
'host': 'localhost:' + str(args.http_port)
}
broadcast(resp)
else:
blocks = [Block.make_from_dict(b) for b in received_blocks]
blockchain.replace(blocks)
def broadcast(resp):
for p in peers:
p.send(resp)
app = tornado.web.Application([
(r"/blocks", BlockHandler),
(r"/mineBlock", MineBlockHandler),
(r"/peers", PeerHandler),
(r"/addPeer", AddPeerHandler),
(r"/websocket", WebSocket),
])
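# --- Usage sketch (illustration only; assumes this node is listening on
# localhost:8000 and a second node on localhost:8001) ----------------------------
def _example_client_calls():
    """Exercise the HTTP API above using only the standard library."""
    import json as _json
    from urllib.request import Request, urlopen

    base = 'http://localhost:8000'
    # Mine a block on the local node (MineBlockHandler reads {"data": ...}).
    body = _json.dumps({'data': 'hello pychain'}).encode('utf-8')
    urlopen(Request(base + '/mineBlock', data=body))
    # Register a peer; AddPeerHandler parses the host/port out of this URL.
    body = _json.dumps({'peer': 'ws://localhost:8001'}).encode('utf-8')
    urlopen(Request(base + '/addPeer', data=body))
    # Read back the chain and the peer list.
    return urlopen(base + '/blocks').read(), urlopen(base + '/peers').read()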
if __name__ == "__main__":
app.listen(args.http_port)
tornado.ioloop.IOLoop.instance().start()
|
from typing import Dict
def login_admin(admin: Dict) -> bool:
admin_all = [{"username": "renan", "password": "12345"}]
if admin.get("username") and admin.get("password"):
for index in admin_all:
if str(admin.get("username")).lower() == str(index.get("username")).lower():
if admin.get("password") == index.get("password"):
return True
return False
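# --- Usage sketch (illustration only) --------------------------------------------
# login_admin() matches the username case-insensitively but compares the
# password exactly against the hard-coded admin list above.
if __name__ == '__main__':
    assert login_admin({"username": "RENAN", "password": "12345"}) is True
    assert login_admin({"username": "renan", "password": "wrong"}) is False
    assert login_admin({"username": "renan"}) is False  # missing password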
|
"""
Django settings for yatube project.
Generated by 'django-admin startproject' using Django 2.2.19.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5@0*t-w_1u_-a&!418bd5qu49nzly1(5zw3ogbrs^7bxeh&as$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yatube.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yatube.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
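# --- Hardening sketch (illustration only; not enabled in this project) ------------
# The two SECURITY WARNING items above are usually resolved in production by
# reading the values from the environment instead of the source, e.g.:
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == 'True'
#   ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')
#
# The names DJANGO_SECRET_KEY / DJANGO_DEBUG / DJANGO_ALLOWED_HOSTS are
# assumptions for the sketch, not settings this project defines.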
|
import pytest
from capreolus import Benchmark, Task, module_registry
from capreolus.tests.common_fixtures import dummy_index, tmpdir_as_cache
tasks = set(module_registry.get_module_names("task"))
@pytest.mark.parametrize("task_name", tasks)
def test_task_creatable(tmpdir_as_cache, dummy_index, task_name):
provide = {"index": dummy_index, "benchmark": Benchmark.create("dummy"), "collection": dummy_index.collection}
task = Task.create(task_name, provide=provide)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: network_access_profiles_info
short_description: Information module for Network Access Profiles
description:
- Get all Network Access Profiles.
version_added: '1.0.0'
extends_documentation_fragment:
- cisco.ise.module_info
author: Rafael Campos (@racampos)
options: {}
requirements:
- ciscoisesdk >= 1.1.0
- python >= 3.5
seealso:
# Reference by Internet resource
- name: Network Access Profiles reference
description: Complete reference of the Network Access Profiles object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Get all Network Access Profiles
cisco.ise.network_access_profiles_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
register: result
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: list
elements: dict
sample: >
[
{
"id": "string",
"name": "string"
}
]
"""
|
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ... import meta as _meta
from ._inputs import *
__all__ = ['NetworkPolicyInitArgs', 'NetworkPolicy']
@pulumi.input_type
class NetworkPolicyInitArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
spec: Optional[pulumi.Input['NetworkPolicySpecArgs']] = None):
"""
The set of arguments for constructing a NetworkPolicy resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['NetworkPolicySpecArgs'] spec: Specification of the desired behavior for this NetworkPolicy.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'extensions/v1beta1')
if kind is not None:
pulumi.set(__self__, "kind", 'NetworkPolicy')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['NetworkPolicySpecArgs']]:
"""
Specification of the desired behavior for this NetworkPolicy.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['NetworkPolicySpecArgs']]):
pulumi.set(self, "spec", value)
class NetworkPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['NetworkPolicySpecArgs']]] = None,
__props__=None):
"""
DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[pulumi.InputType['NetworkPolicySpecArgs']] spec: Specification of the desired behavior for this NetworkPolicy.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[NetworkPolicyInitArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods
:param str resource_name: The name of the resource.
:param NetworkPolicyInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NetworkPolicyInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
spec: Optional[pulumi.Input[pulumi.InputType['NetworkPolicySpecArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NetworkPolicyInitArgs.__new__(NetworkPolicyInitArgs)
__props__.__dict__["api_version"] = 'extensions/v1beta1'
__props__.__dict__["kind"] = 'NetworkPolicy'
__props__.__dict__["metadata"] = metadata
__props__.__dict__["spec"] = spec
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:networking.k8s.io/v1:NetworkPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NetworkPolicy, __self__).__init__(
'kubernetes:extensions/v1beta1:NetworkPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkPolicy':
"""
Get an existing NetworkPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NetworkPolicyInitArgs.__new__(NetworkPolicyInitArgs)
__props__.__dict__["api_version"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["spec"] = None
return NetworkPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[Optional[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def spec(self) -> pulumi.Output[Optional['outputs.NetworkPolicySpec']]:
"""
Specification of the desired behavior for this NetworkPolicy.
"""
return pulumi.get(self, "spec")
|
from . import local
import unittest
class LocalDestinationTestCase(unittest.TestCase):
def test_out_dir(self):
config = local.Config(out_dir='~/test/')
destination = local.LocalDestination(config)
# Weakly verify out_dir is expanded.
self.assertNotIn('~', destination.out_dir)
if __name__ == '__main__':
unittest.main()
|
import math
from warriorpy.warriorpyGeography.src.relativeDirections import BACKWARD, FORWARD
from .base import AbilityBase
class Attack(AbilityBase):
def perform(self, direction="forward"):
self.verify_direction(direction)
receiver = self.unit(direction)
if receiver:
self._unit.say("attacks %s and hits %s" %
(direction, receiver.__class__.__name__))
if direction == "backward":
power = math.ceil(self._unit.attack_power/2.0)
else:
power = self._unit.attack_power
self.damage(receiver, power)
else:
self._unit.say("attacks %s and hits nothing" % direction)
def description(self):
return "Attacks a unit in given direction (forward by default)."
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SERVER_NAME = os.environ.get('SERVER_NAME') or 'localhost.dev:5000'
SECRET_KEY = os.environ.get('SECRET_KEY') or 'nunca-lo-adivinaras'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir,'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS')
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['your-email@example.com']
BOOTSTRAP_SERVE_LOCAL = True
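# --- Usage sketch (illustration only; assumes Flask is installed) ---------------
# The Config class above is normally handed to the application factory:
def _example_create_app():
    from flask import Flask  # local import: illustration only
    app = Flask(__name__)
    app.config.from_object(Config)
    return app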
|
import collections
import concurrent.futures
import copy
import datetime
import decimal
import functools
import hashlib
import itertools
import json
import os
from contextlib import contextmanager
from enum import Enum
from typing_extensions import Protocol
from typing import (
Tuple, Type, Any, Optional, TypeVar, Dict, Union, Callable, List, Iterator,
Mapping, Iterable, AbstractSet, Set, Sequence
)
import dbt.exceptions
from dbt.node_types import NodeType
DECIMALS: Tuple[Type[Any], ...]
try:
    import cdecimal  # type: ignore
except ImportError:
DECIMALS = (decimal.Decimal,)
else:
DECIMALS = (decimal.Decimal, cdecimal.Decimal)
class ExitCodes(int, Enum):
Success = 0
ModelError = 1
UnhandledError = 2
def coalesce(*args):
for arg in args:
if arg is not None:
return arg
return None
def get_profile_from_project(project):
target_name = project.get('target', {})
profile = project.get('outputs', {}).get(target_name, {})
return profile
def get_model_name_or_none(model):
if model is None:
name = '<None>'
elif isinstance(model, str):
name = model
elif isinstance(model, dict):
name = model.get('alias', model.get('name'))
elif hasattr(model, 'alias'):
name = model.alias
elif hasattr(model, 'name'):
name = model.name
else:
name = str(model)
return name
MACRO_PREFIX = 'dbt_macro__'
DOCS_PREFIX = 'dbt_docs__'
def get_dbt_macro_name(name):
if name is None:
raise dbt.exceptions.InternalException('Got None for a macro name!')
return '{}{}'.format(MACRO_PREFIX, name)
def get_dbt_docs_name(name):
if name is None:
raise dbt.exceptions.InternalException('Got None for a doc name!')
return '{}{}'.format(DOCS_PREFIX, name)
def get_materialization_macro_name(materialization_name, adapter_type=None,
with_prefix=True):
if adapter_type is None:
adapter_type = 'default'
name = 'materialization_{}_{}'.format(materialization_name, adapter_type)
if with_prefix:
return get_dbt_macro_name(name)
else:
return name
def get_docs_macro_name(docs_name, with_prefix=True):
if with_prefix:
return get_dbt_docs_name(docs_name)
else:
return docs_name
def split_path(path):
return path.split(os.sep)
def merge(*args):
if len(args) == 0:
return None
if len(args) == 1:
return args[0]
lst = list(args)
last = lst.pop(len(lst) - 1)
return _merge(merge(*lst), last)
def _merge(a, b):
to_return = a.copy()
to_return.update(b)
return to_return
# http://stackoverflow.com/questions/20656135/python-deep-merge-dictionary-data
def deep_merge(*args):
"""
>>> dbt.utils.deep_merge({'a': 1, 'b': 2, 'c': 3}, {'a': 2}, {'a': 3, 'b': 1}) # noqa
{'a': 3, 'b': 1, 'c': 3}
"""
if len(args) == 0:
return None
if len(args) == 1:
return copy.deepcopy(args[0])
lst = list(args)
last = copy.deepcopy(lst.pop(len(lst) - 1))
return _deep_merge(deep_merge(*lst), last)
def _deep_merge(destination, source):
if isinstance(source, dict):
for key, value in source.items():
deep_merge_item(destination, key, value)
return destination
def deep_merge_item(destination, key, value):
if isinstance(value, dict):
node = destination.setdefault(key, {})
destination[key] = deep_merge(node, value)
elif isinstance(value, tuple) or isinstance(value, list):
if key in destination:
destination[key] = list(value) + list(destination[key])
else:
destination[key] = value
else:
destination[key] = value
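# --- Usage sketch (illustration only) -------------------------------------------
# deep_merge() merges nested dicts key-by-key, while list values from the later
# argument are prepended to the earlier ones (see deep_merge_item above).
def _example_deep_merge():
    merged = deep_merge({'a': {'x': 1}, 'tags': [1, 2]},
                        {'a': {'y': 2}, 'tags': [3]})
    assert merged == {'a': {'x': 1, 'y': 2}, 'tags': [3, 1, 2]}
    return merged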
def _deep_map(
func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
value: Any,
keypath: Tuple[Union[str, int], ...],
) -> Any:
atomic_types: Tuple[Type[Any], ...] = (int, float, str, type(None), bool)
ret: Any
if isinstance(value, list):
ret = [
_deep_map(func, v, (keypath + (idx,)))
for idx, v in enumerate(value)
]
elif isinstance(value, dict):
ret = {
k: _deep_map(func, v, (keypath + (str(k),)))
for k, v in value.items()
}
elif isinstance(value, atomic_types):
ret = func(value, keypath)
else:
container_types: Tuple[Type[Any], ...] = (list, dict)
ok_types = container_types + atomic_types
raise dbt.exceptions.DbtConfigError(
'in _deep_map, expected one of {!r}, got {!r}'
.format(ok_types, type(value))
)
return ret
def deep_map(
func: Callable[[Any, Tuple[Union[str, int], ...]], Any],
value: Any
) -> Any:
"""map the function func() onto each non-container value in 'value'
recursively, returning a new value. As long as func does not manipulate
value, then deep_map will also not manipulate it.
value should be a value returned by `yaml.safe_load` or `json.load` - the
only expected types are list, dict, native python number, str, NoneType,
and bool.
func() will be called on numbers, strings, Nones, and booleans. Its first
parameter will be the value, and the second will be its keypath, an
iterable over the __getitem__ keys needed to get to it.
:raises: If there are cycles in the value, raises a
dbt.exceptions.RecursionException
"""
try:
return _deep_map(func, value, ())
except RuntimeError as exc:
if 'maximum recursion depth exceeded' in str(exc):
raise dbt.exceptions.RecursionException(
'Cycle detected in deep_map'
)
raise
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def get_pseudo_test_path(node_name, source_path, test_type):
"schema tests all come from schema.yml files. fake a source sql file"
source_path_parts = split_path(source_path)
source_path_parts.pop() # ignore filename
suffix = [test_type, "{}.sql".format(node_name)]
pseudo_path_parts = source_path_parts + suffix
return os.path.join(*pseudo_path_parts)
def get_pseudo_hook_path(hook_name):
path_parts = ['hooks', "{}.sql".format(hook_name)]
return os.path.join(*path_parts)
def md5(string):
return hashlib.md5(string.encode('utf-8')).hexdigest()
def get_hash(model):
return hashlib.md5(model.unique_id.encode('utf-8')).hexdigest()
def get_hashed_contents(model):
return hashlib.md5(model.raw_sql.encode('utf-8')).hexdigest()
def flatten_nodes(dep_list):
return list(itertools.chain.from_iterable(dep_list))
class memoized:
'''Decorator. Caches a function's return value each time it is called. If
called later with the same arguments, the cached value is returned (not
reevaluated).
Taken from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.abc.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
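# --- Usage sketch (illustration only) -------------------------------------------
# memoized caches by positional arguments; a second call with the same arguments
# is served from the cache without re-running the body.
def _example_memoized():
    calls = []

    @memoized
    def square(n):
        calls.append(n)
        return n * n

    square(4)
    square(4)            # cache hit; the wrapped function does not run again
    return calls == [4]  # True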
K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
def filter_null_values(input: Dict[K_T, Optional[V_T]]) -> Dict[K_T, V_T]:
return {k: v for k, v in input.items() if v is not None}
def add_ephemeral_model_prefix(s: str) -> str:
return '__dbt__cte__{}'.format(s)
def timestring() -> str:
"""Get the current datetime as an RFC 3339-compliant string"""
# isoformat doesn't include the mandatory trailing 'Z' for UTC.
return datetime.datetime.utcnow().isoformat() + 'Z'
class JSONEncoder(json.JSONEncoder):
"""A 'custom' json encoder that does normal json encoder things, but also
handles `Decimal`s. Naturally, this can lose precision because they get
converted to floats.
"""
def default(self, obj):
if isinstance(obj, DECIMALS):
return float(obj)
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
if hasattr(obj, 'to_dict'):
# if we have a to_dict we should try to serialize the result of
# that!
return obj.to_dict(omit_none=True)
return super().default(obj)
class ForgivingJSONEncoder(JSONEncoder):
def default(self, obj):
# let dbt's default JSON encoder handle it if possible, fallback to
# str()
try:
return super().default(obj)
except TypeError:
return str(obj)
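# --- Usage sketch (illustration only) -------------------------------------------
# The custom encoder above turns Decimals into floats and date/time objects into
# ISO strings; ForgivingJSONEncoder additionally falls back to str() for anything
# it still cannot encode.
def _example_json_encoding():
    payload = {'amount': decimal.Decimal('1.5'), 'when': datetime.date(2021, 1, 1)}
    return json.dumps(payload, cls=JSONEncoder)
    # -> '{"amount": 1.5, "when": "2021-01-01"}'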
class Translator:
def __init__(self, aliases: Mapping[str, str], recursive: bool = False):
self.aliases = aliases
self.recursive = recursive
def translate_mapping(
self, kwargs: Mapping[str, Any]
) -> Dict[str, Any]:
result: Dict[str, Any] = {}
for key, value in kwargs.items():
canonical_key = self.aliases.get(key, key)
if canonical_key in result:
dbt.exceptions.raise_duplicate_alias(
kwargs, self.aliases, canonical_key
)
result[canonical_key] = self.translate_value(value)
return result
def translate_sequence(self, value: Sequence[Any]) -> List[Any]:
return [self.translate_value(v) for v in value]
def translate_value(self, value: Any) -> Any:
if self.recursive:
if isinstance(value, Mapping):
return self.translate_mapping(value)
elif isinstance(value, (list, tuple)):
return self.translate_sequence(value)
return value
def translate(self, value: Mapping[str, Any]) -> Dict[str, Any]:
try:
return self.translate_mapping(value)
except RuntimeError as exc:
if 'maximum recursion depth exceeded' in str(exc):
raise dbt.exceptions.RecursionException(
'Cycle detected in a value passed to translate!'
)
raise
def translate_aliases(
kwargs: Dict[str, Any], aliases: Dict[str, str], recurse: bool = False,
) -> Dict[str, Any]:
"""Given a dict of keyword arguments and a dict mapping aliases to their
canonical values, canonicalize the keys in the kwargs dict.
If recurse is True, perform this operation recursively.
:return: A dict containing all the values in kwargs referenced by their
canonical key.
:raises: `AliasException`, if a canonical key is defined more than once.
"""
translator = Translator(aliases, recurse)
return translator.translate(kwargs)
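# --- Usage sketch (illustration only; the alias mapping below is made up) ---------
# Alias keys are rewritten to their canonical names; passing both an alias and
# its canonical form raises a duplicate-alias error.
def _example_translate_aliases():
    aliases = {'pre-hook': 'pre_hook'}
    out = translate_aliases({'pre-hook': ['select 1'], 'tags': ['x']}, aliases)
    assert out == {'pre_hook': ['select 1'], 'tags': ['x']}
    return out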
def _pluralize(string: Union[str, NodeType]) -> str:
try:
convert = NodeType(string)
except ValueError:
return f'{string}s'
else:
return convert.pluralize()
def pluralize(count, string: Union[str, NodeType]):
pluralized: str = str(string)
if count != 1:
pluralized = _pluralize(string)
return f'{count} {pluralized}'
def restrict_to(*restrictions):
"""Create the metadata for a restricted dataclass field"""
return {'restrict': list(restrictions)}
def coerce_dict_str(value: Any) -> Optional[Dict[str, Any]]:
"""For annoying mypy reasons, this helper makes dealing with nested dicts
easier. You get either `None` if it's not a Dict[str, Any], or the
Dict[str, Any] you expected (to pass it to dbtClassMixin.from_dict(...)).
"""
if (isinstance(value, dict) and all(isinstance(k, str) for k in value)):
return value
else:
return None
def lowercase(value: Optional[str]) -> Optional[str]:
if value is None:
return None
else:
return value.lower()
# some types need to make constants available to the jinja context as
# attributes, and regular properties only work with objects. maybe this should
# be handled by the RelationProxy?
class classproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype):
return self.func(objtype)
def format_bytes(num_bytes):
for unit in ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB']:
if abs(num_bytes) < 1024.0:
return f"{num_bytes:3.1f} {unit}"
num_bytes /= 1024.0
num_bytes *= 1024.0
return f"{num_bytes:3.1f} {unit}"
def format_rows_number(rows_number):
for unit in ['', 'k', 'm', 'b', 't']:
if abs(rows_number) < 1000.0:
return f"{rows_number:3.1f}{unit}".strip()
rows_number /= 1000.0
rows_number *= 1000.0
return f"{rows_number:3.1f}{unit}".strip()
class ConnectingExecutor(concurrent.futures.Executor):
def submit_connected(self, adapter, conn_name, func, *args, **kwargs):
def connected(conn_name, func, *args, **kwargs):
with self.connection_named(adapter, conn_name):
return func(*args, **kwargs)
return self.submit(connected, conn_name, func, *args, **kwargs)
# a little concurrent.futures.Executor for single-threaded mode
class SingleThreadedExecutor(ConnectingExecutor):
def submit(*args, **kwargs):
# this basic pattern comes from concurrent.futures.Executor itself,
# but without handling the `fn=` form.
if len(args) >= 2:
self, fn, *args = args
elif not args:
raise TypeError(
"descriptor 'submit' of 'SingleThreadedExecutor' object needs "
"an argument"
)
else:
raise TypeError(
'submit expected at least 1 positional argument, '
'got %d' % (len(args) - 1)
)
fut = concurrent.futures.Future()
try:
result = fn(*args, **kwargs)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(result)
return fut
@contextmanager
def connection_named(self, adapter, name):
yield
class MultiThreadedExecutor(
ConnectingExecutor,
concurrent.futures.ThreadPoolExecutor,
):
@contextmanager
def connection_named(self, adapter, name):
with adapter.connection_named(name):
yield
class ThreadedArgs(Protocol):
single_threaded: bool
class HasThreadingConfig(Protocol):
args: ThreadedArgs
threads: Optional[int]
def executor(config: HasThreadingConfig) -> ConnectingExecutor:
if config.args.single_threaded:
return SingleThreadedExecutor()
else:
return MultiThreadedExecutor(max_workers=config.threads)
def fqn_search(
root: Dict[str, Any], fqn: List[str]
) -> Iterator[Dict[str, Any]]:
"""Iterate into a nested dictionary, looking for keys in the fqn as levels.
Yield the level config.
"""
yield root
for level in fqn:
level_config = root.get(level, None)
if not isinstance(level_config, dict):
break
# This used to do a 'deepcopy',
# but it didn't seem to be necessary
yield level_config
root = level_config
StringMap = Mapping[str, Any]
StringMapList = List[StringMap]
StringMapIter = Iterable[StringMap]
class MultiDict(Mapping[str, Any]):
"""Implement the mapping protocol using a list of mappings. The most
recently added mapping "wins".
"""
def __init__(self, sources: Optional[StringMapList] = None) -> None:
super().__init__()
self.sources: StringMapList
if sources is None:
self.sources = []
else:
self.sources = sources
def add_from(self, sources: StringMapIter):
self.sources.extend(sources)
def add(self, source: StringMap):
self.sources.append(source)
def _keyset(self) -> AbstractSet[str]:
# return the set of keys
keys: Set[str] = set()
for entry in self._itersource():
keys.update(entry)
return keys
def _itersource(self) -> StringMapIter:
return reversed(self.sources)
def __iter__(self) -> Iterator[str]:
# we need to avoid duplicate keys
return iter(self._keyset())
def __len__(self):
return len(self._keyset())
def __getitem__(self, name: str) -> Any:
for entry in self._itersource():
if name in entry:
return entry[name]
raise KeyError(name)
def __contains__(self, name) -> bool:
return any((name in entry for entry in self._itersource()))
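# --- Usage sketch (illustration only) ---------------------------------------------
# The most recently added mapping wins on key conflicts, while unique keys from
# every source stay visible through the Mapping protocol.
def _example_multidict():
    md = MultiDict([{'a': 1, 'b': 2}])
    md.add({'a': 10})
    assert md['a'] == 10 and md['b'] == 2
    assert set(md) == {'a', 'b'}
    return dict(md)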
|
import sys
def is_one(x):
return x == 1 or x == 1.0 or x == '1' or x == '1.0'
def is_negone(x):
return x == -1 or x == -1.0 or x == '-1' or x == '-1.0'
def is_nonzero(x):
return x != 0 and x != 0.0 and x != -0.0 and x != '0' and x != '0.0' and x != '-0.0'
def contain_nontrivial( coeffs ):
for coeff in coeffs:
if ( ( not is_one( coeff ) ) and ( not is_negone( coeff ) ) and ( is_nonzero( coeff ) ) ):
return True
return False
def write_line(myfile, num_indent, code):
''' Write the line of code with num_indent number of indents. '''
myfile.write(' ' * 4 * num_indent + code + '\n')
def write_break(myfile, num_breaks=1):
''' Write a break (new line) in the file myfile. '''
myfile.write('\n' * num_breaks)
def data_access( mat_name, ind="" ):
return '%s[ i + j * ld%s ]' % ( mat_name + str(ind), mat_name )
def transpose(coeffs):
''' Given a list of rows, return a list of columns. '''
return [[x[i] for x in coeffs] for i in range(len(coeffs[0]))]
#def transpose( twodim_list ):
# result_list = []
# for jj in range( len( twodim_list[ 0 ] ) ):
# cur_list = []
# for ii in range( len( twodim_list ) ):
# cur_list.append( int(twodim_list[ ii ][ jj ]) )
# #cur_list.append( twodim_list[ ii ][ jj ] )
# result_list.append( cur_list )
#
# return result_list
#def transpose( twodim_list ):
# result_list = []
# for jj in range( len( twodim_list[ 0 ] ) ):
# #cur_list = []
# #for row in twodim_list:
# # cur_list.append( row[ jj ] )
# #result_list.append( cur_list )
# result_list.append( [ row[ jj ] for row in twodim_list ] )
#
# return result_list
#def transpose( twodim_list ):
# return [[row[i] for row in twodim_list] for i in range( len( twodim_list[ 0 ] ) )]
def printmat( X ):
for jj in range( len(X[0]) ):
mystr = ""
for ii in range( len(X) ):
#mystr += '{:04.2f}'.format( float(X[ii][jj]) ) + " "
mystr += '%5.2f' % ( float(X[ii][jj]) ) + " "
        print( mystr )
def writeCoeffs( coeffs ):
U = transpose( coeffs[ 0 ] )
V = transpose( coeffs[ 1 ] )
W = transpose( coeffs[ 2 ] )
print ( "U:" )
printmat( U )
print ( "V:" )
printmat( V )
print ( "W:" )
printmat( W )
print ""
def genSubmatID( submat_id_queue, split_num ):
res_submat_id_queue = []
for elem in submat_id_queue:
for idx in range( split_num ):
res_submat_id_queue.append( elem + '_' + str(idx) )
return res_submat_id_queue
# composition operation?
def phantomMatMul( A, B ):
m_A = len( A[0] )
n_A = len( A )
m_B = len( B[0] )
n_B = len( B )
m_C = m_A * m_B
n_C = n_A * n_B
C = [ [0 for x in range( m_C )] for y in range( n_C ) ]
#print C
for colid_A in range( n_A ):
vec_A = A[ colid_A ]
for rowid_A in range( m_A ):
elem_A = vec_A[ rowid_A ]
if ( elem_A != 0 ):
for colid_B in range( n_B ):
vec_B = B[ colid_B ]
for rowid_B in range( m_B ):
elem_B = vec_B[ rowid_B ]
if ( elem_B != 0 ):
rowid_C = rowid_A * m_B + rowid_B
colid_C = colid_A * n_B + colid_B
elem_C = str( float(elem_A) * float(elem_B) )
C[ colid_C ][ rowid_C ] = elem_C
#print C
return C
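# --- Usage sketch (illustration only) ----------------------------------------------
# phantomMatMul() composes two column-major coefficient matrices with the
# Kronecker-product index pattern (rowid_C = rowid_A * m_B + rowid_B, and the
# analogous rule for columns), which is how two levels of a fast matrix-multiply
# algorithm are nested.
def _example_phantom_mat_mul():
    A = [[1, 0], [0, 1]]   # 2x2 identity, stored as a list of columns
    B = [[2, 0], [0, 3]]
    C = phantomMatMul( A, B )
    assert C[0][0] == '2.0' and C[3][3] == '3.0'   # nonzero entries become strings
    return C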
def parse_coeff(coeff):
''' Parse a coefficient. The grammar is:
* --> *i | -* | *p | [a-z] | [floating point number]
p --> 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
*i --> 1 / (*)
-* --> -(*)
*p --> (*)^p
So -x2i is parsed as - (1 / ((x)^2))
'''
coeff = coeff.strip()
# First try to convert to float
    try:
        float(coeff)
        return coeff
    except ValueError:
        pass
# Parameterized coefficient
if len(coeff) == 1:
# Coeff is like 'x'. We will use 'x' instead of whatever is provided.
# For now, this means that we only support one paramterized coefficient.
return 'x'
elif coeff[0] == '(':
assert(coeff[-1] == ')')
expr = coeff[1:-1].split('+')
return '(' + ' + '.join([parse_coeff(e) for e in expr]) + ')'
elif coeff[0] == '-':
return '-(%s)' % parse_coeff(coeff[1:])
elif coeff[-1] == 'i':
return '1.0 / (%s)' % parse_coeff(coeff[:-1])
else:
# Test for a multiplier out in front
try:
mult = float(coeff[0])
return '%s * (%s)' % (mult, parse_coeff(coeff[1:]))
        except ValueError:
pass
# Test for an exponent
try:
exp = int(coeff[-1])
            return ' * '.join([parse_coeff(coeff[:-1]) for i in range(exp)])
except:
raise Exception('Cannot parse coefficient: %s' % coeff)
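# --- Usage sketch (illustration only) -----------------------------------------------
# A few coefficients and the expression strings parse_coeff() expands them to:
def _example_parse_coeff():
    assert parse_coeff( '0.5' ) == '0.5'
    assert parse_coeff( 'x2' ) == 'x * x'
    assert parse_coeff( '-x2i' ) == '-(1.0 / (x * x))'
    return parse_coeff( '-x2i' )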
def read_coeffs(filename):
''' Read the coefficient file. There is one group of coefficients for each
of the three matrices.
filename is the name of the file from which coefficients are read
'''
coeffs = []
with open(filename, 'r') as coeff_file:
curr_group = []
for line in coeff_file:
if line[0] == '#':
if len(curr_group) > 0:
coeffs.append(curr_group)
curr_group = []
else:
curr_group.append([parse_coeff(val) for val in line.split()])
coeffs.append(curr_group)
# There should be three sets of coefficients: one for each matrix.
if (len(coeffs) < 3):
raise Exception('Expected three sets of coefficients!')
return coeffs
def writeFMM( myfile, coeffs, dims, level=1 ):
print "eq_num:"+str(len( coeffs[0][0] ))
print "coeff_num:"+str(len( coeffs ))
for eq_index in range( len( coeffs[0][0] ) ):
for coeff_index in range( len(coeffs) ):
#print "coeff_index:" + str(coeff_index)
name_list = getName( coeff_index ) # 0: a, gamma; 1: b, delta; 2: c, alpha
coeff_list = transpose( coeffs[ coeff_index ] )
my_eq_coeff_list = coeff_list[ eq_index ]
write_line( myfile, 0, "len_{0} = {1};".format( name_list[ 0 ], str(sum([ abs(int(elem_coeff)) for elem_coeff in my_eq_coeff_list ])) ) )
nz_index = 0
for item_index in range( len(my_eq_coeff_list) ):
if ( my_eq_coeff_list[ item_index ] != 0 ):
#write_line( myfile, 0, str( coeff_index ) + " " + str( item_index ) )
write_line( myfile, 0, "{0}_list[{1}] = {2}; {3}_list[{1}] = {4};".format( name_list[0], str(nz_index), getBlockName( coeff_index, item_index, dims, level ), name_list[1], my_eq_coeff_list[ item_index ] ) )
nz_index += 1
write_line( myfile, 0,
"""bl_dgemm_str_abc( ms, ns, ks,
len_a, lda,
a_list, gamma_list,
len_b, ldb,
b_list, delta_list,
len_c, ldc,
c_list, alpha_list,
packA, packB, bl_ic_nt
);""" )
write_break( myfile )
def writeSubmat( myfile, mat_name, dim1, dim2, split1, split2, src_mat_id ):
if mat_name == 'C':
decl = "T *"
else:
decl = "const T *"
sep_symbol = ""
for ii in range( split1 ):
for jj in range( split2 ):
decl+=sep_symbol+mat_name+str(src_mat_id)+'_'+str(ii * split2 + jj)
sep_symbol=", *"
decl+=";"
write_line( myfile, 1, decl )
for ii in range( split1 ):
for jj in range( split2 ):
#write_line( myfile, 1, "stra_acquire_mpart( {0}, {1}, {2}, ld{3}, {4}, {5}, {6}, {7}, &{2}{8} );".format( dim1, dim2, mat_name+str(src_mat_id), mat_name, split1, split2, ii, jj, '_'+str(ii * split2 + jj) ) )
write_line( myfile, 1, "stra_acquire_tpart( my_sub_len_{0}, my_sub_len_{1}, my_stride_{2}_{0}, my_stride_{2}_{1}, {5}, {6}, {7}, {8}, {2}{3}, &{2}{3}{4} );".format( dim1, dim2, mat_name, str(src_mat_id), '_'+str(ii * split2 + jj), split1, split2, ii, jj ) )
def exp_dim( dims, level ):
res = [ 1, 1, 1 ]
for i in range( level ):
res[ 0 ] = res[ 0 ] * dims[ 0 ]
res[ 1 ] = res[ 1 ] * dims[ 1 ]
res[ 2 ] = res[ 2 ] * dims[ 2 ]
return tuple( res )
def writePartition( myfile, dims, level=1 ):
#write_line( myfile, 0, "assert(m % {0} == 0);".format( dims[0] ) );
#write_line( myfile, 0, "assert(k % {0} == 0);".format( dims[1] ) );
#write_line( myfile, 0, "assert(n % {0} == 0);".format( dims[2] ) );
write_line( myfile, 1, "std::vector<len_type> my_sub_len_AB;" )
write_line( myfile, 1, "std::vector<len_type> my_sub_len_AC;" )
write_line( myfile, 1, "std::vector<len_type> my_sub_len_BC;" )
#write_line( myfile, 1, "double *a = XA, *b= XB, *c = XC;" )
write_break( myfile )
level_dim = exp_dim( dims, level )
##write_line( myfile, 1, "mr = m %% ( %d * DGEMM_MR ), kr = k %% ( %d ), nr = n %% ( %d * DGEMM_NR );" % ( level_dim[0], level_dim[1], level_dim[2] ) )
#write_line( myfile, 1, "mr = m %% ( %d ), kr = k %% ( %d ), nr = n %% ( %d );" % ( level_dim[0], level_dim[1], level_dim[2] ) )
#write_line( myfile, 1, "md = m - mr, kd = k - kr, nd = n - nr;" )
write_break( myfile )
triple_combinations = [
( "A", "AC", "AB", dims[0], dims[1] ),
( "B", "AB", "BC", dims[1], dims[2] ),
( "C", "AC", "BC", dims[0], dims[2] )
]
for ( mat_name, dim1, dim2, split1, split2 ) in triple_combinations:
write_line( myfile, 1, "my_sub_len_AB = my_len_AB;" )
write_line( myfile, 1, "my_sub_len_AC = my_len_AC;" )
write_line( myfile, 1, "my_sub_len_BC = my_len_BC;" )
submat_id_queue = [""]
for level_id in range( level ):
for src_mat_id in submat_id_queue:
writeSubmat( myfile, mat_name, dim1, dim2, split1, split2, src_mat_id )
            # Generate the next-level submatrix id queue
submat_id_queue = genSubmatID( submat_id_queue, split1 * split2 )
# Get the current submat size
if ( level_id != level - 1 ):
#write_line( myfile, 1, "ms=ms/{0}, ks=ks/{1}, ns=ns/{2};".format( dims[0], dims[1], dims[2] ) )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_AB, 2 );" )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_AC, 2 );" )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_BC, 2 );" )
#write_break( myfile )
write_break( myfile )
#write_line( myfile, 1, "ms=ms/{0}, ks=ks/{1}, ns=ns/{2};".format( dims[0], dims[1], dims[2] ) )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_AB, 2 );" )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_AC, 2 );" )
write_line( myfile, 1, "stra_divide_vector( my_sub_len_BC, 2 );" )
write_break( myfile )
def getActualMatName( idx ):
if ( idx == 0 ):
matname = "A"
elif( idx == 1 ):
matname = "B"
elif( idx == 2 ):
matname = "C"
else:
print "Not supported!\n"
return matname
def getActualBlockName( coeff_index, item_index, dims, level=1 ):
my_mat_name = getActualMatName( coeff_index )
if( coeff_index == 0 ):
mm = dims[0]
nn = dims[1]
elif( coeff_index == 1 ):
mm = dims[1]
nn = dims[2]
elif( coeff_index == 2 ):
mm = dims[0]
nn = dims[2]
else:
print "Wrong coeff_index\n"
#my_partition_ii = item_index / nn
#my_partition_jj = item_index % nn
submat_index = ""
dividend = item_index
mm_base = 1
nn_base = 1
ii_index = 0
jj_index = 0
for level_index in range( level ):
remainder = dividend % ( mm * nn )
#remainder -> i, j (m_axis, n_axis)
ii = remainder / nn
jj = remainder % nn
ii_index = ii * mm_base + ii_index
jj_index = jj * nn_base + jj_index
#submat_index = str(remainder) + submat_index
dividend = dividend / ( mm * nn )
mm_base = mm_base * mm
nn_base = nn_base * nn
return my_mat_name + "(" + str( ii_index ) + "," + str( jj_index ) + ")"
def writeEquation( coeffs, dims, level ):
for eq_index in range( len( coeffs[0][0] ) ):
m_mat_name = "M"+str(eq_index)
my_eq_str = ""
for coeff_index in range( len(coeffs) ):
#print "coeff_index:" + str(coeff_index)
name_list = getName( coeff_index ) # 0: a, gamma; 1: b, delta; 2: c, alpha
coeff_list = transpose( coeffs[ coeff_index ] )
my_eq_coeff_list = coeff_list[ eq_index ]
if ( coeff_index == 0 ): #A
my_eq_str = my_eq_str + m_mat_name + "=( "
elif ( coeff_index == 1 ): #B
my_eq_str = my_eq_str + " )( "
elif ( coeff_index == 2 ): #C
my_eq_str += " );\n "
else:
print "Coeff_index not supported!\n"
nz_index = 0
for item_index in range( len(my_eq_coeff_list) ):
if ( is_nonzero( my_eq_coeff_list[ item_index ] ) ):
mat_name = getActualBlockName( coeff_index, item_index, dims, level )
if ( coeff_index == 0 or coeff_index == 1 ): # A or B
mat_prefix = ""
if ( is_negone( my_eq_coeff_list[ item_index ] ) ):
mat_prefix = "-"
elif ( is_one( my_eq_coeff_list[ item_index ] ) ):
if ( nz_index == 0 ):
mat_prefix = ""
else:
mat_prefix = "+"
else:
mat_prefix = "+(" + str( my_eq_coeff_list[ item_index ] )+")"
#print "%d:%s" % ( item_index, my_eq_coeff_list[ item_index ] )
#print "entry should be either 1 or -1!"
my_eq_str += mat_prefix + mat_name
elif ( coeff_index == 2 ):
mat_suffix = ""
if ( is_negone( my_eq_coeff_list[ item_index ] ) ):
mat_suffix = "-="
elif ( is_one( my_eq_coeff_list[ item_index ] ) ):
mat_suffix = "+="
else:
mat_suffix = "+=(" + str( my_eq_coeff_list[ item_index ] ) + ") "
#print "%d:%s" % ( item_index, my_eq_coeff_list[ item_index ] )
#print "entry should be either 1 or -1!"
my_eq_str += mat_name + mat_suffix + m_mat_name + ";"
else:
print "Coeff_index not support!\n"
#write_line( myfile, 0, str( coeff_index ) + " " + str( item_index ) )
#write_line( myfile, 0, "{0}_list[{1}] = {2}; {3}_list[{1}] = {4};".format( name_list[0], str(nz_index), getBlockName( coeff_index, item_index, dims, level ), name_list[1], my_eq_coeff_list[ item_index ] ) )
nz_index += 1
print my_eq_str
#print ""
def num_nonzero(arr):
''' Returns number of non-zero entries in the array arr. '''
return len(filter(is_nonzero, arr))
def getSubMatName( coeff_index, item_index, dims, level=1 ):
my_mat_name = (getName( coeff_index )) [ 0 ]
if( coeff_index == 0 ):
mm = dims[0]
nn = dims[1]
elif( coeff_index == 1 ):
mm = dims[1]
nn = dims[2]
elif( coeff_index == 2 ):
mm = dims[0]
nn = dims[2]
else:
print "Wrong coeff_index\n"
#my_partition_ii = item_index / nn
#my_partition_jj = item_index % nn
submat_index = ""
dividend = item_index
for ii in range( level ):
remainder = dividend % ( mm * nn )
submat_index = str(remainder) + submat_index
#submat_index = submat_index + str(remainder)
dividend = dividend / ( mm * nn )
return my_mat_name + str( submat_index )
def getBlockName( coeff_index, item_index, dims, level=1 ):
my_mat_name = (getName( coeff_index )) [ 0 ]
if( coeff_index == 0 ):
mm = dims[0]
nn = dims[1]
elif( coeff_index == 1 ):
mm = dims[1]
nn = dims[2]
elif( coeff_index == 2 ):
mm = dims[0]
nn = dims[2]
else:
print "Wrong coeff_index\n"
#my_partition_ii = item_index / nn
#my_partition_jj = item_index % nn
submat_index = ""
dividend = item_index
for ii in range( level ):
remainder = dividend % ( mm * nn )
submat_index = '_' + str(remainder) + submat_index
#submat_index = submat_index + str(remainder)
dividend = dividend / ( mm * nn )
return my_mat_name + str( submat_index )
def getName( idx ):
if ( idx == 0 ):
my_list = [ 'A', 'gamma' ]
elif( idx == 1 ):
my_list = [ 'B', 'delta' ]
elif( idx == 2 ):
my_list = [ 'C', 'alpha' ]
else:
my_list = []
print "Not supported!\n"
return my_list
def generateCoeffs( coeffs, level ):
U = transpose( coeffs[ 0 ] )
V = transpose( coeffs[ 1 ] )
W = transpose( coeffs[ 2 ] )
UM = U
VM = V
WM = W
for ii in range( level - 1 ):
UM = phantomMatMul( UM, U )
VM = phantomMatMul( VM, V )
WM = phantomMatMul( WM, W )
#print ( "U2:" )
#printmat( U2 )
#print ( "V2:" )
#printmat( V2 )
#print ( "W2:" )
#printmat( W2 )
res_coeffs = [ transpose( UM ), transpose( VM ), transpose( WM ) ]
return res_coeffs
|
_base_ = [
'../../lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
]
# data = dict(train=dict(oversample_thr=0.0))
# model = dict(roi_head=dict(bbox_head=dict(loss_cls=dict(type="Icloglog",activation='normal'),
# init_cfg = dict(type='Constant',val=0.01, bias=-3.45, override=dict(name='fc_cls')))))
model = dict(roi_head=dict(bbox_head=dict(loss_cls=dict(type="Icloglog",activation='gumbel'),
init_cfg = dict(type='Constant',val=0.001, bias=-2, override=dict(name='fc_cls')))))
# work_dir='./experiments/gumbel_debug/'
work_dir='./experiments/test/'
# get_stats=1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfLocal, SkipIfGCS
from tests.util.filesystem_utils import IS_ISILON, WAREHOUSE
from tests.util.hdfs_util import (
HdfsConfig,
get_webhdfs_client,
get_webhdfs_client_from_conf)
TEST_TBL = "insert_inherit_permission"
@SkipIfS3.hdfs_acls
@SkipIfGCS.hdfs_acls
@SkipIfABFS.hdfs_acls
@SkipIfADLS.hdfs_acls
class TestInsertBehaviourCustomCluster(CustomClusterTestSuite):
@classmethod
def setup_class(cls):
super(TestInsertBehaviourCustomCluster, cls).setup_class()
if pytest.config.option.namenode_http_address is None:
hdfs_conf = HdfsConfig(pytest.config.option.minicluster_xml_conf)
cls.hdfs_client = get_webhdfs_client_from_conf(hdfs_conf)
else:
host, port = pytest.config.option.namenode_http_address.split(":")
cls.hdfs_client = get_webhdfs_client(host, port)
def _check_partition_perms(self, part, perms):
ls = self.hdfs_client.get_file_dir_status("test-warehouse/%s/%s" % (TEST_TBL, part))
assert ls['FileStatus']['permission'] == perms
def _get_impala_client(self):
impalad = self.cluster.get_any_impalad()
return impalad.service.create_beeswax_client()
def _create_test_tbl(self):
client = self._get_impala_client()
options = {'sync_ddl': '1'}
try:
self.execute_query_expect_success(client, "DROP TABLE IF EXISTS %s" % TEST_TBL,
query_options=options)
self.execute_query_expect_success(client,
"CREATE TABLE {0} (col int) PARTITIONED"
" BY (p1 int, p2 int, p3 int) location"
" '{1}/{0}'".format(TEST_TBL, WAREHOUSE),
query_options=options)
self.execute_query_expect_success(client, "ALTER TABLE %s"
" ADD PARTITION(p1=1, p2=1, p3=1)" % TEST_TBL,
query_options=options)
finally:
client.close()
def _drop_test_tbl(self):
client = self._get_impala_client()
self.execute_query_expect_success(client, "drop table if exists %s" % TEST_TBL)
client.close()
def setup_method(cls, method):
super(TestInsertBehaviourCustomCluster, cls).setup_method(method)
cls._create_test_tbl()
def teardown_method(cls, method):
cls._drop_test_tbl()
super(TestInsertBehaviourCustomCluster, cls).teardown_method(method)
@SkipIfLocal.hdfs_client
@SkipIfLocal.root_path
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args("--insert_inherit_permissions=true")
def test_insert_inherit_permission(self):
"""Create a table with three partition columns to test permission inheritance"""
client = self._get_impala_client()
try:
self.hdfs_client.chmod("test-warehouse/%s/p1=1/" % TEST_TBL, "777")
# 1. INSERT that creates two new directories gets permissions from parent
self.execute_query_expect_success(client, "INSERT INTO %s"
" PARTITION(p1=1, p2=2, p3=2) VALUES(1)" % TEST_TBL)
self._check_partition_perms("p1=1/p2=2/", "777")
self._check_partition_perms("p1=1/p2=2/p3=2/", "777")
# 2. INSERT that creates one new directory gets permissions from parent
self.execute_query_expect_success(client, "INSERT INTO %s"
" PARTITION(p1=1, p2=2, p3=3) VALUES(1)" % TEST_TBL)
self._check_partition_perms("p1=1/p2=2/p3=3/", "777")
# 3. INSERT that creates no new directories keeps standard permissions
self.hdfs_client.chmod("test-warehouse/%s/p1=1/p2=2" % TEST_TBL, "744")
self.execute_query_expect_success(client, "INSERT INTO %s"
" PARTITION(p1=1, p2=2, p3=3) VALUES(1)" % TEST_TBL)
self._check_partition_perms("p1=1/p2=2/", "744")
self._check_partition_perms("p1=1/p2=2/p3=3/", "777")
finally:
client.close()
@SkipIfLocal.hdfs_client
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args("--insert_inherit_permissions=false")
def test_insert_inherit_permission_disabled(self):
"""Check that turning off insert permission inheritance works correctly."""
impalad = self.cluster.get_any_impalad()
client = impalad.service.create_beeswax_client()
try:
ls = self.hdfs_client.get_file_dir_status("test-warehouse/%s/p1=1/" % TEST_TBL)
default_perms = ls['FileStatus']['permission']
self.hdfs_client.chmod("test-warehouse/%s/p1=1/" % TEST_TBL, "777")
self.execute_query_expect_success(client, "INSERT INTO %s"
" PARTITION(p1=1, p2=3, p3=4) VALUES(1)" % TEST_TBL)
# Would be 777 if inheritance was enabled
if not IS_ISILON: # IMPALA-4221
self._check_partition_perms("p1=1/p2=3/", default_perms)
self._check_partition_perms("p1=1/p2=3/p3=4/", default_perms)
finally:
client.close()
|
"""Structured representations of system events."""
import logging
import os
from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions import (
AssetMaterialization,
EventMetadataEntry,
ExpectationResult,
Materialization,
SolidHandle,
TypeCheck,
)
from dagster.core.definitions.events import AssetStoreOperationType, ObjectStoreOperationType
from dagster.core.execution.context.system import (
HookContext,
SystemExecutionContext,
SystemStepExecutionContext,
)
from dagster.core.execution.plan.objects import StepOutputData
from dagster.core.log_manager import DagsterLogManager
from dagster.serdes import register_serdes_tuple_fallbacks, whitelist_for_serdes
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from dagster.utils.timing import format_duration
class DagsterEventType(Enum):
"""The types of events that may be yielded by solid and pipeline execution."""
STEP_OUTPUT = "STEP_OUTPUT"
STEP_INPUT = "STEP_INPUT"
STEP_FAILURE = "STEP_FAILURE"
STEP_START = "STEP_START"
STEP_SUCCESS = "STEP_SUCCESS"
STEP_SKIPPED = "STEP_SKIPPED"
STEP_UP_FOR_RETRY = "STEP_UP_FOR_RETRY" # "failed" but want to retry
STEP_RESTARTED = "STEP_RESTARTED"
STEP_MATERIALIZATION = "STEP_MATERIALIZATION"
STEP_EXPECTATION_RESULT = "STEP_EXPECTATION_RESULT"
PIPELINE_INIT_FAILURE = "PIPELINE_INIT_FAILURE"
PIPELINE_ENQUEUED = "PIPELINE_ENQUEUED"
PIPELINE_DEQUEUED = "PIPELINE_DEQUEUED"
PIPELINE_START = "PIPELINE_START"
PIPELINE_SUCCESS = "PIPELINE_SUCCESS"
PIPELINE_FAILURE = "PIPELINE_FAILURE"
OBJECT_STORE_OPERATION = "OBJECT_STORE_OPERATION"
ASSET_STORE_OPERATION = "ASSET_STORE_OPERATION"
ENGINE_EVENT = "ENGINE_EVENT"
HOOK_COMPLETED = "HOOK_COMPLETED"
HOOK_ERRORED = "HOOK_ERRORED"
HOOK_SKIPPED = "HOOK_SKIPPED"
STEP_EVENTS = {
DagsterEventType.STEP_INPUT,
DagsterEventType.STEP_START,
DagsterEventType.STEP_OUTPUT,
DagsterEventType.STEP_FAILURE,
DagsterEventType.STEP_SUCCESS,
DagsterEventType.STEP_SKIPPED,
DagsterEventType.STEP_MATERIALIZATION,
DagsterEventType.STEP_EXPECTATION_RESULT,
DagsterEventType.OBJECT_STORE_OPERATION,
DagsterEventType.STEP_RESTARTED,
DagsterEventType.STEP_UP_FOR_RETRY,
}
FAILURE_EVENTS = {
DagsterEventType.PIPELINE_INIT_FAILURE,
DagsterEventType.PIPELINE_FAILURE,
DagsterEventType.STEP_FAILURE,
}
PIPELINE_EVENTS = {
DagsterEventType.PIPELINE_ENQUEUED,
DagsterEventType.PIPELINE_DEQUEUED,
DagsterEventType.PIPELINE_START,
DagsterEventType.PIPELINE_SUCCESS,
DagsterEventType.PIPELINE_FAILURE,
}
HOOK_EVENTS = {
DagsterEventType.HOOK_COMPLETED,
DagsterEventType.HOOK_ERRORED,
DagsterEventType.HOOK_SKIPPED,
}
def _assert_type(method, expected_type, actual_type):
check.invariant(
expected_type == actual_type,
(
"{method} only callable when event_type is {expected_type}, called on {actual_type}"
).format(method=method, expected_type=expected_type, actual_type=actual_type),
)
def _validate_event_specific_data(event_type, event_specific_data):
from dagster.core.execution.plan.objects import StepFailureData, StepSuccessData, StepInputData
if event_type == DagsterEventType.STEP_OUTPUT:
check.inst_param(event_specific_data, "event_specific_data", StepOutputData)
elif event_type == DagsterEventType.STEP_FAILURE:
check.inst_param(event_specific_data, "event_specific_data", StepFailureData)
elif event_type == DagsterEventType.STEP_SUCCESS:
check.inst_param(event_specific_data, "event_specific_data", StepSuccessData)
elif event_type == DagsterEventType.STEP_MATERIALIZATION:
check.inst_param(event_specific_data, "event_specific_data", StepMaterializationData)
elif event_type == DagsterEventType.STEP_EXPECTATION_RESULT:
check.inst_param(event_specific_data, "event_specific_data", StepExpectationResultData)
elif event_type == DagsterEventType.STEP_INPUT:
check.inst_param(event_specific_data, "event_specific_data", StepInputData)
elif event_type == DagsterEventType.ENGINE_EVENT:
check.inst_param(event_specific_data, "event_specific_data", EngineEventData)
elif event_type == DagsterEventType.HOOK_ERRORED:
check.inst_param(event_specific_data, "event_specific_data", HookErroredData)
return event_specific_data
def log_step_event(step_context, event):
check.inst_param(step_context, "step_context", SystemStepExecutionContext)
check.inst_param(event, "event", DagsterEvent)
event_type = DagsterEventType(event.event_type_value)
log_fn = step_context.log.error if event_type in FAILURE_EVENTS else step_context.log.debug
log_fn(
event.message
or "{event_type} for step {step_key}".format(
event_type=event_type, step_key=step_context.step.key
),
dagster_event=event,
pipeline_name=step_context.pipeline_def.name,
)
def log_pipeline_event(pipeline_context, event, step_key):
event_type = DagsterEventType(event.event_type_value)
log_fn = (
pipeline_context.log.error if event_type in FAILURE_EVENTS else pipeline_context.log.debug
)
log_fn(
event.message
or "{event_type} for pipeline {pipeline_name}".format(
event_type=event_type, pipeline_name=pipeline_context.pipeline_def.name
),
dagster_event=event,
pipeline_name=pipeline_context.pipeline_def.name,
step_key=step_key,
)
def log_resource_event(log_manager, pipeline_name, event):
check.inst_param(log_manager, "log_manager", DagsterLogManager)
check.inst_param(event, "event", DagsterEvent)
check.inst(event.event_specific_data, EngineEventData)
log_fn = log_manager.error if event.event_specific_data.error else log_manager.debug
log_fn(event.message, dagster_event=event, pipeline_name=pipeline_name, step_key=event.step_key)
@whitelist_for_serdes
class DagsterEvent(
namedtuple(
"_DagsterEvent",
"event_type_value pipeline_name step_key solid_handle step_kind_value "
"logging_tags event_specific_data message pid",
)
):
"""Events yielded by solid and pipeline execution.
Users should not instantiate this class.
Attributes:
event_type_value (str): Value for a DagsterEventType.
pipeline_name (str)
step_key (str)
solid_handle (SolidHandle)
step_kind_value (str): Value for a StepKind.
logging_tags (Dict[str, str])
event_specific_data (Any): Type must correspond to event_type_value.
message (str)
pid (int)
"""
@staticmethod
def from_step(event_type, step_context, event_specific_data=None, message=None):
check.inst_param(step_context, "step_context", SystemStepExecutionContext)
event = DagsterEvent(
check.inst_param(event_type, "event_type", DagsterEventType).value,
step_context.pipeline_def.name,
step_context.step.key,
step_context.step.solid_handle,
step_context.step.kind.value,
step_context.logging_tags,
_validate_event_specific_data(event_type, event_specific_data),
check.opt_str_param(message, "message"),
pid=os.getpid(),
)
log_step_event(step_context, event)
return event
@staticmethod
def from_pipeline(
event_type, pipeline_context, message=None, event_specific_data=None, step_key=None
):
check.inst_param(pipeline_context, "pipeline_context", SystemExecutionContext)
pipeline_name = pipeline_context.pipeline_def.name
event = DagsterEvent(
check.inst_param(event_type, "event_type", DagsterEventType).value,
check.str_param(pipeline_name, "pipeline_name"),
message=check.opt_str_param(message, "message"),
event_specific_data=_validate_event_specific_data(event_type, event_specific_data),
step_key=step_key,
pid=os.getpid(),
)
log_pipeline_event(pipeline_context, event, step_key)
return event
@staticmethod
def from_resource(execution_plan, log_manager, message=None, event_specific_data=None):
from dagster.core.execution.plan.plan import ExecutionPlan
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
pipeline_name = execution_plan.pipeline_def.name
event = DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
pipeline_name=pipeline_name,
message=check.opt_str_param(message, "message"),
event_specific_data=_validate_event_specific_data(
DagsterEventType.ENGINE_EVENT, event_specific_data
),
step_key=execution_plan.step_key_for_single_step_plans(),
pid=os.getpid(),
)
log_resource_event(log_manager, pipeline_name, event)
return event
def __new__(
cls,
event_type_value,
pipeline_name,
step_key=None,
solid_handle=None,
step_kind_value=None,
logging_tags=None,
event_specific_data=None,
message=None,
pid=None,
):
event_type_value, event_specific_data = _handle_back_compat(
event_type_value, event_specific_data
)
return super(DagsterEvent, cls).__new__(
cls,
check.str_param(event_type_value, "event_type_value"),
check.str_param(pipeline_name, "pipeline_name"),
check.opt_str_param(step_key, "step_key"),
check.opt_inst_param(solid_handle, "solid_handle", SolidHandle),
check.opt_str_param(step_kind_value, "step_kind_value"),
check.opt_dict_param(logging_tags, "logging_tags"),
_validate_event_specific_data(DagsterEventType(event_type_value), event_specific_data),
check.opt_str_param(message, "message"),
check.opt_int_param(pid, "pid"),
)
@property
def solid_name(self):
return self.solid_handle.name
@property
def event_type(self):
"""DagsterEventType: The type of this event."""
return DagsterEventType(self.event_type_value)
@property
def is_step_event(self):
return self.event_type in STEP_EVENTS
@property
def is_hook_event(self):
return self.event_type in HOOK_EVENTS
@property
def step_kind(self):
from dagster.core.execution.plan.objects import StepKind
return StepKind(self.step_kind_value)
@property
def is_step_success(self):
return self.event_type == DagsterEventType.STEP_SUCCESS
@property
def is_successful_output(self):
return self.event_type == DagsterEventType.STEP_OUTPUT
@property
def is_step_start(self):
return self.event_type == DagsterEventType.STEP_START
@property
def is_step_failure(self):
return self.event_type == DagsterEventType.STEP_FAILURE
@property
def is_step_skipped(self):
return self.event_type == DagsterEventType.STEP_SKIPPED
@property
def is_step_up_for_retry(self):
return self.event_type == DagsterEventType.STEP_UP_FOR_RETRY
@property
def is_step_restarted(self):
return self.event_type == DagsterEventType.STEP_RESTARTED
@property
def is_pipeline_success(self):
return self.event_type == DagsterEventType.PIPELINE_SUCCESS
@property
def is_pipeline_failure(self):
return self.event_type == DagsterEventType.PIPELINE_FAILURE
@property
def is_pipeline_init_failure(self):
return self.event_type == DagsterEventType.PIPELINE_INIT_FAILURE
@property
def is_failure(self):
return self.event_type in FAILURE_EVENTS
@property
def is_pipeline_event(self):
return self.event_type in PIPELINE_EVENTS
@property
def is_engine_event(self):
return self.event_type == DagsterEventType.ENGINE_EVENT
@property
def is_asset_store_operation(self):
return self.event_type == DagsterEventType.ASSET_STORE_OPERATION
@property
def is_step_materialization(self):
return self.event_type == DagsterEventType.STEP_MATERIALIZATION
@property
def asset_key(self):
if self.event_type != DagsterEventType.STEP_MATERIALIZATION:
return None
return self.step_materialization_data.materialization.asset_key
@property
def step_input_data(self):
_assert_type("step_input_data", DagsterEventType.STEP_INPUT, self.event_type)
return self.event_specific_data
@property
def step_output_data(self):
_assert_type("step_output_data", DagsterEventType.STEP_OUTPUT, self.event_type)
return self.event_specific_data
@property
def step_success_data(self):
_assert_type("step_success_data", DagsterEventType.STEP_SUCCESS, self.event_type)
return self.event_specific_data
@property
def step_failure_data(self):
_assert_type("step_failure_data", DagsterEventType.STEP_FAILURE, self.event_type)
return self.event_specific_data
@property
def step_retry_data(self):
_assert_type("step_retry_data", DagsterEventType.STEP_UP_FOR_RETRY, self.event_type)
return self.event_specific_data
@property
def step_materialization_data(self):
_assert_type(
"step_materialization_data", DagsterEventType.STEP_MATERIALIZATION, self.event_type
)
return self.event_specific_data
@property
def step_expectation_result_data(self):
_assert_type(
"step_expectation_result_data",
DagsterEventType.STEP_EXPECTATION_RESULT,
self.event_type,
)
return self.event_specific_data
@property
def pipeline_init_failure_data(self):
_assert_type(
"pipeline_init_failure_data", DagsterEventType.PIPELINE_INIT_FAILURE, self.event_type
)
return self.event_specific_data
@property
def pipeline_failure_data(self):
_assert_type("pipeline_failure_data", DagsterEventType.PIPELINE_FAILURE, self.event_type)
return self.event_specific_data
@property
def engine_event_data(self):
_assert_type("engine_event_data", DagsterEventType.ENGINE_EVENT, self.event_type)
return self.event_specific_data
@property
def hook_completed_data(self):
_assert_type("hook_completed_data", DagsterEventType.HOOK_COMPLETED, self.event_type)
return self.event_specific_data
@property
def hook_errored_data(self):
_assert_type("hook_errored_data", DagsterEventType.HOOK_ERRORED, self.event_type)
return self.event_specific_data
@property
def hook_skipped_data(self):
_assert_type("hook_skipped_data", DagsterEventType.HOOK_SKIPPED, self.event_type)
return self.event_specific_data
@staticmethod
def step_output_event(step_context, step_output_data):
check.inst_param(step_output_data, "step_output_data", StepOutputData)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_OUTPUT,
step_context=step_context,
event_specific_data=step_output_data,
message='Yielded output "{output_name}" of type "{output_type}".{type_check_clause}'.format(
output_name=step_output_data.step_output_handle.output_name,
output_type=step_context.step.step_output_named(
step_output_data.step_output_handle.output_name
).dagster_type.display_name,
type_check_clause=(
" Warning! Type check failed."
if not step_output_data.type_check_data.success
else " (Type check passed)."
)
if step_output_data.type_check_data
else " (No type check).",
),
)
@staticmethod
def step_failure_event(step_context, step_failure_data):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_FAILURE,
step_context=step_context,
event_specific_data=step_failure_data,
message='Execution of step "{step_key}" failed.'.format(step_key=step_context.step.key),
)
@staticmethod
def step_retry_event(step_context, step_retry_data):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_UP_FOR_RETRY,
step_context=step_context,
event_specific_data=step_retry_data,
message='Execution of step "{step_key}" failed and has requested a retry{wait_str}.'.format(
step_key=step_context.step.key,
wait_str=" in {n} seconds".format(n=step_retry_data.seconds_to_wait)
if step_retry_data.seconds_to_wait
else "",
),
)
@staticmethod
def step_input_event(step_context, step_input_data):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_INPUT,
step_context=step_context,
event_specific_data=step_input_data,
message='Got input "{input_name}" of type "{input_type}".{type_check_clause}'.format(
input_name=step_input_data.input_name,
input_type=step_context.step.step_input_named(
step_input_data.input_name
).dagster_type.display_name,
type_check_clause=(
" Warning! Type check failed."
if not step_input_data.type_check_data.success
else " (Type check passed)."
)
if step_input_data.type_check_data
else " (No type check).",
),
)
@staticmethod
def step_start_event(step_context):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_START,
step_context=step_context,
message='Started execution of step "{step_key}".'.format(
step_key=step_context.step.key
),
)
@staticmethod
def step_restarted_event(step_context, previous_attempts):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_RESTARTED,
step_context=step_context,
message='Started re-execution (attempt # {n}) of step "{step_key}".'.format(
step_key=step_context.step.key, n=previous_attempts + 1
),
)
@staticmethod
def step_success_event(step_context, success):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_SUCCESS,
step_context=step_context,
event_specific_data=success,
message='Finished execution of step "{step_key}" in {duration}.'.format(
# TODO: Make duration human readable
# See: https://github.com/dagster-io/dagster/issues/1602
step_key=step_context.step.key,
duration=format_duration(success.duration_ms),
),
)
@staticmethod
def step_skipped_event(step_context):
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_SKIPPED,
step_context=step_context,
message='Skipped execution of step "{step_key}".'.format(
step_key=step_context.step.key
),
)
@staticmethod
def step_materialization(step_context, materialization):
check.inst_param(
materialization, "materialization", (AssetMaterialization, Materialization)
)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_MATERIALIZATION,
step_context=step_context,
event_specific_data=StepMaterializationData(materialization),
message=materialization.description
if materialization.description
else "Materialized value{label_clause}.".format(
label_clause=" {label}".format(label=materialization.label)
if materialization.label
else ""
),
)
@staticmethod
def step_expectation_result(step_context, expectation_result):
check.inst_param(expectation_result, "expectation_result", ExpectationResult)
def _msg():
if expectation_result.description:
return expectation_result.description
return "Expectation{label_clause} {result_verb}".format(
label_clause=" " + expectation_result.label if expectation_result.label else "",
result_verb="passed" if expectation_result.success else "failed",
)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_EXPECTATION_RESULT,
step_context=step_context,
event_specific_data=StepExpectationResultData(expectation_result),
message=_msg(),
)
@staticmethod
def pipeline_start(pipeline_context):
return DagsterEvent.from_pipeline(
DagsterEventType.PIPELINE_START,
pipeline_context,
message='Started execution of pipeline "{pipeline_name}".'.format(
pipeline_name=pipeline_context.pipeline_def.name
),
)
@staticmethod
def pipeline_success(pipeline_context):
return DagsterEvent.from_pipeline(
DagsterEventType.PIPELINE_SUCCESS,
pipeline_context,
message='Finished execution of pipeline "{pipeline_name}".'.format(
pipeline_name=pipeline_context.pipeline_def.name
),
)
@staticmethod
def pipeline_failure(pipeline_context, context_msg, error_info=None):
return DagsterEvent.from_pipeline(
DagsterEventType.PIPELINE_FAILURE,
pipeline_context,
message='Execution of pipeline "{pipeline_name}" failed. {context_msg}'.format(
pipeline_name=pipeline_context.pipeline_def.name,
context_msg=check.str_param(context_msg, "context_msg"),
),
event_specific_data=PipelineFailureData(
check.opt_inst_param(error_info, "error_info", SerializableErrorInfo)
),
)
@staticmethod
def resource_init_start(execution_plan, log_manager, resource_keys):
from dagster.core.execution.plan.plan import ExecutionPlan
return DagsterEvent.from_resource(
execution_plan=check.inst_param(execution_plan, "execution_plan", ExecutionPlan),
log_manager=check.inst_param(log_manager, "log_manager", DagsterLogManager),
message="Starting initialization of resources [{}].".format(
", ".join(sorted(resource_keys))
),
event_specific_data=EngineEventData(metadata_entries=[], marker_start="resources"),
)
@staticmethod
def resource_init_success(execution_plan, log_manager, resource_instances, resource_init_times):
from dagster.core.execution.plan.plan import ExecutionPlan
metadata_entries = []
for resource_key in resource_instances.keys():
resource_obj = resource_instances[resource_key]
resource_time = resource_init_times[resource_key]
metadata_entries.append(
EventMetadataEntry.python_artifact(
resource_obj.__class__, resource_key, "Initialized in {}".format(resource_time)
)
)
return DagsterEvent.from_resource(
execution_plan=check.inst_param(execution_plan, "execution_plan", ExecutionPlan),
log_manager=check.inst_param(log_manager, "log_manager", DagsterLogManager),
message="Finished initialization of resources [{}].".format(
", ".join(sorted(resource_init_times.keys()))
),
event_specific_data=EngineEventData(
metadata_entries=metadata_entries, marker_end="resources",
),
)
@staticmethod
def resource_init_failure(execution_plan, log_manager, resource_keys, error):
from dagster.core.execution.plan.plan import ExecutionPlan
return DagsterEvent.from_resource(
execution_plan=check.inst_param(execution_plan, "execution_plan", ExecutionPlan),
log_manager=check.inst_param(log_manager, "log_manager", DagsterLogManager),
message="Initialization of resources [{}] failed.".format(", ".join(resource_keys)),
event_specific_data=EngineEventData(
metadata_entries=[], marker_end="resources", error=error,
),
)
@staticmethod
def resource_teardown_failure(execution_plan, log_manager, resource_keys, error):
from dagster.core.execution.plan.plan import ExecutionPlan
return DagsterEvent.from_resource(
execution_plan=check.inst_param(execution_plan, "execution_plan", ExecutionPlan),
log_manager=check.inst_param(log_manager, "log_manager", DagsterLogManager),
message="Teardown of resources [{}] failed.".format(", ".join(resource_keys)),
event_specific_data=EngineEventData(
metadata_entries=[], marker_start=None, marker_end=None, error=error,
),
)
@staticmethod
def pipeline_init_failure(pipeline_name, failure_data, log_manager):
check.inst_param(failure_data, "failure_data", PipelineInitFailureData)
check.inst_param(log_manager, "log_manager", DagsterLogManager)
# this failure happens trying to bring up context so can't use from_pipeline
event = DagsterEvent(
event_type_value=DagsterEventType.PIPELINE_INIT_FAILURE.value,
pipeline_name=pipeline_name,
event_specific_data=failure_data,
message=(
'Pipeline failure during initialization of pipeline "{pipeline_name}". '
"This may be due to a failure in initializing a resource or logger."
).format(pipeline_name=pipeline_name),
pid=os.getpid(),
)
log_manager.error(
event.message
or "{event_type} for pipeline {pipeline_name}".format(
event_type=DagsterEventType.PIPELINE_INIT_FAILURE, pipeline_name=pipeline_name
),
dagster_event=event,
pipeline_name=pipeline_name,
)
return event
@staticmethod
def engine_event(pipeline_context, message, event_specific_data=None, step_key=None):
return DagsterEvent.from_pipeline(
DagsterEventType.ENGINE_EVENT,
pipeline_context,
message,
event_specific_data=event_specific_data,
step_key=step_key,
)
@staticmethod
def object_store_operation(step_context, object_store_operation_result):
from dagster.core.definitions.events import ObjectStoreOperation
check.inst_param(
object_store_operation_result, "object_store_operation_result", ObjectStoreOperation
)
object_store_name = (
"{object_store_name} ".format(
object_store_name=object_store_operation_result.object_store_name
)
if object_store_operation_result.object_store_name
else ""
)
serialization_strategy_modifier = (
" using {serialization_strategy_name}".format(
serialization_strategy_name=object_store_operation_result.serialization_strategy_name
)
if object_store_operation_result.serialization_strategy_name
else ""
)
value_name = object_store_operation_result.value_name
if (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.SET_OBJECT
):
message = (
"Stored intermediate object for output {value_name} in "
"{object_store_name}object store{serialization_strategy_modifier}."
).format(
value_name=value_name,
object_store_name=object_store_name,
serialization_strategy_modifier=serialization_strategy_modifier,
)
elif (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.GET_OBJECT
):
message = (
"Retrieved intermediate object for input {value_name} in "
"{object_store_name}object store{serialization_strategy_modifier}."
).format(
value_name=value_name,
object_store_name=object_store_name,
serialization_strategy_modifier=serialization_strategy_modifier,
)
elif (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.CP_OBJECT
):
message = (
"Copied intermediate object for input {value_name} from {key} to {dest_key}"
).format(
value_name=value_name,
key=object_store_operation_result.key,
dest_key=object_store_operation_result.dest_key,
)
else:
message = ""
return DagsterEvent.from_step(
DagsterEventType.OBJECT_STORE_OPERATION,
step_context,
event_specific_data=ObjectStoreOperationResultData(
op=object_store_operation_result.op,
value_name=value_name,
address=object_store_operation_result.key,
metadata_entries=[
EventMetadataEntry.path(object_store_operation_result.key, label="key")
],
version=object_store_operation_result.version,
),
message=message,
)
@staticmethod
def asset_store_operation(step_context, asset_store_operation):
from dagster.core.definitions.events import AssetStoreOperation
check.inst_param(asset_store_operation, "asset_store_operation", AssetStoreOperation)
if AssetStoreOperationType(asset_store_operation.op) == AssetStoreOperationType.SET_ASSET:
message = (
'Stored output "{output_name}" using asset store "{asset_store_key}".'
).format(
asset_store_key=asset_store_operation.asset_store_handle.asset_store_key,
output_name=asset_store_operation.step_output_handle.output_name,
)
elif AssetStoreOperationType(asset_store_operation.op) == AssetStoreOperationType.GET_ASSET:
message = (
'Retrieved output "{output_name}" from step "{step_key}" '
'using asset store "{asset_store_key}".'
).format(
asset_store_key=asset_store_operation.asset_store_handle.asset_store_key,
output_name=asset_store_operation.step_output_handle.output_name,
step_key=asset_store_operation.step_output_handle.step_key,
)
else:
message = ""
return DagsterEvent.from_step(
event_type=DagsterEventType.ASSET_STORE_OPERATION,
step_context=step_context,
event_specific_data=AssetStoreOperationData(
op=asset_store_operation.op,
step_key=asset_store_operation.step_output_handle.step_key,
output_name=asset_store_operation.step_output_handle.output_name,
asset_store_key=asset_store_operation.asset_store_handle.asset_store_key,
),
message=message,
)
@staticmethod
def hook_completed(hook_context, hook_def):
event_type = DagsterEventType.HOOK_COMPLETED
check.inst_param(hook_context, "hook_context", HookContext)
event = DagsterEvent(
event_type_value=event_type.value,
pipeline_name=hook_context.pipeline_def.name,
step_key=hook_context.step.key,
solid_handle=hook_context.step.solid_handle,
step_kind_value=hook_context.step.kind.value,
logging_tags=hook_context.logging_tags,
message=(
'Finished the execution of hook "{hook_name}" triggered for solid "{solid_name}".'
).format(hook_name=hook_def.name, solid_name=hook_context.solid.name),
)
hook_context.log.debug(
event.message, dagster_event=event, pipeline_name=hook_context.pipeline_def.name,
)
return event
@staticmethod
def hook_errored(hook_context, error):
event_type = DagsterEventType.HOOK_ERRORED
check.inst_param(hook_context, "hook_context", HookContext)
event = DagsterEvent(
event_type_value=event_type.value,
pipeline_name=hook_context.pipeline_def.name,
step_key=hook_context.step.key,
solid_handle=hook_context.step.solid_handle,
step_kind_value=hook_context.step.kind.value,
logging_tags=hook_context.logging_tags,
event_specific_data=_validate_event_specific_data(
event_type,
HookErroredData(
error=serializable_error_info_from_exc_info(error.original_exc_info)
),
),
)
hook_context.log.error(
str(error), dagster_event=event, pipeline_name=hook_context.pipeline_def.name,
)
return event
@staticmethod
def hook_skipped(hook_context, hook_def):
event_type = DagsterEventType.HOOK_SKIPPED
check.inst_param(hook_context, "hook_context", HookContext)
event = DagsterEvent(
event_type_value=event_type.value,
pipeline_name=hook_context.pipeline_def.name,
step_key=hook_context.step.key,
solid_handle=hook_context.step.solid_handle,
step_kind_value=hook_context.step.kind.value,
logging_tags=hook_context.logging_tags,
message=(
'Skipped the execution of hook "{hook_name}". It did not meet its triggering '
'condition during the execution of solid "{solid_name}".'
).format(hook_name=hook_def.name, solid_name=hook_context.solid.name),
)
hook_context.log.debug(
event.message, dagster_event=event, pipeline_name=hook_context.pipeline_def.name,
)
return event
def get_step_output_event(events, step_key, output_name="result"):
check.list_param(events, "events", of_type=DagsterEvent)
check.str_param(step_key, "step_key")
check.str_param(output_name, "output_name")
for event in events:
if (
event.event_type == DagsterEventType.STEP_OUTPUT
and event.step_key == step_key
and event.step_output_data.output_name == output_name
):
return event
return None
@whitelist_for_serdes
class StepMaterializationData(namedtuple("_StepMaterializationData", "materialization")):
pass
@whitelist_for_serdes
class StepExpectationResultData(namedtuple("_StepExpectationResultData", "expectation_result")):
pass
@whitelist_for_serdes
class AssetStoreOperationData(
namedtuple("_AssetStoreOperationData", "op step_key output_name asset_store_key")
):
pass
@whitelist_for_serdes
class ObjectStoreOperationResultData(
namedtuple("_ObjectStoreOperationResultData", "op value_name metadata_entries address version")
):
def __new__(cls, op, value_name, metadata_entries, address=None, version=None):
return super(ObjectStoreOperationResultData, cls).__new__(
cls,
op=check.opt_str_param(op, "op"),
value_name=check.opt_str_param(value_name, "value_name"),
metadata_entries=check.opt_list_param(metadata_entries, "metadata_entries"),
address=check.opt_str_param(address, "address"),
version=check.opt_str_param(version, "version"),
)
@whitelist_for_serdes
class EngineEventData(
namedtuple("_EngineEventData", "metadata_entries error marker_start marker_end")
):
# serdes log
# * added optional error
# * added marker_start / marker_end
#
def __new__(cls, metadata_entries=None, error=None, marker_start=None, marker_end=None):
return super(EngineEventData, cls).__new__(
cls,
metadata_entries=check.opt_list_param(
metadata_entries, "metadata_entries", EventMetadataEntry
),
error=check.opt_inst_param(error, "error", SerializableErrorInfo),
marker_start=check.opt_str_param(marker_start, "marker_start"),
marker_end=check.opt_str_param(marker_end, "marker_end"),
)
@staticmethod
def in_process(pid, step_keys_to_execute=None, marker_end=None):
check.int_param(pid, "pid")
check.opt_list_param(step_keys_to_execute, "step_keys_to_execute")
return EngineEventData(
metadata_entries=[EventMetadataEntry.text(str(pid), "pid")]
+ (
[EventMetadataEntry.text(str(step_keys_to_execute), "step_keys")]
if step_keys_to_execute
else []
),
marker_end=marker_end,
)
@staticmethod
def multiprocess(pid, step_keys_to_execute=None):
check.int_param(pid, "pid")
check.opt_list_param(step_keys_to_execute, "step_keys_to_execute")
return EngineEventData(
metadata_entries=[EventMetadataEntry.text(str(pid), "pid")]
+ (
[EventMetadataEntry.text(str(step_keys_to_execute), "step_keys")]
if step_keys_to_execute
else []
)
)
@staticmethod
def interrupted(steps_interrupted):
check.list_param(steps_interrupted, "steps_interrupted", str)
return EngineEventData(
metadata_entries=[EventMetadataEntry.text(str(steps_interrupted), "steps_interrupted")]
)
@staticmethod
def engine_error(error):
check.inst_param(error, "error", SerializableErrorInfo)
return EngineEventData(metadata_entries=[], error=error)
@whitelist_for_serdes
class PipelineInitFailureData(namedtuple("_PipelineInitFailureData", "error")):
def __new__(cls, error):
return super(PipelineInitFailureData, cls).__new__(
cls, error=check.inst_param(error, "error", SerializableErrorInfo)
)
@whitelist_for_serdes
class PipelineFailureData(namedtuple("_PipelineFailureData", "error")):
def __new__(cls, error):
return super(PipelineFailureData, cls).__new__(
cls, error=check.opt_inst_param(error, "error", SerializableErrorInfo)
)
@whitelist_for_serdes
class HookErroredData(namedtuple("_HookErroredData", "error")):
def __new__(cls, error):
return super(HookErroredData, cls).__new__(
cls, error=check.inst_param(error, "error", SerializableErrorInfo),
)
###################################################################################################
# THE GRAVEYARD
#
# -|-
# |
# _-'~~~~~`-_
# .' '.
# | R I P |
# | |
# | Synthetic |
# | Process |
# | Events |
# | |
###################################################################################################
def _handle_back_compat(event_type_value, event_specific_data):
if event_type_value == "PIPELINE_PROCESS_START":
return DagsterEventType.ENGINE_EVENT.value, EngineEventData([])
elif event_type_value == "PIPELINE_PROCESS_STARTED":
return DagsterEventType.ENGINE_EVENT.value, EngineEventData([])
elif event_type_value == "PIPELINE_PROCESS_EXITED":
return DagsterEventType.ENGINE_EVENT.value, EngineEventData([])
else:
return event_type_value, event_specific_data
register_serdes_tuple_fallbacks(
{
"PipelineProcessStartedData": None,
"PipelineProcessExitedData": None,
"PipelineProcessStartData": None,
}
)
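# Hedged, minimal sketch (not part of dagster itself): constructing a bare
# engine event by hand and checking how the classification properties above
# bucket it.  Real code normally receives DagsterEvent instances from pipeline
# execution rather than building them directly.
if __name__ == "__main__":
    _example_event = DagsterEvent(
        event_type_value=DagsterEventType.ENGINE_EVENT.value,
        pipeline_name="example_pipeline",  # hypothetical name, for illustration only
        event_specific_data=EngineEventData(metadata_entries=[]),
        message="Example engine event.",
    )
    assert _example_event.is_engine_event
    assert not _example_event.is_failure
    assert not _example_event.is_step_event
    assert _example_event.event_type == DagsterEventType.ENGINE_EVENT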
|
from argostrain.dataset import *
from argostrain.utils import *
from collections import deque
from argostranslate import package, translate
MIN_TAG_TEXT_LENGTH = 10
OPEN_TOKEN = '<x>'
CLOSE_TOKEN = '</x>'
def generate_xml_data(source_code, target_code, source_and_target_line):
installed_languages = translate.get_installed_languages()
source_translation = list(filter(
lambda x: x.code == source_code,
installed_languages))[0]
target_translation = list(filter(
lambda x: x.code == target_code,
installed_languages))[0]
source_translation = source_translation.get_translation(target_translation)
source_line, target_line = source_and_target_line
info(f'Processing xml ' + \
f'source_line={source_line} ' + \
f'target_line={target_line} ')
best_source_start_index = None
best_source_end_index = None
best_matching_index = None
best_target_end_index = None
best_score = None
for source_start_index in range(len(source_line) - 1):
for source_end_index in range(source_start_index, len(source_line) + 1):
source_sub_string = source_line[source_start_index:source_end_index]
if len(source_sub_string) < MIN_TAG_TEXT_LENGTH:
continue
translation_hypothesis = source_translation.hypotheses(source_sub_string, 1)[0]
translated_sub_string = translation_hypothesis.value
score = translation_hypothesis.score
matching_index = target_line.find(translated_sub_string)
if matching_index == -1:
continue
            if best_score is None or score > best_score:
best_source_start_index = source_start_index
best_source_end_index = source_end_index
best_matching_index = matching_index
best_target_end_index = matching_index + len(translated_sub_string)
best_score = score
    if best_score is None:
return None
generated_source_line = source_line[:best_source_start_index] + \
OPEN_TOKEN + \
source_line[best_source_start_index:best_source_end_index] + \
CLOSE_TOKEN + \
source_line[best_source_end_index:]
generated_target_line = target_line[:best_matching_index] + \
OPEN_TOKEN + \
target_line[best_matching_index:best_target_end_index] + \
CLOSE_TOKEN + \
target_line[best_target_end_index:]
info(f'Generated tag data ' + \
f'generated_source_line={generated_source_line} ' + \
f'generated_target_line={generated_target_line} ')
return (generated_source_line, generated_target_line)
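# Hedged usage sketch: generate_xml_data needs an installed argostranslate
# package for the requested language pair.  The sentence pair below is a
# made-up example, and the function returns None when no sufficiently long
# source substring can be matched inside the target sentence.
def _example_generate_xml_data():
    pair = ("The quick brown fox jumps over the lazy dog.",
            "Le renard brun rapide saute par-dessus le chien paresseux.")
    result = generate_xml_data('en', 'fr', pair)
    if result is not None:
        tagged_source, tagged_target = result
        info(f'tagged_source={tagged_source} tagged_target={tagged_target}')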
|
import codecs
import os
import re
import sys
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
# Some general-purpose code stolen from
# https://github.com/jeffknupp/sandman/blob/5c4b7074e8ba5a60b00659760e222c57ad24ef91/setup.py
here = os.path.abspath(os.path.dirname(__file__))
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
def read(*parts):
# intentionally *not* adding an encoding option to open
with codecs.open(os.path.join(here, *parts), 'r') as f:
return f.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Make sure build path exists.
build_path = os.path.join(here, 'build')
if not os.path.exists(build_path):
os.mkdir(build_path)
# Generate Python bindings for bundled C++ library.
module_fname = os.path.join(build_path, "rabinkarprh.cpp")
try:
import pybindgen # @UnusedImport
except ImportError:
print("WARNING: Failed to import pybindgen. If you called setup.py egg_info, this is probably acceptable; "
"otherwise, build will fail. You can resolve this problem by installing pybindgen beforehand.")
else:
with open(module_fname, "wt") as file_:
print("Generating file {}".format(module_fname))
from lib.rabinkarp_gen import generate
generate(file_)
setup(
name='fastchunking',
version=find_version('fastchunking', '__init__.py'),
description='Fast chunking library.',
long_description=read('README.rst'),
url='https://github.com/netleibi/fastchunking',
author='Dominik Leibenger',
author_email='python-fastchunking@mails.dominik-leibenger.de',
license='Apache Software License',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
],
keywords=['text chunking', 'SC', 'static chunking', 'CDC', 'content-defined chunking', 'ML-*',
'multi-level chunking', 'ML-SC', 'ML-CDC', 'Rabin Karp', 'rolling hash'],
packages=['fastchunking', 'lib'],
setup_requires=['pybindgen'],
install_requires=['pybindgen'],
ext_modules=[
Extension('fastchunking._rabinkarprh',
sources=[module_fname, 'lib/rabinkarp.cpp'],
include_dirs=['lib']
)
],
test_suite='fastchunking.test',
tests_require=['tox'],
cmdclass={'test': Tox}
)
|
"""
This Module start the route and start Flask
"""
from flask import Flask, make_response, request
from flask_restx import Api, Resource, fields
from src.users import UsersDAO
app = Flask(__name__)
api = Api(
app,
version="1.0",
title="UserMVC API",
description="A simple UserMVC API",
)
doc_user = api.namespace("users", description="Users list")
doc_balance = api.namespace("balance", description="How to get balance")
doc_reset = api.namespace("reset", description="Reset list of users")
doc_event = api.namespace("event", description="Activate event")
user = api.model(
"UsersDAO",
{
"id": fields.Integer(required=True, description="The unique identifier"),
"balance": fields.Integer(required=True, description="The user balance"),
},
)
event1 = api.model(
"Operation",
{
"type": fields.String(
required=True,
default="deposit",
description="Type of transaction, deposit or withdraw",
),
"destination": fields.Integer(
required=True, default=1234, description="User id"
),
"amount": fields.Integer(
required=True, default=15, description="Value to deposit or withdraw"
),
},
)
DAO = UsersDAO()
DAO.create(0, 100)
DAO.create(1, 200)
DAO.create(3, 300)
@doc_user.route("/")
class UsersList(Resource):
"""Shows a list of all users"""
@doc_user.doc("list_users")
@doc_user.marshal_list_with(user)
@classmethod
def get(cls):
"""List all tasks"""
return DAO.users, 200
@doc_balance.route("", doc={"params": {"account_id": "An user id"}})
class Balance(Resource):
"""Show the user balance"""
@doc_balance.doc("show_balance")
@doc_balance.response(200, "Success")
@doc_balance.response(404, "User <user_id> does not exist")
@classmethod
def get(cls):
"""Get the balance of id"""
user_id = request.args.get("account_id", None)
_, user_ = DAO.get(user_id)
if user_ is None:
return 0, 404
return user_.balance, 200
@doc_reset.route("")
class Reset(Resource):
"""Reset all the data"""
@doc_reset.doc("reset_data")
@classmethod
def post(cls):
"""Reset all data"""
        DAO.users.clear()
response = make_response("OK", 200)
response.mimetype = "text/plain"
return response
@doc_event.route("")
class Event(Resource):
"""Reset all the data"""
@doc_event.doc("event_data")
@doc_event.expect(event1)
@classmethod
def post(cls):
"""Execute deposit, withdraw and transfer"""
post_data = request.get_json()
response_object = {}
type_ = post_data.get("type")
if type_ == "deposit":
destination = post_data.get("destination")
amount = post_data.get("amount")
_, user_ = DAO.get(destination)
if not user_:
user_updated = DAO.create(destination, amount)
else:
user_updated = DAO.update(user_.id, user_.balance + amount)
response_object = {
"destination": {"id": user_updated.id, "balance": user_updated.balance}
}
return response_object, 201
if type_ == "withdraw":
origin = post_data.get("origin")
amount = post_data.get("amount")
_, user_ = DAO.get(origin)
if not user_:
return 0, 404
if user_.balance - amount >= 0:
user_updated = DAO.update(user_.id, user_.balance - amount)
else:
return 0, 404
response_object = {
"origin": {"id": user_updated.id, "balance": user_updated.balance}
}
return response_object, 201
if type_ == "transfer":
origin = post_data.get("origin")
destination = post_data.get("destination")
amount = post_data.get("amount")
_, user1 = DAO.get(origin)
_, user2 = DAO.get(destination)
if not user1 or (origin == destination):
return 0, 404
if user1.balance - amount >= 0:
user1 = DAO.update(user1.id, user1.balance - amount)
if not user2:
user2 = DAO.create(destination, amount)
else:
user2 = DAO.update(user2.id, user2.balance + amount)
response_object = {
"origin": {"id": user1.id, "balance": user1.balance},
"destination": {"id": user2.id, "balance": user2.balance},
}
return response_object, 201
else:
return 0, 404
        api.abort(404, f"Invalid type {type_} does not exist")
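# Hedged sketch (a helper for manual exploration, not used by the API itself):
# exercising the /event endpoint through Flask's built-in test client.  The
# account id 1234 and amount 15 are arbitrary example values.
def _example_deposit():
    client = app.test_client()
    response = client.post(
        "/event", json={"type": "deposit", "destination": 1234, "amount": 15}
    )
    # Expect 201 with the updated destination balance in the body.
    print(response.status_code, response.get_json())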
if __name__ == "__main__":
app.run(debug=True)
|
"""Perform communication related functions."""
from time import sleep
from plugins import USBRelay
can_use_comm = True
try:
from .low_level import low_comm
except Exception as e:
print("[XbeeComm]:", e)
can_use_comm = False
SECRET_KEY = "CURE"
from json import loads
class Comm:
def __init__(self, conf):
self.conf = conf
self.antenna = low_comm.Antenna(
remote_address=conf.REMOTE_XBEE_ID
)
def read_comm(self, read_time=None):
"""
Read communication for meta-state changes.
"""
return self.antenna.read_time(read_time)
def send(self, data, skip_time=0.25, as_json=False):
"""Async send data to specific XBee network."""
return self.antenna.send(
data,
skip_time=skip_time,
as_json=as_json
)
def loop(conf, data):
    """Continuously send `data` over the radio and poll for ground-station commands."""
if not can_use_comm:
return
ant = Comm(conf)
while True:
# Send data
ant.send(data.to_dict())
# Read incoming
incoming = ant.read_comm()
command = ""
if incoming != "":
try:
info = loads(incoming)
if info.get("key", "") == SECRET_KEY:
command = info.get("command", "")
except Exception as e:
print("ERROR!!!!", e)
pass
if command not in ["", "{}"]:
print(f"\nIncoming Data: <{incoming}> <{command}>\n")
if command == "a":
print("\nGot command to ARM!")
conf.state = "ARM"
if command == "s":
print("\nGot command to SIMULATE!")
if command == "h":
print("\nGot command to HALT!")
conf.state = "HALT"
if command == "e1":
print("\nGot command to EJECT_APOGEE!")
USBRelay.Relay1(conf, data)
if command == "e2":
print("\nGot command to EJECT_MAIN!")
USBRelay.Relay2(conf, data)
|
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoriesResponseData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'business': 'object',
'personal': 'object'
}
attribute_map = {
'business': 'business',
'personal': 'personal'
}
def __init__(self, business=None, personal=None): # noqa: E501
"""CategoriesResponseData - a model defined in Swagger""" # noqa: E501
self._business = None
self._personal = None
self.discriminator = None
if business is not None:
self.business = business
if personal is not None:
self.personal = personal
@property
def business(self):
"""Gets the business of this CategoriesResponseData. # noqa: E501
[Business categories](/data_enrichment/v5/#business-categories) # noqa: E501
:return: The business of this CategoriesResponseData. # noqa: E501
:rtype: object
"""
return self._business
@business.setter
def business(self, business):
"""Sets the business of this CategoriesResponseData.
[Business categories](/data_enrichment/v5/#business-categories) # noqa: E501
:param business: The business of this CategoriesResponseData. # noqa: E501
:type: object
"""
self._business = business
@property
def personal(self):
"""Gets the personal of this CategoriesResponseData. # noqa: E501
[Personal categories](/data_enrichment/v5/#personal-categories) # noqa: E501
:return: The personal of this CategoriesResponseData. # noqa: E501
:rtype: object
"""
return self._personal
@personal.setter
def personal(self, personal):
"""Sets the personal of this CategoriesResponseData.
[Personal categories](/data_enrichment/v5/#personal-categories) # noqa: E501
:param personal: The personal of this CategoriesResponseData. # noqa: E501
:type: object
"""
self._personal = personal
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CategoriesResponseData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CategoriesResponseData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
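# Illustrative usage (a minimal sketch; the category payloads below are
# placeholders, not values from the Salt Edge API):
#
#   data = CategoriesResponseData(
#       business={"category": "business_services"},
#       personal={"category": "food_and_dining"},
#   )
#   data.to_dict()
#   # -> {'business': {'category': 'business_services'},
#   #     'personal': {'category': 'food_and_dining'}}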
|
import numpy as np
import time
import os
import shutil
from tridesclous.dataio import DataIO
from tridesclous.catalogueconstructor import CatalogueConstructor
from tridesclous import mkQApp, CatalogueWindow
from matplotlib import pyplot
from tridesclous.tests.testingtools import setup_catalogue
#~ dataset_name='olfactory_bulb'
dataset_name = 'purkinje'
#~ dataset_name='locust'
#~ dataset_name='striatum_rat'
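# Each test below restores the savepoint written by the previous step, runs one
# cleaning operation on the CatalogueConstructor (split, trash, merge, ...) and
# stores a new savepoint for the next test.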
def setup_module():
cc, params = setup_catalogue('test_cleancluster', dataset_name=dataset_name)
cc.find_clusters(method=params['cluster_method'], **params['cluster_kargs'])
cc.create_savepoint(name='after_find_clusters')
def restore_savepoint(dirname, savepoint=None):
folder = os.path.join(dirname, 'channel_group_0', 'catalogue_constructor')
savepoint_folder = os.path.join(dirname, 'channel_group_0', 'catalogue_constructor_SAVEPOINT_' + savepoint)
assert os.path.exists(savepoint_folder)
shutil.rmtree(folder)
shutil.copytree(savepoint_folder, folder)
def test_auto_split():
dirname = 'test_cleancluster'
restore_savepoint(dirname, savepoint='after_find_clusters')
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
cc.find_clusters(method='pruningshears')
print(cc)
print(cc.n_jobs)
t1 = time.perf_counter()
cc.auto_split_cluster()
t2 = time.perf_counter()
print('auto_split_cluster', t2-t1)
print(cc)
cc.create_savepoint(name='after_auto_split')
def test_trash_not_aligned():
dirname = 'test_cleancluster'
restore_savepoint(dirname, savepoint='after_auto_split')
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
t1 = time.perf_counter()
cc.trash_not_aligned()
t2 = time.perf_counter()
print('trash_not_aligned', t2-t1)
cc.create_savepoint(name='after_trash_not_aligned')
def test_auto_merge():
dirname = 'test_cleancluster'
restore_savepoint(dirname, savepoint='after_trash_not_aligned')
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
t1 = time.perf_counter()
cc.auto_merge_cluster()
t2 = time.perf_counter()
print('auto_merge_cluster', t2-t1)
cc.create_savepoint(name='after_auto_merge_cluster')
def test_trash_low_extremum():
dirname = 'test_cleancluster'
restore_savepoint(dirname, savepoint='after_auto_merge_cluster')
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
print(cc)
t1 = time.perf_counter()
cc.trash_low_extremum()
t2 = time.perf_counter()
print('trash_low_extremum', t2-t1)
cc.create_savepoint(name='after_trash_low_extremum')
print(cc)
def test_trash_small_cluster():
dirname = 'test_cleancluster'
restore_savepoint(dirname, savepoint='after_trash_low_extremum')
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
t1 = time.perf_counter()
cc.trash_small_cluster()
t2 = time.perf_counter()
print('trash_small_cluster', t2-t1)
if __name__ == '__main__':
#~ setup_module()
#~ test_auto_split()
#~ test_trash_not_aligned()
#~ test_auto_merge()
#~ test_trash_low_extremum()
test_trash_small_cluster()
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SignZoneNow(A10BaseClass):
""" :param zone_name: {"description": "Specify the name for the DNS zone, empty means sign all zones", "format": "string", "minLength": 1, "optional": true, "maxLength": 127, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
sign zone right now.
Class sign-zone-now supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/dnssec/sign-zone-now`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "sign-zone-now"
self.a10_url="/axapi/v3/dnssec/sign-zone-now"
self.DeviceProxy = ""
self.zone_name = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
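# Minimal usage sketch (assumes an authenticated DeviceProxy/session from the
# a10sdk client; the exact session setup is outside this module):
#
#   sign_now = SignZoneNow(zone_name="example.com")
#   # POSTing this object to its a10_url (/axapi/v3/dnssec/sign-zone-now)
#   # through the device proxy triggers immediate signing of the zone.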
|
"""
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..base import BaseEstimator
__all__ = [
"PatchExtractor",
"extract_patches_2d",
"grid_to_graph",
"img_to_graph",
"reconstruct_from_patches_2d",
]
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
n_z : integer, default=1
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
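# For example, a 2x2x1 grid (vertices 0..3 laid out row-major) gives
# _make_edges_3d(2, 2) == [[0, 2, 0, 1], [1, 3, 2, 3]]: the "right" edges
# (0-1, 2-3) followed by the "down" edges (0-2, 1-3).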
def _compute_gradient_3d(edges, img):
_, n_y, n_z = img.shape
gradient = np.abs(
img[
edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z,
]
- img[
edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z,
]
)
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds), np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(
n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
):
"""Auxiliary function for img_to_graph and grid_to_graph"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
            mask = mask.astype(dtype=bool, copy=False)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix(
(
np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
),
(n_voxels, n_voxels),
dtype=dtype,
)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or \
(height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
The data of the returned sparse matrix. By default it is the
dtype of img.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(
n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
):
"""Graph of the pixel-to-pixel connections.
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis.
n_y : int
Dimension in y axis.
n_z : int, default=1
Dimension in z axis.
mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=int
The data of the returned sparse matrix. By default it is int.
Returns
-------
graph : np.ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (numbers.Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (numbers.Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (numbers.Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
    patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
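# For example, a (4, 4) array with patch_shape=2 and extraction_step=1 yields a
# strided view of shape (3, 3, 2, 2): 3x3 patch positions, each holding a 2x2
# patch that shares memory with the original array.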
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or \
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError(
"Height of the patch should be less than the height of the image."
)
if p_w > i_w:
raise ValueError(
"Width of the patch should be less than the width of the image."
)
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(
image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : ndarray of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of int (image_height, image_width) or \
(image_height, image_width, n_channels)
The size of the image that will be reconstructed.
Returns
-------
image : ndarray of shape image_size
The reconstructed image.
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i : i + p_h, j : j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
return img
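# Note: reconstructing from the complete, maximally overlapping patch set
# returned by extract_patches_2d recovers the original image exactly (up to
# floating-point rounding), since every pixel is averaged over all patches
# that contain it.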
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images.
Read more in the :ref:`User Guide <image_feature_extraction>`.
.. versionadded:: 0.9
Parameters
----------
patch_size : tuple of int (patch_height, patch_width), default=None
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches per image to extract. If `max_patches` is
a float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches is not None`. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
See Also
--------
reconstruct_from_patches_2d : Reconstruct image from all of its patches.
Examples
--------
>>> from sklearn.datasets import load_sample_images
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the second image in this dataset:
>>> X = load_sample_images().images[1]
>>> print('Image shape: {}'.format(X.shape))
Image shape: (427, 640, 3)
>>> pe = image.PatchExtractor(patch_size=(2, 2))
>>> pe_fit = pe.fit(X)
>>> pe_trans = pe.transform(X)
>>> print('Patches shape: {}'.format(pe_trans.shape))
Patches shape: (545706, 2, 2)
"""
def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
return self
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or \
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
image,
patch_size,
max_patches=self.max_patches,
random_state=self.random_state,
)
return patches
def _more_tags(self):
return {"X_types": ["3darray"]}
|
import os, sys, signal
import json
import ctypes
from cerver import *
from cerver.http import *
web_service = None
# end
def end (signum, frame):
http_cerver_all_stats_print (http_cerver_get (web_service))
cerver_teardown (web_service)
cerver_end ()
sys.exit ("Done!")
# GET /
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def main_handler (http_receive, request):
http_send_response (
http_receive, HTTP_STATUS_OK, { "oki": "doki!" }
)
# GET /echo
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def echo_handler (http_receive, request):
body_values = http_request_get_query_params (request)
value = http_request_get_query_value (body_values, "value")
http_send_response (
http_receive, HTTP_STATUS_OK,
{ "echo": f"Values received: {value}" }
)
# POST /data
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def data_handler (http_receive, request):
body = http_request_get_body_json (request)
http_send_response (http_receive, HTTP_STATUS_OK, { "echo": body })
# POST /token
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def token_handler (http_receive, request):
body = http_request_get_body_json (request)
http_jwt_create_and_send (http_receive, HTTP_STATUS_OK, body)
# GET /auth
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def auth_handler (http_receive, request):
token_values = http_jwt_token_decode (request)
http_send_response (
http_receive, HTTP_STATUS_OK, {
"oki": "doki",
"values": token_values
})
# GET /parent
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def parent_handler (http_receive, request):
pass
# GET /child
@ctypes.CFUNCTYPE (None, ctypes.c_void_p, ctypes.c_void_p)
def child_handler (http_receive, request):
pass
def start ():
global web_service
web_service = http_cerver_configuration ("web")
http_cerver = http_cerver_get (web_service)
http_cerver_auth_configuration (
http_cerver, JWT_ALG_RS256, "./web/keys/key.key", "./web/keys/key.pub"
)
# GET /
main_route = http_create_route (
http_cerver, REQUEST_METHOD_GET, "/", main_handler
)
# GET /echo
echo_route = http_create_route (
http_cerver, REQUEST_METHOD_GET, "echo", echo_handler
)
# POST /data
data_route = http_create_route (
http_cerver, REQUEST_METHOD_POST, "data", data_handler
)
# POST /token
token_route = http_create_route (
http_cerver, REQUEST_METHOD_POST, "token", token_handler
)
# GET /auth
auth_route = http_create_route (
http_cerver, REQUEST_METHOD_GET, "auth",
auth_handler, HTTP_ROUTE_AUTH_TYPE_BEARER
)
# GET /parent
parent_route = http_create_route (
http_cerver, REQUEST_METHOD_GET, "parent",
parent_handler, HTTP_ROUTE_AUTH_TYPE_BEARER
)
# GET /parent/child
auth_route = http_create_child_route (
parent_route, REQUEST_METHOD_GET, "child",
child_handler, HTTP_ROUTE_AUTH_TYPE_BEARER
)
cerver_start (web_service)
if __name__ == "__main__":
signal.signal (signal.SIGINT, end)
signal.signal (signal.SIGTERM, end)
signal.signal (signal.SIGPIPE, signal.SIG_IGN)
cerver_init ()
cerver_version_print_full ()
pycerver_version_print_full ()
start ()
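# Illustrative requests against the running service (the listening port comes
# from the "web" cerver configuration loaded above, so <port> is a placeholder):
#   GET  http://localhost:<port>/                    -> {"oki": "doki!"}
#   GET  http://localhost:<port>/echo?value=hi       -> {"echo": "Values received: hi"}
#   POST http://localhost:<port>/data   (JSON body)  -> echoes the body back
#   POST http://localhost:<port>/token  (JSON body)  -> returns a signed JWT
#   GET  http://localhost:<port>/auth   (Bearer JWT) -> decoded token values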
|
import base64
def replace_b64_in_dict(item):
"""
Replace base64 string in python dictionary of inference data. Refer to https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/api_rest.md#encoding-binary-values .
For example: {'inputs': {'images': {'b64': 'YWJjZGVmZ2hpMTIz'}, 'foo': 'bar'}} to {'inputs': {'images': 'abcdefghi123', 'foo': 'bar'}}.
"""
if isinstance(item, dict):
# Use items for Python 3 instead of iteritems
for key, value in item.items():
if isinstance(value, dict) and list(value.keys())[0] == "b64":
# Use list to wrap .keys() and .values() for Python 3
b64_string = list(value.values())[0]
# TODO: unicode string to string
b64_string = str(b64_string)
bytearray_string = base64.urlsafe_b64decode(b64_string)
item[key] = bytearray_string
else:
replace_b64_in_dict(value)
elif isinstance(item, list):
for index, value in enumerate(item):
if isinstance(value, dict) and list(value.keys())[0] == "b64":
b64_string = list(value.values())[0]
b64_string = str(b64_string)
bytearray_string = base64.urlsafe_b64decode(b64_string)
item[index] = bytearray_string
else:
replace_b64_in_dict(value)
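# Illustrative usage (note that on Python 3 urlsafe_b64decode returns a bytes
# object, so the decoded value is bytes rather than str):
#
#   payload = {"inputs": {"images": {"b64": "YWJjZGVmZ2hpMTIz"}, "foo": "bar"}}
#   replace_b64_in_dict(payload)
#   # payload is mutated in place to:
#   # {"inputs": {"images": b"abcdefghi123", "foo": "bar"}}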
|
from collections import defaultdict
from promise import Promise
from ....attribute.models import (
AssignedProductAttribute,
AssignedProductAttributeValue,
AssignedVariantAttribute,
AssignedVariantAttributeValue,
AttributeProduct,
AttributeVariant,
)
from ....core.permissions import ProductPermissions
from ...attribute.dataloaders import AttributesByAttributeId, AttributeValueByIdLoader
from ...core.dataloaders import DataLoader
from ...utils import get_user_or_app_from_context
from .products import ProductByIdLoader, ProductVariantByIdLoader
class BaseProductAttributesByProductTypeIdLoader(DataLoader):
"""Loads product attributes by product type ID."""
model_name = None
def batch_load(self, keys):
if not self.model_name:
raise ValueError("Provide a model_name for this dataloader.")
requestor = get_user_or_app_from_context(self.context)
if requestor.is_active and requestor.has_perm(
ProductPermissions.MANAGE_PRODUCTS
):
qs = self.model_name.objects.using(self.database_connection_name).all()
else:
qs = self.model_name.objects.using(self.database_connection_name).filter(
attribute__visible_in_storefront=True
)
product_type_attribute_pairs = qs.filter(product_type_id__in=keys).values_list(
"product_type_id", "attribute_id"
)
product_type_to_attributes_map = defaultdict(list)
for product_type_id, attr_id in product_type_attribute_pairs:
product_type_to_attributes_map[product_type_id].append(attr_id)
def map_attributes(attributes):
attributes_map = {attr.id: attr for attr in attributes}
return [
[
attributes_map[attr_id]
for attr_id in product_type_to_attributes_map[product_type_id]
]
for product_type_id in keys
]
return (
AttributesByAttributeId(self.context)
.load_many(set(attr_id for _, attr_id in product_type_attribute_pairs))
.then(map_attributes)
)
class ProductAttributesByProductTypeIdLoader(
BaseProductAttributesByProductTypeIdLoader
):
"""Loads product attributes by product type ID."""
context_key = "product_attributes_by_producttype"
model_name = AttributeProduct
class VariantAttributesByProductTypeIdLoader(
BaseProductAttributesByProductTypeIdLoader
):
"""Loads variant attributes by product type ID."""
context_key = "variant_attributes_by_producttype"
model_name = AttributeVariant
class AttributeProductsByProductTypeIdLoader(DataLoader):
"""Loads AttributeProduct objects by product type ID."""
context_key = "attributeproducts_by_producttype"
def batch_load(self, keys):
requestor = get_user_or_app_from_context(self.context)
if requestor.is_active and requestor.has_perm(
ProductPermissions.MANAGE_PRODUCTS
):
qs = AttributeProduct.objects.using(self.database_connection_name).all()
else:
qs = AttributeProduct.objects.using(self.database_connection_name).filter(
attribute__visible_in_storefront=True
)
attribute_products = qs.filter(product_type_id__in=keys)
producttype_to_attributeproducts = defaultdict(list)
for attribute_product in attribute_products:
producttype_to_attributeproducts[attribute_product.product_type_id].append(
attribute_product
)
return [producttype_to_attributeproducts[key] for key in keys]
class AttributeVariantsByProductTypeIdLoader(DataLoader):
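    """Loads AttributeVariant objects by product type ID."""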
context_key = "attributevariants_by_producttype"
def batch_load(self, keys):
requestor = get_user_or_app_from_context(self.context)
if requestor.is_active and requestor.has_perm(
ProductPermissions.MANAGE_PRODUCTS
):
qs = AttributeVariant.objects.using(self.database_connection_name).all()
else:
qs = AttributeVariant.objects.using(self.database_connection_name).filter(
attribute__visible_in_storefront=True
)
attribute_variants = qs.filter(product_type_id__in=keys)
producttype_to_attributevariants = defaultdict(list)
for attribute_variant in attribute_variants:
producttype_to_attributevariants[attribute_variant.product_type_id].append(
attribute_variant
)
return [producttype_to_attributevariants[key] for key in keys]
class AssignedProductAttributesByProductIdLoader(DataLoader):
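    """Loads AssignedProductAttribute objects by product ID."""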
context_key = "assignedproductattributes_by_product"
def batch_load(self, keys):
requestor = get_user_or_app_from_context(self.context)
if requestor.is_active and requestor.has_perm(
ProductPermissions.MANAGE_PRODUCTS
):
qs = AssignedProductAttribute.objects.using(
self.database_connection_name
).all()
else:
qs = AssignedProductAttribute.objects.using(
self.database_connection_name
).filter(assignment__attribute__visible_in_storefront=True)
assigned_product_attributes = qs.filter(product_id__in=keys)
product_to_assignedproductattributes = defaultdict(list)
for assigned_product_attribute in assigned_product_attributes:
product_to_assignedproductattributes[
assigned_product_attribute.product_id
].append(assigned_product_attribute)
return [product_to_assignedproductattributes[product_id] for product_id in keys]
class AssignedVariantAttributesByProductVariantId(DataLoader):
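    """Loads AssignedVariantAttribute objects (with their attribute) by product variant ID."""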
context_key = "assignedvariantattributes_by_productvariant"
def batch_load(self, keys):
requestor = get_user_or_app_from_context(self.context)
if requestor.is_active and requestor.has_perm(
ProductPermissions.MANAGE_PRODUCTS
):
qs = AssignedVariantAttribute.objects.using(
self.database_connection_name
).all()
else:
qs = AssignedVariantAttribute.objects.using(
self.database_connection_name
).filter(assignment__attribute__visible_in_storefront=True)
assigned_variant_attributes = qs.filter(variant_id__in=keys).select_related(
"assignment__attribute"
)
variant_attributes = defaultdict(list)
for assigned_variant_attribute in assigned_variant_attributes:
variant_attributes[assigned_variant_attribute.variant_id].append(
assigned_variant_attribute
)
return [variant_attributes[variant_id] for variant_id in keys]
class AttributeValuesByAssignedProductAttributeIdLoader(DataLoader):
context_key = "attributevalues_by_assignedproductattribute"
def batch_load(self, keys):
attribute_values = AssignedProductAttributeValue.objects.using(
self.database_connection_name
).filter(assignment_id__in=keys)
value_ids = [a.value_id for a in attribute_values]
def map_assignment_to_values(values):
value_map = dict(zip(value_ids, values))
assigned_product_map = defaultdict(list)
for attribute_value in attribute_values:
assigned_product_map[attribute_value.assignment_id].append(
value_map.get(attribute_value.value_id)
)
return [assigned_product_map[key] for key in keys]
return (
AttributeValueByIdLoader(self.context)
.load_many(value_ids)
.then(map_assignment_to_values)
)
class AttributeValuesByAssignedVariantAttributeIdLoader(DataLoader):
context_key = "attributevalues_by_assignedvariantattribute"
def batch_load(self, keys):
attribute_values = AssignedVariantAttributeValue.objects.using(
self.database_connection_name
).filter(assignment_id__in=keys)
value_ids = [a.value_id for a in attribute_values]
def map_assignment_to_values(values):
value_map = dict(zip(value_ids, values))
assigned_variant_map = defaultdict(list)
for attribute_value in attribute_values:
assigned_variant_map[attribute_value.assignment_id].append(
value_map.get(attribute_value.value_id)
)
return [assigned_variant_map[key] for key in keys]
return (
AttributeValueByIdLoader(self.context)
.load_many(value_ids)
.then(map_assignment_to_values)
)
class SelectedAttributesByProductIdLoader(DataLoader):
context_key = "selectedattributes_by_product"
def batch_load(self, keys):
def with_products_and_assigned_attributes(result):
products, product_attributes = result
products = [product for product in products if product is not None]
assigned_product_attribute_ids = [
a.id for attrs in product_attributes for a in attrs
]
product_type_ids = list({p.product_type_id for p in products})
product_attributes = dict(zip(keys, product_attributes))
def with_attributeproducts_and_values(result):
attribute_products, attribute_values = result
attribute_ids = list(
{ap.attribute_id for aps in attribute_products for ap in aps}
)
attribute_products = dict(zip(product_type_ids, attribute_products))
attribute_values = dict(
zip(assigned_product_attribute_ids, attribute_values)
)
def with_attributes(attributes):
id_to_attribute = dict(zip(attribute_ids, attributes))
selected_attributes_map = defaultdict(list)
for key, product in zip(keys, products):
assigned_producttype_attributes = attribute_products[
product.product_type_id
]
assigned_product_attributes = product_attributes[key]
for (
assigned_producttype_attribute
) in assigned_producttype_attributes:
product_assignment = next(
(
apa
for apa in assigned_product_attributes
if apa.assignment_id
== assigned_producttype_attribute.id
),
None,
)
attribute = id_to_attribute[
assigned_producttype_attribute.attribute_id
]
if product_assignment:
values = attribute_values[product_assignment.id]
else:
values = []
selected_attributes_map[key].append(
{"values": values, "attribute": attribute}
)
return [selected_attributes_map[key] for key in keys]
return (
AttributesByAttributeId(self.context)
.load_many(attribute_ids)
.then(with_attributes)
)
attribute_products = AttributeProductsByProductTypeIdLoader(
self.context
).load_many(product_type_ids)
attribute_values = AttributeValuesByAssignedProductAttributeIdLoader(
self.context
).load_many(assigned_product_attribute_ids)
return Promise.all([attribute_products, attribute_values]).then(
with_attributeproducts_and_values
)
products = ProductByIdLoader(self.context).load_many(keys)
assigned_attributes = AssignedProductAttributesByProductIdLoader(
self.context
).load_many(keys)
return Promise.all([products, assigned_attributes]).then(
with_products_and_assigned_attributes
)
class SelectedAttributesByProductVariantIdLoader(DataLoader):
context_key = "selectedattributes_by_productvariant"
def batch_load(self, keys):
        def with_variants_and_assigned_attributes(results):
product_variants, variant_attributes = results
product_ids = list({v.product_id for v in product_variants})
assigned_variant_attribute_ids = [
a.id for attrs in variant_attributes for a in attrs
]
variant_attributes = dict(zip(keys, variant_attributes))
def with_products_and_attribute_values(results):
products, attribute_values = results
product_type_ids = list({p.product_type_id for p in products})
products = dict(zip(product_ids, products))
attribute_values = dict(
zip(assigned_variant_attribute_ids, attribute_values)
)
def with_attribute_products(attribute_products):
attribute_ids = list(
{ap.attribute_id for aps in attribute_products for ap in aps}
)
attribute_products = dict(zip(product_type_ids, attribute_products))
def with_attributes(attributes):
id_to_attribute = dict(zip(attribute_ids, attributes))
selected_attributes_map = defaultdict(list)
for key, product_variant in zip(keys, product_variants):
product = products[product_variant.product_id]
assigned_producttype_attributes = attribute_products[
product.product_type_id
]
assigned_variant_attributes = variant_attributes[key]
for (
assigned_producttype_attribute
) in assigned_producttype_attributes:
variant_assignment = next(
(
apa
for apa in assigned_variant_attributes
if apa.assignment_id
== assigned_producttype_attribute.id
),
None,
)
attribute = id_to_attribute[
assigned_producttype_attribute.attribute_id
]
if variant_assignment:
values = attribute_values[variant_assignment.id]
else:
values = []
selected_attributes_map[key].append(
{"values": values, "attribute": attribute}
)
return [selected_attributes_map[key] for key in keys]
return (
AttributesByAttributeId(self.context)
.load_many(attribute_ids)
.then(with_attributes)
)
return (
AttributeVariantsByProductTypeIdLoader(self.context)
.load_many(product_type_ids)
.then(with_attribute_products)
)
products = ProductByIdLoader(self.context).load_many(product_ids)
attribute_values = AttributeValuesByAssignedVariantAttributeIdLoader(
self.context
).load_many(assigned_variant_attribute_ids)
return Promise.all([products, attribute_values]).then(
with_products_and_attribute_values
)
product_variants = ProductVariantByIdLoader(self.context).load_many(keys)
assigned_attributes = AssignedVariantAttributesByProductVariantId(
self.context
).load_many(keys)
return Promise.all([product_variants, assigned_attributes]).then(
            with_variants_and_assigned_attributes
)
|
from django.contrib import admin
from .models import *
admin.site.register(Topic)
admin.site.register(Comment)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# train backbone network with imagenet dataset
#
import os, sys, argparse
import numpy as np
from multiprocessing import cpu_count
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TerminateOnNaN
from tensorflow.keras.utils import multi_gpu_model
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from shufflenet import ShuffleNet
from shufflenet_v2 import ShuffleNetV2
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..'))
from yolo3.models.yolo3_nano import NanoNet
from yolo3.models.yolo3_darknet import DarkNet53
from yolo4.models.yolo4_darknet import CSPDarkNet53
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#import tensorflow as tf
#config = tf.ConfigProto()
#config.gpu_options.allow_growth=True #dynamic alloc GPU resource
#config.gpu_options.per_process_gpu_memory_fraction = 0.9 #GPU memory threshold 0.3
#session = tf.Session(config=config)
## set session
#K.set_session(session)
def preprocess(x):
x = np.expand_dims(x, axis=0)
"""
"mode" option description in preprocess_input
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
"""
#x = preprocess_input(x, mode='tf')
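    # manual equivalent of preprocess_input(mode='tf'): scale pixels to [-1, 1]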
x /= 255.0
x -= 0.5
x *= 2.0
return x
def get_model(model_type, include_top=True):
if model_type == 'shufflenet':
input_shape = (224, 224, 3)
model = ShuffleNet(groups=3, weights=None, include_top=include_top)
elif model_type == 'shufflenet_v2':
input_shape = (224, 224, 3)
model = ShuffleNetV2(bottleneck_ratio=1, weights=None, include_top=include_top)
elif model_type == 'nanonet':
input_shape = (224, 224, 3)
model = NanoNet(weights=None, include_top=include_top)
elif model_type == 'darknet53':
input_shape = (224, 224, 3)
model = DarkNet53(weights=None, include_top=include_top)
elif model_type == 'cspdarknet53':
input_shape = (224, 224, 3)
model = CSPDarkNet53(weights=None, include_top=include_top)
else:
raise ValueError('Unsupported model type')
return model, input_shape[:2]
def get_optimizer(optim_type, learning_rate):
if optim_type == 'sgd':
optimizer = SGD(lr=learning_rate, decay=5e-4, momentum=0.9)
elif optim_type == 'rmsprop':
optimizer = RMSprop(lr=learning_rate)
elif optim_type == 'adam':
optimizer = Adam(lr=learning_rate, decay=5e-4)
else:
raise ValueError('Unsupported optimizer type')
return optimizer
def train(args, model, input_shape):
log_dir = 'logs'
# callbacks for training process
checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-val_loss{val_loss:.3f}-val_acc{val_acc:.3f}-val_top_k_categorical_accuracy{val_top_k_categorical_accuracy:.3f}.h5'),
monitor='val_acc',
mode='max',
verbose=1,
save_weights_only=False,
save_best_only=True,
period=1)
logging = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=False, write_grads=False, write_images=False, update_freq='batch')
terminate_on_nan = TerminateOnNaN()
learn_rates = [0.05, 0.01, 0.005, 0.001, 0.0005]
    lr_scheduler = LearningRateScheduler(lambda epoch: learn_rates[min(epoch // 30, len(learn_rates) - 1)])  # clamp so epochs past the schedule keep the last rate
# data generator
train_datagen = ImageDataGenerator(preprocessing_function=preprocess,
zoom_range=0.25,
width_shift_range=0.05,
height_shift_range=0.05,
brightness_range=[0.5,1.5],
rotation_range=30,
shear_range=0.2,
channel_shift_range=0.1,
#rescale=1./255,
vertical_flip=True,
horizontal_flip=True)
test_datagen = ImageDataGenerator(preprocessing_function=preprocess)
train_generator = train_datagen.flow_from_directory(
args.train_data_path,
target_size=input_shape,
batch_size=args.batch_size)
test_generator = test_datagen.flow_from_directory(
args.val_data_path,
target_size=input_shape,
batch_size=args.batch_size)
# get optimizer
optimizer = get_optimizer(args.optim_type, args.learning_rate)
# start training
model.compile(
optimizer=optimizer,
metrics=['accuracy', 'top_k_categorical_accuracy'],
loss='categorical_crossentropy')
print('Train on {} samples, val on {} samples, with batch size {}.'.format(train_generator.samples, test_generator.samples, args.batch_size))
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples // args.batch_size,
epochs=args.total_epoch,
        workers=cpu_count()-1, # parallelize image data feeding but leave one CPU core idle
initial_epoch=args.init_epoch,
use_multiprocessing=True,
max_queue_size=10,
validation_data=test_generator,
validation_steps=test_generator.samples // args.batch_size,
callbacks=[logging, checkpoint, lr_scheduler, terminate_on_nan])
# Finally store model
model.save(os.path.join(log_dir, 'trained_final.h5'))
def evaluate_model(args, model, input_shape):
# eval data generator
eval_datagen = ImageDataGenerator(preprocessing_function=preprocess)
eval_generator = eval_datagen.flow_from_directory(
args.val_data_path,
target_size=input_shape,
batch_size=args.batch_size)
# get optimizer
optimizer = get_optimizer(args.optim_type, args.learning_rate)
    # compile the model for evaluation
model.compile(
optimizer=optimizer,
metrics=['accuracy', 'top_k_categorical_accuracy'],
loss='categorical_crossentropy')
print('Evaluate on {} samples, with batch size {}.'.format(eval_generator.samples, args.batch_size))
scores = model.evaluate_generator(
eval_generator,
steps=eval_generator.samples // args.batch_size,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=1)
print('Evaluate loss:', scores[0])
print('Top-1 accuracy:', scores[1])
print('Top-k accuracy:', scores[2])
def verify_with_image(model, input_shape):
from tensorflow.keras.applications.resnet50 import decode_predictions
from PIL import Image
while True:
img_file = input('Input image filename:')
try:
img = Image.open(img_file)
resized_img = img.resize(input_shape, Image.BICUBIC)
        except Exception:
print('Open Error! Try again!')
continue
else:
img_array = np.asarray(resized_img).astype('float32')
x = preprocess(img_array)
preds = model.predict(x)
print('Predict result:', decode_predictions(preds))
img.show()
def main(args):
include_top = True
if args.dump_headless:
include_top = False
# prepare model
model, input_shape = get_model(args.model_type, include_top=include_top)
if args.weights_path:
model.load_weights(args.weights_path, by_name=True)
# support multi-gpu training
if args.gpu_num >= 2:
model = multi_gpu_model(model, gpus=args.gpu_num)
model.summary()
if args.evaluate:
K.set_learning_phase(0)
evaluate_model(args, model, input_shape)
elif args.verify_with_image:
K.set_learning_phase(0)
verify_with_image(model, input_shape)
elif args.dump_headless:
K.set_learning_phase(0)
model.save(args.output_model_file)
print('export headless model to %s' % str(args.output_model_file))
else:
train(args, model, input_shape)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', type=str, required=False, default='shufflenet_v2',
help='backbone model type: shufflenet/shufflenet_v2/nanonet/darknet53/cspdarknet53, default=shufflenet_v2')
parser.add_argument('--train_data_path', type=str,# required=True,
help='path to Imagenet train data')
parser.add_argument('--val_data_path', type=str,# required=True,
help='path to Imagenet validation dataset')
parser.add_argument('--weights_path', type=str,required=False, default=None,
help = "Pretrained model/weights file for fine tune")
parser.add_argument('--batch_size', type=int,required=False, default=128,
help = "batch size for train, default=128")
parser.add_argument('--optim_type', type=str, required=False, default='sgd',
help='optimizer type: sgd/rmsprop/adam, default=sgd')
parser.add_argument('--learning_rate', type=float,required=False, default=.05,
help = "Initial learning rate, default=0.05")
parser.add_argument('--init_epoch', type=int,required=False, default=0,
help = "Initial training epochs for fine tune training, default=0")
parser.add_argument('--total_epoch', type=int,required=False, default=200,
help = "Total training epochs, default=200")
parser.add_argument('--gpu_num', type=int, required=False, default=1,
help='Number of GPU to use, default=1')
parser.add_argument('--evaluate', default=False, action="store_true",
help='Evaluate a trained model with validation dataset')
parser.add_argument('--verify_with_image', default=False, action="store_true",
help='Verify trained model with image')
parser.add_argument('--dump_headless', default=False, action="store_true",
help='Dump out classification model to headless backbone model')
parser.add_argument('--output_model_file', type=str,
help='output headless backbone model file')
args = parser.parse_args()
main(args)
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-e^2(ic*d1hmigz(^i2x3a7ij9n$9uls%s7)a6puaubu$j!t1l7"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"django.contrib.flatpages",
"fpages",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = "static/"
STATICFILES_DIRS = [
BASE_DIR / "static",
]
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
|
"""
Utility functions for training
Author: Zhuo Su, Wenzhe Liu
Date: Aug 22, 2020
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import os
import shutil
import math
import time
import random
import skimage
import numpy as np
from skimage import io
from skimage.transform import resize
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
######################################
# measurement functions #
######################################
def get_model_parm_nums(model):
total = sum([param.numel() for param in model.parameters()])
total = float(total) / 1e6
return total
######################################
# basic functions #
######################################
def load_checkpoint(args, running_file):
model_dir = os.path.join(args.savedir, 'save_models')
latest_filename = os.path.join(model_dir, 'latest.txt')
model_filename = ''
if args.evaluate is not None:
model_filename = args.evaluate
else:
if os.path.exists(latest_filename):
with open(latest_filename, 'r') as fin:
model_filename = fin.readlines()[0].strip()
loadinfo = "=> loading checkpoint from '{}'".format(model_filename)
print(loadinfo)
state = None
if os.path.exists(model_filename):
state = torch.load(model_filename, map_location='cpu')
loadinfo2 = "=> loaded checkpoint '{}' successfully".format(model_filename)
else:
loadinfo2 = "no checkpoint loaded"
print(loadinfo2)
running_file.write('%s\n%s\n' % (loadinfo, loadinfo2))
running_file.flush()
return state
def save_checkpoint(state, epoch, root, saveID, keep_freq=10):
filename = 'checkpoint_%03d.pth.tar' % epoch
model_dir = os.path.join(root, 'save_models')
model_filename = os.path.join(model_dir, filename)
latest_filename = os.path.join(model_dir, 'latest.txt')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# write new checkpoint
torch.save(state, model_filename)
with open(latest_filename, 'w') as fout:
fout.write(model_filename)
print("=> saved checkpoint '{}'".format(model_filename))
# remove old model
if saveID is not None and (saveID + 1) % keep_freq != 0:
filename = 'checkpoint_%03d.pth.tar' % saveID
model_filename = os.path.join(model_dir, filename)
if os.path.exists(model_filename):
os.remove(model_filename)
print('=> removed checkpoint %s' % model_filename)
print('##########Time##########', time.strftime('%Y-%m-%d %H:%M:%S'))
return epoch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
#self.sum += val * n
self.sum += val
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, args):
method = args.lr_type
if method == 'cosine':
T_total = float(args.epochs)
T_cur = float(epoch)
lr = 0.5 * args.lr * (1 + math.cos(math.pi * T_cur / T_total))
elif method == 'multistep':
lr = args.lr
for epoch_step in args.lr_steps:
if epoch >= epoch_step:
lr = lr * 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = lr
str_lr = '%.6f' % lr
return str_lr
######################################
# edge specific functions #
######################################
def cross_entropy_loss_RCF(prediction, labelf, beta):
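    """Class-balanced binary cross-entropy used by RCF-style edge detectors.
    Positive (edge) pixels are weighted by the fraction of negatives, negative
    pixels by beta times the fraction of positives, and pixels labelled 2
    (uncertain) are masked out of the loss.
    """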
label = labelf.long()
mask = labelf.clone()
num_positive = torch.sum(label==1).float()
num_negative = torch.sum(label==0).float()
mask[label == 1] = 1.0 * num_negative / (num_positive + num_negative)
mask[label == 0] = beta * num_positive / (num_positive + num_negative)
mask[label == 2] = 0
cost = F.binary_cross_entropy(
prediction, labelf, weight=mask, reduction='sum')
return cost
######################################
# debug functions #
######################################
# no function currently
|
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
'''
Defines the base class for analysis tasks.
'''
# Authors
# -------
# Xylar Asay-Davis
from __future__ import absolute_import, division, print_function, \
unicode_literals
from multiprocessing import Process, Value
import time
import traceback
import logging
import sys
from mpas_analysis.shared.io import NameList, StreamsFile
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories, get_files_year_month
class AnalysisTask(Process): # {{{
'''
The base class for analysis tasks.
Attributes
----------
config : ``MpasAnalysisConfigParser``
Contains configuration options
taskName : str
The name of the task, typically the same as the class name except
starting with lowercase (e.g. 'myTask' for class 'MyTask')
componentName : {'ocean', 'seaIce'}
The name of the component (same as the folder where the task
resides)
tags : list of str
Tags used to describe the task (e.g. 'timeSeries', 'climatology',
'horizontalMap', 'index', 'transect'). These are used to determine
which tasks are generated (e.g. 'all_transect' or 'no_climatology'
in the 'generate' flags)
runDirectory : str
The base input directory for namelists, streams files and restart files
historyDirectory : str
The base input directory for history files
plotsDirectory : str
The directory for writing plots (which is also created if it doesn't
exist)
namelist : ``shared.io.NameList``
the namelist reader
runStreams : ``shared.io.StreamsFile``
the streams file reader for streams in the run directory (e.g. restart
files)
historyStreams : ``shared.io.StreamsFile``
the streams file reader for streams in the history directory (most
streams other than restart files)
calendar : {'gregorian', 'gregorian_noleap'}
The calendar used in the MPAS run
runAfterTasks : list of ``AnalysisTasks``
tasks that must be complete before this task can run
subtasks : ``OrderedDict`` of ``AnalysisTasks``
Subtasks of this task, with subtask names as keys
xmlFileNames : list of strings
The XML file associated with each plot produced by this analysis, empty
if no plots were produced
logger : ``logging.Logger``
A logger for output during the run phase of an analysis task
'''
# Authors
# -------
# Xylar Asay-Davis
# flags for run status
UNSET = 0
READY = 1
BLOCKED = 2
RUNNING = 3
SUCCESS = 4
FAIL = 5
def __init__(self, config, taskName, componentName, tags=[],
subtaskName=None): # {{{
'''
Construct the analysis task.
Individual tasks (children classes of this base class) should first
call this method to perform basic initialization, then, define the
``taskName``, ``componentName`` and list of ``tags`` for the task.
Parameters
----------
config : instance of MpasAnalysisConfigParser
Contains configuration options
taskName : str
The name of the task, typically the same as the class name except
starting with lowercase (e.g. 'myTask' for class 'MyTask')
componentName : {'ocean', 'seaIce'}
The name of the component (same as the folder where the task
resides)
tags : list of str, optional
Tags used to describe the task (e.g. 'timeSeries', 'climatology',
'horizontalMap', 'index', 'transect'). These are used to determine
which tasks are generated (e.g. 'all_transect' or 'no_climatology'
in the 'generate' flags)
subtaskName : str, optional
If this is a subtask of ``taskName``, the name of the subtask
'''
# Authors
# -------
# Xylar Asay-Davis
if subtaskName is None:
self.fullTaskName = taskName
self.printTaskName = taskName
else:
self.fullTaskName = '{}_{}'.format(taskName, subtaskName)
self.printTaskName = '{}: {}'.format(taskName, subtaskName)
# call the constructor from the base class (Process)
super(AnalysisTask, self).__init__(name=self.fullTaskName)
self.config = config
self.taskName = taskName
self.subtaskName = subtaskName
self.componentName = componentName
self.tags = tags
self.subtasks = []
self.logger = None
self.runAfterTasks = []
self.xmlFileNames = []
# non-public attributes related to multiprocessing and logging
self.daemon = True
self._setupStatus = None
self._runStatus = Value('i', AnalysisTask.UNSET)
self._stackTrace = None
self._logFileName = None
# the number of subprocesses run by this process, typically 1 but
# could be 12 for ncclimo in bck or mpi mode
self.subprocessCount = 1
# run the task directly as opposed to launching it as a new process
# even in parallel because it has subprocesses such as Pools
self.runDirectly = False
# }}}
def setup_and_check(self): # {{{
'''
Perform steps to set up the analysis (e.g. reading namelists and
streams files).
After this call, the following attributes are set (see documentation
for the class):
runDirectory, historyDirectory, plotsDirectory, namelist, runStreams,
historyStreams, calendar
Individual tasks (children classes of this base class) should first
call this method to perform basic setup, then, check whether the
configuration is correct for a given analysis and perform additional,
analysis-specific setup. For example, this function could check if
necessary observations and other data files are found, then, determine
the list of files to be read when the analysis is run.
'''
# Authors
# -------
# Xylar Asay-Davis
# read parameters from config file
# the run directory contains the restart files
self.runDirectory = build_config_full_path(self.config, 'input',
'runSubdirectory')
# if the history directory exists, use it; if not, fall back on
# runDirectory
self.historyDirectory = build_config_full_path(
self.config, 'input',
'{}HistorySubdirectory'.format(self.componentName),
defaultPath=self.runDirectory)
self.plotsDirectory = build_config_full_path(self.config, 'output',
'plotsSubdirectory')
namelistFileName = build_config_full_path(
self.config, 'input',
'{}NamelistFileName'.format(self.componentName))
self.namelist = NameList(namelistFileName)
streamsFileName = build_config_full_path(
self.config, 'input',
'{}StreamsFileName'.format(self.componentName))
self.runStreams = StreamsFile(streamsFileName,
streamsdir=self.runDirectory)
self.historyStreams = StreamsFile(streamsFileName,
streamsdir=self.historyDirectory)
self.calendar = self.namelist.get('config_calendar_type')
make_directories(self.plotsDirectory)
# set the start and end dates for each type of analysis
for tag in ['climatology', 'timeSeries', 'index']:
if tag in self.tags:
self.set_start_end_date(section=tag)
# redirect output to a log file
logsDirectory = build_config_full_path(self.config, 'output',
'logsSubdirectory')
self._logFileName = '{}/{}.log'.format(logsDirectory,
self.fullTaskName)
# }}}
def run_task(self): # {{{
'''
Run the analysis. Each task should override this function to do the
work of computing and/or plotting analysis
'''
# Authors
# -------
# Xylar Asay-Davis
return # }}}
def run_after(self, task): # {{{
'''
Only run this task after the given task has completed. This allows a
task to be constructed of multiple subtasks, some of which may block
later tasks, while allowing some subtasks to run in parallel. It also
allows for tasks to depend on other tasks (e.g. for computing
climatologies or extracting time series for many variables at once).
Parameters
----------
task : ``AnalysisTask``
The task that should finish before this one begins
'''
# Authors
# -------
# Xylar Asay-Davis
if task not in self.runAfterTasks:
self.runAfterTasks.append(task)
# }}}
def add_subtask(self, subtask): # {{{
'''
Add a subtask to this task. This task always runs after the subtask
has finished. However, this task gets set up *before* the subtask,
so the setup of the subtask can depend on fields defined during the
setup of this task (the parent).
Parameters
----------
subtask : ``AnalysisTask``
The subtask to run as part of this task
'''
# Authors
# -------
# Xylar Asay-Davis
if subtask not in self.subtasks:
self.subtasks.append(subtask)
# }}}
def run(self, writeLogFile=True): # {{{
'''
Sets up logging and then runs the analysis task.
Parameters
----------
writeLogFile : bool, optional
If ``True``, output to stderr and stdout get written to a log file.
Otherwise, the internal logger ``self.logger`` points to stdout
and no log file is created. The intention is for logging to take
place in parallel mode but not in serial mode.
'''
# Authors
# -------
# Xylar Asay-Davis
# redirect output to a log file
if writeLogFile:
self.logger = logging.getLogger(self.fullTaskName)
handler = logging.FileHandler(self._logFileName)
else:
self.logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
formatter = AnalysisFormatter()
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
self.logger.propagate = False
if writeLogFile:
oldStdout = sys.stdout
oldStderr = sys.stderr
sys.stdout = StreamToLogger(self.logger, logging.INFO)
sys.stderr = StreamToLogger(self.logger, logging.ERROR)
startTime = time.time()
try:
self.run_task()
self._runStatus.value = AnalysisTask.SUCCESS
except (Exception, BaseException) as e:
if isinstance(e, KeyboardInterrupt):
raise e
self._stackTrace = traceback.format_exc()
self.logger.error("analysis task {} failed during run \n"
"{}".format(self.fullTaskName, self._stackTrace))
self._runStatus.value = AnalysisTask.FAIL
runDuration = time.time() - startTime
m, s = divmod(runDuration, 60)
h, m = divmod(int(m), 60)
self.logger.info('Execution time: {}:{:02d}:{:05.2f}'.format(h, m, s))
if writeLogFile:
handler.close()
# restore stdout and stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
# remove the handlers from the logger (probably only necessary if
# writeLogFile==False)
self.logger.handlers = []
# }}}
def check_generate(self):
# {{{
'''
Determines if this analysis should be generated, based on the
``generate`` config option and ``taskName``, ``componentName`` and
``tags``.
Individual tasks do not need to create their own versions of this
function.
Returns
-------
generate : bool
Whether or not this task should be run.
Raises
------
ValueError : If one of ``self.taskName``, ``self.componentName``
or ``self.tags`` has not been set.
'''
# Authors
# -------
# Xylar Asay-Davis
for memberName in ['taskName', 'componentName', 'tags']:
if not hasattr(self, memberName):
raise ValueError('Analysis tasks must define self.{} in their '
'__init__ method.'.format(memberName))
if (not isinstance(self.tags, list) and
self.tags is not None):
raise ValueError('Analysis task\'s member self.tags '
'must be None or a list of strings.')
config = self.config
generateList = config.getExpression('output', 'generate')
if len(generateList) > 0 and generateList[0][0:5] == 'only_':
# add 'all' if the first item in the list has the 'only' prefix.
# Otherwise, we would not run any tasks
generateList = ['all'] + generateList
generate = False
for element in generateList:
if '_' in element:
(prefix, suffix) = element.split('_', 1)
else:
prefix = element
suffix = None
allSuffixes = [self.componentName]
if self.tags is not None:
allSuffixes = allSuffixes + self.tags
noSuffixes = [self.taskName] + allSuffixes
if prefix == 'all':
if (suffix in allSuffixes) or (suffix is None):
generate = True
elif prefix == 'no':
if suffix in noSuffixes:
generate = False
elif prefix == 'only':
if suffix not in allSuffixes:
generate = False
elif element == self.taskName:
generate = True
return generate # }}}
def check_analysis_enabled(self, analysisOptionName, default=False,
raiseException=True):
'''
Check to make sure a given analysis is turned on, issuing a warning or
raising an exception if not.
Parameters
----------
analysisOptionName : str
The name of a boolean namelist option indicating whether the given
analysis member is enabled
default : bool, optional
If no analysis option with the given name can be found, indicates
whether the given analysis is assumed to be enabled by default.
raiseException : bool, optional
Whether to raise an exception if the given analysis is not enabled
Returns
-------
enabled : bool
Whether the given analysis is enabled
Raises
------
RuntimeError
If the given analysis option is not found and ``default`` is not
``True`` or if the analysis option is found and is ``False``. The
exception is only raised if ``raiseException = True``.
'''
# Authors
# -------
# Xylar Asay-Davis
try:
optionName = analysisOptionName
enabled = self.namelist.getbool(optionName)
except ValueError:
enabled = default
if default:
print('Warning: namelist option {} not found.\n'
'This likely indicates that the simulation you '
'are analyzing was run with an\n'
'older version of MPAS-O that did not support '
'this flag. Assuming enabled.'.format(
analysisOptionName))
if not enabled and raiseException:
raise RuntimeError('*** MPAS-Analysis relies on {} = .true.\n'
'*** Make sure to enable this analysis '
'member.'.format(analysisOptionName))
return enabled
def set_start_end_date(self, section): # {{{
'''
Set the start and end dates in the ``config`` corresponding to the start
and end years in a given category of analysis
Parameters
----------
section : str
The name of a section in the config file containing ``startYear``
and ``endYear`` options. ``section`` is typically one of
``climatology``, ``timeSeries`` or ``index``
'''
# Authors
# -------
# Xylar Asay-Davis
if not self.config.has_option(section, 'startDate'):
startDate = '{:04d}-01-01_00:00:00'.format(
self.config.getint(section, 'startYear'))
self.config.set(section, 'startDate', startDate)
if not self.config.has_option(section, 'endDate'):
endDate = '{:04d}-12-31_23:59:59'.format(
self.config.getint(section, 'endYear'))
self.config.set(section, 'endDate', endDate) # }}}
# }}}
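# --- Illustrative sketch (not part of MPAS-Analysis) -------------------------
# A minimal example of how a child task is typically written against the base
# class above: the constructor passes the task name, component and tags to the
# base constructor, setup_and_check() calls the base implementation before
# doing task-specific checks, and the actual work happens in run_task(). The
# class name and namelist option below are hypothetical.
class _ExampleClimatologyTask(AnalysisTask):  # {{{
    def __init__(self, config):  # {{{
        super(_ExampleClimatologyTask, self).__init__(
            config=config,
            taskName='exampleClimatology',
            componentName='ocean',
            tags=['climatology'])
        # }}}

    def setup_and_check(self):  # {{{
        # the base class reads namelists/streams and sets up directories
        super(_ExampleClimatologyTask, self).setup_and_check()
        # task-specific check: make sure the relevant analysis member was on
        self.check_analysis_enabled(
            analysisOptionName='config_am_timeseriesstatsmonthly_enable',
            raiseException=True)
        # }}}

    def run_task(self):  # {{{
        self.logger.info('computing the example climatology...')
        # }}}
    # }}}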
class AnalysisFormatter(logging.Formatter): # {{{
"""
A custom formatter for logging
Modified from:
https://stackoverflow.com/a/8349076/7728169
"""
# Authors
# -------
# Xylar Asay-Davis
# printing error messages without a prefix because they are sometimes
# errors and sometimes only warnings sent to stderr
dbg_fmt = "DEBUG: %(module)s: %(lineno)d: %(msg)s"
info_fmt = "%(msg)s"
err_fmt = info_fmt
def __init__(self, fmt=info_fmt):
logging.Formatter.__init__(self, fmt)
def format(self, record):
# Save the original format configured by the user
# when the logger formatter was instantiated
format_orig = self._fmt
# Replace the original format with one customized by logging level
if record.levelno == logging.DEBUG:
self._fmt = AnalysisFormatter.dbg_fmt
elif record.levelno == logging.INFO:
self._fmt = AnalysisFormatter.info_fmt
elif record.levelno == logging.ERROR:
self._fmt = AnalysisFormatter.err_fmt
# Call the original formatter class to do the grunt work
result = logging.Formatter.format(self, record)
# Restore the original format configured by the user
self._fmt = format_orig
return result
# }}}
class StreamToLogger(object): # {{{
"""
Modified based on code by:
https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
Copyright (C) 2011 Ferry Boender
License: "available under the GPL" (the author does not provide more
details)
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, str(line.rstrip()))
def flush(self):
pass
# }}}
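# --- Illustrative sketch -----------------------------------------------------
# How the pieces above are typically combined outside of AnalysisTask.run():
# attach an AnalysisFormatter to a handler, then point stdout/stderr at the
# logger through StreamToLogger so that print() output and tracebacks land in
# the log file. The logger and file names are hypothetical.
def _example_redirect_output_to_log(logFileName='example.log'):
    logger = logging.getLogger('example')
    handler = logging.FileHandler(logFileName)
    handler.setFormatter(AnalysisFormatter())
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    oldStdout, oldStderr = sys.stdout, sys.stderr
    sys.stdout = StreamToLogger(logger, logging.INFO)
    sys.stderr = StreamToLogger(logger, logging.ERROR)
    try:
        print('this line goes to the log file instead of the console')
    finally:
        sys.stdout, sys.stderr = oldStdout, oldStderr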
def update_time_bounds_from_file_names(config, section, componentName): # {{{
"""
Update the start and end years and dates for time series, climatologies or
climate indices based on the years actually available in the list of files.
"""
# Authors
# -------
# Xylar Asay-Davis
# read parameters from config file
# the run directory contains the restart files
runDirectory = build_config_full_path(config, 'input', 'runSubdirectory')
# if the history directory exists, use it; if not, fall back on
# runDirectory
historyDirectory = build_config_full_path(
config, 'input',
'{}HistorySubdirectory'.format(componentName),
defaultPath=runDirectory)
errorOnMissing = config.getboolean('input', 'errorOnMissing')
namelistFileName = build_config_full_path(
config, 'input',
'{}NamelistFileName'.format(componentName))
try:
namelist = NameList(namelistFileName)
except (OSError, IOError):
# this component likely doesn't have output in this run
return
streamsFileName = build_config_full_path(
config, 'input',
'{}StreamsFileName'.format(componentName))
try:
historyStreams = StreamsFile(streamsFileName,
streamsdir=historyDirectory)
except (OSError, IOError):
# this component likely doesn't have output in this run
return
calendar = namelist.get('config_calendar_type')
requestedStartYear = config.getint(section, 'startYear')
requestedEndYear = config.get(section, 'endYear')
if requestedEndYear == 'end':
requestedEndYear = None
else:
# get it again as an integer
requestedEndYear = config.getint(section, 'endYear')
startDate = '{:04d}-01-01_00:00:00'.format(requestedStartYear)
if requestedEndYear is None:
endDate = None
else:
endDate = '{:04d}-12-31_23:59:59'.format(requestedEndYear)
streamName = 'timeSeriesStatsMonthlyOutput'
try:
inputFiles = historyStreams.readpath(
streamName, startDate=startDate, endDate=endDate,
calendar=calendar)
except ValueError:
# this component likely doesn't have output in this run
return
if len(inputFiles) == 0:
raise ValueError('No input files found for stream {} in {} between '
'{} and {}'.format(streamName, componentName,
requestedStartYear,
requestedEndYear))
years, months = get_files_year_month(sorted(inputFiles),
historyStreams,
streamName)
# search for the start of the first full year
firstIndex = 0
while(firstIndex < len(years) and months[firstIndex] != 1):
firstIndex += 1
startYear = years[firstIndex]
# search for the end of the last full year
lastIndex = len(years) - 1
while(lastIndex >= 0 and months[lastIndex] != 12):
lastIndex -= 1
endYear = years[lastIndex]
if requestedEndYear is None:
config.set(section, 'endYear', str(endYear))
requestedEndYear = endYear
if startYear != requestedStartYear or endYear != requestedEndYear:
if errorOnMissing:
raise ValueError(
"{} start and/or end year different from requested\n"
"requested: {:04d}-{:04d}\n"
"actual: {:04d}-{:04d}\n".format(
section, requestedStartYear, requestedEndYear, startYear,
endYear))
else:
print("Warning: {} start and/or end year different from "
"requested\n"
"requested: {:04d}-{:04d}\n"
"actual: {:04d}-{:04d}\n".format(section,
requestedStartYear,
requestedEndYear,
startYear,
endYear))
config.set(section, 'startYear', str(startYear))
config.set(section, 'endYear', str(endYear))
startDate = '{:04d}-01-01_00:00:00'.format(startYear)
config.set(section, 'startDate', startDate)
endDate = '{:04d}-12-31_23:59:59'.format(endYear)
config.set(section, 'endDate', endDate)
# }}}
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
sys.dont_write_bytecode = 1
from PyQt4 import QtGui
from MainWindow import MainWindow
__author__ = "Uname"
__version__ = "0.1"
__email__ = "ehcapa@qq.com"
def main():
app = QtGui.QApplication(sys.argv)
app.setStyle("cleanlooks")
window = MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
import struct
fp = open('test.jbig2', 'rb')
# headerFlags, = struct.unpack('>B', fp.read(1))
# fileOrganisation = headerFlags & 1
# randomAccessOrganisation = fileOrganisation == 0
# pagesKnown = headerFlags & 2
# noOfPagesKnown = pagesKnown == 0
# print('headerFlags:', headerFlags)
# NOTE: per the JBIG2 spec (ITU-T T.88, section 7.2), the segment number is a
# single 4-byte unsigned integer (assuming an embedded stream with no file
# header), not four 16-bit values.
segmentNumber, = struct.unpack('>I', fp.read(4))
segmentHeaderFlags, = struct.unpack('>B', fp.read(1))
# referedToSegmentCountAndRetentionFlags, = struct.unpack('>B', fp.read(1))
# referredToSegmentCount = (referedToSegmentCountAndRetentionFlags & 224) >> 5
# retentionFlags = (referedToSegmentCountAndRetentionFlags & 31)
print('segmentNumber:', segmentNumber)
print('segmentHeaderFlags:', segmentHeaderFlags)
# print('referredToSegmentCount:', referredToSegmentCount)
# print('retentionFlags:', retentionFlags)
fp.close()
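# For reference (ITU-T T.88, section 7.2), a JBIG2 segment header is laid out
# as: segment number (4 bytes), segment header flags (1 byte), referred-to
# segment count and retention flags (1 byte or more), referred-to segment
# numbers (1, 2 or 4 bytes each), page association (1 or 4 bytes, depending on
# bit 6 of the flags) and segment data length (4 bytes). The commented-out
# lines above correspond to the optional file header flags and to the
# referred-to-segment bookkeeping.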
|
from qutip.cy.spmatfuncs import *
|
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Response class."""
import functools
import mimetypes
from falcon.constants import _UNSET
from falcon.constants import DEFAULT_MEDIA_TYPE
from falcon.errors import HeaderNotSupported
from falcon.media import Handlers
from falcon.response_helpers import format_content_disposition
from falcon.response_helpers import format_etag_header
from falcon.response_helpers import format_header_value_list
from falcon.response_helpers import format_range
from falcon.response_helpers import header_property
from falcon.response_helpers import is_ascii_encodable
from falcon.util import dt_to_http
from falcon.util import http_cookies
from falcon.util import structures
from falcon.util import TimezoneGMT
from falcon.util.deprecation import deprecated
from falcon.util.uri import encode_check_escaped as uri_encode
from falcon.util.uri import encode_value_check_escaped as uri_encode_value
GMT_TIMEZONE = TimezoneGMT()
_ADD_LINK_DEPRECATED_MSG = (
'The add_link() method has been deprecated and will be removed in Falcon 4.0. '
'Please use append_link() instead.'
)
_STREAM_LEN_REMOVED_MSG = (
'The deprecated stream_len property was removed in Falcon 3.0. '
'Please use Response.set_stream() or Response.content_length instead.'
)
_RESERVED_CROSSORIGIN_VALUES = frozenset({'anonymous', 'use-credentials'})
_RESERVED_SAMESITE_VALUES = frozenset({'lax', 'strict', 'none'})
class Response:
"""Represents an HTTP response to a client request.
Note:
``Response`` is not meant to be instantiated directly by responders.
Keyword Arguments:
options (dict): Set of global options passed from the App handler.
Attributes:
status: HTTP status code or line (e.g., ``'200 OK'``). This may be set
to a member of :class:`http.HTTPStatus`, an HTTP status line string
or byte string (e.g., ``'200 OK'``), or an ``int``.
Note:
The Falcon framework itself provides a number of constants for
common status codes. They all start with the ``HTTP_`` prefix,
as in: ``falcon.HTTP_204``. (See also: :ref:`status`.)
media (object): A serializable object supported by the media handlers
configured via :class:`falcon.RequestOptions`.
Note:
See also :ref:`media` for more information regarding media
handling.
text (str): String representing response content.
Note:
Falcon will encode the given text as UTF-8
in the response. If the content is already a byte string,
use the :attr:`data` attribute instead (it's faster).
body (str): Deprecated alias for :attr:`text`. Will be removed in a
future Falcon version.
data (bytes): Byte string representing response content.
Use this attribute in lieu of `text` when your content is
already a byte string (of type ``bytes``). See also the note below.
Warning:
Always use the `text` attribute for text, or encode it
first to ``bytes`` when using the `data` attribute, to
ensure Unicode characters are properly encoded in the
HTTP response.
stream: Either a file-like object with a `read()` method that takes
an optional size argument and returns a block of bytes, or an
iterable object, representing response content, and yielding
blocks as byte strings. Falcon will use *wsgi.file_wrapper*, if
provided by the WSGI server, in order to efficiently serve
file-like objects.
Note:
If the stream is set to an iterable object that requires
resource cleanup, it can implement a close() method to do so.
The close() method will be called upon completion of the request.
context (object): Empty object to hold any data (in its attributes)
about the response which is specific to your app (e.g. session
object). Falcon itself will not interact with this attribute after
it has been initialized.
Note:
**New in 2.0:** The default `context_type` (see below) was
changed from :class:`dict` to a bare class; the preferred way to
pass response-specific data is now to set attributes directly
on the `context` object. For example::
resp.context.cache_strategy = 'lru'
context_type (class): Class variable that determines the factory or
type to use for initializing the `context` attribute. By default,
the framework will instantiate bare objects (instances of the bare
:class:`falcon.Context` class). However, you may override this
behavior by creating a custom child class of
:class:`falcon.Response`, and then passing that new class to
``falcon.App()`` by way of the latter's `response_type` parameter.
Note:
When overriding `context_type` with a factory function (as
opposed to a class), the function is called like a method of
the current Response instance. Therefore the first argument is
the Response instance itself (self).
options (dict): Set of global options passed from the App handler.
headers (dict): Copy of all headers set for the response,
sans cookies. Note that a new copy is created and returned each
time this property is referenced.
complete (bool): Set to ``True`` from within a middleware method to
signal to the framework that request processing should be
short-circuited (see also :ref:`Middleware <middleware>`).
"""
__slots__ = (
'text',
'context',
'options',
'status',
'stream',
'_cookies',
'_data',
'_extra_headers',
'_headers',
'_media',
'_media_rendered',
'__dict__',
)
complete = False
# Child classes may override this
context_type = structures.Context
def __init__(self, options=None):
self.status = '200 OK'
self._headers = {}
# NOTE(kgriffs): Collection of additional headers as a list of raw
# tuples, to use in cases where we need more control over setting
# headers and duplicates are allowable or even necessary.
#
# PERF(kgriffs): Save some CPU cycles and a few bytes of RAM by
# only instantiating the list object later on IFF it is needed.
self._extra_headers = None
self.options = options if options else ResponseOptions()
# NOTE(tbug): will be set to a SimpleCookie object
# when cookie is set via set_cookie
self._cookies = None
self.text = None
self.stream = None
self._data = None
self._media = None
self._media_rendered = _UNSET
self.context = self.context_type()
@property # type: ignore
@deprecated('Please use text instead.', is_property=True)
def body(self):
return self.text
@body.setter # type: ignore
@deprecated('Please use text instead.', is_property=True)
def body(self, value):
self.text = value
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def headers(self):
return self._headers.copy()
@property
def media(self):
return self._media
@media.setter
def media(self, value):
self._media = value
self._media_rendered = _UNSET
@property
def stream_len(self):
# NOTE(kgriffs): Provide some additional information by raising the
# error explicitly.
raise AttributeError(_STREAM_LEN_REMOVED_MSG)
@stream_len.setter
def stream_len(self, value):
# NOTE(kgriffs): We explicitly disallow setting the deprecated attribute
# so that apps relying on it do not fail silently.
raise AttributeError(_STREAM_LEN_REMOVED_MSG)
def render_body(self):
"""Get the raw bytestring content for the response body.
This method returns the raw data for the HTTP response body, taking
into account the :attr:`~.text`, :attr:`~.data`, and :attr:`~.media`
attributes.
Note:
This method ignores :attr:`~.stream`; the caller must check
and handle that attribute directly.
Returns:
bytes: The UTF-8 encoded value of the `text` attribute, if
set. Otherwise, the value of the `data` attribute if set, or
finally the serialized value of the `media` attribute. If
none of these attributes are set, ``None`` is returned.
"""
text = self.text
if text is None:
data = self._data
if data is None and self._media is not None:
# NOTE(kgriffs): We use a special _UNSET singleton since
# None is ambiguous (the media handler might return None).
if self._media_rendered is _UNSET:
if not self.content_type:
self.content_type = self.options.default_media_type
handler, _, _ = self.options.media_handlers._resolve(
self.content_type, self.options.default_media_type
)
self._media_rendered = handler.serialize(
self._media, self.content_type
)
data = self._media_rendered
else:
try:
# NOTE(kgriffs): Normally we expect text to be a string
data = text.encode()
except AttributeError:
# NOTE(kgriffs): Assume it was a bytes object already
data = text
return data
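# Illustrative note (not part of the Falcon API): the precedence above is
# text -> data -> media, e.g. in a responder:
#
#     resp.text = 'hello'       # render_body() -> b'hello'
#     resp.data = b'{"x": 1}'   # used only when text is None
#     resp.media = {'x': 1}     # serialized only when text and data are None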
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.status)
def set_stream(self, stream, content_length):
"""Set both `stream` and `content_length`.
Although the :attr:`~falcon.Response.stream` and
:attr:`~falcon.Response.content_length` properties may be set
directly, using this method ensures
:attr:`~falcon.Response.content_length` is not accidentally
neglected when the length of the stream is known in advance. Using this
method is also slightly more performant as compared to setting the
properties individually.
Note:
If the stream length is unknown, you can set
:attr:`~falcon.Response.stream` directly, and ignore
:attr:`~falcon.Response.content_length`. In this case, the WSGI
server may choose to use chunked encoding or one
of the other strategies suggested by PEP-3333.
Args:
stream: A readable file-like object.
content_length (int): Length of the stream, used for the
Content-Length header in the response.
"""
self.stream = stream
# PERF(kgriffs): Set directly rather than incur the overhead of
# the self.content_length property.
self._headers['content-length'] = str(content_length)
def set_cookie(
self,
name,
value,
expires=None,
max_age=None,
domain=None,
path=None,
secure=None,
http_only=True,
same_site=None,
):
"""Set a response cookie.
Note:
This method can be called multiple times to add one or
more cookies to the response.
See Also:
To learn more about setting cookies, see
:ref:`Setting Cookies <setting-cookies>`. The parameters
listed below correspond to those defined in `RFC 6265`_.
Args:
name (str): Cookie name
value (str): Cookie value
Keyword Args:
expires (datetime): Specifies when the cookie should expire.
By default, cookies expire when the user agent exits.
(See also: RFC 6265, Section 4.1.2.1)
max_age (int): Defines the lifetime of the cookie in
seconds. By default, cookies expire when the user agent
exits. If both `max_age` and `expires` are set, the
latter is ignored by the user agent.
Note:
Coercion to ``int`` is attempted if provided with
``float`` or ``str``.
(See also: RFC 6265, Section 4.1.2.2)
domain (str): Restricts the cookie to a specific domain and
any subdomains of that domain. By default, the user
agent will return the cookie only to the origin server.
When overriding this default behavior, the specified
domain must include the origin server. Otherwise, the
user agent will reject the cookie.
Note:
Cookies do not provide isolation by port, so the domain
should not provide one. (See also: RFC 6265, Section 8.5)
(See also: RFC 6265, Section 4.1.2.3)
path (str): Scopes the cookie to the given path plus any
subdirectories under that path (the "/" character is
interpreted as a directory separator). If the cookie
does not specify a path, the user agent defaults to the
path component of the requested URI.
Warning:
User agent interfaces do not always isolate
cookies by path, and so this should not be
considered an effective security measure.
(See also: RFC 6265, Section 4.1.2.4)
secure (bool): Direct the client to only return the cookie
in subsequent requests if they are made over HTTPS
(default: ``True``). This prevents attackers from
reading sensitive cookie data.
Note:
The default value for this argument is normally
``True``, but can be modified by setting
:py:attr:`~.ResponseOptions.secure_cookies_by_default`
via :any:`App.resp_options`.
Warning:
For the `secure` cookie attribute to be effective,
your application will need to enforce HTTPS.
(See also: RFC 6265, Section 4.1.2.5)
http_only (bool): The HttpOnly attribute limits the scope of the
cookie to HTTP requests. In particular, the attribute
instructs the user agent to omit the cookie when providing
access to cookies via "non-HTTP" APIs. This is intended to
mitigate some forms of cross-site scripting. (default: ``True``)
Note:
HttpOnly cookies are not visible to javascript scripts
in the browser. They are automatically sent to the server
on javascript ``XMLHttpRequest`` or ``Fetch`` requests.
(See also: RFC 6265, Section 4.1.2.6)
same_site (str): Helps protect against CSRF attacks by restricting
when a cookie will be attached to the request by the user agent.
When set to ``'Strict'``, the cookie will only be sent along
with "same-site" requests. If the value is ``'Lax'``, the
cookie will be sent with same-site requests, and with
"cross-site" top-level navigations. If the value is ``'None'``,
the cookie will be sent with same-site and cross-site requests.
Finally, when this attribute is not set on the cookie, the
attribute will be treated as if it had been set to ``'None'``.
(See also: `Same-Site RFC Draft`_)
Raises:
KeyError: `name` is not a valid cookie name.
ValueError: `value` is not a valid cookie value.
.. _RFC 6265:
http://tools.ietf.org/html/rfc6265
.. _Same-Site RFC Draft:
https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-4.1.2.7
"""
if not is_ascii_encodable(name):
raise KeyError('name is not ascii encodable')
if not is_ascii_encodable(value):
raise ValueError('value is not ascii encodable')
value = str(value)
if self._cookies is None:
self._cookies = http_cookies.SimpleCookie()
try:
self._cookies[name] = value
except http_cookies.CookieError as e: # pragma: no cover
# NOTE(tbug): we raise a KeyError here, to avoid leaking
# the CookieError to the user. SimpleCookie (well, BaseCookie)
# only throws CookieError on issues with the cookie key
raise KeyError(str(e))
if expires:
# set Expires on cookie. Format is Wdy, DD Mon YYYY HH:MM:SS GMT
# NOTE(tbug): we never actually need to
# know that GMT is named GMT when formatting cookies.
# It is a function call less to just write "GMT" in the fmt string:
fmt = '%a, %d %b %Y %H:%M:%S GMT'
if expires.tzinfo is None:
# naive
self._cookies[name]['expires'] = expires.strftime(fmt)
else:
# aware
gmt_expires = expires.astimezone(GMT_TIMEZONE)
self._cookies[name]['expires'] = gmt_expires.strftime(fmt)
if max_age:
# RFC 6265 section 5.2.2 says about the max-age value:
# "If the remainder of attribute-value contains a non-DIGIT
# character, ignore the cookie-av."
# That is, RFC-compliant response parsers will ignore the max-age
# attribute if the value contains a dot, as in floating point
# numbers. Therefore, attempt to convert the value to an integer.
self._cookies[name]['max-age'] = int(max_age)
if domain:
self._cookies[name]['domain'] = domain
if path:
self._cookies[name]['path'] = path
is_secure = self.options.secure_cookies_by_default if secure is None else secure
if is_secure:
self._cookies[name]['secure'] = True
if http_only:
self._cookies[name]['httponly'] = http_only
# PERF(kgriffs): Morsel.__setitem__() will lowercase this anyway,
# so we can just pass this in and when __setitem__() calls
# lower() it will be very slightly faster.
if same_site:
same_site = same_site.lower()
if same_site not in _RESERVED_SAMESITE_VALUES:
raise ValueError(
"same_site must be set to either 'lax', 'strict', or 'none'"
)
self._cookies[name]['samesite'] = same_site.capitalize()
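# Illustrative sketch (hypothetical values): a responder setting a session
# cookie that expires after an hour and is limited to same-site requests
# would typically call:
#
#     resp.set_cookie('session', token, max_age=3600, path='/',
#                     secure=True, http_only=True, same_site='Lax')
#
# which emits a Set-Cookie header carrying the Path, Max-Age, Secure,
# HttpOnly and SameSite attributes.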
def unset_cookie(self, name, domain=None, path=None):
"""Unset a cookie in the response.
Clears the contents of the cookie, and instructs the user
agent to immediately expire its own copy of the cookie.
Note:
Modern browsers place restriction on cookies without the
"same-site" cookie attribute set. To that end this attribute
is set to ``'Lax'`` by this method.
(See also: `Same-Site warnings`_)
Warning:
In order to successfully remove a cookie, both the
path and the domain must match the values that were
used when the cookie was created.
Args:
name (str): Cookie name
Keyword Args:
domain (str): Restricts the cookie to a specific domain and
any subdomains of that domain. By default, the user
agent will return the cookie only to the origin server.
When overriding this default behavior, the specified
domain must include the origin server. Otherwise, the
user agent will reject the cookie.
Note:
Cookies do not provide isolation by port, so the domain
should not provide one. (See also: RFC 6265, Section 8.5)
(See also: RFC 6265, Section 4.1.2.3)
path (str): Scopes the cookie to the given path plus any
subdirectories under that path (the "/" character is
interpreted as a directory separator). If the cookie
does not specify a path, the user agent defaults to the
path component of the requested URI.
Warning:
User agent interfaces do not always isolate
cookies by path, and so this should not be
considered an effective security measure.
(See also: RFC 6265, Section 4.1.2.4)
.. _Same-Site warnings:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite#Fixing_common_warnings
""" # noqa: E501
if self._cookies is None:
self._cookies = http_cookies.SimpleCookie()
self._cookies[name] = ''
# NOTE(Freezerburn): SimpleCookie apparently special cases the
# expires attribute to automatically use strftime and set the
# time as a delta from the current time. We use -1 here to
# basically tell the browser to immediately expire the cookie,
# thus removing it from future request objects.
self._cookies[name]['expires'] = -1
# NOTE(CaselIT): Set SameSite to Lax to avoid setting invalid cookies.
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite#Fixing_common_warnings # noqa: E501
self._cookies[name]['samesite'] = 'Lax'
if domain:
self._cookies[name]['domain'] = domain
if path:
self._cookies[name]['path'] = path
def get_header(self, name, default=None):
"""Retrieve the raw string value for the given header.
Normally, when a header has multiple values, they will be
returned as a single, comma-delimited string. However, the
Set-Cookie header does not support this format, and so
attempting to retrieve it will raise an error.
Args:
name (str): Header name, case-insensitive. Must be of type ``str``
or ``StringType``, and only character values 0x00 through 0xFF
may be used on platforms that use wide characters.
Keyword Args:
default: Value to return if the header
is not found (default ``None``).
Raises:
ValueError: The value of the 'Set-Cookie' header(s) was requested.
Returns:
str: The value of the specified header if set, or
the default value if not set.
"""
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('Getting Set-Cookie is not currently supported.')
return self._headers.get(name, default)
def set_header(self, name, value):
"""Set a header for this response to a given value.
Warning:
Calling this method overwrites any values already set for this
header. To append an additional value for this header, use
:meth:`~.append_header` instead.
Warning:
This method cannot be used to set cookies; instead, use
:meth:`~.append_header` or :meth:`~.set_cookie`.
Args:
name (str): Header name (case-insensitive). The name may contain
only US-ASCII characters.
value (str): Value for the header. As with the header's name, the
value may contain only US-ASCII characters.
Raises:
ValueError: `name` cannot be ``'Set-Cookie'``.
"""
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to set cookies')
self._headers[name] = value
def delete_header(self, name):
"""Delete a header that was previously set for this response.
If the header was not previously set, nothing is done (no error is
raised). Otherwise, all values set for the header will be removed
from the response.
Note that calling this method is equivalent to setting the
corresponding header property (when said property is available) to
``None``. For example::
resp.etag = None
Warning:
This method cannot be used with the Set-Cookie header. Instead,
use :meth:`~.unset_cookie` to remove a cookie and ensure that the
user agent expires its own copy of the data as well.
Args:
name (str): Header name (case-insensitive). The name may
contain only US-ASCII characters.
Raises:
ValueError: `name` cannot be ``'Set-Cookie'``.
"""
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to remove cookies')
self._headers.pop(name, None)
def append_header(self, name, value):
"""Set or append a header for this response.
If the header already exists, the new value will normally be appended
to it, delimited by a comma. The notable exception to this rule is
Set-Cookie, in which case a separate header line for each value will be
included in the response.
Note:
While this method can be used to efficiently append raw
Set-Cookie headers to the response, you may find
:py:meth:`~.set_cookie` to be more convenient.
Args:
name (str): Header name (case-insensitive). The name may contain
only US-ASCII characters.
value (str): Value for the header. As with the header's name, the
value may contain only US-ASCII characters.
"""
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
if not self._extra_headers:
self._extra_headers = [(name, value)]
else:
self._extra_headers.append((name, value))
else:
if name in self._headers:
value = self._headers[name] + ', ' + value
self._headers[name] = value
def set_headers(self, headers):
"""Set several headers at once.
This method can be used to set a collection of raw header names and
values all at once.
Warning:
Calling this method overwrites any existing values for the given
header. If a list containing multiple instances of the same header
is provided, only the last value will be used. To add multiple
values to the response for a given header, see
:meth:`~.append_header`.
Warning:
This method cannot be used to set cookies; instead, use
:meth:`~.append_header` or :meth:`~.set_cookie`.
Args:
headers (Iterable[[str, str]]): An iterable of ``[name, value]`` two-member
iterables, or a dict-like object that implements an ``items()`` method.
Both *name* and *value* must be of type ``str`` and
contain only US-ASCII characters.
Note:
Falcon can process an iterable of tuples slightly faster
than a dict.
Raises:
ValueError: `headers` was not a ``dict`` or ``list`` of ``tuple``
or ``Iterable[[str, str]]``.
"""
header_items = getattr(headers, 'items', None)
if callable(header_items):
headers = header_items()
# NOTE(kgriffs): We can't use dict.update because we have to
# normalize the header names.
_headers = self._headers
for name, value in headers:
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to set cookies')
_headers[name] = value
def append_link(
self,
target,
rel,
title=None,
title_star=None,
anchor=None,
hreflang=None,
type_hint=None,
crossorigin=None,
):
"""Append a link header to the response.
(See also: RFC 5988, Section 1)
Note:
Calling this method repeatedly will cause each link to be
appended to the Link header value, separated by commas.
Note:
So-called "link-extension" elements, as defined by RFC 5988,
are not yet supported. See also Issue #288.
Args:
target (str): Target IRI for the resource identified by the
link. Will be converted to a URI, if necessary, per
RFC 3987, Section 3.1.
rel (str): Relation type of the link, such as "next" or
"bookmark".
(See also:
http://www.iana.org/assignments/link-relations/link-relations.xhtml)
Keyword Args:
title (str): Human-readable label for the destination of
the link (default ``None``). If the title includes non-ASCII
characters, you will need to use `title_star` instead, or
provide both a US-ASCII version using `title` and a
Unicode version using `title_star`.
title_star (tuple of str): Localized title describing the
destination of the link (default ``None``). The value must be a
two-member tuple in the form of (*language-tag*, *text*),
where *language-tag* is a standard language identifier as
defined in RFC 5646, Section 2.1, and *text* is a Unicode
string.
Note:
*language-tag* may be an empty string, in which case the
client will assume the language from the general context
of the current request.
Note:
*text* will always be encoded as UTF-8.
anchor (str): Override the context IRI with a different URI
(default None). By default, the context IRI for the link is
simply the IRI of the requested resource. The value
provided may be a relative URI.
hreflang (str or iterable): Either a single *language-tag*, or
a ``list`` or ``tuple`` of such tags to provide a hint to the
client as to the language of the result of following the link.
A list of tags may be given in order to indicate to the
client that the target resource is available in multiple
languages.
type_hint(str): Provides a hint as to the media type of the
result of dereferencing the link (default ``None``). As noted
in RFC 5988, this is only a hint and does not override the
Content-Type header returned when the link is followed.
crossorigin(str): Determines how cross origin requests are handled.
Can take values 'anonymous' or 'use-credentials' or None.
(See:
https://www.w3.org/TR/html50/infrastructure.html#cors-settings-attribute)
"""
# PERF(kgriffs): Heuristic to detect possibility of an extension
# relation type, in which case it will be a URL that may contain
# reserved characters. Otherwise, don't waste time running the
# string through uri.encode
#
# Example values for rel:
#
# "next"
# "http://example.com/ext-type"
# "https://example.com/ext-type"
# "alternate http://example.com/ext-type"
# "http://example.com/ext-type alternate"
#
if '//' in rel:
if ' ' in rel:
rel = '"' + ' '.join([uri_encode(r) for r in rel.split()]) + '"'
else:
rel = '"' + uri_encode(rel) + '"'
value = '<' + uri_encode(target) + '>; rel=' + rel
if title is not None:
value += '; title="' + title + '"'
if title_star is not None:
value += (
"; title*=UTF-8'"
+ title_star[0]
+ "'"
+ uri_encode_value(title_star[1])
)
if type_hint is not None:
value += '; type="' + type_hint + '"'
if hreflang is not None:
if isinstance(hreflang, str):
value += '; hreflang=' + hreflang
else:
value += '; '
value += '; '.join(['hreflang=' + lang for lang in hreflang])
if anchor is not None:
value += '; anchor="' + uri_encode(anchor) + '"'
if crossorigin is not None:
crossorigin = crossorigin.lower()
if crossorigin not in _RESERVED_CROSSORIGIN_VALUES:
raise ValueError(
'crossorigin must be set to either '
"'anonymous' or 'use-credentials'"
)
if crossorigin == 'anonymous':
value += '; crossorigin'
else: # crossorigin == 'use-credentials'
# PERF(vytas): the only remaining value is inlined.
# Un-inline in case more values are supported in the future.
value += '; crossorigin="use-credentials"'
_headers = self._headers
if 'link' in _headers:
_headers['link'] += ', ' + value
else:
_headers['link'] = value
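# Illustrative sketch (hypothetical URIs): two successive calls such as
#
#     resp.append_link('/things?page=2', 'next')
#     resp.append_link('/things?page=1', 'prev')
#
# produce a single header of the form:
#
#     Link: </things?page=2>; rel=next, </things?page=1>; rel=prev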
# NOTE(kgriffs): Alias deprecated as of 3.0
add_link = deprecated(_ADD_LINK_DEPRECATED_MSG, method_name='add_link')(append_link)
cache_control = header_property(
'Cache-Control',
"""Set the Cache-Control header.
Used to set a list of cache directives to use as the value of the
Cache-Control header. The list will be joined with ", " to produce
the value for the header.
""",
format_header_value_list,
)
content_location = header_property(
'Content-Location',
"""Set the Content-Location header.
This value will be URI encoded per RFC 3986. If the value that is
being set is already URI encoded it should be decoded first or the
header should be set manually using the set_header method.
""",
uri_encode,
)
content_length = header_property(
'Content-Length',
"""Set the Content-Length header.
This property can be used for responding to HEAD requests when you
aren't actually providing the response body, or when streaming the
response. If either the `text` property or the `data` property is set
on the response, the framework will force Content-Length to be the
length of the given text bytes. Therefore, it is only necessary to
manually set the content length when those properties are not used.
Note:
In cases where the response content is a stream (readable
file-like object), Falcon will not supply a Content-Length header
to the server unless `content_length` is explicitly set.
Consequently, the server may choose to use chunked encoding in this
case.
""",
)
content_range = header_property(
'Content-Range',
"""A tuple to use in constructing a value for the Content-Range header.
The tuple has the form (*start*, *end*, *length*, [*unit*]), where *start* and
*end* designate the range (inclusive), and *length* is the
total length, or '\\*' if unknown. You may pass ``int``'s for
these numbers (no need to convert to ``str`` beforehand). The optional value
*unit* describes the range unit and defaults to 'bytes'
Note:
You only need to use the alternate form, 'bytes \\*/1234', for
responses that use the status '416 Range Not Satisfiable'. In this
case, raising ``falcon.HTTPRangeNotSatisfiable`` will do the right
thing.
(See also: RFC 7233, Section 4.2)
""",
format_range,
)
content_type = header_property(
'Content-Type',
"""Sets the Content-Type header.
The ``falcon`` module provides a number of constants for
common media types, including ``falcon.MEDIA_JSON``,
``falcon.MEDIA_MSGPACK``, ``falcon.MEDIA_YAML``,
``falcon.MEDIA_XML``, ``falcon.MEDIA_HTML``,
``falcon.MEDIA_JS``, ``falcon.MEDIA_TEXT``,
``falcon.MEDIA_JPEG``, ``falcon.MEDIA_PNG``,
and ``falcon.MEDIA_GIF``.
""",
)
downloadable_as = header_property(
'Content-Disposition',
"""Set the Content-Disposition header using the given filename.
The value will be used for the ``filename`` directive. For example,
given ``'report.pdf'``, the Content-Disposition header would be set
to: ``'attachment; filename="report.pdf"'``.
As per `RFC 6266 <https://tools.ietf.org/html/rfc6266#appendix-D>`_
recommendations, non-ASCII filenames will be encoded using the
``filename*`` directive, whereas ``filename`` will contain the US
ASCII fallback.
""",
functools.partial(format_content_disposition, disposition_type='attachment'),
)
viewable_as = header_property(
'Content-Disposition',
"""Set an inline Content-Disposition header using the given filename.
The value will be used for the ``filename`` directive. For example,
given ``'report.pdf'``, the Content-Disposition header would be set
to: ``'inline; filename="report.pdf"'``.
As per `RFC 6266 <https://tools.ietf.org/html/rfc6266#appendix-D>`_
recommendations, non-ASCII filenames will be encoded using the
``filename*`` directive, whereas ``filename`` will contain the US
ASCII fallback.
.. versionadded:: 3.1
""",
functools.partial(format_content_disposition, disposition_type='inline'),
)
etag = header_property(
'ETag',
"""Set the ETag header.
The ETag header will be wrapped with double quotes ``"value"`` in case
the user didn't pass it.
""",
format_etag_header,
)
expires = header_property(
'Expires',
"""Set the Expires header. Set to a ``datetime`` (UTC) instance.
Note:
Falcon will format the ``datetime`` as an HTTP date string.
""",
dt_to_http,
)
last_modified = header_property(
'Last-Modified',
"""Set the Last-Modified header. Set to a ``datetime`` (UTC) instance.
Note:
Falcon will format the ``datetime`` as an HTTP date string.
""",
dt_to_http,
)
location = header_property(
'Location',
"""Set the Location header.
This value will be URI encoded per RFC 3986. If the value that is
being set is already URI encoded it should be decoded first or the
header should be set manually using the set_header method.
""",
uri_encode,
)
retry_after = header_property(
'Retry-After',
"""Set the Retry-After header.
The expected value is an integral number of seconds to use as the
value for the header. The HTTP-date syntax is not supported.
""",
str,
)
vary = header_property(
'Vary',
"""Value to use for the Vary header.
Set this property to an iterable of header names. For a single
asterisk or field value, simply pass a single-element ``list``
or ``tuple``.
The "Vary" header field in a response describes what parts of
a request message, aside from the method, Host header field,
and request target, might influence the origin server's
process for selecting and representing this response. The
value consists of either a single asterisk ("*") or a list of
header field names (case-insensitive).
(See also: RFC 7231, Section 7.1.4)
""",
format_header_value_list,
)
accept_ranges = header_property(
'Accept-Ranges',
"""Set the Accept-Ranges header.
The Accept-Ranges header field indicates to the client which
range units are supported (e.g. "bytes") for the target
resource.
If range requests are not supported for the target resource,
the header may be set to "none" to advise the client not to
attempt any such requests.
Note:
"none" is the literal string, not Python's built-in ``None``
type.
""",
)
def _set_media_type(self, media_type=None):
"""Set a content-type; wrapper around set_header.
Args:
media_type: Media type to use for the Content-Type
header.
"""
# PERF(kgriffs): Using "in" like this is faster than dict.setdefault()
# in most cases, except on PyPy where it is only a fraction of a
# nanosecond slower. Last tested on Python versions 3.5-3.7.
if media_type is not None and 'content-type' not in self._headers:
self._headers['content-type'] = media_type
def _wsgi_headers(self, media_type=None):
"""Convert headers into the format expected by WSGI servers.
Args:
media_type: Default media type to use for the Content-Type
header if the header was not set explicitly (default ``None``).
"""
headers = self._headers
# PERF(vytas): uglier inline version of Response._set_media_type
if media_type is not None and 'content-type' not in headers:
headers['content-type'] = media_type
items = list(headers.items())
if self._extra_headers:
items += self._extra_headers
# NOTE(kgriffs): It is important to append these after self._extra_headers
# in case the latter contains Set-Cookie headers that should be
# overridden by a call to unset_cookie().
if self._cookies is not None:
# PERF(tbug):
# The below implementation is ~23% faster than
# the alternative:
#
# self._cookies.output().split("\\r\\n")
#
# Even without the .split("\\r\\n"), the below
# is still ~17% faster, so don't use .output()
items += [('set-cookie', c.OutputString()) for c in self._cookies.values()]
return items
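# --- Illustrative sketch (not part of the public Falcon API) -----------------
# A minimal end-to-end use of the Response class above: populate the status,
# headers, body and a cookie the way a responder would, then render the body
# and the WSGI header list. The function is hypothetical and exists only for
# illustration; nothing in the framework calls it.
def _example_build_response():
    resp = Response()
    resp.status = '200 OK'
    resp.content_type = 'text/plain'
    resp.text = 'hello'
    resp.set_header('X-Request-ID', 'abc123')     # overwrites any prior value
    resp.append_link('/things?page=2', 'next')    # builds up the Link header
    resp.set_cookie('session', 'token', http_only=True, same_site='Lax')
    body = resp.render_body()                     # b'hello'
    headers = resp._wsgi_headers()                # [(name, value), ...] incl. set-cookie
    return body, headers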
class ResponseOptions:
"""Defines a set of configurable response options.
An instance of this class is exposed via :attr:`falcon.App.resp_options`
and :attr:`falcon.asgi.App.resp_options` for configuring certain
:py:class:`~.Response` behaviors.
Attributes:
secure_cookies_by_default (bool): Set to ``False`` in development
environments to make the `secure` attribute for all cookies
default to ``False``. This can make testing easier by
not requiring HTTPS. Note, however, that this setting can
be overridden via `set_cookie()`'s `secure` kwarg.
default_media_type (str): The default Internet media type (RFC 2046) to
use when rendering a response, when the Content-Type header
is not set explicitly. This value is normally set to the
media type provided when a :class:`falcon.App` is initialized;
however, if created independently, this will default to
:attr:`falcon.DEFAULT_MEDIA_TYPE`.
media_handlers (Handlers): A dict-like object for configuring the
media-types to handle. By default, handlers are provided for the
``application/json``, ``application/x-www-form-urlencoded`` and
``multipart/form-data`` media types.
static_media_types (dict): A mapping of dot-prefixed file extensions to
Internet media types (RFC 2046). Defaults to ``mimetypes.types_map``
after calling ``mimetypes.init()``.
"""
__slots__ = (
'secure_cookies_by_default',
'default_media_type',
'media_handlers',
'static_media_types',
)
def __init__(self):
self.secure_cookies_by_default = True
self.default_media_type = DEFAULT_MEDIA_TYPE
self.media_handlers = Handlers()
if not mimetypes.inited:
mimetypes.init()
self.static_media_types = mimetypes.types_map
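# Illustrative note (hypothetical ``app`` object): these options are normally
# tweaked through the App rather than instantiated directly, e.g. to allow
# plain-HTTP cookies during local development:
#
#     app = falcon.App()
#     app.resp_options.secure_cookies_by_default = False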
|
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
def test_passed(executed_docstring_source):
"""
>>> def test_passed_example():
... pass
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_passed_example",
with_status("passed")
)
)
def test_failed(executed_docstring_source):
"""
>>> def test_failed_example():
... assert False
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_failed_example",
with_status("failed"),
has_status_details(with_message_contains("AssertionError"),
with_trace_contains("def test_failed_example():")
)
)
)
def test_broken(executed_docstring_source):
"""
>>> def test_broken_example():
... raise IndentationError()
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_broken_example",
with_status("broken"),
has_status_details(with_message_contains("IndentationError"),
with_trace_contains("def test_broken_example():")
)
)
)
def test_call_pytest_fail(executed_docstring_source):
"""
>>> import pytest
>>> def test_call_pytest_fail_example():
... pytest.fail()
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_call_pytest_fail_example",
with_status("failed"),
has_status_details(with_message_contains("Failed: <Failed instance>"),
with_trace_contains("def test_call_pytest_fail_example():")
)
)
)
def test_call_pytest_fail_with_reason(executed_docstring_source):
"""
>>> import pytest
>>> def test_call_pytest_fail_with_reason_example():
... pytest.fail("Fail message")
"""
assert_that(executed_docstring_source.allure_report,
has_test_case("test_call_pytest_fail_with_reason_example",
with_status("failed"),
has_status_details(with_message_contains("Fail message"),
with_trace_contains("def test_call_pytest_fail_with_reason_example():")
)
)
)
|
#!/bin/python3.6
"""
Date Created: Feb 10 2020
This file contains the model descriptions, including original x-vector
architecture. The first two models are in active developement. All others
are provided below
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
class simpleTDNN(nn.Module):
def __init__(self, numSpkrs, p_dropout):
super(simpleTDNN, self).__init__()
self.tdnn1 = nn.Conv1d(in_channels=30, out_channels=128, kernel_size=5, dilation=1)
self.bn_tdnn1 = nn.BatchNorm1d(128, momentum=0.1, affine=False)
self.dropout_tdnn1 = nn.Dropout(p=p_dropout)
self.tdnn2 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=5, dilation=2)
self.bn_tdnn2 = nn.BatchNorm1d(128, momentum=0.1, affine=False)
self.dropout_tdnn2 = nn.Dropout(p=p_dropout)
self.tdnn3 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=1, dilation=1)
self.bn_tdnn3 = nn.BatchNorm1d(128, momentum=0.1, affine=False)
self.dropout_tdnn3 = nn.Dropout(p=p_dropout)
self.fc1 = nn.Linear(2*128,128)
self.bn_fc1 = nn.BatchNorm1d(128, momentum=0.1, affine=False)
self.dropout_fc1 = nn.Dropout(p=p_dropout)
self.fc2 = nn.Linear(128,64)
self.bn_fc2 = nn.BatchNorm1d(64, momentum=0.1, affine=False)
self.dropout_fc2 = nn.Dropout(p=p_dropout)
self.fc3 = nn.Linear(64,numSpkrs)
def forward(self, x, eps):
# Note: x must be (batch_size, feat_dim, chunk_len)
x = self.dropout_tdnn1(self.bn_tdnn1(F.relu(self.tdnn1(x))))
x = self.dropout_tdnn2(self.bn_tdnn2(F.relu(self.tdnn2(x))))
x = self.dropout_tdnn3(self.bn_tdnn3(F.relu(self.tdnn3(x))))
if self.training:
            x = x + torch.randn(x.size(), device=x.device) * eps  # device-agnostic noise injection
stats = torch.cat((x.mean(dim=2), x.std(dim=2)), dim=1)
x = self.dropout_fc1(self.bn_fc1(F.relu(self.fc1(stats))))
x = self.dropout_fc2(self.bn_fc2(F.relu(self.fc2(x))))
x = self.fc3(x)
return x
class xvecTDNN(nn.Module):
def __init__(self, numSpkrs, p_dropout):
super(xvecTDNN, self).__init__()
self.tdnn1 = nn.Conv1d(in_channels=30, out_channels=512, kernel_size=5, dilation=1)
self.bn_tdnn1 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_tdnn1 = nn.Dropout(p=p_dropout)
self.tdnn2 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=5, dilation=2)
self.bn_tdnn2 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_tdnn2 = nn.Dropout(p=p_dropout)
self.tdnn3 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=7, dilation=3)
self.bn_tdnn3 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_tdnn3 = nn.Dropout(p=p_dropout)
self.tdnn4 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=1, dilation=1)
self.bn_tdnn4 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_tdnn4 = nn.Dropout(p=p_dropout)
self.tdnn5 = nn.Conv1d(in_channels=512, out_channels=1500, kernel_size=1, dilation=1)
self.bn_tdnn5 = nn.BatchNorm1d(1500, momentum=0.1, affine=False)
self.dropout_tdnn5 = nn.Dropout(p=p_dropout)
self.fc1 = nn.Linear(3000,512)
self.bn_fc1 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_fc1 = nn.Dropout(p=p_dropout)
self.fc2 = nn.Linear(512,512)
self.bn_fc2 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
self.dropout_fc2 = nn.Dropout(p=p_dropout)
self.fc3 = nn.Linear(512,numSpkrs)
def forward(self, x, eps):
# Note: x must be (batch_size, feat_dim, chunk_len)
x = self.dropout_tdnn1(self.bn_tdnn1(F.relu(self.tdnn1(x))))
x = self.dropout_tdnn2(self.bn_tdnn2(F.relu(self.tdnn2(x))))
x = self.dropout_tdnn3(self.bn_tdnn3(F.relu(self.tdnn3(x))))
x = self.dropout_tdnn4(self.bn_tdnn4(F.relu(self.tdnn4(x))))
x = self.dropout_tdnn5(self.bn_tdnn5(F.relu(self.tdnn5(x))))
if self.training:
shape=x.size()
noise = torch.cuda.FloatTensor(shape) if torch.cuda.is_available() else torch.FloatTensor(shape)
torch.randn(shape, out=noise)
x += noise*eps
stats = torch.cat((x.mean(dim=2), x.std(dim=2)), dim=1)
x = self.dropout_fc1(self.bn_fc1(F.relu(self.fc1(stats))))
x = self.dropout_fc2(self.bn_fc2(F.relu(self.fc2(x))))
x = self.fc3(x)
return x
"""============================ OLD MODELS ==============================="""
class simpleCNN(nn.Module):
def __init__(self):
super(simpleCNN, self).__init__()
        # 1 input channel, 16 output channels, 5x5 convolution kernel,
        # followed by a second 16-channel 3x3 convolution
        self.conv1 = nn.Conv2d(1, 16, 5)
        self.conv2 = nn.Conv2d(16, 16, 3)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 21 * 1, 64)  # 16 channels * 21 * 1 feature map after pooling
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 460)
def forward(self, x):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(x)), (5, 5))
# If the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), 3)
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
class simpleLSTM(nn.Module):
def __init__(self):
super(simpleLSTM, self).__init__()
self.lstm1 = nn.LSTM(input_size=30, hidden_size=128, num_layers=1, batch_first=True)
self.fc1 = nn.Linear(128,64)
self.fc2 = nn.Linear(64,64)
self.fc3 = nn.Linear(64,460)
def forward(self, x):
# x's shape must be (batch, seq_len, input_size)
_,(h,_) = self.lstm1(x)
x = F.relu(self.fc1(h.view(h.shape[1], h.shape[2])))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
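# --- Illustrative smoke test (not part of the original file) ---------------
# Runs a dummy batch shaped (batch, feat_dim=30, chunk_len) through the two
# active models; eps only matters in training mode, where noise is injected.
if __name__ == '__main__':
    dummy = torch.randn(4, 30, 200)
    for net in (simpleTDNN(numSpkrs=460, p_dropout=0.1),
                xvecTDNN(numSpkrs=460, p_dropout=0.1)):
        net.eval()  # disable dropout and training-time noise injection
        with torch.no_grad():
            out = net(dummy, eps=1e-5)
        print(type(net).__name__, out.shape)  # expected: torch.Size([4, 460])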
|
import sys
def pwpath():
import os
SPLIT_STR = '/'
if os.name == 'nt':
SPLIT_STR = '\\'
RPATH = os.path.realpath(__file__)
PPATH = SPLIT_STR.join(RPATH.split(SPLIT_STR)[:-1])
PWS_PATH = os.path.join(PPATH, os.path.pardir)
RPWPATH = os.path.realpath(PWS_PATH)
return RPWPATH
sys.path.append(pwpath())
import pwserver
import app
|
from rest_framework.response import Response
from iaso.tasks.copy_version import copy_version
from iaso.api.tasks import TaskSerializer
from iaso.models import DataSource, SourceVersion, Task, OrgUnit
from rest_framework import viewsets, permissions, serializers
from iaso.api.common import HasPermission
from django.shortcuts import get_object_or_404
import logging
logger = logging.getLogger(__name__)
class CopyVersionSerializer(serializers.Serializer):
source_source_id = serializers.IntegerField(required=True)
source_version_number = serializers.IntegerField(required=True)
destination_source_id = serializers.IntegerField(required=False, default=None)
destination_version_number = serializers.CharField(max_length=200, required=False, default=None)
force = serializers.BooleanField(required=False, default=False)
def validate(self, attrs):
validated_data = super().validate(attrs)
request = self.context["request"]
user = request.user
account = user.iaso_profile.account
possible_data_sources = (
DataSource.objects.filter(projects__in=account.project_set.all()).distinct().values_list("id", flat=True)
)
possible_data_sources = list(possible_data_sources)
force = attrs["force"]
source_source_id = attrs["source_source_id"]
destination_source_id = attrs["destination_source_id"]
source_version = get_object_or_404(
SourceVersion, data_source_id=source_source_id, number=attrs["source_version_number"]
)
try:
destination_version = SourceVersion.objects.get(
data_source_id=destination_source_id, number=attrs["destination_version_number"]
)
        except Exception:  # e.g. SourceVersion.DoesNotExist
destination_version = None
if destination_version and source_version.id == destination_version.id:
raise serializers.ValidationError("Cannot copy a version to the same version")
version_count = OrgUnit.objects.filter(version=destination_version).count()
if version_count > 0 and not force and destination_version is not None:
raise serializers.ValidationError(
"This is going to delete %d org units records. Use the force parameter to proceed" % version_count
)
if validated_data["source_source_id"] not in possible_data_sources:
raise serializers.ValidationError("Unauthorized source_source_id")
if destination_version and validated_data["destination_source_id"] not in possible_data_sources:
raise serializers.ValidationError("Unauthorized destination_source_id")
return validated_data
class CopyVersionViewSet(viewsets.ViewSet):
permission_classes = [permissions.IsAuthenticated, HasPermission("menupermissions.iaso_sources")]
serializer_class = CopyVersionSerializer
def create(self, request):
data = request.data
serializer = CopyVersionSerializer(
data=request.data,
context={"request": request},
)
serializer.is_valid(raise_exception=True)
source_source_id = data.get("source_source_id", None)
source_version_number = data.get("source_version_number", None)
destination_source_id = data.get("destination_source_id", None)
destination_version_number = data.get("destination_version_number", None)
force = data.get("force", False)
task = copy_version(
source_source_id,
source_version_number,
destination_source_id,
destination_version_number,
force,
user=request.user,
)
return Response({"task": TaskSerializer(instance=task).data})
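# Illustrative request sketch (not part of the original module). Assuming the
# viewset is routed at /api/copyversion/, a valid payload could look like the
# following; all IDs and version numbers here are hypothetical:
#
#   POST /api/copyversion/
#   {
#       "source_source_id": 1,
#       "source_version_number": 2,
#       "destination_source_id": 1,
#       "destination_version_number": "3",
#       "force": false
#   }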
|
"""
Project:
Author:
"""
from .mqtt_controller import MQTTController
from .handlers.hello_world_handler import HelloWorldHandler
from .handlers.esp32_handler import Esp32Handler
class FridayController(MQTTController):
""" MQTT Controller with setting for Friday project """
HANDLER_LIST = [
HelloWorldHandler(),
Esp32Handler(),
]
if __name__ == "__main__":
import os
hostname = os.getenv("FRIDAY_HOSTNAME", "localhost")
    port = int(os.getenv("FRIDAY_PORT", "1883"))  # env vars are strings; the broker port must be an int
mqttc = FridayController()
mqttc.initialize(hostname, port, 60)
rc = mqttc.run()
print("rc: " + str(rc))
|
"""Change pdp_users.password to bytea
Revision ID: f3d30db17bed
Revises: 41da831646e4
Create Date: 2020-12-16 21:26:08.548724
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f3d30db17bed"
down_revision = "41da831646e4"
branch_labels = None
depends_on = None
def upgrade():
op.drop_column("pdp_users", "password")
op.add_column("pdp_users", sa.Column("password", sa.LargeBinary, nullable=False))
def downgrade():
op.drop_column("pdp_users", "password")
op.add_column("pdp_users", "password", sa.String(50), nullable=False),
|
import contextlib
import os
import re
import sys
import time
from enum import IntEnum
from logging import getLogger
import sqlparse
from rpy2 import robjects
from . import util
from .dsl import interpreter
logger = getLogger('squares')
class ExitCode(IntEnum):
OK = 0
NON_OPTIMAL = 3
ERROR = 1
SQL_FAILED = 2
SQL_FAILED_NON_OPTIMAL = 4
END_SEARCH_SPACE = 5
start_time = time.time()
specification = None
solution = None
solution_found = False
solution_size = None
n_cubes = 0
blocked_cubes = 0
n_attempts = 0
n_rejects = 0
n_fails = 0
n_blocks = 0
exit_code = ExitCode.ERROR
exceeded_max_loc = False
analysis_time = 0
enum_time = 0
init_time = 0
block_time = 0
empty_output = 0
redundant_lines = 0
def handle_sigint(signal, stackframe):
print()
print_results()
exit(exit_code)
def beautifier(sql):
sql = re.sub(r'\.(?=other(\.other)*`)', '_', sql)
sql = re.sub(r"""`(?=([^"'\\]*(\\.|"([^"'\\]*\\.)*[^"'\\]*"))*[^"']*$)""", '', sql) # remove backticks if not inside strings
return sqlparse.format(sql, reindent=True, keyword_case='upper')
def print_results():
global exit_code
logger.info('Statistics:')
if n_cubes:
logger.info('\tGenerated cubes: %d', n_cubes)
logger.info('\tBlocked cubes: %d (%f / generated avg.)', blocked_cubes, blocked_cubes / n_cubes if n_cubes else 0)
logger.info('\tAttempted programs: %d (approx)', n_attempts)
logger.info('\t\tRejected: %d (approx)', n_rejects)
logger.info('\t\tFailed: %d (approx)', n_fails)
logger.info('\t\tEmpty outputs: %d (%.1f%%) (approx)', empty_output, empty_output / n_attempts * 100 if n_attempts else 0)
logger.info('\t\tRedundant lines: %d (approx)', redundant_lines)
logger.info('\tBlocked programs: %d (%f / attempted avg.) (approx)', n_blocks, n_blocks / n_attempts if n_attempts else 0)
logger.info('\tTotal time spent in enumerator init: %f (approx)', init_time)
logger.info('\tTotal time spent in enumerator: %f (approx)', enum_time)
if enum_time != 0:
logger.info('\t\tEnumerated %f programs/s avg. (just enumeration time)', n_attempts / enum_time)
logger.info('\t\tEnumerated %f programs/s avg. (overall)', n_attempts / (time.time() - start_time))
logger.info('\tTotal time spent in evaluation & testing: %f (approx)', analysis_time)
logger.info('\tTotal time spent blocking cubes/programs: %f (approx)', block_time)
if solution:
logger.info(f'Solution found: {solution}')
logger.info(f'Solution size: {solution_size}')
old_cache = util.get_config().cache_ops
util.get_config().cache_ops = True
interp = interpreter.SquaresInterpreter(specification, True)
evaluation = interp.eval(solution, specification.tables)
assert interp.equals(evaluation, 'expected_output')[0] # this call makes it so that the select() appears in the output
util.get_config().cache_ops = old_cache
try:
program = specification.r_init + interp.program
robjects.r(program)
            sql_query = robjects.r('sink(); sql_render(out, bare_identifier_ok=T)')
        except Exception:
logger.error('Error while trying to convert R code to SQL.')
sql_query = None
exit_code = ExitCode.SQL_FAILED if exit_code != ExitCode.NON_OPTIMAL else ExitCode.SQL_FAILED_NON_OPTIMAL
print()
if util.get_config().print_r:
pass
print("------------------------------------- R Solution ---------------------------------------\n")
print(specification.r_init + '\n' + interp.program)
if sql_query is not None:
print()
print("+++++++++++++++++++++++++++++++++++++ SQL Solution +++++++++++++++++++++++++++++++++++++\n")
print(beautifier(str(sql_query)[6:]))
else:
print('Failed to generate SQL query')
else:
if exceeded_max_loc:
exit_code = ExitCode.END_SEARCH_SPACE
if not solution_found:
print("No solution found")
def update_stats(attempts, rejects, fails, blocks, emptys, enum_t, analysis_t, init_t, block_t, redundant):
global n_attempts, n_rejects, n_fails, n_blocks, empty_output, enum_time, analysis_time, init_time, block_time, redundant_lines
n_attempts += attempts
n_rejects += rejects
n_fails += fails
n_blocks += blocks
empty_output += emptys
enum_time += enum_t
analysis_time += analysis_t
init_time += init_t
block_time += block_t
redundant_lines += redundant
def increment_cubes():
global n_cubes
n_cubes += 1
def store_solution(sol, size: int, optimal: bool):
global solution, solution_size, exit_code, solution_found
solution = sol
solution_size = size
exit_code = ExitCode.OK if optimal else ExitCode.NON_OPTIMAL
solution_found = True
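# Illustrative usage sketch (not part of the original module). A search driver
# would typically import this module and report its counters back, e.g. (the
# module path and the numbers below are hypothetical):
#
#   from squares import results
#   results.update_stats(attempts=120, rejects=10, fails=3, blocks=5, emptys=2,
#                        enum_t=1.5, analysis_t=0.7, init_t=0.2, block_t=0.1,
#                        redundant=4)
#   results.store_solution(sol=program, size=3, optimal=True)
#   results.print_results()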
|
#!/usr/bin/env python
import chess
import chess.svg
from board import Board
from flask import Flask, request, make_response, render_template
app = Flask(__name__)
game = Board()
moves = []
# Flask route for home page.
@app.route('/')
def index():
game = Board()
moves.clear()
return render_template("index.html")
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/ai')
def ai():
    global game  # reset the module-level board, not a shadowing local
    game = Board()
moves.clear()
return render_template("ai.html")
@app.route('/contact')
def contact():
return render_template("contact.html")
@app.route('/bots')
def bots():
return render_template("bots.html")
@app.route('/leaderboard')
def leaderboard():
return render_template("leaderboard.html")
@app.route('/player')
def player():
    global game  # reset the module-level board, not a shadowing local
    game = Board()
moves.clear()
return render_template("player.html")
@app.route('/login')
def login():
return render_template("login.html")
@app.route('/register')
def register():
return render_template("register.html")
@app.route('/updates')
def updates():
return render_template("updates.html")
@app.route("/reset")
def reset():
game.board.reset()
moves.clear()
return make_response(game.board.fen())
# Return if the game is over or not.
@app.route('/is_game_over')
def is_game_over():
return str(game.board.is_game_over())
# Return current board state.
@app.route('/current_board_state')
def current_board_state():
return make_response(game.board.fen())
# Return current board pgn.
@app.route('/past_moves')
def past_moves():
return make_response(str(moves))
# Flask route for moving the pieces.
@app.route('/move')
def move():
# print(game.board.piece_map())
# Get the source and target of the piece moved by a request.
source = int(request.args.get('source'))
target = int(request.args.get('target'))
depth = int(request.args.get('depth'))
ai = request.args.get('ai')
# Create a san board state move with source and target.
move = chess.Move(
source, target, promotion=chess.QUEEN if request.args.get('promotion') == "true" else None)
print("User's Move: " + str(move))
if move in list(game.board.legal_moves):
try:
moves.append(game.board.san(move))
game.board.push(move)
comp_move = game.comp_move(depth, ai)
moves.append(game.board.san(comp_move))
game.board.push(comp_move)
except Exception as e:
print(e)
# Return response.
return make_response(game.board.fen())
@app.route('/selfplay')
def self_play_move():
depth = int(request.args.get('depth'))
player = request.args.get('player')
ai = request.args.get('ai')
print(depth, player, ai)
try:
comp_move = game.comp_move(depth, ai)
moves.append(game.board.san(comp_move))
game.board.push(comp_move)
except Exception as e:
print(e)
# Return response.
return make_response(game.board.fen())
# Create new Flask Application.
if __name__ == '__main__':
app.run(debug=True)
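# Illustrative request sketch (not part of the original file). The /move route
# above reads integer square indices as used by python-chess (0 = a1 ... 63 = h8),
# so a pawn move e2-e4 against a hypothetical engine named "minimax" could be:
#
#   curl 'http://localhost:5000/move?source=12&target=28&depth=3&ai=minimax&promotion=false'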
|
import tempfile
from numpy.testing import assert_equal
from statsmodels.compat.python import lrange, BytesIO
from statsmodels.iolib.smpickle import save_pickle, load_pickle
def test_pickle():
tmpdir = tempfile.mkdtemp(prefix='pickle')
a = lrange(10)
save_pickle(a, tmpdir+'/res.pkl')
b = load_pickle(tmpdir+'/res.pkl')
assert_equal(a, b)
# cleanup, tested on Windows
try:
import os
os.remove(tmpdir+'/res.pkl')
os.rmdir(tmpdir)
except (OSError, IOError):
pass
assert not os.path.exists(tmpdir)
# test with file handle
fh = BytesIO()
save_pickle(a, fh)
fh.seek(0, 0)
c = load_pickle(fh)
fh.close()
assert_equal(a, c)
|
class ClockCollection(object,ICollection[Clock],IEnumerable[Clock],IEnumerable):
""" Represents an ordered collection of System.Windows.Media.Animation.Clock objects. """
def Add(self,item):
"""
Add(self: ClockCollection,item: Clock)
Adds a new System.Windows.Media.Animation.Clock object to the end of this
System.Windows.Media.Animation.ClockCollection.
item: The object to add.
"""
pass
def Clear(self):
"""
Clear(self: ClockCollection)
Removes all items from this System.Windows.Media.Animation.ClockCollection.
"""
pass
def Contains(self,item):
"""
Contains(self: ClockCollection,item: Clock) -> bool
Indicates whether the System.Windows.Media.Animation.ClockCollection contains the specified
System.Windows.Media.Animation.Clock object.
item: The object to locate.
Returns: true if item is found; otherwise,false.
"""
pass
def CopyTo(self,array,index):
"""
CopyTo(self: ClockCollection,array: Array[Clock],index: int)
Copies the System.Windows.Media.Animation.Clock objects of this
System.Windows.Media.Animation.ClockCollection to an array of Clocks,starting at the specified
index position.
array: The destination array.
index: The zero-based index position where copying begins.
"""
pass
def Equals(self,*__args):
"""
Equals(objA: ClockCollection,objB: ClockCollection) -> bool
Indicates whether the two specified System.Windows.Media.Animation.ClockCollection collections
are equal.
objA: The first value to compare.
objB: The second value to compare.
Returns: true if objA and objB are equal; otherwise,false.
Equals(self: ClockCollection,obj: object) -> bool
Indicates whether this instance is equal to the specified object.
obj: The object to compare with this instance.
Returns: true if obj is equal to this instance; otherwise false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: ClockCollection) -> int
Returns a 32-bit signed integer hash code representing this instance.
Returns: A 32-bit signed integer.
"""
pass
def Remove(self,item):
"""
Remove(self: ClockCollection,item: Clock) -> bool
Removes the specified System.Windows.Media.Animation.Clock from the
System.Windows.Media.Animation.ClockCollection.
item: The object to remove.
Returns: true if item was successfully removed; otherwise,false.
"""
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
""" __contains__(self: ICollection[Clock],item: Clock) -> bool """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
def __ne__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Count=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the number of items contained in this System.Windows.Media.Animation.ClockCollection.
Get: Count(self: ClockCollection) -> int
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Windows.Media.Animation.ClockCollection is read-only.
Get: IsReadOnly(self: ClockCollection) -> bool
"""
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['rqt_servicebot_pan_tilt'],
package_dir={'': 'src'},
requires=['std_msgs', 'rospy']
)
setup(**setup_args)
|
#!/usr/bin/env python
import rospy
from lowpass import LowPassFilter
from yaw_controller import YawController
from pid import PID
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
# TODO: Implement
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.0
mn = 0.0 # min throttle
mx = 0.2 # max throttle
self.throttle_controller = PID(kp, ki, kd, mn, mx)
        tau = 0.5  # 1/(2*pi*tau) = cutoff frequency
ts = 0.02 # sample time
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
# rospy.logwarn("Angular vel: {0}".format(angular_vel))
# rospy.logwarn("Target vel: {0}".format(linear_vel))
# rospy.logwarn("Target angular vel: {0}\n".format(angular_vel))
# rospy.logwarn("Current vel: {0}".format(current_vel))
# rospy.logwarn("Filtered vel: {0}".format(self.vel_lpf.get()))
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
        # original: the car slows down but never stops.
# if linear_vel == 0. and current_vel < 0:
# throttle = 0
# decel = max(vel_error, self.decel_limit)
# brake = abs(decel)*self.vehicle_mass*self.wheel_radius # Torque N*m
if linear_vel == 0. and current_vel < 0.1:
throttle = 0
brake = 400 # N to hold the car
elif throttle < 0.1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius # Torque N*m
return throttle, brake, steering
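# Worked example (illustrative only, with hypothetical parameters): for
# vehicle_mass = 1700 kg, wheel_radius = 0.24 m and a commanded deceleration
# of 1.0 m/s^2, the brake torque computed in control() above would be
# abs(-1.0) * 1700 * 0.24 = 408 N*m; a full stop instead applies the fixed
# 400 N*m holding torque.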
|
# -*- coding: utf-8 -*-
"""
"""
import concurrent.futures
import imaplib
import warnings
from pprint import pprint
from typing import List, Tuple, Dict
from pymaillib.imap.entity.email_message import ImapFetchedItem, EmailMessage
from pymaillib.imap.query.builders.fetch import FetchQueryBuilder
from pymaillib.imap.client import ImapClient, ImapFolder
from pymaillib.imap.entity.server import ImapNamespace, Namespaces
from pymaillib.mailbox import UserMailbox
from pymaillib.settings import Config
#imaplib.Debug = 55
from_config = Config().from_config_file('./from_server.ini')
from_mailbox = UserMailbox('sxadmin', '1', from_config)
to_config = Config().from_config_file('./to_server.ini')
to_mailbox = UserMailbox('migrate', '123456', to_config)
def get_folders(namespace: ImapNamespace, imap: ImapClient) -> List[ImapFolder]:
"""
:param namespace:
:param imap:
:return:
"""
res = []
for _namespace in namespace:
if _namespace.name:
print(_namespace)
res.append(ImapFolder(_namespace.name.strip('/'),
_namespace.separator, {b'\\Noselect': b''}))
res.extend(imap.folders(namespace=_namespace))
return res
def get_user_folders(namespaces: Namespaces, imap: ImapClient) -> \
Tuple[List[str], Dict[str, ImapFolder]]:
"""
:param namespaces:
:param imap:
:return:
"""
folders = {}
public_folders = []
for namespace in [namespaces.public_folders, namespaces.other_users]:
for folder_ in get_folders(namespace, imap):
public_folders.append(folder_.name)
for folder_ in get_folders(namespaces.private, imap):
if folder_.name in public_folders:
continue
folders[folder_.name] = folder_
return public_folders, folders
from_folders = []
from_public_folders = []
with from_mailbox.imap() as imap:
from_public_folders, from_folders = get_user_folders(imap.namespace(), imap)
print('Skipped following not private folders')
pprint(from_public_folders)
print('Private folders in server migrating from is:')
pprint(list(from_folders))
to_folders = []
to_public_folders = []
with to_mailbox.imap() as imap:
to_public_folders, to_folders = get_user_folders(imap.namespace(), imap)
print('Skipped non private folders at destination server')
pprint(to_public_folders)
print('Private folders in destination server is:')
pprint(list(to_folders))
folder_diff = set(from_folders).difference(set(to_folders))
if folder_diff:
print('Destination mailbox does not have such folders')
pprint(folder_diff)
print('Lets create them')
with to_mailbox.imap() as imap:
for folder_name in iter(folder_diff):
folder = from_folders.get(folder_name)
parent = folder.parent()
top_level = [folder]
while parent:
top_level.append(parent)
parent = parent.parent()
for top_folder in reversed(top_level):
if not imap.folder_exists(top_folder):
imap.create_folder(top_folder.name)
def get_folder_messages(folder: ImapFolder, imap: ImapClient) -> \
        Dict[str, ImapFetchedItem]:
"""
:param folder:
:param imap:
:return:
"""
res = {}
if not folder.total:
return res
    data = range(1, folder.total + 1)  # IMAP sequence numbers are 1-based and inclusive
    n = 200  # fetch in chunks of 200 messages
    for item in [data[i:i + n] for i in range(0, len(data), n)]:
fp = FetchQueryBuilder(list(item)).fetch_envelope()
for item in imap.fetch(fp):
res[item.envelope.message_id] = item
return res
def guess_email_class(email: EmailMessage) -> str:
    """
    :param email:
    :return:
    """
    for part in email.walk():
        if part.get_content_type() == 'text/calendar':
            return 'IPM.Appointment'
    return 'IPM.Note'
def fill_mailbox(source_mailbox, dest_mailbox, folder:ImapFolder):
if not folder.selectable:
warnings.warn('Folder {} is not selectable'.format(folder.name),
RuntimeWarning)
return
from_messages = {}
with source_mailbox.imap() as imap:
imap.update_folder_info(folder)
from_messages = get_folder_messages(folder, imap)
to_messages = {}
with dest_mailbox.imap() as imap:
imap.update_folder_info(folder)
to_messages = get_folder_messages(folder, imap)
msgs_diff = set(from_messages).difference(set(to_messages))
count = 0
print('New messages', msgs_diff)
if not msgs_diff:
return count
with source_mailbox.imap() as from_imap:
with dest_mailbox.imap(True) as dest_imap:
dest_imap.select_folder(folder)
for msg_id in msgs_diff:
msg = from_messages.get(msg_id)
if not msg:
warnings.warn('Oops not found {}'.format(msg_id),
RuntimeWarning)
continue
fp = FetchQueryBuilder(uids=msg.uid).fetch_rfc822()\
.fetch_flags()
msg = list(from_imap.fetch(fp))[-1]
if not msg:
warnings.warn('Oops not found {}'.format(msg_id),
RuntimeWarning)
continue
rfc822 = msg.rfc822
if not rfc822['X-Scalix-Class']:
rfc822.add_header('X-Scalix-Class',
guess_email_class(rfc822))
print('Migrate message UID', msg.uid)
dest_imap.append_message(rfc822, folder, msg.flags)
#dest_imap.check()
count += 1
return count
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(fill_mailbox, from_mailbox.clone(),
to_mailbox.clone(), folder): folder
for folder in from_folders.values()}
for future in concurrent.futures.as_completed(future_to_url):
folder = future_to_url[future]
try:
data = future.result()
except Exception as exc:
print('{} generated an exception: {}'.format(folder, exc))
else:
print('{} msgs synched {}'.format(folder, data))
|
import os
import signal
import gc
import time
import sys
import select
import curses
import threading
import contextlib
import kaa
import kaa.log
from . import keydef, color, dialog
from kaa import clipboard
from kaa import document
from kaa import keyboard
from kaa import macro
from kaa.exceptions import KaaError
class CuiApp:
DEFAULT_MENU_MESSAGE = 'Type F1 or alt+m for menu.'
DEFAULT_THEME = 'basic'
DEFAULT_PALETTE = 'dark'
NUM_NEWFILE = 1
def __init__(self, config):
self.config = config
self.clipboard = clipboard.select_clipboard()
self.colors = None
self._idleprocs = None
self.lastcommands = ()
self.focus = None
self._quit = False
self.theme = self.DEFAULT_THEME
self.last_dir = '.'
self._input_readers = []
self._lock = threading.RLock()
self._tasks = []
self.commands = {}
self.is_availables = {}
self.sigwinch_rfd, self.sigwinch_wfd = os.pipe()
signal.signal(signal.SIGWINCH,
lambda *args: os.write(self.sigwinch_wfd, b'0'))
def register_commandobj(self, cmds):
self.commands.update(cmds.get_commands())
self.is_availables.update(cmds.get_commands_is_enable())
def init_commands(self):
from kaa.commands import appcommand, toolcommand, filecommand, gitcommand
self.app_commands = appcommand.ApplicationCommands()
self.register_commandobj(self.app_commands)
self.file_commands = filecommand.FileCommands()
self.register_commandobj(self.file_commands)
self.register_commandobj(toolcommand.ToolCommands())
self.register_commandobj(appcommand.MacroCommands())
self.register_commandobj(gitcommand.GitCommands())
for name in dir(self):
attr = getattr(self, name)
if hasattr(attr, 'COMMAND_ID') and callable(attr):
self.commands[getattr(attr, 'COMMAND_ID')] = attr
def init(self, mainframe):
self.init_commands()
if self.config.palette:
self.set_palette(self.config.palette)
elif not self.colors:
self.set_palette(self.DEFAULT_PALETTE)
self.config.init_history()
from kaa.ui.messagebar import messagebarmode
self.messagebar = messagebarmode.MessageBarMode()
doc = document.Document()
doc.setmode(self.messagebar)
mainframe.set_messagebar(doc)
self.mainframe = mainframe
self.focus = self.mainframe
self.macro = macro.Macro()
self.mainframe.on_console_resized()
self.messagebar.set_message(self.DEFAULT_MENU_MESSAGE)
def on_shutdown(self):
os.close(self.sigwinch_rfd)
os.close(self.sigwinch_wfd)
self.config.close()
def get_current_theme(self):
return self.theme
def set_palette(self, name):
palette = self.get_palette(name)
self.colors = color.Colors(palette)
def get_palette(self, name):
if name == 'light':
return color.LightPalette()
else:
return color.DarkPalette()
def quit(self):
self._quit = True
def call_later(self, secs, f, *args, **kwargs):
with self._lock:
self._tasks.append((time.time() + secs, f, args, kwargs))
SCHEDULE_WAIT_MARGIN = 0.05
def _next_scheduled_task(self):
with self._lock:
tasks = sorted(t for t, f, a, k in self._tasks)
if tasks:
wait = max(0, tasks[0] - time.time())
return wait + wait * self.SCHEDULE_WAIT_MARGIN
def _run_scheduled_task(self):
with self._lock:
now = time.time()
for n, (t, f, a, k) in enumerate(self._tasks):
if t <= now:
del self._tasks[n]
f(*a, **k)
return
def get_command(self, commandid):
cmd = self.commands.get(commandid, None)
if cmd:
is_available = self.is_availables.get(commandid, None)
return (is_available, cmd)
def set_idlejob(self):
self._idleprocs = [
doc.mode.on_idle for doc in document.Document.all if doc.mode]
def on_idle(self):
if self._idleprocs:
proc = self._idleprocs.pop(0)
# proc() returns True if proc() still has job to be done.
if proc():
self._idleprocs.append(proc)
return True
else:
return False
def translate_theme(self, theme):
overlays = {}
for name, overlay in theme.overlays.items():
fg = bg = None
if overlay.fgcolor:
fg = self.colors.colornames.get(overlay.fgcolor.upper())
if overlay.bgcolor:
bg = self.colors.colornames.get(overlay.bgcolor.upper())
overlays[name] = (fg, bg)
for style in theme.styles.values():
fg, bg = (self.colors.colornames.get(style.fgcolor.upper()),
self.colors.colornames.get(style.bgcolor.upper()))
attr = self.colors.get_color(fg, bg)
style.cui_colorattr = attr
style.cui_overlays = {}
for name, (o_fg, o_bg) in overlays.items():
if o_fg is None:
o_fg = fg
if o_bg is None:
o_bg = bg
style.cui_overlays[name] = self.colors.get_color(o_fg, o_bg)
def get_keyname(self, key):
try:
if not isinstance(key, int):
key = ord(key)
return str(curses.keyname(key), 'utf-8', 'replace')
except Exception:
return '?'
def translate_key(self, mod, c):
"""Translate kaa's key value to curses keycode"""
alt = keyboard.alt in mod
ctrl = keyboard.ctrl in mod
shift = keyboard.shift in mod
if alt:
meta = '\x1b'
else:
meta = ''
if isinstance(c, str):
if shift:
raise KaaError(
'Cannot use shift key for character: {!r}'.format((mod, c)))
if ctrl:
c = c.upper()
if not (0x40 <= ord(c) <= 0x5f):
raise KaaError(
'Cannot use control key for character: {!r}'.format((mod, c)))
return meta + chr(ord(c) - 0x40)
else:
return meta + c
else:
ret = keydef.keyfromname(c, ctrl, shift)
if ret is None:
raise KaaError(
'Cannot convert character: {!r}'.format((mod, c)))
return [ret] if not meta else [meta, ret]
def set_focus(self, wnd):
if wnd is self.focus:
return
if self.focus:
self.focus.on_killfocus()
self.focus = wnd
if wnd:
wnd.on_focus()
def show_doc(self, doc):
'''
Create new window for the doc and show it.
'''
ret = self.mainframe.show_doc(doc)
return ret
def show_inputline(self, doc):
self._idleprocs = None # Reschedule idle procs
dlg = dialog.DialogWnd(parent=self.mainframe, doc=doc)
self.mainframe.show_inputline(dlg)
return dlg
def show_dialog(self, doc):
dlg = dialog.DialogWnd(parent=self.mainframe, doc=doc)
self.mainframe.show_dialog(dlg)
return dlg
def get_frames(self):
return self.mainframe.childframes[:]
def get_activeframe(self):
return self.mainframe.activeframe
def show_cursor(self, f):
try:
curses.curs_set(f)
except curses.error:
# curses.curs_set() occasionally fails if $TERM=xterm-color
pass
@contextlib.contextmanager
def restore_teminal(self):
curses.def_prog_mode()
curses.endwin()
try:
yield
finally:
curses.reset_prog_mode()
self.mainframe.refresh()
def add_input_reader(self, reader):
self._input_readers.append(reader)
def del_input_reader(self, reader):
if reader in self._input_readers:
self._input_readers.remove(reader)
def dump_panel(self):
import curses.panel
panels = []
p = curses.panel.top_panel()
while p:
panels.append(p)
p = p.below()
for w in self.mainframe.walk_children():
idx = panels.index(w._panel)
d = getattr(w, 'document', w)
m = getattr(d, 'mode', w)
panels[idx] = m
def run(self):
# def f(t, i):
# _trace(t, i)
# gc.callbacks.append(f)
gc.set_threshold(2000, 10, 10)
nonblocking = True
while not self._quit:
try:
if not self.focus:
kaa.log.error('Internal error: invalid focus window.')
break
self.focus.restore_cursor_pos()
if not nonblocking:
# update screen before sleep.
curses.panel.update_panels()
curses.doupdate()
rd = []
for f in self._input_readers:
rd.extend(f.get_reader())
try:
rlist, _, _ = select.select(
[sys.stdin, self.sigwinch_rfd] + rd, [], [],
0 if nonblocking else self._next_scheduled_task())
except InterruptedError:
pass
if not nonblocking and not rlist:
# timeout
self._run_scheduled_task()
self.set_idlejob() # Reschedule idle procs
if self.sigwinch_rfd in rlist:
os.read(self.sigwinch_rfd, 1)
                    # sigh. pep-0475 prevents us from handling SIGWINCH directly.
# force curses to resize window.
import struct
import fcntl
import termios
v = fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0))
lines, cols, _, _ = struct.unpack('HHHH', v)
curses.resizeterm(lines, cols)
self.mainframe.on_console_resized()
ready = [r for r in rlist if r not in
(sys.stdin, self.sigwinch_rfd)]
if ready:
nonblocking = True
for r in ready:
idx = rd.index(r)
self._input_readers[idx].read_input(r)
self.set_idlejob() # Reschedule idle procs
inputs = self.focus.do_input(nonblocking=True)
for c in inputs:
if isinstance(c, keydef.KeyEvent):
nonblocking = True
if c.key == curses.KEY_RESIZE:
self.mainframe.on_console_resized()
continue
if self.focus.editmode:
self.focus.editmode.on_keyevent(self.focus, c)
if self.focus:
self.focus.update_window()
if not inputs:
if self.mainframe.on_idle():
self.set_idlejob() # Reschedule idle procs
continue
# no input
if not self.on_idle():
# No more idle jobs
nonblocking = False
else:
self.set_idlejob() # Reschedule idle procs
except Exception as e:
kaa.log.error('Unhandled exception', exc_info=True)
kaa.app.messagebar.set_message(' '.join(str(e).split('\n')))
nonblocking = True
|
import math

class ApxSignature(object):
def __init__(self,mainType,name,dsg,attr=""):
self.mainType=mainType
self.name=name
self.dsg=dsg
self.attr=attr
def __str__(self):
        if self.attr is not None and len(self.attr) > 0:
            return '%s"%s"%s:%s' % (self.mainType, self.name, self.dsg, self.attr)
        else:
            return '%s"%s"%s' % (self.mainType, self.name, self.dsg)
class ApxType(object):
@staticmethod
def _calcUIntTypeLen(dataType):
if dataType['type']=='integer':
if dataType['min'] == 0:
return int(math.ceil(math.log(dataType['max'],2)))
return None
@staticmethod
def _calcIntTypeLen(dataType):
if dataType['type']=='integer':
if dataType['min'] < 0:
return int(math.ceil(math.log(abs(dataType['max']),2)))+1
return None
@staticmethod
def _calcDataSignature(dataType):
global typeData
global args
typeCode = None
if dataType['type']=='boolean':
return 'C(0,1)'
if dataType['type']=='integer':
return ApxType._getIntegerTypeCode(dataType)
elif dataType['type'] == 'array':
typeCode = ApxType._getIntegerTypeCode(typeData.find(dataType['typeRef']))
if typeCode != None:
return "%s[%d]"%(typeCode,int(dataType['length']))
else:
raise Exception("unsupported type: %s"%typeData.find(dataType['typeRef']))
elif dataType['type'] == 'string':
typeCode = 'a'
if typeCode != None:
return "%s[%d]"%(typeCode,int(dataType['length'])+1)
elif dataType['type'] == 'record':
result="{"
for elem in dataType['elements']:
#uncomment to remove _RE from end of element names
#if elem['name'].endswith('_RE'):
#elem['name']=elem['name'][:-3]
childType = typeData.find(elem['typeRef'])
result+='"%s"%s'%(elem['name'],ApxType._calcDataSignature(childType))
result+="}"
return result
        else: raise Exception('unhandled data type: %s' % dataType['type'])
return ""
@staticmethod
def _getIntegerTypeCode(dataType):
global args
if dataType['min'] >= 0:
bits = ApxType._calcUIntTypeLen(dataType)
if bits <=8:
if (dataType['min']>0) or (dataType['max']<255):
return 'C(%d,%d)'%(dataType['min'],dataType['max'])
else:
return 'C'
elif bits <=16:
return 'S'
elif bits <=32:
return 'L'
elif bits <=64:
return 'U'
elif dataType['min']<0:
bits = ApxType._calcIntTypeLen(dataType)
if bits <=8:
if (dataType['min']>-128) or dataType['max']<127:
return 'c(%d,%d)'%(dataType['min'],dataType['max'])
else:
return 'c'
elif bits <=16:
return 's'
elif bits <=32:
return 'l'
elif bits <=64:
return 'u'
else:
print("not implemented (min=%s)"%dataType['min'])
@staticmethod
def _calcAttribute(dataType):
if dataType['type']=='integer':
typeSemantics = typeData.find('/DataType/Semantics/%s'%dataType['name'])
if (typeSemantics != None) and ('valueTable' in typeSemantics):
v=','.join(['"%s"'%x for x in typeSemantics['valueTable']])
return "VT(%s)"%v
return None
def __init__(self,dataType):
self.name = dataType['name']
self.signature = ApxSignature('T',dataType['name'],ApxType._calcDataSignature(dataType),ApxType._calcAttribute(dataType))
class ApxPort(object):
def __init__(self,name,typeIndex,attrib):
self.name = name
self.typeIndex = typeIndex
self.attrib=attrib
def getAttrStr(self):
result=""
if self.attrib['initValue']!=None:
result+="=%s"%self.attrib['initValue']
if self.attrib['isQueued']:
if self.attrib['queueLen']!=None:
result+="Q[%d]"%self.attrib['queueLen']
else:
result+="Q"
if self.attrib['isParameter']:
result+="P"
return result
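# --- Illustrative sketch (not part of the original file) -------------------
# ApxSignature only formats the pieces it is given, so it can be exercised on
# its own; the type names and value table below are hypothetical.
if __name__ == '__main__':
    sig = ApxSignature('T', 'EngineSpeed_T', 'S')
    print(sig)     # -> T"EngineSpeed_T"S
    sig_vt = ApxSignature('T', 'OnOff_T', 'C(0,3)', 'VT("Off","On","Error")')
    print(sig_vt)  # -> T"OnOff_T"C(0,3):VT("Off","On","Error")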
|
#!/usr/bin/env python3
"""
Author : Adam Matteck
Date : 2021-12-02
"""
import argparse
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Picnic game",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"items", metavar="str", help="Item(s) to bring", nargs="+"
)
parser.add_argument(
"-s", "--sorted", help="Sort the items", action="store_true"
)
return parser.parse_args()
def get_output(items):
"""Generate correct program output according to specs"""
brought = ''
if len(items) == 1:
brought = items[0]
elif len(items) == 2:
brought = ' and '.join(items)
elif len(items) >= 3:
items[-1] = 'and ' + items[-1]
brought = ', '.join(items)
return f'You are bringing {brought}.'
def main():
"""Main program logic"""
args = get_args()
items = args.items
    if args.sorted:  # avoid shadowing the built-in sorted()
items.sort()
print(get_output(items))
if __name__ == "__main__":
main()
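# Example invocation (illustrative; the script name is hypothetical):
#
#   $ python picnic.py chips soda "potato salad" --sorted
#   You are bringing chips, potato salad, and soda.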
|
import argparse
import time
import numpy as np
from pycompss.api.api import barrier, compss_wait_on
import dislib as ds
from dislib.classification import RandomForestClassifier
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--svmlight", help="read files in SVMLight format",
action="store_true")
parser.add_argument("-dt", "--detailed_times",
help="get detailed execution times (read and fit)",
action="store_true")
parser.add_argument("-e", "--estimators", metavar="N_ESTIMATORS",
type=int, help="default is 10", default=10)
parser.add_argument("-b", "--block_size", metavar="BLOCK_SIZE", type=str,
help="two comma separated ints that represent the "
"size of the blocks in which to divide the input "
"data (default is 100,100)",
default="100,100")
parser.add_argument("-md", "--max_depth", metavar="MAX_DEPTH",
type=int, help="default is np.inf", required=False)
parser.add_argument("-dd", "--dist_depth", metavar="DIST_DEPTH", type=int,
help="default is auto", required=False)
parser.add_argument("-f", "--features", metavar="N_FEATURES",
help="number of features of the input data "
"(only for SVMLight files)",
type=int, default=None, required=False)
parser.add_argument("--dense", help="use dense data structures",
action="store_true")
parser.add_argument("-t", "--test-file", metavar="TEST_FILE_PATH",
help="test file path", type=str, required=False)
parser.add_argument("train_data",
help="input file in CSV or SVMLight format", type=str)
args = parser.parse_args()
train_data = args.train_data
s_time = time.time()
read_time = 0
sparse = not args.dense
bsize = args.block_size.split(",")
block_size = (int(bsize[0]), int(bsize[1]))
if args.svmlight:
x, y = ds.load_svmlight_file(train_data, block_size, args.features,
sparse)
else:
x = ds.load_txt_file(train_data, block_size)
y = x[:, x.shape[1] - 2: x.shape[1] - 1]
x = x[:, :x.shape[1] - 1]
if args.detailed_times:
barrier()
read_time = time.time() - s_time
s_time = time.time()
if args.dist_depth:
dist_depth = args.dist_depth
else:
dist_depth = "auto"
if args.max_depth:
max_depth = args.max_depth
else:
max_depth = np.inf
forest = RandomForestClassifier(n_estimators=args.estimators,
max_depth=max_depth,
distr_depth=dist_depth)
forest.fit(x, y)
barrier()
fit_time = time.time() - s_time
out = [forest.n_estimators, forest.distr_depth, forest.max_depth,
read_time, fit_time]
if args.test_file:
if args.svmlight:
x_test, y_test = ds.load_svmlight_file(args.test_file, block_size,
args.features,
sparse)
else:
x_test = ds.load_txt_file(args.test_file, block_size)
y_test = x_test[:, x_test.shape[1] - 1: x_test.shape[1]]
x_test = x_test[:, :x_test.shape[1] - 1]
out.append(compss_wait_on(forest.score(x_test, y_test)))
print(out)
if __name__ == "__main__":
main()
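# Example invocation (illustrative; the script name, paths and sizes below are
# hypothetical, the flags match the argparse definitions above):
#
#   python rf_benchmark.py --svmlight -f 54 -e 50 -b 5000,54 \
#       -t /data/test.libsvm /data/train.libsvm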
|
# Here we import the libraries:
# requests is a library to make web calls
# beautifulsoup is our scraping library
# unicodecsv is a replacement for the normal Python csv library; this one
# supports unicode characters (it is not needed in this particular script)
# New in this one is 're', which allows for regular expressions
import requests, re
from bs4 import BeautifulSoup
#getting our html file
r = requests.get('http://www.knmi.nl/cms/content/23519/kouderecord_van_nederland')
#Making it a beautiful soup object
soup = BeautifulSoup(r.text, 'html.parser')  # explicit parser avoids a bs4 warning
article = soup.find('div',{'class':'article'})
# Extract every numeric value that is followed by the word 'graden' (Dutch for
# 'degrees'); each match is a tuple of (integer part, optional decimal part)
values = re.findall(r'-?([0-9]+)(,[0-9]+)?(?=\s?graden)', article.text)
for value in values:
    print(''.join(value))
#done!
|