Dataset schema (one row per source file; fields are separated by "|", and ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |

The rows below list these fields in order, with the full file text in the content field.
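A minimal sketch of how rows with this schema can be loaded and filtered. The file name is hypothetical and pandas is only one of several options:

import pandas as pd

# Hypothetical local export of the rows shown below; adjust the path to your copy.
df = pd.read_parquet("python_files.parquet")

# Example filter: small, mostly-alphanumeric files from starred repositories.
subset = df[(df["size"] < 10_000)
            & (df["alphanum_fraction"] > 0.6)
            & (df["max_stars_count"].notna())]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())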
eb1a82d84415cd7f52c037a27cea65ca59f4abfd | 1,373 | py | Python | cacreader/swig-4.0.2/Examples/test-suite/python/extend_template_method_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 1,031 | 2015-01-02T14:08:47.000Z | 2022-03-29T02:25:27.000Z | cacreader/swig-4.0.2/Examples/test-suite/python/extend_template_method_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 240 | 2015-01-11T04:27:19.000Z | 2022-03-30T00:35:57.000Z | cacreader/swig-4.0.2/Examples/test-suite/python/extend_template_method_runme.py | kyletanyag/LL-Smartcard | 02abea9de5a13f8bae4d7832ab34cb7f0d9514c9 | [
"BSD-3-Clause"
] | 224 | 2015-01-05T06:13:54.000Z | 2022-02-25T14:39:51.000Z | from extend_template_method import *
em = ExtendMe()
ret_double = em.do_stuff_double(1, 1.1)
if ret_double != 1.1:
raise RuntimeError("double failed " + ret_double)
ret_string = em.do_stuff_string(1, "hello there")
if ret_string != "hello there":
raise RuntimeError("string failed " + ret_string)
ret_double = em.do_overloaded_stuff(1.1)
if ret_double != 1.1:
raise RuntimeError("double failed " + ret_double)
ret_string = em.do_overloaded_stuff("hello there")
if ret_string != "hello there":
raise RuntimeError("string failed " + ret_string)
if ExtendMe.static_method(123) != 123:
raise RuntimeError("static_method failed")
em2 = ExtendMe(123)
em = TemplateExtend()
ret_double = em.do_template_stuff_double(1, 1.1)
if ret_double != 1.1:
raise RuntimeError("double failed " + ret_double)
ret_string = em.do_template_stuff_string(1, "hello there")
if ret_string != "hello there":
raise RuntimeError("string failed " + ret_string)
ret_double = em.do_template_overloaded_stuff(1.1)
if ret_double != 1.1:
raise RuntimeError("double failed " + ret_double)
ret_string = em.do_template_overloaded_stuff("hello there")
if ret_string != "hello there":
raise RuntimeError("string failed " + ret_string)
if TemplateExtend.static_template_method(123) != 123:
raise RuntimeError("static_template_method failed")
em2 = TemplateExtend(123)
| 29.847826 | 59 | 0.744355 |
9cf22dc84b46282c751265958c1eccd85b5233ab | 603 | py | Python | backend/api/__init__.py | justinac0/LibraryDB | bcdc61251e8f9b30598ab51649c8581843733d25 | [
"MIT"
] | null | null | null | backend/api/__init__.py | justinac0/LibraryDB | bcdc61251e8f9b30598ab51649c8581843733d25 | [
"MIT"
] | null | null | null | backend/api/__init__.py | justinac0/LibraryDB | bcdc61251e8f9b30598ab51649c8581843733d25 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def create_app(config=None):
app = Flask(__name__)
app.config.from_mapping(config)
app.config["SQLALCHEMY_DATABASE_URI"] = "mariadb+mariadbconnector://root:example@db/librarydb"
app.config["SECRET_KEY"] = "haha"
CORS(app)
db.init_app(app)
with app.app_context():
from . import routes
from . import auth
db.create_all()
app.register_blueprint(auth.auth)
app.register_blueprint(routes.libdb)
return app | 20.1 | 98 | 0.689884 |
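A hedged usage sketch for the application factory above; the importable package name "api" is an assumption, and the MariaDB instance named in SQLALCHEMY_DATABASE_URI must be reachable:

from api import create_app

app = create_app({})  # an explicit (possibly empty) mapping keeps from_mapping() happy

if __name__ == "__main__":
    app.run(debug=True)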
6edc3b1b70b453af0fd9316d539b730cc2b17b96 | 2,310 | py | Python | src/pretix/control/__init__.py | fakegit/pretix | b6e9e64ff967f7b4f91fe88694f4157d8a0787b4 | [
"Apache-2.0"
] | null | null | null | src/pretix/control/__init__.py | fakegit/pretix | b6e9e64ff967f7b4f91fe88694f4157d8a0787b4 | [
"Apache-2.0"
] | 56 | 2020-05-07T07:54:17.000Z | 2021-04-19T12:14:14.000Z | src/pretix/control/__init__.py | fakegit/pretix | b6e9e64ff967f7b4f91fe88694f4157d8a0787b4 | [
"Apache-2.0"
] | null | null | null | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Tobias Kunze
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from django.apps import AppConfig
class PretixControlConfig(AppConfig):
name = 'pretix.control'
label = 'pretixcontrol'
def ready(self):
from .views import dashboards # noqa
from . import logdisplay # noqa
default_app_config = 'pretix.control.PretixControlConfig'
| 48.125 | 118 | 0.771429 |
57553f4397c80e16eb5c9ff26b5f638a646a7cce | 8,429 | py | Python | kubernetes/client/models/v2beta1_container_resource_metric_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v2beta1_container_resource_metric_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | 3 | 2021-11-30T03:11:13.000Z | 2022-02-09T03:39:41.000Z | kubernetes/client/models/v2beta1_container_resource_metric_status.py | sthagen/kubernetes-client-python | 3a183048d7d568ba5ea418bcfb8f61713908d3ea | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta1ContainerResourceMetricStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container': 'str',
'current_average_utilization': 'int',
'current_average_value': 'str',
'name': 'str'
}
attribute_map = {
'container': 'container',
'current_average_utilization': 'currentAverageUtilization',
'current_average_value': 'currentAverageValue',
'name': 'name'
}
def __init__(self, container=None, current_average_utilization=None, current_average_value=None, name=None, local_vars_configuration=None): # noqa: E501
"""V2beta1ContainerResourceMetricStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._container = None
self._current_average_utilization = None
self._current_average_value = None
self._name = None
self.discriminator = None
self.container = container
if current_average_utilization is not None:
self.current_average_utilization = current_average_utilization
self.current_average_value = current_average_value
self.name = name
@property
def container(self):
"""Gets the container of this V2beta1ContainerResourceMetricStatus. # noqa: E501
container is the name of the container in the pods of the scaling target # noqa: E501
:return: The container of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:rtype: str
"""
return self._container
@container.setter
def container(self, container):
"""Sets the container of this V2beta1ContainerResourceMetricStatus.
container is the name of the container in the pods of the scaling target # noqa: E501
:param container: The container of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and container is None: # noqa: E501
raise ValueError("Invalid value for `container`, must not be `None`") # noqa: E501
self._container = container
@property
def current_average_utilization(self):
"""Gets the current_average_utilization of this V2beta1ContainerResourceMetricStatus. # noqa: E501
currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification. # noqa: E501
:return: The current_average_utilization of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:rtype: int
"""
return self._current_average_utilization
@current_average_utilization.setter
def current_average_utilization(self, current_average_utilization):
"""Sets the current_average_utilization of this V2beta1ContainerResourceMetricStatus.
currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification. # noqa: E501
:param current_average_utilization: The current_average_utilization of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:type: int
"""
self._current_average_utilization = current_average_utilization
@property
def current_average_value(self):
"""Gets the current_average_value of this V2beta1ContainerResourceMetricStatus. # noqa: E501
currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification. # noqa: E501
:return: The current_average_value of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:rtype: str
"""
return self._current_average_value
@current_average_value.setter
def current_average_value(self, current_average_value):
"""Sets the current_average_value of this V2beta1ContainerResourceMetricStatus.
currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification. # noqa: E501
:param current_average_value: The current_average_value of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and current_average_value is None: # noqa: E501
raise ValueError("Invalid value for `current_average_value`, must not be `None`") # noqa: E501
self._current_average_value = current_average_value
@property
def name(self):
"""Gets the name of this V2beta1ContainerResourceMetricStatus. # noqa: E501
name is the name of the resource in question. # noqa: E501
:return: The name of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V2beta1ContainerResourceMetricStatus.
name is the name of the resource in question. # noqa: E501
:param name: The name of this V2beta1ContainerResourceMetricStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta1ContainerResourceMetricStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2beta1ContainerResourceMetricStatus):
return True
return self.to_dict() != other.to_dict()
| 40.138095 | 313 | 0.673983 |
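A hedged usage sketch for the generated model above; it assumes the Python "kubernetes" client package is installed and re-exports this class from kubernetes.client:

from kubernetes.client import V2beta1ContainerResourceMetricStatus

# "app" and "250m" are illustrative values, not from the source.
status = V2beta1ContainerResourceMetricStatus(
    container="app",
    current_average_value="250m",
    name="cpu",
)
print(status.to_dict())
# {'container': 'app', 'current_average_utilization': None,
#  'current_average_value': '250m', 'name': 'cpu'}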
78478fc3d6c768a46db4a5bdf9082e14f89beee2 | 579 | py | Python | tarefa013/09/registros_medio.py | MateSilver/cs-2021-1 | eedb924508ecf47297ceeda3812039401bd325a4 | [
"MIT"
] | null | null | null | tarefa013/09/registros_medio.py | MateSilver/cs-2021-1 | eedb924508ecf47297ceeda3812039401bd325a4 | [
"MIT"
] | 1 | 2022-01-22T17:41:56.000Z | 2022-01-22T17:41:56.000Z | tarefa013/10/registros_medio.py | MateSilver/cs-2021-1 | eedb924508ecf47297ceeda3812039401bd325a4 | [
"MIT"
] | null | null | null | import registros_basico
class registro_medio(registros_basico.registro_basico):
"""registro basico herdado de registro"""
def __init__(self, nome, id, escola, escola_medio, cargo):
        super().__init__(nome, id, escola)
self._escola_medio = escola_medio
self._saldo *= 1.5
if cargo == 'gerente':
self._saldo += 1500.0
elif cargo == 'supervisor':
self._saldo += 600.0
elif cargo == 'vendedor':
self._saldo += 250.0
def show(self):
print(self._nome)
print(self._saldo) | 32.166667 | 62 | 0.604491 |
b9804d106457e5aceace82bc71944a2e32562feb | 5,378 | py | Python | pyfmodex/geometry.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | pyfmodex/geometry.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | pyfmodex/geometry.py | CDanSantana/pyfmodex | d820712a9a5203e2e2a2547e29f9e9a02f404282 | [
"MIT"
] | null | null | null | from ctypes import *
from .fmodobject import FmodObject
from .globalvars import dll as _dll
from .structures import VECTOR
from .utils import ckresult
class PolygonAttributes(object):
def __init__(self, gptr, index):
self._gptr = gptr
self.index = index
self._directocclusion = c_float()
self._reverbocclusion = c_float()
self._doublesided = c_bool()
self._refresh_state()
def _refresh_state(self):
ckresult(_dll.FMOD_Geometry_GetPolygonAttributes(self._gptr, self.index, byref(self._directocclusion), byref(self._reverbocclusion), byref(self._doublesided)))
@property
def direct_occlusion(self):
self._refresh_state()
return self._directocclusion.value
@direct_occlusion.setter
def direct_occlusion(self, occ):
ckresult(_dll.FMOD_Geometry_SetPolygonAttributes(self._gptr, self.index, c_float(occ), self._reverbocclusion, self._doublesided))
@property
def reverb_occlusion(self):
self._refresh_state()
return self._reverbocclusion.value
@reverb_occlusion.setter
def reverb_occlusion(self, occ):
ckresult(_dll.FMOD_Geometry_SetPolygonAttributes(self._gptr, self.index, self._directocclusion, c_float(occ), self._doublesided))
@property
def double_sided(self):
self._refresh_state()
return self._doublesided
@double_sided.setter
def double_sided(self, dval):
ckresult(_dll.FMOD_Geometry_SetPolygonAttributes(self._gptr, self.index, self._directocclusion, self._reverbocclusion, dval))
@property
def num_vertices(self):
num = c_int()
ckresult(_dll.FMOD_Geometry_GetPolygonNumVertices(self._gptr, self.index, byref(num)))
return num.value
def get_vertex(self, index):
vertex = VECTOR()
ckresult(_dll.FMOD_Geometry_GetPolygonVertex(self._gptr, self.index, index, byref(vertex)))
return vertex.to_list()
def set_vertex(self, index, vertex):
vvec = VECTOR.from_list(vertex)
ckresult(_dll.FMOD_Geometry_SetPolygonVertex(self._gptr, self.index, index, vvec))
class Geometry(FmodObject):
def add_polygon(self, directocclusion, reverbocclusion, doublesided, *vertices):
va = VECTOR * len(vertices)
varray = va(*vertices)
idx = c_int()
self._call_fmod("FMOD_Geometry_AddPolygon", c_float(directocclusion), c_float(reverbocclusion), c_bool(doublesided), len(vertices), varray, byref(idx))
return idx.value
@property
def active(self):
active = c_bool()
self._call_fmod("FMOD_Geometry_GetActive", byref(active))
return active.value
@active.setter
def active(self, ac):
self._call_fmod("FMOD_Geometry_SetActive", ac)
@property
def _creation_limits(self):
maxpols, maxverts = (c_int(), c_int())
self._call_fmod("FMOD_Geometry_GetMaxPolygons", byref(maxpols), byref(maxverts))
return (maxpols.value, maxverts.value)
@property
def max_polygons(self):
return self._creation_limits[0]
@property
def max_vertices(self):
return self._creation_limits[1]
@property
def num_polygons(self):
num = c_int()
self._call_fmod("FMOD_Geometry_GetNumPolygons", byref(num))
return num.value
def get_polygon(self, index):
return PolygonAttributes(self._ptr, index)
@property
def position(self):
pos = VECTOR()
self._call_fmod("FMOD_Geometry_GetPosition", byref(pos))
return pos.to_list()
@position.setter
def position(self, pos):
posv = VECTOR.from_list(pos)
self._call_fmod("FMOD_Geometry_SetPosition", posv)
@property
def _rotation(self):
fwd = VECTOR()
up = VECTOR()
self._call_fmod("FMOD_Geometry_GetRotation", byref(fwd), byref(up))
return [fwd.to_list(), up.to_list()]
@_rotation.setter
def _rotation(self, rot):
fwd = VECTOR.from_list(rot[0])
up = VECTOR.from_list(rot[1])
self._call_fmod("FMOD_Geometry_SetRotation", fwd, up)
@property
def forward_rotation(self):
return self._rotation[0]
@forward_rotation.setter
def forward_rotation(self, rot):
r = self._rotation
r[0] = rot
self._rotation = r
@property
def up_rotation(self):
return self._rotation[1]
@up_rotation.setter
def up_rotation(self, rot):
r = self._rotation
r[1] = rot
self._rotation = r
@property
def scale(self):
scale = VECTOR()
self._call_fmod("FMOD_Geometry_GetScale", byref(scale))
return scale.to_list()
@scale.setter
def scale(self, scale):
scalev = VECTOR.from_list(scale)
self._call_fmod("FMOD_Geometry_SetScale", byref(scalev))
def release(self):
self._call_fmod("FMOD_Geometry_Release")
def save(self):
size = c_int()
self._call_fmod("FMOD_Geometry_Save", None, byref(size))
ptr = create_string_buffer(size.value)
self._call_fmod("FMOD_Geometry_Save", ptr, byref(size))
return ptr.raw
| 34.037975 | 168 | 0.646151 |
8ad474a55456ebf29c5dd9bd7ce380bdb26b33de | 955 | py | Python | src/sendkeyboardinput/pw2.py | mbcrump/mbcrump-twitch | 2932c9aefbc5e6a490441ea1caaca0fca8378647 | [
"MIT"
] | 3 | 2020-11-25T19:49:09.000Z | 2021-02-14T12:58:12.000Z | src/sendkeyboardinput/pw2.py | mbcrump/mbcrump-twitch | 2932c9aefbc5e6a490441ea1caaca0fca8378647 | [
"MIT"
] | null | null | null | src/sendkeyboardinput/pw2.py | mbcrump/mbcrump-twitch | 2932c9aefbc5e6a490441ea1caaca0fca8378647 | [
"MIT"
] | 4 | 2021-01-06T13:09:03.000Z | 2021-11-18T13:25:45.000Z | #!/usr/bin/env python3
## be able to quickly find password using alpha characters
## no password file
## build engine to do it ourselves
import random
import time
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
chars_list = list(chars)
password = input("What is your pw?") ##kiradeveloper
guess_password = ""
i = 0
named_tuple = time.localtime()
seconds = time.time()
print("Starting time" + time.strftime("%m/%d/%Y, %H:%M:%S:" + str(seconds), named_tuple))
while guess_password != password:
    # Draw len(password) symbols uniformly at random (with replacement) and join them.
    guess_password = "".join(random.choices(chars_list, k=len(password)))
    #print("App tried " + guess_password)
    i += 1
    if guess_password == password:
        print("Your password is = " + guess_password)
print("Total number of guesses = " + str(i))
named_tuple = time.localtime()
seconds = time.time()
print("Stop time" + time.strftime("%m/%d/%Y, %H:%M:%S:" + str(seconds), named_tuple))
break
| 32.931034 | 93 | 0.66178 |
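A back-of-the-envelope aside (editor-added, not part of the script): a uniform guess over the 36-symbol alphabet matches an n-character password with probability 36**-n, so the expected number of guesses grows as 36**n:

# Expected guesses for a uniform random guesser over 36 symbols.
for n in (4, 8, 13):  # 13 = len("kiradeveloper"), the example password above
    print(n, 36 ** n)  # 1679616, ~2.8e12, ~1.7e20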
44f35fd24ebecdf59254c4c86c331c278adedd63 | 19,450 | py | Python | python101/code/color_list.py | nirmeshk/mase | c31fb41f39bb7a333f6fd33aa43fa0b23531030a | [
"Unlicense"
] | 9 | 2015-08-23T01:46:56.000Z | 2019-04-25T00:48:21.000Z | python101/code/color_list.py | sadiaTab/mase | 95339dbb045c0613b40d69de9774368c0a2eb5ae | [
"Unlicense"
] | 20 | 2015-08-08T03:39:25.000Z | 2015-11-03T22:22:56.000Z | python101/code/color_list.py | sadiaTab/mase | 95339dbb045c0613b40d69de9774368c0a2eb5ae | [
"Unlicense"
] | 43 | 2015-05-08T05:55:41.000Z | 2020-04-27T14:13:26.000Z | """Code for handling color names and RGB codes.
This module is part of Swampy, and used in Think Python and
Think Complexity, by Allen Downey.
http://greenteapress.com
Copyright 2013 Allen B. Downey.
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import re
# the following is the contents of /etc/X11/rgb.txt
COLORS = """
! $Xorg: rgb.txt,v 1.3 2000/08/17 19:54:00 cpqbld Exp $
255 250 250 snow
248 248 255 ghost white
248 248 255 GhostWhite
245 245 245 white smoke
245 245 245 WhiteSmoke
220 220 220 gainsboro
255 250 240 floral white
255 250 240 FloralWhite
253 245 230 old lace
253 245 230 OldLace
250 240 230 linen
250 235 215 antique white
250 235 215 AntiqueWhite
255 239 213 papaya whip
255 239 213 PapayaWhip
255 235 205 blanched almond
255 235 205 BlanchedAlmond
255 228 196 bisque
255 218 185 peach puff
255 218 185 PeachPuff
255 222 173 navajo white
255 222 173 NavajoWhite
255 228 181 moccasin
255 248 220 cornsilk
255 255 240 ivory
255 250 205 lemon chiffon
255 250 205 LemonChiffon
255 245 238 seashell
240 255 240 honeydew
245 255 250 mint cream
245 255 250 MintCream
240 255 255 azure
240 248 255 alice blue
240 248 255 AliceBlue
230 230 250 lavender
255 240 245 lavender blush
255 240 245 LavenderBlush
255 228 225 misty rose
255 228 225 MistyRose
255 255 255 white
0 0 0 black
47 79 79 dark slate gray
47 79 79 DarkSlateGray
47 79 79 dark slate grey
47 79 79 DarkSlateGrey
105 105 105 dim gray
105 105 105 DimGray
105 105 105 dim grey
105 105 105 DimGrey
112 128 144 slate gray
112 128 144 SlateGray
112 128 144 slate grey
112 128 144 SlateGrey
119 136 153 light slate gray
119 136 153 LightSlateGray
119 136 153 light slate grey
119 136 153 LightSlateGrey
190 190 190 gray
190 190 190 grey
211 211 211 light grey
211 211 211 LightGrey
211 211 211 light gray
211 211 211 LightGray
25 25 112 midnight blue
25 25 112 MidnightBlue
0 0 128 navy
0 0 128 navy blue
0 0 128 NavyBlue
100 149 237 cornflower blue
100 149 237 CornflowerBlue
72 61 139 dark slate blue
72 61 139 DarkSlateBlue
106 90 205 slate blue
106 90 205 SlateBlue
123 104 238 medium slate blue
123 104 238 MediumSlateBlue
132 112 255 light slate blue
132 112 255 LightSlateBlue
0 0 205 medium blue
0 0 205 MediumBlue
65 105 225 royal blue
65 105 225 RoyalBlue
0 0 255 blue
30 144 255 dodger blue
30 144 255 DodgerBlue
0 191 255 deep sky blue
0 191 255 DeepSkyBlue
135 206 235 sky blue
135 206 235 SkyBlue
135 206 250 light sky blue
135 206 250 LightSkyBlue
70 130 180 steel blue
70 130 180 SteelBlue
176 196 222 light steel blue
176 196 222 LightSteelBlue
173 216 230 light blue
173 216 230 LightBlue
176 224 230 powder blue
176 224 230 PowderBlue
175 238 238 pale turquoise
175 238 238 PaleTurquoise
0 206 209 dark turquoise
0 206 209 DarkTurquoise
72 209 204 medium turquoise
72 209 204 MediumTurquoise
64 224 208 turquoise
0 255 255 cyan
224 255 255 light cyan
224 255 255 LightCyan
95 158 160 cadet blue
95 158 160 CadetBlue
102 205 170 medium aquamarine
102 205 170 MediumAquamarine
127 255 212 aquamarine
0 100 0 dark green
0 100 0 DarkGreen
85 107 47 dark olive green
85 107 47 DarkOliveGreen
143 188 143 dark sea green
143 188 143 DarkSeaGreen
46 139 87 sea green
46 139 87 SeaGreen
60 179 113 medium sea green
60 179 113 MediumSeaGreen
32 178 170 light sea green
32 178 170 LightSeaGreen
152 251 152 pale green
152 251 152 PaleGreen
0 255 127 spring green
0 255 127 SpringGreen
124 252 0 lawn green
124 252 0 LawnGreen
0 255 0 green
127 255 0 chartreuse
0 250 154 medium spring green
0 250 154 MediumSpringGreen
173 255 47 green yellow
173 255 47 GreenYellow
50 205 50 lime green
50 205 50 LimeGreen
154 205 50 yellow green
154 205 50 YellowGreen
34 139 34 forest green
34 139 34 ForestGreen
107 142 35 olive drab
107 142 35 OliveDrab
189 183 107 dark khaki
189 183 107 DarkKhaki
240 230 140 khaki
238 232 170 pale goldenrod
238 232 170 PaleGoldenrod
250 250 210 light goldenrod yellow
250 250 210 LightGoldenrodYellow
255 255 224 light yellow
255 255 224 LightYellow
255 255 0 yellow
255 215 0 gold
238 221 130 light goldenrod
238 221 130 LightGoldenrod
218 165 32 goldenrod
184 134 11 dark goldenrod
184 134 11 DarkGoldenrod
188 143 143 rosy brown
188 143 143 RosyBrown
205 92 92 indian red
205 92 92 IndianRed
139 69 19 saddle brown
139 69 19 SaddleBrown
160 82 45 sienna
205 133 63 peru
222 184 135 burlywood
245 245 220 beige
245 222 179 wheat
244 164 96 sandy brown
244 164 96 SandyBrown
210 180 140 tan
210 105 30 chocolate
178 34 34 firebrick
165 42 42 brown
233 150 122 dark salmon
233 150 122 DarkSalmon
250 128 114 salmon
255 160 122 light salmon
255 160 122 LightSalmon
255 165 0 orange
255 140 0 dark orange
255 140 0 DarkOrange
255 127 80 coral
240 128 128 light coral
240 128 128 LightCoral
255 99 71 tomato
255 69 0 orange red
255 69 0 OrangeRed
255 0 0 red
255 105 180 hot pink
255 105 180 HotPink
255 20 147 deep pink
255 20 147 DeepPink
255 192 203 pink
255 182 193 light pink
255 182 193 LightPink
219 112 147 pale violet red
219 112 147 PaleVioletRed
176 48 96 maroon
199 21 133 medium violet red
199 21 133 MediumVioletRed
208 32 144 violet red
208 32 144 VioletRed
255 0 255 magenta
238 130 238 violet
221 160 221 plum
218 112 214 orchid
186 85 211 medium orchid
186 85 211 MediumOrchid
153 50 204 dark orchid
153 50 204 DarkOrchid
148 0 211 dark violet
148 0 211 DarkViolet
138 43 226 blue violet
138 43 226 BlueViolet
160 32 240 purple
147 112 219 medium purple
147 112 219 MediumPurple
216 191 216 thistle
255 250 250 snow1
238 233 233 snow2
205 201 201 snow3
139 137 137 snow4
255 245 238 seashell1
238 229 222 seashell2
205 197 191 seashell3
139 134 130 seashell4
255 239 219 AntiqueWhite1
238 223 204 AntiqueWhite2
205 192 176 AntiqueWhite3
139 131 120 AntiqueWhite4
255 228 196 bisque1
238 213 183 bisque2
205 183 158 bisque3
139 125 107 bisque4
255 218 185 PeachPuff1
238 203 173 PeachPuff2
205 175 149 PeachPuff3
139 119 101 PeachPuff4
255 222 173 NavajoWhite1
238 207 161 NavajoWhite2
205 179 139 NavajoWhite3
139 121 94 NavajoWhite4
255 250 205 LemonChiffon1
238 233 191 LemonChiffon2
205 201 165 LemonChiffon3
139 137 112 LemonChiffon4
255 248 220 cornsilk1
238 232 205 cornsilk2
205 200 177 cornsilk3
139 136 120 cornsilk4
255 255 240 ivory1
238 238 224 ivory2
205 205 193 ivory3
139 139 131 ivory4
240 255 240 honeydew1
224 238 224 honeydew2
193 205 193 honeydew3
131 139 131 honeydew4
255 240 245 LavenderBlush1
238 224 229 LavenderBlush2
205 193 197 LavenderBlush3
139 131 134 LavenderBlush4
255 228 225 MistyRose1
238 213 210 MistyRose2
205 183 181 MistyRose3
139 125 123 MistyRose4
240 255 255 azure1
224 238 238 azure2
193 205 205 azure3
131 139 139 azure4
131 111 255 SlateBlue1
122 103 238 SlateBlue2
105 89 205 SlateBlue3
71 60 139 SlateBlue4
72 118 255 RoyalBlue1
67 110 238 RoyalBlue2
58 95 205 RoyalBlue3
39 64 139 RoyalBlue4
0 0 255 blue1
0 0 238 blue2
0 0 205 blue3
0 0 139 blue4
30 144 255 DodgerBlue1
28 134 238 DodgerBlue2
24 116 205 DodgerBlue3
16 78 139 DodgerBlue4
99 184 255 SteelBlue1
92 172 238 SteelBlue2
79 148 205 SteelBlue3
54 100 139 SteelBlue4
0 191 255 DeepSkyBlue1
0 178 238 DeepSkyBlue2
0 154 205 DeepSkyBlue3
0 104 139 DeepSkyBlue4
135 206 255 SkyBlue1
126 192 238 SkyBlue2
108 166 205 SkyBlue3
74 112 139 SkyBlue4
176 226 255 LightSkyBlue1
164 211 238 LightSkyBlue2
141 182 205 LightSkyBlue3
96 123 139 LightSkyBlue4
198 226 255 SlateGray1
185 211 238 SlateGray2
159 182 205 SlateGray3
108 123 139 SlateGray4
202 225 255 LightSteelBlue1
188 210 238 LightSteelBlue2
162 181 205 LightSteelBlue3
110 123 139 LightSteelBlue4
191 239 255 LightBlue1
178 223 238 LightBlue2
154 192 205 LightBlue3
104 131 139 LightBlue4
224 255 255 LightCyan1
209 238 238 LightCyan2
180 205 205 LightCyan3
122 139 139 LightCyan4
187 255 255 PaleTurquoise1
174 238 238 PaleTurquoise2
150 205 205 PaleTurquoise3
102 139 139 PaleTurquoise4
152 245 255 CadetBlue1
142 229 238 CadetBlue2
122 197 205 CadetBlue3
83 134 139 CadetBlue4
0 245 255 turquoise1
0 229 238 turquoise2
0 197 205 turquoise3
0 134 139 turquoise4
0 255 255 cyan1
0 238 238 cyan2
0 205 205 cyan3
0 139 139 cyan4
151 255 255 DarkSlateGray1
141 238 238 DarkSlateGray2
121 205 205 DarkSlateGray3
82 139 139 DarkSlateGray4
127 255 212 aquamarine1
118 238 198 aquamarine2
102 205 170 aquamarine3
69 139 116 aquamarine4
193 255 193 DarkSeaGreen1
180 238 180 DarkSeaGreen2
155 205 155 DarkSeaGreen3
105 139 105 DarkSeaGreen4
84 255 159 SeaGreen1
78 238 148 SeaGreen2
67 205 128 SeaGreen3
46 139 87 SeaGreen4
154 255 154 PaleGreen1
144 238 144 PaleGreen2
124 205 124 PaleGreen3
84 139 84 PaleGreen4
0 255 127 SpringGreen1
0 238 118 SpringGreen2
0 205 102 SpringGreen3
0 139 69 SpringGreen4
0 255 0 green1
0 238 0 green2
0 205 0 green3
0 139 0 green4
127 255 0 chartreuse1
118 238 0 chartreuse2
102 205 0 chartreuse3
69 139 0 chartreuse4
192 255 62 OliveDrab1
179 238 58 OliveDrab2
154 205 50 OliveDrab3
105 139 34 OliveDrab4
202 255 112 DarkOliveGreen1
188 238 104 DarkOliveGreen2
162 205 90 DarkOliveGreen3
110 139 61 DarkOliveGreen4
255 246 143 khaki1
238 230 133 khaki2
205 198 115 khaki3
139 134 78 khaki4
255 236 139 LightGoldenrod1
238 220 130 LightGoldenrod2
205 190 112 LightGoldenrod3
139 129 76 LightGoldenrod4
255 255 224 LightYellow1
238 238 209 LightYellow2
205 205 180 LightYellow3
139 139 122 LightYellow4
255 255 0 yellow1
238 238 0 yellow2
205 205 0 yellow3
139 139 0 yellow4
255 215 0 gold1
238 201 0 gold2
205 173 0 gold3
139 117 0 gold4
255 193 37 goldenrod1
238 180 34 goldenrod2
205 155 29 goldenrod3
139 105 20 goldenrod4
255 185 15 DarkGoldenrod1
238 173 14 DarkGoldenrod2
205 149 12 DarkGoldenrod3
139 101 8 DarkGoldenrod4
255 193 193 RosyBrown1
238 180 180 RosyBrown2
205 155 155 RosyBrown3
139 105 105 RosyBrown4
255 106 106 IndianRed1
238 99 99 IndianRed2
205 85 85 IndianRed3
139 58 58 IndianRed4
255 130 71 sienna1
238 121 66 sienna2
205 104 57 sienna3
139 71 38 sienna4
255 211 155 burlywood1
238 197 145 burlywood2
205 170 125 burlywood3
139 115 85 burlywood4
255 231 186 wheat1
238 216 174 wheat2
205 186 150 wheat3
139 126 102 wheat4
255 165 79 tan1
238 154 73 tan2
205 133 63 tan3
139 90 43 tan4
255 127 36 chocolate1
238 118 33 chocolate2
205 102 29 chocolate3
139 69 19 chocolate4
255 48 48 firebrick1
238 44 44 firebrick2
205 38 38 firebrick3
139 26 26 firebrick4
255 64 64 brown1
238 59 59 brown2
205 51 51 brown3
139 35 35 brown4
255 140 105 salmon1
238 130 98 salmon2
205 112 84 salmon3
139 76 57 salmon4
255 160 122 LightSalmon1
238 149 114 LightSalmon2
205 129 98 LightSalmon3
139 87 66 LightSalmon4
255 165 0 orange1
238 154 0 orange2
205 133 0 orange3
139 90 0 orange4
255 127 0 DarkOrange1
238 118 0 DarkOrange2
205 102 0 DarkOrange3
139 69 0 DarkOrange4
255 114 86 coral1
238 106 80 coral2
205 91 69 coral3
139 62 47 coral4
255 99 71 tomato1
238 92 66 tomato2
205 79 57 tomato3
139 54 38 tomato4
255 69 0 OrangeRed1
238 64 0 OrangeRed2
205 55 0 OrangeRed3
139 37 0 OrangeRed4
255 0 0 red1
238 0 0 red2
205 0 0 red3
139 0 0 red4
215 7 81 DebianRed
255 20 147 DeepPink1
238 18 137 DeepPink2
205 16 118 DeepPink3
139 10 80 DeepPink4
255 110 180 HotPink1
238 106 167 HotPink2
205 96 144 HotPink3
139 58 98 HotPink4
255 181 197 pink1
238 169 184 pink2
205 145 158 pink3
139 99 108 pink4
255 174 185 LightPink1
238 162 173 LightPink2
205 140 149 LightPink3
139 95 101 LightPink4
255 130 171 PaleVioletRed1
238 121 159 PaleVioletRed2
205 104 137 PaleVioletRed3
139 71 93 PaleVioletRed4
255 52 179 maroon1
238 48 167 maroon2
205 41 144 maroon3
139 28 98 maroon4
255 62 150 VioletRed1
238 58 140 VioletRed2
205 50 120 VioletRed3
139 34 82 VioletRed4
255 0 255 magenta1
238 0 238 magenta2
205 0 205 magenta3
139 0 139 magenta4
255 131 250 orchid1
238 122 233 orchid2
205 105 201 orchid3
139 71 137 orchid4
255 187 255 plum1
238 174 238 plum2
205 150 205 plum3
139 102 139 plum4
224 102 255 MediumOrchid1
209 95 238 MediumOrchid2
180 82 205 MediumOrchid3
122 55 139 MediumOrchid4
191 62 255 DarkOrchid1
178 58 238 DarkOrchid2
154 50 205 DarkOrchid3
104 34 139 DarkOrchid4
155 48 255 purple1
145 44 238 purple2
125 38 205 purple3
85 26 139 purple4
171 130 255 MediumPurple1
159 121 238 MediumPurple2
137 104 205 MediumPurple3
93 71 139 MediumPurple4
255 225 255 thistle1
238 210 238 thistle2
205 181 205 thistle3
139 123 139 thistle4
0 0 0 gray0
0 0 0 grey0
3 3 3 gray1
3 3 3 grey1
5 5 5 gray2
5 5 5 grey2
8 8 8 gray3
8 8 8 grey3
10 10 10 gray4
10 10 10 grey4
13 13 13 gray5
13 13 13 grey5
15 15 15 gray6
15 15 15 grey6
18 18 18 gray7
18 18 18 grey7
20 20 20 gray8
20 20 20 grey8
23 23 23 gray9
23 23 23 grey9
26 26 26 gray10
26 26 26 grey10
28 28 28 gray11
28 28 28 grey11
31 31 31 gray12
31 31 31 grey12
33 33 33 gray13
33 33 33 grey13
36 36 36 gray14
36 36 36 grey14
38 38 38 gray15
38 38 38 grey15
41 41 41 gray16
41 41 41 grey16
43 43 43 gray17
43 43 43 grey17
46 46 46 gray18
46 46 46 grey18
48 48 48 gray19
48 48 48 grey19
51 51 51 gray20
51 51 51 grey20
54 54 54 gray21
54 54 54 grey21
56 56 56 gray22
56 56 56 grey22
59 59 59 gray23
59 59 59 grey23
61 61 61 gray24
61 61 61 grey24
64 64 64 gray25
64 64 64 grey25
66 66 66 gray26
66 66 66 grey26
69 69 69 gray27
69 69 69 grey27
71 71 71 gray28
71 71 71 grey28
74 74 74 gray29
74 74 74 grey29
77 77 77 gray30
77 77 77 grey30
79 79 79 gray31
79 79 79 grey31
82 82 82 gray32
82 82 82 grey32
84 84 84 gray33
84 84 84 grey33
87 87 87 gray34
87 87 87 grey34
89 89 89 gray35
89 89 89 grey35
92 92 92 gray36
92 92 92 grey36
94 94 94 gray37
94 94 94 grey37
97 97 97 gray38
97 97 97 grey38
99 99 99 gray39
99 99 99 grey39
102 102 102 gray40
102 102 102 grey40
105 105 105 gray41
105 105 105 grey41
107 107 107 gray42
107 107 107 grey42
110 110 110 gray43
110 110 110 grey43
112 112 112 gray44
112 112 112 grey44
115 115 115 gray45
115 115 115 grey45
117 117 117 gray46
117 117 117 grey46
120 120 120 gray47
120 120 120 grey47
122 122 122 gray48
122 122 122 grey48
125 125 125 gray49
125 125 125 grey49
127 127 127 gray50
127 127 127 grey50
130 130 130 gray51
130 130 130 grey51
133 133 133 gray52
133 133 133 grey52
135 135 135 gray53
135 135 135 grey53
138 138 138 gray54
138 138 138 grey54
140 140 140 gray55
140 140 140 grey55
143 143 143 gray56
143 143 143 grey56
145 145 145 gray57
145 145 145 grey57
148 148 148 gray58
148 148 148 grey58
150 150 150 gray59
150 150 150 grey59
153 153 153 gray60
153 153 153 grey60
156 156 156 gray61
156 156 156 grey61
158 158 158 gray62
158 158 158 grey62
161 161 161 gray63
161 161 161 grey63
163 163 163 gray64
163 163 163 grey64
166 166 166 gray65
166 166 166 grey65
168 168 168 gray66
168 168 168 grey66
171 171 171 gray67
171 171 171 grey67
173 173 173 gray68
173 173 173 grey68
176 176 176 gray69
176 176 176 grey69
179 179 179 gray70
179 179 179 grey70
181 181 181 gray71
181 181 181 grey71
184 184 184 gray72
184 184 184 grey72
186 186 186 gray73
186 186 186 grey73
189 189 189 gray74
189 189 189 grey74
191 191 191 gray75
191 191 191 grey75
194 194 194 gray76
194 194 194 grey76
196 196 196 gray77
196 196 196 grey77
199 199 199 gray78
199 199 199 grey78
201 201 201 gray79
201 201 201 grey79
204 204 204 gray80
204 204 204 grey80
207 207 207 gray81
207 207 207 grey81
209 209 209 gray82
209 209 209 grey82
212 212 212 gray83
212 212 212 grey83
214 214 214 gray84
214 214 214 grey84
217 217 217 gray85
217 217 217 grey85
219 219 219 gray86
219 219 219 grey86
222 222 222 gray87
222 222 222 grey87
224 224 224 gray88
224 224 224 grey88
227 227 227 gray89
227 227 227 grey89
229 229 229 gray90
229 229 229 grey90
232 232 232 gray91
232 232 232 grey91
235 235 235 gray92
235 235 235 grey92
237 237 237 gray93
237 237 237 grey93
240 240 240 gray94
240 240 240 grey94
242 242 242 gray95
242 242 242 grey95
245 245 245 gray96
245 245 245 grey96
247 247 247 gray97
247 247 247 grey97
250 250 250 gray98
250 250 250 grey98
252 252 252 gray99
252 252 252 grey99
255 255 255 gray100
255 255 255 grey100
169 169 169 dark grey
169 169 169 DarkGrey
169 169 169 dark gray
169 169 169 DarkGray
0 0 139 dark blue
0 0 139 DarkBlue
0 139 139 dark cyan
0 139 139 DarkCyan
139 0 139 dark magenta
139 0 139 DarkMagenta
139 0 0 dark red
139 0 0 DarkRed
144 238 144 light green
144 238 144 LightGreen
"""
def make_color_dict(colors=COLORS):
"""Returns a dictionary that maps color names to RGB strings.
The format of RGB strings is '#RRGGBB'.
"""
# regular expressions to match numbers and color names
number = r'(\d+)'
space = r'[ \t]*'
name = r'([ \w]+)'
pattern = space + (number + space) * 3 + name
prog = re.compile(pattern)
# read the file
d = dict()
for line in colors.split('\n'):
ro = prog.match(line)
if ro:
r, g, b, name = ro.groups()
rgb = '#%02x%02x%02x' % (int(r), int(g), int(b))
d[name] = rgb
return d
def read_colors():
"""Returns color information in two data structures.
The format of RGB strings is '#RRGGBB'.
color_dict: map from color name to RGB string
rgbs: list of (rgb, names) pairs, where rgb is an RGB code and
names is a sorted list of color names
"""
color_dict = make_color_dict()
    rgbs = sorted(invert_dict(color_dict).items())
for rgb, names in rgbs:
names.sort()
return color_dict, rgbs
def invert_dict(d):
"""Returns a dictionary that maps from values to lists of keys.
d: dict
returns: dict
"""
inv = dict()
for key in d:
val = d[key]
if val not in inv:
inv[val] = [key]
else:
inv[val].append(key)
return inv
if __name__ == '__main__':
color_dict = make_color_dict()
    for name, rgb in color_dict.items():
        print(name, rgb)
    color_dict, rgbs = read_colors()
    for name, rgb in color_dict.items():
        print(name, rgb)
    for rgb, names in rgbs:
        print(rgb, names)
| 23.072361 | 78 | 0.708483 |
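A small usage sketch; it assumes this file is importable as color_list (with the Python 3 fixes above), and the expected values follow directly from the color table:

from color_list import make_color_dict

colors = make_color_dict()
print(colors["red"])       # '#ff0000' (255 0 0 in the table)
print(colors["NavyBlue"])  # '#000080'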
052484f2072edcdf0fe9853389816fcaa6269b53 | 3,974 | py | Python | tests/test_manager.py | YohannL/KataMarkovChain | b4383791b5cdf5b58fc30ce5b347e008dfbb7bf6 | [
"Apache-2.0"
] | null | null | null | tests/test_manager.py | YohannL/KataMarkovChain | b4383791b5cdf5b58fc30ce5b347e008dfbb7bf6 | [
"Apache-2.0"
] | null | null | null | tests/test_manager.py | YohannL/KataMarkovChain | b4383791b5cdf5b58fc30ce5b347e008dfbb7bf6 | [
"Apache-2.0"
] | null | null | null | """
Project: katamarkovchain
Module: tests
file: test_manager.py
Class: TestManager
"""
from katamarkovchain.core.manager import Manager
from katamarkovchain.core.markovchain import MarkovChain
class TestManager:
"""
pytest class with all tests
"""
@staticmethod
def test_create_new_mv():
"""
Test the creation of one markov chain
:return:
"""
manager = Manager()
new_mv = manager.create_new_mv("test")
assert new_mv is not None
assert new_mv.word == "test"
@staticmethod
def test_create_unique_new_mv():
"""
Test the unicity of two markov chain objects with the same word
:return:
"""
manager = Manager()
new_mv_1 = manager.create_new_mv("test")
new_mv_2 = manager.create_new_mv("test")
assert new_mv_1 is not None
assert new_mv_1.word == "test"
assert new_mv_2 is not None
assert new_mv_2.word == "test"
assert new_mv_1 is new_mv_2
@staticmethod
def test_manager_run_called_creation(mocker):
"""
        run() forwards the input text to creation_all_mv
:return:
"""
manager = Manager()
mocker.patch.object(manager, 'creation_all_mv')
manager.run("This is a test.")
# pylint: disable=maybe-no-member
manager.creation_all_mv.assert_called_with("This is a test.")
@staticmethod
def test_manager_run_called_generator_if_link_created(mocker):
"""
        run() triggers generator_text once the chain links are created
:return:
"""
manager = Manager()
mocker.patch.object(manager, 'generator_text')
manager.run("This is a test.")
# pylint: disable=maybe-no-member
manager.generator_text.assert_called_with(0)
@staticmethod
def test_manager_run_no_called_generator_if_links_no_created(mocker):
"""
test
:return:
"""
manager = Manager()
mocker.patch.object(manager, 'generator_text')
manager.run(None)
# pylint: disable=maybe-no-member
manager.generator_text.assert_not_called()
@staticmethod
def test_manager_run_no_called_generator():
"""
        generator_text() reproduces the sentence encoded in the chain
:return:
"""
manager = Manager()
mv_start = MarkovChain(None)
mv_this = MarkovChain("this")
mv_is = MarkovChain("is")
mv_a = MarkovChain("a")
mv_test = MarkovChain("test")
mv_point = MarkovChain(".")
mv_start.add_transition(mv_this)
mv_this.add_transition(mv_is)
mv_is.add_transition(mv_a)
mv_a.add_transition(mv_test)
mv_test.add_transition(mv_point)
mv_point.add_transition(mv_start)
manager.markovchainlist[None] = mv_start
manager.markovchainlist[mv_this.word] = mv_this
manager.markovchainlist[mv_is.word] = mv_is
manager.markovchainlist[mv_a.word] = mv_a
manager.markovchainlist[mv_test.word] = mv_test
manager.markovchainlist[mv_point.word] = mv_point
generated_text = manager.generator_text(1)
assert generated_text == "this is a test."
@staticmethod
def test_creation_all_mv_called_splitter(mocker):
"""
test creation_all_mv_called_splitter function
:return:
"""
manager = Manager()
sentencesplitter = manager.sentencesplitter
mocker.patch.object(sentencesplitter, 'run')
manager.creation_all_mv("This is a test.")
# pylint: disable=maybe-no-member
sentencesplitter.run.assert_called_with("This is a test.")
@staticmethod
def test_creation_all_mv_called_create_new_mv(mocker):
"""
test creation_all_mv_called_create_new_mv function
:return:
"""
manager = Manager()
mocker.patch.object(manager, 'create_new_mv')
manager.creation_all_mv("This is a test.")
# pylint: disable=maybe-no-member
manager.create_new_mv.assert_called()
| 29.879699 | 73 | 0.630599 |
37f992c3ed89483e54990af0f9c30d56f1d3edee | 758 | py | Python | Topological Material/SSH model/codes/SSH-band-PBC.py | Hiloxik/Quantum-World | 00d3407cf3810252880af044e5954a30dc35db38 | [
"MIT"
] | null | null | null | Topological Material/SSH model/codes/SSH-band-PBC.py | Hiloxik/Quantum-World | 00d3407cf3810252880af044e5954a30dc35db38 | [
"MIT"
] | null | null | null | Topological Material/SSH model/codes/SSH-band-PBC.py | Hiloxik/Quantum-World | 00d3407cf3810252880af044e5954a30dc35db38 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
v=1 #intercell hopping
w=0.2 #betweencell hopping
N=8 #number of unit cells
#build the PBC Hamiltonian (periodic wrap-around terms are added below)
hamiltonian = np.zeros((2*N,2*N))
for i in range(0,2*N,2):
hamiltonian[i,i+1] = v
hamiltonian[i+1,i] = v
for i in range(1,2*N-1,2):
hamiltonian[i,i+1] = w
hamiltonian[i+1,i] = w
hamiltonian[0,2*N-1] = w
hamiltonian[2*N-1,0] = w
#solve the eigen-problem
eigenvalue, eigenvector = np.linalg.eig(hamiltonian)
eigenvalue.sort()
k = np.arange(0,2*N)
#draw the spectrum
plt.scatter(k, eigenvalue)
plt.xlabel("eigenstate", fontdict={'size': 16})
plt.ylabel("energy", fontdict={'size':16})
plt.title("PBC", fontdict={'size': 20})
plt.show() | 24.451613 | 54 | 0.655673 |
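As a cross-check (an editor-added aside, not part of the script): in momentum space the SSH ring has the closed-form spectrum E(k) = +/-|v + w*exp(ik)| at the allowed momenta k = 2*pi*m/N, which the diagonalization above should reproduce:

import numpy as np

v, w, N = 1, 0.2, 8
k = 2 * np.pi * np.arange(N) / N
bands = np.abs(v + w * np.exp(1j * k))
analytic = np.sort(np.concatenate([bands, -bands]))
print(analytic)  # should match the sorted eigenvalues of the 2N x 2N ring Hamiltonian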
f9f62eae97dc913a9ecc49367a285142bb2fb440 | 15,067 | py | Python | BestMVarMASTER.py | jdegene/BioStar | 9473c1a403abbe5caf49ed2277d278c6cce47dca | [
"MIT"
] | null | null | null | BestMVarMASTER.py | jdegene/BioStar | 9473c1a403abbe5caf49ed2277d278c6cce47dca | [
"MIT"
] | null | null | null | BestMVarMASTER.py | jdegene/BioStar | 9473c1a403abbe5caf49ed2277d278c6cce47dca | [
"MIT"
] | null | null | null |
#MASTER
#Calls MVarSlave and runs 4 instances of it at a time.
#For every area the program fits the coefficients of a linear multivariate
#regression for every possible combination (order ignored) of 11 independent
#input variables. For the best model it writes out the FL_NR, the F value,
#the p value, mRs, aRs and the RunNo. The RunNo states in which of the
#possible runs the highest F value was reached (max 2047).
import os, re, rpy2
import linecache
import StringIO
import itertools
import subprocess
import time
from subprocess import Popen
import rpy2.robjects as robjects
r = robjects.r
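# Editor's sketch (not part of the original pipeline): the "max 2047" runs named in
# the header are the non-empty, order-independent subsets of the 11 predictors.
_n_vars = 11  # helper names prefixed with "_" are hypothetical, used only for this check
_n_models = sum(1 for r in range(1, _n_vars + 1)
                for _ in itertools.combinations(range(_n_vars), r))
assert _n_models == 2 ** _n_vars - 1 == 2047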
#Define folders
dirTxt = ".../TextPlants/" # Yield value folder
dirTxt2 = "D:/Test/TextClim/" # Climate value folder
dirOut = "D:/Test/OutputM/"
#Windows temp folder inside the user's profile directory
tmpFol = ".../appdata/local/temp/"
#Define paths for the subprocess calls
python_path = "C:/Python27/ArcGIS10.1/Python.exe"
python_script = ".../BestMVarSLAVE.py"
#Paths of the input .csv files
xbarley = dirTxt + "barley.csv"
xmaize_f = dirTxt + "maize_f.csv"
xmaize_m = dirTxt + "maize_m.csv"
xmaize_s = dirTxt + "maize_s.csv"
xrye = dirTxt + "rye.csv"
xsflower = dirTxt + "sflower.csv"
xsorghum = dirTxt + "sorghum.csv"
xswheat = dirTxt + "swheat.csv"
xtriti = dirTxt + "triti.csv"
xwheat = dirTxt + "wheat.csv"
xprecAut = dirTxt2 + "PrecAutumn.csv"
xprecSpr = dirTxt2 + "PrecSpring.csv"
xprecSum = dirTxt2 + "PrecSummer.csv"
xprecWin = dirTxt2 + "PrecWinter.csv"
xprecYear = dirTxt2 + "PrecYear.csv"
xtempAut = dirTxt2 + "TempAutumn.csv"
xtempSpr = dirTxt2 + "TempSpring.csv"
xtempSum = dirTxt2 + "TempSummer.csv"
xtempWin = dirTxt2 + "TempWinter.csv"
xtempYear = dirTxt2 + "TempYear.csv"
#Define lists
plants = ("barley","maize_f","maize_m", "maize_s", "rye", "sflower", "sorghum", "swheat",
"triti", "wheat")
xplants = (xbarley, xmaize_f, xmaize_m, xmaize_s, xrye, xsflower, xsorghum, xswheat,
xtriti, xwheat)
xinClimList = [xprecAut, xprecSpr, xprecSum, xprecWin, xprecYear, xtempAut, xtempSpr,
xtempSum, xtempWin, xtempYear]
co2 = [370,373,375,376.875,378.75,380.625,382.5,384.375,386.25,388.125,390,392.5,395,397.5,
400,402.5,405,407.5,410,412.5,415,418.5,422,425.5,429,432.5,436,439.5,443,446.5,450,
454.5,459,463.5,468,472.5,477,481.5,486,490.5,495,499.5,504,508.5,513,517.5,522,526.5,
531,535.5,540,543.5,547,550.5,554,557.5,561,564.5,568,571.5,575,578.5,582,585.5,589,
592.5,596,599.5,603,606.5,610,614,618,622,626,630,634,638,642,646,650,653,656,659,662,
665,668,671,674,677,680,683,686,689,692,695,698,701,704]
# Create output files for each of the 4 runs (with a single shared file, write errors may occur)
for plantNo in plants:
w1 = open(dirOut + plantNo + "1p50.txt", "a")
w1.write("FL_NR;Fstat;pValue;Rs;aRs;Run\n")
w2 = open(dirOut + plantNo + "2p50.txt", "a")
w2.write("FL_NR;Fstat;pValue;Rs;aRs;Run\n")
w3 = open(dirOut + plantNo + "3p50.txt", "a")
w3.write("FL_NR;Fstat;pValue;Rs;aRs;Run\n")
w4 = open(dirOut + plantNo + "4p50.txt", "a")
w4.write("FL_NR;Fstat;pValue;Rs;aRs;Run\n")
w1.close()
w2.close()
w3.close()
w4.close()
p = dirTxt + plantNo + ".csv"
sx0, sx1, sx2, sx3, sx4, sx5, sx6, sx7, sx8, sx9, ee = ([] for i in range(11))
    #Load all climate data into the cache
for i in range(2,90214):
sx0.append(linecache.getline(xinClimList[0], i))
sx1.append(linecache.getline(xinClimList[1], i))
sx2.append(linecache.getline(xinClimList[2], i))
sx3.append(linecache.getline(xinClimList[3], i))
sx4.append(linecache.getline(xinClimList[4], i))
sx5.append(linecache.getline(xinClimList[5], i))
sx6.append(linecache.getline(xinClimList[6], i))
sx7.append(linecache.getline(xinClimList[7], i))
sx8.append(linecache.getline(xinClimList[8], i))
sx9.append(linecache.getline(xinClimList[9], i))
for i in range(2,91014):
ee.append(linecache.getline(p, i))
    # To reduce computation time, only a subset of the roughly 90,000 areas is calculated
for i in range(179,82373,176):
von = 1
bis = 51
        #Delete tmp* files from the temp folder
try:
for files in os.listdir(tmpFol):
if files[:3] == "tmp":
os.remove(tmpFol + files)
else:
continue
except:
pass
        #####################PROCESS 1#############################
        #Current area number FL_NR
        curFLNRp1 = sx0[i][0:6]
        #The climate .csv and the plant .csv hold different numbers of entries, so the plant
        #entry is picked first; here the climate values are searched for the current FL_NR
for k in range(91012):
if ee[k][0:6] == curFLNRp1:
eekp1 = ee[k]
else:
continue
sxs0p1 = re.split(';|\n', sx0[i])
sxs1p1 = re.split(';|\n', sx1[i])
sxs2p1 = re.split(';|\n', sx2[i])
sxs3p1 = re.split(';|\n', sx3[i])
sxs4p1 = re.split(';|\n', sx4[i])
for k in range(90212):
if sx5[k][0:6] == curFLNRp1:
sxs5p1 = re.split(';|\n', sx5[k])
sxs6p1 = re.split(';|\n', sx6[k])
sxs7p1 = re.split(';|\n', sx7[k])
sxs8p1 = re.split(';|\n', sx8[k])
sxs9p1 = re.split(';|\n', sx9[k])
else:
continue
eesp1 = re.split(';|\n', eekp1)
        #####################PROCESS 2#############################
        #Current area number FL_NR
        curFLNRp2 = sx0[i-44][0:6]
        #The climate .csv and the plant .csv hold different numbers of entries, so the plant
        #entry is picked first; here the climate values are searched for the current FL_NR
for k in range(91012):
if ee[k][0:6] == curFLNRp2:
eekp2 = ee[k]
else:
continue
sxs0p2 = re.split(';|\n', sx0[i-44])
sxs1p2 = re.split(';|\n', sx1[i-44])
sxs2p2 = re.split(';|\n', sx2[i-44])
sxs3p2 = re.split(';|\n', sx3[i-44])
sxs4p2 = re.split(';|\n', sx4[i-44])
for k in range(90212):
if sx5[k][0:6] == curFLNRp2:
sxs5p2 = re.split(';|\n', sx5[k])
sxs6p2 = re.split(';|\n', sx6[k])
sxs7p2 = re.split(';|\n', sx7[k])
sxs8p2 = re.split(';|\n', sx8[k])
sxs9p2 = re.split(';|\n', sx9[k])
else:
continue
eesp2 = re.split(';|\n', eekp2)
        #####################PROCESS 3#############################
        #Current area number FL_NR
        curFLNRp3 = sx0[i-88][0:6]
        #The climate .csv and the plant .csv hold different numbers of entries, so the plant
        #entry is picked first; here the climate values are searched for the current FL_NR
for k in range(91012):
if ee[k][0:6] == curFLNRp3:
eekp3 = ee[k]
else:
continue
sxs0p3 = re.split(';|\n', sx0[i-88])
sxs1p3 = re.split(';|\n', sx1[i-88])
sxs2p3 = re.split(';|\n', sx2[i-88])
sxs3p3 = re.split(';|\n', sx3[i-88])
sxs4p3 = re.split(';|\n', sx4[i-88])
for k in range(90212):
if sx5[k][0:6] == curFLNRp3:
sxs5p3 = re.split(';|\n', sx5[k])
sxs6p3 = re.split(';|\n', sx6[k])
sxs7p3 = re.split(';|\n', sx7[k])
sxs8p3 = re.split(';|\n', sx8[k])
sxs9p3 = re.split(';|\n', sx9[k])
else:
continue
eesp3 = re.split(';|\n', eekp3)
        #####################PROCESS 4#############################
        #Current area number FL_NR
        curFLNRp4 = sx0[i-132][0:6]
        #The climate .csv and the plant .csv hold different numbers of entries, so the plant
        #entry is picked first; here the climate values are searched for the current FL_NR
for k in range(91012):
if ee[k][0:6] == curFLNRp4:
eekp4 = ee[k]
else:
continue
sxs0p4 = re.split(';|\n', sx0[i-132])
sxs1p4 = re.split(';|\n', sx1[i-132])
sxs2p4 = re.split(';|\n', sx2[i-132])
sxs3p4 = re.split(';|\n', sx3[i-132])
sxs4p4 = re.split(';|\n', sx4[i-132])
for k in range(90212):
if sx5[k][0:6] == curFLNRp4:
sxs5p4 = re.split(';|\n', sx5[k])
sxs6p4 = re.split(';|\n', sx6[k])
sxs7p4 = re.split(';|\n', sx7[k])
sxs8p4 = re.split(';|\n', sx8[k])
sxs9p4 = re.split(';|\n', sx9[k])
else:
continue
eesp4 = re.split(';|\n', eekp4)
        #Convert all lists to strings, since only strings can be passed to the subprocesses
co2S = ''
for kk in co2[von-1:bis-1]:
co2S += '~~' + str(kk)
eesp1S = ''
for kk in eesp1[von:bis]:
eesp1S += '~~' + kk
sxs0p1S = ''
for kk in sxs0p1[von:bis]:
sxs0p1S += '~~' + kk
sxs1p1S = ''
for kk in sxs1p1[von:bis]:
sxs1p1S += '~~' + kk
sxs2p1S = ''
for kk in sxs2p1[von:bis]:
sxs2p1S += '~~' + kk
sxs3p1S = ''
for kk in sxs3p1[von:bis]:
sxs3p1S += '~~' + kk
sxs4p1S = ''
for kk in sxs4p1[von:bis]:
sxs4p1S += '~~' + kk
sxs5p1S = ''
for kk in sxs5p1[von:bis]:
sxs5p1S += '~~' + kk
sxs6p1S = ''
for kk in sxs6p1[von:bis]:
sxs6p1S += '~~' + kk
sxs7p1S = ''
for kk in sxs7p1[von:bis]:
sxs7p1S += '~~' + kk
sxs8p1S = ''
for kk in sxs8p1[von:bis]:
sxs8p1S += '~~' + kk
sxs9p1S = ''
for kk in sxs9p1[von:bis]:
sxs9p1S += '~~' + kk
eesp2S = ''
for kk in eesp2[von:bis]:
eesp2S += '~~' + kk
sxs0p2S = ''
for kk in sxs0p2[von:bis]:
sxs0p2S += '~~' + kk
sxs1p2S = ''
for kk in sxs1p2[von:bis]:
sxs1p2S += '~~' + kk
sxs2p2S = ''
for kk in sxs2p2[von:bis]:
sxs2p2S += '~~' + kk
sxs3p2S = ''
for kk in sxs3p2[von:bis]:
sxs3p2S += '~~' + kk
sxs4p2S = ''
for kk in sxs4p2[von:bis]:
sxs4p2S += '~~' + kk
sxs5p2S = ''
for kk in sxs5p2[von:bis]:
sxs5p2S += '~~' + kk
sxs6p2S = ''
for kk in sxs6p2[von:bis]:
sxs6p2S += '~~' + kk
sxs7p2S = ''
for kk in sxs7p2[von:bis]:
sxs7p2S += '~~' + kk
sxs8p2S = ''
for kk in sxs8p2[von:bis]:
sxs8p2S += '~~' + kk
sxs9p2S = ''
for kk in sxs9p2[von:bis]:
sxs9p2S += '~~' + kk
eesp3S = ''
for kk in eesp3[von:bis]:
eesp3S += '~~' + kk
sxs0p3S = ''
for kk in sxs0p3[von:bis]:
sxs0p3S += '~~' + kk
sxs1p3S = ''
for kk in sxs1p3[von:bis]:
sxs1p3S += '~~' + kk
sxs2p3S = ''
for kk in sxs2p3[von:bis]:
sxs2p3S += '~~' + kk
sxs3p3S = ''
for kk in sxs3p3[von:bis]:
sxs3p3S += '~~' + kk
sxs4p3S = ''
for kk in sxs4p3[von:bis]:
sxs4p3S += '~~' + kk
sxs5p3S = ''
for kk in sxs5p3[von:bis]:
sxs5p3S += '~~' + kk
sxs6p3S = ''
for kk in sxs6p3[von:bis]:
sxs6p3S += '~~' + kk
sxs7p3S = ''
for kk in sxs7p3[von:bis]:
sxs7p3S += '~~' + kk
sxs8p3S = ''
for kk in sxs8p3[von:bis]:
sxs8p3S += '~~' + kk
sxs9p3S = ''
for kk in sxs9p3[von:bis]:
sxs9p3S += '~~' + kk
eesp4S = ''
for kk in eesp4[von:bis]:
eesp4S += '~~' + kk
sxs0p4S = ''
for kk in sxs0p4[von:bis]:
sxs0p4S += '~~' + kk
sxs1p4S = ''
for kk in sxs1p4[von:bis]:
sxs1p4S += '~~' + kk
sxs2p4S = ''
for kk in sxs2p4[von:bis]:
sxs2p4S += '~~' + kk
sxs3p4S = ''
for kk in sxs3p4[von:bis]:
sxs3p4S += '~~' + kk
sxs4p4S = ''
for kk in sxs4p4[von:bis]:
sxs4p4S += '~~' + kk
sxs5p4S = ''
for kk in sxs5p4[von:bis]:
sxs5p4S += '~~' + kk
sxs6p4S = ''
for kk in sxs6p4[von:bis]:
sxs6p4S += '~~' + kk
sxs7p4S = ''
for kk in sxs7p4[von:bis]:
sxs7p4S += '~~' + kk
sxs8p4S = ''
for kk in sxs8p4[von:bis]:
sxs8p4S += '~~' + kk
sxs9p4S = ''
for kk in sxs9p4[von:bis]:
sxs9p4S += '~~' + kk
p1 = subprocess.Popen([python_path, python_script, eesp1S, sxs0p1S, sxs1p1S,
sxs2p1S, sxs3p1S, sxs4p1S, sxs5p1S, sxs6p1S,
sxs7p1S, sxs8p1S, sxs9p1S, co2S, dirOut, dirTxt,
dirTxt2, "1", plantNo, tmpFol, curFLNRp1])
p2 = subprocess.Popen([python_path, python_script, eesp2S, sxs0p2S, sxs1p2S,
sxs2p2S, sxs3p2S, sxs4p2S, sxs5p2S, sxs6p2S,
sxs7p2S, sxs8p2S, sxs9p2S, co2S, dirOut, dirTxt,
dirTxt2, "2", plantNo, tmpFol, curFLNRp2])
p3 = subprocess.Popen([python_path, python_script, eesp3S, sxs0p3S, sxs1p3S,
sxs2p3S, sxs3p3S, sxs4p3S, sxs5p3S, sxs6p3S,
sxs7p3S, sxs8p3S, sxs9p3S, co2S, dirOut, dirTxt,
dirTxt2, "3", plantNo, tmpFol, curFLNRp3])
p4 = subprocess.Popen([python_path, python_script, eesp4S, sxs0p4S, sxs1p4S,
sxs2p4S, sxs3p4S, sxs4p4S, sxs5p4S, sxs6p4S,
sxs7p4S, sxs8p4S, sxs9p4S, co2S, dirOut, dirTxt,
dirTxt2, "4", plantNo, tmpFol, curFLNRp4])
print "Done " + curFLNRp1 + " " + curFLNRp2 + " " + curFLNRp3 + " " + curFLNRp4 + " at ", time.asctime()[11:19]
        Popen.wait(p1) #Wait until p1 has finished
        Popen.wait(p2) #Wait until p2 has finished
        Popen.wait(p3) #Wait until p3 has finished
        Popen.wait(p4) #Wait until p4 has finished
linecache.clearcache()
| 30.561866 | 119 | 0.498175 |
a1b1f7cd73bda907479459969ad2127f2876f38b | 467 | py | Python | metaopt/objective/integer/slow/explicit/f.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 8 | 2015-02-02T21:42:23.000Z | 2019-06-30T18:12:43.000Z | metaopt/objective/integer/slow/explicit/f.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 4 | 2015-09-24T14:12:38.000Z | 2021-12-08T22:42:52.000Z | metaopt/objective/integer/slow/explicit/f.py | cigroup-ol/metaopt | 6dfd5105d3c6eaf00f96670175cae16021069514 | [
"BSD-3-Clause"
] | 6 | 2015-02-27T12:35:33.000Z | 2020-10-15T21:04:02.000Z | # -*- coding: utf-8 -*-
"""
Slow function with integer parameters and explicit maximization.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Standard Library
from time import sleep
# First Party
from metaopt.core.paramspec.util import param
from metaopt.core.returnspec.util.decorator import maximize
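# The decorators declare a single integer search parameter x in [0, 10] and
# tell the optimizer to maximize the returned value y; since f(x) = x, the
# optimum lies at x == 10, and sleep(0.1) slows down every evaluation.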
@maximize("y")
@param.int("x", interval=[0, 10])
def f(x):
sleep(0.1)
return x
| 21.227273 | 67 | 0.738758 |
d01f4b109067e97b2c32f71f39d65a8fe557d47f | 1,335 | py | Python | midgard_client/test/test_specification_api.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | midgard_client/test/test_specification_api.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | midgard_client/test/test_specification_api.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Midgard Public API
The Midgard Public API queries THORChain and any chains linked via the Bifröst and prepares information about the network to be readily available for public users. The API parses transaction event data from THORChain and stores them in a time-series database to make time-dependent queries easy. Midgard does not hold critical information. To interact with BEPSwap and Asgardex, users should query THORChain directly. # noqa: E501
OpenAPI spec version: 2.5.12
Contact: devs@thorchain.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import midgard_client
from midgard_client.api.specification_api import SpecificationApi # noqa: E501
from midgard_client.rest import ApiException
class TestSpecificationApi(unittest.TestCase):
"""SpecificationApi unit test stubs"""
def setUp(self):
self.api = SpecificationApi() # noqa: E501
def tearDown(self):
pass
def test_get_docs(self):
"""Test case for get_docs
Documentation # noqa: E501
"""
pass
def test_get_swagger(self):
"""Test case for get_swagger
Swagger File # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 27.8125 | 435 | 0.71161 |
f436be7b736705fdf8f1bd31e94ca23b8faee38e | 44,256 | py | Python | models/progress.py | osrf/cloudsim-ed-web | f26da401d36544ceb48032bb7e8f8c326201e11f | [
"Apache-2.0"
] | 1 | 2015-04-15T08:38:08.000Z | 2015-04-15T08:38:08.000Z | models/progress.py | osrf/cloudsim-ed-web | f26da401d36544ceb48032bb7e8f8c326201e11f | [
"Apache-2.0"
] | 1 | 2021-06-08T09:49:12.000Z | 2021-06-08T09:49:12.000Z | models/progress.py | osrf/cloudsim-ed-web | f26da401d36544ceb48032bb7e8f8c326201e11f | [
"Apache-2.0"
] | 3 | 2015-10-25T12:39:07.000Z | 2021-06-08T09:47:34.000Z | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student progress trackers."""
__author__ = 'Sean Lip (sll@google.com)'
import datetime
import logging
import os
from tools import verify
import courses
from models import QuestionDAO
from models import QuestionGroupDAO
from models import StudentPropertyEntity
import transforms
class UnitLessonCompletionTracker(object):
"""Tracks student completion for a unit/lesson-based linear course."""
PROPERTY_KEY = 'linear-course-completion'
# Here are representative examples of the keys for the various entities
# used in this class:
# Unit 1: u.1
# Unit 1, Lesson 1: u.1.l.1
# Unit 1, Lesson 1, Activity 0: u.1.l.1.a.0
# Unit 1, Lesson 1, Activity 0, Block 4: u.1.l.1.a.0.b.4
# Assessment 'Pre': s.Pre
# At the moment, we do not divide assessments into blocks.
#
# The following keys were added in v1.5:
# Unit 1, Lesson 1, HTML: u.1.l.1.h.0
# Unit 1, Lesson 1, HTML, Component with instanceid id: u.1.l.1.h.0.c.id
#
# The number after the 'h' and 'a' codes is always zero, since a lesson may
# have at most one HTML body and one activity.
#
# IMPORTANT NOTE: The values of the keys mean different things depending on
# whether the entity is a composite entity or not.
# If it is a composite entity (unit, lesson, activity), then the value is
# - 0 if none of its sub-entities has been completed
# - 1 if some, but not all, of its sub-entities have been completed
# - 2 if all its sub-entities have been completed.
# If it is not a composite entity (i.e. block, assessment, component), then
# the value is just the number of times the event has been triggered.
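    #
    # For example, a stored progress value of
    #     {"u.1": 1, "u.1.l.1": 1, "u.1.l.1.a.0": 1, "u.1.l.1.a.0.b.4": 2}
    # means block 4 of the activity in Unit 1 Lesson 1 has been completed
    # twice, while the enclosing activity, lesson and unit are IN_PROGRESS.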
# Constants for recording the state of composite entities.
# TODO(sll): Change these to enums.
NOT_STARTED_STATE = 0
IN_PROGRESS_STATE = 1
COMPLETED_STATE = 2
MULTIPLE_CHOICE = 'multiple choice'
MULTIPLE_CHOICE_GROUP = 'multiple choice group'
QUESTION_GROUP = 'question-group'
QUESTION = 'question'
EVENT_CODE_MAPPING = {
'unit': 'u',
'lesson': 'l',
'activity': 'a',
'html': 'h',
'block': 'b',
'assessment': 's',
'component': 'c',
}
COMPOSITE_ENTITIES = [
EVENT_CODE_MAPPING['unit'],
EVENT_CODE_MAPPING['lesson'],
EVENT_CODE_MAPPING['activity'],
EVENT_CODE_MAPPING['html']
]
# Names of component tags that are tracked for progress calculations.
TRACKABLE_COMPONENTS = frozenset([
'question',
'question-group',
])
def __init__(self, course):
self._course = course
def _get_course(self):
return self._course
def get_activity_as_python(self, unit_id, lesson_id):
"""Gets the corresponding activity as a Python object."""
root_name = 'activity'
course = self._get_course()
activity_text = course.app_context.fs.get(
os.path.join(course.app_context.get_home(),
course.get_activity_filename(unit_id, lesson_id)))
content, noverify_text = verify.convert_javascript_to_python(
activity_text, root_name)
activity = verify.evaluate_python_expression_from_text(
content, root_name, verify.Activity().scope, noverify_text)
return activity
def _get_unit_key(self, unit_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['unit'], unit_id)
def _get_lesson_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id
)
def _get_activity_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0
)
def _get_html_key(self, unit_id, lesson_id):
return '%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0
)
def _get_component_key(self, unit_id, lesson_id, component_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['html'], 0,
self.EVENT_CODE_MAPPING['component'], component_id
)
def _get_block_key(self, unit_id, lesson_id, block_id):
return '%s.%s.%s.%s.%s.%s.%s.%s' % (
self.EVENT_CODE_MAPPING['unit'], unit_id,
self.EVENT_CODE_MAPPING['lesson'], lesson_id,
self.EVENT_CODE_MAPPING['activity'], 0,
self.EVENT_CODE_MAPPING['block'], block_id
)
def _get_assessment_key(self, assessment_id):
return '%s.%s' % (self.EVENT_CODE_MAPPING['assessment'], assessment_id)
def get_entity_type_from_key(self, progress_entity_key):
return progress_entity_key.split('.')[-2]
def determine_if_composite_entity(self, progress_entity_key):
return self.get_entity_type_from_key(
progress_entity_key) in self.COMPOSITE_ENTITIES
def get_valid_component_ids(self, unit_id, lesson_id):
"""Returns a list of component ids representing trackable components."""
question_component_ids = [cpt['instanceid'] for cpt in (
self._get_course().get_question_components(
unit_id, lesson_id)) if cpt['instanceid']]
question_group_component_ids = [cpt['instanceid'] for cpt in (
self._get_course().get_question_group_components(
unit_id, lesson_id)) if cpt['instanceid']]
return question_component_ids + question_group_component_ids
def get_valid_block_ids(self, unit_id, lesson_id):
"""Returns a list of block ids representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[0] for block in valid_blocks_data]
def get_valid_blocks(self, unit_id, lesson_id):
"""Returns a list of blocks representing interactive activities."""
valid_blocks_data = self._get_valid_blocks_data(unit_id, lesson_id)
return [block[1] for block in valid_blocks_data]
def _get_valid_blocks_data(self, unit_id, lesson_id):
"""Returns a list of (b_id, block) representing trackable activities."""
valid_blocks = []
# Check if activity exists before calling get_activity_as_python.
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
if unit and lesson and lesson.activity:
# Get the activity corresponding to this unit/lesson combination.
activity = self.get_activity_as_python(unit_id, lesson_id)
for block_id in range(len(activity['activity'])):
block = activity['activity'][block_id]
if isinstance(block, dict):
valid_blocks.append((block_id, block))
return valid_blocks
def get_id_to_questions_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in lessons. The keys of this
dict are question ids, and the corresponding values are dicts, each
containing the following five key-value pairs:
- answers: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answers' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_questions = {}
for unit in self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT):
unit_id = unit.unit_id
for lesson in self._get_course().get_lessons(unit_id):
lesson_id = lesson.lesson_id
# Add mapping dicts for questions in old-style activities.
if lesson.activity:
blocks = self._get_valid_blocks_data(unit_id, lesson_id)
for block_index, (block_id, block) in enumerate(blocks):
if block['questionType'] == self.MULTIPLE_CHOICE:
# Old style question.
id_to_questions.update(
self._create_old_style_question_dict(
block, block_id, block_index, unit, lesson))
elif (block['questionType'] ==
self.MULTIPLE_CHOICE_GROUP):
# Old style multiple choice group.
for ind, q in enumerate(block['questionsList']):
id_to_questions.update(
self._create_old_style_question_dict(
q, block_id, block_index, unit,
lesson, index=ind))
# Add mapping dicts for CBv1.5 style questions.
if lesson.objectives:
for cpt in self._get_course().get_question_components(
unit_id, lesson_id):
# CB v1.5 style questions.
id_to_questions.update(
self._create_v15_lesson_question_dict(
cpt, unit, lesson))
for cpt in self._get_course().get_question_group_components(
unit_id, lesson_id):
# CB v1.5 style question groups.
id_to_questions.update(
self._create_v15_lesson_question_group_dict(
cpt, unit, lesson))
return id_to_questions
def get_id_to_assessments_dict(self):
"""Returns a dict that maps each question to a list of its answers.
Returns:
A dict that represents the questions in assessments. The keys of
this dict are question ids, and the corresponding values are dicts,
each containing the following five key-value pairs:
- answers: a list of 0's with length corresponding to number of
choices a question has.
- location: str. href value of the location of the question in the
course.
- num_attempts: int. Number of attempts for this question. This is
used as the denominator when calculating the average score for a
question. This value may differ from the sum of the elements in
'answers' because of event entities that record an answer but
not a score.
- score: int. Aggregated value of the scores.
- label: str. Human readable identifier for this question.
"""
id_to_assessments = {}
for assessment in self._get_course().get_assessment_list():
if not self._get_course().needs_human_grader(assessment):
assessment_components = self._get_course(
).get_assessment_components(assessment.unit_id)
# CB v1.5 style assessments.
for cpt in assessment_components:
if cpt['cpt_name'] == self.QUESTION_GROUP:
id_to_assessments.update(
self._create_v15_assessment_question_group_dict(
cpt, assessment))
elif cpt['cpt_name'] == self.QUESTION:
id_to_assessments.update(
self._create_v15_assessment_question_dict(
cpt, assessment))
# Old style javascript assessments.
try:
content = self._get_course().get_assessment_content(
assessment)
id_to_assessments.update(
self._create_old_style_assessment_dict(
content['assessment'], assessment))
except AttributeError:
# Assessment file does not exist.
continue
return id_to_assessments
def _get_link_for_assessment(self, assessment_id):
return 'assessment?name=%s' % (assessment_id)
def _get_link_for_activity(self, unit_id, lesson_id):
return 'activity?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _get_link_for_lesson(self, unit_id, lesson_id):
return 'unit?unit=%s&lesson=%s' % (unit_id, lesson_id)
def _create_v15_question_dict(self, q_id, label, link, num_choices):
"""Returns a dict that represents CB v1.5 style question."""
return {
q_id: {
'answer_counts': [0] * num_choices,
'label': label,
'location': link,
'score': 0,
'num_attempts': 0
}
}
def _create_v15_lesson_question_dict(self, cpt, unit, lesson):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'])
label = 'Unit %s Lesson %s, Question %s' % (
unit.index, lesson.index, question.description)
link = self._get_link_for_lesson(unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_lesson_question_group_dict(self, cpt, unit, lesson):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 'u.%s.l.%s.c.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, cpt['instanceid'], ind)
label = ('Unit %s Lesson %s, Question Group %s Question %s'
% (unit.index, lesson.index,
question_group.description,
question.description))
link = self._get_link_for_lesson(
unit.unit_id, lesson.lesson_id)
num_choices = len(question.dict['choices'])
questions.update(self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_group_dict(self, cpt, assessment):
try:
question_group = QuestionGroupDAO.load(cpt['qgid'])
questions = {}
for ind, quid in enumerate(question_group.question_ids):
question = QuestionDAO.load(quid)
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s.i.%s' % (
assessment.unit_id, cpt['instanceid'], ind)
label = '%s, Question Group %s Question %s' % (
assessment.title, question_group.description,
question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
questions.update(
self._create_v15_question_dict(
q_id, label, link, num_choices))
return questions
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_v15_assessment_question_dict(self, cpt, assessment):
try:
question = QuestionDAO.load(cpt['quid'])
if question.type == question.MULTIPLE_CHOICE:
q_id = 's.%s.c.%s' % (assessment.unit_id, cpt['instanceid'])
label = '%s, Question %s' % (
assessment.title, question.description)
link = self._get_link_for_assessment(assessment.unit_id)
num_choices = len(question.dict['choices'])
return self._create_v15_question_dict(
q_id, label, link, num_choices)
else:
return {}
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, cpt)
return {}
def _create_old_style_question_dict(self, block, block_id, block_index,
unit, lesson, index=None):
try:
if index is not None:
# Question is in a multiple choice group.
b_id = 'u.%s.l.%s.b.%s.i.%s' % (
unit.unit_id, lesson.lesson_id, block_id, index)
label = 'Unit %s Lesson %s Activity, Item %s Part %s' % (
unit.index, lesson.index, block_index + 1, index + 1)
else:
b_id = 'u.%s.l.%s.b.%s' % (
unit.unit_id, lesson.lesson_id, block_id)
label = 'Unit %s Lesson %s Activity, Item %s' % (
unit.index, lesson.index, block_index + 1)
return {
b_id: {
'answer_counts': [0] * len(block['choices']),
'label': label,
'location': self._get_link_for_activity(
unit.unit_id, lesson.lesson_id),
'score': 0,
'num_attempts': 0
}
}
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, block)
return {}
def _create_old_style_assessment_dict(self, content, assessment):
try:
questions = {}
for ind, question in enumerate(content['questionsList']):
if 'choices' in question:
questions.update(
{
's.%s.i.%s' % (assessment.unit_id, ind): {
'answer_counts': [0] * len(question['choices']),
'label': '%s, Question %s' % (
assessment.title, ind + 1),
'location': self._get_link_for_assessment(
assessment.unit_id),
'score': 0,
'num_attempts': 0
}
}
)
return questions
except Exception as e: # pylint: disable-msg=broad-except
logging.error(
'Failed to process the question data. '
'Error: %s, data: %s', e, content)
return {}
def _update_unit(self, progress, event_key):
"""Updates a unit's progress if all its lessons have been completed."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 2
unit_id = split_event_key[1]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one lesson in this unit has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
# Check if all lessons in this unit have been completed.
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if (self.get_lesson_status(
progress,
unit_id, lesson.lesson_id) != self.COMPLETED_STATE):
return
# Record that all lessons in this unit have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_lesson(self, progress, event_key):
"""Updates a lesson's progress based on the progress of its children."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 4
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one part of this lesson has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
lessons = self._get_course().get_lessons(unit_id)
for lesson in lessons:
if str(lesson.lesson_id) == lesson_id and lesson:
# Is the activity completed?
if (lesson.activity and self.get_activity_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Are all components of the lesson completed?
if (self.get_html_status(
progress, unit_id, lesson_id) != self.COMPLETED_STATE):
return
# Record that all activities in this lesson have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_activity(self, progress, event_key):
"""Updates activity's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
# Record that at least one block in this activity has been completed.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
valid_block_ids = self.get_valid_block_ids(unit_id, lesson_id)
for block_id in valid_block_ids:
if not self.is_block_completed(
progress, unit_id, lesson_id, block_id):
return
# Record that all blocks in this activity have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
def _update_html(self, progress, event_key):
"""Updates html's progress when all interactive blocks are done."""
split_event_key = event_key.split('.')
assert len(split_event_key) == 6
unit_id = split_event_key[1]
lesson_id = split_event_key[3]
if self._get_entity_value(progress, event_key) == self.COMPLETED_STATE:
return
        # Record that at least one component in this lesson body is done.
self._set_entity_value(progress, event_key, self.IN_PROGRESS_STATE)
cpt_ids = self.get_valid_component_ids(unit_id, lesson_id)
for cpt_id in cpt_ids:
if not self.is_component_completed(
progress, unit_id, lesson_id, cpt_id):
return
        # Record that all components in this lesson body have been completed.
self._set_entity_value(progress, event_key, self.COMPLETED_STATE)
UPDATER_MAPPING = {
'activity': _update_activity,
'html': _update_html,
'lesson': _update_lesson,
'unit': _update_unit
}
# Dependencies for recording derived events. The key is the current
# event, and the value is a tuple, each element of which contains:
# - the dependent entity to be updated
# - the transformation to apply to the id of the current event to get the
# id for the derived parent event
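    # For example, a block key 'u.1.l.2.a.0.b.4' yields its parent activity
    # key by dropping the last two segments: 'u.1.l.2.a.0'.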
DERIVED_EVENTS = {
'block': (
{
'entity': 'activity',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
},
),
'activity': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
},
),
'lesson': (
{
'entity': 'unit',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
},
),
'component': (
{
'entity': 'html',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
},
),
'html': (
{
'entity': 'lesson',
'generate_parent_id': (lambda s: '.'.join(s.split('.')[:-2])),
},
),
}
def put_activity_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed an activity."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'activity', self._get_activity_key(unit_id, lesson_id))
def put_html_completed(self, student, unit_id, lesson_id):
"""Records that the given student has completed a lesson page."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
self._put_event(
student, 'html', self._get_html_key(unit_id, lesson_id))
def put_block_completed(self, student, unit_id, lesson_id, block_id):
"""Records that the given student has completed an activity block."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if block_id not in self.get_valid_block_ids(unit_id, lesson_id):
return
self._put_event(
student,
'block',
self._get_block_key(unit_id, lesson_id, block_id)
)
def put_component_completed(self, student, unit_id, lesson_id, cpt_id):
"""Records completion of a component in a lesson body."""
if not self._get_course().is_valid_unit_lesson_id(unit_id, lesson_id):
return
if cpt_id not in self.get_valid_component_ids(unit_id, lesson_id):
return
self._put_event(
student,
'component',
self._get_component_key(unit_id, lesson_id, cpt_id)
)
def put_assessment_completed(self, student, assessment_id):
"""Records that the given student has completed the given assessment."""
if not self._get_course().is_valid_assessment_id(assessment_id):
return
self._put_event(
student, 'assessment', self._get_assessment_key(assessment_id))
def put_activity_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this activity."""
# This method currently exists because we need to mark activities
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_block_ids(unit_id, lesson_id):
self.put_activity_completed(student, unit_id, lesson_id)
def put_html_accessed(self, student, unit_id, lesson_id):
"""Records that the given student has accessed this lesson page."""
# This method currently exists because we need to mark lesson bodies
# without interactive blocks as 'completed' when they are accessed.
if not self.get_valid_component_ids(unit_id, lesson_id):
self.put_html_completed(student, unit_id, lesson_id)
def _put_event(self, student, event_entity, event_key):
"""Starts a cascade of updates in response to an event taking place."""
if student.is_transient or event_entity not in self.EVENT_CODE_MAPPING:
return
progress = self.get_or_create_progress(student)
self._update_event(
student, progress, event_entity, event_key, direct_update=True)
progress.updated_on = datetime.datetime.now()
progress.put()
def _update_event(self, student, progress, event_entity, event_key,
direct_update=False):
"""Updates statistics for the given event, and for derived events.
Args:
student: the student
progress: the StudentProgressEntity for the student
event_entity: the name of the affected entity (unit, lesson, etc.)
event_key: the key for the recorded event
direct_update: True if this event is being updated explicitly; False
if it is being auto-updated.
"""
if direct_update or event_entity not in self.UPDATER_MAPPING:
if event_entity in self.UPDATER_MAPPING:
# This is a derived event, so directly mark it as completed.
self._set_entity_value(
progress, event_key, self.COMPLETED_STATE)
else:
# This is not a derived event, so increment its counter by one.
self._inc(progress, event_key)
else:
self.UPDATER_MAPPING[event_entity](self, progress, event_key)
if event_entity in self.DERIVED_EVENTS:
for derived_event in self.DERIVED_EVENTS[event_entity]:
self._update_event(
student=student,
progress=progress,
event_entity=derived_event['entity'],
event_key=derived_event['generate_parent_id'](event_key),
)
def get_unit_status(self, progress, unit_id):
return self._get_entity_value(progress, self._get_unit_key(unit_id))
def get_lesson_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_lesson_key(unit_id, lesson_id))
def get_activity_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_activity_key(unit_id, lesson_id))
def get_html_status(self, progress, unit_id, lesson_id):
return self._get_entity_value(
progress, self._get_html_key(unit_id, lesson_id))
def get_block_status(self, progress, unit_id, lesson_id, block_id):
return self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
def get_assessment_status(self, progress, assessment_id):
return self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
def is_block_completed(self, progress, unit_id, lesson_id, block_id):
value = self._get_entity_value(
progress, self._get_block_key(unit_id, lesson_id, block_id))
return value is not None and value > 0
def is_component_completed(self, progress, unit_id, lesson_id, cpt_id):
value = self._get_entity_value(
progress, self._get_component_key(unit_id, lesson_id, cpt_id))
return value is not None and value > 0
def is_assessment_completed(self, progress, assessment_id):
value = self._get_entity_value(
progress, self._get_assessment_key(assessment_id))
return value is not None and value > 0
@classmethod
def get_or_create_progress(cls, student):
progress = StudentPropertyEntity.get(student, cls.PROPERTY_KEY)
if not progress:
progress = StudentPropertyEntity.create(
student=student, property_name=cls.PROPERTY_KEY)
progress.put()
return progress
def get_unit_progress(self, student):
"""Returns a dict with the states of each unit."""
if student.is_transient:
return {}
units = self._get_course().get_units()
progress = self.get_or_create_progress(student)
result = {}
for unit in units:
if unit.type == 'A':
result[unit.unit_id] = self.is_assessment_completed(
progress, unit.unit_id)
elif unit.type == 'U':
value = self.get_unit_status(progress, unit.unit_id)
result[unit.unit_id] = value or 0
return result
def get_lesson_progress(self, student, unit_id):
"""Returns a dict saying which lessons in this unit are completed."""
if student.is_transient:
return {}
lessons = self._get_course().get_lessons(unit_id)
progress = self.get_or_create_progress(student)
result = {}
for lesson in lessons:
result[lesson.lesson_id] = {
'html': self.get_html_status(
progress, unit_id, lesson.lesson_id) or 0,
'activity': self.get_activity_status(
progress, unit_id, lesson.lesson_id) or 0,
}
return result
def get_component_progress(self, student, unit_id, lesson_id, cpt_id):
"""Returns the progress status of the given component."""
if student.is_transient:
return 0
progress = self.get_or_create_progress(student)
return self.is_component_completed(
progress, unit_id, lesson_id, cpt_id) or 0
def _get_entity_value(self, progress, event_key):
if not progress.value:
return None
return transforms.loads(progress.value).get(event_key)
def _set_entity_value(self, student_property, key, value):
"""Sets the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
            key: the key whose value should be set
            value: the value to set for this key
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
progress_dict[key] = value
student_property.value = transforms.dumps(progress_dict)
def _inc(self, student_property, key, value=1):
"""Increments the integer value of a student property.
Note: this method does not commit the change. The calling method should
call put() on the StudentPropertyEntity.
Args:
student_property: the StudentPropertyEntity
key: the student property whose value should be incremented
value: the value to increment this property by
"""
try:
progress_dict = transforms.loads(student_property.value)
except (AttributeError, TypeError):
progress_dict = {}
if key not in progress_dict:
progress_dict[key] = 0
progress_dict[key] += value
student_property.value = transforms.dumps(progress_dict)
class ProgressStats(object):
"""Defines the course structure definition for course progress tracking."""
def __init__(self, course):
self._course = course
self._tracker = UnitLessonCompletionTracker(course)
def compute_entity_dict(self, entity, parent_ids):
"""Computes the course structure dictionary.
Args:
entity: str. Represents for which level of entity the dict is being
computed. Valid entity levels are defined as keys to the dict
defined below, COURSE_STRUCTURE_DICT.
parent_ids: list of ids necessary to get children of the current
entity.
Returns:
A nested dictionary representing the structure of the course.
Every other level of the dictionary consists of a key, the label of
the entity level defined by EVENT_CODE_MAPPING in
UnitLessonCompletionTracker, whose value is a dictionary
INSTANCES_DICT. The keys of INSTANCES_DICT are instance_ids of the
corresponding entities, and the values are the entity_dicts of the
instance's children, in addition to a field called 'label'. Label
represents the user-facing name of the entity rather than
its intrinsic id. If one of these values is empty, this means
that the corresponding entity has no children.
Ex:
A Course with the following outlined structure:
Pre Assessment
Unit 1
Lesson 1
Unit 2
will have the following dictionary representation:
{
's': {
1: {
'label': 'Pre Assessment'
}
},
'u': {
2: {
'l': {
3: {
'label': 1
}
},
'label': 1
},
4: {
'label': 2
}
}
'label': 'UNTITLED COURSE'
}
"""
entity_dict = {'label': self._get_label(entity, parent_ids)}
for child_entity, get_children_ids in self.COURSE_STRUCTURE_DICT[
entity]['children']:
child_entity_dict = {}
for child_id in get_children_ids(self, *parent_ids):
new_parent_ids = parent_ids + [child_id]
child_entity_dict[child_id] = self.compute_entity_dict(
child_entity, new_parent_ids)
entity_dict[UnitLessonCompletionTracker.EVENT_CODE_MAPPING[
child_entity]] = child_entity_dict
return entity_dict
def _get_course(self):
return self._course
def _get_unit_ids_of_type_unit(self):
units = self._get_course().get_units_of_type(verify.UNIT_TYPE_UNIT)
return [unit.unit_id for unit in units]
def _get_assessment_ids(self):
assessments = self._get_course().get_assessment_list()
return [a.unit_id for a in assessments]
def _get_lesson_ids(self, unit_id):
lessons = self._get_course().get_lessons(unit_id)
return [lesson.lesson_id for lesson in lessons]
def _get_activity_ids(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
if self._get_course().find_lesson_by_id(unit, lesson_id).activity:
return [0]
return []
def _get_html_ids(self, unused_unit_id, unused_lesson_id):
return [0]
def _get_block_ids(self, unit_id, lesson_id, unused_activity_id):
return self._tracker.get_valid_block_ids(unit_id, lesson_id)
def _get_component_ids(self, unit_id, lesson_id, unused_html_id):
return self._tracker.get_valid_component_ids(unit_id, lesson_id)
def _get_label(self, entity, parent_ids):
return self.ENTITY_TO_HUMAN_READABLE_NAME_DICT[entity](
self, *parent_ids)
def _get_course_label(self):
# pylint: disable-msg=protected-access
return courses.Course.get_environ(self._get_course().app_context)[
'course']['title']
def _get_unit_label(self, unit_id):
unit = self._get_course().find_unit_by_id(unit_id)
return 'Unit %s' % unit.index
def _get_assessment_label(self, unit_id):
assessment = self._get_course().find_unit_by_id(unit_id)
return assessment.title
def _get_lesson_label(self, unit_id, lesson_id):
unit = self._get_course().find_unit_by_id(unit_id)
lesson = self._get_course().find_lesson_by_id(unit, lesson_id)
return lesson.index
def _get_activity_label(self, unit_id, lesson_id, unused_activity_id):
return str('L%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id)))
def _get_html_label(self, unit_id, lesson_id, unused_html_id):
return self._get_activity_label(unit_id, lesson_id, unused_html_id)
def _get_block_label(self, unit_id, lesson_id, unused_activity_id,
block_id):
return str('L%s.%s.%s' % (
self._get_course().find_unit_by_id(unit_id).index,
self._get_lesson_label(unit_id, lesson_id),
block_id))
def _get_component_label(self, unit_id, lesson_id, unused_html_id,
component_id):
return self._get_block_label(
unit_id, lesson_id, unused_html_id, component_id)
    # Outlines the structure of the course. Each key is an entity level, and
    # its value is a dictionary with the following key:
    # 'children': list of tuples. Each tuple pairs the string name of the
    #             child entity (e.g. 'lesson') with a function that returns
    #             the ids of that entity's children. Entities without
    #             children map to an empty list.
    #
COURSE_STRUCTURE_DICT = {
'course': {
'children': [('unit', _get_unit_ids_of_type_unit),
('assessment', _get_assessment_ids)],
},
'unit': {
'children': [('lesson', _get_lesson_ids)],
},
'assessment': {
'children': [],
},
'lesson': {
'children': [('activity', _get_activity_ids),
('html', _get_html_ids)],
},
'activity': {
'children': [('block', _get_block_ids)],
},
'html': {
'children': [('component', _get_component_ids)],
},
'block': {
'children': [],
},
'component': {
'children': [],
}
}
ENTITY_TO_HUMAN_READABLE_NAME_DICT = {
'course': _get_course_label,
'unit': _get_unit_label,
'assessment': _get_assessment_label,
'lesson': _get_lesson_label,
'activity': _get_activity_label,
'html': _get_html_label,
'block': _get_block_label,
'component': _get_component_label
}
| 42.229008 | 80 | 0.592033 |
e50bb01212ca1ed2277bb8a2c81ef2ccca11a570 | 448 | py | Python | train_tracker.py | Aurangazeeb/qwiktools | b9ddaf3cac6ba2a3e26259901de0b1f6dff4410c | [
"Apache-2.0"
] | null | null | null | train_tracker.py | Aurangazeeb/qwiktools | b9ddaf3cac6ba2a3e26259901de0b1f6dff4410c | [
"Apache-2.0"
] | null | null | null | train_tracker.py | Aurangazeeb/qwiktools | b9ddaf3cac6ba2a3e26259901de0b1f6dff4410c | [
"Apache-2.0"
] | null | null | null | import pandas as pd
def track_error(history, track_metric='error'):
    # Append this run's metrics to a module-level DataFrame so repeated
    # calls accumulate into one table; `tracking_df` must be initialised
    # (e.g. to an empty DataFrame) before the first call.
    global tracking_df
train_err = history[track_metric]
val_err = history['val_' + track_metric]
    row = dict(train_error=train_err, val_error=val_err)
    row_df = pd.DataFrame(row, index=[0])
    tracking_df = pd.concat([tracking_df, row_df])
    tracking_df.reset_index(drop=True, inplace=True)
print('History saved') | 44.8 | 67 | 0.736607 |
2e96029ecbeb46cd91cc4e031339aa62b585a229 | 943 | py | Python | murali_recv.py | nmkgm/Cisco_Training_Network_Automation | d188814061c9f648fa8b7485c01c4499bcfa6193 | [
"Apache-2.0"
] | null | null | null | murali_recv.py | nmkgm/Cisco_Training_Network_Automation | d188814061c9f648fa8b7485c01c4499bcfa6193 | [
"Apache-2.0"
] | null | null | null | murali_recv.py | nmkgm/Cisco_Training_Network_Automation | d188814061c9f648fa8b7485c01c4499bcfa6193 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import socket, time
# Checking for functions related to socket
print ([i for i in dir(socket) if 'socket' in i])
# creation of UDP socket
# IPv4 socket will be IPv4 + 2 bytes port
# IPv6 socket will be IPv6 + 2 bytes port
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# For IPv4 For UDP socket
s.bind(("", 8894))
# bind will accept tuple format for IP and port.
'''
This is for sender only.
Enter input message
message = input("enter the data to send")
# need to convet data into byte like string.
msg = message.encode('ascii')
s.sendto(msg,("Target IP", Port number))
s.close()
'''
while True:
Data_received= s.recvfrom(1000) # here 1000 is the buffer size.
print ("The data is ", Data_received[0])
print ("The ADDRESS is ", Data_received[1])
message = input ('Enter reply')
new_msg = message.encode('ascii')
s.sendto(new_msg,Data_received[1])
s.close()
| 24.179487 | 67 | 0.673383 |
3dfda5cb9566369a666a82445ed88e002b491a59 | 75,770 | py | Python | Lib/test/test_types.py | lostbeta/cpython | e9898bf153d26059261ffef11f7643ae991e2a4c | [
"0BSD"
] | 52,316 | 2015-01-01T15:56:25.000Z | 2022-03-31T23:19:01.000Z | Lib/test/test_types.py | lostbeta/cpython | e9898bf153d26059261ffef11f7643ae991e2a4c | [
"0BSD"
] | 25,286 | 2015-03-03T23:18:02.000Z | 2022-03-31T23:17:27.000Z | Lib/test/test_types.py | lostbeta/cpython | e9898bf153d26059261ffef11f7643ae991e2a4c | [
"0BSD"
] | 31,623 | 2015-01-01T13:29:37.000Z | 2022-03-31T19:55:06.000Z | # Python test set -- part 6, built-in types
from test.support import run_with_locale, cpython_only
import collections.abc
from collections import namedtuple
import copy
import gc
import inspect
import pickle
import locale
import sys
import types
import unittest.mock
import weakref
import typing
T = typing.TypeVar("T")
class Example:
pass
class Forward: ...
def clear_typing_caches():
for f in typing._cleanups:
f()
class TypesTests(unittest.TestCase):
def test_truth_values(self):
if None: self.fail('None is true instead of false')
if 0: self.fail('0 is true instead of false')
if 0.0: self.fail('0.0 is true instead of false')
if '': self.fail('\'\' is true instead of false')
if not 1: self.fail('1 is false instead of true')
if not 1.0: self.fail('1.0 is false instead of true')
if not 'x': self.fail('\'x\' is false instead of true')
if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
def f(): pass
class C: pass
x = C()
if not f: self.fail('f is false instead of true')
if not C: self.fail('C is false instead of true')
if not sys: self.fail('sys is false instead of true')
if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
def test_comparisons(self):
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: self.fail('int comparisons failed')
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: self.fail('float comparisons failed')
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: self.fail('string comparisons failed')
if None is None: pass
else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
self.assertRaises(ValueError, float, '5_5\0')
def test_zero_division(self):
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
try: 5 / 0
except ZeroDivisionError: pass
else: self.fail("5 / 0 didn't raise ZeroDivisionError")
try: 5 // 0
except ZeroDivisionError: pass
else: self.fail("5 // 0 didn't raise ZeroDivisionError")
try: 5 % 0
except ZeroDivisionError: pass
else: self.fail("5 % 0 didn't raise ZeroDivisionError")
def test_numeric_types(self):
if 0 != 0.0 or 1 != 1.0 or -1 != -1.0:
self.fail('int/float value not equal')
# calling built-in types without argument must return 0
if int() != 0: self.fail('int() does not return 0')
if float() != 0.0: self.fail('float() does not return 0.0')
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
        else: self.fail('int() does not truncate properly')
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
self.assertEqual('%g' % 1.0, '1')
self.assertEqual('%#g' % 1.0, '1.00000')
def test_normal_integers(self):
# Ensure the first 256 integers are shared
a = 256
b = 128*2
if a is not b: self.fail('256 is not shared')
if 12 + 24 != 36: self.fail('int op')
if 12 + (-24) != -12: self.fail('int op')
if (-12) + 24 != 12: self.fail('int op')
if (-12) + (-24) != -36: self.fail('int op')
if not 12 < 24: self.fail('int op')
if not -24 < -12: self.fail('int op')
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
self.fail('int mul commutativity')
# And another.
m = -sys.maxsize - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
if type(prod) is not int:
self.fail("expected type(prod) to be int, not %r" %
type(prod))
# Check for unified integral type
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not int:
self.fail("expected type(%r) to be int, not %r" %
(prod, type(prod)))
# Check for unified integral type
m = sys.maxsize
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not int:
self.fail("expected type(%r) to be int, not %r" %
(prod, type(prod)))
x = sys.maxsize
self.assertIsInstance(x + 1, int,
"(sys.maxsize + 1) should have returned int")
self.assertIsInstance(-x - 1, int,
"(-sys.maxsize - 1) should have returned int")
self.assertIsInstance(-x - 2, int,
"(-sys.maxsize - 2) should have returned int")
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
def test_strings(self):
if len('') != 0: self.fail('len(\'\')')
if len('a') != 1: self.fail('len(\'a\')')
if len('abcdef') != 6: self.fail('len(\'abcdef\')')
if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
if 0*'abcde' != '': self.fail('string repetition 0*')
if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: self.fail('in/not in string')
x = 'x'*103
if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
#extended slices for strings
a = '0123456789'
self.assertEqual(a[::], a)
self.assertEqual(a[::2], '02468')
self.assertEqual(a[1::2], '13579')
self.assertEqual(a[::-1],'9876543210')
self.assertEqual(a[::-2], '97531')
self.assertEqual(a[3::-2], '31')
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100:100:2], '02468')
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
def test_int__format__(self):
def test(i, format_spec, result):
# just make sure we have the unified type for integers
assert type(i) == int
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
test(123456789, 'd', '123456789')
test(123456789, 'd', '123456789')
test(1, 'c', '\01')
# sign and aligning are interdependent
test(1, "-", '1')
test(-1, "-", '-1')
test(1, "-3", ' 1')
test(-1, "-3", ' -1')
test(1, "+3", ' +1')
test(-1, "+3", ' -1')
test(1, " 3", ' 1')
test(-1, " 3", ' -1')
test(1, " ", ' 1')
test(-1, " ", '-1')
# hex
test(3, "x", "3")
test(3, "X", "3")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(1234, "8x", " 4d2")
test(-1234, "8x", " -4d2")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(-3, "x", "-3")
test(-3, "X", "-3")
test(int('be', 16), "x", "be")
test(int('be', 16), "X", "BE")
test(-int('be', 16), "x", "-be")
test(-int('be', 16), "X", "-BE")
# octal
test(3, "o", "3")
test(-3, "o", "-3")
test(65, "o", "101")
test(-65, "o", "-101")
test(1234, "o", "2322")
test(-1234, "o", "-2322")
test(1234, "-o", "2322")
test(-1234, "-o", "-2322")
test(1234, " o", " 2322")
test(-1234, " o", "-2322")
test(1234, "+o", "+2322")
test(-1234, "+o", "-2322")
# binary
test(3, "b", "11")
test(-3, "b", "-11")
test(1234, "b", "10011010010")
test(-1234, "b", "-10011010010")
test(1234, "-b", "10011010010")
test(-1234, "-b", "-10011010010")
test(1234, " b", " 10011010010")
test(-1234, " b", "-10011010010")
test(1234, "+b", "+10011010010")
test(-1234, "+b", "-10011010010")
# alternate (#) formatting
test(0, "#b", '0b0')
test(0, "-#b", '0b0')
test(1, "-#b", '0b1')
test(-1, "-#b", '-0b1')
test(-1, "-#5b", ' -0b1')
test(1, "+#5b", ' +0b1')
test(100, "+#b", '+0b1100100')
test(100, "#012b", '0b0001100100')
test(-100, "#012b", '-0b001100100')
test(0, "#o", '0o0')
test(0, "-#o", '0o0')
test(1, "-#o", '0o1')
test(-1, "-#o", '-0o1')
test(-1, "-#5o", ' -0o1')
test(1, "+#5o", ' +0o1')
test(100, "+#o", '+0o144')
test(100, "#012o", '0o0000000144')
test(-100, "#012o", '-0o000000144')
test(0, "#x", '0x0')
test(0, "-#x", '0x0')
test(1, "-#x", '0x1')
test(-1, "-#x", '-0x1')
test(-1, "-#5x", ' -0x1')
test(1, "+#5x", ' +0x1')
test(100, "+#x", '+0x64')
test(100, "#012x", '0x0000000064')
test(-100, "#012x", '-0x000000064')
test(123456, "#012x", '0x000001e240')
test(-123456, "#012x", '-0x00001e240')
test(0, "#X", '0X0')
test(0, "-#X", '0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
test(123, ',', '123')
test(-123, ',', '-123')
test(1234, ',', '1,234')
test(-1234, ',', '-1,234')
test(123456, ',', '123,456')
test(-123456, ',', '-123,456')
test(1234567, ',', '1,234,567')
test(-1234567, ',', '-1,234,567')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# Unified type for integers
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'n'
self.assertRaises(ValueError, 3 .__format__, ",n")
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# can't have '#' with 'c'
self.assertRaises(ValueError, 3 .__format__, "#c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format_string('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format_string('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format_string('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
def test_float__format__(self):
def test(f, format_spec, result):
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(format(f, format_spec), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 3.0 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
# a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate float formatting
test(1.0, '.0e', '1e+00')
test(1.0, '#.0e', '1.e+00')
test(1.0, '.0f', '1')
test(1.0, '#.0f', '1.')
test(1.1, 'g', '1.1')
test(1.1, '#g', '1.10000')
test(1.0, '.0%', '100%')
test(1.0, '#.0%', '100.%')
# Issue 7094: Alternate formatting (specified by #)
test(1.0, '0e', '1.000000e+00')
test(1.0, '#0e', '1.000000e+00')
test(1.0, '0f', '1.000000' )
test(1.0, '#0f', '1.000000')
test(1.0, '.1e', '1.0e+00')
test(1.0, '#.1e', '1.0e+00')
test(1.0, '.1f', '1.0')
test(1.0, '#.1f', '1.0')
test(1.0, '.1%', '100.0%')
test(1.0, '#.1%', '100.0%')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_slot_wrapper_types(self):
self.assertIsInstance(object.__init__, types.WrapperDescriptorType)
self.assertIsInstance(object.__str__, types.WrapperDescriptorType)
self.assertIsInstance(object.__lt__, types.WrapperDescriptorType)
self.assertIsInstance(int.__lt__, types.WrapperDescriptorType)
def test_method_wrapper_types(self):
self.assertIsInstance(object().__init__, types.MethodWrapperType)
self.assertIsInstance(object().__str__, types.MethodWrapperType)
self.assertIsInstance(object().__lt__, types.MethodWrapperType)
self.assertIsInstance((42).__lt__, types.MethodWrapperType)
def test_method_descriptor_types(self):
self.assertIsInstance(str.join, types.MethodDescriptorType)
self.assertIsInstance(list.append, types.MethodDescriptorType)
self.assertIsInstance(''.join, types.BuiltinMethodType)
self.assertIsInstance([].append, types.BuiltinMethodType)
self.assertIsInstance(int.__dict__['from_bytes'], types.ClassMethodDescriptorType)
self.assertIsInstance(int.from_bytes, types.BuiltinMethodType)
self.assertIsInstance(int.__new__, types.BuiltinMethodType)
def test_ellipsis_type(self):
self.assertIsInstance(Ellipsis, types.EllipsisType)
def test_notimplemented_type(self):
self.assertIsInstance(NotImplemented, types.NotImplementedType)
def test_none_type(self):
self.assertIsInstance(None, types.NoneType)
class UnionTests(unittest.TestCase):
def test_or_types_operator(self):
self.assertEqual(int | str, typing.Union[int, str])
self.assertNotEqual(int | list, typing.Union[int, str])
self.assertEqual(str | int, typing.Union[int, str])
self.assertEqual(int | None, typing.Union[int, None])
self.assertEqual(None | int, typing.Union[int, None])
self.assertEqual(int | type(None), int | None)
self.assertEqual(type(None) | int, None | int)
self.assertEqual(int | str | list, typing.Union[int, str, list])
self.assertEqual(int | (str | list), typing.Union[int, str, list])
self.assertEqual(str | (int | list), typing.Union[int, str, list])
self.assertEqual(typing.List | typing.Tuple, typing.Union[typing.List, typing.Tuple])
self.assertEqual(typing.List[int] | typing.Tuple[int], typing.Union[typing.List[int], typing.Tuple[int]])
self.assertEqual(typing.List[int] | None, typing.Union[typing.List[int], None])
self.assertEqual(None | typing.List[int], typing.Union[None, typing.List[int]])
self.assertEqual(str | float | int | complex | int, (int | str) | (float | complex))
self.assertEqual(typing.Union[str, int, typing.List[int]], str | int | typing.List[int])
self.assertIs(int | int, int)
self.assertEqual(
BaseException |
bool |
bytes |
complex |
float |
int |
list |
map |
set,
typing.Union[
BaseException,
bool,
bytes,
complex,
float,
int,
list,
map,
set,
])
with self.assertRaises(TypeError):
int | 3
with self.assertRaises(TypeError):
3 | int
with self.assertRaises(TypeError):
Example() | int
x = int | str
self.assertEqual(x, int | str)
self.assertEqual(x, str | int)
self.assertNotEqual(x, {}) # should not raise exception
with self.assertRaises(TypeError):
x < x
with self.assertRaises(TypeError):
x <= x
y = typing.Union[str, int]
with self.assertRaises(TypeError):
x < y
y = int | bool
with self.assertRaises(TypeError):
x < y
# Check that we don't crash if typing.Union does not have a tuple in __args__
y = typing.Union[str, int]
y.__args__ = [str, int]
self.assertEqual(x, y)
def test_hash(self):
self.assertEqual(hash(int | str), hash(str | int))
self.assertEqual(hash(int | str), hash(typing.Union[int, str]))
def test_instancecheck(self):
x = int | str
self.assertIsInstance(1, x)
self.assertIsInstance(True, x)
self.assertIsInstance('a', x)
self.assertNotIsInstance(None, x)
self.assertTrue(issubclass(int, x))
self.assertTrue(issubclass(bool, x))
self.assertTrue(issubclass(str, x))
self.assertFalse(issubclass(type(None), x))
x = int | None
self.assertIsInstance(None, x)
self.assertTrue(issubclass(type(None), x))
x = int | collections.abc.Mapping
self.assertIsInstance({}, x)
self.assertTrue(issubclass(dict, x))
def test_bad_instancecheck(self):
class BadMeta(type):
def __instancecheck__(cls, inst):
1/0
x = int | BadMeta('A', (), {})
self.assertTrue(isinstance(1, x))
self.assertRaises(ZeroDivisionError, isinstance, [], x)
def test_bad_subclasscheck(self):
class BadMeta(type):
def __subclasscheck__(cls, sub):
1/0
x = int | BadMeta('A', (), {})
self.assertTrue(issubclass(int, x))
self.assertRaises(ZeroDivisionError, issubclass, list, x)
def test_or_type_operator_with_TypeVar(self):
TV = typing.TypeVar('T')
assert TV | str == typing.Union[TV, str]
assert str | TV == typing.Union[str, TV]
self.assertIs((int | TV)[int], int)
self.assertIs((TV | int)[int], int)
def test_union_args(self):
def check(arg, expected):
clear_typing_caches()
self.assertEqual(arg.__args__, expected)
check(int | str, (int, str))
check((int | str) | list, (int, str, list))
check(int | (str | list), (int, str, list))
check((int | str) | int, (int, str))
check(int | (str | int), (int, str))
check((int | str) | (str | int), (int, str))
check(typing.Union[int, str] | list, (int, str, list))
check(int | typing.Union[str, list], (int, str, list))
check((int | str) | (list | int), (int, str, list))
check((int | str) | typing.Union[list, int], (int, str, list))
check(typing.Union[int, str] | (list | int), (int, str, list))
check((str | int) | (int | list), (str, int, list))
check((str | int) | typing.Union[int, list], (str, int, list))
check(typing.Union[str, int] | (int | list), (str, int, list))
check(int | type(None), (int, type(None)))
check(type(None) | int, (type(None), int))
args = (int, list[int], typing.List[int],
typing.Tuple[int, int], typing.Callable[[int], int],
typing.Hashable, typing.TypeVar('T'))
for x in args:
with self.subTest(x):
check(x | None, (x, type(None)))
check(None | x, (type(None), x))
def test_union_parameter_chaining(self):
T = typing.TypeVar("T")
S = typing.TypeVar("S")
self.assertEqual((float | list[T])[int], float | list[int])
self.assertEqual(list[int | list[T]].__parameters__, (T,))
self.assertEqual(list[int | list[T]][str], list[int | list[str]])
self.assertEqual((list[T] | list[S]).__parameters__, (T, S))
self.assertEqual((list[T] | list[S])[int, T], list[int] | list[T])
self.assertEqual((list[T] | list[S])[int, int], list[int])
def test_union_parameter_substitution(self):
def eq(actual, expected, typed=True):
self.assertEqual(actual, expected)
if typed:
self.assertIs(type(actual), type(expected))
T = typing.TypeVar('T')
S = typing.TypeVar('S')
NT = typing.NewType('NT', str)
x = int | T | bytes
eq(x[str], int | str | bytes, typed=False)
eq(x[list[int]], int | list[int] | bytes, typed=False)
eq(x[typing.List], int | typing.List | bytes)
eq(x[typing.List[int]], int | typing.List[int] | bytes)
eq(x[typing.Hashable], int | typing.Hashable | bytes)
eq(x[collections.abc.Hashable],
int | collections.abc.Hashable | bytes, typed=False)
eq(x[typing.Callable[[int], str]],
int | typing.Callable[[int], str] | bytes)
eq(x[collections.abc.Callable[[int], str]],
int | collections.abc.Callable[[int], str] | bytes, typed=False)
eq(x[typing.Tuple[int, str]], int | typing.Tuple[int, str] | bytes)
eq(x[typing.Literal['none']], int | typing.Literal['none'] | bytes)
eq(x[str | list], int | str | list | bytes, typed=False)
eq(x[typing.Union[str, list]], typing.Union[int, str, list, bytes])
eq(x[str | int], int | str | bytes, typed=False)
eq(x[typing.Union[str, int]], typing.Union[int, str, bytes])
eq(x[NT], int | NT | bytes)
eq(x[S], int | S | bytes)
def test_union_pickle(self):
orig = list[T] | int
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(orig, proto)
loaded = pickle.loads(s)
self.assertEqual(loaded, orig)
self.assertEqual(loaded.__args__, orig.__args__)
self.assertEqual(loaded.__parameters__, orig.__parameters__)
def test_union_copy(self):
orig = list[T] | int
for copied in (copy.copy(orig), copy.deepcopy(orig)):
self.assertEqual(copied, orig)
self.assertEqual(copied.__args__, orig.__args__)
self.assertEqual(copied.__parameters__, orig.__parameters__)
def test_union_parameter_substitution_errors(self):
T = typing.TypeVar("T")
x = int | T
with self.assertRaises(TypeError):
x[42]
def test_or_type_operator_with_forward(self):
T = typing.TypeVar('T')
ForwardAfter = T | 'Forward'
ForwardBefore = 'Forward' | T
def forward_after(x: ForwardAfter[int]) -> None: ...
def forward_before(x: ForwardBefore[int]) -> None: ...
assert typing.get_args(typing.get_type_hints(forward_after)['x']) == (int, Forward)
assert typing.get_args(typing.get_type_hints(forward_before)['x']) == (int, Forward)
def test_or_type_operator_with_Protocol(self):
class Proto(typing.Protocol):
def meth(self) -> int:
...
assert Proto | str == typing.Union[Proto, str]
def test_or_type_operator_with_Alias(self):
assert list | str == typing.Union[list, str]
assert typing.List | str == typing.Union[typing.List, str]
def test_or_type_operator_with_NamedTuple(self):
NT=namedtuple('A', ['B', 'C', 'D'])
assert NT | str == typing.Union[NT,str]
def test_or_type_operator_with_TypedDict(self):
class Point2D(typing.TypedDict):
x: int
y: int
label: str
assert Point2D | str == typing.Union[Point2D, str]
def test_or_type_operator_with_NewType(self):
UserId = typing.NewType('UserId', int)
assert UserId | str == typing.Union[UserId, str]
def test_or_type_operator_with_IO(self):
assert typing.IO | str == typing.Union[typing.IO, str]
def test_or_type_operator_with_SpecialForm(self):
assert typing.Any | str == typing.Union[typing.Any, str]
assert typing.NoReturn | str == typing.Union[typing.NoReturn, str]
assert typing.Optional[int] | str == typing.Union[typing.Optional[int], str]
assert typing.Optional[int] | str == typing.Union[int, str, None]
assert typing.Union[int, bool] | str == typing.Union[int, bool, str]
def test_or_type_repr(self):
assert repr(int | str) == "int | str"
assert repr((int | str) | list) == "int | str | list"
assert repr(int | (str | list)) == "int | str | list"
assert repr(int | None) == "int | None"
assert repr(int | type(None)) == "int | None"
assert repr(int | typing.GenericAlias(list, int)) == "int | list[int]"
def test_or_type_operator_with_genericalias(self):
a = list[int]
b = list[str]
c = dict[float, str]
class SubClass(types.GenericAlias): ...
d = SubClass(list, float)
# equivalence with typing.Union
self.assertEqual(a | b | c | d, typing.Union[a, b, c, d])
# de-duplicate
self.assertEqual(a | c | b | b | a | c | d | d, a | b | c | d)
# order shouldn't matter
self.assertEqual(a | b | d, b | a | d)
self.assertEqual(repr(a | b | c | d),
"list[int] | list[str] | dict[float, str] | list[float]")
class BadType(type):
def __eq__(self, other):
return 1 / 0
bt = BadType('bt', (), {})
# Comparison should fail and errors should propagate out for bad types.
with self.assertRaises(ZeroDivisionError):
list[int] | list[bt]
union_ga = (int | list[str], int | collections.abc.Callable[..., str],
int | d)
# Raise error when isinstance(type, type | genericalias)
for type_ in union_ga:
with self.subTest(f"check isinstance/issubclass is invalid for {type_}"):
with self.assertRaises(TypeError):
isinstance(1, type_)
with self.assertRaises(TypeError):
issubclass(int, type_)
def test_or_type_operator_with_bad_module(self):
class BadMeta(type):
__qualname__ = 'TypeVar'
@property
def __module__(self):
1 / 0
TypeVar = BadMeta('TypeVar', (), {})
_SpecialForm = BadMeta('_SpecialForm', (), {})
# Crashes in Issue44483
with self.assertRaises((TypeError, ZeroDivisionError)):
str | TypeVar()
with self.assertRaises((TypeError, ZeroDivisionError)):
str | _SpecialForm()
@cpython_only
def test_or_type_operator_reference_cycle(self):
if not hasattr(sys, 'gettotalrefcount'):
self.skipTest('Cannot get total reference count.')
gc.collect()
before = sys.gettotalrefcount()
for _ in range(30):
T = typing.TypeVar('T')
U = int | list[T]
T.blah = U
del T
del U
gc.collect()
leeway = 15
self.assertLessEqual(sys.gettotalrefcount() - before, leeway,
msg='Check for union reference leak.')
class MappingProxyTests(unittest.TestCase):
mappingproxy = types.MappingProxyType
def test_constructor(self):
class userdict(dict):
pass
mapping = {'x': 1, 'y': 2}
self.assertEqual(self.mappingproxy(mapping), mapping)
mapping = userdict(x=1, y=2)
self.assertEqual(self.mappingproxy(mapping), mapping)
mapping = collections.ChainMap({'x': 1}, {'y': 2})
self.assertEqual(self.mappingproxy(mapping), mapping)
self.assertRaises(TypeError, self.mappingproxy, 10)
self.assertRaises(TypeError, self.mappingproxy, ("a", "tuple"))
self.assertRaises(TypeError, self.mappingproxy, ["a", "list"])
def test_methods(self):
attrs = set(dir(self.mappingproxy({}))) - set(dir(object()))
self.assertEqual(attrs, {
'__contains__',
'__getitem__',
'__class_getitem__',
'__ior__',
'__iter__',
'__len__',
'__or__',
'__reversed__',
'__ror__',
'copy',
'get',
'items',
'keys',
'values',
})
def test_get(self):
view = self.mappingproxy({'a': 'A', 'b': 'B'})
self.assertEqual(view['a'], 'A')
self.assertEqual(view['b'], 'B')
self.assertRaises(KeyError, view.__getitem__, 'xxx')
self.assertEqual(view.get('a'), 'A')
self.assertIsNone(view.get('xxx'))
self.assertEqual(view.get('xxx', 42), 42)
def test_missing(self):
class dictmissing(dict):
def __missing__(self, key):
return "missing=%s" % key
view = self.mappingproxy(dictmissing(x=1))
self.assertEqual(view['x'], 1)
self.assertEqual(view['y'], 'missing=y')
self.assertEqual(view.get('x'), 1)
self.assertEqual(view.get('y'), None)
self.assertEqual(view.get('y', 42), 42)
self.assertTrue('x' in view)
self.assertFalse('y' in view)
def test_customdict(self):
class customdict(dict):
def __contains__(self, key):
if key == 'magic':
return True
else:
return dict.__contains__(self, key)
def __iter__(self):
return iter(('iter',))
def __len__(self):
return 500
def copy(self):
return 'copy'
def keys(self):
return 'keys'
def items(self):
return 'items'
def values(self):
return 'values'
def __getitem__(self, key):
return "getitem=%s" % dict.__getitem__(self, key)
def get(self, key, default=None):
return "get=%s" % dict.get(self, key, 'default=%r' % default)
custom = customdict({'key': 'value'})
view = self.mappingproxy(custom)
self.assertTrue('key' in view)
self.assertTrue('magic' in view)
self.assertFalse('xxx' in view)
self.assertEqual(view['key'], 'getitem=value')
self.assertRaises(KeyError, view.__getitem__, 'xxx')
self.assertEqual(tuple(view), ('iter',))
self.assertEqual(len(view), 500)
self.assertEqual(view.copy(), 'copy')
self.assertEqual(view.get('key'), 'get=value')
self.assertEqual(view.get('xxx'), 'get=default=None')
self.assertEqual(view.items(), 'items')
self.assertEqual(view.keys(), 'keys')
self.assertEqual(view.values(), 'values')
def test_chainmap(self):
d1 = {'x': 1}
d2 = {'y': 2}
mapping = collections.ChainMap(d1, d2)
view = self.mappingproxy(mapping)
self.assertTrue('x' in view)
self.assertTrue('y' in view)
self.assertFalse('z' in view)
self.assertEqual(view['x'], 1)
self.assertEqual(view['y'], 2)
self.assertRaises(KeyError, view.__getitem__, 'z')
self.assertEqual(tuple(sorted(view)), ('x', 'y'))
self.assertEqual(len(view), 2)
copy = view.copy()
self.assertIsNot(copy, mapping)
self.assertIsInstance(copy, collections.ChainMap)
self.assertEqual(copy, mapping)
self.assertEqual(view.get('x'), 1)
self.assertEqual(view.get('y'), 2)
self.assertIsNone(view.get('z'))
self.assertEqual(tuple(sorted(view.items())), (('x', 1), ('y', 2)))
self.assertEqual(tuple(sorted(view.keys())), ('x', 'y'))
self.assertEqual(tuple(sorted(view.values())), (1, 2))
def test_contains(self):
view = self.mappingproxy(dict.fromkeys('abc'))
self.assertTrue('a' in view)
self.assertTrue('b' in view)
self.assertTrue('c' in view)
self.assertFalse('xxx' in view)
def test_views(self):
mapping = {}
view = self.mappingproxy(mapping)
keys = view.keys()
values = view.values()
items = view.items()
self.assertEqual(list(keys), [])
self.assertEqual(list(values), [])
self.assertEqual(list(items), [])
mapping['key'] = 'value'
self.assertEqual(list(keys), ['key'])
self.assertEqual(list(values), ['value'])
self.assertEqual(list(items), [('key', 'value')])
def test_len(self):
for expected in range(6):
data = dict.fromkeys('abcde'[:expected])
self.assertEqual(len(data), expected)
view = self.mappingproxy(data)
self.assertEqual(len(view), expected)
def test_iterators(self):
keys = ('x', 'y')
values = (1, 2)
items = tuple(zip(keys, values))
view = self.mappingproxy(dict(items))
self.assertEqual(set(view), set(keys))
self.assertEqual(set(view.keys()), set(keys))
self.assertEqual(set(view.values()), set(values))
self.assertEqual(set(view.items()), set(items))
def test_reversed(self):
d = {'a': 1, 'b': 2, 'foo': 0, 'c': 3, 'd': 4}
mp = self.mappingproxy(d)
del d['foo']
r = reversed(mp)
self.assertEqual(list(r), list('dcba'))
self.assertRaises(StopIteration, next, r)
def test_copy(self):
original = {'key1': 27, 'key2': 51, 'key3': 93}
view = self.mappingproxy(original)
copy = view.copy()
self.assertEqual(type(copy), dict)
self.assertEqual(copy, original)
original['key1'] = 70
self.assertEqual(view['key1'], 70)
self.assertEqual(copy['key1'], 27)
def test_union(self):
mapping = {'a': 0, 'b': 1, 'c': 2}
view = self.mappingproxy(mapping)
with self.assertRaises(TypeError):
view | [('r', 2), ('d', 2)]
with self.assertRaises(TypeError):
[('r', 2), ('d', 2)] | view
with self.assertRaises(TypeError):
view |= [('r', 2), ('d', 2)]
other = {'c': 3, 'p': 0}
self.assertDictEqual(view | other, {'a': 0, 'b': 1, 'c': 3, 'p': 0})
self.assertDictEqual(other | view, {'c': 2, 'p': 0, 'a': 0, 'b': 1})
self.assertEqual(view, {'a': 0, 'b': 1, 'c': 2})
self.assertDictEqual(mapping, {'a': 0, 'b': 1, 'c': 2})
self.assertDictEqual(other, {'c': 3, 'p': 0})
class ClassCreationTests(unittest.TestCase):
class Meta(type):
def __init__(cls, name, bases, ns, **kw):
super().__init__(name, bases, ns)
@staticmethod
def __new__(mcls, name, bases, ns, **kw):
return super().__new__(mcls, name, bases, ns)
@classmethod
def __prepare__(mcls, name, bases, **kw):
ns = super().__prepare__(name, bases)
ns["y"] = 1
ns.update(kw)
return ns
def test_new_class_basics(self):
C = types.new_class("C")
self.assertEqual(C.__name__, "C")
self.assertEqual(C.__bases__, (object,))
def test_new_class_subclass(self):
C = types.new_class("C", (int,))
self.assertTrue(issubclass(C, int))
def test_new_class_meta(self):
Meta = self.Meta
settings = {"metaclass": Meta, "z": 2}
# We do this twice to make sure the passed in dict isn't mutated
for i in range(2):
C = types.new_class("C" + str(i), (), settings)
self.assertIsInstance(C, Meta)
self.assertEqual(C.y, 1)
self.assertEqual(C.z, 2)
def test_new_class_exec_body(self):
Meta = self.Meta
def func(ns):
ns["x"] = 0
C = types.new_class("C", (), {"metaclass": Meta, "z": 2}, func)
self.assertIsInstance(C, Meta)
self.assertEqual(C.x, 0)
self.assertEqual(C.y, 1)
self.assertEqual(C.z, 2)
def test_new_class_metaclass_keywords(self):
#Test that keywords are passed to the metaclass:
def meta_func(name, bases, ns, **kw):
return name, bases, ns, kw
res = types.new_class("X",
(int, object),
dict(metaclass=meta_func, x=0))
self.assertEqual(res, ("X", (int, object), {}, {"x": 0}))
def test_new_class_defaults(self):
# Test defaults/keywords:
C = types.new_class("C", (), {}, None)
self.assertEqual(C.__name__, "C")
self.assertEqual(C.__bases__, (object,))
def test_new_class_meta_with_base(self):
Meta = self.Meta
def func(ns):
ns["x"] = 0
C = types.new_class(name="C",
bases=(int,),
kwds=dict(metaclass=Meta, z=2),
exec_body=func)
self.assertTrue(issubclass(C, int))
self.assertIsInstance(C, Meta)
self.assertEqual(C.x, 0)
self.assertEqual(C.y, 1)
self.assertEqual(C.z, 2)
def test_new_class_with_mro_entry(self):
class A: pass
class C:
def __mro_entries__(self, bases):
return (A,)
c = C()
D = types.new_class('D', (c,), {})
self.assertEqual(D.__bases__, (A,))
self.assertEqual(D.__orig_bases__, (c,))
self.assertEqual(D.__mro__, (D, A, object))
def test_new_class_with_mro_entry_genericalias(self):
L1 = types.new_class('L1', (typing.List[int],), {})
self.assertEqual(L1.__bases__, (list, typing.Generic))
self.assertEqual(L1.__orig_bases__, (typing.List[int],))
self.assertEqual(L1.__mro__, (L1, list, typing.Generic, object))
L2 = types.new_class('L2', (list[int],), {})
self.assertEqual(L2.__bases__, (list,))
self.assertEqual(L2.__orig_bases__, (list[int],))
self.assertEqual(L2.__mro__, (L2, list, object))
def test_new_class_with_mro_entry_none(self):
class A: pass
class B: pass
class C:
def __mro_entries__(self, bases):
return ()
c = C()
D = types.new_class('D', (A, c, B), {})
self.assertEqual(D.__bases__, (A, B))
self.assertEqual(D.__orig_bases__, (A, c, B))
self.assertEqual(D.__mro__, (D, A, B, object))
def test_new_class_with_mro_entry_error(self):
class A: pass
class C:
def __mro_entries__(self, bases):
return A
c = C()
with self.assertRaises(TypeError):
types.new_class('D', (c,), {})
def test_new_class_with_mro_entry_multiple(self):
class A1: pass
class A2: pass
class B1: pass
class B2: pass
class A:
def __mro_entries__(self, bases):
return (A1, A2)
class B:
def __mro_entries__(self, bases):
return (B1, B2)
D = types.new_class('D', (A(), B()), {})
self.assertEqual(D.__bases__, (A1, A2, B1, B2))
def test_new_class_with_mro_entry_multiple_2(self):
class A1: pass
class A2: pass
class A3: pass
class B1: pass
class B2: pass
class A:
def __mro_entries__(self, bases):
return (A1, A2, A3)
class B:
def __mro_entries__(self, bases):
return (B1, B2)
class C: pass
D = types.new_class('D', (A(), C, B()), {})
self.assertEqual(D.__bases__, (A1, A2, A3, C, B1, B2))
# Many of the following tests are derived from test_descr.py
def test_prepare_class(self):
# Basic test of metaclass derivation
expected_ns = {}
class A(type):
def __new__(*args, **kwargs):
return type.__new__(*args, **kwargs)
def __prepare__(*args):
return expected_ns
B = types.new_class("B", (object,))
C = types.new_class("C", (object,), {"metaclass": A})
# The most derived metaclass of D is A rather than type.
meta, ns, kwds = types.prepare_class("D", (B, C), {"metaclass": type})
self.assertIs(meta, A)
self.assertIs(ns, expected_ns)
self.assertEqual(len(kwds), 0)
def test_bad___prepare__(self):
# __prepare__() must return a mapping.
class BadMeta(type):
@classmethod
def __prepare__(*args):
return None
with self.assertRaisesRegex(TypeError,
r'^BadMeta\.__prepare__\(\) must '
r'return a mapping, not NoneType$'):
class Foo(metaclass=BadMeta):
pass
# Also test the case in which the metaclass is not a type.
class BadMeta:
@classmethod
def __prepare__(*args):
return None
with self.assertRaisesRegex(TypeError,
r'^<metaclass>\.__prepare__\(\) must '
r'return a mapping, not NoneType$'):
class Bar(metaclass=BadMeta()):
pass
def test_resolve_bases(self):
class A: pass
class B: pass
class C:
def __mro_entries__(self, bases):
if A in bases:
return ()
return (A,)
c = C()
self.assertEqual(types.resolve_bases(()), ())
self.assertEqual(types.resolve_bases((c,)), (A,))
self.assertEqual(types.resolve_bases((C,)), (C,))
self.assertEqual(types.resolve_bases((A, C)), (A, C))
self.assertEqual(types.resolve_bases((c, A)), (A,))
self.assertEqual(types.resolve_bases((A, c)), (A,))
x = (A,)
y = (C,)
z = (A, C)
t = (A, C, B)
for bases in [x, y, z, t]:
self.assertIs(types.resolve_bases(bases), bases)
def test_resolve_bases_with_mro_entry(self):
self.assertEqual(types.resolve_bases((typing.List[int],)),
(list, typing.Generic))
self.assertEqual(types.resolve_bases((list[int],)), (list,))
def test_metaclass_derivation(self):
# issue1294232: correct metaclass calculation
new_calls = [] # to check the order of __new__ calls
class AMeta(type):
def __new__(mcls, name, bases, ns):
new_calls.append('AMeta')
return super().__new__(mcls, name, bases, ns)
@classmethod
def __prepare__(mcls, name, bases):
return {}
class BMeta(AMeta):
def __new__(mcls, name, bases, ns):
new_calls.append('BMeta')
return super().__new__(mcls, name, bases, ns)
@classmethod
def __prepare__(mcls, name, bases):
ns = super().__prepare__(name, bases)
ns['BMeta_was_here'] = True
return ns
A = types.new_class("A", (), {"metaclass": AMeta})
self.assertEqual(new_calls, ['AMeta'])
new_calls.clear()
B = types.new_class("B", (), {"metaclass": BMeta})
# BMeta.__new__ calls AMeta.__new__ with super:
self.assertEqual(new_calls, ['BMeta', 'AMeta'])
new_calls.clear()
C = types.new_class("C", (A, B))
# The most derived metaclass is BMeta:
self.assertEqual(new_calls, ['BMeta', 'AMeta'])
new_calls.clear()
# BMeta.__prepare__ should've been called:
self.assertIn('BMeta_was_here', C.__dict__)
# The order of the bases shouldn't matter:
C2 = types.new_class("C2", (B, A))
self.assertEqual(new_calls, ['BMeta', 'AMeta'])
new_calls.clear()
self.assertIn('BMeta_was_here', C2.__dict__)
# Check correct metaclass calculation when a metaclass is declared:
D = types.new_class("D", (C,), {"metaclass": type})
self.assertEqual(new_calls, ['BMeta', 'AMeta'])
new_calls.clear()
self.assertIn('BMeta_was_here', D.__dict__)
E = types.new_class("E", (C,), {"metaclass": AMeta})
self.assertEqual(new_calls, ['BMeta', 'AMeta'])
new_calls.clear()
self.assertIn('BMeta_was_here', E.__dict__)
def test_metaclass_override_function(self):
# Special case: the given metaclass isn't a class,
# so there is no metaclass calculation.
class A(metaclass=self.Meta):
pass
marker = object()
def func(*args, **kwargs):
return marker
X = types.new_class("X", (), {"metaclass": func})
Y = types.new_class("Y", (object,), {"metaclass": func})
Z = types.new_class("Z", (A,), {"metaclass": func})
self.assertIs(marker, X)
self.assertIs(marker, Y)
self.assertIs(marker, Z)
def test_metaclass_override_callable(self):
# The given metaclass is a class,
# but not a descendant of type.
new_calls = [] # to check the order of __new__ calls
prepare_calls = [] # to track __prepare__ calls
class ANotMeta:
def __new__(mcls, *args, **kwargs):
new_calls.append('ANotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('ANotMeta')
return {}
class BNotMeta(ANotMeta):
def __new__(mcls, *args, **kwargs):
new_calls.append('BNotMeta')
return super().__new__(mcls)
@classmethod
def __prepare__(mcls, name, bases):
prepare_calls.append('BNotMeta')
return super().__prepare__(name, bases)
A = types.new_class("A", (), {"metaclass": ANotMeta})
self.assertIs(ANotMeta, type(A))
self.assertEqual(prepare_calls, ['ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['ANotMeta'])
new_calls.clear()
B = types.new_class("B", (), {"metaclass": BNotMeta})
self.assertIs(BNotMeta, type(B))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
C = types.new_class("C", (A, B))
self.assertIs(BNotMeta, type(C))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
C2 = types.new_class("C2", (B, A))
self.assertIs(BNotMeta, type(C2))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
# This is a TypeError, because of a metaclass conflict:
# BNotMeta is neither a subclass, nor a superclass of type
with self.assertRaises(TypeError):
D = types.new_class("D", (C,), {"metaclass": type})
E = types.new_class("E", (C,), {"metaclass": ANotMeta})
self.assertIs(BNotMeta, type(E))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
F = types.new_class("F", (object(), C))
self.assertIs(BNotMeta, type(F))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
F2 = types.new_class("F2", (C, object()))
self.assertIs(BNotMeta, type(F2))
self.assertEqual(prepare_calls, ['BNotMeta', 'ANotMeta'])
prepare_calls.clear()
self.assertEqual(new_calls, ['BNotMeta', 'ANotMeta'])
new_calls.clear()
# TypeError: BNotMeta is neither a
# subclass, nor a superclass of int
with self.assertRaises(TypeError):
X = types.new_class("X", (C, int()))
with self.assertRaises(TypeError):
X = types.new_class("X", (int(), C))
def test_one_argument_type(self):
expected_message = 'type.__new__() takes exactly 3 arguments (1 given)'
# Only type itself can use the one-argument form (#27157)
self.assertIs(type(5), int)
class M(type):
pass
with self.assertRaises(TypeError) as cm:
M(5)
self.assertEqual(str(cm.exception), expected_message)
class N(type, metaclass=M):
pass
with self.assertRaises(TypeError) as cm:
N(5)
self.assertEqual(str(cm.exception), expected_message)
def test_metaclass_new_error(self):
# bpo-44232: The C function type_new() must properly report the
# exception when a metaclass constructor raises an exception and the
# winner class is not the metaclass.
class ModelBase(type):
def __new__(cls, name, bases, attrs):
super_new = super().__new__
new_class = super_new(cls, name, bases, {})
if name != "Model":
raise RuntimeWarning(f"{name=}")
return new_class
class Model(metaclass=ModelBase):
pass
with self.assertRaises(RuntimeWarning):
type("SouthPonies", (Model,), {})
class SimpleNamespaceTests(unittest.TestCase):
def test_constructor(self):
ns1 = types.SimpleNamespace()
ns2 = types.SimpleNamespace(x=1, y=2)
ns3 = types.SimpleNamespace(**dict(x=1, y=2))
with self.assertRaises(TypeError):
types.SimpleNamespace(1, 2, 3)
with self.assertRaises(TypeError):
types.SimpleNamespace(**{1: 2})
self.assertEqual(len(ns1.__dict__), 0)
self.assertEqual(vars(ns1), {})
self.assertEqual(len(ns2.__dict__), 2)
self.assertEqual(vars(ns2), {'y': 2, 'x': 1})
self.assertEqual(len(ns3.__dict__), 2)
self.assertEqual(vars(ns3), {'y': 2, 'x': 1})
def test_unbound(self):
ns1 = vars(types.SimpleNamespace())
ns2 = vars(types.SimpleNamespace(x=1, y=2))
self.assertEqual(ns1, {})
self.assertEqual(ns2, {'y': 2, 'x': 1})
def test_underlying_dict(self):
ns1 = types.SimpleNamespace()
ns2 = types.SimpleNamespace(x=1, y=2)
ns3 = types.SimpleNamespace(a=True, b=False)
mapping = ns3.__dict__
del ns3
self.assertEqual(ns1.__dict__, {})
self.assertEqual(ns2.__dict__, {'y': 2, 'x': 1})
self.assertEqual(mapping, dict(a=True, b=False))
def test_attrget(self):
ns = types.SimpleNamespace(x=1, y=2, w=3)
self.assertEqual(ns.x, 1)
self.assertEqual(ns.y, 2)
self.assertEqual(ns.w, 3)
with self.assertRaises(AttributeError):
ns.z
def test_attrset(self):
ns1 = types.SimpleNamespace()
ns2 = types.SimpleNamespace(x=1, y=2, w=3)
ns1.a = 'spam'
ns1.b = 'ham'
ns2.z = 4
ns2.theta = None
self.assertEqual(ns1.__dict__, dict(a='spam', b='ham'))
self.assertEqual(ns2.__dict__, dict(x=1, y=2, w=3, z=4, theta=None))
def test_attrdel(self):
ns1 = types.SimpleNamespace()
ns2 = types.SimpleNamespace(x=1, y=2, w=3)
with self.assertRaises(AttributeError):
del ns1.spam
with self.assertRaises(AttributeError):
del ns2.spam
del ns2.y
self.assertEqual(vars(ns2), dict(w=3, x=1))
ns2.y = 'spam'
self.assertEqual(vars(ns2), dict(w=3, x=1, y='spam'))
del ns2.y
self.assertEqual(vars(ns2), dict(w=3, x=1))
ns1.spam = 5
self.assertEqual(vars(ns1), dict(spam=5))
del ns1.spam
self.assertEqual(vars(ns1), {})
def test_repr(self):
ns1 = types.SimpleNamespace(x=1, y=2, w=3)
ns2 = types.SimpleNamespace()
ns2.x = "spam"
ns2._y = 5
name = "namespace"
self.assertEqual(repr(ns1), "{name}(x=1, y=2, w=3)".format(name=name))
self.assertEqual(repr(ns2), "{name}(x='spam', _y=5)".format(name=name))
def test_equal(self):
ns1 = types.SimpleNamespace(x=1)
ns2 = types.SimpleNamespace()
ns2.x = 1
self.assertEqual(types.SimpleNamespace(), types.SimpleNamespace())
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns2, types.SimpleNamespace())
def test_nested(self):
ns1 = types.SimpleNamespace(a=1, b=2)
ns2 = types.SimpleNamespace()
ns3 = types.SimpleNamespace(x=ns1)
ns2.spam = ns1
ns2.ham = '?'
ns2.spam = ns3
self.assertEqual(vars(ns1), dict(a=1, b=2))
self.assertEqual(vars(ns2), dict(spam=ns3, ham='?'))
self.assertEqual(ns2.spam, ns3)
self.assertEqual(vars(ns3), dict(x=ns1))
self.assertEqual(ns3.x.a, 1)
def test_recursive(self):
ns1 = types.SimpleNamespace(c='cookie')
ns2 = types.SimpleNamespace()
ns3 = types.SimpleNamespace(x=1)
ns1.spam = ns1
ns2.spam = ns3
ns3.spam = ns2
self.assertEqual(ns1.spam, ns1)
self.assertEqual(ns1.spam.spam, ns1)
self.assertEqual(ns1.spam.spam, ns1.spam)
self.assertEqual(ns2.spam, ns3)
self.assertEqual(ns3.spam, ns2)
self.assertEqual(ns2.spam.spam, ns2)
def test_recursive_repr(self):
ns1 = types.SimpleNamespace(c='cookie')
ns2 = types.SimpleNamespace()
ns3 = types.SimpleNamespace(x=1)
ns1.spam = ns1
ns2.spam = ns3
ns3.spam = ns2
name = "namespace"
repr1 = "{name}(c='cookie', spam={name}(...))".format(name=name)
repr2 = "{name}(spam={name}(x=1, spam={name}(...)))".format(name=name)
self.assertEqual(repr(ns1), repr1)
self.assertEqual(repr(ns2), repr2)
def test_as_dict(self):
ns = types.SimpleNamespace(spam='spamspamspam')
with self.assertRaises(TypeError):
len(ns)
with self.assertRaises(TypeError):
iter(ns)
with self.assertRaises(TypeError):
'spam' in ns
with self.assertRaises(TypeError):
ns['spam']
def test_subclass(self):
class Spam(types.SimpleNamespace):
pass
spam = Spam(ham=8, eggs=9)
self.assertIs(type(spam), Spam)
self.assertEqual(vars(spam), {'ham': 8, 'eggs': 9})
def test_pickle(self):
ns = types.SimpleNamespace(breakfast="spam", lunch="spam")
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
pname = "protocol {}".format(protocol)
try:
ns_pickled = pickle.dumps(ns, protocol)
except TypeError as e:
raise TypeError(pname) from e
ns_roundtrip = pickle.loads(ns_pickled)
self.assertEqual(ns, ns_roundtrip, pname)
def test_fake_namespace_compare(self):
# Issue #24257: Incorrect use of PyObject_IsInstance() caused
# SystemError.
class FakeSimpleNamespace(str):
__class__ = types.SimpleNamespace
self.assertFalse(types.SimpleNamespace() == FakeSimpleNamespace())
self.assertTrue(types.SimpleNamespace() != FakeSimpleNamespace())
with self.assertRaises(TypeError):
types.SimpleNamespace() < FakeSimpleNamespace()
with self.assertRaises(TypeError):
types.SimpleNamespace() <= FakeSimpleNamespace()
with self.assertRaises(TypeError):
types.SimpleNamespace() > FakeSimpleNamespace()
with self.assertRaises(TypeError):
types.SimpleNamespace() >= FakeSimpleNamespace()
class CoroutineTests(unittest.TestCase):
def test_wrong_args(self):
samples = [None, 1, object()]
for sample in samples:
with self.assertRaisesRegex(TypeError,
'types.coroutine.*expects a callable'):
types.coroutine(sample)
def test_non_gen_values(self):
@types.coroutine
def foo():
return 'spam'
self.assertEqual(foo(), 'spam')
class Awaitable:
def __await__(self):
return ()
aw = Awaitable()
@types.coroutine
def foo():
return aw
self.assertIs(aw, foo())
# decorate foo second time
foo = types.coroutine(foo)
self.assertIs(aw, foo())
def test_async_def(self):
# Test that types.coroutine passes 'async def' coroutines
# without modification
async def foo(): pass
foo_code = foo.__code__
foo_flags = foo.__code__.co_flags
decorated_foo = types.coroutine(foo)
self.assertIs(foo, decorated_foo)
self.assertEqual(foo.__code__.co_flags, foo_flags)
self.assertIs(decorated_foo.__code__, foo_code)
foo_coro = foo()
def bar(): return foo_coro
for _ in range(2):
bar = types.coroutine(bar)
coro = bar()
self.assertIs(foo_coro, coro)
self.assertEqual(coro.cr_code.co_flags, foo_flags)
coro.close()
def test_duck_coro(self):
class CoroLike:
def send(self): pass
def throw(self): pass
def close(self): pass
def __await__(self): return self
coro = CoroLike()
@types.coroutine
def foo():
return coro
self.assertIs(foo(), coro)
self.assertIs(foo().__await__(), coro)
def test_duck_corogen(self):
class CoroGenLike:
def send(self): pass
def throw(self): pass
def close(self): pass
def __await__(self): return self
def __iter__(self): return self
def __next__(self): pass
coro = CoroGenLike()
@types.coroutine
def foo():
return coro
self.assertIs(foo(), coro)
self.assertIs(foo().__await__(), coro)
def test_duck_gen(self):
class GenLike:
def send(self): pass
def throw(self): pass
def close(self): pass
def __iter__(self): pass
def __next__(self): pass
# Setup generator mock object
gen = unittest.mock.MagicMock(GenLike)
gen.__iter__ = lambda gen: gen
gen.__name__ = 'gen'
gen.__qualname__ = 'test.gen'
self.assertIsInstance(gen, collections.abc.Generator)
self.assertIs(gen, iter(gen))
@types.coroutine
def foo(): return gen
wrapper = foo()
self.assertIsInstance(wrapper, types._GeneratorWrapper)
self.assertIs(wrapper.__await__(), wrapper)
# Wrapper proxies duck generators completely:
self.assertIs(iter(wrapper), wrapper)
self.assertIsInstance(wrapper, collections.abc.Coroutine)
self.assertIsInstance(wrapper, collections.abc.Awaitable)
self.assertIs(wrapper.__qualname__, gen.__qualname__)
self.assertIs(wrapper.__name__, gen.__name__)
# Test AttributeErrors
for name in {'gi_running', 'gi_frame', 'gi_code', 'gi_yieldfrom',
'cr_running', 'cr_frame', 'cr_code', 'cr_await'}:
with self.assertRaises(AttributeError):
getattr(wrapper, name)
# Test attributes pass-through
gen.gi_running = object()
gen.gi_frame = object()
gen.gi_code = object()
gen.gi_yieldfrom = object()
self.assertIs(wrapper.gi_running, gen.gi_running)
self.assertIs(wrapper.gi_frame, gen.gi_frame)
self.assertIs(wrapper.gi_code, gen.gi_code)
self.assertIs(wrapper.gi_yieldfrom, gen.gi_yieldfrom)
self.assertIs(wrapper.cr_running, gen.gi_running)
self.assertIs(wrapper.cr_frame, gen.gi_frame)
self.assertIs(wrapper.cr_code, gen.gi_code)
self.assertIs(wrapper.cr_await, gen.gi_yieldfrom)
wrapper.close()
gen.close.assert_called_once_with()
wrapper.send(1)
gen.send.assert_called_once_with(1)
gen.reset_mock()
next(wrapper)
gen.__next__.assert_called_once_with()
gen.reset_mock()
wrapper.throw(1, 2, 3)
gen.throw.assert_called_once_with(1, 2, 3)
gen.reset_mock()
wrapper.throw(1, 2)
gen.throw.assert_called_once_with(1, 2)
gen.reset_mock()
wrapper.throw(1)
gen.throw.assert_called_once_with(1)
gen.reset_mock()
# Test exceptions propagation
error = Exception()
gen.throw.side_effect = error
try:
wrapper.throw(1)
except Exception as ex:
self.assertIs(ex, error)
else:
self.fail('wrapper did not propagate an exception')
# Test invalid args
gen.reset_mock()
with self.assertRaises(TypeError):
wrapper.throw()
self.assertFalse(gen.throw.called)
with self.assertRaises(TypeError):
wrapper.close(1)
self.assertFalse(gen.close.called)
with self.assertRaises(TypeError):
wrapper.send()
self.assertFalse(gen.send.called)
# Test that we do not double wrap
@types.coroutine
def bar(): return wrapper
self.assertIs(wrapper, bar())
# Test weakrefs support
ref = weakref.ref(wrapper)
self.assertIs(ref(), wrapper)
def test_duck_functional_gen(self):
class Generator:
"""Emulates the following generator (very clumsy):
def gen(fut):
result = yield fut
return result * 2
"""
def __init__(self, fut):
self._i = 0
self._fut = fut
def __iter__(self):
return self
def __next__(self):
return self.send(None)
def send(self, v):
try:
if self._i == 0:
assert v is None
return self._fut
if self._i == 1:
raise StopIteration(v * 2)
if self._i > 1:
raise StopIteration
finally:
self._i += 1
def throw(self, tp, *exc):
self._i = 100
if tp is not GeneratorExit:
raise tp
def close(self):
self.throw(GeneratorExit)
@types.coroutine
def foo(): return Generator('spam')
wrapper = foo()
self.assertIsInstance(wrapper, types._GeneratorWrapper)
async def corofunc():
return await foo() + 100
coro = corofunc()
self.assertEqual(coro.send(None), 'spam')
try:
coro.send(20)
except StopIteration as ex:
self.assertEqual(ex.args[0], 140)
else:
self.fail('StopIteration was expected')
def test_gen(self):
def gen_func():
yield 1
return (yield 2)
gen = gen_func()
@types.coroutine
def foo(): return gen
wrapper = foo()
self.assertIsInstance(wrapper, types._GeneratorWrapper)
self.assertIs(wrapper.__await__(), gen)
for name in ('__name__', '__qualname__', 'gi_code',
'gi_running', 'gi_frame'):
self.assertIs(getattr(foo(), name),
getattr(gen, name))
self.assertIs(foo().cr_code, gen.gi_code)
self.assertEqual(next(wrapper), 1)
self.assertEqual(wrapper.send(None), 2)
with self.assertRaisesRegex(StopIteration, 'spam'):
wrapper.send('spam')
gen = gen_func()
wrapper = foo()
wrapper.send(None)
with self.assertRaisesRegex(Exception, 'ham'):
wrapper.throw(Exception, Exception('ham'))
# decorate foo second time
foo = types.coroutine(foo)
self.assertIs(foo().__await__(), gen)
def test_returning_itercoro(self):
@types.coroutine
def gen():
yield
gencoro = gen()
@types.coroutine
def foo():
return gencoro
self.assertIs(foo(), gencoro)
# decorate foo second time
foo = types.coroutine(foo)
self.assertIs(foo(), gencoro)
def test_genfunc(self):
def gen(): yield
self.assertIs(types.coroutine(gen), gen)
self.assertIs(types.coroutine(types.coroutine(gen)), gen)
self.assertTrue(gen.__code__.co_flags & inspect.CO_ITERABLE_COROUTINE)
self.assertFalse(gen.__code__.co_flags & inspect.CO_COROUTINE)
g = gen()
self.assertTrue(g.gi_code.co_flags & inspect.CO_ITERABLE_COROUTINE)
self.assertFalse(g.gi_code.co_flags & inspect.CO_COROUTINE)
self.assertIs(types.coroutine(gen), gen)
def test_wrapper_object(self):
def gen():
yield
@types.coroutine
def coro():
return gen()
wrapper = coro()
self.assertIn('GeneratorWrapper', repr(wrapper))
self.assertEqual(repr(wrapper), str(wrapper))
self.assertTrue(set(dir(wrapper)).issuperset({
'__await__', '__iter__', '__next__', 'cr_code', 'cr_running',
'cr_frame', 'gi_code', 'gi_frame', 'gi_running', 'send',
'close', 'throw'}))
if __name__ == '__main__':
unittest.main()
| 36.639265 | 113 | 0.551947 |
eead65a75bc6f311a0870e42a8afca6f7c08a687 | 369 | py | Python | yatube/posts/migrations/0003_auto_20220129_1523.py | VladimirRu1975/hw02_community | 2288eae66afab06e5b8402d2287457feb0712984 | ["BSD-3-Clause"] | null | null | null | yatube/posts/migrations/0003_auto_20220129_1523.py | VladimirRu1975/hw02_community | 2288eae66afab06e5b8402d2287457feb0712984 | ["BSD-3-Clause"] | null | null | null | yatube/posts/migrations/0003_auto_20220129_1523.py | VladimirRu1975/hw02_community | 2288eae66afab06e5b8402d2287457feb0712984 | ["BSD-3-Clause"] | null | null | null |
# Generated by Django 2.2.19 on 2022-01-29 12:23

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0002_auto_20220128_1830'),
    ]

    operations = [
        migrations.AlterField(
            model_name='group',
            name='slug',
            field=models.SlugField(),
        ),
    ]
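
# To apply this migration, the standard Django workflow would be
# (shown for context only):
#
#     python manage.py migrate posts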
| 19.421053 | 48 | 0.582656 |
f4bcaab3e2ea850bef8544542cb129f222dcb1eb | 1,389 | py | Python | netmiko/watchguard/fireware_ssh.py | mtuska/netmiko | 90ae69a7c251c13e483f7c52629dbbe4356e7a6d | ["MIT"] | 2,833 | 2015-01-04T20:04:10.000Z | 2022-03-31T13:03:17.000Z | netmiko/watchguard/fireware_ssh.py | mtuska/netmiko | 90ae69a7c251c13e483f7c52629dbbe4356e7a6d | ["MIT"] | 2,137 | 2015-01-28T17:33:41.000Z | 2022-03-31T18:41:21.000Z | netmiko/watchguard/fireware_ssh.py | georgesnow/netmiko | 185f51ca5c24ea2977d6ca31db1ae263aa72cc12 | ["MIT"] | 1,367 | 2015-01-04T20:04:10.000Z | 2022-03-31T19:13:28.000Z |
import time
from typing import Any

from netmiko.base_connection import BaseConnection


class WatchguardFirewareSSH(BaseConnection):
    """
    Implements methods for communicating with Watchguard Firebox firewalls.
    """

    def session_preparation(self) -> None:
        """
        Prepare the session after the connection has been established.

        Set the base prompt for interaction ('#').
        """
        self._test_channel_read()
        self.set_base_prompt()
        # Clear the read buffer
        time.sleep(0.3 * self.global_delay_factor)
        self.clear_buffer()

    def check_config_mode(self, check_string: str = ")#", pattern: str = "#") -> bool:
        """
        Checks if the device is in configuration mode or not.
        """
        return super().check_config_mode(check_string=check_string, pattern=pattern)

    def config_mode(
        self, config_command: str = "configure", pattern: str = r"\#", re_flags: int = 0
    ) -> str:
        return super().config_mode(
            config_command=config_command, pattern=pattern, re_flags=re_flags
        )

    def exit_config_mode(self, exit_config: str = "exit", pattern: str = "#") -> str:
        return super().exit_config_mode(exit_config=exit_config, pattern=pattern)

    def save_config(self, *args: Any, **kwargs: Any) -> str:
        """No save config on Watchguard."""
        pass
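
# Example usage (a sketch, not part of the driver itself): instances of this
# class are normally created via netmiko's ConnectHandler factory, assuming
# the driver is registered under the "watchguard_fireware" device type. The
# host and credentials below are placeholders.
#
# from netmiko import ConnectHandler
#
# conn = ConnectHandler(
#     device_type="watchguard_fireware",
#     host="192.0.2.1",
#     username="admin",
#     password="secret",
# )
# print(conn.find_prompt())
# conn.disconnect()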
| 32.302326 | 88 | 0.643629 |
6c4db15207a7d9326a3a7ccec7a49ed6951f3908 | 2,220 | py | Python | imgproc.py | xuannianz/CRAFT-pytorch | 7a34ae2518f3e94dbfafe6a6128e0e9e4a2f8426 | ["MIT"] | null | null | null | imgproc.py | xuannianz/CRAFT-pytorch | 7a34ae2518f3e94dbfafe6a6128e0e9e4a2f8426 | ["MIT"] | null | null | null | imgproc.py | xuannianz/CRAFT-pytorch | 7a34ae2518f3e94dbfafe6a6128e0e9e4a2f8426 | ["MIT"] | null | null | null |
"""
Copyright (c) 2019-present NAVER Corp.
MIT License
"""

# -*- coding: utf-8 -*-
import numpy as np
from skimage import io
import cv2


def loadImage(img_file):
    img = io.imread(img_file)  # RGB order
    if img.shape[0] == 2:
        img = img[0]
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    if img.shape[2] == 4:
        img = img[:, :, :3]
    img = np.array(img)

    return img


def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
    # should be RGB order
    img = in_img.copy().astype(np.float32)

    img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32)
    img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32)
    return img


def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
    # should be RGB order
    img = in_img.copy()
    img *= variance
    img += mean
    img *= 255.0
    img = np.clip(img, 0, 255).astype(np.uint8)
    return img


def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1):
    height, width, channel = img.shape

    # magnify image size
    target_size = mag_ratio * max(height, width)

    # set original image size
    if target_size > square_size:
        target_size = square_size

    ratio = target_size / max(height, width)

    target_h, target_w = int(height * ratio), int(width * ratio)
    proc = cv2.resize(img, (target_w, target_h), interpolation=interpolation)

    # make canvas and paste image
    target_h32, target_w32 = target_h, target_w
    if target_h % 32 != 0:
        target_h32 = target_h + (32 - target_h % 32)
    if target_w % 32 != 0:
        target_w32 = target_w + (32 - target_w % 32)
    resized = np.zeros((target_h32, target_w32, channel), dtype=np.float32)
    resized[0:target_h, 0:target_w, :] = proc
    target_h, target_w = target_h32, target_w32

    size_heatmap = (int(target_w/2), int(target_h/2))

    return resized, ratio, size_heatmap


def cvt2HeatmapImg(img):
    img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
    img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
    return img
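
# Example usage of the helpers above (a sketch; the file name and the
# score_map variable are hypothetical placeholders):
#
# img = loadImage('sample.jpg')
# img_resized, target_ratio, size_heatmap = resize_aspect_ratio(
#     img, square_size=1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)
# x = normalizeMeanVariance(img_resized)  # network-ready float32 input
# # ...after inference, a score map with values in [0, 1] can be rendered:
# # heatmap = cvt2HeatmapImg(score_map)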
| 28.461538 | 102 | 0.630631 |
e33a3db8c21c74dc9859e8cd23843a4c2050aefb | 3,757 | py | Python | tests/integration/cattletest/core/test_ha.py | pranavs18/cattle | db64d9ffc782d82bbbc44709fb23e63b43c3c549 | ["Apache-2.0"] | null | null | null | tests/integration/cattletest/core/test_ha.py | pranavs18/cattle | db64d9ffc782d82bbbc44709fb23e63b43c3c549 | ["Apache-2.0"] | null | null | null | tests/integration/cattletest/core/test_ha.py | pranavs18/cattle | db64d9ffc782d82bbbc44709fb23e63b43c3c549 | ["Apache-2.0"] | null | null | null |
from common_fixtures import *  # NOQA


def _process_names(processes):
    return set([x.processName for x in processes])


def test_container_ha_default(admin_client, sim_context):
    c = admin_client.create_container(imageUuid=sim_context['imageUuid'],
                                      data={'simForgetImmediately': True})
    c = admin_client.wait_success(c)

    ping = one(admin_client.list_task, name='agent.ping')
    ping.execute()

    def callback():
        processes = process_instances(admin_client, c, type='instance')
        if 'instance.stop' not in _process_names(processes):
            return None
        return processes

    processes = wait_for(callback)
    c = admin_client.wait_success(c)
    assert c.state == 'stopped'

    assert _process_names(processes) == set(['instance.create',
                                             'instance.restart',
                                             'instance.stop'])


def test_container_ha_stop(admin_client, sim_context):
    c = admin_client.create_container(imageUuid=sim_context['imageUuid'],
                                      instanceTriggeredStop='stop',
                                      data={'simForgetImmediately': True})
    c = admin_client.wait_success(c)

    ping = one(admin_client.list_task, name='agent.ping')
    ping.execute()

    def callback():
        processes = process_instances(admin_client, c, type='instance')
        if 'instance.stop' not in _process_names(processes):
            return None
        return processes

    processes = wait_for(callback)
    c = admin_client.wait_success(c)
    assert c.state == 'stopped'

    assert _process_names(processes) == set(['instance.create',
                                             'instance.restart',
                                             'instance.stop'])


def test_container_ha_restart(admin_client, sim_context):
    c = admin_client.create_container(imageUuid=sim_context['imageUuid'],
                                      instanceTriggeredStop='restart',
                                      data={'simForgetImmediately': True})
    c = admin_client.wait_success(c)

    ping = one(admin_client.list_task, name='agent.ping')
    ping.execute()

    def callback():
        processes = process_instances(admin_client, c, type='instance')
        if 'instance.start' not in _process_names(processes):
            return None
        return processes

    processes = wait_for(callback)
    c = admin_client.wait_success(c)
    assert c.state == 'running'

    assert _process_names(processes) == set(['instance.create',
                                             'instance.restart',
                                             'instance.stop',
                                             'instance.start'])


def test_container_ha_remove(admin_client, sim_context):
    c = admin_client.create_container(imageUuid=sim_context['imageUuid'],
                                      instanceTriggeredStop='remove',
                                      data={'simForgetImmediately': True})
    c = admin_client.wait_success(c)

    ping = one(admin_client.list_task, name='agent.ping')
    ping.execute()

    def callback():
        processes = process_instances(admin_client, c, type='instance')
        if 'instance.remove' not in _process_names(processes):
            return None
        return processes

    processes = wait_for(callback)
    c = admin_client.wait_success(c)
    assert c.state == 'removed'

    assert _process_names(processes) == set(['instance.create',
                                             'instance.restart',
                                             'instance.stop',
                                             'instance.remove'])
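
# Note: the four tests above differ only in the container's
# 'instanceTriggeredStop' policy (default, 'stop', 'restart', 'remove');
# each asserts which lifecycle processes run after the simulated agent
# forgets the instance and the 'agent.ping' task re-syncs its state.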
| 36.475728 | 74 | 0.570934 |
c355df45aa4b60543f2b6b996f3bcb1210d92753 | 1,634 | py | Python | oo/pessoa.py | Marcio-Souza/pythonbirds | 4a86e28e4a91a5f33bd17094e334e58d17ef630b | ["MIT"] | null | null | null | oo/pessoa.py | Marcio-Souza/pythonbirds | 4a86e28e4a91a5f33bd17094e334e58d17ef630b | ["MIT"] | null | null | null | oo/pessoa.py | Marcio-Souza/pythonbirds | 4a86e28e4a91a5f33bd17094e334e58d17ef630b | ["MIT"] | null | null | null |
class Pessoa:
    olhos = 2

    def __init__(self, *filhos, nome=None, idade=35):
        self.idade = idade
        self.nome = nome
        self.filhos = list(filhos)

    def cumprimentar(self):
        return f'Olá, meu nome é {self.nome}'

    @staticmethod
    def metodo_estatico():
        return 42

    @classmethod
    def nome_e_atributos_de_classe(cls):
        return f'{cls} - olhos {cls.olhos}'


class Homem(Pessoa):
    def cumprimentar(self):
        cumprimentar_da_classe = super().cumprimentar()
        return f'{cumprimentar_da_classe}. Aperto de mão'


class Mutante(Pessoa):
    olhos = 3


if __name__ == '__main__':
    renzo = Mutante(nome='Renzo')
    luciano = Homem(renzo, nome='Luciano')
    print(Pessoa.cumprimentar(luciano))
    print(id(luciano))
    print(luciano.cumprimentar())
    print(luciano.nome)
    print(luciano.idade)
    for filho in luciano.filhos:
        print(filho.nome)
    luciano.sobrenome = 'Ramalho'
    del luciano.filhos
    luciano.olhos = 1
    del luciano.olhos
    print(renzo.__dict__)
    print(luciano.__dict__)
    print(Pessoa.olhos)
    print(luciano.olhos)
    print(renzo.olhos)
    print(id(Pessoa.olhos), id(luciano.olhos), id(renzo.olhos))
    print(Pessoa.metodo_estatico(), luciano.metodo_estatico())
    print(Pessoa.nome_e_atributos_de_classe(), luciano.nome_e_atributos_de_classe())
    pessoa = Pessoa('Anonimo')
    print(isinstance(pessoa, Pessoa))
    print(isinstance(pessoa, Homem))
    print(isinstance(renzo, Pessoa))
    print(isinstance(renzo, Homem))
    print(renzo.olhos)
    print(luciano.cumprimentar())
    print(renzo.cumprimentar())
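
# (The identifiers are Portuguese: Pessoa = person, olhos = eyes,
# filhos = children, cumprimentar = greet. The demo above exercises class
# vs. instance attributes, inheritance, and static/class methods.)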
| 26.786885 | 84 | 0.667075 |
463a35ba621a0e645661a747732f4813670c8a66 | 7,225 | py | Python | notebooks/Instaseis-Syngine/Instaseis_Tutorial_04_finite_source_solution.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | ["CC-BY-3.0"] | 3 | 2020-07-11T10:01:39.000Z | 2020-12-16T14:26:03.000Z | notebooks/Instaseis-Syngine/Instaseis_Tutorial_04_finite_source_solution.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | ["CC-BY-3.0"] | null | null | null | notebooks/Instaseis-Syngine/Instaseis_Tutorial_04_finite_source_solution.py | krischer/seismo_live_build | e4e8e59d9bf1b020e13ac91c0707eb907b05b34f | ["CC-BY-3.0"] | 3 | 2020-11-11T05:05:41.000Z | 2022-03-12T09:36:24.000Z |
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.4'
#     jupytext_version: 1.2.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---
# <div style='background-image: url("../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Instaseis Tutorial</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Part 4: Finite Sources</div>
# </div>
# </div>
# </div>
# Seismo-Live: http://seismo-live.org
#
# ##### Authors:
# * Martin van Driel ([@martinvandriel](https://github.com/martinvandriel))
# * Lion Krischer ([@krischer](https://github.com/krischer))
# ---
# <img style="width:50%" src="images/logo.png">
# ## Advanced Exercise 2: Finite Source Effects
#
# For earthquakes with magnitudes up to about 5 recorded at teleseismic distances, approximating the fault by a point source is a reasonable approach. However, for larger earthquakes with longer rupture durations this approximation is no longer valid. In this exercise, you will compare the point source approximation with finite source solutions to understand its limitations.
#
# For three of the earthquakes we use in this tutorial, USGS provides finite fault solutions:
# the recent event in [Nepal](http://earthquake.usgs.gov/earthquakes/eventpage/us20002926#scientific_finitefault),
# the largest [aftershock](http://earthquake.usgs.gov/earthquakes/eventpage/us20002ejl#scientific_finitefault)
# and the one in [Chile](http://earthquake.usgs.gov/earthquakes/eventpage/usc000nzvd#scientific_finitefault). This is the fault solution and slip as a function of time for the Nepal M7.9 event:
# <p style="float: left; font-size: 9pt; text-align: center; width: 49%; margin-right: 1%; margin-bottom: 0.5em;"><img src="images/finite_source_fault.png" style="width: 100%">Fault representation (image: USGS)</p><p style="float: right; font-size: 9pt; text-align: center; width: 49%; margin-right: 1%; margin-bottom: 0.5em;"><img src="images/finite_source_stf.png" style="width: 100%">Source Time Function (image: USGS)</p><p style="clear: both;">
# -----
#
# Basic lines to set up the notebook and some paths.
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
import obspy
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 8)
# Import Instaseis and open the database:
import instaseis
db = instaseis.open_db("data/database")
# -----
# In Instaseis, a finite fault is represented as a set of point sources, where each point source represents one of the fault patches with its individual source time function. This functionality is provided by the `instaseis.FiniteSource` object [(see Documentation)](http://instaseis.net/source.html#instaseis.source.FiniteSource). It can be initialized in two ways: from a list of point sources, or more conveniently by reading \*.param files provided by USGS or standard rupture format (\*.srf) files (these can also be used in the GUI).
finite_source = instaseis.FiniteSource.from_usgs_param_file(
'data/events/finite_source/FINITE_SOURCE_2015_05_12__Mw_7_2_Nepal.param')
print(finite_source)
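# As a minimal sketch of the other construction path mentioned above, a
# `FiniteSource` can also be assembled directly from a list of point sources
# (this assumes the `pointsources` keyword of the instaseis API; the
# coordinates and moment-tensor values below are made up purely for
# illustration):
src_a = instaseis.Source(latitude=27.8, longitude=85.5, depth_in_m=12000.0,
                         m_rr=1e19, m_tt=-1e19)
src_b = instaseis.Source(latitude=27.9, longitude=85.6, depth_in_m=14000.0,
                         m_rr=1e19, m_tt=-1e19)
manual_source = instaseis.FiniteSource(pointsources=[src_a, src_b])
print(manual_source)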
# An equivalent point source (the centroid) can be computed as the sum of all point sources weighted by their moment:
finite_source.compute_centroid()
print(finite_source.CMT)
# The hypo- and epicenter can be found as the fault patch that ruptures first:
finite_source.find_hypocenter()
print('hypocenter latitude:', finite_source.hypocenter_latitude,
'longitude:', finite_source.hypocenter_longitude,
'depth:', finite_source.hypocenter_depth_in_m / 1e3)
# **Task:** Compare the seismograms for three different representations of the source:
#
# * A point source with a simple gaussian source time function (using CMTSOLUTION or quakeml files),
# * the CMT solution using the more complex source time function provided by `finite_source.CMT`
# * the full finite source solution using the `FiniteSource` object and `db.get_seismograms_finite_source()`
#
# **Note:** First, you have to adapt the sampling of the source time functions in the finite source to the database, which works like this:
# +
# reloading finite source here to be sure to have a clean source time function
finite_source = instaseis.FiniteSource.from_usgs_param_file(
'data/events/finite_source/FINITE_SOURCE_2015_05_12__Mw_7_2_Nepal.param')
# prepare the source time functions to be at the same sampling as the database
# first use enough samples such that the lowpassed stf will still be correctly represented
nsamp = int(db.info.period / finite_source[0].dt) * 50
finite_source.resample_sliprate(dt=finite_source[0].dt, nsamp=nsamp)
# lowpass to avoid aliasing
finite_source.lp_sliprate(freq=1.0/db.info.period)
# finally resample to the same sampling as the database
finite_source.resample_sliprate(dt=db.info.dt, nsamp=db.info.npts)
finite_source.compute_centroid()
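# Quick sanity check (a sketch, relying on the per-patch `.dt` attribute used
# above): after resampling, every source time function should share the
# database sampling interval.
assert abs(finite_source[0].dt - db.info.dt) < 1e-9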
# + {"tags": ["exercise"]}
# load receivers from stations xml file
receivers = instaseis.Receiver.parse('data/stations/all_stations.xml')
simple_source = instaseis.Source.parse(
'data/events/quakeml/GCMT_2015_04_25__Mw_7_9.xml')
# compute seismogram with CMT solution and a simple source time function (gaussian):
tr_simple =
# compute seismogram with CMT solution and source time function computed as the
# sum of all source time functions in the finite source (reconvolve_stf=True):
tr_cmt =
# compute seismogram for finite source
tr_finite =
plt.plot(tr_simple.times(), tr_simple.data, label='simple')
plt.plot(...)
plt.plot(...)
plt.legend()
plt.xlim(0, tr_simple.times()[-1])
plt.show()
# + {"tags": ["solution"], "cell_type": "markdown"}
# **Solution**
# + {"tags": ["solution"]}
# load receivers from stations xml file
receivers = instaseis.Receiver.parse('data/stations/all_stations.xml')
simple_source = instaseis.Source.parse(
'data/events/quakeml/GCMT_2015_04_25__Mw_7_9.xml')
# compute seismogram with CMT solution and a simple source time function (gaussian):
tr_simple = db.get_seismograms(
simple_source, receivers[0], components=('Z'), dt=1.0)[0]
# compute seismogram with CMT solution and source time function computed as the
# sum of all source time functions in the finite source (reconvolve_stf=True):
tr_cmt = db.get_seismograms(
finite_source.CMT, receivers[0], components=('Z'),
dt=1.0, reconvolve_stf=True, remove_source_shift=False)[0]
# compute seismogram for finite source
tr_finite = db.get_seismograms_finite_source(
finite_source, receivers[0], components=('Z'), dt=1.0)[0]
plt.plot(tr_simple.times(), tr_simple.data, label='simple')
plt.plot(tr_cmt.times(), tr_cmt.data, label='cmt')
plt.plot(tr_finite.times(), tr_finite.data, label='finite')
plt.legend()
plt.xlim(0, tr_simple.times()[-1])
plt.show()
| 44.325153 | 533 | 0.740484 |
58496c5dc7dd24e5e1b00a7b2a61bc1b683a3806 | 3,602 | py | Python | airflow/ti_deps/deps/prev_dagrun_dep.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 8 | 2017-04-20T16:15:44.000Z | 2020-10-11T13:44:10.000Z | airflow/ti_deps/deps/prev_dagrun_dep.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 219 | 2017-03-15T18:40:16.000Z | 2022-02-28T22:52:43.000Z | airflow/ti_deps/deps/prev_dagrun_dep.py | IGIT-CN/airflow | a6e5bcd59198afe5716813e84ebc4c59eade532c | [
"Apache-2.0"
] | 3 | 2016-07-14T21:51:10.000Z | 2020-10-12T13:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.state import State
class PrevDagrunDep(BaseTIDep):
"""
    Is the past dagrun in a state that allows this task instance to run?
    For example, if we are depending on past, did this task instance's task
    complete in the previous dagrun?
"""
NAME = "Previous Dagrun State"
IGNOREABLE = True
IS_TASK_DEP = True
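    # A minimal usage sketch (hypothetical `ti`, `session` and `dep_ctx`
    # objects; `get_dep_statuses` is inherited from BaseTIDep and wraps
    # _get_dep_statuses below):
    #
    #   for status in PrevDagrunDep().get_dep_statuses(ti, session, dep_ctx):
    #       print(status.passed, status.reason)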
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_depends_on_past:
yield self._passing_status(
reason="The context specified that the state of past DAGs could be "
"ignored.")
return
if not ti.task.depends_on_past:
yield self._passing_status(
reason="The task did not have depends_on_past set.")
return
# Don't depend on the previous task instance if we are the first task
dag = ti.task.dag
if dag.catchup:
if dag.previous_schedule(ti.execution_date) is None:
yield self._passing_status(
reason="This task does not have a schedule or is @once"
)
return
if dag.previous_schedule(ti.execution_date) < ti.task.start_date:
yield self._passing_status(
reason="This task instance was the first task instance for its task.")
return
else:
dr = ti.get_dagrun()
last_dagrun = dr.get_previous_dagrun() if dr else None
if not last_dagrun:
yield self._passing_status(
reason="This task instance was the first task instance for its task.")
return
previous_ti = ti.previous_ti
if not previous_ti:
yield self._failing_status(
reason="depends_on_past is true for this task's DAG, but the previous "
"task instance has not run yet.")
return
if previous_ti.state not in {State.SKIPPED, State.SUCCESS}:
yield self._failing_status(
reason="depends_on_past is true for this task, but the previous task "
"instance {0} is in the state '{1}' which is not a successful "
"state.".format(previous_ti, previous_ti.state))
previous_ti.task = ti.task
if (ti.task.wait_for_downstream and
not previous_ti.are_dependents_done(session=session)):
yield self._failing_status(
reason="The tasks downstream of the previous task instance {0} haven't "
"completed (and wait_for_downstream is True).".format(previous_ti))
| 41.883721 | 90 | 0.640478 |
4e3c5bd1f295508606e60299794f1d3b2c3d9d4f | 5,785 | py | Python | smokey/test/test_hdfs_api.py | godatadriven/hdp-smokey | b60c6941d77b66b4f84747d5fe10c972db88f7f7 | [
"Apache-2.0"
] | 1 | 2018-02-11T14:05:06.000Z | 2018-02-11T14:05:06.000Z | smokey/test/test_hdfs_api.py | godatadriven/hdp-smokey | b60c6941d77b66b4f84747d5fe10c972db88f7f7 | [
"Apache-2.0"
] | null | null | null | smokey/test/test_hdfs_api.py | godatadriven/hdp-smokey | b60c6941d77b66b4f84747d5fe10c972db88f7f7 | [
"Apache-2.0"
] | 1 | 2021-01-29T18:08:53.000Z | 2021-01-29T18:08:53.000Z | # flake8: noqa
import unittest
from unittest.mock import Mock, patch
import hdfs.hdfs_api as api
class TestHdfsApi(unittest.TestCase):
def setUp(self):
self.hdfs = api.HdfsApi(request_timeout=2)
self.correct_output = """
"FSCK started by tester (auth:KERBEROS_SSL) from /192.168.0.489 for path /user/tester/subdir/20161221/host.hortonworks.com.log at Wed Feb 22 10:19:52 CET 2017
/user/tester/subdir/20161221/host.hortonworks.com.log 1066336015 bytes, 8 block(s): OK
0. BP-1605498153-192.168.0.241-1426773903491:blk_1084465072_10763065 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.32:1019, /Group1/DC1-X/RACK1/192.168.2.29:1019, /Group1/DC1-X/RACK1/192.168.2.14:1019]
1. BP-1605498153-192.168.0.241-1426773903491:blk_1084465073_10763066 len=134217728 repl=3 [/Group1/DC2-X/RACK2/192.168.3.9:1019, /Group1/DC1-X/RACK1/192.168.2.9:1019, /Group1/DC1-X/RACK1/192.168.2.14:1019]
2. BP-1605498153-192.168.0.241-1426773903491:blk_1084465074_10763067 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.10:1019, /Group1/DC1-X/RACK1/192.168.2.12:1019, /Group1/DC2-X/RACK2/192.168.3.9:1019]
3. BP-1605498153-192.168.0.241-1426773903491:blk_1084465075_10763068 len=134217728 repl=3 [/Group1/DC2-X/RACK2/192.168.3.11:1019, /Group1/DC1-X/RACK1/192.168.2.38:1019, /Group1/DC1-X/RACK1/192.168.2.33:1019]
4. BP-1605498153-192.168.0.241-1426773903491:blk_1084465076_10763069 len=134217728 repl=3 [/Group1/DC2-X/RACK2/192.168.3.11:1019, /Group1/DC2-X/RACK2/192.168.3.16:1019, /Group1/DC1-X/RACK1/192.168.2.33:1019]
5. BP-1605498153-192.168.0.241-1426773903491:blk_1084465077_10763070 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.10:1019, /Group1/DC1-X/RACK1/192.168.2.12:1019, /Group1/DC2-X/RACK2/192.168.3.10:1019]
6. BP-1605498153-192.168.0.241-1426773903491:blk_1084465078_10763071 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.10:1019, /Group1/DC2-X/RACK2/192.168.3.8:1019, /Group1/DC1-X/RACK1/192.168.2.7:1019]
7. BP-1605498153-192.168.0.241-1426773903491:blk_1084465085_10763078 len=126811919 repl=3 [/Group1/DC1-X/RACK1/192.168.2.17:1019, /Group1/DC2-X/RACK2/192.168.3.7:1019, /Group1/DC2-X/RACK2/192.168.3.16:1019]
Status: HEALTHY
Total size:\t1066336015 B
Total dirs:\t0
Total files:\t1
Total symlinks:\t\t0
Total blocks (validated):\t8 (avg. block size 133292001 B)
Minimally replicated blocks:\t8 (100.0 %)
Over-replicated blocks:\t0 (0.0 %)
Under-replicated blocks:\t0 (0.0 %)
Mis-replicated blocks:\t\t0 (0.0 %)
Default replication factor:\t3
Average block replication:\t3.0
Corrupt blocks:\t\t0
Missing replicas:\t\t0 (0.0 %)
Number of data-nodes:\t\t27
Number of racks:\t\t3
FSCK ended at Wed Feb 22 10:19:52 CET 2017 in 2 milliseconds
The filesystem under path '/user/tester/subdir/20161221/host.hortonworks.com.log' is HEALTHY"
"""
self.empty_output = """
"FSCK started by tester (auth:KERBEROS_SSL) from /192.168.1.19 for path /user/tester/subdir/20161221/host.hortonworks.com.log at Wed Feb 22 10:29:05 CET 2017
FSCK ended at Wed Feb 22 10:29:05 CET 2017 in 1 milliseconds
Operation category READ is not supported in state standby
Fsck on path '/user/tester/subdir/20161221/host.hortonworks.com.log' FAILED"
"""
@patch('requests.request')
def test_mocking_request(self, mock_request):
mock_request.return_value = Mock(ok=True, status_code=200, text=self.correct_output)
hdfs_response = self.hdfs.get_block_info_for_file("/user/tester/subdir/20161221/host.hortonworks.com.log")
self.assertEqual(self.correct_output, hdfs_response.text)
@patch('requests.request')
def test_check_response_status(self, mock_request):
mock_request.return_value = Mock(ok=True, status_code=200, text=self.correct_output)
hdfs_response = self.hdfs.get_block_info_for_file("/user/tester/subdir/20161221/host.hortonworks.com.log")
self.hdfs._check_response_status(hdfs_response)
@patch('requests.request')
def test_check_response_status_wrong_response_status_code(self, mock_request):
mock_request.return_value = Mock(ok=True, status_code=403, text=self.empty_output)
with self.assertRaises(api.HdfsRequestError):
hdfs_response = self.hdfs.get_block_info_for_file("/user/tester/subdir/20161221/host.hortonworks.com.log")
self.hdfs._check_response_status(hdfs_response)
def test_get_first_block_info(self):
expected = "0. BP-1605498153-192.168.0.241-1426773903491:blk_1084465072_10763065 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.32:1019, /Group1/DC1-X/RACK1/192.168.2.29:1019, /Group1/DC1-X/RACK1/192.168.2.14:1019]"
blockinfo = self.hdfs.get_first_block_info("/user/tester/subdir/20161221/host.hortonworks.com.log",
self.correct_output)
self.assertEqual(expected, blockinfo)
def test_get_first_block_info_from_empty_response(self):
with self.assertRaises(api.HdfsRequestError):
self.hdfs.get_first_block_info("/user/tester/subdir/20161221/host.hortonworks.com.log", self.empty_output)
def test_get_location_of_first_block(self):
first_block_info = "0. BP-1605498153-192.168.0.241-1426773903491:blk_1084465072_10763065 len=134217728 repl=3 [/Group1/DC1-X/RACK1/192.168.2.32:1019, /Group1/DC1-X/RACK1/192.168.2.29:1019, /Group1/DC1-X/RACK1/192.168.2.14:1019]"
result = self.hdfs.get_location_of_first_block(first_block_info)
self.assertEqual("192.168.2.32", result)
def test_get_location_of_first_block_in_unknown_response(self):
first_block_info = "0. BP-1605498153-192.168.0.241-1426773903491:blk_1084465072_10763065 len=134217728 repl=3"
with self.assertRaises(api.HdfsRequestError):
self.hdfs.get_location_of_first_block(first_block_info)
| 61.542553 | 236 | 0.747969 |
3c96be284b08338dd3935f89e00bb090ff43ccbe | 2,467 | py | Python | django_matplotlib/views.py | shelldweller/django-matplotlib | f2d7ecb2a263b8b01f7c6c0db63223e5d8c5b0d2 | [
"MIT",
"Unlicense"
] | 2 | 2018-07-13T03:35:04.000Z | 2020-03-18T18:06:32.000Z | django_matplotlib/views.py | shelldweller/django-matplotlib | f2d7ecb2a263b8b01f7c6c0db63223e5d8c5b0d2 | [
"MIT",
"Unlicense"
] | null | null | null | django_matplotlib/views.py | shelldweller/django-matplotlib | f2d7ecb2a263b8b01f7c6c0db63223e5d8c5b0d2 | [
"MIT",
"Unlicense"
] | null | null | null | import django_matplotlib.settings as mpl_settings
from django_matplotlib.mapping import extract_kwargs, KNOWN_CHART_LIST, str2num, str2bool
import matplotlib
from django.http import HttpResponse, HttpResponseNotFound
matplotlib.use(mpl_settings.BACKEND)
from matplotlib import pyplot
def plot_view(request, plot_type, format="png"):
"""
Universal view for generating plots. Valid plot types are defined in
`django_matplotlib.mapping.KNOWN_CHART_LIST`
"""
# initial validation
try:
content_type = mpl_settings.FORMATS[format]
except KeyError:
return HttpResponseNotFound("Unknown output format: '%s'" % format)
if plot_type not in KNOWN_CHART_LIST:
return HttpResponseNotFound("Unknown plot type: '%s'" % plot_type)
# prepare plot
fig = pyplot.figure(**extract_kwargs("figure", request.GET))
# FIXME: need to be able to set other things like:
#plt.ylabel('Scores')
#plt.title('Scores by group and gender')
#plt.xticks(ind+width/2., ('G1', 'G2', 'G3', 'G4', 'G5') )
#plt.yticks(np.arange(0,81,10))
#plt.legend( (p1[0], p2[0]), ('Men', 'Women') )
    ax = fig.add_subplot(111)  # 111 = 1x1 grid, first plot: the Axes we draw on
plot_object = getattr(ax, plot_type) # we shouldn't really get AttributeError here
# initialize args and draw plot
kwargs = extract_kwargs(plot_type, request.GET)
# TODO: plot_object(**kwargs) error handling
# TypeError if not enough arguments
# AssertionError if arguments don't make sense
plot_object(**kwargs)
# set plot properties
if "ylabel" in request.GET:
ax.set_ylabel(request.GET.get("ylabel"))
if "xlabel" in request.GET:
ax.set_xlabel(request.GET.get("xlabel"))
if "title" in request.GET:
ax.set_title(request.GET.get("title"))
if "xtickoffset" in request.GET: # FIXME: xtickoffset is probably a misfeature
offset = str2num(request.GET.get("xtickoffset"))
xticks = ax.get_xticks()
        ax.set_xticks([x + offset for x in xticks])
ax.set_xticklabels(xticks)
if "legend" in request.GET and str2bool(request.GET.get("legend")):
ax.legend()
if str2bool(request.GET.get("grid", "0")):
ax.grid()
# set_xticklabels
#
# prepare and output HTTP response
response=HttpResponse(content_type=content_type)
fig.savefig(response, format=format)
return response
| 35.753623 | 89 | 0.675719 |
f7622a47090f6e6a819c495ef27a4a1e1d020d7b | 145,923 | py | Python | src/config/api-server/vnc_cfg_api_server.py | sagarc-contrail/contrail-controller | 834302367f3ff81f1ce93f4036b6b3788dfd6994 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server.py | sagarc-contrail/contrail-controller | 834302367f3ff81f1ce93f4036b6b3788dfd6994 | [
"Apache-2.0"
] | null | null | null | src/config/api-server/vnc_cfg_api_server.py | sagarc-contrail/contrail-controller | 834302367f3ff81f1ce93f4036b6b3788dfd6994 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
This is the main module in vnc_cfg_api_server package. It manages interaction
between http/rest, address management, authentication and database interfaces.
"""
from gevent import monkey
monkey.patch_all()
from gevent import hub
# Requests from the neutron plugin to the API server can carry very long
# URLs; raise gevent's request-line limit accordingly.
import gevent.pywsgi
gevent.pywsgi.MAX_REQUEST_LINE = 65535
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import ConfigParser
import functools
import hashlib
import logging
import logging.config
import signal
import os
import re
import random
import socket
from cfgm_common import jsonutils as json
from provision_defaults import *
import uuid
import copy
from pprint import pformat
from cStringIO import StringIO
# import GreenletProfiler
from cfgm_common import vnc_cgitb
logger = logging.getLogger(__name__)
"""
Following is needed to silence warnings on every request when keystone
auth_token middleware + Sandesh is used. Keystone or Sandesh alone
do not produce these warnings.
Exception AttributeError: AttributeError(
"'_DummyThread' object has no attribute '_Thread__block'",)
in <module 'threading' from '/usr/lib64/python2.7/threading.pyc'> ignored
See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
for more information.
"""
import threading
threading._DummyThread._Thread__stop = lambda x: 42
CONFIG_VERSION = '1.0'
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024000
import utils
import context
from context import get_request, get_context, set_context, use_context
from context import ApiContext
import vnc_cfg_types
from vnc_db import VncDbClient
import cfgm_common
from cfgm_common import ignore_exceptions
from cfgm_common.uve.vnc_api.ttypes import VncApiCommon, VncApiConfigLog,\
VncApiDebug, VncApiInfo, VncApiNotice, VncApiError
from cfgm_common import illegal_xml_chars_RE
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType,\
NodeTypeNames, INSTANCE_ID_DEFAULT
from provision_defaults import Provision
from vnc_quota import *
from gen.resource_xsd import *
from gen.resource_common import *
from gen.vnc_api_client_gen import all_resource_type_tuples
import cfgm_common
from cfgm_common.utils import cgitb_hook
from cfgm_common.rest import LinkObject, hdr_server_tenant
from cfgm_common.exceptions import *
from cfgm_common.vnc_extensions import ExtensionManager
import gen.resource_xsd
import vnc_addr_mgmt
import vnc_auth
import vnc_auth_keystone
import vnc_perms
import vnc_rbac
from cfgm_common.uve.cfgm_cpuinfo.ttypes import ModuleCpuState, ModuleCpuStateTrace
from cfgm_common.buildinfo import build_info
from cfgm_common.vnc_api_stats import log_api_stats
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
# from gen_py.vnc_api.ttypes import *
import netifaces
from pysandesh.connection_info import ConnectionState
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from sandesh.traces.ttypes import RestApiTrace
from vnc_bottle import get_bottle_server
from cfgm_common.vnc_greenlets import VncGreenlet
_ACTION_RESOURCES = [
{'uri': '/prop-collection-get', 'link_name': 'prop-collection-get',
'method': 'GET', 'method_name': 'prop_collection_http_get'},
{'uri': '/prop-collection-update', 'link_name': 'prop-collection-update',
'method': 'POST', 'method_name': 'prop_collection_http_post'},
{'uri': '/ref-update', 'link_name': 'ref-update',
'method': 'POST', 'method_name': 'ref_update_http_post'},
{'uri': '/ref-relax-for-delete', 'link_name': 'ref-relax-for-delete',
'method': 'POST', 'method_name': 'ref_relax_for_delete_http_post'},
{'uri': '/fqname-to-id', 'link_name': 'name-to-id',
'method': 'POST', 'method_name': 'fq_name_to_id_http_post'},
{'uri': '/id-to-fqname', 'link_name': 'id-to-name',
'method': 'POST', 'method_name': 'id_to_fq_name_http_post'},
{'uri': '/useragent-kv', 'link_name': 'useragent-keyvalue',
'method': 'POST', 'method_name': 'useragent_kv_http_post'},
{'uri': '/db-check', 'link_name': 'database-check',
'method': 'POST', 'method_name': 'db_check'},
{'uri': '/fetch-records', 'link_name': 'fetch-records',
'method': 'POST', 'method_name': 'fetch_records'},
{'uri': '/start-profile', 'link_name': 'start-profile',
'method': 'POST', 'method_name': 'start_profile'},
{'uri': '/stop-profile', 'link_name': 'stop-profile',
'method': 'POST', 'method_name': 'stop_profile'},
{'uri': '/list-bulk-collection', 'link_name': 'list-bulk-collection',
'method': 'POST', 'method_name': 'list_bulk_collection_http_post'},
{'uri': '/obj-perms', 'link_name': 'obj-perms',
'method': 'GET', 'method_name': 'obj_perms_http_get'},
{'uri': '/chown', 'link_name': 'chown',
'method': 'POST', 'method_name': 'obj_chown_http_post'},
{'uri': '/chmod', 'link_name': 'chmod',
'method': 'POST', 'method_name': 'obj_chmod_http_post'},
{'uri': '/multi-tenancy', 'link_name': 'multi-tenancy',
'method': 'PUT', 'method_name': 'mt_http_put'},
{'uri': '/aaa-mode', 'link_name': 'aaa-mode',
'method': 'PUT', 'method_name': 'aaa_mode_http_put'},
]
def error_400(err):
return err.body
# end error_400
def error_403(err):
return err.body
# end error_403
def error_404(err):
return err.body
# end error_404
def error_409(err):
return err.body
# end error_409
@bottle.error(412)
def error_412(err):
return err.body
# end error_412
def error_500(err):
return err.body
# end error_500
def error_503(err):
return err.body
# end error_503
class VncApiServer(object):
"""
This is the manager class co-ordinating all classes present in the package
"""
_INVALID_NAME_CHARS = set(':')
_GENERATE_DEFAULT_INSTANCE = [
'namespace',
'project',
'virtual_network', 'virtual-network',
'network_ipam', 'network-ipam',
]
def __new__(cls, *args, **kwargs):
obj = super(VncApiServer, cls).__new__(cls, *args, **kwargs)
obj.api_bottle = bottle.Bottle()
obj.route('/', 'GET', obj.homepage_http_get)
obj.api_bottle.error_handler = {
400: error_400,
403: error_403,
404: error_404,
409: error_409,
500: error_500,
503: error_503,
}
cls._generate_resource_crud_methods(obj)
cls._generate_resource_crud_uri(obj)
for act_res in _ACTION_RESOURCES:
http_method = act_res.get('method', 'POST')
method_name = getattr(obj, act_res['method_name'])
obj.route(act_res['uri'], http_method, method_name)
return obj
# end __new__
@classmethod
def _validate_complex_type(cls, dict_cls, dict_body):
if dict_body is None:
return
for key, value in dict_body.items():
if key not in dict_cls.attr_fields:
raise ValueError('class %s does not have field %s' % (
str(dict_cls), key))
attr_type_vals = dict_cls.attr_field_type_vals[key]
attr_type = attr_type_vals['attr_type']
restrictions = attr_type_vals['restrictions']
is_array = attr_type_vals.get('is_array', False)
if value is None:
continue
if is_array:
if not isinstance(value, list):
raise ValueError('Field %s must be a list. Received value: %s'
% (key, str(value)))
values = value
else:
values = [value]
if attr_type_vals['is_complex']:
attr_cls = cfgm_common.utils.str_to_class(attr_type, __name__)
for item in values:
cls._validate_complex_type(attr_cls, item)
else:
simple_type = attr_type_vals['simple_type']
for item in values:
cls._validate_simple_type(key, attr_type,
simple_type, item,
restrictions)
# end _validate_complex_type
@classmethod
def _validate_communityattribute_type(cls, value):
poss_values = ["no-export",
"accept-own",
"no-advertise",
"no-export-subconfed",
"no-reoriginate"]
if value in poss_values:
return
res = re.match('[0-9]+:[0-9]+', value)
if res is None:
raise ValueError('Invalid community format %s. '
'Change to \'number:number\''
% value)
asn = value.split(':')
if int(asn[0]) > 65535:
raise ValueError('Out of range ASN value %s. '
'ASN values cannot exceed 65535.'
% value)
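    # Illustrative inputs for the validator above: "no-export" and
    # "64512:100" pass; "65536:100" raises because the ASN part exceeds
    # 65535; "64512-100" raises because it matches neither accepted form.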
@classmethod
def _validate_serviceinterface_type(cls, value):
poss_values = ["management",
"left",
"right"]
if value in poss_values:
return
res = re.match('other[0-9]*', value)
if res is None:
raise ValueError('Invalid service interface type %s. '
'Valid values are: management|left|right|other[0-9]*'
% value)
@classmethod
def _validate_simple_type(cls, type_name, xsd_type, simple_type, value, restrictions=None):
if value is None:
return
elif xsd_type in ('unsignedLong', 'integer'):
if not isinstance(value, (int, long)):
# If value is not an integer, then try to convert it to integer
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('%s: integer value expected instead of %s' %(
type_name, value))
if restrictions:
if not (int(restrictions[0]) <= value <= int(restrictions[1])):
raise ValueError('%s: value must be between %s and %s' %(
type_name, restrictions[0], restrictions[1]))
elif xsd_type == 'boolean':
if not isinstance(value, bool):
raise ValueError('%s: true/false expected instead of %s' %(
type_name, value))
elif xsd_type == 'string' and simple_type == 'CommunityAttribute':
cls._validate_communityattribute_type(value)
elif xsd_type == 'string' and simple_type == 'ServiceInterfaceType':
cls._validate_serviceinterface_type(value)
else:
if not isinstance(value, basestring):
raise ValueError('%s: string value expected instead of %s' %(
type_name, value))
if restrictions and value not in restrictions:
raise ValueError('%s: value must be one of %s' % (
type_name, str(restrictions)))
return value
# end _validate_simple_type
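    # Behavior sketch for the validator above ('mtu' is just an illustrative
    # field name): integer-typed fields coerce numeric strings and reject
    # anything else, e.g.
    #
    #   cls._validate_simple_type('mtu', 'integer', None, '1500')  # -> 1500
    #   cls._validate_simple_type('mtu', 'integer', None, 'abc')   # ValueError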
def _validate_props_in_request(self, resource_class, obj_dict):
for prop_name in resource_class.prop_fields:
prop_field_types = resource_class.prop_field_types[prop_name]
is_simple = not prop_field_types['is_complex']
prop_type = prop_field_types['xsd_type']
restrictions = prop_field_types['restrictions']
simple_type = prop_field_types['simple_type']
is_list_prop = prop_name in resource_class.prop_list_fields
is_map_prop = prop_name in resource_class.prop_map_fields
prop_value = obj_dict.get(prop_name)
if not prop_value:
continue
if is_simple:
try:
obj_dict[prop_name] = self._validate_simple_type(prop_name,
prop_type, simple_type,
prop_value, restrictions)
except Exception as e:
err_msg = 'Error validating property ' + str(e)
return False, err_msg
else:
continue
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
if isinstance(prop_value, dict):
try:
self._validate_complex_type(prop_cls, prop_value)
except Exception as e:
err_msg = 'Error validating property %s value %s ' %(
prop_name, prop_value)
err_msg += str(e)
return False, err_msg
else: # complex-type + value isn't dict or wrapped in list or map
err_msg = 'Error in property %s type %s value of %s ' %(
prop_name, prop_cls, prop_value)
return False, err_msg
# end for all properties
return True, ''
# end _validate_props_in_request
def _validate_refs_in_request(self, resource_class, obj_dict):
for ref_name in resource_class.ref_fields:
ref_fld_types_list = list(resource_class.ref_field_types[ref_name])
ref_link_type = ref_fld_types_list[1]
if ref_link_type == 'None':
continue
attr_cls = cfgm_common.utils.str_to_class(ref_link_type, __name__)
for ref_dict in obj_dict.get(ref_name) or []:
try:
self._validate_complex_type(attr_cls, ref_dict['attr'])
except Exception as e:
err_msg = 'Error validating reference %s value %s ' \
%(ref_name, ref_dict)
err_msg += str(e)
return False, err_msg
return True, ''
# end _validate_refs_in_request
def _validate_perms_in_request(self, resource_class, obj_type, obj_dict):
for ref_name in resource_class.ref_fields:
for ref in obj_dict.get(ref_name) or []:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_name[:-5],
ref['to'])
(ok, status) = self._permissions.check_perms_link(
get_request(), ref_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
# end _validate_perms_in_request
def _validate_resource_type(self, type):
try:
r_class = self.get_resource_class(type)
return r_class.resource_type, r_class
except TypeError:
raise cfgm_common.exceptions.HttpError(
404, "Resource type '%s' not found" % type)
def undo(self, result, obj_type, id=None, fq_name=None):
(code, msg) = result
if self._db_engine == 'cassandra':
get_context().invoke_undo(code, msg, self.config_log)
failed_stage = get_context().get_state()
self.config_object_error(
id, fq_name, obj_type, failed_stage, msg)
# end undo
# http_resource_<oper> - handlers invoked from
# a. bottle route (on-the-wire) OR
# b. internal requests
# using normalized get_request() from ApiContext
@log_api_stats
def http_resource_create(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
obj_dict = get_request().json[resource_type]
# check visibility
user_visible = (obj_dict.get('id_perms') or {}).get('user_visible', True)
if not user_visible and not self.is_admin_request():
result = 'This object is not visible by users'
self.config_object_error(None, None, obj_type, 'http_post', result)
raise cfgm_common.exceptions.HttpError(400, result)
self._post_validate(obj_type, obj_dict=obj_dict)
fq_name = obj_dict['fq_name']
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In pre_%s_create an extension had an error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# properties validator
ok, result = self._validate_props_in_request(r_class, obj_dict)
if not ok:
result = 'Bad property in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
ok, result = self._validate_refs_in_request(r_class, obj_dict)
if not ok:
result = 'Bad reference in create: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# common handling for all resource create
(ok, result) = self._post_common(get_request(), obj_type,
obj_dict)
if not ok:
(code, msg) = result
fq_name_str = ':'.join(obj_dict.get('fq_name', []))
self.config_object_error(None, fq_name_str, obj_type, 'http_post', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
uuid_in_req = result
name = obj_dict['fq_name'][-1]
fq_name = obj_dict['fq_name']
db_conn = self._db_conn
# if client gave parent_type of config-root, ignore and remove
if 'parent_type' in obj_dict and obj_dict['parent_type'] == 'config-root':
del obj_dict['parent_type']
parent_class = None
if 'parent_type' in obj_dict:
# non config-root child, verify parent exists
parent_res_type, parent_class = self._validate_resource_type(
obj_dict['parent_type'])
parent_obj_type = parent_class.object_type
parent_res_type = parent_class.resource_type
parent_fq_name = obj_dict['fq_name'][:-1]
try:
parent_uuid = self._db_conn.fq_name_to_uuid(parent_obj_type,
parent_fq_name)
(ok, status) = self._permissions.check_perms_write(
get_request(), parent_uuid)
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
self._permissions.set_user_role(get_request(), obj_dict)
obj_dict['parent_uuid'] = parent_uuid
except NoIdError:
err_msg = 'Parent %s type %s does not exist' % (
pformat(parent_fq_name), parent_res_type)
fq_name_str = ':'.join(parent_fq_name)
self.config_object_error(None, fq_name_str, obj_type, 'http_post', err_msg)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# Validate perms on references
try:
self._validate_perms_in_request(r_class, obj_type, obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
400, 'Unknown reference in resource create %s.' %(obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
obj_ids = {}
def stateful_create():
# Alloc and Store id-mappings before creating entry on pubsub store.
# Else a subscriber can ask for an id mapping before we have stored it
(ok, result) = db_conn.dbe_alloc(obj_type, obj_dict,
uuid_in_req)
if not ok:
return (ok, result)
get_context().push_undo(db_conn.dbe_release, obj_type, fq_name)
obj_ids.update(result)
env = get_request().headers.environ
tenant_name = env.get(hdr_server_tenant()) or 'default-project'
get_context().set_state('PRE_DBE_CREATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_create(tenant_name, obj_dict,
db_conn)
if not ok:
return (ok, result)
callable = getattr(r_class, 'http_post_collection_fail', None)
if callable:
cleanup_on_failure.append((callable, [tenant_name, obj_dict, db_conn]))
ok, quota_limit, proj_uuid = r_class.get_quota_for_resource(obj_type,
obj_dict, db_conn)
if not ok:
return ok, quota_limit
get_context().set_state('DBE_CREATE')
if quota_limit >= 0:
#master_election
ret = {'ok': None, 'result': None}
def _create():
(ok, result) = r_class.check_for_quota(obj_type, obj_dict,
quota_limit, proj_uuid, db_conn)
if not ok:
ret['ok'] = ok
ret['result'] = result
return
(_ok, _result) = db_conn.dbe_create(obj_type, obj_ids,
obj_dict)
ret['ok'] = _ok
ret['result'] = _result
self._db_conn._zk_db.master_election("/vnc_api_server_obj_create/" + obj_type,
_create)
if not ret['ok']:
return ret['ok'], ret['result']
else:
#normal execution
(ok, result) = db_conn.dbe_create(obj_type, obj_ids,
obj_dict)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_CREATE')
# type-specific hook
try:
ok, err_msg = r_class.post_dbe_create(tenant_name, obj_dict, db_conn)
except Exception as e:
ok = False
err_msg = '%s:%s post_dbe_create had an exception: %s' %(
obj_type, obj_ids['uuid'], str(e))
err_msg += cfgm_common.utils.detailed_traceback()
if not ok:
# Create is done, log to system, no point in informing user
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
return True, ''
# end stateful_create
try:
ok, result = stateful_create()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
fq_name_str = ':'.join(fq_name)
self.undo(result, obj_type, fq_name=fq_name_str)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
rsp_body = {}
rsp_body['name'] = name
rsp_body['fq_name'] = fq_name
rsp_body['uuid'] = obj_ids['uuid']
rsp_body['href'] = self.generate_url(resource_type, obj_ids['uuid'])
if parent_class:
# non config-root child, send back parent uuid/href
rsp_body['parent_uuid'] = parent_uuid
rsp_body['parent_href'] = self.generate_url(parent_res_type,
parent_uuid)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_create' %(obj_type), obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In post_%s_create an extension had an error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
return {resource_type: rsp_body}
# end http_resource_create
@log_api_stats
def http_resource_read(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_read' %(obj_type), id)
except Exception as e:
pass
etag = get_request().headers.get('If-None-Match')
db_conn = self._db_conn
try:
req_obj_type = db_conn.uuid_to_obj_type(id)
if req_obj_type != obj_type:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
fq_name = db_conn.uuid_to_fq_name(id)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# common handling for all resource get
(ok, result) = self._get_common(get_request(), id)
if not ok:
(code, msg) = result
self.config_object_error(
id, None, obj_type, 'http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
if etag:
obj_ids = {'uuid': id}
(ok, result) = db_conn.dbe_is_latest(obj_ids, etag.strip('"'))
if not ok:
# Not present in DB
self.config_object_error(
id, None, obj_type, 'http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
is_latest = result
if is_latest:
# send Not-Modified, caches use this for read optimization
bottle.response.status = 304
return
#end if etag
obj_ids = {'uuid': id}
# Generate field list for db layer
obj_fields = r_class.prop_fields | r_class.ref_fields
if 'fields' in get_request().query:
obj_fields |= set(get_request().query.fields.split(','))
else: # default props + children + refs + backrefs
if 'exclude_back_refs' not in get_request().query:
obj_fields |= r_class.backref_fields
if 'exclude_children' not in get_request().query:
obj_fields |= r_class.children_fields
(ok, result) = r_class.pre_dbe_read(obj_ids['uuid'], db_conn)
if not ok:
(code, msg) = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
(ok, result) = db_conn.dbe_read(obj_type, obj_ids,
list(obj_fields), ret_readonly=True)
if not ok:
self.config_object_error(id, None, obj_type, 'http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(id, None, obj_type, 'http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
if not self.is_admin_request():
result = self.obj_view(resource_type, result)
(ok, err_msg) = r_class.post_dbe_read(result, db_conn)
if not ok:
(code, msg) = err_msg
raise cfgm_common.exceptions.HttpError(code, msg)
rsp_body = {}
rsp_body['uuid'] = id
rsp_body['name'] = result['fq_name'][-1]
if 'exclude_hrefs' not in get_request().query:
result = self.generate_hrefs(resource_type, result)
rsp_body.update(result)
id_perms = result['id_perms']
bottle.response.set_header('ETag', '"' + id_perms['last_modified'] + '"')
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_read' %(obj_type), id, rsp_body)
except Exception as e:
pass
return {resource_type: rsp_body}
# end http_resource_read
# filter object references based on permissions
def obj_view(self, resource_type, obj_dict):
ret_obj_dict = {}
ret_obj_dict.update(obj_dict)
r_class = self.get_resource_class(resource_type)
obj_links = (r_class.ref_fields | r_class.backref_fields | r_class.children_fields) \
& set(obj_dict.keys())
obj_uuids = [ref['uuid'] for link in obj_links for ref in list(obj_dict[link])]
obj_dicts = self._db_conn._object_db.object_raw_read(obj_uuids, ["perms2"])
uuid_to_perms2 = dict((o['uuid'], o['perms2']) for o in obj_dicts)
for link_field in obj_links:
links = obj_dict[link_field]
# build new links in returned dict based on permissions on linked object
ret_obj_dict[link_field] = [l for l in links
if self._permissions.check_perms_read(get_request(), l['uuid'], id_perms=uuid_to_perms2[l['uuid']])[0] == True]
return ret_obj_dict
@log_api_stats
def http_resource_update(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
# Early return if there is no body or an empty body
request = get_request()
if (not hasattr(request, 'json') or
not request.json or
not request.json[resource_type]):
return
obj_dict = get_request().json[resource_type]
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_update' %(obj_type), id, obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In pre_%s_update an extension had an error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
db_conn = self._db_conn
try:
req_obj_type = db_conn.uuid_to_obj_type(id)
if req_obj_type != obj_type:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
obj_ids = {'uuid': id}
(read_ok, read_result) = db_conn.dbe_read(obj_type, obj_ids)
if not read_ok:
bottle.abort(
404, 'No %s object found for id %s' %(resource_type, id))
fq_name = read_result['fq_name']
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
# check visibility
if (not read_result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(id, None, obj_type, 'http_put', result)
raise cfgm_common.exceptions.HttpError(404, result)
# properties validator
ok, result = self._validate_props_in_request(r_class, obj_dict)
if not ok:
result = 'Bad property in update: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# references validator
ok, result = self._validate_refs_in_request(r_class, obj_dict)
if not ok:
result = 'Bad reference in update: ' + result
raise cfgm_common.exceptions.HttpError(400, result)
# common handling for all resource put
(ok, result) = self._put_common(
get_request(), obj_type, id, fq_name, obj_dict)
if not ok:
(code, msg) = result
self.config_object_error(id, None, obj_type, 'http_put', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# Validate perms on references
try:
self._validate_perms_in_request(r_class, obj_type, obj_dict)
except NoIdError:
raise cfgm_common.exceptions.HttpError(400,
'Unknown reference in resource update %s %s.'
%(obj_type, obj_dict))
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
obj_ids = {'uuid': id}
if 'uuid' not in obj_dict:
obj_dict['uuid'] = id
def stateful_update():
get_context().set_state('PRE_DBE_UPDATE')
# type-specific hook
(ok, result) = r_class.pre_dbe_update(
id, fq_name, obj_dict, self._db_conn)
if not ok:
return (ok, result)
get_context().set_state('DBE_UPDATE')
(ok, result) = db_conn.dbe_update(obj_type, obj_ids,
obj_dict)
if not ok:
return (ok, result)
get_context().set_state('POST_DBE_UPDATE')
# type-specific hook
(ok, result) = r_class.post_dbe_update(id, fq_name, obj_dict, self._db_conn)
if not ok:
return (ok, result)
return (ok, result)
# end stateful_update
try:
ok, result = stateful_update()
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
self.undo(result, obj_type, id=id)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
rsp_body = {}
rsp_body['uuid'] = id
rsp_body['href'] = self.generate_url(resource_type, id)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_update' %(obj_type), id, obj_dict, read_result)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In post_%s_update an extension had an error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
return {resource_type: rsp_body}
# end http_resource_update
@log_api_stats
def http_resource_delete(self, obj_type, id):
resource_type, r_class = self._validate_resource_type(obj_type)
db_conn = self._db_conn
# if obj doesn't exist return early
try:
req_obj_type = db_conn.uuid_to_obj_type(id)
if req_obj_type != obj_type:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
_ = db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'ID %s does not exist' %(id))
try:
self._extension_mgrs['resourceApi'].map_method(
'pre_%s_delete' %(obj_type), id)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In pre_%s_delete an extension had an error for %s' \
%(obj_type, id)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# read in obj from db (accepting error) to get details of it
obj_ids = {'uuid': id}
try:
(read_ok, read_result) = db_conn.dbe_read(obj_type, obj_ids)
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
if not read_ok:
self.config_object_error(
id, None, obj_type, 'http_delete', read_result)
# proceed down to delete the resource
# check visibility
if (not read_result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
result = 'This object is not visible by users: %s' % id
self.config_object_error(id, None, obj_type, 'http_delete', result)
raise cfgm_common.exceptions.HttpError(404, result)
# common handling for all resource delete
parent_uuid = read_result.get('parent_uuid')
(ok, del_result) = self._delete_common(
get_request(), obj_type, id, parent_uuid)
if not ok:
(code, msg) = del_result
self.config_object_error(id, None, obj_type, 'http_delete', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
fq_name = read_result['fq_name']
# fail if non-default children or non-derived backrefs exist
default_names = {}
for child_field in r_class.children_fields:
child_type, is_derived = r_class.children_field_types[child_field]
if is_derived:
continue
child_cls = self.get_resource_class(child_type)
default_child_name = 'default-%s' %(
child_cls(parent_type=obj_type).get_type())
default_names[child_type] = default_child_name
exist_hrefs = []
for child in read_result.get(child_field, []):
if child['to'][-1] == default_child_name:
continue
exist_hrefs.append(
self.generate_url(child_type, child['uuid']))
if exist_hrefs:
err_msg = 'Delete when children still present: %s' %(
exist_hrefs)
self.config_object_error(
id, None, obj_type, 'http_delete', err_msg)
raise cfgm_common.exceptions.HttpError(409, err_msg)
relaxed_refs = set(db_conn.dbe_get_relaxed_refs(id))
for backref_field in r_class.backref_fields:
backref_type, _, is_derived = \
r_class.backref_field_types[backref_field]
if is_derived:
continue
exist_hrefs = [self.generate_url(backref_type, backref['uuid'])
for backref in read_result.get(backref_field, [])
if backref['uuid'] not in relaxed_refs]
if exist_hrefs:
err_msg = 'Delete when resource still referred: %s' %(
exist_hrefs)
self.config_object_error(
id, None, obj_type, 'http_delete', err_msg)
raise cfgm_common.exceptions.HttpError(409, err_msg)
# State modification starts from here. Ensure that cleanup is done for all state changes
cleanup_on_failure = []
def stateful_delete():
get_context().set_state('PRE_DBE_DELETE')
(ok, del_result) = r_class.pre_dbe_delete(id, read_result, db_conn)
if not ok:
return (ok, del_result)
# Delete default children first
for child_field in r_class.children_fields:
child_type, is_derived = r_class.children_field_types[child_field]
if is_derived:
continue
if child_field in self._GENERATE_DEFAULT_INSTANCE:
self.delete_default_children(child_type, read_result)
callable = getattr(r_class, 'http_delete_fail', None)
if callable:
cleanup_on_failure.append((callable, [id, read_result, db_conn]))
get_context().set_state('DBE_DELETE')
(ok, del_result) = db_conn.dbe_delete(
obj_type, obj_ids, read_result)
if not ok:
return (ok, del_result)
# type-specific hook
get_context().set_state('POST_DBE_DELETE')
try:
ok, err_msg = r_class.post_dbe_delete(id, read_result, db_conn)
except Exception as e:
ok = False
err_msg = '%s:%s post_dbe_delete had an exception: ' \
%(obj_type, id)
err_msg += cfgm_common.utils.detailed_traceback()
if not ok:
# Delete is done, log to system, no point in informing user
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
return (True, '')
# end stateful_delete
try:
ok, result = stateful_delete()
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(
404, 'No %s object found for id %s' %(resource_type, id))
except Exception as e:
ok = False
err_msg = cfgm_common.utils.detailed_traceback()
result = (500, err_msg)
if not ok:
self.undo(result, obj_type, id=id)
code, msg = result
raise cfgm_common.exceptions.HttpError(code, msg)
try:
self._extension_mgrs['resourceApi'].map_method(
'post_%s_delete' %(obj_type), id, read_result)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
            err_msg = 'In post_%s_delete an extension had an error for %s' \
%(obj_type, id)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# end http_resource_delete
@log_api_stats
def http_resource_list(self, obj_type):
resource_type, r_class = self._validate_resource_type(obj_type)
db_conn = self._db_conn
env = get_request().headers.environ
parent_uuids = None
back_ref_uuids = None
obj_uuids = None
if (('parent_fq_name_str' in get_request().query) and
('parent_type' in get_request().query)):
parent_fq_name = get_request().query.parent_fq_name_str.split(':')
parent_res_type = get_request().query.parent_type
_, parent_class = self._validate_resource_type(parent_res_type)
parent_uuids = [self._db_conn.fq_name_to_uuid(
parent_class.object_type, parent_fq_name)]
elif 'parent_id' in get_request().query:
parent_uuids = get_request().query.parent_id.split(',')
if 'back_ref_id' in get_request().query:
back_ref_uuids = get_request().query.back_ref_id.split(',')
if 'obj_uuids' in get_request().query:
obj_uuids = get_request().query.obj_uuids.split(',')
# common handling for all resource get
for parent_uuid in list(parent_uuids or []):
(ok, result) = self._get_common(get_request(), parent_uuid)
if not ok:
parent_uuids.remove(parent_uuid)
if obj_uuids is None and back_ref_uuids is None and parent_uuids == []:
return {'%ss' %(resource_type): []}
if 'count' in get_request().query:
is_count = 'true' in get_request().query.count.lower()
else:
is_count = False
if 'detail' in get_request().query:
is_detail = 'true' in get_request().query.detail.lower()
else:
is_detail = False
if 'fields' in get_request().query:
req_fields = get_request().query.fields.split(',')
else:
req_fields = []
if 'shared' in get_request().query:
include_shared = 'true' in get_request().query.shared.lower()
else:
include_shared = False
try:
filters = utils.get_filters(get_request().query.filters)
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid filter ' + get_request().query.filters)
if 'exclude_hrefs' in get_request().query:
exclude_hrefs = True
else:
exclude_hrefs = False
return self._list_collection(obj_type, parent_uuids, back_ref_uuids,
obj_uuids, is_count, is_detail, filters,
req_fields, include_shared, exclude_hrefs)
# end http_resource_list
    # internal_request_<oper> - handlers for internally generated requests:
    # they save the caller's context, run under a synthesized internal
    # context, and restore the caller's context afterwards
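    # A minimal usage sketch (hypothetical values): in-process callers can
    # create resources without going over the wire, e.g.
    #
    #   self.internal_request_create(
    #       'virtual-network',
    #       {'fq_name': ['default-domain', 'default-project', 'vn-blue'],
    #        'parent_type': 'project'})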
def internal_request_create(self, resource_type, obj_json):
object_type = self.get_resource_class(resource_type).object_type
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%ss' %(resource_type),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
json_as_dict = {'%s' %(resource_type): obj_json}
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
json_as_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.http_resource_create(object_type)
return True, ""
finally:
set_context(orig_context)
# end internal_request_create
def internal_request_update(self, resource_type, obj_uuid, obj_json):
object_type = self.get_resource_class(resource_type).object_type
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%ss' %(resource_type),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
json_as_dict = {'%s' %(resource_type): obj_json}
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
json_as_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.http_resource_update(object_type, obj_uuid)
return True, ""
finally:
set_context(orig_context)
# end internal_request_update
def internal_request_delete(self, resource_type, obj_uuid):
object_type = self.get_resource_class(resource_type).object_type
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/%s/%s' %(resource_type, obj_uuid),
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
None, None)
set_context(context.ApiContext(internal_req=i_req))
self.http_resource_delete(object_type, obj_uuid)
return True, ""
finally:
set_context(orig_context)
# end internal_request_delete
def internal_request_ref_update(self,
res_type, obj_uuid, operation, ref_res_type, ref_uuid, attr=None):
req_dict = {'type': res_type,
'uuid': obj_uuid,
'operation': operation,
'ref-type': ref_res_type,
'ref-uuid': ref_uuid,
'attr': attr}
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{'PATH_INFO': '/ref-update',
'bottle.app': orig_request.environ['bottle.app'],
'HTTP_X_USER': 'contrail-api',
'HTTP_X_ROLE': self.cloud_admin_role})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers,
req_dict, None)
set_context(context.ApiContext(internal_req=i_req))
self.ref_update_http_post()
return True, ""
finally:
set_context(orig_context)
# end internal_request_ref_update
def alloc_vn_id(self, name):
return self._db_conn._zk_db.alloc_vn_id(name) + 1
def create_default_children(self, object_type, parent_obj):
r_class = self.get_resource_class(object_type)
for child_fields in r_class.children_fields:
# Create a default child only if provisioned for
child_res_type, is_derived =\
r_class.children_field_types[child_fields]
if is_derived:
continue
if child_res_type not in self._GENERATE_DEFAULT_INSTANCE:
continue
child_cls = self.get_resource_class(child_res_type)
child_obj_type = child_cls.object_type
child_obj = child_cls(parent_obj=parent_obj)
child_dict = child_obj.__dict__
child_dict['id_perms'] = self._get_default_id_perms()
child_dict['perms2'] = self._get_default_perms2()
(ok, result) = self._db_conn.dbe_alloc(child_obj_type, child_dict)
if not ok:
return (ok, result)
obj_ids = result
# For virtual networks, allocate an ID
if child_obj_type == 'virtual_network':
child_dict['virtual_network_network_id'] =\
self.alloc_vn_id(child_obj.get_fq_name_str())
(ok, result) = self._db_conn.dbe_create(child_obj_type, obj_ids,
child_dict)
if not ok:
# DB Create failed, log and stop further child creation.
err_msg = "DB Create failed creating %s" % child_res_type
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
return (ok, result)
# recurse down type hierarchy
self.create_default_children(child_obj_type, child_obj)
# end create_default_children
def delete_default_children(self, resource_type, parent_dict):
r_class = self.get_resource_class(resource_type)
for child_field in r_class.children_fields:
# Delete a default child only if provisioned for
child_type, is_derived = r_class.children_field_types[child_field]
if child_type not in self._GENERATE_DEFAULT_INSTANCE:
continue
child_cls = self.get_resource_class(child_type)
            # first locate default child, then delete it
default_child_name = 'default-%s' %(child_type)
child_infos = parent_dict.get(child_field, [])
for child_info in child_infos:
if child_info['to'][-1] == default_child_name:
default_child_id = child_info['uuid']
self.http_resource_delete(child_type, default_child_id)
break
# end delete_default_children
@classmethod
def _generate_resource_crud_methods(cls, obj):
for object_type, _ in all_resource_type_tuples:
create_method = functools.partial(obj.http_resource_create,
object_type)
functools.update_wrapper(create_method, obj.http_resource_create)
setattr(obj, '%ss_http_post' %(object_type), create_method)
read_method = functools.partial(obj.http_resource_read,
object_type)
functools.update_wrapper(read_method, obj.http_resource_read)
setattr(obj, '%s_http_get' %(object_type), read_method)
update_method = functools.partial(obj.http_resource_update,
object_type)
functools.update_wrapper(update_method, obj.http_resource_update)
setattr(obj, '%s_http_put' %(object_type), update_method)
delete_method = functools.partial(obj.http_resource_delete,
object_type)
functools.update_wrapper(delete_method, obj.http_resource_delete)
setattr(obj, '%s_http_delete' %(object_type), delete_method)
list_method = functools.partial(obj.http_resource_list,
object_type)
functools.update_wrapper(list_method, obj.http_resource_list)
setattr(obj, '%ss_http_get' %(object_type), list_method)
# end _generate_resource_crud_methods
@classmethod
def _generate_resource_crud_uri(cls, obj):
for object_type, resource_type in all_resource_type_tuples:
# CRUD + list URIs of the form
# obj.route('/virtual-network/<id>', 'GET', obj.virtual_network_http_get)
# obj.route('/virtual-network/<id>', 'PUT', obj.virtual_network_http_put)
# obj.route('/virtual-network/<id>', 'DELETE', obj.virtual_network_http_delete)
# obj.route('/virtual-networks', 'POST', obj.virtual_networks_http_post)
# obj.route('/virtual-networks', 'GET', obj.virtual_networks_http_get)
# leaf resource
obj.route('/%s/<id>' %(resource_type),
'GET',
getattr(obj, '%s_http_get' %(object_type)))
obj.route('/%s/<id>' %(resource_type),
'PUT',
getattr(obj, '%s_http_put' %(object_type)))
obj.route('/%s/<id>' %(resource_type),
'DELETE',
getattr(obj, '%s_http_delete' %(object_type)))
# collection of leaf
obj.route('/%ss' %(resource_type),
'POST',
getattr(obj, '%ss_http_post' %(object_type)))
obj.route('/%ss' %(resource_type),
'GET',
getattr(obj, '%ss_http_get' %(object_type)))
# end _generate_resource_crud_uri
def __init__(self, args_str=None):
self._db_conn = None
self._get_common = None
self._post_common = None
self._resource_classes = {}
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
# aaa-mode is ignored if multi_tenancy is configured by user
if self._args.multi_tenancy is None:
# MT unconfigured by user - determine from aaa-mode
if self.aaa_mode not in cfgm_common.AAA_MODE_VALID_VALUES:
self.aaa_mode = cfgm_common.AAA_MODE_DEFAULT_VALUE
self._args.multi_tenancy = self.aaa_mode != 'no-auth'
else:
# MT configured by user - ignore aaa-mode
self.aaa_mode = "cloud-admin" if self._args.multi_tenancy else "no-auth"
# set python logging level from logging_level cmdline arg
if not self._args.logging_conf:
logging.basicConfig(level = getattr(logging, self._args.logging_level))
self._base_url = "http://%s:%s" % (self._args.listen_ip_addr,
self._args.listen_port)
# Generate LinkObjects for all entities
links = []
# Link for root
links.append(LinkObject('root', self._base_url , '/config-root',
'config-root'))
for _, resource_type in all_resource_type_tuples:
link = LinkObject('collection',
self._base_url , '/%ss' %(resource_type),
'%s' %(resource_type))
links.append(link)
for _, resource_type in all_resource_type_tuples:
link = LinkObject('resource-base',
self._base_url , '/%s' %(resource_type),
'%s' %(resource_type))
links.append(link)
self._homepage_links = links
self._pipe_start_app = None
#GreenletProfiler.set_clock_type('wall')
self._profile_info = None
# REST interface initialization
self._get_common = self._http_get_common
self._put_common = self._http_put_common
self._delete_common = self._http_delete_common
self._post_validate = self._http_post_validate
self._post_common = self._http_post_common
for act_res in _ACTION_RESOURCES:
link = LinkObject('action', self._base_url, act_res['uri'],
act_res['link_name'], act_res['method'])
self._homepage_links.append(link)
# Register for VN delete request. Disallow delete of system default VN
self.route('/virtual-network/<id>', 'DELETE', self.virtual_network_http_delete)
self.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
self._homepage_links.insert(
0, LinkObject('documentation', self._base_url,
'/documentation/index.html',
'documentation', 'GET'))
# APIs to reserve/free block of IP address from a VN/Subnet
self.route('/virtual-network/<id>/ip-alloc',
'POST', self.vn_ip_alloc_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-alloc',
'virtual-network-ip-alloc', 'POST'))
self.route('/virtual-network/<id>/ip-free',
'POST', self.vn_ip_free_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/ip-free',
'virtual-network-ip-free', 'POST'))
# APIs to find out number of ip instances from given VN subnet
self.route('/virtual-network/<id>/subnet-ip-count',
'POST', self.vn_subnet_ip_count_http_post)
self._homepage_links.append(
LinkObject('action', self._base_url,
'/virtual-network/%s/subnet-ip-count',
'virtual-network-subnet-ip-count', 'POST'))
# Enable/Disable multi tenancy
self.route('/multi-tenancy', 'GET', self.mt_http_get)
self.route('/multi-tenancy', 'PUT', self.mt_http_put)
self.route('/aaa-mode', 'GET', self.aaa_mode_http_get)
self.route('/aaa-mode', 'PUT', self.aaa_mode_http_put)
# randomize the collector list
self._random_collectors = self._args.collectors
        self._chksum = ""
if self._args.collectors:
self._chksum = hashlib.md5(''.join(self._args.collectors)).hexdigest()
self._random_collectors = random.sample(self._args.collectors, \
len(self._args.collectors))
# sandesh init
self._sandesh = Sandesh()
# Reset the sandesh send rate limit value
if self._args.sandesh_send_rate_limit is not None:
SandeshSystem.set_sandesh_send_rate_limit(
self._args.sandesh_send_rate_limit)
module = Module.API_SERVER
module_name = ModuleNames[Module.API_SERVER]
node_type = Module2NodeType[module]
node_type_name = NodeTypeNames[node_type]
self.table = "ObjectConfigNode"
if self._args.worker_id:
instance_id = self._args.worker_id
else:
instance_id = INSTANCE_ID_DEFAULT
hostname = socket.gethostname()
self._sandesh.init_generator(module_name, hostname,
node_type_name, instance_id,
self._random_collectors,
'vnc_api_server_context',
int(self._args.http_server_port),
['cfgm_common', 'vnc_cfg_api_server.sandesh'],
logger_class=self._args.logger_class,
logger_config_file=self._args.logging_conf,
config=self._args.sandesh_config)
self._sandesh.trace_buffer_create(name="VncCfgTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="RestApiTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBRequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="DBUVERequestTraceBuf", size=1000)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
self._sandesh.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file,
enable_syslog=self._args.use_syslog,
syslog_facility=self._args.syslog_facility)
ConnectionState.init(self._sandesh, hostname, module_name,
instance_id,
staticmethod(ConnectionState.get_process_state_cb),
NodeStatusUVE, NodeStatus, self.table)
# Address Management interface
addr_mgmt = vnc_addr_mgmt.AddrMgmt(self)
self._addr_mgmt = addr_mgmt
vnc_cfg_types.Resource.addr_mgmt = addr_mgmt
# DB interface initialization
if self._args.wipe_config:
self._db_connect(True)
else:
self._db_connect(self._args.reset_config)
self._db_init_entries()
# API/Permissions check
# after db init (uses db_conn)
self._rbac = vnc_rbac.VncRbac(self, self._db_conn)
self._permissions = vnc_perms.VncPermissions(self, self._args)
if self.is_rbac_enabled():
self._create_default_rbac_rule()
if os.path.exists('/usr/bin/contrail-version'):
cfgm_cpu_uve = ModuleCpuState()
cfgm_cpu_uve.name = socket.gethostname()
cfgm_cpu_uve.config_node_ip = self.get_server_ip()
command = "contrail-version contrail-config | grep 'contrail-config'"
version = os.popen(command).read()
_, rpm_version, build_num = version.split()
cfgm_cpu_uve.build_info = build_info + '"build-id" : "' + \
rpm_version + '", "build-number" : "' + \
build_num + '"}]}'
cpu_info_trace = ModuleCpuStateTrace(data=cfgm_cpu_uve, sandesh=self._sandesh)
cpu_info_trace.send(sandesh=self._sandesh)
self.re_uuid = re.compile('^[0-9A-F]{8}-?[0-9A-F]{4}-?4[0-9A-F]{3}-?[89AB][0-9A-F]{3}-?[0-9A-F]{12}$',
re.IGNORECASE)
# VncZkClient client assignment
vnc_cfg_types.Resource.vnc_zk_client = self._db_conn._zk_db
# Load extensions
self._extension_mgrs = {}
self._load_extensions()
# Authn/z interface
if self._args.auth == 'keystone':
auth_svc = vnc_auth_keystone.AuthServiceKeystone(self, self._args)
else:
auth_svc = vnc_auth.AuthService(self, self._args)
self._pipe_start_app = auth_svc.get_middleware_app()
self._auth_svc = auth_svc
if int(self._args.worker_id) == 0:
try:
self._extension_mgrs['resync'].map(
self._resync_domains_projects)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# following allowed without authentication
self.white_list = [
'^/documentation', # allow all documentation
'^/$', # allow discovery
]
# end __init__
def _extensions_transform_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_request', request)
# end _extensions_transform_request
def _extensions_validate_request(self, request):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'validate_request', request)
# end _extensions_validate_request
def _extensions_transform_response(self, request, response):
extensions = self._extension_mgrs.get('resourceApi')
if not extensions or not extensions.names():
return None
return extensions.map_method(
'transform_response', request, response)
# end _extensions_transform_response
@ignore_exceptions
def _generate_rest_api_request_trace(self):
method = get_request().method.upper()
if method == 'GET':
return None
req_id = get_request().headers.get('X-Request-Id',
'req-%s' %(str(uuid.uuid4())))
gevent.getcurrent().trace_request_id = req_id
url = get_request().url
if method == 'DELETE':
req_data = ''
else:
try:
req_data = json.dumps(get_request().json)
except Exception as e:
req_data = '%s: Invalid request body' %(e)
rest_trace = RestApiTrace(request_id=req_id)
rest_trace.url = url
rest_trace.method = method
rest_trace.request_data = req_data
return rest_trace
# end _generate_rest_api_request_trace
@ignore_exceptions
def _generate_rest_api_response_trace(self, rest_trace, response):
if not rest_trace:
return
rest_trace.status = bottle.response.status
rest_trace.response_body = json.dumps(response)
rest_trace.trace_msg(name='RestApiTraceBuf', sandesh=self._sandesh)
# end _generate_rest_api_response_trace
# Public Methods
def route(self, uri, method, handler):
@use_context
def handler_trap_exception(*args, **kwargs):
try:
trace = None
self._extensions_transform_request(get_request())
self._extensions_validate_request(get_request())
trace = self._generate_rest_api_request_trace()
(ok, status) = self._rbac.validate_request(get_request())
if not ok:
(code, err_msg) = status
raise cfgm_common.exceptions.HttpError(code, err_msg)
response = handler(*args, **kwargs)
self._generate_rest_api_response_trace(trace, response)
self._extensions_transform_response(get_request(), response)
return response
except Exception as e:
if trace:
trace.trace_msg(name='RestApiTraceBuf',
sandesh=self._sandesh)
# don't log details of cfgm_common.exceptions.HttpError i.e handled error cases
if isinstance(e, cfgm_common.exceptions.HttpError):
bottle.abort(e.status_code, e.content)
else:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
raise
self.api_bottle.route(uri, method, handler_trap_exception)
# end route
def get_args(self):
return self._args
# end get_args
def get_server_ip(self):
ip_list = []
for i in netifaces.interfaces():
try:
if netifaces.AF_INET in netifaces.ifaddresses(i):
addr = netifaces.ifaddresses(i)[netifaces.AF_INET][0][
'addr']
if addr != '127.0.0.1' and addr not in ip_list:
ip_list.append(addr)
            except ValueError as e:
self.config_log("Skipping interface %s" % i,
level=SandeshLevel.SYS_DEBUG)
return ip_list
# end get_server_ip
def get_listen_ip(self):
return self._args.listen_ip_addr
# end get_listen_ip
def get_server_port(self):
return self._args.listen_port
# end get_server_port
def get_worker_id(self):
return int(self._args.worker_id)
# end get_worker_id
def get_pipe_start_app(self):
return self._pipe_start_app
# end get_pipe_start_app
def get_rabbit_health_check_interval(self):
return float(self._args.rabbit_health_check_interval)
# end get_rabbit_health_check_interval
def is_auth_disabled(self):
return self._args.auth is None or self._args.auth.lower() != 'keystone'
def is_admin_request(self):
if not self.is_multi_tenancy_set():
return True
env = bottle.request.headers.environ
for field in ('HTTP_X_API_ROLE', 'HTTP_X_ROLE'):
if field in env:
roles = env[field].split(',')
return self.cloud_admin_role in [x.lower() for x in roles]
return False
def get_auth_headers_from_token(self, request, token):
if self.is_auth_disabled() or not self.is_multi_tenancy_set():
return {}
return self._auth_svc.get_auth_headers_from_token(request, token)
# end get_auth_headers_from_token
# Check for the system created VN. Disallow such VN delete
def virtual_network_http_delete(self, id):
db_conn = self._db_conn
# if obj doesn't exist return early
try:
obj_type = db_conn.uuid_to_obj_type(id)
if obj_type != 'virtual_network':
raise cfgm_common.exceptions.HttpError(
404, 'No virtual-network object found for id %s' %(id))
vn_name = db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'ID %s does not exist' %(id))
if (vn_name == cfgm_common.IP_FABRIC_VN_FQ_NAME or
vn_name == cfgm_common.LINK_LOCAL_VN_FQ_NAME):
raise cfgm_common.exceptions.HttpError(
409,
                'Cannot delete system created default virtual-network '+id)
super(VncApiServer, self).virtual_network_http_delete(id)
# end
@use_context
def homepage_http_get(self):
json_body = {}
json_links = []
# strip trailing '/' in url
url = get_request().url[:-1]
for link in self._homepage_links:
# strip trailing '/' in url
json_links.append(
{'link': link.to_dict(with_url=url)}
)
json_body = {"href": url, "links": json_links}
return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
# ubuntu packaged path
doc_root = '/usr/share/doc/contrail-config/doc/contrail-config/html/'
if not os.path.exists(doc_root):
# centos packaged path
doc_root='/usr/share/doc/python-vnc_cfg_api_server/contrial-config/html/'
return bottle.static_file(
filename,
root=doc_root)
# end documentation_http_get
def obj_perms_http_get(self):
if self.is_auth_disabled() or not self.is_multi_tenancy_set():
result = {
'token_info': None,
'is_cloud_admin_role': False,
'is_global_read_only_role': False,
'permissions': 'RWX'
}
return result
if 'HTTP_X_USER_TOKEN' not in get_request().environ:
raise cfgm_common.exceptions.HttpError(
400, 'User token needed for validation')
user_token = get_request().environ['HTTP_X_USER_TOKEN'].encode("ascii")
# get permissions in internal context
try:
orig_context = get_context()
orig_request = get_request()
b_req = bottle.BaseRequest(
{
'HTTP_X_AUTH_TOKEN': user_token,
'REQUEST_METHOD' : 'GET',
'bottle.app': orig_request.environ['bottle.app'],
})
i_req = context.ApiInternalRequest(
b_req.url, b_req.urlparts, b_req.environ, b_req.headers, None, None)
set_context(context.ApiContext(internal_req=i_req))
token_info = self._auth_svc.validate_user_token(get_request())
finally:
set_context(orig_context)
# roles in result['token_info']['access']['user']['roles']
if token_info:
result = {'token_info' : token_info}
# Handle v2 and v3 responses
roles_list = []
if 'access' in token_info:
roles_list = [roles['name'] for roles in \
token_info['access']['user']['roles']]
elif 'token' in token_info:
roles_list = [roles['name'] for roles in \
token_info['token']['roles']]
result['is_cloud_admin_role'] = self.cloud_admin_role in roles_list
result['is_global_read_only_role'] = self.global_read_only_role in roles_list
if 'uuid' in get_request().query:
obj_uuid = get_request().query.uuid
result['permissions'] = self._permissions.obj_perms(get_request(), obj_uuid)
else:
            raise cfgm_common.exceptions.HttpError(403, "Permission denied")
return result
#end check_obj_perms_http_get
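    # Illustrative exchange for the handler above (the /obj-perms URI is
    # assumed from the action registration; values are placeholders). The
    # caller passes the token to validate in the X-User-Token header:
    #   GET /obj-perms?uuid=<obj-uuid>
    #   -> {"token_info": {...}, "is_cloud_admin_role": false,
    #       "is_global_read_only_role": false, "permissions": "RWX"}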
def invalid_uuid(self, uuid):
        return self.re_uuid.match(uuid) is None
def invalid_access(self, access):
return type(access) is not int or access not in range(0,8)
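    # Note: 'access' values are validated as integers in [0, 7]; this is
    # presumably a 3-bit read/write/execute mask (e.g. 7 == RWX, 5 == R-X),
    # matching the 'RWX' permission strings used elsewhere in this module.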
def invalid_share_type(self, share_type):
return share_type not in cfgm_common.PERMS2_VALID_SHARE_TYPES
# change ownership of an object
def obj_chown_http_post(self):
self._post_common(get_request(), None, None)
try:
obj_uuid = get_request().json['uuid']
owner = get_request().json['owner']
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if self.invalid_uuid(obj_uuid) or self.invalid_uuid(owner):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid object or owner id")
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(400, 'Invalid object id')
# ensure user has RW permissions to object
perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if 'RW' not in perms:
            raise cfgm_common.exceptions.HttpError(403, "Permission denied")
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid},
obj_fields=['perms2'])
obj_dict['perms2']['owner'] = owner
self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict)
msg = "chown: %s owner set to %s" % (obj_uuid, owner)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return {}
#end obj_chown_http_post
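    # Illustrative request body for the handler above (the /obj-chown URI is
    # assumed; uuids are placeholders):
    #   {"uuid": "<obj-uuid>", "owner": "<tenant-uuid>"}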
# chmod for an object
def obj_chmod_http_post(self):
self._post_common(get_request(), None, None)
try:
obj_uuid = get_request().json['uuid']
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if self.invalid_uuid(obj_uuid):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid object id")
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(400, 'Invalid object id')
# ensure user has RW permissions to object
perms = self._permissions.obj_perms(get_request(), obj_uuid)
        if 'RW' not in perms:
            raise cfgm_common.exceptions.HttpError(403, "Permission denied")
request_params = get_request().json
owner = request_params.get('owner')
share = request_params.get('share')
owner_access = request_params.get('owner_access')
global_access = request_params.get('global_access')
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid},
obj_fields=['perms2', 'is_shared'])
obj_perms = obj_dict['perms2']
old_perms = '%s/%d %d %s' % (obj_perms['owner'],
obj_perms['owner_access'], obj_perms['global_access'],
['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])
if owner:
if self.invalid_uuid(owner):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid owner")
obj_perms['owner'] = owner.replace('-','')
if owner_access is not None:
if self.invalid_access(owner_access):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid owner_access value")
obj_perms['owner_access'] = owner_access
if share is not None:
try:
for item in share:
"""
item['tenant'] := [<share_type>:] <uuid>
share_type := ['domain' | 'tenant']
"""
(share_type, share_id) = cfgm_common.utils.shareinfo_from_perms2_tenant(item['tenant'])
if self.invalid_share_type(share_type) or self.invalid_uuid(share_id) or self.invalid_access(item['tenant_access']):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid share list")
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
obj_perms['share'] = share
if global_access is not None:
if self.invalid_access(global_access):
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, invalid global_access value")
obj_perms['global_access'] = global_access
obj_dict['is_shared'] = (global_access != 0)
new_perms = '%s/%d %d %s' % (obj_perms['owner'],
obj_perms['owner_access'], obj_perms['global_access'],
['%s:%d' % (item['tenant'], item['tenant_access']) for item in obj_perms['share']])
self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict)
msg = "chmod: %s perms old=%s, new=%s" % (obj_uuid, old_perms, new_perms)
self.config_log(msg, level=SandeshLevel.SYS_NOTICE)
return {}
#end obj_chmod_http_post
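    # Illustrative request body for the handler above (the /obj-chmod URI is
    # assumed; uuids are placeholders). 'share' entries use the
    # [<share_type>:]<uuid> tenant convention parsed above:
    #   {
    #       "uuid": "<obj-uuid>",
    #       "owner": "<tenant-uuid>",
    #       "owner_access": 7,
    #       "global_access": 5,
    #       "share": [{"tenant": "tenant:<tenant-uuid>", "tenant_access": 5}]
    #   }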
def prop_collection_http_get(self):
if 'uuid' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object uuid needed for property collection get')
obj_uuid = get_request().query.uuid
if 'fields' not in get_request().query:
raise cfgm_common.exceptions.HttpError(
400, 'Object fields needed for property collection get')
obj_fields = get_request().query.fields.split(',')
if 'position' in get_request().query:
fields_position = get_request().query.position
else:
fields_position = None
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
resource_class = self.get_resource_class(obj_type)
for obj_field in obj_fields:
if ((obj_field not in resource_class.prop_list_fields) and
(obj_field not in resource_class.prop_map_fields)):
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
# request validations over
# common handling for all resource get
(ok, result) = self._get_common(get_request(), obj_uuid)
if not ok:
(code, msg) = result
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
try:
ok, result = self._db_conn.prop_collection_get(
obj_type, obj_uuid, obj_fields, fields_position)
if not ok:
self.config_object_error(
obj_uuid, None, None, 'prop_collection_http_get', result)
except NoIdError as e:
# Not present in DB
raise cfgm_common.exceptions.HttpError(404, str(e))
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
# check visibility
if (not result['id_perms'].get('user_visible', True) and
not self.is_admin_request()):
            result = 'This object is not visible by users: %s' % obj_uuid
            self.config_object_error(
                obj_uuid, None, None, 'prop_collection_http_get', result)
raise cfgm_common.exceptions.HttpError(404, result)
# Prepare response
del result['id_perms']
return result
# end prop_collection_http_get
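    # Illustrative request for the handler above (the /prop-collection-get
    # URI is assumed; values are placeholders). 'fields' must name
    # ListProperty or MapProperty fields of the object; 'position' is
    # optional:
    #   GET /prop-collection-get?uuid=<obj-uuid>&fields=<prop-field>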
def prop_collection_http_post(self):
self._post_common(get_request(), None, None)
request_params = get_request().json
# validate each requested operation
obj_uuid = request_params.get('uuid')
if not obj_uuid:
err_msg = 'Error: prop_collection_update needs obj_uuid'
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + obj_uuid)
resource_class = self.get_resource_class(obj_type)
for req_param in request_params.get('updates') or []:
obj_field = req_param.get('field')
if obj_field in resource_class.prop_list_fields:
prop_coll_type = 'list'
elif obj_field in resource_class.prop_map_fields:
prop_coll_type = 'map'
else:
err_msg = '%s neither "ListProperty" nor "MapProperty"' %(
obj_field)
raise cfgm_common.exceptions.HttpError(400, err_msg)
req_oper = req_param.get('operation').lower()
field_val = req_param.get('value')
            # keep None detectable; only stringify a position that was given
            field_pos = req_param.get('position')
            if field_pos is not None:
                field_pos = str(field_pos)
prop_type = resource_class.prop_field_types[obj_field]['xsd_type']
prop_cls = cfgm_common.utils.str_to_class(prop_type, __name__)
prop_val_type = prop_cls.attr_field_type_vals[prop_cls.attr_fields[0]]['attr_type']
prop_val_cls = cfgm_common.utils.str_to_class(prop_val_type, __name__)
try:
self._validate_complex_type(prop_val_cls, field_val)
except Exception as e:
raise cfgm_common.exceptions.HttpError(400, str(e))
if prop_coll_type == 'list':
if req_oper not in ('add', 'modify', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
if ((req_oper == 'add') and field_val is None):
                    err_msg = 'Add needs field value in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'modify') and
None in (field_val, field_pos)):
                    err_msg = 'Modify needs field value and position in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif ((req_oper == 'delete') and field_pos is None):
                    err_msg = 'Delete needs field position in request %s' %(
                        json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
elif prop_coll_type == 'map':
if req_oper not in ('set', 'delete'):
err_msg = 'Unsupported operation %s in request %s' %(
req_oper, json.dumps(req_param))
raise cfgm_common.exceptions.HttpError(400, err_msg)
                if ((req_oper == 'set') and field_val is None):
                    err_msg = 'Set needs field value in request %s' %(
                        json.dumps(req_param))
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
                elif ((req_oper == 'delete') and field_pos is None):
                    err_msg = 'Delete needs field position in request %s' %(
                        json.dumps(req_param))
                    raise cfgm_common.exceptions.HttpError(400, err_msg)
# Validations over. Invoke type specific hook and extension manager
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
(read_ok, read_result) = self._db_conn.dbe_read(
obj_type, {'uuid':obj_uuid})
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: '+obj_uuid)
except Exception as e:
read_ok = False
read_result = cfgm_common.utils.detailed_traceback()
if not read_ok:
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', read_result)
raise cfgm_common.exceptions.HttpError(500, read_result)
# invoke the extension
try:
pre_func = 'pre_'+obj_type+'_update'
self._extension_mgrs['resourceApi'].map_method(pre_func, obj_uuid, {},
prop_collection_updates=request_params.get('updates'))
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_update an extension had error for %s' \
%(obj_type, request_params)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# type-specific hook
r_class = self.get_resource_class(obj_type)
get_context().set_state('PRE_DBE_UPDATE')
(ok, pre_update_result) = r_class.pre_dbe_update(
obj_uuid, fq_name, {}, self._db_conn,
prop_collection_updates=request_params.get('updates'))
if not ok:
(code, msg) = pre_update_result
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# the actual db update
try:
get_context().set_state('DBE_UPDATE')
ok, update_result = self._db_conn.prop_collection_update(
obj_type, obj_uuid, request_params.get('updates'))
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
if not ok:
(code, msg) = update_result
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# type-specific hook
get_context().set_state('POST_DBE_UPDATE')
(ok, post_update_result) = r_class.post_dbe_update(
obj_uuid, fq_name, {}, self._db_conn,
prop_collection_updates=request_params.get('updates'))
if not ok:
            (code, msg) = post_update_result
self.config_object_error(
obj_uuid, None, obj_type, 'prop_collection_update', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# invoke the extension
try:
post_func = 'post_'+obj_type+'_update'
self._extension_mgrs['resourceApi'].map_method(
post_func, obj_uuid, {}, read_result,
prop_collection_updates=request_params.get('updates'))
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_update an extension had error for %s' \
%(obj_type, request_params)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'prop-collection-update'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# end prop_collection_http_post
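    # Illustrative request body for the handler above (the
    # /prop-collection-update URI is assumed; values are placeholders).
    # List fields take add/modify/delete, map fields take set/delete, as
    # validated above:
    #   {
    #       "uuid": "<obj-uuid>",
    #       "updates": [
    #           {"field": "<list-prop-field>", "operation": "add",
    #            "value": <element>},
    #           {"field": "<map-prop-field>", "operation": "delete",
    #            "position": "<key>"}
    #       ]
    #   }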
def ref_update_http_post(self):
self._post_common(get_request(), None, None)
# grab fields
type = get_request().json.get('type')
res_type, res_class = self._validate_resource_type(type)
obj_uuid = get_request().json.get('uuid')
ref_type = get_request().json.get('ref-type')
ref_res_type, ref_class = self._validate_resource_type(ref_type)
operation = get_request().json.get('operation')
ref_uuid = get_request().json.get('ref-uuid')
ref_fq_name = get_request().json.get('ref-fq-name')
attr = get_request().json.get('attr')
# validate fields
if None in (res_type, obj_uuid, ref_res_type, operation):
err_msg = 'Bad Request: type/uuid/ref-type/operation is null: '
err_msg += '%s, %s, %s, %s.' \
%(res_type, obj_uuid, ref_res_type, operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
operation = operation.upper()
if operation not in ['ADD', 'DELETE']:
err_msg = 'Bad Request: operation should be add or delete: %s' \
%(operation)
raise cfgm_common.exceptions.HttpError(400, err_msg)
if not ref_uuid and not ref_fq_name:
err_msg = 'Bad Request: ref-uuid or ref-fq-name must be specified'
raise cfgm_common.exceptions.HttpError(400, err_msg)
obj_type = res_class.object_type
ref_obj_type = ref_class.object_type
if not ref_uuid:
try:
ref_uuid = self._db_conn.fq_name_to_uuid(ref_obj_type, ref_fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(ref_fq_name) + ' not found')
# To verify existence of the reference being added
if operation == 'ADD':
try:
(read_ok, read_result) = self._db_conn.dbe_read(
ref_obj_type, {'uuid': ref_uuid}, obj_fields=['fq_name'])
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: ' + ref_uuid)
except Exception as e:
read_ok = False
read_result = cfgm_common.utils.detailed_traceback()
# To invoke type specific hook and extension manager
try:
(read_ok, read_result) = self._db_conn.dbe_read(
obj_type, get_request().json)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Object Not Found: '+obj_uuid)
except Exception as e:
read_ok = False
read_result = cfgm_common.utils.detailed_traceback()
if not read_ok:
self.config_object_error(obj_uuid, None, obj_type, 'ref_update', read_result)
raise cfgm_common.exceptions.HttpError(500, read_result)
obj_dict = copy.deepcopy(read_result)
# invoke the extension
try:
pre_func = 'pre_' + obj_type + '_update'
self._extension_mgrs['resourceApi'].map_method(pre_func, obj_uuid, obj_dict)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In pre_%s_update an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
# type-specific hook
if res_class:
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'UUID ' + obj_uuid + ' not found')
if operation == 'ADD':
if ref_obj_type+'_refs' not in obj_dict:
obj_dict[ref_obj_type+'_refs'] = []
                existing_ref = [ref for ref in obj_dict[ref_obj_type+'_refs']
                                if ref['uuid'] == ref_uuid]
                if existing_ref:
                    existing_ref[0]['attr'] = attr
else:
obj_dict[ref_obj_type+'_refs'].append(
{'to':ref_fq_name, 'uuid': ref_uuid, 'attr':attr})
elif operation == 'DELETE':
for old_ref in obj_dict.get(ref_obj_type+'_refs', []):
if old_ref['to'] == ref_fq_name or old_ref['uuid'] == ref_uuid:
obj_dict[ref_obj_type+'_refs'].remove(old_ref)
break
(ok, put_result) = res_class.pre_dbe_update(
obj_uuid, fq_name, obj_dict, self._db_conn)
if not ok:
(code, msg) = put_result
self.config_object_error(obj_uuid, None, obj_type, 'ref_update', msg)
raise cfgm_common.exceptions.HttpError(code, msg)
# end if res_class
try:
self._db_conn.ref_update(obj_type, obj_uuid, ref_obj_type,
ref_uuid, {'attr': attr}, operation)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
# invoke the extension
try:
post_func = 'post_' + obj_type + '_update'
self._extension_mgrs['resourceApi'].map_method(post_func, obj_uuid, obj_dict, read_result)
except RuntimeError:
# lack of registered extension leads to RuntimeError
pass
except Exception as e:
err_msg = 'In post_%s_update an extension had error for %s' \
%(obj_type, obj_dict)
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_NOTICE)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'ref-update'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return {'uuid': obj_uuid}
# end ref_update_http_post
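    # Illustrative request body for the handler above (the /ref-update URI
    # is assumed; names and uuids are placeholders). Either 'ref-uuid' or
    # 'ref-fq-name' must be supplied, per the validation above:
    #   {
    #       "type": "virtual-network",
    #       "uuid": "<obj-uuid>",
    #       "ref-type": "network-ipam",
    #       "operation": "ADD",
    #       "ref-fq-name": ["default-domain", "default-project",
    #                       "default-network-ipam"],
    #       "attr": null
    #   }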
def ref_relax_for_delete_http_post(self):
self._post_common(get_request(), None, None)
# grab fields
obj_uuid = get_request().json.get('uuid')
ref_uuid = get_request().json.get('ref-uuid')
# validate fields
if None in (obj_uuid, ref_uuid):
err_msg = 'Bad Request: Both uuid and ref-uuid should be specified: '
err_msg += '%s, %s.' %(obj_uuid, ref_uuid)
raise cfgm_common.exceptions.HttpError(400, err_msg)
try:
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
self._db_conn.ref_relax_for_delete(obj_uuid, ref_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'uuid ' + obj_uuid + ' not found')
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
apiConfig.identifier_name=':'.join(fq_name)
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'ref-relax-for-delete'
try:
body = json.dumps(get_request().json)
except:
body = str(get_request().json)
apiConfig.body = body
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return {'uuid': obj_uuid}
# end ref_relax_for_delete_http_post
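    # Illustrative request body for the handler above (the
    # /ref-relax-for-delete URI is assumed; uuids are placeholders):
    #   {"uuid": "<referring-obj-uuid>", "ref-uuid": "<referred-obj-uuid>"}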
def fq_name_to_id_http_post(self):
self._post_common(get_request(), None, None)
type = get_request().json.get('type')
res_type, r_class = self._validate_resource_type(type)
obj_type = r_class.object_type
fq_name = get_request().json['fq_name']
try:
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Name ' + pformat(fq_name) + ' not found')
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(bottle.request, id)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
return {'uuid': id}
# end fq_name_to_id_http_post
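    # Illustrative exchange for the handler above (the /fqname-to-id URI is
    # assumed; names are placeholders):
    #   request:  {"type": "virtual-network",
    #              "fq_name": ["default-domain", "default-project", "vn1"]}
    #   response: {"uuid": "<obj-uuid>"}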
def id_to_fq_name_http_post(self):
self._post_common(get_request(), None, None)
obj_uuid = get_request().json['uuid']
# ensure user has access to this id
ok, result = self._permissions.check_perms_read(get_request(), obj_uuid)
if not ok:
err_code, err_msg = result
raise cfgm_common.exceptions.HttpError(err_code, err_msg)
try:
fq_name = self._db_conn.uuid_to_fq_name(obj_uuid)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'UUID ' + obj_uuid + ' not found')
obj_type = self._db_conn.uuid_to_obj_type(obj_uuid)
res_type = self.get_resource_class(obj_type).resource_type
return {'fq_name': fq_name, 'type': res_type}
# end id_to_fq_name_http_post
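    # Illustrative exchange for the handler above (the /id-to-fqname URI is
    # assumed; values are placeholders):
    #   request:  {"uuid": "<obj-uuid>"}
    #   response: {"fq_name": ["default-domain", "default-project", "vn1"],
    #              "type": "virtual-network"}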
    # Enables a user-agent to store and retrieve key-value pairs
# TODO this should be done only for special/quantum plugin
def useragent_kv_http_post(self):
self._post_common(get_request(), None, None)
oper = get_request().json['operation']
key = get_request().json['key']
val = get_request().json.get('value', '')
# TODO move values to common
if oper == 'STORE':
self._db_conn.useragent_kv_store(key, val)
elif oper == 'RETRIEVE':
try:
result = self._db_conn.useragent_kv_retrieve(key)
return {'value': result}
except NoUserAgentKey:
raise cfgm_common.exceptions.HttpError(
404, "Unknown User-Agent key " + key)
elif oper == 'DELETE':
result = self._db_conn.useragent_kv_delete(key)
else:
raise cfgm_common.exceptions.HttpError(
404, "Invalid Operation " + oper)
# end useragent_kv_http_post
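    # Illustrative request bodies for the handler above (the /useragent-kv
    # URI is assumed; key/value are placeholders):
    #   {"operation": "STORE", "key": "<key>", "value": "<value>"}
    #   {"operation": "RETRIEVE", "key": "<key>"}   -> {"value": "<value>"}
    #   {"operation": "DELETE", "key": "<key>"}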
def db_check(self):
""" Check database for inconsistencies. No update to database """
check_result = self._db_conn.db_check()
return {'results': check_result}
# end db_check
def fetch_records(self):
""" Retrieve and return all records """
result = self._db_conn.db_read()
return {'results': result}
# end fetch_records
def start_profile(self):
#GreenletProfiler.start()
pass
# end start_profile
def stop_profile(self):
pass
#GreenletProfiler.stop()
#stats = GreenletProfiler.get_func_stats()
#self._profile_info = stats.print_all()
#return self._profile_info
# end stop_profile
def get_profile_info(self):
return self._profile_info
# end get_profile_info
def get_resource_class(self, type_str):
if type_str in self._resource_classes:
return self._resource_classes[type_str]
common_name = cfgm_common.utils.CamelCase(type_str)
server_name = '%sServer' % common_name
try:
resource_class = getattr(vnc_cfg_types, server_name)
except AttributeError:
common_class = cfgm_common.utils.str_to_class(common_name,
__name__)
if common_class is None:
raise TypeError('Invalid type: ' + type_str)
# Create Placeholder classes derived from Resource, <Type> so
# resource_class methods can be invoked in CRUD methods without
# checking for None
resource_class = type(
str(server_name),
(vnc_cfg_types.Resource, common_class, object),
{})
resource_class.server = self
self._resource_classes[resource_class.object_type] = resource_class
self._resource_classes[resource_class.resource_type] = resource_class
return resource_class
# end get_resource_class
def list_bulk_collection_http_post(self):
""" List collection when requested ids don't fit in query params."""
type = get_request().json.get('type') # e.g. virtual-network
resource_type, r_class = self._validate_resource_type(type)
try:
parent_uuids = get_request().json['parent_id'].split(',')
except KeyError:
parent_uuids = None
try:
back_ref_uuids = get_request().json['back_ref_id'].split(',')
except KeyError:
back_ref_uuids = None
try:
obj_uuids = get_request().json['obj_uuids'].split(',')
except KeyError:
obj_uuids = None
is_count = get_request().json.get('count', False)
is_detail = get_request().json.get('detail', False)
include_shared = get_request().json.get('shared', False)
try:
filters = utils.get_filters(get_request().json.get('filters'))
except Exception as e:
raise cfgm_common.exceptions.HttpError(
400, 'Invalid filter ' + get_request().json.get('filters'))
req_fields = get_request().json.get('fields', [])
if req_fields:
req_fields = req_fields.split(',')
exclude_hrefs = get_request().json.get('exclude_hrefs', False)
return self._list_collection(r_class.object_type, parent_uuids,
back_ref_uuids, obj_uuids, is_count,
is_detail, filters, req_fields,
include_shared, exclude_hrefs)
# end list_bulk_collection_http_post
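    # Illustrative request body for the handler above (the
    # /list-bulk-collection URI is assumed; ids are placeholders). The
    # comma-separated id strings mirror the query params of a normal
    # collection GET:
    #   {
    #       "type": "virtual-network",
    #       "obj_uuids": "<uuid-1>,<uuid-2>",
    #       "detail": true,
    #       "fields": "fq_name,uuid"
    #   }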
# Private Methods
def _parse_args(self, args_str):
'''
Eg. python vnc_cfg_api_server.py --cassandra_server_list
10.1.2.3:9160 10.1.2.4:9160
--redis_server_ip 127.0.0.1
--redis_server_port 6382
--collectors 127.0.0.1:8086
--http_server_port 8090
--listen_ip_addr 127.0.0.1
--listen_port 8082
--admin_port 8095
--region_name RegionOne
--log_local
--log_level SYS_DEBUG
--logging_level DEBUG
--logging_conf <logger-conf-file>
--log_category test
--log_file <stdout>
--trace_file /var/log/contrail/vnc_openstack.err
--use_syslog
--syslog_facility LOG_USER
--worker_id 1
--rabbit_max_pending_updates 4096
--rabbit_health_check_interval 120.0
--cluster_id <testbed-name>
[--auth keystone]
[--default_encoding ascii ]
--object_cache_size 10000
--object_cache_exclude_types ''
'''
self._args, _ = utils.parse_args(args_str)
# end _parse_args
# sigchld handler is currently not engaged. See comment @sigchld
def sigchld_handler(self):
# DB interface initialization
self._db_connect(reset_config=False)
self._db_init_entries()
# end sigchld_handler
def sigterm_handler(self):
exit()
# sighup handler for applying new configs
def sighup_handler(self):
if self._args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(self._args.conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
                    if isinstance(collectors, str):
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
random_collectors = random.sample(collectors, len(collectors))
self._sandesh.reconfig_collectors(random_collectors)
except ConfigParser.NoOptionError as e:
pass
# end sighup_handler
def _load_extensions(self):
try:
conf_sections = self._args.config_sections
self._extension_mgrs['resync'] = ExtensionManager(
'vnc_cfg_api.resync', api_server_ip=self._args.listen_ip_addr,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh)
self._extension_mgrs['resourceApi'] = ExtensionManager(
'vnc_cfg_api.resourceApi',
propagate_map_exceptions=True,
api_server_ip=self._args.listen_ip_addr,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh)
self._extension_mgrs['neutronApi'] = ExtensionManager(
'vnc_cfg_api.neutronApi',
api_server_ip=self._args.listen_ip_addr,
api_server_port=self._args.listen_port,
conf_sections=conf_sections, sandesh=self._sandesh,
api_server_obj=self)
except Exception as e:
err_msg = cfgm_common.utils.detailed_traceback()
self.config_log("Exception in extension load: %s" %(err_msg),
level=SandeshLevel.SYS_ERR)
# end _load_extensions
def _db_connect(self, reset_config):
cass_server_list = self._args.cassandra_server_list
redis_server_ip = self._args.redis_server_ip
redis_server_port = self._args.redis_server_port
zk_server = self._args.zk_server_ip
rabbit_servers = self._args.rabbit_server
rabbit_port = self._args.rabbit_port
rabbit_user = self._args.rabbit_user
rabbit_password = self._args.rabbit_password
rabbit_vhost = self._args.rabbit_vhost
rabbit_ha_mode = self._args.rabbit_ha_mode
cassandra_user = self._args.cassandra_user
cassandra_password = self._args.cassandra_password
obj_cache_entries = int(self._args.object_cache_entries)
obj_cache_exclude_types = \
[t.replace('-', '_').strip() for t in
self._args.object_cache_exclude_types.split(',')]
rdbms_server_list = self._args.rdbms_server_list
rdbms_user = self._args.rdbms_user
rdbms_password = self._args.rdbms_password
rdbms_connection = self._args.rdbms_connection
db_engine = self._args.db_engine
self._db_engine = db_engine
cred = None
db_server_list = None
if db_engine == 'cassandra':
if cassandra_user is not None and cassandra_password is not None:
cred = {'username':cassandra_user,'password':cassandra_password}
db_server_list = cass_server_list
if db_engine == 'rdbms':
db_server_list = rdbms_server_list
if rdbms_user is not None and rdbms_password is not None:
cred = {'username': rdbms_user,'password': rdbms_password}
self._db_conn = VncDbClient(
self, db_server_list, rabbit_servers, rabbit_port, rabbit_user,
rabbit_password, rabbit_vhost, rabbit_ha_mode, reset_config,
zk_server, self._args.cluster_id, db_credential=cred,
db_engine=db_engine, rabbit_use_ssl=self._args.rabbit_use_ssl,
kombu_ssl_version=self._args.kombu_ssl_version,
kombu_ssl_keyfile= self._args.kombu_ssl_keyfile,
kombu_ssl_certfile=self._args.kombu_ssl_certfile,
kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs,
obj_cache_entries=obj_cache_entries,
obj_cache_exclude_types=obj_cache_exclude_types, connection=rdbms_connection)
        # TODO refactor db connection management.
self._addr_mgmt._get_db_conn()
# end _db_connect
def _ensure_id_perms_present(self, obj_uuid, obj_dict):
"""
Called at resource creation to ensure that id_perms is present in obj
"""
# retrieve object and permissions
id_perms = self._get_default_id_perms()
if (('id_perms' not in obj_dict) or
(obj_dict['id_perms'] is None)):
# Resource creation
if obj_uuid is None:
obj_dict['id_perms'] = id_perms
return
return
# retrieve the previous version of the id_perms
# from the database and update the id_perms with
# them.
if obj_uuid is not None:
try:
old_id_perms = self._db_conn.uuid_to_obj_perms(obj_uuid)
for field, value in old_id_perms.items():
if value is not None:
id_perms[field] = value
except NoIdError:
pass
# not all fields can be updated
if obj_uuid:
field_list = ['enable', 'description']
else:
field_list = ['enable', 'description', 'user_visible', 'creator']
# Start from default and update from obj_dict
req_id_perms = obj_dict['id_perms']
for key in field_list:
if key in req_id_perms:
id_perms[key] = req_id_perms[key]
# TODO handle perms present in req_id_perms
obj_dict['id_perms'] = id_perms
# end _ensure_id_perms_present
def _get_default_id_perms(self):
id_perms = copy.deepcopy(Provision.defaults.perms)
id_perms_json = json.dumps(id_perms, default=lambda o: dict((k, v)
for k, v in o.__dict__.iteritems()))
id_perms_dict = json.loads(id_perms_json)
return id_perms_dict
# end _get_default_id_perms
def _ensure_perms2_present(self, obj_type, obj_uuid, obj_dict,
project_id=None):
"""
Called at resource creation to ensure that id_perms is present in obj
"""
# retrieve object and permissions
perms2 = self._get_default_perms2()
# set ownership of object to creator tenant
if obj_type == 'project' and 'uuid' in obj_dict:
perms2['owner'] = str(obj_dict['uuid']).replace('-','')
elif project_id:
perms2['owner'] = project_id
if (('perms2' not in obj_dict) or
(obj_dict['perms2'] is None)):
# Resource creation
if obj_uuid is None:
obj_dict['perms2'] = perms2
return (True, "")
# Resource already exist
try:
obj_dict['perms2'] = self._db_conn.uuid_to_obj_perms2(obj_uuid)
except NoIdError:
obj_dict['perms2'] = perms2
return (True, "")
# retrieve the previous version of the perms2
# from the database and update the perms2 with
# them.
if obj_uuid is not None:
try:
old_perms2 = self._db_conn.uuid_to_obj_perms2(obj_uuid)
for field, value in old_perms2.items():
if value is not None:
perms2[field] = value
except NoIdError:
pass
# Start from default and update from obj_dict
req_perms2 = obj_dict['perms2']
for key in req_perms2:
perms2[key] = req_perms2[key]
# TODO handle perms2 present in req_perms2
obj_dict['perms2'] = perms2
# ensure is_shared and global_access are consistent
shared = obj_dict.get('is_shared', None)
gaccess = obj_dict['perms2'].get('global_access', None)
if gaccess is not None and shared is not None and shared != (gaccess != 0):
error = "Inconsistent is_shared (%s a) and global_access (%s)" % (shared, gaccess)
return (False, (400, error))
return (True, "")
# end _ensure_perms2_present
def _get_default_perms2(self):
perms2 = copy.deepcopy(Provision.defaults.perms2)
perms2_json = json.dumps(perms2, default=lambda o: dict((k, v)
for k, v in o.__dict__.iteritems()))
perms2_dict = json.loads(perms2_json)
return perms2_dict
# end _get_default_perms2
def _db_init_entries(self):
# create singleton defaults if they don't exist already in db
glb_sys_cfg = self._create_singleton_entry(
GlobalSystemConfig(autonomous_system=64512,
config_version=CONFIG_VERSION))
def_domain = self._create_singleton_entry(Domain())
ip_fab_vn = self._create_singleton_entry(
VirtualNetwork(cfgm_common.IP_FABRIC_VN_FQ_NAME[-1]))
self._create_singleton_entry(
RoutingInstance('__default__', ip_fab_vn,
routing_instance_is_default=True))
link_local_vn = self._create_singleton_entry(
VirtualNetwork(cfgm_common.LINK_LOCAL_VN_FQ_NAME[-1]))
self._create_singleton_entry(
RoutingInstance('__link_local__', link_local_vn,
routing_instance_is_default=True))
try:
self._create_singleton_entry(
RoutingInstance('default-virtual-network',
routing_instance_is_default=True))
except Exception as e:
            self.config_log('error while creating primary routing instance '
                            'for default-virtual-network: ' + str(e),
                            level=SandeshLevel.SYS_NOTICE)
self._create_singleton_entry(DiscoveryServiceAssignment())
self._create_singleton_entry(GlobalQosConfig())
if int(self._args.worker_id) == 0:
self._db_conn.db_resync()
            # make default ipam available across tenants for backward compatibility
obj_type = 'network_ipam'
fq_name = ['default-domain', 'default-project', 'default-network-ipam']
obj_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
(ok, obj_dict) = self._db_conn.dbe_read(obj_type, {'uuid':obj_uuid},
obj_fields=['perms2'])
obj_dict['perms2']['global_access'] = PERMS_RX
self._db_conn.dbe_update(obj_type, {'uuid': obj_uuid}, obj_dict)
# end _db_init_entries
# generate default rbac group rule
def _create_default_rbac_rule(self):
obj_type = 'api_access_list'
fq_name = ['default-global-system-config', 'default-api-access-list']
try:
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
return
except NoIdError:
pass
# allow full access to cloud admin
rbac_rules = [
{
'rule_object':'fqname-to-id',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'id-to-fqname',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'useragent-kv',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'CRUD'}]
},
{
'rule_object':'documentation',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
{
'rule_object':'/',
'rule_field': '',
'rule_perms': [{'role_name':'*', 'role_crud':'R'}]
},
]
rge = RbacRuleEntriesType([])
for rule in rbac_rules:
rule_perms = [RbacPermType(role_name=p['role_name'], role_crud=p['role_crud']) for p in rule['rule_perms']]
rbac_rule = RbacRuleType(rule_object=rule['rule_object'],
rule_field=rule['rule_field'], rule_perms=rule_perms)
rge.add_rbac_rule(rbac_rule)
rge_dict = rge.exportDict('')
glb_rbac_cfg = ApiAccessList(parent_type='global-system-config',
fq_name=fq_name, api_access_list_entries = rge_dict)
try:
self._create_singleton_entry(glb_rbac_cfg)
except Exception as e:
err_msg = 'Error creating default api access list object'
err_msg += cfgm_common.utils.detailed_traceback()
self.config_log(err_msg, level=SandeshLevel.SYS_ERR)
# end _create_default_rbac_rule
def _resync_domains_projects(self, ext):
if hasattr(ext.obj, 'resync_domains_projects'):
ext.obj.resync_domains_projects()
# end _resync_domains_projects
def _create_singleton_entry(self, singleton_obj):
s_obj = singleton_obj
obj_type = s_obj.object_type
fq_name = s_obj.get_fq_name()
# TODO remove backward compat create mapping in zk
# for singleton START
try:
cass_uuid = self._db_conn._object_db.fq_name_to_uuid(obj_type, fq_name)
try:
zk_uuid = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
# doesn't exist in zookeeper but does so in cassandra,
# migrate this info to zookeeper
self._db_conn._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(cass_uuid))
except NoIdError:
# doesn't exist in cassandra as well as zookeeper, proceed normal
pass
# TODO backward compat END
# create if it doesn't exist yet
try:
id = self._db_conn.fq_name_to_uuid(obj_type, fq_name)
except NoIdError:
obj_dict = s_obj.serialize_to_json()
obj_dict['id_perms'] = self._get_default_id_perms()
obj_dict['perms2'] = self._get_default_perms2()
(ok, result) = self._db_conn.dbe_alloc(obj_type, obj_dict)
obj_ids = result
# For virtual networks, allocate an ID
if obj_type == 'virtual_network':
vn_id = self.alloc_vn_id(s_obj.get_fq_name_str())
obj_dict['virtual_network_network_id'] = vn_id
self._db_conn.dbe_create(obj_type, obj_ids, obj_dict)
self.create_default_children(obj_type, s_obj)
return s_obj
# end _create_singleton_entry
def _list_collection(self, obj_type, parent_uuids=None,
back_ref_uuids=None, obj_uuids=None,
is_count=False, is_detail=False, filters=None,
req_fields=None, include_shared=False,
exclude_hrefs=False):
resource_type, r_class = self._validate_resource_type(obj_type)
is_admin = self.is_admin_request()
if is_admin:
field_names = req_fields
else:
field_names = [u'id_perms'] + (req_fields or [])
(ok, result) = self._db_conn.dbe_list(obj_type,
parent_uuids, back_ref_uuids, obj_uuids, is_count,
filters, is_detail=is_detail, field_names=field_names,
include_shared=include_shared)
if not ok:
self.config_object_error(None, None, '%ss' %(obj_type),
'dbe_list', result)
raise cfgm_common.exceptions.HttpError(404, result)
# If only counting, return early
if is_count:
return {'%ss' %(resource_type): {'count': result}}
allowed_fields = ['uuid', 'href', 'fq_name'] + (req_fields or [])
obj_dicts = []
if is_admin:
for obj_result in result:
if not exclude_hrefs:
obj_result['href'] = self.generate_url(
resource_type, obj_result['uuid'])
if is_detail:
obj_result['name'] = obj_result['fq_name'][-1]
obj_dicts.append({resource_type: obj_result})
else:
obj_dicts.append(obj_result)
else:
for obj_result in result:
# TODO(nati) we should do this using sql query
id_perms = obj_result.get('id_perms')
if not id_perms:
# It is possible that the object was deleted, but received
# an update after that. We need to ignore it for now. In
# future, we should clean up such stale objects
continue
if not id_perms.get('user_visible', True):
                    # skip items that are not user visible
continue
(ok, status) = self._permissions.check_perms_read(
get_request(), obj_result['uuid'],
obj_result['id_perms'])
if not ok and status[0] == 403:
continue
obj_dict = {}
if is_detail:
obj_result = self.obj_view(resource_type, obj_result)
obj_result['name'] = obj_result['fq_name'][-1]
obj_dict.update(obj_result)
obj_dicts.append({resource_type: obj_dict})
else:
obj_dict.update(obj_result)
for key in obj_dict.keys():
                        if key not in allowed_fields:
                            del obj_dict[key]
                    if obj_dict.get('id_perms') and 'id_perms' not in allowed_fields:
del obj_dict['id_perms']
obj_dicts.append(obj_dict)
if not exclude_hrefs:
obj_dict['href'] = self.generate_url(resource_type, obj_result['uuid'])
return {'%ss' %(resource_type): obj_dicts}
# end _list_collection
def get_db_connection(self):
return self._db_conn
# end get_db_connection
def generate_url(self, resource_type, obj_uuid):
try:
url_parts = get_request().urlparts
return '%s://%s/%s/%s'\
% (url_parts.scheme, url_parts.netloc, resource_type, obj_uuid)
except Exception as e:
return '%s/%s/%s' % (self._base_url, resource_type, obj_uuid)
# end generate_url
def generate_hrefs(self, resource_type, obj_dict):
# return a copy of obj_dict with href keys for:
# self, parent, children, refs, backrefs
# don't update obj_dict as it may be cached object
r_class = self.get_resource_class(resource_type)
ret_obj_dict = obj_dict.copy()
ret_obj_dict['href'] = self.generate_url(
resource_type, obj_dict['uuid'])
try:
ret_obj_dict['parent_href'] = self.generate_url(
obj_dict['parent_type'], obj_dict['parent_uuid'])
except KeyError:
# No parent
pass
for child_field, child_field_info in \
r_class.children_field_types.items():
try:
children = obj_dict[child_field]
child_type = child_field_info[0]
ret_obj_dict[child_field] = [
dict(c, href=self.generate_url(child_type, c['uuid']))
for c in children]
except KeyError:
# child_field doesn't exist in original
pass
# end for all child fields
for ref_field, ref_field_info in r_class.ref_field_types.items():
try:
refs = obj_dict[ref_field]
ref_type = ref_field_info[0]
ret_obj_dict[ref_field] = [
dict(r, href=self.generate_url(ref_type, r['uuid']))
for r in refs]
except KeyError:
# ref_field doesn't exist in original
pass
# end for all ref fields
for backref_field, backref_field_info in \
r_class.backref_field_types.items():
try:
backrefs = obj_dict[backref_field]
backref_type = backref_field_info[0]
ret_obj_dict[backref_field] = [
dict(b, href=self.generate_url(backref_type, b['uuid']))
for b in backrefs]
except KeyError:
# backref_field doesn't exist in original
pass
# end for all backref fields
return ret_obj_dict
# end generate_hrefs
def config_object_error(self, id, fq_name_str, obj_type,
operation, err_str):
apiConfig = VncApiCommon()
if obj_type is not None:
apiConfig.object_type = obj_type
apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = id
apiConfig.operation = operation
if err_str:
apiConfig.error = "%s:%s" % (obj_type, err_str)
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# end config_object_error
def config_log(self, msg_str, level=SandeshLevel.SYS_INFO):
errcls = {
SandeshLevel.SYS_DEBUG: VncApiDebug,
SandeshLevel.SYS_INFO: VncApiInfo,
SandeshLevel.SYS_NOTICE: VncApiNotice,
SandeshLevel.SYS_ERR: VncApiError,
}
errcls.get(level, VncApiError)(
api_msg=msg_str, level=level, sandesh=self._sandesh).send(
sandesh=self._sandesh)
# end config_log
def _set_api_audit_info(self, apiConfig):
apiConfig.url = get_request().url
apiConfig.remote_ip = get_request().headers.get('Host')
useragent = get_request().headers.get('X-Contrail-Useragent')
if not useragent:
useragent = get_request().headers.get('User-Agent')
apiConfig.useragent = useragent
apiConfig.user = get_request().headers.get('X-User-Name')
apiConfig.project = get_request().headers.get('X-Project-Name')
apiConfig.domain = get_request().headers.get('X-Domain-Name', 'None')
if apiConfig.domain.lower() == 'none':
apiConfig.domain = 'default-domain'
if int(get_request().headers.get('Content-Length', 0)) > 0:
try:
body = json.dumps(get_request().json)
            except Exception:
body = str(get_request().json)
apiConfig.body = body
# end _set_api_audit_info
# uuid is parent's for collections
def _http_get_common(self, request, uuid=None):
# TODO check api + resource perms etc.
if self.is_multi_tenancy_set() and uuid:
if isinstance(uuid, list):
for u_id in uuid:
ok, result = self._permissions.check_perms_read(request,
u_id)
if not ok:
return ok, result
else:
return self._permissions.check_perms_read(request, uuid)
return (True, '')
# end _http_get_common
def _http_put_common(self, request, obj_type, obj_uuid, obj_fq_name,
obj_dict):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
if obj_dict:
fq_name_str = ":".join(obj_fq_name)
# TODO keep _id_perms.uuid_xxlong immutable in future
# dsetia - check with ajay regarding comment above
# if 'id_perms' in obj_dict:
# del obj_dict['id_perms']
            if (obj_dict.get('id_perms') or {}).get('uuid'):
if not self._db_conn.match_uuid(obj_dict, obj_uuid):
log_msg = 'UUID mismatch from %s:%s' \
% (request.environ['REMOTE_ADDR'],
request.environ['HTTP_USER_AGENT'])
self.config_object_error(
obj_uuid, fq_name_str, obj_type, 'put', log_msg)
self._db_conn.set_uuid(obj_type, obj_dict,
uuid.UUID(obj_uuid),
do_lock=False)
# TODO remove this when the generator will be adapted to
# be consistent with the post method
# Ensure object has at least default permissions set
self._ensure_id_perms_present(obj_uuid, obj_dict)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = obj_uuid
apiConfig.operation = 'put'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig,
sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if self.is_multi_tenancy_set():
return self._permissions.check_perms_write(request, obj_uuid)
return (True, '')
# end _http_put_common
# parent_type needed for perms check. None for derived objects (eg.
# routing-instance)
def _http_delete_common(self, request, obj_type, uuid, parent_uuid):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
fq_name = self._db_conn.uuid_to_fq_name(uuid)
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
        apiConfig.identifier_name = ':'.join(fq_name)
apiConfig.identifier_uuid = uuid
apiConfig.operation = 'delete'
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
# TODO check api + resource perms etc.
if not self.is_multi_tenancy_set() or not parent_uuid:
return (True, '')
"""
Validate parent allows write access. Implicitly trust
        parent info in the object since it comes from our DB.
"""
return self._permissions.check_perms_delete(request, obj_type, uuid,
parent_uuid)
# end _http_delete_common
def _http_post_validate(self, obj_type=None, obj_dict=None):
if not obj_dict:
return
def _check_field_present(fname):
fval = obj_dict.get(fname)
if not fval:
raise cfgm_common.exceptions.HttpError(
400, "Bad Request, no %s in POST body" %(fname))
return fval
fq_name = _check_field_present('fq_name')
# well-formed name checks
if illegal_xml_chars_RE.search(fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has illegal xml characters")
if obj_type == 'route_target':
invalid_chars = self._INVALID_NAME_CHARS - set(':')
else:
invalid_chars = self._INVALID_NAME_CHARS
if any((c in invalid_chars) for c in fq_name[-1]):
raise cfgm_common.exceptions.HttpError(400,
"Bad Request, name has one of invalid chars %s"
%(invalid_chars))
# end _http_post_validate
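    # A minimal obj_dict that passes _http_post_validate (fq_name values are
    # illustrative):
    #   {'fq_name': ['default-domain', 'demo-project', 'vn-blue']}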
def _http_post_common(self, request, obj_type, obj_dict):
# If not connected to zookeeper do not allow operations that
# causes the state change
if not self._db_conn._zk_db.is_connected():
return (False,
(503, "Not connected to zookeeper. Not able to perform requested action"))
if not obj_dict:
# TODO check api + resource perms etc.
return (True, None)
# If there are too many pending updates to rabbit, do not allow
# operations that cause state change
npending = self._db_conn.dbe_oper_publish_pending()
if (npending >= int(self._args.rabbit_max_pending_updates)):
err_str = str(MaxRabbitPendingError(npending))
return (False, (500, err_str))
# Fail if object exists already
try:
obj_uuid = self._db_conn.fq_name_to_uuid(
obj_type, obj_dict['fq_name'])
raise cfgm_common.exceptions.HttpError(
                409, pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_uuid)
except NoIdError:
pass
# Ensure object has at least default permissions set
self._ensure_id_perms_present(None, obj_dict)
self._ensure_perms2_present(obj_type, None, obj_dict,
request.headers.environ.get('HTTP_X_PROJECT_ID', None))
# TODO check api + resource perms etc.
uuid_in_req = obj_dict.get('uuid', None)
# Set the display name
if (('display_name' not in obj_dict) or
(obj_dict['display_name'] is None)):
obj_dict['display_name'] = obj_dict['fq_name'][-1]
fq_name_str = ":".join(obj_dict['fq_name'])
apiConfig = VncApiCommon()
apiConfig.object_type = obj_type
        apiConfig.identifier_name = fq_name_str
apiConfig.identifier_uuid = uuid_in_req
apiConfig.operation = 'post'
try:
body = json.dumps(request.json)
        except Exception:
body = str(request.json)
apiConfig.body = body
if uuid_in_req:
if uuid_in_req != str(uuid.UUID(uuid_in_req)):
bottle.abort(400, 'Invalid UUID format: ' + uuid_in_req)
try:
fq_name = self._db_conn.uuid_to_fq_name(uuid_in_req)
raise cfgm_common.exceptions.HttpError(
409, uuid_in_req + ' already exists with fq_name: ' +
pformat(fq_name))
except NoIdError:
pass
apiConfig.identifier_uuid = uuid_in_req
self._set_api_audit_info(apiConfig)
log = VncApiConfigLog(api_log=apiConfig, sandesh=self._sandesh)
log.send(sandesh=self._sandesh)
return (True, uuid_in_req)
# end _http_post_common
def reset(self):
# cleanup internal state/in-flight operations
if self._db_conn:
self._db_conn.reset()
# end reset
# allocate block of IP addresses from VN. Subnet info expected in request
# body
def vn_ip_alloc_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
        # expected format {"subnet" : "2.1.1.0/24", "count" : 4}
req_dict = get_request().json
count = req_dict.get('count', 1)
subnet = req_dict.get('subnet')
family = req_dict.get('family')
try:
result = vnc_cfg_types.VirtualNetworkServer.ip_alloc(
vn_fq_name, subnet, count, family)
except vnc_addr_mgmt.AddrMgmtSubnetUndefined as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except vnc_addr_mgmt.AddrMgmtSubnetExhausted as e:
raise cfgm_common.exceptions.HttpError(409, str(e))
return result
# end vn_ip_alloc_http_post
# free block of ip addresses to subnet
def vn_ip_free_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
"""
{
"subnet" : "2.1.1.0/24",
"ip_addr": [ "2.1.1.239", "2.1.1.238", "2.1.1.237", "2.1.1.236" ]
}
"""
req_dict = get_request().json
        ip_list = req_dict.get('ip_addr', [])
result = vnc_cfg_types.VirtualNetworkServer.ip_free(
vn_fq_name, ip_list)
return result
# end vn_ip_free_http_post
# return no. of IP addresses from VN/Subnet
def vn_subnet_ip_count_http_post(self, id):
try:
vn_fq_name = self._db_conn.uuid_to_fq_name(id)
except NoIdError:
raise cfgm_common.exceptions.HttpError(
404, 'Virtual Network ' + id + ' not found!')
        # expected format {"subnet_list" : ["2.1.1.0/24", "1.1.1.0/24"]}
req_dict = get_request().json
try:
(ok, result) = self._db_conn.dbe_read('virtual_network', {'uuid': id})
except NoIdError as e:
raise cfgm_common.exceptions.HttpError(404, str(e))
except Exception as e:
ok = False
result = cfgm_common.utils.detailed_traceback()
if not ok:
raise cfgm_common.exceptions.HttpError(500, result)
obj_dict = result
        subnet_list = req_dict.get('subnet_list', [])
result = vnc_cfg_types.VirtualNetworkServer.subnet_ip_count(
vn_fq_name, subnet_list)
return result
# end vn_subnet_ip_count_http_post
def set_mt(self, multi_tenancy):
pipe_start_app = self.get_pipe_start_app()
try:
pipe_start_app.set_mt(multi_tenancy)
except AttributeError:
pass
self._args.multi_tenancy = multi_tenancy
# end
    # check if token validation is needed
def is_multi_tenancy_set(self):
return self.aaa_mode != 'no-auth'
def is_rbac_enabled(self):
return self.aaa_mode == 'rbac'
def mt_http_get(self):
pipe_start_app = self.get_pipe_start_app()
mt = self.is_multi_tenancy_set()
try:
mt = pipe_start_app.get_mt()
except AttributeError:
pass
return {'enabled': mt}
# end
def mt_http_put(self):
multi_tenancy = get_request().json['enabled']
user_token = get_request().get_header('X-Auth-Token')
if user_token is None:
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
data = self._auth_svc.verify_signed_token(user_token)
if data is None:
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
self.set_mt(multi_tenancy)
return {'enabled': self.is_multi_tenancy_set()}
# end
@property
def aaa_mode(self):
return self._args.aaa_mode
@aaa_mode.setter
def aaa_mode(self, mode):
self._args.aaa_mode = mode
    # indicates whether multi-tenancy with RBAC is enabled or disabled
def aaa_mode_http_get(self):
return {'aaa-mode': self.aaa_mode}
def aaa_mode_http_put(self):
aaa_mode = get_request().json['aaa-mode']
if aaa_mode not in cfgm_common.AAA_MODE_VALID_VALUES:
raise ValueError('Invalid aaa-mode %s' % aaa_mode)
if not self._auth_svc.validate_user_token(get_request()):
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
if not self.is_admin_request():
raise cfgm_common.exceptions.HttpError(403, " Permission denied")
self.aaa_mode = aaa_mode
if self.is_rbac_enabled():
self._create_default_rbac_rule()
return {'aaa-mode': self.aaa_mode}
# end
@property
def cloud_admin_role(self):
return self._args.cloud_admin_role
@property
def global_read_only_role(self):
return self._args.global_read_only_role
def keystone_version(self):
k_v = 'v2.0'
try:
if 'v3' in self._args.auth_url:
k_v = 'v3'
except AttributeError:
pass
return k_v
# end class VncApiServer
def main(args_str=None, server=None):
vnc_api_server = server
pipe_start_app = vnc_api_server.get_pipe_start_app()
server_ip = vnc_api_server.get_listen_ip()
server_port = vnc_api_server.get_server_port()
""" @sigchld
Disable handling of SIG_CHLD for now as every keystone request to validate
token sends SIG_CHLD signal to API server.
"""
#hub.signal(signal.SIGCHLD, vnc_api_server.sigchld_handler)
hub.signal(signal.SIGTERM, vnc_api_server.sigterm_handler)
hub.signal(signal.SIGHUP, vnc_api_server.sighup_handler)
if pipe_start_app is None:
pipe_start_app = vnc_api_server.api_bottle
try:
bottle.run(app=pipe_start_app, host=server_ip, port=server_port,
server=get_bottle_server(server._args.max_requests))
except KeyboardInterrupt:
# quietly handle Ctrl-C
pass
except:
# dump stack on all other exceptions
raise
finally:
# always cleanup gracefully
vnc_api_server.reset()
# end main
def server_main(args_str=None):
vnc_cgitb.enable(format='text')
main(args_str, VncApiServer(args_str))
#server_main
if __name__ == "__main__":
server_main()
| 41.209545 | 136 | 0.587961 |
6e5170c0adeac0389353dfd652ac174e98f9ba8b | 744 | py | Python | manage.py | omukankurunziza/Blog-app | aca53cd0321c3a81afbb3f14085c5b48515d04c3 | [
"MIT"
] | null | null | null | manage.py | omukankurunziza/Blog-app | aca53cd0321c3a81afbb3f14085c5b48515d04c3 | [
"MIT"
] | null | null | null | manage.py | omukankurunziza/Blog-app | aca53cd0321c3a81afbb3f14085c5b48515d04c3 | [
"MIT"
] | null | null | null | from app import create_app,db
from flask_script import Manager,Server
from app.models import User,Post,Comment,Email
from flask_migrate import Migrate, MigrateCommand
# Creating app instance
app = create_app('test')
manager = Manager(app)
manager.add_command('server', Server)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
    return dict(app=app, db=db, User=User, Post=Post, Comment=Comment, Email=Email)
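# Example CLI usage via Flask-Script (commands registered above; the 'db'
# subcommands come from Flask-Migrate's MigrateCommand):
#   python manage.py server       # start the development server
#   python manage.py test         # run the unit tests
#   python manage.py db migrate   # autogenerate a migration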
if __name__ == '__main__':
manager.run() | 25.655172 | 93 | 0.737903 |
6512049d8dec3c9e51df263271c2589d77c003a7 | 3,142 | py | Python | utils/transform.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | 2 | 2018-08-31T18:30:28.000Z | 2018-11-27T01:50:06.000Z | utils/transform.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | null | null | null | utils/transform.py | posm/osm-export-tool2 | 5a1f4096f1afbe7420363376e6e1e8d42e47e1d1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import logging
import os
import subprocess
from string import Template
from osgeo import gdal, ogr, osr
logger = logging.getLogger(__name__)
class TransformSQlite(object):
"""
Applies a schema transformation to a sqlite database.
NOT IMPLEMENTED YET
"""
def __init__(self, sqlite=None, transform=None, transform_sqlite=None, debug=None):
self.sqlite = sqlite
self.transform = transform
if not os.path.exists(self.sqlite):
raise IOError('Cannot find SQlite database for this task.')
if not os.path.exists(self.transform):
raise IOError('Cannot find transform file for this task.')
self.debug = debug
"""
OGR Command to run.
"""
self.cmd = Template("""
spatialite $sqlite < $transform
""")
# Enable GDAL/OGR exceptions
gdal.UseExceptions()
self.srs = osr.SpatialReference()
self.srs.ImportFromEPSG(4326) # configurable
    def transform_default_schema(self):
assert os.path.exists(self.sqlite), "No spatialite file found for schema transformation"
# transform the spatialite schema
self.update_sql = Template("spatialite $sqlite < $transform_sql")
sql_cmd = self.update_sql.safe_substitute({'sqlite': self.sqlite,
'transform_sql': self.transform})
        if self.debug:
print 'Running: %s' % sql_cmd
proc = subprocess.Popen(sql_cmd, shell=True, executable='/bin/bash',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
returncode = proc.wait()
        if returncode != 0:
            logger.error('%s', stderr)
            raise Exception("{0} process failed with returncode: {1}".format(sql_cmd, returncode))
if self.debug:
print 'spatialite returned: %s' % returncode
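# Example usage of the class above (file names are assumptions):
#   transformer = TransformSQlite(sqlite='export.sqlite',
#                                 transform='planet_osm_schema.sql',
#                                 debug=True)
#   transformer.transform_default_schema()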
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=(
            'Applies a SQL schema transformation to a Spatialite database,\n'
            'e.g. to create the planet_osm_* tables.'
        )
    )
    parser.add_argument('-s', '--spatialite-file', required=True, dest="sqlite", help='The sqlite file to transform')
    parser.add_argument('-q', '--schema-sql', required=True, dest="transform", help='A sql file to refactor the schema')
    parser.add_argument('-d', '--debug', action="store_true", help="Turn on debug output")
    args = parser.parse_args()
    config = {}
    for k, v in vars(args).items():
        if v is not None:
            config[k] = v
    debug = bool(config.get('debug'))
    transformer = TransformSQlite(sqlite=config.get('sqlite'),
                                  transform=config.get('transform'),
                                  debug=debug)
    transformer.transform_default_schema()
| 36.114943 | 125 | 0.617123 |
59087db67b664a98d6a194bbf830c6c8fa83c4e9 | 1,633 | py | Python | absen/hadir/forms.py | petr0max/absensi | 79eb2d9ad9ff76eb59a1353d5d9d44a4822a7b92 | [
"MIT"
] | null | null | null | absen/hadir/forms.py | petr0max/absensi | 79eb2d9ad9ff76eb59a1353d5d9d44a4822a7b92 | [
"MIT"
] | null | null | null | absen/hadir/forms.py | petr0max/absensi | 79eb2d9ad9ff76eb59a1353d5d9d44a4822a7b92 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import (StringField, SubmitField, DateField,
                     IntegerField, TimeField, BooleanField)
from wtforms.validators import DataRequired, Length, Optional
class PermitForm(FlaskForm):
long_date = IntegerField('Izin Berapa Hari', validators=[DataRequired()])
start_date = DateField('Mulai Tanggal', validators=[DataRequired()])
keterangan = StringField('Keperluan Izin', validators=[DataRequired(),
Length(1, 125)])
submit = SubmitField('Kirim')
class PermitConfirmForm(FlaskForm):
checkbox = BooleanField("Disetujui ?")
submit = SubmitField('Kirim')
class CheckInForm(FlaskForm):
dates = DateField('Tanggal', validators=[DataRequired()])
jam_input = TimeField('Jam Datang', validators=[DataRequired()],
format='%H:%M')
submit = SubmitField('Kirim')
class CheckOutForm(FlaskForm):
dates = DateField('Tanggal', validators=[DataRequired()])
jam_pulang = TimeField('Jam Pulang', validators=[DataRequired()],
format='%H:%M')
keterangan = StringField('Keterangan',
validators=[DataRequired(), Length(1, 64)])
submit = SubmitField('Kirim')
class SickForm(FlaskForm):
input_date = DateField('Tanggal', validators=[DataRequired()])
diagnosa = StringField('Diagnosa', validators=[DataRequired(),
Length(1, 64)])
long_date = IntegerField('Izin Hari', validators=[Optional()])
submit = SubmitField('Kirim')
| 39.829268 | 77 | 0.63319 |
23ea70c77eec5725e9b351011e9b728445aaa4ae | 2,115 | py | Python | yt_dlp/extractor/vupload.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/vupload.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/vupload.py | nxtreaming/yt-dlp | 385ffb467b2285e85a2a5495b90314ba1f8e0700 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_filesize,
extract_attributes,
int_or_none,
js_to_json
)
class VuploadIE(InfoExtractor):
_VALID_URL = r'https://vupload\.com/v/(?P<id>[a-z0-9]+)'
_TESTS = [{
'url': 'https://vupload.com/v/u28d0pl2tphy',
'md5': '9b42a4a193cca64d80248e58527d83c8',
'info_dict': {
'id': 'u28d0pl2tphy',
'ext': 'mp4',
'description': 'md5:e9e6c0045c78cbf0d5bb19a55ce199fb',
'title': 'md5:e9e6c0045c78cbf0d5bb19a55ce199fb',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_extract_title(webpage)
video_json = self._parse_json(self._html_search_regex(r'sources:\s*(.+?]),', webpage, 'video'), video_id, transform_source=js_to_json)
formats = []
for source in video_json:
if source['src'].endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(source['src'], video_id, m3u8_id='hls'))
duration = parse_duration(self._html_search_regex(
r'<i\s*class=["\']fad\s*fa-clock["\']></i>\s*([\d:]+)\s*</div>', webpage, 'duration', fatal=False))
filesize_approx = parse_filesize(self._html_search_regex(
r'<i\s*class=["\']fad\s*fa-save["\']></i>\s*([^<]+)\s*</div>', webpage, 'filesize', fatal=False))
extra_video_info = extract_attributes(self._html_search_regex(
            r'(<video[^>]+>)', webpage, 'video_info', fatal=False) or '')
description = self._html_search_meta('description', webpage)
return {
'id': video_id,
'formats': formats,
'duration': duration,
'filesize_approx': filesize_approx,
'width': int_or_none(extra_video_info.get('width')),
'height': int_or_none(extra_video_info.get('height')),
'format_id': extra_video_info.get('height', '') + 'p',
'title': title,
'description': description,
}
| 39.90566 | 142 | 0.594326 |
f1da70106066c1ea8dc3d5d257318418bc219030 | 3,716 | py | Python | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnUserDomainsRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 1 | 2020-12-05T03:03:46.000Z | 2020-12-05T03:03:46.000Z | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnUserDomainsRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-dcdn/aliyunsdkdcdn/request/v20180115/DescribeDcdnUserDomainsRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdcdn.endpoint import endpoint_data
class DescribeDcdnUserDomainsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dcdn', '2018-01-15', 'DescribeDcdnUserDomains')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_CheckDomainShow(self):
return self.get_query_params().get('CheckDomainShow')
def set_CheckDomainShow(self,CheckDomainShow):
self.add_query_param('CheckDomainShow',CheckDomainShow)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ChangeEndTime(self):
return self.get_query_params().get('ChangeEndTime')
def set_ChangeEndTime(self,ChangeEndTime):
self.add_query_param('ChangeEndTime',ChangeEndTime)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Tags(self):
return self.get_query_params().get('Tag')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DomainStatus(self):
return self.get_query_params().get('DomainStatus')
def set_DomainStatus(self,DomainStatus):
self.add_query_param('DomainStatus',DomainStatus)
def get_DomainSearchType(self):
return self.get_query_params().get('DomainSearchType')
def set_DomainSearchType(self,DomainSearchType):
self.add_query_param('DomainSearchType',DomainSearchType)
def get_ChangeStartTime(self):
return self.get_query_params().get('ChangeStartTime')
def set_ChangeStartTime(self,ChangeStartTime):
self.add_query_param('ChangeStartTime',ChangeStartTime) | 34.407407 | 89 | 0.757804 |
afa3b49c0c86bb9d044ade6dda42f635d8e95e8c | 6,561 | py | Python | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/chebyshev.py | poojavade/Genomics_Docker | 829b5094bba18bbe03ae97daf925fee40a8476e8 | [
"Apache-2.0"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/chebyshev.py | poojavade/Genomics_Docker | 829b5094bba18bbe03ae97daf925fee40a8476e8 | [
"Apache-2.0"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/chebyshev.py | poojavade/Genomics_Docker | 829b5094bba18bbe03ae97daf925fee40a8476e8 | [
"Apache-2.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | #!/usr/bin/env python
# Author : Pierre Schnizer
"""
This module describes routines for computing Chebyshev
approximations to univariate functions. A Chebyshev approximation is a
truncation of the series \M{f(x) = S{sum} c_n T_n(x)}, where the Chebyshev
polynomials \M{T_n(x) = cos(n \arccos x)} provide an orthogonal basis of
polynomials on the interval [-1,1] with the weight function
\M{1 / sqrt{1-x^2}}. The first few Chebyshev polynomials are, T_0(x) = 1,
T_1(x) = x, T_2(x) = 2 x^2 - 1.
def f(x, p):
if x < 0.5:
return 0.25
else:
return 0.75
n = 10000;
cs = cheb_series(40)
F = gsl_function(f, None)
cs.init(F, 0.0, 1.0)
nf = float(n)
for i in range(100):
x = i / nf
r10 = cs.eval_n(10, x)
r40 = cs.eval(x)
print "%g %g %g %g" % (x, f(x, None), r10, r40)
"""
import _callback
from _generic_solver import _workspace
from gsl_function import gsl_function
class cheb_series(_workspace):
"""
This class manages all internal detail. It provides the space for a
Chebyshev series of order N.
"""
_alloc = _callback.gsl_cheb_alloc
_free = _callback.gsl_cheb_free
_init = _callback.gsl_cheb_init
_eval = _callback.gsl_cheb_eval
_eval_err = _callback.gsl_cheb_eval_err
_eval_n = _callback.gsl_cheb_eval_n
_eval_n_err = _callback.gsl_cheb_eval_n_err
#_eval_mode = _callback.gsl_cheb_eval_mode
#_eval_mode_e = _callback.gsl_cheb_eval_mode_e
_calc_deriv = _callback.gsl_cheb_calc_deriv
_calc_integ = _callback.gsl_cheb_calc_integ
_get_coeff = _callback.pygsl_cheb_get_coefficients
_set_coeff = _callback.pygsl_cheb_set_coefficients
_get_a = _callback.pygsl_cheb_get_a
_set_a = _callback.pygsl_cheb_set_a
_get_b = _callback.pygsl_cheb_get_b
_set_b = _callback.pygsl_cheb_set_b
_get_f = _callback.pygsl_cheb_get_f
_set_f = _callback.pygsl_cheb_set_f
_get_order_sp = _callback.pygsl_cheb_get_order_sp
_set_order_sp = _callback.pygsl_cheb_set_order_sp
def __init__(self, size):
"""
input : n
@params n : number of coefficients
"""
self._size = size
_workspace.__init__(self, size)
def init(self, f, a, b):
"""
This function computes the Chebyshev approximation for the
function F over the range (a,b) to the previously specified order.
The computation of the Chebyshev approximation is an \M{O(n^2)}
process, and requires n function evaluations.
input : f, a, b
@params f : a gsl_function
@params a : lower limit
@params b : upper limit
"""
return self._init(self._ptr, f.get_ptr(), a, b)
def eval(self, x):
"""
This function evaluates the Chebyshev series CS at a given point X
input : x
x ... value where the series shall be evaluated.
"""
return self._eval(self._ptr, x)
def eval_err(self, x):
"""
This function computes the Chebyshev series at a given point X,
estimating both the series RESULT and its absolute error ABSERR.
The error estimate is made from the first neglected term in the
series.
input : x
x ... value where the error shall be evaluated.
"""
return self._eval_err(self._ptr, x)
def eval_n(self, order, x):
"""
This function evaluates the Chebyshev series CS at a given point
        X, to (at most) the given order ORDER.
        input : n, x
        n ... number of coefficients
x ... value where the series shall be evaluated.
"""
return self._eval_n(self._ptr, order, x)
def eval_n_err(self, order, x):
"""
This function evaluates a Chebyshev series CS at a given point X,
estimating both the series RESULT and its absolute error ABSERR,
to (at most) the given order ORDER. The error estimate is made
from the first neglected term in the series.
input : n, x
        n ... number of coefficients
x ... value where the error shall be evaluated.
"""
return self._eval_n_err(self._ptr, order, x)
# def eval_mode(self, x, mode):
# """
#
# """
# return self._eval(self._ptr, x, mode)
#
# def eval_mode_e(self, x, mode):
# return self._eval(self._ptr, x, mode)
def calc_deriv(self):
"""
This method computes the derivative of the series CS. It returns
a new instance of the cheb_series class.
"""
tmp = cheb_series(self._size)
self._calc_deriv(tmp._ptr, self._ptr)
return tmp
def calc_integ(self):
"""
This method computes the integral of the series CS. It returns
a new instance of the cheb_series class.
"""
tmp = cheb_series(self._size)
self._calc_integ(tmp._ptr, self._ptr)
return tmp
def get_coefficients(self):
"""
Get the chebyshev coefficients.
"""
return self._get_coeff(self._ptr)
def set_coefficients(self, coefs):
"""
Sets the chebyshev coefficients.
"""
return self._set_coeff(self._ptr, coefs)
def get_a(self):
"""
Get the lower boundary of the current representation
"""
return self._get_a(self._ptr)
def set_a(self, a):
"""
Set the lower boundary of the current representation
"""
return self._set_a(self._ptr, a)
def get_b(self):
"""
Get the upper boundary of the current representation
"""
return self._get_b(self._ptr)
def set_b(self, a):
"""
Set the upper boundary of the current representation
"""
return self._set_b(self._ptr, a)
def get_f(self):
"""
Get the value f (what is it ?) The documentation does not tell anything
about it.
"""
return self._get_f(self._ptr)
def set_f(self, a):
"""
Set the value f (what is it ?)
"""
return self._set_f(self._ptr, a)
def get_order_sp(self):
"""
Get the value f (what is it ?) The documentation does not tell anything
about it.
"""
return self._get_order_sp(self._ptr)
def set_order_sp(self, a):
"""
Set the value f (what is it ?)
"""
return self._set_order_sp(self._ptr, a)
| 28.903084 | 79 | 0.607072 |
4f6aad09ac14ae9cf1203089653f96e5eaaf0fd1 | 1,253 | py | Python | setup.py | xiaomo123zk/MapMatching4GMNS-v0.2.2-master | 82d22bc0a51c8a84bfb4782a7a39a13cc9972814 | [
"Apache-2.0"
] | 1 | 2021-04-12T08:04:55.000Z | 2021-04-12T08:04:55.000Z | setup.py | xiaomo123zk/trace2route | 82d22bc0a51c8a84bfb4782a7a39a13cc9972814 | [
"Apache-2.0"
] | null | null | null | setup.py | xiaomo123zk/trace2route | 82d22bc0a51c8a84bfb4782a7a39a13cc9972814 | [
"Apache-2.0"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="MapMatching4GMNS",
version="0.2.2",
author="Xuesong (Simon) Zhou, Kai (Frank) Zhang, Jiawei Lu",
author_email="xzhou74@asu.edu, zhangk2019@seu.edu.cn, jiaweil9@asu.edu",
description="An open-source, cross-platform, lightweight, and fast Python\
MapMatching4GMNS engine for mapping GPS traces to the underlying network\
using General Modeling Network Specification (GMNS).\
Its most likely path finding algorithm takes about 0.02 seconds to process one GPS trace\
with 50 location points in a large-scale network with 10K nodes.",
    long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/asu-trans-ai-lab/MapMatching4GMNS",
packages=['MapMatching4GMNS'],
package_dir={'MapMatching4GMNS': 'MapMatching4GMNS'},
package_data={'MapMatching4GMNS': ['bin/*']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
| 44.75 | 106 | 0.671189 |
7f1445ec068657ef4cccc13bb930ddbbaf9c1265 | 3,384 | py | Python | horovod/runner/common/util/settings.py | dalian-ai/horovod | 7707267a4bef79e09a9df1d41b0652feb61b76c7 | [
"Apache-2.0"
] | 5,089 | 2017-08-10T20:44:50.000Z | 2019-02-12T00:45:34.000Z | horovod/runner/common/util/settings.py | dalian-ai/horovod | 7707267a4bef79e09a9df1d41b0652feb61b76c7 | [
"Apache-2.0"
] | 669 | 2017-08-11T21:33:41.000Z | 2019-02-12T01:02:17.000Z | horovod/runner/common/util/settings.py | dalian-ai/horovod | 7707267a4bef79e09a9df1d41b0652feb61b76c7 | [
"Apache-2.0"
] | 706 | 2017-08-11T00:30:43.000Z | 2019-02-11T12:00:34.000Z | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
class BaseSettings(object):
def __init__(self, num_proc=None, verbose=0, ssh_port=None, ssh_identity_file=None, extra_mpi_args=None,
tcp_flag=None, binding_args=None, key=None, start_timeout=None, output_filename=None,
run_func_mode=None, nics=None, elastic=False, prefix_output_with_timestamp=False):
"""
:param num_proc: number of horovod processes (-np)
:type num_proc: int
:param verbose: level of verbosity
:type verbose: int
:param ssh_port: SSH port on all the hosts
:type ssh_port: int
:param ssh_identity_file: SSH identity (private key) file
:type ssh_identity_file: string
:param extra_mpi_args: Extra MPI arguments to pass to mpirun
:type extra_mpi_args: string
:param tcp_flag: TCP only communication flag
:type tcp_flag: boolean
:param binding_args: Process binding arguments
:type binding_args: string
:param key: used for encryption of parameters passed across the hosts
:type key: str
        :param start_timeout: all startup checks have to finish before this timeout runs out.
:type start_timeout: horovod.runner.common.util.timeout.Timeout
:param output_filename: optional filename to redirect stdout / stderr by process
:type output_filename: string
:param run_func_mode: whether it is run function mode
:type run_func_mode: boolean
:param nics: specify the NICs to be used for tcp network communication.
:type nics: Iterable[str]
:param elastic: enable elastic auto-scaling and fault tolerance mode
:type elastic: boolean
:param prefix_output_with_timestamp: shows timestamp in stdout/stderr forwarding on the driver
:type prefix_output_with_timestamp: boolean
"""
self.num_proc = num_proc
self.verbose = verbose
self.ssh_port = ssh_port
self.ssh_identity_file = ssh_identity_file
self.extra_mpi_args = extra_mpi_args
self.tcp_flag = tcp_flag
self.binding_args = binding_args
self.key = key
self.start_timeout = start_timeout
self.output_filename = output_filename
self.run_func_mode = run_func_mode
self.nics = nics
self.elastic = elastic
self.prefix_output_with_timestamp = prefix_output_with_timestamp
class Settings(BaseSettings):
def __init__(self, hosts=None, **kwargs):
"""
:param hosts: string, comma-delimited, of hostname[s] with slots number[s]
:type hosts: string
"""
super(Settings, self).__init__(**kwargs)
self.hosts = hosts
| 45.12 | 108 | 0.675236 |
5080b9b4f1a32e00b2322db6f758d933ea9337d3 | 11,750 | py | Python | notebooks/archive/phoenix/phoenix/scrapers/wb_metadata_scraper.py | worldbank/wb-nlp-apps | c2537c6d1f0716c5f8edc3e69dd15732b3ee3d71 | [
"MIT"
] | 6 | 2021-06-30T03:41:30.000Z | 2022-03-05T22:57:44.000Z | notebooks/archive/phoenix/phoenix/scrapers/wb_metadata_scraper.py | worldbank/wb-nlp-apps | c2537c6d1f0716c5f8edc3e69dd15732b3ee3d71 | [
"MIT"
] | null | null | null | notebooks/archive/phoenix/phoenix/scrapers/wb_metadata_scraper.py | worldbank/wb-nlp-apps | c2537c6d1f0716c5f8edc3e69dd15732b3ee3d71 | [
"MIT"
] | 3 | 2021-08-20T04:29:10.000Z | 2021-10-04T18:58:49.000Z | import requests
import json
import os
import time
import glob
import pandas as pd
import re
from joblib import Parallel, delayed
from phoenix.path_manager import get_corpus_path
from phoenix.dataset.document import DocumentDB
from phoenix.scrapers.utils import (
download_with_retry,
normalize_str_col,
collapse_array,
collapse_nested_dict,
make_unique_entry,
normalize_geo_regions
)
fl_params = [
'guid', 'abstracts', 'admreg', 'alt_title', 'authr', 'available_in',
'bdmdt', 'chronical_docm_id', 'closedt', 'colti', 'count', 'credit_no',
'disclosure_date', 'disclosure_type', 'disclosure_type_date', 'disclstat',
'display_title', 'docdt', 'docm_id', 'docna', 'docty', 'dois', 'entityid',
'envcat', 'geo_reg', 'geo_reg_and_mdk', 'historic_topic', 'id',
'isbn', 'issn', 'keywd', 'lang', 'listing_relative_url', 'lndinstr', 'loan_no',
'majdocty', 'majtheme', 'ml_abstract', 'ml_display_title', 'new_url', 'owner',
'pdfurl', 'prdln', 'projn', 'publishtoextweb_dt', 'repnb', 'repnme', 'seccl',
'sectr', 'src_cit', 'subsc', 'subtopic', 'teratopic', 'theme', 'topic', 'topicv3',
'totvolnb', 'trustfund', 'txturl', 'unregnbr', 'url_friendly_title', 'versiontyp',
'versiontyp_key', 'virt_coll', 'vol_title', 'volnb', 'projectid',
]
SCRAPER_DIR = get_corpus_path('WB')
API_JSON_DIR = os.path.join(SCRAPER_DIR, 'tmp_api_json')
def request_worldbank_api(fl_params=None, offset=0, limit=1, max_retries=10):
'''
fl_params: list of values to return per row
offset: parameter corresponding to the start page
limit: maximum number of rows returned by the api call
'''
if fl_params is None:
fl_params = ['guid']
api_url = 'http://search.worldbank.org/api/v2/wds'
api_params = dict(
format='json',
fl=','.join(fl_params),
lang_exact='English',
disclstat='Disclosed',
srt='docdt',
order='desc', # Use asc such that pages already downloaded can still be used
os=offset,
rows=limit,
# frmdisclosuredate='', # '2018-09-12'
# todisclosuredate='', # '2018-09-13'
)
response = download_with_retry(url=api_url, params=api_params)
if (response is None) or (response.status_code != 200):
return {}
json_content = response.json()
return json_content
def get_total_documents():
# This method solves the problem of determination of
# the total pages in the database automatically.
poll_request = request_worldbank_api()
total_documents = poll_request['total']
return int(total_documents)
def scrape_page(fl_params, page, limit=500, verbose=True, store_to_file=True):
offset = page * limit
page_content = request_worldbank_api(fl_params=fl_params, offset=offset, limit=limit)
page_content = page_content['documents']
func_params = {'page': page}
# Remove extraneous key
page_content.pop('facets')
if store_to_file:
if not os.path.isdir(API_JSON_DIR):
os.makedirs(API_JSON_DIR)
page_file = os.path.join(API_JSON_DIR, 'data-{page}.json'.format(**func_params))
with open(page_file, 'w') as fl:
json.dump(page_content, fl)
if verbose:
print('Completed scraping of page {page}.'.format(**func_params))
time.sleep(1)
else:
return page_content
def scrape_worldbank_operational_docs_api(fl_params, limit=500, max_pages=5, n_jobs=1, verbose=False, to_celery=True):
'''
Note:
Parallelization of API access is discouraged for large limit size.
        It could result in throttling or failed return values.
'''
func_params = {}
total_documents = get_total_documents()
total_pages = (total_documents // limit) + 1
func_params['total_pages'] = total_pages
scrape_params = []
for page in range(total_pages):
func_params['page'] = page + 1
if (max_pages is not None) and (page > max_pages):
print('Terminating scraping for remaining pages...')
break
if verbose:
print('Scraping page {page} / {total_pages}'.format(**func_params))
scrape_params.append(dict(fl_params=fl_params, page=page, limit=limit, verbose=verbose))
if to_celery:
from phoenix.tasks.wb_metadata_scraper_tasks import scrape_and_store_page
async_objs = {sp['page']: scrape_and_store_page.delay(sp) for sp in scrape_params}
return async_objs
else:
Parallel(n_jobs=n_jobs)(delayed(scrape_page)(**sp) for sp in scrape_params)
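# Example driver call (illustrative arguments; with to_celery=False pages are
# scraped in-process and written under API_JSON_DIR):
#   scrape_worldbank_operational_docs_api(fl_params, limit=500, max_pages=None,
#                                         n_jobs=1, verbose=True, to_celery=False)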
# ! Processing and normalization of scraped document metadata
def normalize_page_content(page_content, use_short_columns=True):
# if use_short_columns:
# columns = ['guid', 'docyear', 'majdoctype', 'doctype', 'authors', 'colti', 'display_title', 'docdt', 'docm_id', 'historic_topic', 'pdfurl', 'seccl', 'txturl', 'language', 'admreg', 'country', 'txtfilename']
# else:
# columns = ['authors', 'abstracts', 'admreg', 'alt_title', 'available_in', 'bdmdt', 'chronical_docm_id', 'closedt', 'colti', 'count', 'credit_no', 'disclosure_date', 'disclosure_type', 'disclosure_type_date', 'disclstat', 'display_title', 'docdt', 'doc_year', 'docm_id', 'docna', 'docty', 'dois', 'entityids', 'envcat', 'geo_regions', 'geo_region_mdks', 'historic_topic', 'id', 'isbn', 'issn', 'keywd', 'lang', 'listing_relative_url', 'lndinstr', 'loan_no', 'majdocty', 'majtheme', 'ml_abstract', 'ml_display_title', 'new_url', 'owner', 'pdfurl', 'prdln', 'projn', 'publishtoextweb_dt', 'repnb', 'repnme', 'seccl', 'sectr', 'src_cit', 'subsc', 'subtopic', 'teratopic', 'theme', 'topic', 'topicv3', 'totvolnb', 'trustfund', 'txturl', 'unregnbr', 'url_friendly_title', 'versiontyp', 'versiontyp_key', 'virt_coll', 'vol_title', 'volnb']
normalized_data = pd.DataFrame(page_content).T
normalized_data.index.name = 'uid'
normalized_data.index = normalized_data.index.str.strip('D') # The API updated the format of `uid` by adding a `D` prefix to the original format.
normalized_data.index = normalized_data.index.astype(int)
rename_cols = {
'docty': 'doc_type',
'lang': 'language',
'majdocty': 'majdoctype',
'count': 'country'
}
normalized_data = normalized_data.rename(columns=rename_cols)
try:
normalized_data['authors'] = normalized_data['authors'].map(lambda auth: auth.get('authr') if pd.notna(auth) else auth)
except KeyError:
# This means that the metadata doesn't have an author field
normalized_data['authors'] = None
# Assume that the `display_title` field follows a standard format: list -> dict
normalized_data['display_title'] = normalized_data['display_title'].map(lambda dt: dt[0].get('display_title') if len(dt) else None)
for col in normalized_data.columns:
try:
# Normalize line breaks for string data
normalized_data[col] = normalize_str_col(normalized_data[col])
normalized_data[col] = normalized_data[col].map(lambda x: collapse_array(x, '|'))
normalized_data[col] = normalized_data[col].map(lambda x: collapse_nested_dict(x, '|'))
except AttributeError:
# column is not a string type
continue
normalized_data['majdoctype'] = make_unique_entry(normalized_data['majdoctype'])
normalized_data['admreg'] = make_unique_entry(normalized_data['admreg'])
normalized_data['geo_regions'] = normalized_data['geo_regions'].map(normalize_geo_regions)
normalized_data['docyear'] = pd.to_datetime(normalized_data['docdt']).dt.year
# existing_cols = normalized_data.columns.intersection(columns)
# new_cols = pd.Index(set(columns).difference(normalized_data.columns))
# normalized_data = normalized_data[existing_cols]
# for col in new_cols:
# normalized_data[col] = None
return normalized_data
METADATA_COLS = [
'corpus', 'id', 'path_original', 'path_clean', 'filename_original', 'year',
'major_doc_type', 'doc_type', 'author', 'collection', 'title', 'journal', 'volume',
'date_published', 'digital_identifier', 'topics_src', 'url_pdf', 'url_txt', 'language_src',
'adm_region', 'geo_region', 'country',
# Not yet available at this stage...,
# 'language_detected', 'language_score', 'tokens'
# WB specific fields
# 'wb_lending_instrument', 'wb_product_line', 'wb_major_theme', 'wb_theme', 'wb_sector', # These are no longer available in the API or were renamed.
'wb_subtopic_src', 'wb_project_id',
# 'wb_environmental_category',
]
def build_wb_id(uid, max_len=9):
# return f'wb_{"0"*(max_len - len(str(uid)))}{uid}'
return f'wb_{uid}'
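# e.g. build_wb_id(31944309) -> 'wb_31944309'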
def standardize_metadata_fields(metadata_df):
'''
This method must be applied to the original metadata processed dataframe.
This will assign the final field names.
'''
metadata_df = metadata_df.reset_index()
metadata_df['uid'] = metadata_df.uid.map(build_wb_id)
wb_core_field_map = {
'uid': 'id',
'docyear': 'year',
'majdoctype': 'major_doc_type',
'doctype': 'doc_type',
'authors': 'author',
'colti': 'collection',
'display_title': 'title',
'docdt': 'date_published',
'docm_id': 'digital_identifier',
'historic_topic': 'topics_src',
'pdfurl': 'url_pdf',
'txturl': 'url_txt',
'language': 'language_src',
'admreg': 'adm_region',
'country': 'country',
'geo_regions': 'geo_region',
}
wb_specific_field_map = {
'lndinstr': 'wb_lending_instrument',
'prdln': 'wb_product_line',
'majtheme': 'wb_major_theme',
'theme': 'wb_theme',
'sectr': 'wb_sector',
# 'envcat': 'wb_environmental_category',
'projectid': 'wb_project_id',
'subtopic': 'wb_subtopic_src',
}
wb_new_fields = ['corpus', 'path_original', 'path_clean', 'filename_original', 'journal', 'volume']
path_original_dir = '/NLP/CORPUS/WB/TXT_ORIG'
path_clean_dir = '/NLP/CORPUS/WB/TXT_CLEAN'
# Perform post normalization preprocessing
metadata_df['docdt'] = pd.to_datetime(metadata_df['docdt']).dt.date.map(str)
# Apply final field names
metadata_df = metadata_df.rename(columns=wb_core_field_map)
metadata_df = metadata_df.rename(columns=wb_specific_field_map)
for nf in wb_new_fields:
if nf == 'corpus':
metadata_df[nf] = 'wb'
elif nf == 'filename_original':
metadata_df[nf] = metadata_df.url_txt.map(lambda x: os.path.basename(x) if isinstance(x, str) else x)
elif nf == 'path_original':
metadata_df[nf] = metadata_df['id'].map(lambda x: f"{path_original_dir}/{x}.txt")
elif nf == 'path_clean':
metadata_df[nf] = metadata_df['id'].map(lambda x: f"{path_clean_dir}/{x}.txt")
elif nf in ['journal', 'volume']:
metadata_df[nf] = None
metadata_df = metadata_df[METADATA_COLS]
return metadata_df.set_index('id')
def scrape_normalize_dump_wb_data_page(scrape_params):
metadb = DocumentDB()
page_content = scrape_page(
scrape_params.get('fl_params'), scrape_params.get('page'),
limit=scrape_params.get('limit', 500), verbose=False, store_to_file=False
)
normalized_data = normalize_page_content(page_content, use_short_columns=True)
metadata_df = standardize_metadata_fields(normalized_data)
metadata_df = metadata_df.reset_index('id')
metadata_df['_id'] = metadata_df['id']
metadata_df = metadata_df.drop('id', axis=1)
store_resp = metadb.store_metadata_data(metadata_df)
return dict(page=scrape_params.get('page'), store_resp=store_resp)
| 38.273616 | 842 | 0.668 |
542481b3077ee7228f266fe40b4649943070b234 | 1,170 | py | Python | script/spider_launcher.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | script/spider_launcher.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | script/spider_launcher.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | import paramiko
import sys
from helper.args_parser import stock_codes, future_codes
def launch_model():
# Spider name.
spider_name = 'stock'
# Codes.
# codes = stock_codes
codes = ['sh']
# Start date.
start = "2008-01-01"
# End date.
end = "2019-07-19"
# Mounted dir.
mounted_dir = '/home/duser/shuyu/Personae:/app/Personae/'
image_name = 'ceruleanwang/personae'
rl_cmd = 'docker run -tv {} --network=quant {} spider/'.format(mounted_dir, image_name)
rl_cmd += "{}_spider.py -c {} -s {} -e {}".format(
spider_name, " ".join(codes), start, end
)
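    # With the settings above, rl_cmd expands to:
    #   docker run -tv /home/duser/shuyu/Personae:/app/Personae/ --network=quant \
    #       ceruleanwang/personae spider/stock_spider.py -c sh -s 2008-01-01 -e 2019-07-19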
cmd = rl_cmd
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname='192.168.4.199', port=22, username='duser')
std_in, std_out, std_err = ssh.exec_command(cmd)
while True:
line = std_out.readline()
if line:
sys.stdout.write(line)
else:
break
ssh.close()
if __name__ == '__main__':
launch_model()
| 22.5 | 91 | 0.617094 |
bed8c30ede7c161001cb0ce5b4248c8a74cb188e | 57 | py | Python | superset/models/__init__.py | Prepsmith/incubator-superset | 834dbab3304d3e71dc53f298658ed51eb55956d7 | [
"Apache-2.0"
] | 2 | 2018-12-04T07:47:02.000Z | 2021-08-12T01:43:34.000Z | superset/models/__init__.py | Prepsmith/incubator-superset | 834dbab3304d3e71dc53f298658ed51eb55956d7 | [
"Apache-2.0"
] | 3 | 2021-09-08T21:26:28.000Z | 2022-03-29T22:28:44.000Z | superset/models/__init__.py | Prepsmith/incubator-superset | 834dbab3304d3e71dc53f298658ed51eb55956d7 | [
"Apache-2.0"
] | 4 | 2017-04-28T07:52:00.000Z | 2017-05-03T12:34:41.000Z | from . import core # noqa
from . import sql_lab # noqa
| 19 | 29 | 0.684211 |
a69b1eeca6fc017c3118d3b545f07dacaa100c1f | 2,907 | py | Python | dockstream/utils/entry_point_functions/write_out.py | niladell/DockStream | 75f06d24a95699cdc06fe1ea021e213e1d9fa5b3 | [
"Apache-2.0"
] | 34 | 2021-08-05T06:28:30.000Z | 2022-03-17T02:42:49.000Z | dockstream/utils/entry_point_functions/write_out.py | niladell/DockStream | 75f06d24a95699cdc06fe1ea021e213e1d9fa5b3 | [
"Apache-2.0"
] | 9 | 2021-08-31T10:35:51.000Z | 2022-02-03T08:57:58.000Z | dockstream/utils/entry_point_functions/write_out.py | niladell/DockStream | 75f06d24a95699cdc06fe1ea021e213e1d9fa5b3 | [
"Apache-2.0"
] | 10 | 2021-08-12T02:32:11.000Z | 2022-01-19T11:51:33.000Z | from dockstream.utils.enums.docking_enum import DockingConfigurationEnum, ResultKeywordsEnum
from dockstream.utils.enums.ligand_preparation_enum import LigandPreparationEnum
from dockstream.utils.enums.logging_enums import LoggingConfigEnum
from dockstream.utils.general_utils import *
def handle_poses_writeout(docking_run, docker, output_prefix):
_LE = LoggingConfigEnum()
_LP = LigandPreparationEnum()
_DE = DockingConfigurationEnum()
if in_keys(docking_run, [_DE.OUTPUT, _DE.OUTPUT_POSES]):
if in_keys(docking_run, [_DE.OUTPUT, _DE.OUTPUT_POSES, _DE.OUTPUT_POSES_PATH]):
poses_path = docking_run[_DE.OUTPUT][_DE.OUTPUT_POSES][_DE.OUTPUT_POSES_PATH]
poses_path = docker.apply_prefix_to_filename(poses_path, output_prefix)
# if the overwrite flag is set and the output file exists already, append number to basename
if nested_get(docking_run, [_DE.OUTPUT,
_DE.OUTPUT_POSES,
_DE.OUTPUT_POSES_OVERWRITE],
default=False):
poses_path = docker.update_path_to_unused(path=poses_path)
mode = nested_get(docking_run, [_DE.OUTPUT, _DE.OUTPUT_POSES, _DE.OUTPUT_MODE],
default=_DE.OUTPUT_MODE_ALL)
docker.write_docked_ligands(path=poses_path, mode=mode)
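# Hypothetical docking_run snippet consumed by handle_poses_writeout (key names
# mirror DockingConfigurationEnum members; the exact strings are assumptions):
#   {"output": {"poses": {"poses_path": "poses.sdf", "overwrite": true, "mode": "all"}}}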
def handle_scores_writeout(docking_run, docker, output_prefix):
_LE = LoggingConfigEnum()
_LP = LigandPreparationEnum()
_DE = DockingConfigurationEnum()
if in_keys(docking_run, [_DE.OUTPUT, _DE.OUTPUT_SCORES]):
if in_keys(docking_run, [_DE.OUTPUT, _DE.OUTPUT_SCORES, _DE.OUTPUT_SCORES_PATH]):
scores_path = docking_run[_DE.OUTPUT][_DE.OUTPUT_SCORES][_DE.OUTPUT_SCORES_PATH]
scores_path = docker.apply_prefix_to_filename(scores_path, output_prefix)
# if the overwrite flag is set and the output file exists already, append number to basename
if nested_get(docking_run, [_DE.OUTPUT, _DE.OUTPUT_SCORES, _DE.OUTPUT_SCORES_OVERWRITE],
default=False):
scores_path = docker.update_path_to_unused(path=scores_path)
mode = nested_get(docking_run, [_DE.OUTPUT, _DE.OUTPUT_SCORES, _DE.OUTPUT_MODE],
default=_DE.OUTPUT_MODE_ALL)
docker.write_result(path=scores_path, mode=mode)
def handle_score_printing(print_scores: bool, print_all: bool, docker, logger):
_LE = LoggingConfigEnum()
_LP = LigandPreparationEnum()
_DE = DockingConfigurationEnum()
if print_scores:
_RK = ResultKeywordsEnum()
scores = docker.get_scores(best_only=not print_all)
for score in scores:
print(score, end="\n")
logger.log(f"Printed {len(scores)} scores to console (print_all set to {print_all}).", _LE.DEBUG) | 48.45 | 105 | 0.684899 |
c6a2d3f7977b4f9d0dcb5bce811c7d7d8e05f6da | 877 | py | Python | py/models.py | redmanatee/field-promotion | bba3fc1ffcd77587a10e85bbfb5820f86242649c | [
"MIT"
] | null | null | null | py/models.py | redmanatee/field-promotion | bba3fc1ffcd77587a10e85bbfb5820f86242649c | [
"MIT"
] | null | null | null | py/models.py | redmanatee/field-promotion | bba3fc1ffcd77587a10e85bbfb5820f86242649c | [
"MIT"
] | null | null | null | from google.appengine.ext import ndb
from google.appengine.api import memcache
class Game(ndb.Model):
date = ndb.DateProperty(required=True)
created_at = ndb.DateTimeProperty()
player_faction = ndb.StringProperty(required=True)
player_warcaster = ndb.StringProperty(required=True)
opponent_name = ndb.StringProperty()
opponent_faction = ndb.StringProperty(required=True)
opponent_warcaster = ndb.StringProperty(required=True)
size = ndb.IntegerProperty()
result = ndb.StringProperty(required=True)
won = ndb.BooleanProperty()
draw = ndb.BooleanProperty()
teaching = ndb.BooleanProperty()
location = ndb.StringProperty()
game_type = ndb.StringProperty()
class User(ndb.Model):
email = ndb.StringProperty()
name = ndb.StringProperty()
user_id = ndb.StringProperty()
active = ndb.DateTimeProperty(auto_now=True)
premeium = ndb.BooleanProperty(default=False)
| 32.481481 | 55 | 0.786773 |
6b41b211f7c09cafb0e04a27ec306bec1b61c377 | 136 | py | Python | configs/end2end/eqlv2_r50_8x2_1x.py | tztztztztz/eqlv2.mmdet | 907fad905b0cac6372f7ec4035f48ef3d0801098 | [
"Apache-2.0"
] | 95 | 2020-12-17T10:09:36.000Z | 2022-03-31T13:44:44.000Z | configs/end2end/eqlv2_r50_8x2_1x.py | tztztztztz/eqlv2.mmdet | 907fad905b0cac6372f7ec4035f48ef3d0801098 | [
"Apache-2.0"
] | 19 | 2021-03-20T00:58:47.000Z | 2022-03-30T12:02:11.000Z | configs/end2end/eqlv2_r50_8x2_1x.py | tztztztztz/eqlv2.mmdet | 907fad905b0cac6372f7ec4035f48ef3d0801098 | [
"Apache-2.0"
] | 14 | 2021-03-19T13:03:06.000Z | 2022-02-26T23:50:55.000Z | _base_ = ['./mask_rcnn_r50_8x2_1x.py']
model = dict(roi_head=dict(bbox_head=dict(loss_cls=dict(type="EQLv2"))))
work_dir = 'eqlv2_1x'
| 22.666667 | 72 | 0.727941 |
58135fd4d939d08aaa9181635bc78a4e441397b2 | 489 | py | Python | tests/test_system.py | i4s-pserrano/python-nomad | 0f8dd9dfa1d448465be490f0acf9f5df96cd893f | [
"MIT"
] | 109 | 2016-06-06T09:18:02.000Z | 2022-03-17T17:41:20.000Z | tests/test_system.py | i4s-pserrano/python-nomad | 0f8dd9dfa1d448465be490f0acf9f5df96cd893f | [
"MIT"
] | 104 | 2016-06-04T23:06:06.000Z | 2021-12-08T04:49:43.000Z | tests/test_system.py | i4s-pserrano/python-nomad | 0f8dd9dfa1d448465be490f0acf9f5df96cd893f | [
"MIT"
] | 80 | 2016-06-05T00:33:23.000Z | 2021-11-20T15:17:38.000Z | import pytest
# integration tests requires nomad Vagrant VM or Binary running
def test_initiate_garbage_collection(nomad_setup):
nomad_setup.system.initiate_garbage_collection()
def test_dunder_str(nomad_setup):
assert isinstance(str(nomad_setup.system), str)
def test_dunder_repr(nomad_setup):
assert isinstance(repr(nomad_setup.system), str)
def test_dunder_getattr(nomad_setup):
with pytest.raises(AttributeError):
d = nomad_setup.system.does_not_exist
| 23.285714 | 63 | 0.793456 |
7cf865b8bea089b1bf20927c563734957876afab | 698 | py | Python | run_tests.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | run_tests.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | run_tests.py | littleblackfish/gimmemotifs | 913a6e5db378493155273e2c0f8ab0dc11ab219e | [
"MIT"
] | null | null | null | import os
import re
import sys
import unittest
from glob import glob
if __name__ == "__main__":
def get_tests():
start_dir = os.path.join(os.path.dirname(__file__), "test")
return unittest.TestLoader().discover(start_dir, pattern="test_*.py")
libdirs = glob('build/lib.*')
if len(libdirs) > 0:
p = [os.path.abspath(os.path.join(
os.path.dirname(sys.argv[0]), libdirs[0])
), "test"]
sys.path = p + sys.path
suite = unittest.TestSuite()
for test in get_tests():
suite.addTest(test)
runner = unittest.TextTestRunner(descriptions=1, verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
| 25.851852 | 77 | 0.630372 |
02c7d1e047e25547a5a7f79b873e2ecb67b05aec | 1,924 | py | Python | bingads/v13/bulk/entities/audiences/bulk_in_market_audience.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 86 | 2016-02-29T03:24:28.000Z | 2022-03-29T09:30:21.000Z | bingads/v13/bulk/entities/audiences/bulk_in_market_audience.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 135 | 2016-04-12T13:31:28.000Z | 2022-03-29T02:18:51.000Z | bingads/v13/bulk/entities/audiences/bulk_in_market_audience.py | pawelulita/BingAds-Python-SDK | e7b5a618e87a43d0a5e2c79d9aa4626e208797bd | ["MIT"] | 154 | 2016-04-08T04:11:27.000Z | 2022-03-29T21:21:07.000Z |
from bingads.v13.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.extensions import *
from .bulk_audience import BulkAudience
class BulkInMarketAudience(BulkAudience):
""" Represents an In Market Audience that can be read or written in a bulk file.
This class exposes the :attr:`in_market_audience` property that can be read and written as fields of the
In Market Audience record in a bulk file.
For more information, see In Market Audience at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
in_market_audience=None,
status=None,):
super(BulkInMarketAudience, self).__init__(audience = in_market_audience, status = status)
@property
def in_market_audience(self):
""" Defines an In Market Audience """
return self._audience
@in_market_audience.setter
def in_market_audience(self, in_market_audience):
self._audience = in_market_audience
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.in_market_audience, 'in_market_audience')
super(BulkInMarketAudience, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
def process_mappings_from_row_values(self, row_values):
self.in_market_audience = _CAMPAIGN_OBJECT_FACTORY_V13.create('InMarketAudience')
super(BulkInMarketAudience, self).process_mappings_from_row_values(row_values)
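# A minimal usage sketch (assumes a configured Bing Ads service client; the
# factory call mirrors process_mappings_from_row_values above, and the Name
# value is illustrative):
#
#   audience = _CAMPAIGN_OBJECT_FACTORY_V13.create('InMarketAudience')
#   audience.Name = 'Example in-market audience'
#   bulk_entity = BulkInMarketAudience(in_market_audience=audience, status='Active')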
| 40.93617 | 108 | 0.759875 |
6ecab4136ece9d264078558acebcf347164b541b | 680 | py | Python | backend/apps/sampleapp/migrations/0001_initial.py | domasx2/django-angular-docker-seed | 5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb | ["Unlicense"] | 32 | 2015-04-27T02:01:59.000Z | 2021-04-06T10:19:42.000Z | backend/apps/sampleapp/migrations/0001_initial.py | domasx2/django-angular-docker-seed | 5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb | ["Unlicense"] | 14 | 2015-03-21T08:20:34.000Z | 2016-02-15T07:07:39.000Z | backend/apps/sampleapp/migrations/0001_initial.py | domasx2/django-angular-docker-seed | 5c1ad6d62d179c9cb5cdbf7b1254576efa63b2fb | ["Unlicense"] | 21 | 2015-03-18T18:40:12.000Z | 2021-03-16T22:12:44.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('text', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
]
| 26.153846 | 114 | 0.55 |
e558ad01971fad5b1afc1bf0923826e851311d62 | 279 | py | Python | app/__init__.py | ednoguera/python-rpg-game-api | 4e3ee988a5175c46e0e2503e028167fa910ee8e8 | ["MIT"] | null | null | null | app/__init__.py | ednoguera/python-rpg-game-api | 4e3ee988a5175c46e0e2503e028167fa910ee8e8 | ["MIT"] | 1 | 2020-11-26T14:12:11.000Z | 2020-11-26T14:12:11.000Z | app/__init__.py | ednoguera/python-rpg-game-api | 4e3ee988a5175c46e0e2503e028167fa910ee8e8 | ["MIT"] | 1 | 2022-03-03T23:59:44.000Z | 2022-03-03T23:59:44.000Z |
from flask import Flask
from app.views.index_view import bp as index_bp
from app.views.create_character_view import bp as character_bp
def create_app():
app = Flask(__name__)
app.register_blueprint(index_bp)
app.register_blueprint(character_bp)
    return app
 | 23.25 | 62 | 0.770609 |
4626ab5fdb7231cb5c54bd191e0110aec2a94a75 | 10,433 | py | Python | chempy/util/table.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | ["BSD-2-Clause"] | null | null | null | chempy/util/table.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | ["BSD-2-Clause"] | null | null | null | chempy/util/table.py | matecsaj/chempy | 2c93f185e4547739331193c06d77282206621517 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Convenience functions for presenting reaction systems in tables.
"""
from __future__ import (absolute_import, division, print_function)
import os
import shutil
import subprocess
import tempfile
from ..printing import latex
from ..kinetics.rates import RadiolyticBase
from ..units import to_unitless, get_derived_unit
tex_templates = {
'document': {
'default': r"""
\documentclass[a4paper,9pt]{article}
\pagestyle{empty}
\usepackage[paper=a4paper,margin=1cm]{geometry}
%(usepkg)s
\hypersetup{
bookmarksnumbered=true,
breaklinks=false,
raiselinks=true,
pdfborder={0 0 0},
colorlinks=true,
plainpages=false,
pdfstartview={FitH},
pdfcreator={LaTeX with hyperref package},
citecolor=teal,
linkcolor=red,
urlcolor=blue,
}
\begin{document}
%(begins)s
%(table)s
%(ends)s
\end{document}
"""
},
'table': {
'default': r"""
\begin{%(table_env)s}
\centering
\label{tab:%(label)s}
\caption[%(short_cap)s]{%(long_cap)s}
\begin{tabular}{%(alignment)s}
\toprule
%(header)s
\midrule
%(body)s
\bottomrule
\end{tabular}
\end{%(table_env)s}""",
'longtable': r"""
\begin{%(table_env)s}{%(alignment)s}
\caption[%(short_cap)s]{%(long_cap)s
\label{tab:%(label)s}}\\
\toprule
%(header)s
\midrule
%(body)s
\bottomrule
\end{%(table_env)s}"""
}
}
def render_tex_to_pdf(contents, texfname, pdffname, output_dir, save):
""" Generates a pdf from a tex file by calling pdflatex
Parameters
----------
contents : str
texfname : path
pdffname : path
output_dir : path
save : path or bool or str(bool)
"""
created_tempdir = False
try:
if output_dir is None:
output_dir = tempfile.mkdtemp()
created_tempdir = True
texpath = os.path.join(output_dir, texfname)
pdfpath = os.path.join(output_dir, pdffname)
cmds = ['pdflatex', '-halt-on-error', '-interaction',
'batchmode', texfname]
with open(texpath, 'wt') as ofh:
ofh.write(contents)
ofh.flush()
with open(pdfpath + '.out', 'wb') as logfile:
p = subprocess.Popen(cmds, cwd=output_dir,
stdout=logfile, stderr=logfile)
retcode = p.wait()
p = subprocess.Popen(cmds, cwd=output_dir,
stdout=logfile, stderr=logfile)
retcode += p.wait()
if retcode:
fmtstr = "{}\n returned with exit status {}"
raise RuntimeError(fmtstr.format(' '.join(cmds), retcode))
else:
return pdfpath
finally:
if save is True or save == 'True':
pass
else:
if save is False or save == 'False':
if created_tempdir:
shutil.rmtree(output_dir)
else:
# interpret path to copy pdf to.
if not os.path.samefile(pdfpath, save):
shutil.copy(pdfpath, save)
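# A minimal usage sketch of render_tex_to_pdf (requires pdflatex on PATH; the
# document body and file names are illustrative):
#
#   contents = r"\documentclass{article}\begin{document}Hello\end{document}"
#   pdf_path = render_tex_to_pdf(contents, 'hello.tex', 'hello.pdf',
#                                output_dir='out', save=True)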
def rsys2tablines(rsys, rref0=1, coldelim=' & ',
tex=True, ref_fmt=None,
unit_registry=None, unit_fmt='{}', k_fmt='%.4g'):
"""
Generates a table representation of a ReactionSystem.
Parameters
----------
rsys : ReactionSystem
rref0 : integer
default start of index counter (default: 1)
coldelim : string
column delimiter (default: ' & ')
tex : bool
use latex formated output (default: True)
ref_fmt : string or callable
format string of ``ref`` attribute of reactions
unit_registry : unit registry
optional (default: None)
"""
if ref_fmt is None:
def _doi(s):
return r'\texttt{\href{http://dx.doi.org/'+s+'}{doi:'+s+'}}'
def ref_fmt(s):
if s is None:
return 'None'
if tex:
if isinstance(s, dict):
return _doi(s['doi'])
if s.startswith('doi:'):
return _doi(s[4:])
return s
def _wrap(s):
if tex:
return '\\ensuremath{' + s + '}'
else:
return s
lines = []
for ri, rxn in enumerate(rsys.rxns):
rxn_ref = rxn.ref
if isinstance(rxn.param, RadiolyticBase):
if unit_registry is not None:
kunit = get_derived_unit(unit_registry, 'radiolytic_yield')
k = k_fmt % to_unitless(rxn.param.args[0], kunit)
k_unit_str = (kunit.dimensionality.latex.strip('$') if tex
else kunit.dimensionality)
else:
if unit_registry is not None:
kunit = (get_derived_unit(unit_registry,
'concentration')**(1-rxn.order()) /
get_derived_unit(unit_registry, 'time'))
try:
k = k_fmt % to_unitless(rxn.param, kunit)
k_unit_str = (kunit.dimensionality.latex.strip('$') if tex
else kunit.dimensionality)
except Exception:
k, k_unit_str = rxn.param.equation_as_string(k_fmt, tex)
else:
k_unit_str = '-'
if isinstance(k_fmt, str):
k = k_fmt % rxn.param
else:
k = k_fmt(rxn.param)
latex_kw = dict(with_param=False, with_name=False)
if tex:
latex_kw['substances'] = rsys.substances
latex_kw['Reaction_around_arrow'] = ('}}' + coldelim + '\\ensuremath{{',
'}}' + coldelim + '\\ensuremath{{')
else:
latex_kw['Reaction_around_arrow'] = (coldelim,)*2
latex_kw['Reaction_arrow'] = '->'
lines.append(coldelim.join([
str(rref0+ri),
('\\ensuremath{%s}' if tex else '%s') % latex(rxn, **latex_kw),
_wrap(k),
unit_fmt.format(_wrap(k_unit_str)),
ref_fmt(rxn_ref) if callable(ref_fmt) else ref_fmt.format(rxn_ref)
]))
return lines
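# A minimal usage sketch (assumes chempy's ReactionSystem.from_string
# constructor; the reaction string and rate constant are illustrative):
#
#   from chempy import ReactionSystem
#   rsys = ReactionSystem.from_string('2 NO2 -> N2O4; 42')
#   lines = rsys2tablines(rsys, tex=False, coldelim=' | ')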
def rsys2table(rsys, table_template=None, table_template_dict=None,
param_name='Rate constant', **kwargs):
r"""
Renders user provided table_template with table_template_dict which
also has 'body' entry generated from `rsys2tablines`.
Defaults is LaTeX table requiring booktabs package to be used
(add \usepackage{booktabs} to preamble).
Parameters
----------
rsys : ReactionSystem
table_template : string
    table_template_dict : dict used to render table_template (excl. "body")
param_name : str
Column header for parameter column
longtable : bool
use longtable in defaults. (default: False)
**kwargs :
passed onto rsys2tablines
"""
siunitx = kwargs.pop('siunitx', False)
line_term = r' \\'
defaults = {
'table_env': 'longtable' if kwargs.pop(
'longtable', False) else 'table',
'alignment': 'llllSll' if siunitx else 'lllllll',
'header': kwargs.get('coldelim', ' & ').join([
'Id.', 'Reactants', '', 'Products', '{%s}' % param_name,
'Unit', 'Ref'
]) + line_term,
'short_cap': rsys.name,
'long_cap': rsys.name,
'label': (rsys.name or 'None').lower()
}
if table_template_dict is None:
table_template_dict = defaults
else:
        for k, v in defaults.items():
if k not in table_template_dict:
table_template_dict[k] = v
if 'body' in table_template_dict:
raise KeyError("There is already a 'body' key in table_template_dict")
if 'k_fmt' not in kwargs:
kwargs['k_fmt'] = r'\num{%.4g}' if siunitx else '%.4g'
table_template_dict['body'] = (line_term + '\n').join(rsys2tablines(
rsys, **kwargs)
) + line_term
if table_template is None:
if table_template_dict['table_env'] == 'longtable':
table_template = tex_templates['table']['longtable']
else:
table_template = tex_templates['table']['default']
return table_template % table_template_dict
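# Continuing the sketch above, the complete LaTeX tabular source for a
# reaction system can then be rendered with:
#
#   table_tex = rsys2table(rsys, param_name='Rate constant')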
def rsys2pdf_table(rsys, output_dir=None, doc_template=None,
doc_template_dict=None, save=True, landscape=False,
**kwargs):
"""
Convenience function to render a ReactionSystem as
e.g. a pdf using e.g. pdflatex.
Parameters
----------
rsys : ReactionSystem
output_dir : path to output directory
(default: system's temporary folder)
doc_template : string
        LaTeX boilerplate template including preamble,
        document environment etc.
    doc_template_dict : dict (string -> string)
        dict used to render template (excl. 'table')
longtable : bool
use longtable in defaults. (default: False)
**kwargs :
passed on to `rsys2table`
"""
if doc_template is None:
doc_template = tex_templates['document']['default']
lscape = ['pdflscape' if landscape == 'pdf' else 'lscape'] if landscape else []
_pkgs = [
'booktabs', 'amsmath', ('pdftex,colorlinks,unicode=True', 'hyperref')
] + lscape
if kwargs.get('longtable', False):
_pkgs += ['longtable']
if kwargs.get('siunitx', False):
_pkgs += ['siunitx']
_envs = ['tiny'] + (['landscape'] if landscape else [])
defaults = {
'usepkg': '\n'.join([(r'\usepackage' + (
'[%s]' if isinstance(pkg, tuple) else '') + '{%s}') % pkg for pkg in _pkgs]),
'begins': '\n'.join([r'\begin{%s}' % env for env in _envs]),
'ends': '\n'.join([r'\end{%s}' % env for env in _envs[::-1]])
}
if doc_template_dict is None:
doc_template_dict = defaults
else:
        for k, v in defaults.items():
if k not in doc_template_dict:
doc_template_dict[k] = v
if 'table' in doc_template_dict:
raise KeyError("There is already a 'table' key in doc_template_dict")
doc_template_dict['table'] = rsys2table(rsys, **kwargs)
contents = doc_template % doc_template_dict
if isinstance(save, str) and save.endswith('.pdf'):
        texfname = save[:-len('.pdf')] + '.tex'  # note: rstrip('.pdf') would strip any trailing '.', 'p', 'd' or 'f' characters
pdffname = save
else:
texfname = 'output.tex'
pdffname = 'output.pdf'
return render_tex_to_pdf(contents, texfname, pdffname, output_dir, save)
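# A minimal usage sketch (requires pdflatex and the LaTeX packages listed
# above; the output file name is illustrative):
#
#   pdf_path = rsys2pdf_table(rsys, save='reactions.pdf', landscape=True)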
| 31.905199 | 89 | 0.570689 |
ff06e0c511fe9cf00eabaa07a9c888c19c85c63e | 3,466 | py | Python | data_managers/data_manager_kallisto_index_builder/data_manager/kallisto_index_builder.py | supernord/tools-iuc | 9a0c41967765d120a8fc519c0c7f09cbe3a6efbe | ["MIT"] | 142 | 2015-03-13T18:08:34.000Z | 2022-03-30T23:52:34.000Z | data_managers/data_manager_kallisto_index_builder/data_manager/kallisto_index_builder.py | mtekman/tools-iuc | 95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5 | ["MIT"] | 3,402 | 2015-01-05T18:04:20.000Z | 2022-03-30T22:09:36.000Z | data_managers/data_manager_kallisto_index_builder/data_manager/kallisto_index_builder.py | willemdek11/tools-iuc | dc0a0cf275168c2a88ee3dc47652dd7ca1137871 | ["MIT"] | 438 | 2015-01-07T20:33:59.000Z | 2022-03-30T04:39:18.000Z |
#!/usr/bin/env python
# Based heavily on the Bowtie 2 data manager wrapper script by Dan Blankenberg
from __future__ import print_function
import argparse
import json
import os
import subprocess
import sys
DEFAULT_DATA_TABLE_NAME = "kallisto_indexes"
def get_id_name(params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_kallisto_index(data_manager_dict, options, params, sequence_id, sequence_name):
data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
target_directory = params['output_data'][0]['extra_files_path']
if not os.path.exists(target_directory):
os.mkdir(target_directory)
fasta_base_name = os.path.split(options.fasta_filename)[-1]
sym_linked_fasta_filename = os.path.join(target_directory, fasta_base_name)
os.symlink(options.fasta_filename, sym_linked_fasta_filename)
args = ['kallisto', 'index']
args.extend([sym_linked_fasta_filename, '-i', sequence_id])
proc = subprocess.Popen(args=args, shell=False, cwd=target_directory)
return_code = proc.wait()
if return_code:
print("Error building index.", file=sys.stderr)
sys.exit(return_code)
data_table_entry = dict(value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id)
_add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def main():
# Parse Command Line
parser = argparse.ArgumentParser()
parser.add_argument('--output', dest='output', action='store', type=str, default=None)
parser.add_argument('--fasta_filename', dest='fasta_filename', action='store', type=str, default=None)
parser.add_argument('--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None)
parser.add_argument('--fasta_description', dest='fasta_description', action='store', type=str, default=None)
parser.add_argument('--data_table_name', dest='data_table_name', action='store', type=str, default='kallisto_indexes')
options = parser.parse_args()
filename = options.output
with open(filename) as fh:
params = json.load(fh)
data_manager_dict = {}
if options.fasta_dbkey in [None, '', '?']:
raise Exception('"%s" is not a valid dbkey. You must specify a valid dbkey.' % (options.fasta_dbkey))
sequence_id, sequence_name = get_id_name(params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description)
# build the index
build_kallisto_index(data_manager_dict, options, params, sequence_id, sequence_name)
# save info to json file
with open(filename, 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
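# For reference, the Galaxy-supplied JSON params file is expected to look
# roughly like this sketch (only the fields read by get_id_name and
# build_kallisto_index are shown; all values are illustrative):
#
#   {"param_dict": {"sequence_id": "hg38", "sequence_name": "Human (hg38)"},
#    "output_data": [{"extra_files_path": "/tmp/kallisto_index"}]}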
| 40.776471 | 124 | 0.7412 |
70a055369d2e29b7d8ed993d47c0e9d0955c3bb4 | 420 | py | Python | DataStructAndAlgo/SStack.py | ggwhsd/PythonPractice | 58248be9bb5700bc9a1914f8e41c30931fad8b82 | ["MIT"] | null | null | null | DataStructAndAlgo/SStack.py | ggwhsd/PythonPractice | 58248be9bb5700bc9a1914f8e41c30931fad8b82 | ["MIT"] | null | null | null | DataStructAndAlgo/SStack.py | ggwhsd/PythonPractice | 58248be9bb5700bc9a1914f8e41c30931fad8b82 | ["MIT"] | null | null | null |
class StackUnderflow(ValueError):
pass
class SStack():
def __init__(self):
self._elems = []
def is_empty(self):
return self._elems == []
def top(self):
if self._elems == []:
raise StackUnderflow("in SStack.top()")
return self._elems[-1]
def push(self,elem):
self._elems.append(elem)
def pop(self):
if self._elems == []:
raise StackUnderflow(" in SStack.pop() ")
return self._elems.pop()
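if __name__ == '__main__':
    # A minimal usage sketch of SStack (not part of the original module):
    stack = SStack()
    stack.push(1)
    stack.push(2)
    assert stack.top() == 2      # peek at the top without removing it
    assert stack.pop() == 2      # elements come back in LIFO order
    assert not stack.is_empty()  # 1 is still on the stack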
| 17.5 | 44 | 0.654762 |
d4266f110be784b8c182618deb7c2bed7089ca13 | 2,942 | py | Python | coopeV3/templatetags/vip.py | nanoy42/coope | 3f970fe199f4ad8672cbb3a3200b0c4d110c9847 | ["MIT"] | 3 | 2019-06-27T21:08:41.000Z | 2019-09-07T17:20:39.000Z | coopeV3/templatetags/vip.py | nanoy42/coope | 3f970fe199f4ad8672cbb3a3200b0c4d110c9847 | ["MIT"] | 21 | 2019-06-09T10:56:53.000Z | 2021-06-10T21:35:12.000Z | coopeV3/templatetags/vip.py | nanoy42/coope | 3f970fe199f4ad8672cbb3a3200b0c4d110c9847 | ["MIT"] | null | null | null |
from django import template
import random
from preferences.models import GeneralPreferences
register = template.Library()
@register.simple_tag
def president():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.president`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
return gp.president
@register.simple_tag
def treasurer():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.treasurer`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
return gp.treasurer
@register.simple_tag
def secretary():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.secretary`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
return gp.secretary
@register.simple_tag
def phoenix():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.phoenixTM_responsible`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
return gp.phoenixTM_responsible
@register.simple_tag
def global_message():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.global_message`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
messages = gp.global_message.split("\n")
return random.choice(messages)
@register.simple_tag
def logout_time():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.automatic_logout_time`.
"""
gp, _ = GeneralPreferences.objects.get_or_create(pk=1)
logout_time = gp.automatic_logout_time
return logout_time
@register.simple_tag
def statutes():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.statutes`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
try:
return '<a target="_blank" href="' + gp.statutes.url + '">' + str(gp.statutes) + '</a>'
except:
return "Pas de document"
@register.simple_tag
def rules():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.rules`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
try:
return '<a target="_blank" href="' + gp.rules.url + '">' + str(gp.rules) + '</a>'
except:
return "Pas de document"
@register.simple_tag
def menu():
"""
A tag which returns :attr:`preferences.models.GeneralPreferences.menu`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
try:
return '<a target="_blank" href="' + gp.menu.url + '">' + str(gp.menu) + '</a>'
except:
return "Pas de document"
@register.simple_tag
def alcool_charter():
"""
    A tag which returns :attr:`preferences.models.GeneralPreferences.alcohol_charter`.
"""
gp,_ = GeneralPreferences.objects.get_or_create(pk=1)
try:
return '<a target="_blank" href="' + gp.alcohol_charter.url + '">' + str(gp.alcohol_charter) + '</a>'
except:
return "Pas de document"
| 29.42 | 109 | 0.682189 |
bfaa86dccfebcc665040bc82643a9a0db783e87d | 2,174 | py | Python | chainercv/links/model/pixelwise_softmax_classifier.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | ["MIT"] | 1 | 2018-08-24T02:28:31.000Z | 2018-08-24T02:28:31.000Z | chainercv/links/model/pixelwise_softmax_classifier.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | ["MIT"] | null | null | null | chainercv/links/model/pixelwise_softmax_classifier.py | iory/chainercv | ecb1953f78c526dfd38308d68a4094c9f4df3a8d | ["MIT"] | 2 | 2019-12-16T02:20:26.000Z | 2022-01-17T02:00:49.000Z |
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import reporter
import numpy as np
class PixelwiseSoftmaxClassifier(chainer.Chain):
"""A pixel-wise classifier.
It computes the loss based on a given input/label pair for
semantic segmentation.
Args:
predictor (~chainer.Link): Predictor network.
ignore_label (int): A class id that is going to be ignored in
evaluation. The default value is -1.
class_weight (array): An array
that contains constant weights that will be multiplied with the
loss values along with the channel dimension. This will be
used in :func:`chainer.functions.softmax_cross_entropy`.
"""
def __init__(self, predictor, ignore_label=-1, class_weight=None):
super(PixelwiseSoftmaxClassifier, self).__init__()
with self.init_scope():
self.predictor = predictor
self.ignore_label = ignore_label
if class_weight is not None:
self.class_weight = np.asarray(class_weight, dtype=np.float32)
else:
self.class_weight = class_weight
def to_cpu(self):
super(PixelwiseSoftmaxClassifier, self).to_cpu()
if self.class_weight is not None:
self.class_weight = cuda.to_cpu(self.class_weight)
def to_gpu(self, device=None):
super(PixelwiseSoftmaxClassifier, self).to_gpu(device)
if self.class_weight is not None:
self.class_weight = cuda.to_gpu(self.class_weight, device)
def __call__(self, x, t):
"""Computes the loss value for an image and label pair.
Args:
x (~chainer.Variable): A variable with a batch of images.
t (~chainer.Variable): A variable with the ground truth
image-wise label.
Returns:
~chainer.Variable: Loss value.
"""
self.y = self.predictor(x)
self.loss = F.softmax_cross_entropy(
self.y, t, class_weight=self.class_weight,
ignore_label=self.ignore_label)
reporter.report({'loss': self.loss}, self)
return self.loss
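# A minimal usage sketch (hypothetical `predictor` link; x is a batch of
# images shaped (N, C, H, W) and t an integer label map shaped (N, H, W)):
#
#   model = PixelwiseSoftmaxClassifier(predictor, ignore_label=-1)
#   loss = model(x, t)  # reports 'loss' and returns it for the optimizer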
| 32.939394 | 75 | 0.649494 |
6028378a36889501280b602ea6d96c50da7ac963 | 2,669 | py | Python | rlkit/torch/bc/bc.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | ["MIT"] | 56 | 2019-10-20T03:09:02.000Z | 2022-03-25T09:21:40.000Z | rlkit/torch/bc/bc.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | ["MIT"] | 3 | 2020-10-01T07:33:51.000Z | 2021-05-12T03:40:57.000Z | rlkit/torch/bc/bc.py | yifan-you-37/rl_swiss | 8b0ee7caa5c1fa93860916004cf4fd970667764f | ["MIT"] | 10 | 2019-11-04T16:56:09.000Z | 2022-03-25T09:21:41.000Z |
import numpy as np
from collections import OrderedDict
import torch
import torch.optim as optim
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.core import np_to_pytorch_batch
from rlkit.torch.torch_base_algorithm import TorchBaseAlgorithm
class BC(TorchBaseAlgorithm):
def __init__(
self,
mode, # 'MLE' or 'MSE'
expert_replay_buffer,
num_updates_per_train_call=1,
batch_size=1024,
lr=1e-3,
momentum=0.0,
optimizer_class=optim.Adam,
**kwargs
):
assert mode in ['MLE', 'MSE'], 'Invalid mode!'
if kwargs['wrap_absorbing']: raise NotImplementedError()
super().__init__(**kwargs)
self.mode = mode
self.expert_replay_buffer = expert_replay_buffer
self.batch_size = batch_size
self.optimizer = optimizer_class(
self.exploration_policy.parameters(),
lr=lr,
betas=(momentum, 0.999)
)
self.num_updates_per_train_call = num_updates_per_train_call
def get_batch(self, batch_size, keys=None, use_expert_buffer=True):
if use_expert_buffer:
rb = self.expert_replay_buffer
else:
rb = self.replay_buffer
batch = rb.random_batch(batch_size, keys=keys)
batch = np_to_pytorch_batch(batch)
return batch
def _do_training(self, epoch):
for t in range(self.num_updates_per_train_call):
self._do_update_step(epoch, use_expert_buffer=True)
def _do_update_step(self, epoch, use_expert_buffer=True):
batch = self.get_batch(
self.batch_size,
keys=['observations', 'actions'],
use_expert_buffer=use_expert_buffer
)
obs = batch['observations']
acts = batch['actions']
self.optimizer.zero_grad()
if self.mode == 'MLE':
log_prob = self.exploration_policy.get_log_prob(obs, acts)
loss = -1.0 * log_prob.mean()
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
self.eval_statistics['Log-Likelihood'] = ptu.get_numpy(-1.0*loss)
else:
pred_acts = self.exploration_policy(obs)[0]
squared_diff = (pred_acts - acts)**2
loss = torch.sum(squared_diff, dim=-1).mean()
if self.eval_statistics is None:
self.eval_statistics = OrderedDict()
self.eval_statistics['MSE'] = ptu.get_numpy(loss)
loss.backward()
self.optimizer.step()
@property
def networks(self):
return [self.exploration_policy]
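# A minimal usage sketch (hypothetical policy and buffer objects; the concrete
# classes and the remaining TorchBaseAlgorithm kwargs come from the wider
# codebase, and the train() entry point is assumed from the base class):
#
#   algo = BC(mode='MSE', expert_replay_buffer=expert_buffer,
#             exploration_policy=policy, wrap_absorbing=False, ...)
#   algo.train()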
| 29.01087 | 81 | 0.61746 |
e724d43731f4e7a591067985809c84d78861cfab | 2,015 | py | Python | pyscripts/all_swaps.py | dlabrie/shakescripts-python | 1ac2d17b50754f2fad91df5715c99f023a42ccb2 | ["MIT"] | 1 | 2021-07-23T09:15:36.000Z | 2021-07-23T09:15:36.000Z | pyscripts/all_swaps.py | dlabrie/shakescripts-python | 1ac2d17b50754f2fad91df5715c99f023a42ccb2 | ["MIT"] | null | null | null | pyscripts/all_swaps.py | dlabrie/shakescripts-python | 1ac2d17b50754f2fad91df5715c99f023a42ccb2 | ["MIT"] | null | null | null |
from modules.shakepay import *
updateTransactions()
print("\nProcessing...")
swaps = all_swaps()
swapsSummary = {}
for swapper in swaps:
if swaps[swapper] != 0:
transactions = swapperTransactions(swapper)
lastTransaction = list(transactions.keys())[0]
swapsSummary[lastTransaction] = {"createdAtUnix":transactions[lastTransaction]["createAtUnix"] ,"transaction":transactions[lastTransaction], "swapper":swapper, "balance":swaps[swapper]}
swapsSummary = {key: val for key, val in sorted(swapsSummary.items(), key = lambda item: int(item[1]["createdAtUnix"]), reverse=True)}
print("\n------------ You owe these people ------------")
for transaction in swapsSummary:
if swapsSummary[transaction]["balance"] > 0.00:
print(swapsSummary[transaction]["transaction"]["createdAt"]+" for $"+str(swapsSummary[transaction]["transaction"]["amount"])+" | "+ swapsSummary[transaction]["transaction"]["direction"]+" | "+swapsSummary[transaction]["transaction"]["note"]+" |", swapsSummary[transaction]["swapper"], "|", swapsSummary[transaction]["balance"])
sumFunds = 0.00
print("\n\n------------ These people owe you ------------")
for transaction in swapsSummary:
if swapsSummary[transaction]["balance"] < -0.05:
print(swapsSummary[transaction]["transaction"]["createdAt"]+" for $"+str(swapsSummary[transaction]["transaction"]["amount"])+" | "+ swapsSummary[transaction]["transaction"]["direction"]+" | "+swapsSummary[transaction]["transaction"]["note"]+" |", swapsSummary[transaction]["swapper"], "|", swapsSummary[transaction]["balance"])
sumFunds += swapsSummary[transaction]["balance"]
badgeSwappers = badge_swappers()
print("\n\n------------ Stats ------------")
print("You're missing $"+str(round(sumFunds,2)*-1)+" Canadian Rupees from your wallet")
print("You've swapped with "+str(len(badgeSwappers.keys()))+" different friends since May 3rd 🏓")
print("You've swapped with "+str(len(swaps.keys()))+" different friends since the beginning🏓")
print("\n")
| 54.459459 | 335 | 0.683871 |
06fe062182a648205186fcfd2ff878f9d9414b42 | 77,749 | py | Python | pacu.py | aaronrea/pacu | b345370df18ed4148bf2d2184192c773d7d832e0 | ["BSD-3-Clause"] | 1 | 2021-11-20T21:27:50.000Z | 2021-11-20T21:27:50.000Z | pacu.py | aaronrea/pacu | b345370df18ed4148bf2d2184192c773d7d832e0 | ["BSD-3-Clause"] | 1 | 2021-06-02T02:39:40.000Z | 2021-06-02T02:39:40.000Z | pacu.py | aaronrea/pacu | b345370df18ed4148bf2d2184192c773d7d832e0 | ["BSD-3-Clause"] | 1 | 2021-09-14T01:14:50.000Z | 2022-03-29T21:21:07.000Z |
#!/usr/bin/env python3
import copy
import importlib
import json
import os
import random
import re
import shlex
import subprocess
import datetime
import sys
import time
import traceback
import argparse
try:
import requests
import boto3
import botocore
import urllib.parse
import configure_settings
import settings
from core.models import AWSKey, PacuSession
from setup_database import setup_database_if_not_present
from sqlalchemy import exc
from utils import get_database_connection, set_sigint_handler
except ModuleNotFoundError as error:
exception_type, exception_value, tb = sys.exc_info()
print('Traceback (most recent call last):\n{}{}: {}\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value)))
print('Pacu was not able to start because a required Python package was not found.\nRun `sh install.sh` to check and install Pacu\'s Python requirements.')
sys.exit(1)
class Main:
COMMANDS = [
'aws', 'data', 'exec', 'exit', 'help', 'import_keys', 'list', 'load_commands_file',
'ls', 'quit', 'regions', 'run', 'search', 'services', 'set_keys', 'set_regions',
'swap_keys', 'update_regions', 'whoami', 'swap_session', 'sessions',
'list_sessions', 'delete_session', 'export_keys', 'open_console', 'console'
]
def __init__(self):
self.database = None
self.running_module_names = []
# Utility methods
def log_error(self, text, exception_info=None, session=None, local_data=None, global_data=None):
""" Write an error to the file at log_file_path, or a default log file
if no path is supplied. If a session is supplied, its name will be used
to determine which session directory to add the error file to. """
timestamp = time.strftime('%F %T', time.gmtime())
if session:
session_tag = '({})'.format(session.name)
else:
session_tag = '<No Session>'
try:
if session:
log_file_path = 'sessions/{}/error_log.txt'.format(session.name)
else:
log_file_path = 'global_error_log.txt'
print('\n[{}] Pacu encountered an error while running the previous command. Check {} for technical details. [LOG LEVEL: {}]\n\n {}\n'.format(timestamp, log_file_path, settings.ERROR_LOG_VERBOSITY.upper(), exception_info))
log_file_directory = os.path.dirname(log_file_path)
if log_file_directory and not os.path.exists(log_file_directory):
os.makedirs(log_file_directory)
formatted_text = '[{}] {}: {}'.format(timestamp, session_tag, text)
if settings.ERROR_LOG_VERBOSITY.lower() in ('low', 'high', 'extreme'):
if session:
session_data = session.get_all_fields_as_dict()
# Empty values are not valid keys, and that info should be
# preserved by checking for falsiness here.
if session_data.get('secret_access_key'):
session_data['secret_access_key'] = '****** (Censored)'
formatted_text += 'SESSION DATA:\n {}\n'.format(
json.dumps(
session_data,
indent=4,
default=str
)
)
if settings.ERROR_LOG_VERBOSITY.lower() == 'high':
if local_data is not None and global_data is not None:
formatted_text += '\nLAST TWO FRAMES LOCALS DATA:\n {}\n'.format('\n\n '.join(local_data[:2]))
formatted_text += '\nLAST TWO FRAMES GLOBALS DATA:\n {}\n'.format('\n\n '.join(global_data[:2]))
elif settings.ERROR_LOG_VERBOSITY.lower() == 'extreme':
if local_data is not None and global_data is not None:
formatted_text += '\nALL LOCALS DATA:\n {}\n'.format('\n\n '.join(local_data))
formatted_text += '\nALL GLOBALS DATA:\n {}\n'.format('\n\n '.join(global_data))
formatted_text += '\n'
with open(log_file_path, 'a+') as log_file:
log_file.write(formatted_text)
except Exception as error:
print('Error while saving exception information. This means the exception was not added to any error log and should most likely be provided to the developers.\n Exception raised: {}'.format(str(error)))
raise
# @message: String - message to print and/or write to file
# @output: String - where to output the message: both, file, or screen
# @output_type: String - format for message when written to file: plain or xml
# @is_cmd: boolean - Is the log the initial command that was run (True) or output (False)? Devs won't touch this most likely
def print(self, message='', output='both', output_type='plain', is_cmd=False, session_name=''):
session = self.get_active_session()
if session_name == '':
session_name = session.name
# Indent output from a command
if is_cmd is False:
# Add some recursion here to go through the entire dict for
# 'SecretAccessKey'. This is to not print the full secret access
# key into the logs, although this should get most cases currently.
if isinstance(message, dict):
if 'SecretAccessKey' in message:
message = copy.deepcopy(message)
message['SecretAccessKey'] = '{}{}'.format(message['SecretAccessKey'][0:int(len(message['SecretAccessKey']) / 2)], '*' * int(len(message['SecretAccessKey']) / 2))
message = json.dumps(message, indent=2, default=str)
elif isinstance(message, list):
message = json.dumps(message, indent=2, default=str)
# The next section prepends the running module's name in square
# brackets in front of the first line in the message containing
# non-whitespace characters.
if len(self.running_module_names) > 0 and isinstance(message, str):
split_message = message.split('\n')
for index, fragment in enumerate(split_message):
if re.sub(r'\s', '', fragment):
split_message[index] = '[{}] {}'.format(self.running_module_names[-1], fragment)
break
message = '\n'.join(split_message)
if output == 'both' or output == 'file':
if output_type == 'plain':
with open('sessions/{}/cmd_log.txt'.format(session_name), 'a+') as text_file:
text_file.write('{}\n'.format(message))
elif output_type == 'xml':
# TODO: Implement actual XML output
with open('sessions/{}/cmd_log.xml'.format(session_name), 'a+') as xml_file:
xml_file.write('{}\n'.format(message))
pass
else:
print(' Unrecognized output type: {}'.format(output_type))
if output == 'both' or output == 'screen':
print(message)
return True
# @message: String - input question to ask and/or write to file
# @output: String - where to output the message: both or screen (can't write a question to a file only)
# @output_type: String - format for message when written to file: plain or xml
def input(self, message, output='both', output_type='plain', session_name=''):
session = self.get_active_session()
if session_name == '':
session_name = session.name
if len(self.running_module_names) > 0 and isinstance(message, str):
split_message = message.split('\n')
for index, fragment in enumerate(split_message):
if re.sub(r'\s', '', fragment):
split_message[index] = '[{}] {}'.format(self.running_module_names[-1], fragment)
break
message = '\n'.join(split_message)
res = input(message)
if output == 'both':
if output_type == 'plain':
with open('sessions/{}/cmd_log.txt'.format(session_name), 'a+') as file:
file.write('{} {}\n'.format(message, res))
elif output_type == 'xml':
# TODO: Implement actual XML output
# now = time.time()
with open('sessions/{}/cmd_log.xml'.format(session_name), 'a+') as file:
                    file.write('{} {}\n'.format(message, res))
else:
print(' Unrecognized output type: {}'.format(output_type))
return res
def validate_region(self, region):
if region in self.get_regions('All'):
return True
return False
def get_regions(self, service, check_session=True):
session = self.get_active_session()
service = service.lower()
with open('./modules/service_regions.json', 'r+') as regions_file:
regions = json.load(regions_file)
# TODO: Add an option for GovCloud regions
        # Regions that Pacu currently skips: 'local', plus newer regions that
        # don't work currently (af-south-1, ap-east-1, eu-south-1, me-south-1).
        unsupported_regions = ('local', 'af-south-1', 'ap-east-1', 'eu-south-1', 'me-south-1')

        def remove_unsupported(region_list):
            for unsupported in unsupported_regions:
                if unsupported in region_list:
                    region_list.remove(unsupported)
            return region_list

        if service == 'all':
            valid_regions = remove_unsupported(regions['all'])
        if isinstance(regions[service], dict) and regions[service].get('endpoints'):
            if 'aws-global' in regions[service]['endpoints']:
                return [None]
            valid_regions = remove_unsupported(list(regions[service]['endpoints'].keys()))
            if 'all' in session.session_regions:
                return valid_regions
            elif check_session is True:
                return [region for region in valid_regions if region in session.session_regions]
            else:
                return valid_regions
        else:
            if 'aws-global' in regions[service]:
                return [None]
            valid_regions = remove_unsupported(regions[service])
            if 'all' in session.session_regions:
                return valid_regions
            elif check_session is True:
                return [region for region in valid_regions if region in session.session_regions]
            else:
                return valid_regions
def display_all_regions(self, command):
for region in sorted(self.get_regions('all')):
print(' {}'.format(region))
# @data: list
# @module: string
# @args: string
def fetch_data(self, data, module, args, force=False):
session = self.get_active_session()
if data is None:
current = None
else:
current = getattr(session, data[0], None)
for item in data[1:]:
if current is not None and item in current:
current = current[item]
else:
current = None
break
if current is None or current == '' or current == [] or current == {} or current is False:
if force is False:
run_prereq = self.input('Data ({}) not found, run module "{}" to fetch it? (y/n) '.format(' > '.join(data), module), session_name=session.name)
else:
run_prereq = 'y'
if run_prereq == 'n':
return False
if args:
self.exec_module(['exec', module] + args.split(' '))
else:
self.exec_module(['exec', module])
return True
def check_for_updates(self):
with open('./last_update.txt', 'r') as f:
local_last_update = f.read().rstrip()
latest_update = requests.get('https://raw.githubusercontent.com/RhinoSecurityLabs/pacu/master/last_update.txt').text.rstrip()
local_year, local_month, local_day = local_last_update.split('-')
datetime_local = datetime.date(int(local_year), int(local_month), int(local_day))
latest_year, latest_month, latest_day = latest_update.split('-')
datetime_latest = datetime.date(int(latest_year), int(latest_month), int(latest_day))
if datetime_local < datetime_latest:
print('Pacu has a new version available! Clone it from GitHub to receive the updates.\n git clone https://github.com/RhinoSecurityLabs/pacu.git\n')
def key_info(self, alias=''):
""" Return the set of information stored in the session's active key
or the session's key with a specified alias, as a dictionary. """
session = self.get_active_session()
if alias == '':
alias = session.key_alias
aws_key = self.get_aws_key_by_alias(alias)
if aws_key is not None:
return aws_key.get_fields_as_camel_case_dictionary()
else:
return False
def print_key_info(self):
self.print(self.key_info())
def print_all_service_data(self, command):
session = self.get_active_session()
services = session.get_all_aws_data_fields_as_dict()
for service in services.keys():
print(' {}'.format(service))
def install_dependencies(self, external_dependencies):
if len(external_dependencies) < 1:
return True
answer = self.input('This module requires external dependencies: {}\n\nInstall them now? (y/n) '.format(external_dependencies))
if answer == 'n':
self.print('Not installing dependencies, exiting...')
return False
self.print('\nInstalling {} total dependencies...'.format(len(external_dependencies)))
for dependency in external_dependencies:
split = dependency.split('/')
name = split[-1]
if name.split('.')[-1] == 'git':
name = name.split('.')[0]
author = split[-2]
if os.path.exists('./dependencies/{}/{}'.format(author, name)):
self.print(' Dependency {}/{} already installed.'.format(author, name))
else:
try:
self.print(' Installing dependency {}/{} from {}...'.format(author, name, dependency))
subprocess.run(['git', 'clone', dependency, './dependencies/{}/{}'.format(author, name)])
except Exception as error:
self.print(' {} failed, view the error below. If you are unsure, some potential causes are that you are missing "git" on your command line, your git credentials are not properly set, or the GitHub link does not exist.'.format(error.cmd))
self.print(' stdout: {}\nstderr: {}'.format(error.cmd, error.stderr))
self.print(' Exiting module...')
return False
else:
if os.path.exists('./dependencies/{}'.format(name)):
self.print(' Dependency {} already installed.'.format(name))
else:
try:
self.print(' Installing dependency {}...'.format(name))
r = requests.get(dependency, stream=True)
if r.status_code == 404:
raise Exception('File not found.')
with open('./dependencies/{}'.format(name), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
except Exception as error:
self.print(' Downloading {} has failed, view the error below.'.format(dependency))
self.print(error)
self.print(' Exiting module...')
return False
self.print('Dependencies finished installing.')
return True
def get_active_session(self):
""" A wrapper for PacuSession.get_active_session, removing the need to
import the PacuSession model. """
return PacuSession.get_active_session(self.database)
def get_aws_key_by_alias(self, alias):
""" Return an AWSKey with the supplied alias that is assigned to the
currently active PacuSession from the database, or None if no AWSKey
with the supplied alias exists. If more than one key with the alias
exists for the active session, an exception will be raised. """
session = self.get_active_session()
key = self.database.query(AWSKey) \
.filter(AWSKey.session_id == session.id) \
.filter(AWSKey.key_alias == alias) \
.scalar()
return key
# Pacu commands and execution
def parse_command(self, command):
command = command.strip()
if command.split(' ')[0] == 'aws':
self.run_aws_cli_command(command)
return
try:
command = shlex.split(command)
except ValueError:
self.print(' Error: Unbalanced quotes in command')
return
if not command or command[0] == '':
return
elif command[0] == 'data':
self.parse_data_command(command)
elif command[0] == 'sessions' or command[0] == 'list_sessions':
self.list_sessions()
elif command[0] == 'swap_session':
self.check_sessions()
elif command[0] == 'delete_session':
self.delete_session()
elif command[0] == 'export_keys':
self.export_keys(command)
elif command[0] == 'help':
self.parse_help_command(command)
elif command[0] == 'console' or command[0] == 'open_console':
self.print_web_console_url()
elif command[0] == 'import_keys':
self.parse_awscli_keys_import(command)
elif command[0] == 'list' or command[0] == 'ls':
self.parse_list_command(command)
elif command[0] == 'load_commands_file':
self.parse_commands_from_file(command)
elif command[0] == 'regions':
self.display_all_regions(command)
elif command[0] == 'run' or command[0] == 'exec':
self.parse_exec_module_command(command)
elif command[0] == 'search':
self.parse_search_command(command)
elif command[0] == 'services':
self.print_all_service_data(command)
elif command[0] == 'set_keys':
self.set_keys()
elif command[0] == 'set_regions':
self.parse_set_regions_command(command)
elif command[0] == 'swap_keys':
self.swap_keys()
elif command[0] == 'update_regions':
self.update_regions()
elif command[0] == 'whoami':
self.print_key_info()
elif command[0] == 'exit' or command[0] == 'quit':
self.exit()
else:
print(' Error: Unrecognized command')
return
def parse_commands_from_file(self, command):
if len(command) == 1:
self.display_command_help('load_commands_file')
return
commands_file = command[1]
if not os.path.isfile(commands_file):
self.display_command_help('load_commands_file')
return
with open(commands_file, 'r+') as f:
commands = f.readlines()
for command in commands:
print("Executing command: {} ...".format(command))
command_without_space = command.strip()
if command_without_space:
self.parse_command(command_without_space)
def parse_awscli_keys_import(self, command):
if len(command) == 1:
self.display_command_help('import_keys')
return
boto3_session = boto3.session.Session()
if command[1] == '--all':
profiles = boto3_session.available_profiles
for profile_name in profiles:
self.import_awscli_key(profile_name)
return
self.import_awscli_key(command[1])
def import_awscli_key(self, profile_name):
try:
boto3_session = boto3.session.Session(profile_name=profile_name)
creds = boto3_session.get_credentials()
self.set_keys(key_alias='imported-{}'.format(profile_name), access_key_id=creds.access_key, secret_access_key=creds.secret_key, session_token=creds.token)
self.print(' Imported keys as "imported-{}"'.format(profile_name))
except botocore.exceptions.ProfileNotFound as error:
self.print('\n Did not find the AWS CLI profile: {}\n'.format(profile_name))
boto3_session = boto3.session.Session()
print(' Profiles that are available:\n {}\n'.format('\n '.join(boto3_session.available_profiles)))
def run_aws_cli_command(self, command):
try:
result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as error:
result = error.output.decode('utf-8')
self.print(result)
def parse_data_command(self, command):
session = self.get_active_session()
if len(command) == 1:
self.print('\nSession data:')
session.print_all_data_in_session()
else:
if command[1] not in session.aws_data_field_names:
print(' Service not found.')
elif getattr(session, command[1]) == {} or getattr(session, command[1]) == [] or getattr(session, command[1]) == '':
print(' No data found.')
else:
print(json.dumps(getattr(session, command[1]), indent=2, sort_keys=True, default=str))
def parse_set_regions_command(self, command):
session = self.get_active_session()
if len(command) > 1:
for region in command[1:]:
if region.lower() == 'all':
session.update(self.database, session_regions=['all'])
print(' The region set for this session has been reset to the default of all supported regions.')
return
if self.validate_region(region) is False:
print(' {} is not a valid region.\n Session regions not changed.'.format(region))
return
session.update(self.database, session_regions=command[1:])
print(' Session regions changed: {}'.format(session.session_regions))
else:
print(' Error: set_regions requires either "all" or at least one region to be specified. Try the "regions" command to view all regions.')
def parse_help_command(self, command):
if len(command) <= 1:
self.display_pacu_help()
elif len(command) > 1 and command[1] in self.COMMANDS:
self.display_command_help(command[1])
else:
self.display_module_help(command[1])
def parse_list_command(self, command):
if len(command) == 1:
self.list_modules('')
elif len(command) == 2:
if command[1] in ('cat', 'category'):
self.list_modules('', by_category=True)
def parse_exec_module_command(self, command):
if len(command) > 1:
self.exec_module(command)
else:
print('The {} command requires a module name. Try using the module search function.'.format(command))
def parse_search_command(self, command):
if len(command) == 1:
self.list_modules('')
elif len(command) == 2:
self.list_modules(command[1])
elif len(command) >= 3:
if command[1] in ('cat', 'category'):
self.list_modules(command[2], by_category=True)
def display_pacu_help(self):
print("""
Pacu - https://github.com/RhinoSecurityLabs/pacu
Written and researched by Spencer Gietzen of Rhino Security Labs - https://rhinosecuritylabs.com/
This was built as a modular, open source tool to assist in penetration testing an AWS environment.
For usage and developer documentation, please visit the GitHub page.
Modules that have pre-requisites will have those listed in that modules help info, but if it is
executed before its pre-reqs have been filled, it will prompt you to run that module then continue
once that is finished, so you have the necessary data for the module you want to run.
Pacu command info:
list/ls List all modules
load_commands_file <file> Load an existing file with list of commands to execute
search [cat[egory]] <search term> Search the list of available modules by name or category
help Display this page of information
help <module name> Display information about a module
whoami Display information regarding to the active access keys
data Display all data that is stored in this session. Only fields
with values will be displayed
data <service> Display all data for a specified service in this session
services Display a list of services that have collected data in the
current session to use with the "data" command
regions Display a list of all valid AWS regions
update_regions Run a script to update the regions database to the newest
version
set_regions <region> [<region>...] Set the default regions for this session. These space-separated
regions will be used for modules where regions are required,
but not supplied by the user. The default set of regions is
every supported region for the service. Supply "all" to this
command to reset the region set to the default of all
supported regions
run/exec <module name> Execute a module
set_keys Add a set of AWS keys to the session and set them as the
default
swap_keys Change the currently active AWS key to another key that has
previously been set for this session
import_keys <profile name>|--all Import AWS keys from the AWS CLI credentials file (located
at ~/.aws/credentials) to the current sessions database.
Enter the name of a profile you would like to import or
supply --all to import all the credentials in the file.
export_keys Export the active credentials to a profile in the AWS CLI
credentials file (~/.aws/credentials)
sessions/list_sessions List all sessions in the Pacu database
swap_session Change the active Pacu session to another one in the database
delete_session Delete a Pacu session from the database. Note that the output
folder for that session will not be deleted
exit/quit Exit Pacu
Other command info:
aws <command> Run an AWS CLI command directly. Note: If Pacu detects "aws"
as the first word of the command, the whole command will
instead be run in a shell so that you can use the AWS CLI
from within Pacu. Due to the command running in a shell,
this enables you to pipe output where needed. An example
would be to run an AWS CLI command and pipe it into "jq"
to parse the data returned. Warning: The AWS CLI's
authentication is not related to Pacu. Be careful to
ensure that you are using the keys you want when using
the AWS CLI. It is suggested to use AWS CLI profiles
to solve this problem
console/open_console Generate a URL that will log the current user/role in to
the AWS web console
""")
def update_regions(self):
py_executable = sys.executable
# Update botocore to fetch the latest version of the AWS region_list
try:
self.print(' Fetching latest botocore...\n')
subprocess.run([py_executable, '-m', 'pip', 'install', '--upgrade', 'botocore'])
except:
pip = self.input(' Could not use pip3 or pip to update botocore to the latest version. Enter the name of your pip binary to continue: ').strip()
subprocess.run(['{}'.format(pip), 'install', '--upgrade', 'botocore'])
path = ''
try:
self.print(' Using pip3 to locate botocore...\n')
output = subprocess.check_output('{} -m pip show botocore'.format(py_executable), shell=True)
except:
path = self.input(' Could not use pip to determine botocore\'s location. Enter the path to your Python "dist-packages" folder (example: /usr/local/bin/python3.6/lib/dist-packages): ').strip()
if path == '':
# Account for Windows \r and \\ in file path (Windows)
rows = output.decode('utf-8').replace('\r', '').replace('\\\\', '/').split('\n')
for row in rows:
if row.startswith('Location: '):
path = row.split('Location: ')[1]
with open('{}/botocore/data/endpoints.json'.format(path), 'r+') as regions_file:
endpoints = json.load(regions_file)
for partition in endpoints['partitions']:
if partition['partition'] == 'aws':
regions = dict()
regions['all'] = list(partition['regions'].keys())
for service in partition['services']:
regions[service] = partition['services'][service]
with open('modules/service_regions.json', 'w+') as services_file:
json.dump(regions, services_file, default=str, sort_keys=True)
self.print(' Region list updated to the latest version!')
def import_module_by_name(self, module_name, include=()):
file_path = os.path.join(os.getcwd(), 'modules', module_name, 'main.py')
if os.path.exists(file_path):
import_path = 'modules.{}.main'.format(module_name).replace('/', '.').replace('\\', '.')
module = __import__(import_path, globals(), locals(), include, 0)
importlib.reload(module)
return module
return None
def print_web_console_url(self):
active_session = self.get_active_session()
if not active_session.access_key_id:
print(' No access key has been set. Not generating the URL.')
return
if not active_session.secret_access_key:
print(' No secret key has been set. Not generating the URL.')
return
sts = self.get_boto3_client('sts')
if active_session.session_token:
# Roles cant use get_federation_token
res = {
'Credentials': {
'AccessKeyId': active_session.access_key_id,
'SecretAccessKey': active_session.secret_access_key,
'SessionToken': active_session.session_token
}
}
else:
res = sts.get_federation_token(
Name=active_session.key_alias,
Policy=json.dumps({
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': '*',
'Resource': '*'
}
]
})
)
params = {
'Action': 'getSigninToken',
'Session': json.dumps({
'sessionId': res['Credentials']['AccessKeyId'],
'sessionKey': res['Credentials']['SecretAccessKey'],
'sessionToken': res['Credentials']['SessionToken']
})
}
res = requests.get(url='https://signin.aws.amazon.com/federation', params=params)
signin_token = res.json()['SigninToken']
params = {
'Action': 'login',
'Issuer': active_session.key_alias,
'Destination': 'https://console.aws.amazon.com/console/home',
'SigninToken': signin_token
}
url = 'https://signin.aws.amazon.com/federation?' + urllib.parse.urlencode(params)
print('Paste the following URL into a web browser to login as session {}...\n'.format(active_session.name))
print(url)
def all_region_prompt(self):
print('Automatically targeting regions:')
for region in self.get_regions('all'):
print(' {}'.format(region))
response = input('Continue? (y/n) ')
if response.lower() == 'y':
return True
else:
return False
def export_keys(self, command):
export = input('Export the active keys to the AWS CLI credentials file (~/.aws/credentials)? (y/n) ').rstrip()
if export.lower() == 'y':
session = self.get_active_session()
if not session.access_key_id:
print(' No access key has been set. Not exporting credentials.')
return
if not session.secret_access_key:
print(' No secret key has been set. Not exporting credentials.')
return
config = """
\n\n[{}]
aws_access_key_id = {}
aws_secret_access_key = {}
""".format(session.key_alias, session.access_key_id, session.secret_access_key)
if session.session_token:
config = config + 'aws_session_token = "{}"'.format(session.session_token)
config = config + '\n'
with open('{}/.aws/credentials'.format(os.path.expanduser('~')), 'a+') as f:
f.write(config)
            print('Successfully exported {}. Use it with the AWS CLI like this: aws ec2 describe-instances --profile {}'.format(session.key_alias, session.key_alias))
else:
return
###### Some module notes
# For any argument that needs a value and a region for that value, use the form
# value@region
# Arguments that accept multiple values should be comma separated.
######
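    # Example (illustrative, with a hypothetical module argument):
    #   --ip-ranges 10.0.0.0/24@us-east-1,10.0.1.0/24@us-west-2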
def exec_module(self, command):
session = self.get_active_session()
# Run key checks so that if no keys have been set, Pacu doesn't default to
# the AWSCLI default profile:
if not session.access_key_id:
print(' No access key has been set. Not running module.')
return
if not session.secret_access_key:
print(' No secret key has been set. Not running module.')
return
module_name = command[1].lower()
module = self.import_module_by_name(module_name, include=['main', 'module_info', 'summary'])
if module is not None:
# Plaintext Command Log
self.print('{} ({}): {}'.format(session.access_key_id, time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()), ' '.join(command).strip()), output='file', is_cmd=True)
## XML Command Log - Figure out how to auto convert to XML
# self.print('<command>{}</command>'.format(cmd), output_type='xml', output='file')
self.print(' Running module {}...'.format(module_name))
try:
args = module.parser.parse_args(command[2:])
if 'regions' in args and args.regions is None:
session = self.get_active_session()
if session.session_regions == ['all']:
if not self.all_region_prompt():
return
except SystemExit:
print(' Error: Invalid Arguments')
return
self.running_module_names.append(module.module_info['name'])
try:
summary_data = module.main(command[2:], self)
# If the module's return value is None, it exited early.
if summary_data is not None:
summary = module.summary(summary_data, self)
if len(summary) > 10000:
raise ValueError('The {} module\'s summary is too long ({} characters). Reduce it to 10000 characters or fewer.'.format(module.module_info['name'], len(summary)))
if not isinstance(summary, str):
raise TypeError(' The {} module\'s summary is {}-type instead of str. Make summary return a string.'.format(module.module_info['name'], type(summary)))
self.print('{} completed.\n'.format(module.module_info['name']))
self.print('MODULE SUMMARY:\n\n{}\n'.format(summary.strip('\n')))
except SystemExit as error:
exception_type, exception_value, tb = sys.exc_info()
if 'SIGINT called' in exception_value.args:
self.print('^C\nExiting the currently running module.')
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}\n\nPacu caught a SystemExit error. '.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
finally:
self.running_module_names.pop()
elif module_name in self.COMMANDS:
print('Error: "{}" is the name of a Pacu command, not a module. Try using it without "run" or "exec" in front.'.format(module_name))
else:
print('Module not found. Is it spelled correctly? Try using the module search function.')
def display_command_help(self, command_name):
if command_name == 'list' or command_name == 'ls':
print('\n list/ls\n List all modules\n')
elif command_name == 'import_keys':
print('\n import_keys <profile name>|--all\n Import AWS keys from the AWS CLI credentials file (located at ~/.aws/credentials) to the current sessions database. Enter the name of a profile you would like to import or supply --all to import all the credentials in the file.\n')
elif command_name == 'aws':
print('\n aws <command>\n Use the AWS CLI directly. This command runs in your local shell to use the AWS CLI. Warning: The AWS CLI\'s authentication is not related to Pacu. Be careful to ensure that you are using the keys you want when using the AWS CLI. It is suggested to use AWS CLI profiles to help solve this problem\n')
elif command_name == 'console' or command_name == 'open_console':
print('\n console/open_console\n Generate a URL to login to the AWS web console as the current user/role\n')
elif command_name == 'export_keys':
print('\n export_keys\n Export the active credentials to a profile in the AWS CLI credentials file (~/.aws/credentials)\n')
elif command_name == 'search':
print('\n search [cat[egory]] <search term>\n Search the list of available modules by name or category\n')
elif command_name == 'sessions' or command_name == 'list_sessions':
print('\n sessions/list_sessions\n List all sessions stored in the Pacu database\n')
elif command_name == 'swap_session':
print('\n swap_session\n Swap the active Pacu session for another one stored in the database or a brand new session\n')
elif command_name == 'delete_session':
print('\n delete_session\n Delete a session from the Pacu database. Note that this does not delete the output folder for that session\n')
elif command_name == 'help':
print('\n help\n Display information about all Pacu commands\n help <module name>\n Display information about a module\n')
elif command_name == 'whoami':
            print('\n whoami\n Display information regarding the active access keys\n')
elif command_name == 'data':
print('\n data\n Display all data that is stored in this session. Only fields with values will be displayed\n data <service>\n Display all data for a specified service in this session\n')
elif command_name == 'services':
print('\n services\n Display a list of services that have collected data in the current session to use with the "data"\n command\n')
elif command_name == 'regions':
print('\n regions\n Display a list of all valid AWS regions\n')
elif command_name == 'update_regions':
print('\n update_regions\n Run a script to update the regions database to the newest version\n')
elif command_name == 'set_regions':
print('\n set_regions <region> [<region>...]\n Set the default regions for this session. These space-separated regions will be used for modules where\n regions are required, but not supplied by the user. The default set of regions is every supported\n region for the service. Supply "all" to this command to reset the region set to the default of all\n supported regions\n')
elif command_name == 'run' or command_name == 'exec':
print('\n run/exec <module name>\n Execute a module\n')
elif command_name == 'set_keys':
print('\n set_keys\n Add a set of AWS keys to the session and set them as the default\n')
elif command_name == 'swap_keys':
print('\n swap_keys\n Change the currently active AWS key to another key that has previously been set for this session\n')
elif command_name == 'exit' or command_name == 'quit':
print('\n exit/quit\n Exit Pacu\n')
elif command_name == 'load_commands_file':
print('\n load_commands_file <commands_file>\n Load an existing file with a set of commands to execute')
else:
print('Command or module not found. Is it spelled correctly? Try using the module search function.')
return
def display_module_help(self, module_name):
module = self.import_module_by_name(module_name, include=['module_info', 'parser'])
if module is not None:
print('\n{} written by {}.\n'.format(module.module_info['name'], module.module_info['author']))
if 'prerequisite_modules' in module.module_info and len(module.module_info['prerequisite_modules']) > 0:
print('Prerequisite Module(s): {}\n'.format(module.module_info['prerequisite_modules']))
if 'external_dependencies' in module.module_info and len(module.module_info['external_dependencies']) > 0:
print('External dependencies: {}\n'.format(module.module_info['external_dependencies']))
parser_help = module.parser.format_help()
print(parser_help.replace(os.path.basename(__file__), 'run {}'.format(module.module_info['name']), 1))
return
else:
print('Command or module not found. Is it spelled correctly? Try using the module search function, or "help" to view a list of commands.')
return
def list_modules(self, search_term, by_category=False):
found_modules_by_category = dict()
current_directory = os.getcwd()
for root, directories, files in os.walk('{}/modules'.format(current_directory)):
modules_directory_path = os.path.realpath('{}/modules'.format(current_directory))
specific_module_directory = os.path.realpath(root)
# Skip any directories inside module directories.
if os.path.dirname(specific_module_directory) != modules_directory_path:
continue
# Skip the root directory.
elif modules_directory_path == specific_module_directory:
continue
module_name = os.path.basename(root)
for file in files:
if file == 'main.py':
# Make sure the format is correct
module_path = 'modules/{}/main'.format(module_name).replace('/', '.').replace('\\', '.')
# Import the help function from the module
module = __import__(module_path, globals(), locals(), ['module_info'], 0)
importlib.reload(module)
category = module.module_info['category']
services = module.module_info['services']
regions = []
for service in services:
regions += self.get_regions(service)
# Skip modules with no regions in the list of set regions.
if len(regions) == 0:
continue
# Searching for modules by category:
if by_category and search_term in category:
if category not in found_modules_by_category.keys():
found_modules_by_category[category] = list()
found_modules_by_category[category].append(' {}'.format(module_name))
if search_term:
found_modules_by_category[category].append(' {}\n'.format(module.module_info['one_liner']))
# Searching or listing modules without specifying a category:
elif not by_category and search_term in module_name:
if category not in found_modules_by_category.keys():
found_modules_by_category[category] = list()
found_modules_by_category[category].append(' {}'.format(module_name))
if search_term:
found_modules_by_category[category].append(' {}\n'.format(module.module_info['one_liner']))
if found_modules_by_category:
PRINT_ORDER = ['RECON_UNAUTH', 'ENUM', 'ESCALATE', 'LATERAL_MOVE', 'EXPLOIT', 'PERSIST', 'EXFIL', 'EVADE']
for category in PRINT_ORDER:
if category in found_modules_by_category:
search_results = '\n'.join(found_modules_by_category[category]).strip('\n')
print('\n[Category: {}]\n\n{}'.format(category, search_results))
else:
print('\nNo modules found.')
print()
def set_keys(self, key_alias=None, access_key_id=None, secret_access_key=None, session_token=None):
session = self.get_active_session()
        # If key_alias is None, then it's being run normally from the command line (set_keys);
        # otherwise the keys are being set programmatically and we don't want any prompts.
if key_alias is None:
self.print('Setting AWS Keys...')
self.print('Press enter to keep the value currently stored.')
self.print('Enter the letter C to clear the value, rather than set it.')
self.print('If you enter an existing key_alias, that key\'s fields will be updated instead of added.\n')
# Key alias
if key_alias is None:
new_value = self.input('Key alias [{}]: '.format(session.key_alias))
else:
new_value = key_alias.strip()
self.print('Key alias [{}]: {}'.format(session.key_alias, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.key_alias = None
elif str(new_value) != '':
session.key_alias = new_value.strip()
# Access key ID
if key_alias is None:
new_value = self.input('Access key ID [{}]: '.format(session.access_key_id))
else:
new_value = access_key_id
self.print('Access key ID [{}]: {}'.format(session.access_key_id, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.access_key_id = None
elif str(new_value) != '':
session.access_key_id = new_value.strip()
# Secret access key (should not be entered in log files)
if key_alias is None:
if session.secret_access_key is None:
new_value = input('Secret access key [None]: ')
else:
new_value = input('Secret access key [{}{}]: '.format(session.secret_access_key[0:int(len(session.secret_access_key) / 2)], '*' * int(len(session.secret_access_key) / 2)))
else:
new_value = secret_access_key
self.print('Secret access key [******]: ****** (Censored)', output='file')
if str(new_value.strip().lower()) == 'c':
session.secret_access_key = None
elif str(new_value) != '':
session.secret_access_key = new_value.strip()
# Session token (optional)
if key_alias is None:
new_value = self.input('Session token (Optional - for temp AWS keys only) [{}]: '.format(session.session_token))
else:
new_value = session_token
if new_value is None:
new_value = 'c'
self.print('Session token [{}]: {}'.format(session.session_token, new_value), output='file')
if str(new_value.strip().lower()) == 'c':
session.session_token = None
elif str(new_value) != '':
session.session_token = new_value.strip()
self.database.add(session)
aws_key = session.get_active_aws_key(self.database)
if aws_key:
aws_key.key_alias = session.key_alias
aws_key.access_key_id = session.access_key_id
aws_key.secret_access_key = session.secret_access_key
aws_key.session_token = session.session_token
else:
aws_key = AWSKey(
session=session,
key_alias=session.key_alias,
access_key_id=session.access_key_id,
secret_access_key=session.secret_access_key,
session_token=session.session_token
)
self.database.add(aws_key)
self.database.commit()
if key_alias is None:
self.print('\nKeys saved to database.\n')
def swap_keys(self):
session = self.get_active_session()
aws_keys = session.aws_keys.all()
if not aws_keys:
self.print('\nNo AWS keys set for this session. Run "set_keys" to add AWS keys.\n')
return
self.print('\nSwapping AWS Keys. Press enter to keep the currently active key.')
print('AWS keys in this session:')
for index, aws_key in enumerate(aws_keys, 1):
if aws_key.key_alias == session.key_alias:
print(' [{}] {} (ACTIVE)'.format(index, aws_key.key_alias))
else:
print(' [{}] {}'.format(index, aws_key.key_alias))
choice = input('Choose an option: ')
if not str(choice).strip():
self.print('The currently active AWS key will remain active. ({})'.format(session.key_alias))
return
if not choice.isdigit() or int(choice) not in range(1, len(aws_keys) + 1):
print('Please choose a number from 1 to {}.'.format(len(aws_keys)))
return self.swap_keys()
chosen_key = aws_keys[int(choice) - 1]
session.key_alias = chosen_key.key_alias
session.access_key_id = chosen_key.access_key_id
session.secret_access_key = chosen_key.secret_access_key
session.session_token = chosen_key.session_token
self.database.add(session)
self.database.commit()
self.print('AWS key is now {}.'.format(session.key_alias))
def check_sessions(self):
sessions = self.database.query(PacuSession).all()
if not sessions:
session = self.new_session()
else:
print('Found existing sessions:')
print(' [0] New session')
for index, session in enumerate(sessions, 1):
print(' [{}] {}'.format(index, session.name))
choice = input('Choose an option: ')
try:
if int(choice) == 0:
session = self.new_session()
else:
session = sessions[int(choice) - 1]
except (ValueError, IndexError):
print('Please choose a number from 0 to {}.'.format(len(sessions)))
return self.check_sessions()
session.activate(self.database)
def list_sessions(self):
active_session = self.get_active_session()
all_sessions = self.database.query(PacuSession).all()
print('Found existing sessions:')
for index, session in enumerate(all_sessions, 0):
if session.name == active_session.name:
print('- ' + session.name + ' (ACTIVE)')
else:
print('- ' + session.name)
print('\nUse "swap_session" to change to another session.')
return
def new_session(self):
session_data = dict()
name = None
while not name:
name = input('What would you like to name this new session? ').strip()
if not name:
print('A session name is required.')
else:
existing_sessions = self.database.query(PacuSession).filter(PacuSession.name == name).all()
if existing_sessions:
print('A session with that name already exists.')
name = None
session_data['name'] = name
session = PacuSession(**session_data)
self.database.add(session)
self.database.commit()
session_downloads_directory = './sessions/{}/downloads/'.format(name)
if not os.path.exists(session_downloads_directory):
os.makedirs(session_downloads_directory)
print('Session {} created.'.format(name))
return session
def delete_session(self):
active_session = self.get_active_session()
all_sessions = self.database.query(PacuSession).all()
print('Delete which session?')
for index, session in enumerate(all_sessions, 0):
if session.name == active_session.name:
print(' [{}] {} (ACTIVE)'.format(index, session.name))
else:
print(' [{}] {}'.format(index, session.name))
choice = input('Choose an option: ')
try:
session = all_sessions[int(choice)]
if session.name == active_session.name:
print('Cannot delete the active session! Switch sessions and try again.')
return
except (ValueError, IndexError):
print('Please choose a number from 0 to {}.'.format(len(all_sessions) - 1))
return self.delete_session()
self.database.delete(session)
self.database.commit()
print('Deleted {} from the database!'.format(session.name))
print('Note that the output folder at ./sessions/{}/ will not be deleted. Do it manually if necessary.'.format(session.name))
return
def get_data_from_traceback(self, tb):
session = None
global_data_in_all_frames = list()
local_data_in_all_frames = list()
for frame, line_number in traceback.walk_tb(tb):
global_data_in_all_frames.append(str(frame.f_globals))
local_data_in_all_frames.append(str(frame.f_locals))
# Save the most recent PacuSession called "session", working backwards.
if session is None:
session = frame.f_locals.get('session', None)
if not isinstance(session, PacuSession):
session = None
return session, global_data_in_all_frames, local_data_in_all_frames
def check_user_agent(self):
session = self.get_active_session()
if session.boto_user_agent is None: # If there is no user agent set for this session already
boto3_session = boto3.session.Session()
ua = boto3_session._session.user_agent()
if 'kali' in ua.lower() or 'parrot' in ua.lower() or 'pentoo' in ua.lower(): # If the local OS is Kali/Parrot/Pentoo Linux
# GuardDuty triggers a finding around API calls made from Kali Linux, so let's avoid that...
self.print('Detected environment as one of Kali/Parrot/Pentoo Linux. Modifying user agent to hide that from GuardDuty...')
with open('./user_agents.txt', 'r') as file:
user_agents = file.readlines()
user_agents = [agent.strip() for agent in user_agents] # Remove random \n's and spaces
new_ua = random.choice(user_agents)
session.update(self.database, boto_user_agent=new_ua)
self.print(' User agent for this session set to:')
self.print(' {}'.format(new_ua))
def get_boto3_client(self, service, region=None, user_agent=None, parameter_validation=True):
session = self.get_active_session()
if not session.access_key_id:
print(' No access key has been set. Failed to generate boto3 Client.')
return
if not session.secret_access_key:
print(' No secret key has been set. Failed to generate boto3 Client.')
return
# If there is not a custom user_agent passed into this function
# and session.boto_user_agent is set, use that as the user agent
# for this client. If both are set, the incoming user_agent will
        # override the session.boto_user_agent. If neither is set, it
# will be None, and will default to the OS's regular user agent
if user_agent is None and session.boto_user_agent is not None:
user_agent = session.boto_user_agent
boto_config = botocore.config.Config(
user_agent=user_agent, # If user_agent=None, botocore will use the real UA which is what we want
parameter_validation=parameter_validation
)
return boto3.client(
service,
region_name=region, # Whether region has a value or is None, it will work here
aws_access_key_id=session.access_key_id,
aws_secret_access_key=session.secret_access_key,
aws_session_token=session.session_token,
config=boto_config
)
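    # Usage sketch (illustrative): get_boto3_client('ec2', region='us-east-1')
    # behaves like boto3.client('ec2', ...) but injects this session's keys,
    # token, and (optionally spoofed) user agent.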
def get_boto3_resource(self, service, region=None, user_agent=None, parameter_validation=True):
# All the comments from get_boto3_client apply here too
session = self.get_active_session()
if not session.access_key_id:
print(' No access key has been set. Failed to generate boto3 Resource.')
return
if not session.secret_access_key:
print(' No secret key has been set. Failed to generate boto3 Resource.')
return
if user_agent is None and session.boto_user_agent is not None:
user_agent = session.boto_user_agent
boto_config = botocore.config.Config(
user_agent=user_agent,
parameter_validation=parameter_validation
)
return boto3.resource(
service,
region_name=region,
aws_access_key_id=session.access_key_id,
aws_secret_access_key=session.secret_access_key,
aws_session_token=session.session_token,
config=boto_config
)
def initialize_tab_completion(self):
try:
import readline
# Big thanks to samplebias: https://stackoverflow.com/a/5638688
MODULES = []
CATEGORIES = []
for root, directories, files in os.walk('{}/modules'.format(os.getcwd())):
modules_directory_path = os.path.realpath('{}/modules'.format(os.getcwd()))
category_path = os.path.realpath(root)
# Skip any directories inside module directories.
if os.path.dirname(category_path) != modules_directory_path:
continue
# Skip the root directory.
elif modules_directory_path == category_path:
continue
for file in files:
if file == 'main.py':
module_name = os.path.basename(root)
MODULES.append(module_name)
# Make sure the format is correct
module_path = 'modules/{}/main'.format(module_name).replace('/', '.').replace('\\', '.')
# Import the help function from the module
module = __import__(module_path, globals(), locals(), ['module_info'], 0)
importlib.reload(module)
CATEGORIES.append(module.module_info['category'])
            RE_SPACE = re.compile(r'.*\s+$', re.M)
readline.set_completer_delims(' \t\n`~!@#$%^&*()=+[{]}\\|;:\'",<>/?')
class Completer(object):
def complete(completer, text, state):
buffer = readline.get_line_buffer()
line = readline.get_line_buffer().split()
# If nothing has been typed, show all commands. If help, exec, or run has been typed, show all modules
if not line:
return [c + ' ' for c in self.COMMANDS][state]
if len(line) == 1 and (line[0] == 'help'):
return [c + ' ' for c in MODULES + self.COMMANDS][state]
if len(line) == 1 and (line[0] == 'exec' or line[0] == 'run'):
return [c + ' ' for c in MODULES][state]
# account for last argument ending in a space
if RE_SPACE.match(buffer):
line.append('')
                    # Resolve command to the implementation function
                    results = [None]  # fall back to no suggestions if nothing below matches
                    if len(line) == 1:
cmd = line[0].strip()
results = [c + ' ' for c in self.COMMANDS if c.startswith(cmd)] + [None]
elif len(line) == 2:
cmd = line[1].strip()
if line[0].strip() == 'search':
results = [c + ' ' for c in MODULES + ['category'] if c.startswith(cmd)] + [None]
elif line[0].strip() == 'help':
results = [c + ' ' for c in MODULES + self.COMMANDS if c.startswith(cmd)] + [None]
else:
results = [c + ' ' for c in MODULES if c.startswith(cmd)] + [None]
elif len(line) == 3 and line[0] == 'search' and line[1] in ('cat', 'category'):
cmd = line[2].strip()
results = [c + ' ' for c in CATEGORIES if c.startswith(cmd)] + [None]
elif len(line) >= 3:
if line[0].strip() == 'run' or line[0].strip() == 'exec':
module_name = line[1].strip()
module = self.import_module_by_name(module_name, include=['module_info'])
autocomplete_arguments = module.module_info.get('arguments_to_autocomplete', list())
current_argument = line[-1].strip()
results = [c + ' ' for c in autocomplete_arguments if c.startswith(current_argument)] + [None]
return results[state]
comp = Completer()
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
except Exception as error:
# Error means most likely on Windows where readline is not supported
# TODO: Implement tab-completion for Windows
# print(error)
pass
def exit(self):
sys.exit('SIGINT called')
def idle(self):
session = self.get_active_session()
if session.key_alias:
alias = session.key_alias
else:
alias = 'No Keys Set'
command = input('Pacu ({}:{}) > '.format(session.name, alias))
self.parse_command(command)
self.idle()
def run_cli(self, *args):
self.database = get_database_connection(settings.DATABASE_CONNECTION_PATH)
sessions = self.database.query(PacuSession).all()
arg = args[0]
session = arg.session
module_name = arg.module_name
service = arg.data
list_mods = arg.list_modules
list_cmd = ['ls']
pacu_help = arg.pacu_help
pacu_help_cmd = ['help']
module_help = arg.module_info
if session is not None:
session_names = [x.name for x in sessions]
if session not in session_names:
print('Session could not be found. Exiting...')
self.exit()
session_index = session_names.index(session)
sessions[session_index].is_active = True
if module_name is not None:
module = ['exec', module_name]
if arg.module_args is not None:
args_list = arg.module_args.split(' ')
for i in args_list:
if i != '':
module.append(i)
if arg.exec is True:
self.exec_module(module)
if service is not None:
if service == 'all':
service_cmd = ['data']
else:
service_cmd = ['data', service.upper()]
self.parse_data_command(service_cmd)
if list_mods is True:
self.parse_list_command(list_cmd)
if pacu_help is True:
self.parse_help_command(pacu_help_cmd)
        if arg.module_info is True:
            if module_name is None:
                print('Specify a module to get information on')
            else:
                pacu_help_cmd.append(module_name)
                self.parse_help_command(pacu_help_cmd)
if arg.set_regions is not None:
regions = arg.set_regions
regions.insert(0, 'set_regions')
self.parse_set_regions_command(regions)
if arg.whoami is True:
self.print_key_info()
def run_gui(self):
idle_ready = False
while True:
try:
if not idle_ready:
try:
print("""
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣶⣿⣿⣿⣿⣿⣿⣶⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣾⣿⡿⠛⠉⠁⠀⠀⠈⠙⠻⣿⣿⣦⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠛⠛⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣿⣷⣀⣀⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣀⣀⣀⣀⣀⣀⣤⣤⣤⣤⣤⣤⣤⣤⣀⣀⠀⠀⠀⠀⠀⠀⢻⣿⣿⣿⡿⣿⣿⣷⣦⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣈⣉⣙⣛⣿⣿⣿⣿⣿⣿⣿⣿⡟⠛⠿⢿⣿⣷⣦⣄⠀⠀⠈⠛⠋⠀⠀⠀⠈⠻⣿⣷⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣈⣉⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⣀⣀⣀⣤⣿⣿⣿⣷⣦⡀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣆⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣬⣭⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠿⠛⢛⣉⣉⣡⣄⠀⠀⠀⠀⠀⠀⠀⠀⠻⢿⣿⣿⣶⣄⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠋⣁⣤⣶⡿⣿⣿⠉⠻⠏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢻⣿⣧⡀
⠀⠀⠀⠀⠀⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠋⣠⣶⣿⡟⠻⣿⠃⠈⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢹⣿⣧
⢀⣀⣤⣴⣶⣶⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠁⢠⣾⣿⠉⠻⠇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿
⠉⠛⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠁⠀⠀⠀⠀⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⡟
⠀⠀⠀⠀⠉⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣾⣿⡟⠁
⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣦⣄⡀⠀⠀⠀⠀⠀⣴⣆⢀⣴⣆⠀⣼⣆⠀⠀⣶⣶⣶⣶⣶⣶⣶⣶⣾⣿⣿⠿⠋⠀⠀
⠀⠀⠀⣼⣿⣿⣿⠿⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠛⠓⠒⠒⠚⠛⠛⠛⠛⠛⠛⠛⠛⠀⠀⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠀⠀⠀⠀⠀
⠀⠀⠀⣿⣿⠟⠁⠀⢸⣿⣿⣿⣿⣿⣿⣿⣶⡀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣷⡄⠀⢀⣾⣿⣿⣿⣿⣿⣿⣷⣆⠀⢰⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠘⠁⠀⠀⠀⢸⣿⣿⡿⠛⠛⢻⣿⣿⡇⠀⢸⣿⣿⡿⠛⠛⢿⣿⣿⡇⠀⢸⣿⣿⡿⠛⠛⢻⣿⣿⣿⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠸⠿⠿⠟⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣧⣤⣤⣼⣿⣿⡇⠀⢸⣿⣿⣧⣤⣤⣼⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⣿⡿⠃⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⡇⠀⢸⣿⣿⡇⠀⠀⢀⣀⣀⣀⠀⢸⣿⣿⣿⠀⠀⠀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡏⠉⠉⠉⠉⠀⠀⠀⢸⣿⣿⡏⠉⠉⢹⣿⣿⡇⠀⢸⣿⣿⣇⣀⣀⣸⣿⣿⣿⠀⢸⣿⣿⣿⣀⣀⣀⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡇⠀⠀⢸⣿⣿⡇⠀⠸⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⢿⣿⣿⣿⣿⣿⣿⣿⡟
⠀⠀⠀⠀⠀⠀⠀⠀⠘⠛⠛⠃⠀⠀⠀⠀⠀⠀⠀⠘⠛⠛⠃⠀⠀⠘⠛⠛⠃⠀⠀⠉⠛⠛⠛⠛⠛⠛⠋⠀⠀⠀⠀⠙⠛⠛⠛⠛⠛⠉⠀
""")
except UnicodeEncodeError as error:
pass
configure_settings.copy_settings_template_into_settings_file_if_not_present()
set_sigint_handler(exit_text='\nA database must be created for Pacu to work properly.')
setup_database_if_not_present(settings.DATABASE_FILE_PATH)
set_sigint_handler(exit_text=None, value='SIGINT called')
self.database = get_database_connection(settings.DATABASE_CONNECTION_PATH)
self.check_sessions()
self.initialize_tab_completion()
self.display_pacu_help()
self.check_for_updates()
idle_ready = True
self.check_user_agent()
self.idle()
except (Exception, SystemExit) as error:
exception_type, exception_value, tb = sys.exc_info()
if exception_type == SystemExit:
if 'SIGINT called' in exception_value.args:
print('\nBye!')
return
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}\n\nPacu caught a SystemExit error. This may be due to incorrect module arguments received by argparse in the module itself. Check to see if any required arguments are not being received by the module when it executes.'.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
# Catch sqlalchemy error
elif exception_type == exc.OperationalError:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}\n\nPacu database error. This could be caused by a recent update in Pacu\'s database\'s structure. If your Pacu has been updated recently, try removing your old db.sqlite3 database file.'.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
else:
traceback_text = '\nTraceback (most recent call last):\n{}{}: {}\n\n'.format(''.join(traceback.format_tb(tb)), str(exception_type), str(exception_value))
session, global_data, local_data = self.get_data_from_traceback(tb)
self.log_error(
traceback_text,
exception_info='{}: {}'.format(exception_type, exception_value),
session=session,
local_data=local_data,
global_data=global_data
)
if not idle_ready:
print('Pacu is unable to start. Try backing up Pacu\'s sqlite.db file and deleting the old version. If the error persists, try reinstalling Pacu in a new directory.')
return
def run(self):
parser = argparse.ArgumentParser()
parser.add_argument('--session', required=False, default=None, help='<session name>', metavar='')
parser.add_argument('--module-name', required=False, default=None, help='<module name>', metavar='')
parser.add_argument('--data', required=False, default=None, help='<service name/all>', metavar='')
        parser.add_argument('--module-args', default=None, help='<--module-args=\'--regions us-east-1,us-west-2\'>', metavar='')
        parser.add_argument('--list-modules', action='store_true', help='List all modules')
parser.add_argument('--pacu-help', action='store_true', help='List the Pacu help window')
parser.add_argument('--module-info', action='store_true', help='Get information on a specific module, use --module-name')
parser.add_argument('--exec', action='store_true', help='exec module')
parser.add_argument('--set-regions', nargs='+', default=None, help='<region1 region2 ...> or <all> for all', metavar='')
parser.add_argument('--whoami', action='store_true', help='Display information on current IAM user')
args = parser.parse_args()
if any([args.session, args.data, args.module_args, args.exec, args.set_regions, args.whoami]):
if args.session is None:
print('When running Pacu from the CLI, a session is necessary')
exit()
self.run_cli(args)
elif any([args.list_modules, args.pacu_help, args.module_info]):
self.check_for_updates()
self.run_cli(args)
else:
self.run_gui()
if __name__ == '__main__':
Main().run()
| 47.523839 | 423 | 0.55876 |
2658d72fa7002d87460ef9013e14045b7581e4a9 | 3,428 | py | Python | sdk/python/pulumi_azure_native/documentdb/v20210301preview/list_notebook_workspace_connection_info.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20210301preview/list_notebook_workspace_connection_info.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/documentdb/v20210301preview/list_notebook_workspace_connection_info.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListNotebookWorkspaceConnectionInfoResult',
'AwaitableListNotebookWorkspaceConnectionInfoResult',
'list_notebook_workspace_connection_info',
]
@pulumi.output_type
class ListNotebookWorkspaceConnectionInfoResult:
"""
The connection info for the given notebook workspace
"""
def __init__(__self__, auth_token=None, notebook_server_endpoint=None):
if auth_token and not isinstance(auth_token, str):
raise TypeError("Expected argument 'auth_token' to be a str")
pulumi.set(__self__, "auth_token", auth_token)
if notebook_server_endpoint and not isinstance(notebook_server_endpoint, str):
raise TypeError("Expected argument 'notebook_server_endpoint' to be a str")
pulumi.set(__self__, "notebook_server_endpoint", notebook_server_endpoint)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> str:
"""
Specifies auth token used for connecting to Notebook server (uses token-based auth).
"""
return pulumi.get(self, "auth_token")
@property
@pulumi.getter(name="notebookServerEndpoint")
def notebook_server_endpoint(self) -> str:
"""
Specifies the endpoint of Notebook server.
"""
return pulumi.get(self, "notebook_server_endpoint")
class AwaitableListNotebookWorkspaceConnectionInfoResult(ListNotebookWorkspaceConnectionInfoResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListNotebookWorkspaceConnectionInfoResult(
auth_token=self.auth_token,
notebook_server_endpoint=self.notebook_server_endpoint)
def list_notebook_workspace_connection_info(account_name: Optional[str] = None,
notebook_workspace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNotebookWorkspaceConnectionInfoResult:
"""
The connection info for the given notebook workspace
:param str account_name: Cosmos DB database account name.
:param str notebook_workspace_name: The name of the notebook workspace resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['notebookWorkspaceName'] = notebook_workspace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20210301preview:listNotebookWorkspaceConnectionInfo', __args__, opts=opts, typ=ListNotebookWorkspaceConnectionInfoResult).value
return AwaitableListNotebookWorkspaceConnectionInfoResult(
auth_token=__ret__.auth_token,
notebook_server_endpoint=__ret__.notebook_server_endpoint)
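# Usage sketch (illustrative; the account, workspace, and resource group names
# below are hypothetical placeholders):
#
#     info = list_notebook_workspace_connection_info(
#         account_name="my-cosmos-account",
#         notebook_workspace_name="default",
#         resource_group_name="my-resource-group")
#     pulumi.export("notebookEndpoint", info.notebook_server_endpoint)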
| 41.804878 | 189 | 0.712369 |
27b23d6e4cc910fbbad493964f76b738dfc09d0e | 11,582 | py | Python | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/basic_value_unit_currency_main_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/basic_value_unit_currency_main_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/basic_value_unit_currency_main_list_data.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | null | null | null |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.basic_value_unit_currency_main_list_data_filter import BasicValueUnitCurrencyMainListDataFilter
globals()['BasicValueUnitCurrencyMainListDataFilter'] = BasicValueUnitCurrencyMainListDataFilter
class BasicValueUnitCurrencyMainListData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'filter': (BasicValueUnitCurrencyMainListDataFilter,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'filter': 'filter', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BasicValueUnitCurrencyMainListData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (BasicValueUnitCurrencyMainListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BasicValueUnitCurrencyMainListData - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (BasicValueUnitCurrencyMainListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.206107 | 145 | 0.584528 |
5a38c5df2e435cba821016fa344f12b82ba6626b | 21,209 | py | Python | lisa/executable.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | ["MIT"] | 48 | 2018-05-19T17:46:34.000Z | 2020-09-28T21:09:06.000Z | lisa/executable.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | ["MIT"] | 1,261 | 2018-05-17T04:32:22.000Z | 2020-11-23T17:29:13.000Z | lisa/executable.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | ["MIT"] | 133 | 2018-05-15T23:12:14.000Z | 2020-11-13T10:37:49.000Z |
# Licensed under the MIT license.
from __future__ import annotations
import pathlib
from hashlib import sha256
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from lisa.util import InitializableMixin, LisaException, constants
from lisa.util.logger import get_logger
from lisa.util.perf_timer import create_timer
from lisa.util.process import ExecutableResult, Process
if TYPE_CHECKING:
from lisa.node import Node
T = TypeVar("T")
class Tool(InitializableMixin):
"""
    The base class, which wraps an executable, package, or script on a node.
    A tool can be installed on, and executed on, a node. When a tool is needed,
    call Tools[] to get one object. Tools[] checks if the tool is installed; if
    not, it checks whether it can be installed, and then installs it or fails.
    After the tool instance is returned, run/run_async of the tool calls
    execute/execute_async of the node, so the command is passed to the current node.
    The methods that must be implemented are marked with @abstractmethod. They include
    command: the command name, like echo or ntttcp. It is used in run/run_async to
    run the tool, and in _check_exists to check whether it's installed.
    The methods that should be implemented raise NotImplementedError, but are not
    marked as abstract methods. They include
    can_install: specifies whether a tool can be installed or not. If a tool is not
    builtin, it must implement this method.
    _install: if a tool is not builtin, it must implement this method. This
    method needs to install the tool and make sure it can afterwards be
    detected by _check_exists.
    The methods that may be implemented have empty default bodies. They include
    initialize: called when a tool is created, before any other methods are
    called. It can be used to initialize variables or run time-costing
    operations.
    dependencies: all depended-on tools; they are checked and installed before the
    current tool is installed. For example, ntttcp uses git to clone code
    and build, so it depends on the Git tool.
    See details in the method descriptions.
"""
def __init__(self, node: Node, *args: Any, **kwargs: Any) -> None:
"""
        It's not recommended to replace this __init__ method. Anything that needs
        to be initialized should go in the initialize() method.
"""
super().__init__()
self.node: Node = node
# triple states, None means not checked.
self._exists: Optional[bool] = None
self._log = get_logger("tool", self.name, self.node.log)
# specify the tool is in sudo or not. It may be set to True in
# _check_exists
self._use_sudo: bool = False
# cache the processes with same command line, so that it reduce time to
# rerun same commands.
self.__cached_results: Dict[str, Process] = {}
def __call__(
self,
parameters: str = "",
shell: bool = False,
no_error_log: bool = False,
no_info_log: bool = True,
cwd: Optional[pathlib.PurePath] = None,
) -> ExecutableResult:
return self.run(
parameters=parameters,
shell=shell,
no_error_log=no_error_log,
no_info_log=no_info_log,
cwd=cwd,
)
@property
def command(self) -> str:
"""
Return command string, which can be run in console. For example, echo.
The command can be different under different conditions. For example,
package management is 'yum' on CentOS, but 'apt' on Ubuntu.
"""
raise NotImplementedError("'command' is not implemented")
@property
def can_install(self) -> bool:
"""
        Indicates whether the tool supports installation or not. If this can return
        True, _install must be implemented.
"""
raise NotImplementedError("'can_install' is not implemented")
@property
def package_name(self) -> str:
"""
        Return the package name. It may differ from the command name, and may vary
        across platforms.
"""
return self.command
@property
def dependencies(self) -> List[Type[Tool]]:
"""
        Declare all dependencies here. They can be other tools, but avoid circular
        dependencies. The depended-on tools are checked and installed first.
"""
return []
@property
def name(self) -> str:
"""
        Unique name of a tool, also used as the tool's path. Don't change it, or
        there may be unpredictable behavior.
"""
return self.__class__.__name__.lower()
@property
def exists(self) -> bool:
"""
        Return whether a tool is installed. In most cases, overriding _check_exists is
        enough. But if you want to disable the cached result and check the tool every
        time, override this method. Note that remote operations take time, which is
        why caching is necessary.
        """
        # the check may incur extra cost, so cache its result.
if self._exists is None:
self._exists = self._check_exists()
return self._exists
@classmethod
def create(cls, node: Node, *args: Any, **kwargs: Any) -> Tool:
"""
        If there is a Windows version of the tool, return the Windows instance.
        Override this method if a richer creation factory is needed.
"""
tool_cls = cls
if not node.is_posix:
windows_tool = cls._windows_tool()
if windows_tool:
tool_cls = windows_tool
return tool_cls(node, *args, **kwargs)
@classmethod
def _windows_tool(cls) -> Optional[Type[Tool]]:
"""
return a windows version tool class, if it's needed
"""
return None
def command_exists(self, command: str) -> Tuple[bool, bool]:
exists = False
use_sudo = False
if self.node.is_posix:
where_command = "command -v"
else:
where_command = "where"
where_command = f"{where_command} {command}"
result = self.node.execute(where_command, shell=True, no_info_log=True)
if result.exit_code == 0:
exists = True
use_sudo = False
else:
result = self.node.execute(
where_command,
shell=True,
no_info_log=True,
sudo=True,
)
if result.exit_code == 0:
self._log.debug(
"executable exists in root paths, "
"sudo always brings in following commands."
)
exists = True
use_sudo = True
return exists, use_sudo
def install(self) -> bool:
"""
        Default behavior of installing a tool, including dependencies. It doesn't need to
be overridden.
"""
# check dependencies
if self.dependencies:
self._log.info("installing dependencies")
for dependency in self.dependencies:
self.node.tools[dependency]
return self._install()
def run_async(
self,
parameters: str = "",
force_run: bool = False,
shell: bool = False,
sudo: bool = False,
no_error_log: bool = False,
no_info_log: bool = True,
no_debug_log: bool = False,
cwd: Optional[pathlib.PurePath] = None,
update_envs: Optional[Dict[str, str]] = None,
) -> Process:
"""
        Run a command asynchronously and return the Process. The Process can be used
        for async operations, or to kill the command directly.
"""
if parameters:
command = f"{self.command} {parameters}"
else:
command = self.command
# If the command exists in sbin, use the root permission, even the sudo
# is not specified.
sudo = sudo or self._use_sudo
command_key = f"{command}|{shell}|{sudo}|{cwd}"
process = self.__cached_results.get(command_key, None)
if force_run or not process:
process = self.node.execute_async(
command,
shell=shell,
sudo=sudo,
no_error_log=no_error_log,
no_info_log=no_info_log,
no_debug_log=no_debug_log,
cwd=cwd,
update_envs=update_envs,
)
self.__cached_results[command_key] = process
else:
self._log.debug(f"loaded cached result for command: [{command}]")
return process
def run(
self,
parameters: str = "",
force_run: bool = False,
shell: bool = False,
sudo: bool = False,
no_error_log: bool = False,
no_info_log: bool = True,
no_debug_log: bool = False,
cwd: Optional[pathlib.PurePath] = None,
update_envs: Optional[Dict[str, str]] = None,
timeout: int = 600,
expected_exit_code: Optional[int] = None,
expected_exit_code_failure_message: str = "",
) -> ExecutableResult:
"""
Run a process and wait for result.
"""
process = self.run_async(
parameters=parameters,
force_run=force_run,
shell=shell,
sudo=sudo,
no_error_log=no_error_log,
no_info_log=no_info_log,
no_debug_log=no_debug_log,
cwd=cwd,
update_envs=update_envs,
)
return process.wait_result(
timeout=timeout,
expected_exit_code=expected_exit_code,
expected_exit_code_failure_message=expected_exit_code_failure_message,
)
def get_tool_path(self, use_global: bool = False) -> pathlib.PurePath:
"""
        compose a path for the tool, used when the tool needs to be installed
"""
if use_global:
# change from lisa_working/20220126/20220126-194017-621 to
            # lisa_working. The self.node.generate_working_path will determine
            # whether it's Windows or Linux.
working_path = self.node.get_working_path().parent.parent
else:
assert self.node.working_path, "working path is not initialized"
working_path = self.node.working_path
path = working_path.joinpath(constants.PATH_TOOL, self.name)
self.node.shell.mkdir(path, exist_ok=True)
return path
def _install(self) -> bool:
"""
        Execute the installation process, such as building or installing from
        packages. If other tools are depended on, specify them in dependencies.
        Other tools can be used here; refer to the ntttcp implementation.
"""
raise NotImplementedError("'install' is not implemented")
def _initialize(self, *args: Any, **kwargs: Any) -> None:
"""
        Declare and initialize variables here, or run time-costing initialization.
        This method is called before other methods, when initializing on a node.
"""
...
def _check_exists(self) -> bool:
"""
        Default implementation to check if a tool exists. This method is called by
        the exists property, and its result is cached. Builtin tools can override it
        to return True directly to save time.
"""
exists, self._use_sudo = self.command_exists(self.command)
return exists
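# Illustrative sketch (not part of this module): a minimal Tool subclass showing
# the members most tools implement. The "ntpstat" command and the node.os
# package-manager helper are assumptions for the example.
#
#     class Ntpstat(Tool):
#         @property
#         def command(self) -> str:
#             return "ntpstat"
#
#         @property
#         def can_install(self) -> bool:
#             return self.node.is_posix
#
#         def _install(self) -> bool:
#             self.node.os.install_packages("ntpstat")  # assumed Posix helper
#             return self._check_exists()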
class CustomScript(Tool):
def __init__(
self,
name: str,
node: Node,
local_path: pathlib.Path,
files: List[pathlib.PurePath],
command: Optional[str] = None,
dependencies: Optional[List[Type[Tool]]] = None,
) -> None:
self._name = name
self._command = command
super().__init__(node)
self._local_path = local_path
self._files = files
self._cwd: Union[pathlib.PurePath, pathlib.Path]
if dependencies:
self._dependencies = dependencies
else:
self._dependencies = []
def run_async(
self,
parameters: str = "",
force_run: bool = False,
shell: bool = False,
sudo: bool = False,
no_error_log: bool = False,
no_info_log: bool = True,
no_debug_log: bool = False,
cwd: Optional[pathlib.PurePath] = None,
update_envs: Optional[Dict[str, str]] = None,
) -> Process:
if cwd is not None:
raise LisaException("don't set cwd for script")
return super().run_async(
parameters=parameters,
force_run=force_run,
shell=shell,
sudo=sudo,
no_error_log=no_error_log,
no_info_log=no_info_log,
no_debug_log=no_debug_log,
cwd=self._cwd,
update_envs=update_envs,
)
def run(
self,
parameters: str = "",
force_run: bool = False,
shell: bool = False,
sudo: bool = False,
no_error_log: bool = False,
no_info_log: bool = True,
no_debug_log: bool = False,
cwd: Optional[pathlib.PurePath] = None,
update_envs: Optional[Dict[str, str]] = None,
timeout: int = 600,
expected_exit_code: Optional[int] = None,
expected_exit_code_failure_message: str = "",
) -> ExecutableResult:
process = self.run_async(
parameters=parameters,
force_run=force_run,
shell=shell,
sudo=sudo,
no_error_log=no_error_log,
no_info_log=no_info_log,
no_debug_log=no_debug_log,
cwd=cwd,
update_envs=update_envs,
)
return process.wait_result(
timeout=timeout,
expected_exit_code=expected_exit_code,
expected_exit_code_failure_message=expected_exit_code_failure_message,
)
@property
def name(self) -> str:
return self._name
@property
def command(self) -> str:
assert self._command
return self._command
@property
def can_install(self) -> bool:
return True
def _check_exists(self) -> bool:
        # the underlying '_check_exists' doesn't work for scripts, but once a script
        # is cached on the node, it won't be copied again. So there is no need to
        # check existence.
return False
@property
def dependencies(self) -> List[Type[Tool]]:
return self._dependencies
def install(self) -> bool:
if self.node.is_remote:
# copy to remote
node_script_path = self.get_tool_path()
for file in self._files:
remote_path = node_script_path.joinpath(file)
source_path = self._local_path.joinpath(file)
self.node.shell.copy(source_path, remote_path)
self.node.shell.chmod(remote_path, 0o755)
self._cwd = node_script_path
else:
self._cwd = self._local_path
if not self._command:
if self.node.is_posix:
                # in Linux, a local script must be a relative path.
self._command = f"./{pathlib.PurePosixPath(self._files[0])}"
else:
# windows needs absolute path
self._command = f"{self._cwd.joinpath(self._files[0])}"
return True
class CustomScriptBuilder:
"""
    With CustomScriptBuilder, providing the variables is enough to use it like a tool.
    It needs some special handling in tool.py, but not much.
"""
def __init__(
self,
root_path: pathlib.Path,
files: List[str],
command: Optional[str] = None,
dependencies: Optional[List[Type[Tool]]] = None,
) -> None:
if not files:
raise LisaException("CustomScriptSpec should have at least one file")
self._dependencies = dependencies
root_path = root_path.resolve().absolute()
files_path: List[pathlib.PurePath] = []
for file_str in files:
file = pathlib.PurePath(file_str)
if not file.is_absolute():
raise LisaException(f"file must be relative path: '{file_str}'")
absolute_file = root_path.joinpath(file).resolve()
if not absolute_file.exists():
raise LisaException(f"cannot find file {absolute_file}")
try:
file = absolute_file.relative_to(root_path)
except ValueError:
raise LisaException(f"file '{file_str}' must be in '{root_path}'")
files_path.append(file)
self._files = files_path
self._local_rootpath: pathlib.Path = root_path
self._command: Union[str, None] = None
if command:
command_identifier = command
self._command = command
else:
command_identifier = files[0]
        # generate a unique name based on the file names
command_identifier = constants.NORMALIZE_PATTERN.sub("-", command_identifier)
hash_source = "".join(files).encode("utf-8")
hash_result = sha256(hash_source).hexdigest()[:8]
self.name = f"custom-{command_identifier}-{hash_result}".lower()
def build(self, node: Node) -> CustomScript:
return CustomScript(
self.name, node, self._local_rootpath, self._files, self._command
)
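# Editor's sketch (illustrative; not part of the original module): wiring a
# CustomScriptBuilder into Tools.get, which accepts builder instances (see
# Tools.get below). The directory, script name, and node.tools access are
# assumptions for illustration only.
def _example_run_custom_script(node: Node) -> ExecutableResult:
    builder = CustomScriptBuilder(
        root_path=pathlib.Path(__file__).parent / "scripts",  # hypothetical
        files=["hello.sh"],  # must be a path relative to root_path
    )
    script = node.tools[builder]  # copies the files to the node, then caches
    return script.run()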
class Tools:
def __init__(self, node: Node) -> None:
self._node = node
self._cache: Dict[str, Tool] = {}
def __getattr__(self, key: str) -> Tool:
"""
for shortcut access like node.tools.echo.call_method()
"""
return self.__getitem__(key)
def __getitem__(self, tool_type: Union[Type[T], CustomScriptBuilder, str]) -> T:
return self.get(tool_type=tool_type)
def create(
self,
tool_type: Union[Type[T], CustomScriptBuilder, str],
*args: Any,
**kwargs: Any,
) -> T:
"""
Create a new tool with given arguments. Call it only when a new tool is
needed. Otherwise, call the get method.
"""
tool_key = self._get_tool_key(tool_type)
tool = self._cache.get(tool_key, None)
if tool:
del self._cache[tool_key]
return self.get(tool_type, *args, **kwargs)
def get(
self,
tool_type: Union[Type[T], CustomScriptBuilder, str],
*args: Any,
**kwargs: Any,
) -> T:
"""
        Return a typed tool instance, or a script built from a CustomScriptBuilder.
for example,
echo_tool = node.tools[Echo]
echo_tool.run("hello")
"""
if tool_type is CustomScriptBuilder:
raise LisaException(
"CustomScriptBuilder should call build to create a script instance"
)
tool_key = self._get_tool_key(tool_type)
tool = self._cache.get(tool_key)
if tool is None:
            # The tool is not cached for this node yet; initialize it and install it if needed.
tool_log = get_logger("tool", tool_key, self._node.log)
tool_log.debug(f"initializing tool [{tool_key}]")
if isinstance(tool_type, CustomScriptBuilder):
tool = tool_type.build(self._node)
elif isinstance(tool_type, str):
raise LisaException(
f"{tool_type} cannot be found. "
f"short usage need to get with type before get with name."
)
else:
cast_tool_type = cast(Type[Tool], tool_type)
tool = cast_tool_type.create(self._node, *args, **kwargs)
tool.initialize()
if not tool.exists:
tool_log.debug(f"'{tool.name}' not installed")
if tool.can_install:
tool_log.debug(f"{tool.name} is installing")
timer = create_timer()
is_success = tool.install()
if not is_success:
raise LisaException(
f"install '{tool.name}' failed. After installed, "
f"it cannot be detected."
)
tool_log.debug(f"installed in {timer}")
else:
raise LisaException(
f"cannot find [{tool.name}] on [{self._node.name}], "
f"{self._node.os.__class__.__name__}, "
f"Remote({self._node.is_remote}) "
f"and installation of [{tool.name}] isn't enabled in lisa."
)
else:
tool_log.debug("installed already")
self._cache[tool_key] = tool
return cast(T, tool)
def _get_tool_key(self, tool_type: Union[type, CustomScriptBuilder, str]) -> str:
if isinstance(tool_type, CustomScriptBuilder):
tool_key = tool_type.name
elif isinstance(tool_type, str):
tool_key = tool_type.lower()
else:
tool_key = tool_type.__name__.lower()
return tool_key
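# Editor's sketch (illustrative; not part of the original module): the cache
# semantics implemented above. get() reuses a cached instance, create()
# evicts it first; 'tool_cls' stands in for any concrete Tool subclass.
def _example_tool_cache(node: Node, tool_cls: Type[Tool]) -> None:
    first = node.tools[tool_cls]         # installed (if possible) and cached
    again = node.tools[tool_cls]         # served from the cache
    assert first is again
    fresh = node.tools.create(tool_cls)  # drops the cache entry and re-gets
    assert fresh is not first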
| 34.430195 | 88 | 0.585318 |
1041c2cf95520d29c180c84549d2a48b6112522d | 1,355 | py | Python | code/models/tree_path_model.py | sourcery-ai-bot/csn | e96d72865e1d5765566165579c6ffc80230b544a | [
"MIT"
] | null | null | null | code/models/tree_path_model.py | sourcery-ai-bot/csn | e96d72865e1d5765566165579c6ffc80230b544a | [
"MIT"
] | null | null | null | code/models/tree_path_model.py | sourcery-ai-bot/csn | e96d72865e1d5765566165579c6ffc80230b544a | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional
from encoders import NBoWEncoder, TreePathEncoder
from models import Model
class TreePathModel(Model):
@classmethod
def get_default_hyperparameters(cls) -> Dict[str, Any]:
hypers = {}
label = 'code'
hypers.update({f'{label}_{key}': value
for key, value in TreePathEncoder.get_default_hyperparameters().items()})
label = 'query'
hypers.update({f'{label}_{key}': value
for key, value in NBoWEncoder.get_default_hyperparameters().items()})
model_hypers = {
'code_use_subtokens': False,
'code_mark_subtoken_end': False,
'loss': 'cosine',
'batch_size': 1000
}
hypers.update(super().get_default_hyperparameters())
hypers.update(model_hypers)
return hypers
def __init__(self,
hyperparameters: Dict[str, Any],
                 run_name: Optional[str] = None,
model_save_dir: Optional[str] = None,
log_save_dir: Optional[str] = None):
super().__init__(
hyperparameters,
code_encoder_type=TreePathEncoder,
query_encoder_type=NBoWEncoder,
run_name=run_name,
model_save_dir=model_save_dir,
log_save_dir=log_save_dir)
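# Editor's sketch (illustrative; not part of the original file): the default
# hyperparameters built above namespace encoder settings by prefix while
# model-level keys stay flat.
def _example_hyperparameter_namespacing() -> None:
    hypers = TreePathModel.get_default_hyperparameters()
    assert hypers["loss"] == "cosine"  # model-level key, applied last
    assert any(key.startswith("code_") for key in hypers)   # TreePathEncoder
    assert any(key.startswith("query_") for key in hypers)  # NBoWEncoder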
| 34.74359 | 96 | 0.591882 |
da372c1922d19767031e2c57a9d35d7d7e7f4d74 | 1,371 | py | Python | preprocessing.py | pk0912/threat_text_classification | c0286e8f15a105d495ff395573070acefe5efd8f | [
"MIT"
] | null | null | null | preprocessing.py | pk0912/threat_text_classification | c0286e8f15a105d495ff395573070acefe5efd8f | [
"MIT"
] | null | null | null | preprocessing.py | pk0912/threat_text_classification | c0286e8f15a105d495ff395573070acefe5efd8f | [
"MIT"
] | null | null | null | """
Python script for data pre-processing
"""
import re
import utils.text_processing as tp
from utils.helpers import logger
def remove_referenced_name(text):
pattern = r"@[^\s\.\?,;:!]*"
return re.sub(pattern, " ", text).strip()
def simple_processing(text):
text = tp.unicode_normalize(text)
text = tp.general_regex(text)
text = remove_referenced_name(text)
return text
def complex_processing(text):
text = tp.unicode_normalize(text)
text = tp.lowercasing(text)
text = tp.general_regex(text)
text = remove_referenced_name(text)
text = tp.get_decontracted_form(text)
text = tp.keep_alpha_space(text)
text = tp.remove_repeating_chars(text)
text = tp.remove_stopwords(text)
if text != "":
text = tp.perform_lemmatization(text)
return text
def preprocess(data, preprocess_type="simple"):
try:
if preprocess_type == "simple":
data["text"] = data["comment_text"].map(simple_processing)
else:
data["text"] = data["comment_text"].map(complex_processing)
data = data.drop(columns=["comment_text"])
data = data.dropna()
data = data.drop(data.loc[data["text"] == ""].index)
return data.reset_index(drop=True)
except Exception as e:
logger.error("Exception in pre-processing : {}".format(str(e)))
return None
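# Editor's sketch (illustrative; not part of the original file): 'preprocess'
# expects a pandas-style DataFrame with a 'comment_text' column; pandas is
# assumed to be available, as the .map/.drop/.dropna usage above implies.
def _example_preprocess():
    import pandas as pd
    frame = pd.DataFrame({"comment_text": ["@user thanks for the report!", ""]})
    # The mention is stripped and the empty row dropped, leaving one 'text' row.
    return preprocess(frame, preprocess_type="simple")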
| 27.42 | 71 | 0.657185 |
6792d586035579a089c9685f4510f9981aec0c79 | 815 | py | Python | src/semu.robotics.ros2_bridge/compile_extension.py | Toni-SM/omni.add_on.ros2_bridge | 9c5e47153d51da3a401d7f4ce679b773b32beffc | [
"MIT"
] | null | null | null | src/semu.robotics.ros2_bridge/compile_extension.py | Toni-SM/omni.add_on.ros2_bridge | 9c5e47153d51da3a401d7f4ce679b773b32beffc | [
"MIT"
] | null | null | null | src/semu.robotics.ros2_bridge/compile_extension.py | Toni-SM/omni.add_on.ros2_bridge | 9c5e47153d51da3a401d7f4ce679b773b32beffc | [
"MIT"
] | null | null | null | import os
import sys
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# OV python (kit\python\include)
if sys.platform == 'win32':
    raise Exception('Windows is not supported')
elif sys.platform == 'linux':
    python_library_dir = os.path.join(os.path.dirname(sys.executable), "..", "include")
else:
    raise Exception('Unsupported platform: {}'.format(sys.platform))
if not os.path.exists(python_library_dir):
    raise Exception("OV Python library directory not found: {}".format(python_library_dir))
ext_modules = [
Extension("_ros2_bridge",
[os.path.join("semu", "robotics", "ros2_bridge", "ros2_bridge.py")],
library_dirs=[python_library_dir]),
]
setup(
name = 'semu.robotics.ros2_bridge',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
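# Editor's note (assumption; not in the original file): a distutils script
# like this is typically invoked as
#     python compile_extension.py build_ext --inplace
# so the compiled _ros2_bridge extension lands next to the sources.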
| 28.103448 | 91 | 0.70184 |
6e9e4c11aa5bb1aa04a4b06a73ba022b7b2694ce | 433 | py | Python | customer/migrations/0002_alter_dccustomer_email.py | hasmeed/democrance_backend | 27d3bafd3ae1d241aa13b211769903b2051192c0 | [
"Unlicense"
] | null | null | null | customer/migrations/0002_alter_dccustomer_email.py | hasmeed/democrance_backend | 27d3bafd3ae1d241aa13b211769903b2051192c0 | [
"Unlicense"
] | null | null | null | customer/migrations/0002_alter_dccustomer_email.py | hasmeed/democrance_backend | 27d3bafd3ae1d241aa13b211769903b2051192c0 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2 on 2022-02-02 21:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='dccustomer',
name='email',
field=models.EmailField(max_length=254, null=True, unique=True, verbose_name='Email Address'),
),
]
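# Editor's note (not in the original file): Django applies this migration with
#     python manage.py migrate customer
# after which dccustomer.email is a unique, nullable EmailField.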
| 22.789474 | 106 | 0.616628 |
74c5e63fb215423a81f2c2931c76a5194a6221ef | 4,268 | py | Python | modules/dbnd-airflow/test_dbnd_airflow/utils.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/test_dbnd_airflow/utils.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | modules/dbnd-airflow/test_dbnd_airflow/utils.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | null | null | null | import functools
import json
import logging
from typing import Any
import pytest
from airflow.settings import Session
from flask import url_for
from flask_appbuilder.security.sqla.models import User as ab_user
from pytest import fixture
from six.moves.urllib.parse import quote_plus
from targets import target
logger = logging.getLogger(__name__)
skip_py2_non_compatible = functools.partial(
pytest.mark.skip, "Python 2 non compatible code"
)
def _assert_in_resp_html(text, resp_html):
if text not in resp_html:
target("/tmp/ttt.html").write(resp_html)
logger.info(resp_html)
logger.error("Can't find %s", text)
assert text in resp_html
def assert_content_in_response(text, resp, resp_code=200):
resp_html = resp.data.decode("utf-8")
assert resp_code == resp.status_code, "Response: %s" % str(resp)
if not isinstance(text, list):
text = [text]
for kw in text:
_assert_in_resp_html(kw, resp_html)
return resp_html
def assert_api_response(resp, resp_code=200, endpoint=None):
assert resp_code == resp.status_code, (
"Endpoint: %s\nResponse code: %s\nResponse data: %s"
% (str(endpoint), str(resp.status_code), str(resp.data))
)
resp_data = json.loads(resp.data.decode())
logger.info("Response: %s", resp_data)
return resp_data
def assert_ok(resp):
assert resp.status_code == 200
def percent_encode(obj):
return quote_plus(str(obj))
class WebAppCtrl(object):
def __init__(self, app, appbuilder, client):
self.app = app
self.appbuilder = appbuilder
self.client = client
self.session = Session()
def login(self):
sm_session = self.appbuilder.sm.get_session()
self.user = sm_session.query(ab_user).filter(ab_user.username == "test").first()
if not self.user:
role_admin = self.appbuilder.sm.find_role("Admin")
self.appbuilder.sm.add_user(
username="test",
first_name="test",
last_name="test",
email="test@fab.org",
role=role_admin,
password="test",
)
self.user = (
sm_session.query(ab_user).filter(ab_user.username == "test").first()
)
return self.client.post(
"/login/",
data=dict(username="test", password="test"),
follow_redirects=True,
)
def logout(self):
return self.client.get("/logout/")
def clear_table(self, model):
self.session.query(model).delete()
self.session.commit()
self.session.close()
class WebAppTest(object):
@fixture(autouse=True)
def _set_values(self, web_app_ctrl):
self.web = web_app_ctrl # type: WebAppCtrl
@property
def app(self):
return self.web.app
@property
def client(self):
return self.web.client
@property
def session(self):
return self.web.session
def _url(self, endpoint, **kwargs):
if endpoint.startswith("/"):
from urllib.parse import urlencode
url = endpoint
if kwargs:
url += "?" + urlencode(kwargs)
else:
url = url_for(endpoint, **kwargs)
logger.info("URL = %s", url)
return url
def _get(self, endpoint, **kwargs):
follow_redirects = kwargs.pop("follow_redirects", True)
url = self._url(endpoint, **kwargs)
return self.client.get(url, follow_redirects=follow_redirects)
def _post(self, endpoint, **kwargs):
follow_redirects = kwargs.pop("follow_redirects", True)
url = self._url(endpoint, **kwargs)
return self.client.post(url, follow_redirects=follow_redirects)
def assert_view(self, endpoint, expected, **kwargs):
resp = self._get(endpoint, **kwargs)
return assert_content_in_response(expected, resp)
def assert_api(self, endpoint, **kwargs):
resp = self._get(endpoint, **kwargs)
return assert_api_response(resp, endpoint=endpoint)
@fixture(autouse=True)
def _with_login(self, web_app_ctrl): # type: (WebAppCtrl) -> Any
yield web_app_ctrl.login()
web_app_ctrl.logout()
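# Editor's sketch (illustrative; not part of the original module): a concrete
# test built on the helpers above. The endpoint and the expected markup
# fragment are hypothetical placeholders.
class ExampleHomeViewTest(WebAppTest):
    def test_home_renders(self):
        self.assert_view("/home", "DAGs")  # asserts HTTP 200 and substring match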
| 28.453333 | 88 | 0.631209 |
5254f253867c3f8ff2630a3c482a2ec37c483efb | 1,295 | py | Python | symtmm/backing.py | Matael/symtmm | 7156172259c77b3fa48df322f3456313c1031fcd | [
"MIT"
] | 1 | 2021-02-24T01:53:57.000Z | 2021-02-24T01:53:57.000Z | symtmm/backing.py | Matael/symtmm | 7156172259c77b3fa48df322f3456313c1031fcd | [
"MIT"
] | null | null | null | symtmm/backing.py | Matael/symtmm | 7156172259c77b3fa48df322f3456313c1031fcd | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding:utf8 -*-
#
# backing.py
#
# This file is part of symtmm, software distributed under the MIT license.
# For any questions, please contact the author below.
#
# Copyright (c) 2017 Mathieu Gaborit <gaborit@kth.se>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import sympy as sp
def rigid(medium):
if medium.MODEL == 'fluid':
return sp.Matrix([[0, 1]])
elif medium.MODEL == 'elastic':
return sp.Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
])
elif medium.MODEL == 'pem':
return sp.Matrix([
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
])
else:
raise ValueError('Type of material not known')
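# Editor's sketch (illustrative; not part of the original file): 'medium'
# only needs a MODEL attribute. The remark about the state vector is an
# assumption about how symtmm orders its fluid state variables.
def _example_rigid_fluid_backing():
    class _FluidStub:
        MODEL = 'fluid'
    # Matrix([[0, 1]]) selects the second state component to vanish at the
    # rigid wall (the normal velocity, assuming a [p, v] state ordering).
    return rigid(_FluidStub())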
| 31.585366 | 80 | 0.63861 |
ca2bdf07a3fdbaace365587f12e74335001316f4 | 1,085 | py | Python | examples/views/counter.py | nextcord-ext/nextcord | 7b3022ae19299e1f40f5f34da33b80ae491aa06a | [
"MIT"
] | null | null | null | examples/views/counter.py | nextcord-ext/nextcord | 7b3022ae19299e1f40f5f34da33b80ae491aa06a | [
"MIT"
] | null | null | null | examples/views/counter.py | nextcord-ext/nextcord | 7b3022ae19299e1f40f5f34da33b80ae491aa06a | [
"MIT"
] | null | null | null | import nextcord
from nextcord.ext import commands
# Define a simple View that gives us a counter button
class Counter(nextcord.ui.View):
# Define the actual button
# When pressed, this increments the number displayed until it hits 5.
# When it hits 5, the counter button is disabled and it turns green.
# note: The name of the function does not matter to the library
@nextcord.ui.button(label="0", style=nextcord.ButtonStyle.red)
async def count(
self, button: nextcord.ui.Button, interaction: nextcord.Interaction
):
number = int(button.label) if button.label else 0
if number + 1 >= 5:
button.style = nextcord.ButtonStyle.green
button.disabled = True
button.label = str(number + 1)
# Make sure to update the message with our updated selves
await interaction.response.edit_message(view=self)
bot = commands.Bot(command_prefix="$")
@bot.command()
async def counter(ctx):
"""Starts a counter for pressing."""
await ctx.send("Press!", view=Counter())
bot.run("token")
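# Editor's note (not in the original file): "token" above is a placeholder;
# bot.run() needs a real bot token from the Discord developer portal.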
| 29.324324 | 75 | 0.680184 |
f7b290d88ef35dfdabd9ab8a762327f67e603674 | 8,185 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mediasvr_linux_oper.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 177 | 2016-03-15T17:03:51.000Z | 2022-03-18T16:48:44.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mediasvr_linux_oper.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2016-03-30T10:45:22.000Z | 2020-07-14T16:28:13.000Z | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mediasvr_linux_oper.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2016-03-16T20:38:57.000Z | 2022-02-22T04:26:02.000Z | """ Cisco_IOS_XR_mediasvr_linux_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR mediasvr\-linux package operational data.
This module contains definitions
for the following management objects\:
media\-svr\: Media server CLI operations
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class MediaSvr(_Entity_):
"""
Media server CLI operations
.. attribute:: all
Show Media bag
**type**\: :py:class:`All <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mediasvr_linux_oper.MediaSvr.All>`
**config**\: False
.. attribute:: location_descriptions
Show Media
**type**\: :py:class:`LocationDescriptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mediasvr_linux_oper.MediaSvr.LocationDescriptions>`
**config**\: False
"""
_prefix = 'mediasvr-linux-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(MediaSvr, self).__init__()
self._top_entity = None
self.yang_name = "media-svr"
self.yang_parent_name = "Cisco-IOS-XR-mediasvr-linux-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("all", ("all", MediaSvr.All)), ("location-descriptions", ("location_descriptions", MediaSvr.LocationDescriptions))])
self._leafs = OrderedDict()
self.all = MediaSvr.All()
self.all.parent = self
self._children_name_map["all"] = "all"
self.location_descriptions = MediaSvr.LocationDescriptions()
self.location_descriptions.parent = self
self._children_name_map["location_descriptions"] = "location-descriptions"
self._segment_path = lambda: "Cisco-IOS-XR-mediasvr-linux-oper:media-svr"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MediaSvr, [], name, value)
class All(_Entity_):
"""
Show Media bag
.. attribute:: show_output
string output
**type**\: str
**config**\: False
"""
_prefix = 'mediasvr-linux-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(MediaSvr.All, self).__init__()
self.yang_name = "all"
self.yang_parent_name = "media-svr"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('show_output', (YLeaf(YType.str, 'show-output'), ['str'])),
])
self.show_output = None
self._segment_path = lambda: "all"
self._absolute_path = lambda: "Cisco-IOS-XR-mediasvr-linux-oper:media-svr/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MediaSvr.All, ['show_output'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_mediasvr_linux_oper as meta
return meta._meta_table['MediaSvr.All']['meta_info']
class LocationDescriptions(_Entity_):
"""
Show Media
.. attribute:: location_description
Location specified in location
**type**\: list of :py:class:`LocationDescription <ydk.models.cisco_ios_xr.Cisco_IOS_XR_mediasvr_linux_oper.MediaSvr.LocationDescriptions.LocationDescription>`
**config**\: False
"""
_prefix = 'mediasvr-linux-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(MediaSvr.LocationDescriptions, self).__init__()
self.yang_name = "location-descriptions"
self.yang_parent_name = "media-svr"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("location-description", ("location_description", MediaSvr.LocationDescriptions.LocationDescription))])
self._leafs = OrderedDict()
self.location_description = YList(self)
self._segment_path = lambda: "location-descriptions"
self._absolute_path = lambda: "Cisco-IOS-XR-mediasvr-linux-oper:media-svr/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MediaSvr.LocationDescriptions, [], name, value)
class LocationDescription(_Entity_):
"""
Location specified in location
.. attribute:: node (key)
Node location
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
**config**\: False
.. attribute:: show_output
string output
**type**\: str
**config**\: False
"""
_prefix = 'mediasvr-linux-oper'
_revision = '2015-11-09'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(MediaSvr.LocationDescriptions.LocationDescription, self).__init__()
self.yang_name = "location-description"
self.yang_parent_name = "location-descriptions"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('node', (YLeaf(YType.str, 'node'), ['str'])),
('show_output', (YLeaf(YType.str, 'show-output'), ['str'])),
])
self.node = None
self.show_output = None
self._segment_path = lambda: "location-description" + "[node='" + str(self.node) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-mediasvr-linux-oper:media-svr/location-descriptions/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MediaSvr.LocationDescriptions.LocationDescription, ['node', 'show_output'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_mediasvr_linux_oper as meta
return meta._meta_table['MediaSvr.LocationDescriptions.LocationDescription']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_mediasvr_linux_oper as meta
return meta._meta_table['MediaSvr.LocationDescriptions']['meta_info']
def clone_ptr(self):
self._top_entity = MediaSvr()
return self._top_entity
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_mediasvr_linux_oper as meta
return meta._meta_table['MediaSvr']['meta_info']
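# Editor's sketch (assumption; not part of the generated file): operational
# models like this are usually read over NETCONF with ydk's CRUDService.
# The device address and credentials below are placeholders.
def _example_read_media_svr():
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(
        address="192.0.2.1", username="admin", password="admin")
    return CRUDService().read(provider, MediaSvr())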
| 33.962656 | 172 | 0.594746 |
aeb238f0e35645fc2cc8bba6e840d4466e05c802 | 479 | py | Python | mmdet/models/detectors/yolov3.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/yolov3.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/yolov3.py | mrsempress/mmdetection | cb650560c97a2fe56a9b369a1abc8ec17e06583a | [
"Apache-2.0"
] | null | null | null | from .single_stage import SingleStageDetector
from ..registry import DETECTORS
@DETECTORS.register_module
class YOLOv3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(YOLOv3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
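# Editor's note (illustrative; not in the original file): via the registry
# decorator above, mmdetection builds this detector from a config dict such
# as dict(type='YOLOv3', backbone=..., neck=..., bbox_head=...), rather than
# by direct instantiation.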
| 28.176471 | 74 | 0.559499 |
8435b178cdaaf8a499654dd5bab2f0067e2ca98c | 955 | py | Python | appinit_backend/app/lib/jobs/notify.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | 1 | 2020-09-11T01:20:07.000Z | 2020-09-11T01:20:07.000Z | appinit_backend/app/lib/jobs/notify.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | null | null | null | appinit_backend/app/lib/jobs/notify.py | app-init/backend | 02bfc059aaa3ba34cb31c2c0cec92391f08826d9 | [
"MIT"
] | null | null | null | from appinit_backend.lib.imports import *
from appinit_backend.lib.notifications import email as email_notifications
def call(action, job=None):
manager = Manager()
users = set()
title = None
body = None
if action == "stopped":
title = "Jobs-Scheduler has stopped"
# groups.add("jobs.scheduler.stopped")
users.add("mowens")
body = "All runners have finished their remaining jobs, and the scheduler has stopped. The container is safe for stopping or restarting."
elif job is not None:
jid = None
if "_id" in job:
jid = job["_id"]
else:
jid = job["id"]
users.add(job["uid"])
title = """Job %s has %s""" % (jid, action)
body = """Job <a href="https://%s/jobs/%s/results/">%s</a> running '%s' has %s.""" % (manager.get_hostname(), jid, jid, job["api"], action)
else:
return None
    email_notifications.call("Job Runner", title, users, body, job=False)
 | 35.37037 | 145 | 0.624084 |
5485749fccb598851d777efd6fc93c68f2d2e5a5 | 5,697 | py | Python | ultracart/models/user_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | 1 | 2018-03-15T16:56:23.000Z | 2018-03-15T16:56:23.000Z | ultracart/models/user_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | ultracart/models/user_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UserResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'Error',
'metadata': 'ResponseMetadata',
'success': 'bool',
'user': 'User',
'warning': 'Warning'
}
attribute_map = {
'error': 'error',
'metadata': 'metadata',
'success': 'success',
'user': 'user',
'warning': 'warning'
}
def __init__(self, error=None, metadata=None, success=None, user=None, warning=None): # noqa: E501
"""UserResponse - a model defined in Swagger""" # noqa: E501
self._error = None
self._metadata = None
self._success = None
self._user = None
self._warning = None
self.discriminator = None
if error is not None:
self.error = error
if metadata is not None:
self.metadata = metadata
if success is not None:
self.success = success
if user is not None:
self.user = user
if warning is not None:
self.warning = warning
@property
def error(self):
"""Gets the error of this UserResponse. # noqa: E501
:return: The error of this UserResponse. # noqa: E501
:rtype: Error
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this UserResponse.
:param error: The error of this UserResponse. # noqa: E501
:type: Error
"""
self._error = error
@property
def metadata(self):
"""Gets the metadata of this UserResponse. # noqa: E501
:return: The metadata of this UserResponse. # noqa: E501
:rtype: ResponseMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this UserResponse.
:param metadata: The metadata of this UserResponse. # noqa: E501
:type: ResponseMetadata
"""
self._metadata = metadata
@property
def success(self):
"""Gets the success of this UserResponse. # noqa: E501
Indicates if API call was successful # noqa: E501
:return: The success of this UserResponse. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this UserResponse.
Indicates if API call was successful # noqa: E501
:param success: The success of this UserResponse. # noqa: E501
:type: bool
"""
self._success = success
@property
def user(self):
"""Gets the user of this UserResponse. # noqa: E501
:return: The user of this UserResponse. # noqa: E501
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this UserResponse.
:param user: The user of this UserResponse. # noqa: E501
:type: User
"""
self._user = user
@property
def warning(self):
"""Gets the warning of this UserResponse. # noqa: E501
:return: The warning of this UserResponse. # noqa: E501
:rtype: Warning
"""
return self._warning
@warning.setter
def warning(self, warning):
"""Sets the warning of this UserResponse.
:param warning: The warning of this UserResponse. # noqa: E501
:type: Warning
"""
self._warning = warning
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UserResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UserResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.662162 | 103 | 0.556258 |
ff8ce6003a83f36de6a833c7e86a4478fe1b56a7 | 909 | py | Python | python/problem-073.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | 1 | 2015-12-18T13:25:41.000Z | 2015-12-18T13:25:41.000Z | python/problem-073.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | python/problem-073.py | mbuhot/mbuhot-euler-solutions | 30066543cfd2d84976beb0605839750b64f4b8ef | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import sys
sys.setrecursionlimit(20000)
description = '''
Counting fractions in a range
Problem 73
Consider the fraction, n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction.
If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get:
1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8
It can be seen that there are 3 fractions between 1/3 and 1/2.
How many fractions lie between 1/3 and 1/2 in the sorted set of reduced proper fractions for d ≤ 12,000?
'''
# recursively count the fractions between x/a and y/b
# interestingly, the numerators are not needed at all!
def between(a, b, n):
if a + b > n: return 0
mid = a + b
return between(mid, a, n) + 1 + between(mid, b, n)
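# Editor's note (explanatory; not in the original file): this is mediant
# counting on the Stern-Brocot tree. For Farey neighbours such as 1/3 and
# 1/2, the in-between fraction with the smallest denominator is the mediant,
# whose denominator is a + b; the recursion counts that mediant plus all
# fractions between each endpoint and the mediant, pruning once a + b
# exceeds the denominator bound n.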
assert(between(3, 2, 8) == 3)
print(between(3, 2, 12000))
| 30.3 | 127 | 0.669967 |
19326dd3d843f6f124c4978f55f387a22a931506 | 244 | py | Python | confoo-2012/unittest_example/doctest_example.py | andymckay/presentations | 19e485f0ad5ca5e56779475659f31b6682e8016e | [
"CC-BY-3.0"
] | 3 | 2015-08-05T23:04:10.000Z | 2022-01-24T20:01:33.000Z | confoo-2012/unittest_example/doctest_example.py | andymckay/presentations | 19e485f0ad5ca5e56779475659f31b6682e8016e | [
"CC-BY-3.0"
] | null | null | null | confoo-2012/unittest_example/doctest_example.py | andymckay/presentations | 19e485f0ad5ca5e56779475659f31b6682e8016e | [
"CC-BY-3.0"
] | 4 | 2015-06-28T19:02:49.000Z | 2021-10-29T19:28:39.000Z | def minus(numbers):
"""
Subtract a list of numbers.
>>> minus([3, 1])
2
>>> minus([3, 2, 4])
-3
"""
    # 'reduce' moved to functools in Python 3; import it there for compatibility.
    from functools import reduce
    return reduce(lambda x, y: x - y, numbers)
if __name__ == '__main__':
import doctest
doctest.testmod()
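# Editor's note (not in the original file): running this file executes the
# doctests above; doctest.testmod() honours the -v flag, so
#     python doctest_example.py -v
# prints each example as it is checked.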
| 17.428571 | 46 | 0.52459 |
211655b9f4f39dae5a03b97d5ca6daed6f70dde3 | 570 | py | Python | scanner/model/scanover.py | WaIdo/InfoCollectTOOL | 8bc0d8e9436535d8947946a9c6cbf753045a4d5a | [
"MIT"
] | 8 | 2019-07-15T09:23:04.000Z | 2021-08-16T14:41:00.000Z | scanner/model/scanover.py | WaIdo/InfoCollectTOOL | 8bc0d8e9436535d8947946a9c6cbf753045a4d5a | [
"MIT"
] | null | null | null | scanner/model/scanover.py | WaIdo/InfoCollectTOOL | 8bc0d8e9436535d8947946a9c6cbf753045a4d5a | [
"MIT"
] | 8 | 2019-07-15T01:13:20.000Z | 2022-01-25T01:58:30.000Z | from ..orm import Base
from sqlalchemy import Column, Integer, String
from base64 import b64encode
import hashlib
class Scanover(Base):
__tablename__ = 'scan'
id = Column(Integer, primary_key=True)
url = Column(String(100), unique=True)
    token = Column(String(50), unique=True)
result = Column(String(3000))
def __init__(self, url="", result="[]"):
self.url = url
self.token = hashlib.md5(b64encode(self.url.encode('utf-8'))).hexdigest()
self.result = result
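    # Editor's note (not in the original file): the token column stores a
    # deterministic URL fingerprint, md5(base64(url)), so the same URL always
    # maps to the same unique row.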
def __repr__(self):
        return '<scan %r>' % (self.url)
 | 28.5 | 81 | 0.65614 |
b15227091680aa50ab88006ebcf6d123edd23077 | 66,214 | py | Python | third_party/flatbuffers/tests/py_test.py | EricWang1hitsz/osrm-backend | ff1af413d6c78f8e454584fe978d5468d984d74a | [
"BSD-2-Clause"
] | 4,526 | 2015-01-01T15:31:00.000Z | 2022-03-31T17:33:49.000Z | third_party/flatbuffers/tests/py_test.py | wsx9527/osrm-backend | 1e70b645e480946dad313b67f6a7d331baecfe3c | [
"BSD-2-Clause"
] | 4,497 | 2015-01-01T15:29:12.000Z | 2022-03-31T19:19:35.000Z | third_party/flatbuffers/tests/py_test.py | wsx9527/osrm-backend | 1e70b645e480946dad313b67f6a7d331baecfe3c | [
"BSD-2-Clause"
] | 3,023 | 2015-01-01T18:40:53.000Z | 2022-03-30T13:30:46.000Z | # coding=utf-8
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import sys
import imp
PY_VERSION = sys.version_info[:2]
import ctypes
from collections import defaultdict
import math
import random
import timeit
import unittest
from flatbuffers import compat
from flatbuffers import util
from flatbuffers.compat import range_func as compat_range
from flatbuffers.compat import NumpyRequiredForThisFeature
import flatbuffers
from flatbuffers import number_types as N
import MyGame # refers to generated code
import MyGame.Example # refers to generated code
import MyGame.Example.Any # refers to generated code
import MyGame.Example.Color # refers to generated code
import MyGame.Example.Monster # refers to generated code
import MyGame.Example.Test # refers to generated code
import MyGame.Example.Stat # refers to generated code
import MyGame.Example.Vec3 # refers to generated code
import MyGame.MonsterExtra # refers to generated code
import MyGame.Example.ArrayTable # refers to generated code
import MyGame.Example.ArrayStruct # refers to generated code
import MyGame.Example.NestedStruct # refers to generated code
import MyGame.Example.TestEnum # refers to generated code
def assertRaises(test_case, fn, exception_class):
''' Backwards-compatible assertion for exceptions raised. '''
exc = None
try:
fn()
except Exception as e:
exc = e
test_case.assertTrue(exc is not None)
test_case.assertTrue(isinstance(exc, exception_class))
class TestWireFormat(unittest.TestCase):
def test_wire_format(self):
# Verify that using the generated Python code builds a buffer without
# returning errors, and is interpreted correctly, for size prefixed
# representation and regular:
for sizePrefix in [True, False]:
for file_identifier in [None, b"MONS"]:
gen_buf, gen_off = make_monster_from_generated_code(sizePrefix=sizePrefix, file_identifier=file_identifier)
CheckReadBuffer(gen_buf, gen_off, sizePrefix=sizePrefix, file_identifier=file_identifier)
# Verify that the canonical flatbuffer file is readable by the
# generated Python code. Note that context managers are not part of
# Python 2.5, so we use the simpler open/close methods here:
f = open('monsterdata_test.mon', 'rb')
canonicalWireData = f.read()
f.close()
CheckReadBuffer(bytearray(canonicalWireData), 0, file_identifier=b'MONS')
# Write the generated buffer out to a file:
f = open('monsterdata_python_wire.mon', 'wb')
f.write(gen_buf[gen_off:])
f.close()
def CheckReadBuffer(buf, offset, sizePrefix=False, file_identifier=None):
''' CheckReadBuffer checks that the given buffer is evaluated correctly
as the example Monster. '''
def asserter(stmt):
''' An assertion helper that is separated from TestCase classes. '''
if not stmt:
raise AssertionError('CheckReadBuffer case failed')
if file_identifier:
# test prior to removal of size_prefix
asserter(util.GetBufferIdentifier(buf, offset, size_prefixed=sizePrefix) == file_identifier)
asserter(util.BufferHasIdentifier(buf, offset, file_identifier=file_identifier, size_prefixed=sizePrefix))
if sizePrefix:
size = util.GetSizePrefix(buf, offset)
asserter(size == len(buf[offset:])-4)
buf, offset = util.RemoveSizePrefix(buf, offset)
if file_identifier:
asserter(MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
else:
asserter(not MyGame.Example.Monster.Monster.MonsterBufferHasIdentifier(buf, offset))
monster = MyGame.Example.Monster.Monster.GetRootAsMonster(buf, offset)
asserter(monster.Hp() == 80)
asserter(monster.Mana() == 150)
asserter(monster.Name() == b'MyMonster')
# initialize a Vec3 from Pos()
vec = monster.Pos()
asserter(vec is not None)
# verify the properties of the Vec3
asserter(vec.X() == 1.0)
asserter(vec.Y() == 2.0)
asserter(vec.Z() == 3.0)
asserter(vec.Test1() == 3.0)
asserter(vec.Test2() == 2)
# initialize a Test from Test3(...)
t = MyGame.Example.Test.Test()
t = vec.Test3(t)
asserter(t is not None)
# verify the properties of the Test
asserter(t.A() == 5)
asserter(t.B() == 6)
# verify that the enum code matches the enum declaration:
union_type = MyGame.Example.Any.Any
asserter(monster.TestType() == union_type.Monster)
# initialize a Table from a union field Test(...)
table2 = monster.Test()
asserter(type(table2) is flatbuffers.table.Table)
# initialize a Monster from the Table from the union
monster2 = MyGame.Example.Monster.Monster()
monster2.Init(table2.Bytes, table2.Pos)
asserter(monster2.Name() == b"Fred")
# iterate through the first monster's inventory:
asserter(monster.InventoryLength() == 5)
invsum = 0
for i in compat_range(monster.InventoryLength()):
v = monster.Inventory(i)
invsum += int(v)
asserter(invsum == 10)
for i in range(5):
asserter(monster.VectorOfLongs(i) == 10 ** (i * 2))
asserter(([-1.7976931348623157e+308, 0, 1.7976931348623157e+308]
== [monster.VectorOfDoubles(i)
for i in range(monster.VectorOfDoublesLength())]))
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
asserter(monster.InventoryAsNumpy().sum() == 10)
asserter(monster.InventoryAsNumpy().dtype == np.dtype('uint8'))
VectorOfLongs = monster.VectorOfLongsAsNumpy()
asserter(VectorOfLongs.dtype == np.dtype('int64'))
for i in range(5):
asserter(VectorOfLongs[i] == 10 ** (i * 2))
VectorOfDoubles = monster.VectorOfDoublesAsNumpy()
asserter(VectorOfDoubles.dtype == np.dtype('float64'))
asserter(VectorOfDoubles[0] == np.finfo('float64').min)
asserter(VectorOfDoubles[1] == 0.0)
asserter(VectorOfDoubles[2] == np.finfo('float64').max)
except ImportError:
# If numpy does not exist, trying to get vector as numpy
# array should raise NumpyRequiredForThisFeature. The way
# assertRaises has been implemented prevents us from
# asserting this error is raised outside of a test case.
pass
asserter(monster.Test4Length() == 2)
# create a 'Test' object and populate it:
test0 = monster.Test4(0)
asserter(type(test0) is MyGame.Example.Test.Test)
test1 = monster.Test4(1)
asserter(type(test1) is MyGame.Example.Test.Test)
# the position of test0 and test1 are swapped in monsterdata_java_wire
# and monsterdata_test_wire, so ignore ordering
v0 = test0.A()
v1 = test0.B()
v2 = test1.A()
v3 = test1.B()
sumtest12 = int(v0) + int(v1) + int(v2) + int(v3)
asserter(sumtest12 == 100)
asserter(monster.TestarrayofstringLength() == 2)
asserter(monster.Testarrayofstring(0) == b"test1")
asserter(monster.Testarrayofstring(1) == b"test2")
asserter(monster.TestarrayoftablesLength() == 0)
asserter(monster.TestnestedflatbufferLength() == 0)
asserter(monster.Testempty() is None)
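# Editor's sketch (illustrative; not part of the original test file): the
# smallest builder round trip behind buffers like the one checked above.
def _example_minimal_monster():
    b = flatbuffers.Builder(0)
    name = b.CreateString("MyMonster")
    MyGame.Example.Monster.MonsterStart(b)
    MyGame.Example.Monster.MonsterAddName(b, name)
    b.Finish(MyGame.Example.Monster.MonsterEnd(b))
    return b.Output()  # bytearray holding the finished buffer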
class TestFuzz(unittest.TestCase):
''' Low level stress/fuzz test: serialize/deserialize a variety of
different kinds of data in different combinations '''
binary_type = compat.binary_types[0] # this will always exist
ofInt32Bytes = binary_type([0x83, 0x33, 0x33, 0x33])
ofInt64Bytes = binary_type([0x84, 0x44, 0x44, 0x44,
0x44, 0x44, 0x44, 0x44])
overflowingInt32Val = flatbuffers.encode.Get(flatbuffers.packer.int32,
ofInt32Bytes, 0)
overflowingInt64Val = flatbuffers.encode.Get(flatbuffers.packer.int64,
ofInt64Bytes, 0)
# Values we're testing against: chosen to ensure no bits get chopped
    # off anywhere, and also be different from each other.
boolVal = True
int8Val = N.Int8Flags.py_type(-127) # 0x81
uint8Val = N.Uint8Flags.py_type(0xFF)
int16Val = N.Int16Flags.py_type(-32222) # 0x8222
uint16Val = N.Uint16Flags.py_type(0xFEEE)
int32Val = N.Int32Flags.py_type(overflowingInt32Val)
uint32Val = N.Uint32Flags.py_type(0xFDDDDDDD)
int64Val = N.Int64Flags.py_type(overflowingInt64Val)
uint64Val = N.Uint64Flags.py_type(0xFCCCCCCCCCCCCCCC)
# Python uses doubles, so force it here
float32Val = N.Float32Flags.py_type(ctypes.c_float(3.14159).value)
float64Val = N.Float64Flags.py_type(3.14159265359)
def test_fuzz(self):
return self.check_once(11, 100)
def check_once(self, fuzzFields, fuzzObjects):
testValuesMax = 11 # hardcoded to the number of scalar types
builder = flatbuffers.Builder(0)
l = LCG()
objects = [0 for _ in compat_range(fuzzObjects)]
# Generate fuzzObjects random objects each consisting of
# fuzzFields fields, each of a random type.
for i in compat_range(fuzzObjects):
builder.StartObject(fuzzFields)
for j in compat_range(fuzzFields):
choice = int(l.Next()) % testValuesMax
if choice == 0:
builder.PrependBoolSlot(int(j), self.boolVal, False)
elif choice == 1:
builder.PrependInt8Slot(int(j), self.int8Val, 0)
elif choice == 2:
builder.PrependUint8Slot(int(j), self.uint8Val, 0)
elif choice == 3:
builder.PrependInt16Slot(int(j), self.int16Val, 0)
elif choice == 4:
builder.PrependUint16Slot(int(j), self.uint16Val, 0)
elif choice == 5:
builder.PrependInt32Slot(int(j), self.int32Val, 0)
elif choice == 6:
builder.PrependUint32Slot(int(j), self.uint32Val, 0)
elif choice == 7:
builder.PrependInt64Slot(int(j), self.int64Val, 0)
elif choice == 8:
builder.PrependUint64Slot(int(j), self.uint64Val, 0)
elif choice == 9:
builder.PrependFloat32Slot(int(j), self.float32Val, 0)
elif choice == 10:
builder.PrependFloat64Slot(int(j), self.float64Val, 0)
else:
raise RuntimeError('unreachable')
off = builder.EndObject()
# store the offset from the end of the builder buffer,
# since it will keep growing:
objects[i] = off
# Do some bookkeeping to generate stats on fuzzes:
stats = defaultdict(int)
def check(table, desc, want, got):
stats[desc] += 1
self.assertEqual(want, got, "%s != %s, %s" % (want, got, desc))
l = LCG() # Reset.
# Test that all objects we generated are readable and return the
# expected values. We generate random objects in the same order
# so this is deterministic.
for i in compat_range(fuzzObjects):
table = flatbuffers.table.Table(builder.Bytes,
len(builder.Bytes) - objects[i])
for j in compat_range(fuzzFields):
field_count = flatbuffers.builder.VtableMetadataFields + j
f = N.VOffsetTFlags.py_type(field_count *
N.VOffsetTFlags.bytewidth)
choice = int(l.Next()) % testValuesMax
if choice == 0:
check(table, "bool", self.boolVal,
table.GetSlot(f, False, N.BoolFlags))
elif choice == 1:
check(table, "int8", self.int8Val,
table.GetSlot(f, 0, N.Int8Flags))
elif choice == 2:
check(table, "uint8", self.uint8Val,
table.GetSlot(f, 0, N.Uint8Flags))
elif choice == 3:
check(table, "int16", self.int16Val,
table.GetSlot(f, 0, N.Int16Flags))
elif choice == 4:
check(table, "uint16", self.uint16Val,
table.GetSlot(f, 0, N.Uint16Flags))
elif choice == 5:
check(table, "int32", self.int32Val,
table.GetSlot(f, 0, N.Int32Flags))
elif choice == 6:
check(table, "uint32", self.uint32Val,
table.GetSlot(f, 0, N.Uint32Flags))
elif choice == 7:
check(table, "int64", self.int64Val,
table.GetSlot(f, 0, N.Int64Flags))
elif choice == 8:
check(table, "uint64", self.uint64Val,
table.GetSlot(f, 0, N.Uint64Flags))
elif choice == 9:
check(table, "float32", self.float32Val,
table.GetSlot(f, 0, N.Float32Flags))
elif choice == 10:
check(table, "float64", self.float64Val,
table.GetSlot(f, 0, N.Float64Flags))
else:
raise RuntimeError('unreachable')
# If enough checks were made, verify that all scalar types were used:
self.assertEqual(testValuesMax, len(stats),
"fuzzing failed to test all scalar types: %s" % stats)
class TestByteLayout(unittest.TestCase):
''' TestByteLayout checks the bytes of a Builder in various scenarios. '''
def assertBuilderEquals(self, builder, want_chars_or_ints):
def integerize(x):
if isinstance(x, compat.string_types):
return ord(x)
return x
want_ints = list(map(integerize, want_chars_or_ints))
want = bytearray(want_ints)
got = builder.Bytes[builder.Head():] # use the buffer directly
self.assertEqual(want, got)
def test_numbers(self):
b = flatbuffers.Builder(0)
self.assertBuilderEquals(b, [])
b.PrependBool(True)
self.assertBuilderEquals(b, [1])
b.PrependInt8(-127)
self.assertBuilderEquals(b, [129, 1])
b.PrependUint8(255)
self.assertBuilderEquals(b, [255, 129, 1])
b.PrependInt16(-32222)
self.assertBuilderEquals(b, [0x22, 0x82, 0, 255, 129, 1]) # first pad
b.PrependUint16(0xFEEE)
# no pad this time:
self.assertBuilderEquals(b, [0xEE, 0xFE, 0x22, 0x82, 0, 255, 129, 1])
b.PrependInt32(-53687092)
self.assertBuilderEquals(b, [204, 204, 204, 252, 0xEE, 0xFE,
0x22, 0x82, 0, 255, 129, 1])
b.PrependUint32(0x98765432)
self.assertBuilderEquals(b, [0x32, 0x54, 0x76, 0x98,
204, 204, 204, 252,
0xEE, 0xFE, 0x22, 0x82,
0, 255, 129, 1])
def test_numbers64(self):
b = flatbuffers.Builder(0)
b.PrependUint64(0x1122334455667788)
self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
0x44, 0x33, 0x22, 0x11])
b = flatbuffers.Builder(0)
b.PrependInt64(0x1122334455667788)
self.assertBuilderEquals(b, [0x88, 0x77, 0x66, 0x55,
0x44, 0x33, 0x22, 0x11])
def test_1xbyte_vector(self):
b = flatbuffers.Builder(0)
self.assertBuilderEquals(b, [])
b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 1, 1)
self.assertBuilderEquals(b, [0, 0, 0]) # align to 4bytes
b.PrependByte(1)
self.assertBuilderEquals(b, [1, 0, 0, 0])
b.EndVector(1)
self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0]) # padding
def test_2xbyte_vector(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 2, 1)
self.assertBuilderEquals(b, [0, 0]) # align to 4bytes
b.PrependByte(1)
self.assertBuilderEquals(b, [1, 0, 0])
b.PrependByte(2)
self.assertBuilderEquals(b, [2, 1, 0, 0])
b.EndVector(2)
self.assertBuilderEquals(b, [2, 0, 0, 0, 2, 1, 0, 0]) # padding
def test_1xuint16_vector(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 1, 1)
self.assertBuilderEquals(b, [0, 0]) # align to 4bytes
b.PrependUint16(1)
self.assertBuilderEquals(b, [1, 0, 0, 0])
b.EndVector(1)
self.assertBuilderEquals(b, [1, 0, 0, 0, 1, 0, 0, 0]) # padding
def test_2xuint16_vector(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Uint16Flags.bytewidth, 2, 1)
self.assertBuilderEquals(b, []) # align to 4bytes
b.PrependUint16(0xABCD)
self.assertBuilderEquals(b, [0xCD, 0xAB])
b.PrependUint16(0xDCBA)
self.assertBuilderEquals(b, [0xBA, 0xDC, 0xCD, 0xAB])
b.EndVector(2)
self.assertBuilderEquals(b, [2, 0, 0, 0, 0xBA, 0xDC, 0xCD, 0xAB])
def test_create_ascii_string(self):
b = flatbuffers.Builder(0)
b.CreateString(u"foo", encoding='ascii')
# 0-terminated, no pad:
self.assertBuilderEquals(b, [3, 0, 0, 0, 'f', 'o', 'o', 0])
b.CreateString(u"moop", encoding='ascii')
# 0-terminated, 3-byte pad:
self.assertBuilderEquals(b, [4, 0, 0, 0, 'm', 'o', 'o', 'p',
0, 0, 0, 0,
3, 0, 0, 0, 'f', 'o', 'o', 0])
def test_create_utf8_string(self):
b = flatbuffers.Builder(0)
b.CreateString(u"Цлїςσδε")
self.assertBuilderEquals(b, "\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
"\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")
b.CreateString(u"フムアムカモケモ")
self.assertBuilderEquals(b, "\x18\x00\x00\x00\xef\xbe\x8c\xef\xbe\x91" \
"\xef\xbd\xb1\xef\xbe\x91\xef\xbd\xb6\xef\xbe\x93\xef\xbd\xb9\xef" \
"\xbe\x93\x00\x00\x00\x00\x0e\x00\x00\x00\xd0\xa6\xd0\xbb\xd1\x97" \
"\xcf\x82\xcf\x83\xce\xb4\xce\xb5\x00\x00")
def test_create_arbitrary_string(self):
b = flatbuffers.Builder(0)
s = "\x01\x02\x03"
b.CreateString(s) # Default encoding is utf-8.
# 0-terminated, no pad:
self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])
s2 = "\x04\x05\x06\x07"
b.CreateString(s2) # Default encoding is utf-8.
# 0-terminated, 3-byte pad:
self.assertBuilderEquals(b, [4, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0,
3, 0, 0, 0, 1, 2, 3, 0])
def test_create_byte_vector(self):
b = flatbuffers.Builder(0)
b.CreateByteVector(b"")
# 0-byte pad:
self.assertBuilderEquals(b, [0, 0, 0, 0])
b = flatbuffers.Builder(0)
b.CreateByteVector(b"\x01\x02\x03")
# 1-byte pad:
self.assertBuilderEquals(b, [3, 0, 0, 0, 1, 2, 3, 0])
def test_create_numpy_vector_int8(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -3], dtype=np.int8)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 2, 256 - 3, 0 # vector value + padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 2, 256 - 3, 0 # vector value + padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_uint16(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, 312], dtype=np.uint16)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, # 1
2, 0, # 2
312 - 256, 1, # 312
0, 0 # padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, # 1
2, 0, # 2
312 - 256, 1, # 312
0, 0 # padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_int64(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.int64)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 0, 0, 0, 0, 0, 0, # 1
2, 0, 0, 0, 0, 0, 0, 0, # 2
256 - 12, 255, 255, 255, 255, 255, 255, 255 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 0, 0, 0, 0, 0, 0, # 1
2, 0, 0, 0, 0, 0, 0, 0, # 2
256 - 12, 255, 255, 255, 255, 255, 255, 255 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_float32(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.float32)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 128, 63, # 1
0, 0, 0, 64, # 2
0, 0, 64, 193 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 128, 63, # 1
0, 0, 0, 64, # 2
0, 0, 64, 193 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_float64(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
x = np.array([1, 2, -12], dtype=np.float64)
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 0, 0, 0, 0, 240, 63, # 1
0, 0, 0, 0, 0, 0, 0, 64, # 2
0, 0, 0, 0, 0, 0, 40, 192 # -12
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
0, 0, 0, 0, 0, 0, 240, 63, # 1
0, 0, 0, 0, 0, 0, 0, 64, # 2
0, 0, 0, 0, 0, 0, 40, 192 # -12
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_bool(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Systems endian:
b = flatbuffers.Builder(0)
            x = np.array([True, False, True], dtype=np.bool_)  # np.bool alias removed in NumPy 1.24
b.CreateNumpyVector(x)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 1, 0 # vector values + padding
])
# Reverse endian:
b = flatbuffers.Builder(0)
x_other_endian = x.byteswap().newbyteorder()
b.CreateNumpyVector(x_other_endian)
self.assertBuilderEquals(b, [
3, 0, 0, 0, # vector length
1, 0, 1, 0 # vector values + padding
])
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_reject_strings(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Create String array
b = flatbuffers.Builder(0)
x = np.array(["hello", "fb", "testing"])
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
TypeError)
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_create_numpy_vector_reject_object(self):
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
import numpy as np
# Create String array
b = flatbuffers.Builder(0)
x = np.array([{"m": 0}, {"as": -2.1, 'c': 'c'}])
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
TypeError)
except ImportError:
b = flatbuffers.Builder(0)
x = 0
assertRaises(
self,
lambda: b.CreateNumpyVector(x),
NumpyRequiredForThisFeature)
def test_empty_vtable(self):
b = flatbuffers.Builder(0)
b.StartObject(0)
self.assertBuilderEquals(b, [])
b.EndObject()
self.assertBuilderEquals(b, [4, 0, 4, 0, 4, 0, 0, 0])
def test_vtable_with_one_true_bool(self):
b = flatbuffers.Builder(0)
self.assertBuilderEquals(b, [])
b.StartObject(1)
self.assertBuilderEquals(b, [])
b.PrependBoolSlot(0, True, False)
b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
8, 0, # length of object including vtable offset
7, 0, # start of bool value
6, 0, 0, 0, # offset for start of vtable (int32)
0, 0, 0, # padded to 4 bytes
1, # bool value
])
def test_vtable_with_one_default_bool(self):
b = flatbuffers.Builder(0)
self.assertBuilderEquals(b, [])
b.StartObject(1)
self.assertBuilderEquals(b, [])
b.PrependBoolSlot(0, False, False)
b.EndObject()
self.assertBuilderEquals(b, [
4, 0, # vtable bytes
4, 0, # end of object from here
# entry 1 is zero and not stored
4, 0, 0, 0, # offset for start of vtable (int32)
])
def test_vtable_with_one_int16(self):
b = flatbuffers.Builder(0)
b.StartObject(1)
b.PrependInt16Slot(0, 0x789A, 0)
b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
8, 0, # end of object from here
6, 0, # offset to value
6, 0, 0, 0, # offset for start of vtable (int32)
0, 0, # padding to 4 bytes
0x9A, 0x78,
])
def test_vtable_with_two_int16(self):
b = flatbuffers.Builder(0)
b.StartObject(2)
b.PrependInt16Slot(0, 0x3456, 0)
b.PrependInt16Slot(1, 0x789A, 0)
b.EndObject()
self.assertBuilderEquals(b, [
8, 0, # vtable bytes
8, 0, # end of object from here
6, 0, # offset to value 0
4, 0, # offset to value 1
8, 0, 0, 0, # offset for start of vtable (int32)
0x9A, 0x78, # value 1
0x56, 0x34, # value 0
])
def test_vtable_with_int16_and_bool(self):
b = flatbuffers.Builder(0)
b.StartObject(2)
b.PrependInt16Slot(0, 0x3456, 0)
b.PrependBoolSlot(1, True, False)
b.EndObject()
self.assertBuilderEquals(b, [
8, 0, # vtable bytes
8, 0, # end of object from here
6, 0, # offset to value 0
5, 0, # offset to value 1
8, 0, 0, 0, # offset for start of vtable (int32)
0, # padding
1, # value 1
0x56, 0x34, # value 0
])
def test_vtable_with_empty_vector(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
vecend = b.EndVector(0)
b.StartObject(1)
b.PrependUOffsetTRelativeSlot(0, vecend, 0)
b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
8, 0,
4, 0, # offset to vector offset
6, 0, 0, 0, # offset for start of vtable (int32)
4, 0, 0, 0,
0, 0, 0, 0, # length of vector (not in struct)
])
def test_vtable_with_empty_vector_of_byte_and_some_scalars(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Uint8Flags.bytewidth, 0, 1)
vecend = b.EndVector(0)
b.StartObject(2)
b.PrependInt16Slot(0, 55, 0)
b.PrependUOffsetTRelativeSlot(1, vecend, 0)
b.EndObject()
self.assertBuilderEquals(b, [
8, 0, # vtable bytes
12, 0,
10, 0, # offset to value 0
4, 0, # offset to vector offset
8, 0, 0, 0, # vtable loc
8, 0, 0, 0, # value 1
0, 0, 55, 0, # value 0
0, 0, 0, 0, # length of vector (not in struct)
])
def test_vtable_with_1_int16_and_2vector_of_int16(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Int16Flags.bytewidth, 2, 1)
b.PrependInt16(0x1234)
b.PrependInt16(0x5678)
vecend = b.EndVector(2)
b.StartObject(2)
b.PrependUOffsetTRelativeSlot(1, vecend, 0)
b.PrependInt16Slot(0, 55, 0)
b.EndObject()
self.assertBuilderEquals(b, [
8, 0, # vtable bytes
12, 0, # length of object
6, 0, # start of value 0 from end of vtable
8, 0, # start of value 1 from end of buffer
8, 0, 0, 0, # offset for start of vtable (int32)
0, 0, # padding
55, 0, # value 0
4, 0, 0, 0, # vector position from here
2, 0, 0, 0, # length of vector (uint32)
0x78, 0x56, # vector value 1
0x34, 0x12, # vector value 0
])
def test_vtable_with_1_struct_of_1_int8__1_int16__1_int32(self):
b = flatbuffers.Builder(0)
b.StartObject(1)
b.Prep(4+4+4, 0)
b.PrependInt8(55)
b.Pad(3)
b.PrependInt16(0x1234)
b.Pad(2)
b.PrependInt32(0x12345678)
structStart = b.Offset()
b.PrependStructSlot(0, structStart, 0)
b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
16, 0, # end of object from here
4, 0, # start of struct from here
6, 0, 0, 0, # offset for start of vtable (int32)
0x78, 0x56, 0x34, 0x12, # value 2
0, 0, # padding
0x34, 0x12, # value 1
0, 0, 0, # padding
55, # value 0
])
def test_vtable_with_1_vector_of_2_struct_of_2_int8(self):
b = flatbuffers.Builder(0)
b.StartVector(flatbuffers.number_types.Int8Flags.bytewidth*2, 2, 1)
b.PrependInt8(33)
b.PrependInt8(44)
b.PrependInt8(55)
b.PrependInt8(66)
vecend = b.EndVector(2)
b.StartObject(1)
b.PrependUOffsetTRelativeSlot(0, vecend, 0)
b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
8, 0,
4, 0, # offset of vector offset
6, 0, 0, 0, # offset for start of vtable (int32)
4, 0, 0, 0, # vector start offset
2, 0, 0, 0, # vector length
66, # vector value 1,1
55, # vector value 1,0
44, # vector value 0,1
33, # vector value 0,0
])
def test_table_with_some_elements(self):
b = flatbuffers.Builder(0)
b.StartObject(2)
b.PrependInt8Slot(0, 33, 0)
b.PrependInt16Slot(1, 66, 0)
off = b.EndObject()
b.Finish(off)
self.assertBuilderEquals(b, [
12, 0, 0, 0, # root of table: points to vtable offset
8, 0, # vtable bytes
8, 0, # end of object from here
7, 0, # start of value 0
4, 0, # start of value 1
8, 0, 0, 0, # offset for start of vtable (int32)
66, 0, # value 1
0, # padding
33, # value 0
])
def test__one_unfinished_table_and_one_finished_table(self):
b = flatbuffers.Builder(0)
b.StartObject(2)
b.PrependInt8Slot(0, 33, 0)
b.PrependInt8Slot(1, 44, 0)
off = b.EndObject()
b.Finish(off)
b.StartObject(3)
b.PrependInt8Slot(0, 55, 0)
b.PrependInt8Slot(1, 66, 0)
b.PrependInt8Slot(2, 77, 0)
off = b.EndObject()
b.Finish(off)
self.assertBuilderEquals(b, [
16, 0, 0, 0, # root of table: points to object
0, 0, # padding
10, 0, # vtable bytes
8, 0, # size of object
7, 0, # start of value 0
6, 0, # start of value 1
5, 0, # start of value 2
10, 0, 0, 0, # offset for start of vtable (int32)
0, # padding
77, # value 2
66, # value 1
55, # value 0
12, 0, 0, 0, # root of table: points to object
8, 0, # vtable bytes
8, 0, # size of object
7, 0, # start of value 0
6, 0, # start of value 1
8, 0, 0, 0, # offset for start of vtable (int32)
0, 0, # padding
44, # value 1
33, # value 0
])
def test_a_bunch_of_bools(self):
b = flatbuffers.Builder(0)
b.StartObject(8)
b.PrependBoolSlot(0, True, False)
b.PrependBoolSlot(1, True, False)
b.PrependBoolSlot(2, True, False)
b.PrependBoolSlot(3, True, False)
b.PrependBoolSlot(4, True, False)
b.PrependBoolSlot(5, True, False)
b.PrependBoolSlot(6, True, False)
b.PrependBoolSlot(7, True, False)
off = b.EndObject()
b.Finish(off)
self.assertBuilderEquals(b, [
24, 0, 0, 0, # root of table: points to vtable offset
20, 0, # vtable bytes
12, 0, # size of object
11, 0, # start of value 0
10, 0, # start of value 1
9, 0, # start of value 2
8, 0, # start of value 3
7, 0, # start of value 4
6, 0, # start of value 5
5, 0, # start of value 6
4, 0, # start of value 7
20, 0, 0, 0, # vtable offset
1, # value 7
1, # value 6
1, # value 5
1, # value 4
1, # value 3
1, # value 2
1, # value 1
1, # value 0
])
def test_three_bools(self):
b = flatbuffers.Builder(0)
b.StartObject(3)
b.PrependBoolSlot(0, True, False)
b.PrependBoolSlot(1, True, False)
b.PrependBoolSlot(2, True, False)
off = b.EndObject()
b.Finish(off)
self.assertBuilderEquals(b, [
16, 0, 0, 0, # root of table: points to vtable offset
0, 0, # padding
10, 0, # vtable bytes
8, 0, # size of object
7, 0, # start of value 0
6, 0, # start of value 1
5, 0, # start of value 2
10, 0, 0, 0, # vtable offset from here
0, # padding
1, # value 2
1, # value 1
1, # value 0
])
def test_some_floats(self):
b = flatbuffers.Builder(0)
b.StartObject(1)
b.PrependFloat32Slot(0, 1.0, 0.0)
off = b.EndObject()
self.assertBuilderEquals(b, [
6, 0, # vtable bytes
8, 0, # size of object
4, 0, # start of value 0
6, 0, 0, 0, # vtable offset
0, 0, 128, 63, # value 0
])
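# Note on the float bytes above: 0, 0, 128, 63 is 1.0 encoded as a
# little-endian IEEE-754 float32 (0x3F800000), which is the layout
# PrependFloat32Slot writes.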
def make_monster_from_generated_code(sizePrefix=False, file_identifier=None):
''' Use generated code to build the example Monster. '''
b = flatbuffers.Builder(0)
string = b.CreateString("MyMonster")
test1 = b.CreateString("test1")
test2 = b.CreateString("test2")
fred = b.CreateString("Fred")
MyGame.Example.Monster.MonsterStartInventoryVector(b, 5)
b.PrependByte(4)
b.PrependByte(3)
b.PrependByte(2)
b.PrependByte(1)
b.PrependByte(0)
inv = b.EndVector(5)
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddName(b, fred)
mon2 = MyGame.Example.Monster.MonsterEnd(b)
MyGame.Example.Monster.MonsterStartTest4Vector(b, 2)
MyGame.Example.Test.CreateTest(b, 10, 20)
MyGame.Example.Test.CreateTest(b, 30, 40)
test4 = b.EndVector(2)
MyGame.Example.Monster.MonsterStartTestarrayofstringVector(b, 2)
b.PrependUOffsetTRelative(test2)
b.PrependUOffsetTRelative(test1)
testArrayOfString = b.EndVector(2)
MyGame.Example.Monster.MonsterStartVectorOfLongsVector(b, 5)
b.PrependInt64(100000000)
b.PrependInt64(1000000)
b.PrependInt64(10000)
b.PrependInt64(100)
b.PrependInt64(1)
VectorOfLongs = b.EndVector(5)
MyGame.Example.Monster.MonsterStartVectorOfDoublesVector(b, 3)
b.PrependFloat64(1.7976931348623157e+308)
b.PrependFloat64(0)
b.PrependFloat64(-1.7976931348623157e+308)
VectorOfDoubles = b.EndVector(3)
MyGame.Example.Monster.MonsterStart(b)
pos = MyGame.Example.Vec3.CreateVec3(b, 1.0, 2.0, 3.0, 3.0, 2, 5, 6)
MyGame.Example.Monster.MonsterAddPos(b, pos)
MyGame.Example.Monster.MonsterAddHp(b, 80)
MyGame.Example.Monster.MonsterAddName(b, string)
MyGame.Example.Monster.MonsterAddInventory(b, inv)
MyGame.Example.Monster.MonsterAddTestType(b, 1)
MyGame.Example.Monster.MonsterAddTest(b, mon2)
MyGame.Example.Monster.MonsterAddTest4(b, test4)
MyGame.Example.Monster.MonsterAddTestarrayofstring(b, testArrayOfString)
MyGame.Example.Monster.MonsterAddVectorOfLongs(b, VectorOfLongs)
MyGame.Example.Monster.MonsterAddVectorOfDoubles(b, VectorOfDoubles)
mon = MyGame.Example.Monster.MonsterEnd(b)
if sizePrefix:
b.FinishSizePrefixed(mon, file_identifier)
else:
b.Finish(mon, file_identifier)
return b.Bytes, b.Head()
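# Minimal read-back sketch (illustrative, not part of the original suite):
# decoding the buffer built above with the same generated accessors.
def _example_read_monster_back():
    buf, off = make_monster_from_generated_code()
    mon = MyGame.Example.Monster.Monster.GetRootAsMonster(buf, off)
    assert mon.Hp() == 80
    assert mon.Name() == b'MyMonster'
    assert mon.InventoryLength() == 5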
class TestAllCodePathsOfExampleSchema(unittest.TestCase):
def setUp(self, *args, **kwargs):
super(TestAllCodePathsOfExampleSchema, self).setUp(*args, **kwargs)
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterStart(b)
gen_mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(gen_mon)
self.mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
def test_default_monster_pos(self):
self.assertTrue(self.mon.Pos() is None)
def test_nondefault_monster_mana(self):
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddMana(b, 50)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
got_mon = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(50, got_mon.Mana())
def test_default_monster_hp(self):
self.assertEqual(100, self.mon.Hp())
def test_default_monster_name(self):
self.assertEqual(None, self.mon.Name())
def test_default_monster_inventory_item(self):
self.assertEqual(0, self.mon.Inventory(0))
def test_default_monster_inventory_length(self):
self.assertEqual(0, self.mon.InventoryLength())
def test_default_monster_color(self):
self.assertEqual(MyGame.Example.Color.Color.Blue, self.mon.Color())
def test_nondefault_monster_color(self):
b = flatbuffers.Builder(0)
color = MyGame.Example.Color.Color.Red
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddColor(b, color)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(MyGame.Example.Color.Color.Red, mon2.Color())
def test_default_monster_testtype(self):
self.assertEqual(0, self.mon.TestType())
def test_default_monster_test_field(self):
self.assertEqual(None, self.mon.Test())
def test_default_monster_test4_item(self):
self.assertEqual(None, self.mon.Test4(0))
def test_default_monster_test4_length(self):
self.assertEqual(0, self.mon.Test4Length())
def test_default_monster_testarrayofstring(self):
self.assertEqual("", self.mon.Testarrayofstring(0))
def test_default_monster_testarrayofstring_length(self):
self.assertEqual(0, self.mon.TestarrayofstringLength())
def test_default_monster_testarrayoftables(self):
self.assertEqual(None, self.mon.Testarrayoftables(0))
def test_nondefault_monster_testarrayoftables(self):
b = flatbuffers.Builder(0)
# make a child Monster within a vector of Monsters:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddHp(b, 99)
sub_monster = MyGame.Example.Monster.MonsterEnd(b)
# build the vector:
MyGame.Example.Monster.MonsterStartTestarrayoftablesVector(b, 1)
b.PrependUOffsetTRelative(sub_monster)
vec = b.EndVector(1)
# make the parent monster and include the vector of Monster:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddTestarrayoftables(b, vec)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Output(), 0)
self.assertEqual(99, mon2.Testarrayoftables(0).Hp())
self.assertEqual(1, mon2.TestarrayoftablesLength())
def test_default_monster_testarrayoftables_length(self):
self.assertEqual(0, self.mon.TestarrayoftablesLength())
def test_nondefault_monster_enemy(self):
b = flatbuffers.Builder(0)
# make an Enemy object:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddHp(b, 88)
enemy = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(enemy)
# make the parent monster and include the vector of Monster:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddEnemy(b, enemy)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(88, mon2.Enemy().Hp())
def test_default_monster_testnestedflatbuffer(self):
self.assertEqual(0, self.mon.Testnestedflatbuffer(0))
def test_default_monster_testnestedflatbuffer_length(self):
self.assertEqual(0, self.mon.TestnestedflatbufferLength())
def test_nondefault_monster_testnestedflatbuffer(self):
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterStartTestnestedflatbufferVector(b, 3)
b.PrependByte(4)
b.PrependByte(2)
b.PrependByte(0)
sub_buf = b.EndVector(3)
# make the parent monster and include the vector of Monster:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddTestnestedflatbuffer(b, sub_buf)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(3, mon2.TestnestedflatbufferLength())
self.assertEqual(0, mon2.Testnestedflatbuffer(0))
self.assertEqual(2, mon2.Testnestedflatbuffer(1))
self.assertEqual(4, mon2.Testnestedflatbuffer(2))
try:
imp.find_module('numpy')
# if numpy exists, then we should be able to get the
# vector as a numpy array
self.assertEqual([0, 2, 4], mon2.TestnestedflatbufferAsNumpy().tolist())
except ImportError:
assertRaises(self,
lambda: mon2.TestnestedflatbufferAsNumpy(),
NumpyRequiredForThisFeature)
def test_nondefault_monster_testempty(self):
b = flatbuffers.Builder(0)
# make a Stat object:
MyGame.Example.Stat.StatStart(b)
MyGame.Example.Stat.StatAddVal(b, 123)
my_stat = MyGame.Example.Stat.StatEnd(b)
b.Finish(my_stat)
# include the stat object in a monster:
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddTestempty(b, my_stat)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(123, mon2.Testempty().Val())
def test_default_monster_testbool(self):
self.assertFalse(self.mon.Testbool())
def test_nondefault_monster_testbool(self):
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddTestbool(b, True)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertTrue(mon2.Testbool())
def test_default_monster_testhashes(self):
self.assertEqual(0, self.mon.Testhashs32Fnv1())
self.assertEqual(0, self.mon.Testhashu32Fnv1())
self.assertEqual(0, self.mon.Testhashs64Fnv1())
self.assertEqual(0, self.mon.Testhashu64Fnv1())
self.assertEqual(0, self.mon.Testhashs32Fnv1a())
self.assertEqual(0, self.mon.Testhashu32Fnv1a())
self.assertEqual(0, self.mon.Testhashs64Fnv1a())
self.assertEqual(0, self.mon.Testhashu64Fnv1a())
def test_nondefault_monster_testhashes(self):
b = flatbuffers.Builder(0)
MyGame.Example.Monster.MonsterStart(b)
MyGame.Example.Monster.MonsterAddTesthashs32Fnv1(b, 1)
MyGame.Example.Monster.MonsterAddTesthashu32Fnv1(b, 2)
MyGame.Example.Monster.MonsterAddTesthashs64Fnv1(b, 3)
MyGame.Example.Monster.MonsterAddTesthashu64Fnv1(b, 4)
MyGame.Example.Monster.MonsterAddTesthashs32Fnv1a(b, 5)
MyGame.Example.Monster.MonsterAddTesthashu32Fnv1a(b, 6)
MyGame.Example.Monster.MonsterAddTesthashs64Fnv1a(b, 7)
MyGame.Example.Monster.MonsterAddTesthashu64Fnv1a(b, 8)
mon = MyGame.Example.Monster.MonsterEnd(b)
b.Finish(mon)
# inspect the resulting data:
mon2 = MyGame.Example.Monster.Monster.GetRootAsMonster(b.Bytes,
b.Head())
self.assertEqual(1, mon2.Testhashs32Fnv1())
self.assertEqual(2, mon2.Testhashu32Fnv1())
self.assertEqual(3, mon2.Testhashs64Fnv1())
self.assertEqual(4, mon2.Testhashu64Fnv1())
self.assertEqual(5, mon2.Testhashs32Fnv1a())
self.assertEqual(6, mon2.Testhashu32Fnv1a())
self.assertEqual(7, mon2.Testhashs64Fnv1a())
self.assertEqual(8, mon2.Testhashu64Fnv1a())
def test_getrootas_for_nonroot_table(self):
b = flatbuffers.Builder(0)
string = b.CreateString("MyStat")
MyGame.Example.Stat.StatStart(b)
MyGame.Example.Stat.StatAddId(b, string)
MyGame.Example.Stat.StatAddVal(b, 12345678)
MyGame.Example.Stat.StatAddCount(b, 12345)
stat = MyGame.Example.Stat.StatEnd(b)
b.Finish(stat)
stat2 = MyGame.Example.Stat.Stat.GetRootAsStat(b.Bytes, b.Head())
self.assertEqual(b"MyStat", stat2.Id())
self.assertEqual(12345678, stat2.Val())
self.assertEqual(12345, stat2.Count())
class TestAllCodePathsOfMonsterExtraSchema(unittest.TestCase):
def setUp(self, *args, **kwargs):
super(TestAllCodePathsOfMonsterExtraSchema, self).setUp(*args, **kwargs)
b = flatbuffers.Builder(0)
MyGame.MonsterExtra.MonsterExtraStart(b)
gen_mon = MyGame.MonsterExtra.MonsterExtraEnd(b)
b.Finish(gen_mon)
self.mon = MyGame.MonsterExtra.MonsterExtra.GetRootAsMonsterExtra(b.Bytes, b.Head())
def test_default_nan_inf(self):
self.assertTrue(math.isnan(self.mon.F1()))
self.assertEqual(self.mon.F2(), float("inf"))
self.assertEqual(self.mon.F3(), float("-inf"))
self.assertTrue(math.isnan(self.mon.D1()))
self.assertEqual(self.mon.D2(), float("inf"))
self.assertEqual(self.mon.D3(), float("-inf"))
class TestVtableDeduplication(unittest.TestCase):
''' TestVtableDeduplication verifies that vtables are deduplicated. '''
def test_vtable_deduplication(self):
b = flatbuffers.Builder(0)
b.StartObject(4)
b.PrependByteSlot(0, 0, 0)
b.PrependByteSlot(1, 11, 0)
b.PrependByteSlot(2, 22, 0)
b.PrependInt16Slot(3, 33, 0)
obj0 = b.EndObject()
b.StartObject(4)
b.PrependByteSlot(0, 0, 0)
b.PrependByteSlot(1, 44, 0)
b.PrependByteSlot(2, 55, 0)
b.PrependInt16Slot(3, 66, 0)
obj1 = b.EndObject()
b.StartObject(4)
b.PrependByteSlot(0, 0, 0)
b.PrependByteSlot(1, 77, 0)
b.PrependByteSlot(2, 88, 0)
b.PrependInt16Slot(3, 99, 0)
obj2 = b.EndObject()
got = b.Bytes[b.Head():]
want = bytearray([
240, 255, 255, 255, # == -12. offset to dedupped vtable.
99, 0,
88,
77,
248, 255, 255, 255, # == -8. offset to dedupped vtable.
66, 0,
55,
44,
12, 0,
8, 0,
0, 0,
7, 0,
6, 0,
4, 0,
12, 0, 0, 0,
33, 0,
22,
11,
])
self.assertEqual((len(want), want), (len(got), got))
table0 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj0)
table1 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj1)
table2 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj2)
        def _checkTable(tab, voffset_value, b, c, d):
# vtable size
got = tab.GetVOffsetTSlot(0, 0)
self.assertEqual(12, got, 'case 0, 0')
# object size
got = tab.GetVOffsetTSlot(2, 0)
self.assertEqual(8, got, 'case 2, 0')
# default value
got = tab.GetVOffsetTSlot(4, 0)
            self.assertEqual(voffset_value, got, 'case 4, 0')
got = tab.GetSlot(6, 0, N.Uint8Flags)
self.assertEqual(b, got, 'case 6, 0')
            val = tab.GetSlot(8, 0, N.Uint8Flags)
            self.assertEqual(c, val, 'case 8, 0')
            got = tab.GetSlot(10, 0, N.Uint8Flags)
            self.assertEqual(d, got, 'case 10, 0')
_checkTable(table0, 0, 11, 22, 33)
_checkTable(table1, 0, 44, 55, 66)
_checkTable(table2, 0, 77, 88, 99)
class TestExceptions(unittest.TestCase):
def test_object_is_nested_error(self):
b = flatbuffers.Builder(0)
b.StartObject(0)
assertRaises(self, lambda: b.StartObject(0),
flatbuffers.builder.IsNestedError)
def test_object_is_not_nested_error(self):
b = flatbuffers.Builder(0)
assertRaises(self, lambda: b.EndObject(),
flatbuffers.builder.IsNotNestedError)
def test_struct_is_not_inline_error(self):
b = flatbuffers.Builder(0)
b.StartObject(0)
assertRaises(self, lambda: b.PrependStructSlot(0, 1, 0),
flatbuffers.builder.StructIsNotInlineError)
def test_unreachable_error(self):
b = flatbuffers.Builder(0)
assertRaises(self, lambda: b.PrependUOffsetTRelative(1),
flatbuffers.builder.OffsetArithmeticError)
def test_create_string_is_nested_error(self):
b = flatbuffers.Builder(0)
b.StartObject(0)
s = 'test1'
assertRaises(self, lambda: b.CreateString(s),
flatbuffers.builder.IsNestedError)
def test_create_byte_vector_is_nested_error(self):
b = flatbuffers.Builder(0)
b.StartObject(0)
s = b'test1'
assertRaises(self, lambda: b.CreateByteVector(s),
flatbuffers.builder.IsNestedError)
def test_finished_bytes_error(self):
b = flatbuffers.Builder(0)
assertRaises(self, lambda: b.Output(),
flatbuffers.builder.BuilderNotFinishedError)
class TestFixedLengthArrays(unittest.TestCase):
def test_fixed_length_array(self):
builder = flatbuffers.Builder(0)
a = 0.5
b = range(0, 15)
c = 1
d_a = [[1, 2], [3, 4]]
d_b = [MyGame.Example.TestEnum.TestEnum.B, \
MyGame.Example.TestEnum.TestEnum.C]
d_c = [[MyGame.Example.TestEnum.TestEnum.A, \
MyGame.Example.TestEnum.TestEnum.B], \
[MyGame.Example.TestEnum.TestEnum.C, \
MyGame.Example.TestEnum.TestEnum.B]]
arrayOffset = MyGame.Example.ArrayStruct.CreateArrayStruct(builder, \
a, b, c, d_a, d_b, d_c)
# Create a table with the ArrayStruct.
MyGame.Example.ArrayTable.ArrayTableStart(builder)
MyGame.Example.ArrayTable.ArrayTableAddA(builder, arrayOffset)
tableOffset = MyGame.Example.ArrayTable.ArrayTableEnd(builder)
builder.Finish(tableOffset)
buf = builder.Output()
table = MyGame.Example.ArrayTable.ArrayTable.GetRootAsArrayTable(buf, 0)
# Verify structure.
nested = MyGame.Example.NestedStruct.NestedStruct()
self.assertEqual(table.A().A(), 0.5)
self.assertEqual(table.A().B(), \
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertEqual(table.A().C(), 1)
self.assertEqual(table.A().D(nested, 0).A(), [1, 2])
self.assertEqual(table.A().D(nested, 1).A(), [3, 4])
self.assertEqual(table.A().D(nested, 0).B(), \
MyGame.Example.TestEnum.TestEnum.B)
self.assertEqual(table.A().D(nested, 1).B(), \
MyGame.Example.TestEnum.TestEnum.C)
self.assertEqual(table.A().D(nested, 0).C(), \
[MyGame.Example.TestEnum.TestEnum.A, \
MyGame.Example.TestEnum.TestEnum.B])
self.assertEqual(table.A().D(nested, 1).C(), \
[MyGame.Example.TestEnum.TestEnum.C, \
MyGame.Example.TestEnum.TestEnum.B])
def CheckAgainstGoldDataGo():
try:
gen_buf, gen_off = make_monster_from_generated_code()
fn = 'monsterdata_go_wire.mon'
if not os.path.exists(fn):
print('Go-generated data does not exist, failed.')
return False
# would like to use a context manager here, but it's less
# backwards-compatible:
f = open(fn, 'rb')
go_wire_data = f.read()
f.close()
CheckReadBuffer(bytearray(go_wire_data), 0)
if not bytearray(gen_buf[gen_off:]) == bytearray(go_wire_data):
raise AssertionError('CheckAgainstGoldDataGo failed')
    except Exception:
print('Failed to test against Go-generated test data.')
return False
print('Can read Go-generated test data, and Python generates bytewise identical data.')
return True
def CheckAgainstGoldDataJava():
try:
gen_buf, gen_off = make_monster_from_generated_code()
fn = 'monsterdata_java_wire.mon'
if not os.path.exists(fn):
print('Java-generated data does not exist, failed.')
return False
f = open(fn, 'rb')
java_wire_data = f.read()
f.close()
CheckReadBuffer(bytearray(java_wire_data), 0)
    except Exception:
print('Failed to read Java-generated test data.')
return False
print('Can read Java-generated test data.')
return True
class LCG(object):
''' Include simple random number generator to ensure results will be the
same cross platform.
http://en.wikipedia.org/wiki/Park%E2%80%93Miller_random_number_generator '''
__slots__ = ['n']
InitialLCGSeed = 48271
def __init__(self):
self.n = self.InitialLCGSeed
def Reset(self):
self.n = self.InitialLCGSeed
def Next(self):
self.n = ((self.n * 279470273) % 4294967291) & 0xFFFFFFFF
return self.n
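# Usage sketch (illustrative): two identically seeded LCGs emit the same
# sequence, which is what keeps benchmark inputs reproducible cross-platform.
def _lcg_determinism_demo():
    a, b = LCG(), LCG()
    assert [a.Next() for _ in range(5)] == [b.Next() for _ in range(5)]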
def BenchmarkVtableDeduplication(count):
'''
BenchmarkVtableDeduplication measures the speed of vtable deduplication
by creating `prePop` vtables, then populating `count` objects with a
different single vtable.
When count is large (as in long benchmarks), memory usage may be high.
'''
for prePop in (1, 10, 100, 1000):
builder = flatbuffers.Builder(0)
n = 1 + int(math.log(prePop, 1.5))
# generate some layouts:
layouts = set()
r = list(compat_range(n))
while len(layouts) < prePop:
layouts.add(tuple(sorted(random.sample(r, int(max(1, n / 2))))))
layouts = list(layouts)
# pre-populate vtables:
for layout in layouts:
builder.StartObject(n)
for j in layout:
builder.PrependInt16Slot(j, j, 0)
builder.EndObject()
# benchmark deduplication of a new vtable:
def f():
layout = random.choice(layouts)
builder.StartObject(n)
for j in layout:
builder.PrependInt16Slot(j, j, 0)
builder.EndObject()
duration = timeit.timeit(stmt=f, number=count)
rate = float(count) / duration
        print(('vtable deduplication rate (n=%d, vtables=%d): %.2f/sec' % (
prePop,
len(builder.vtables),
rate))
)
def BenchmarkCheckReadBuffer(count, buf, off):
'''
BenchmarkCheckReadBuffer measures the speed of flatbuffer reading
by re-using the CheckReadBuffer function with the gold data.
'''
def f():
CheckReadBuffer(buf, off)
duration = timeit.timeit(stmt=f, number=count)
rate = float(count) / duration
data = float(len(buf) * count) / float(1024 * 1024)
data_rate = data / float(duration)
print(('traversed %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec')
% (count, len(buf), duration, rate, data_rate))
def BenchmarkMakeMonsterFromGeneratedCode(count, length):
'''
BenchmarkMakeMonsterFromGeneratedCode measures the speed of flatbuffer
creation by re-using the make_monster_from_generated_code function for
generating gold data examples.
'''
duration = timeit.timeit(stmt=make_monster_from_generated_code,
number=count)
rate = float(count) / duration
data = float(length * count) / float(1024 * 1024)
data_rate = data / float(duration)
print(('built %d %d-byte flatbuffers in %.2fsec: %.2f/sec, %.2fMB/sec' % \
(count, length, duration, rate, data_rate)))
def backward_compatible_run_tests(**kwargs):
if PY_VERSION < (2, 6):
sys.stderr.write("Python version less than 2.6 are not supported")
sys.stderr.flush()
return False
# python2.6 has a reduced-functionality unittest.main function:
if PY_VERSION == (2, 6):
try:
unittest.main(**kwargs)
except SystemExit as e:
if not e.code == 0:
return False
return True
# python2.7 and above let us not exit once unittest.main is run:
kwargs['exit'] = False
kwargs['verbosity'] = 0
ret = unittest.main(**kwargs)
if ret.result.errors or ret.result.failures:
return False
return True
def main():
import os
import sys
if not len(sys.argv) == 4:
        sys.stderr.write('Usage: %s <benchmark vtable count> '
                         '<benchmark read count> <benchmark build count>\n'
                         % sys.argv[0])
        sys.stderr.write(' Provide COMPARE_GENERATED_TO_GO=1 to check '
                         'for bytewise comparison to Go data.\n')
        sys.stderr.write(' Provide COMPARE_GENERATED_TO_JAVA=1 to check '
                         'for bytewise comparison to Java data.\n')
sys.stderr.flush()
sys.exit(1)
kwargs = dict(argv=sys.argv[:-3])
# run tests, and run some language comparison checks if needed:
success = backward_compatible_run_tests(**kwargs)
if success and os.environ.get('COMPARE_GENERATED_TO_GO', 0) == "1":
success = success and CheckAgainstGoldDataGo()
if success and os.environ.get('COMPARE_GENERATED_TO_JAVA', 0) == "1":
success = success and CheckAgainstGoldDataJava()
if not success:
sys.stderr.write('Tests failed, skipping benchmarks.\n')
sys.stderr.flush()
sys.exit(1)
# run benchmarks (if 0, they will be a noop):
bench_vtable = int(sys.argv[1])
bench_traverse = int(sys.argv[2])
bench_build = int(sys.argv[3])
if bench_vtable:
BenchmarkVtableDeduplication(bench_vtable)
if bench_traverse:
buf, off = make_monster_from_generated_code()
BenchmarkCheckReadBuffer(bench_traverse, buf, off)
if bench_build:
buf, off = make_monster_from_generated_code()
BenchmarkMakeMonsterFromGeneratedCode(bench_build, len(buf))
if __name__ == '__main__':
main()
| 36.421342 | 123 | 0.577431 |
7d578dae474b80e788927c83be4b92da4f525a7d | 768 | py | Python | tests/fixtures/proto_plus_fixture.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | 285 | 2018-10-05T16:47:58.000Z | 2022-03-31T00:58:39.000Z | tests/fixtures/proto_plus_fixture.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | 425 | 2018-09-10T13:32:41.000Z | 2022-03-31T14:50:05.000Z | tests/fixtures/proto_plus_fixture.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | 369 | 2018-11-28T07:01:00.000Z | 2022-03-28T09:53:22.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A proto-plus wrapped protobuf definition to use for testing."""
import proto
class ProtoPlusFixture(proto.Message):
"""Wrapped protobuf class for testing purposes."""
name = proto.Field(proto.STRING, number=1)
| 33.391304 | 74 | 0.751302 |
62888055fdf251945f8ea1d05928d5dea38ee804 | 8,916 | py | Python | sdk/python/pulumi_azure_nextgen/storagesync/v20190601/cloud_endpoint.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/storagesync/v20190601/cloud_endpoint.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/storagesync/v20190601/cloud_endpoint.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['CloudEndpoint']
class CloudEndpoint(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_file_share_name: Optional[pulumi.Input[str]] = None,
cloud_endpoint_name: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_account_resource_id: Optional[pulumi.Input[str]] = None,
storage_account_tenant_id: Optional[pulumi.Input[str]] = None,
storage_sync_service_name: Optional[pulumi.Input[str]] = None,
sync_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Cloud Endpoint object.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] azure_file_share_name: Azure file share name
:param pulumi.Input[str] cloud_endpoint_name: Name of Cloud Endpoint object.
:param pulumi.Input[str] friendly_name: Friendly Name
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] storage_account_resource_id: Storage Account Resource Id
:param pulumi.Input[str] storage_account_tenant_id: Storage Account Tenant Id
:param pulumi.Input[str] storage_sync_service_name: Name of Storage Sync Service resource.
:param pulumi.Input[str] sync_group_name: Name of Sync Group resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_file_share_name'] = azure_file_share_name
if cloud_endpoint_name is None:
raise TypeError("Missing required property 'cloud_endpoint_name'")
__props__['cloud_endpoint_name'] = cloud_endpoint_name
__props__['friendly_name'] = friendly_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['storage_account_resource_id'] = storage_account_resource_id
__props__['storage_account_tenant_id'] = storage_account_tenant_id
if storage_sync_service_name is None:
raise TypeError("Missing required property 'storage_sync_service_name'")
__props__['storage_sync_service_name'] = storage_sync_service_name
if sync_group_name is None:
raise TypeError("Missing required property 'sync_group_name'")
__props__['sync_group_name'] = sync_group_name
__props__['backup_enabled'] = None
__props__['last_operation_name'] = None
__props__['last_workflow_id'] = None
__props__['name'] = None
__props__['partnership_id'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagesync/latest:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20170605preview:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180402:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20180701:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20181001:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190201:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20190301:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20191001:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200301:CloudEndpoint"), pulumi.Alias(type_="azure-nextgen:storagesync/v20200901:CloudEndpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CloudEndpoint, __self__).__init__(
'azure-nextgen:storagesync/v20190601:CloudEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CloudEndpoint':
"""
Get an existing CloudEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return CloudEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureFileShareName")
def azure_file_share_name(self) -> pulumi.Output[Optional[str]]:
"""
Azure file share name
"""
return pulumi.get(self, "azure_file_share_name")
@property
@pulumi.getter(name="backupEnabled")
def backup_enabled(self) -> pulumi.Output[str]:
"""
Backup Enabled
"""
return pulumi.get(self, "backup_enabled")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly Name
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="lastOperationName")
def last_operation_name(self) -> pulumi.Output[Optional[str]]:
"""
Resource Last Operation Name
"""
return pulumi.get(self, "last_operation_name")
@property
@pulumi.getter(name="lastWorkflowId")
def last_workflow_id(self) -> pulumi.Output[Optional[str]]:
"""
CloudEndpoint lastWorkflowId
"""
return pulumi.get(self, "last_workflow_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnershipId")
def partnership_id(self) -> pulumi.Output[Optional[str]]:
"""
Partnership Id
"""
return pulumi.get(self, "partnership_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
CloudEndpoint Provisioning State
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageAccountResourceId")
def storage_account_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
Storage Account Resource Id
"""
return pulumi.get(self, "storage_account_resource_id")
@property
@pulumi.getter(name="storageAccountTenantId")
def storage_account_tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Storage Account Tenant Id
"""
return pulumi.get(self, "storage_account_tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
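# Hypothetical usage sketch (resource names are placeholders, not from this
# module); only the four properties validated above are strictly required:
# endpoint = CloudEndpoint(
#     "example-endpoint",
#     cloud_endpoint_name="endpoint1",
#     resource_group_name="rg1",
#     storage_sync_service_name="syncservice1",
#     sync_group_name="syncgroup1",
# )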
| 43.921182 | 787 | 0.662517 |
1e24fca53965bbbe61bd96f99be436177cf4c19d | 3,376 | py | Python | mrfd/diff-ROI-3DCAE_train.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 13 | 2020-11-14T06:26:04.000Z | 2022-03-30T10:46:22.000Z | mrfd/diff-ROI-3DCAE_train.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 5 | 2020-11-14T06:27:10.000Z | 2021-03-11T23:20:14.000Z | mrfd/diff-ROI-3DCAE_train.py | ivineetm007/Fall-detection | d18ac5f1e26a04ed8492b2e215a91c51b55f32dd | [
"MIT"
] | 4 | 2020-09-06T20:56:23.000Z | 2021-11-25T05:46:59.000Z |
import sys
import numpy as np
import os
from data_management import load_videos
import config
# import tensorflow as tf
# tf.config.experimental_run_functions_eagerly(True)
from models import diff_ROI_C3D_AE_no_pool,C3D_no_pool
from trainer.diffroigan import Params,Diff_ROI_3DCAE_GAN3D
from trainer.util import create_diff_mask
import argparse
parser = argparse.ArgumentParser(description='Region and difference constraint adversarial model training')
parser.add_argument('--epochstrained', default='0',
help='Epoch number of the saved model')
parser.add_argument('--lambda_S', default='1',
help='ROI MSE loss hyperparameter')
parser.add_argument('--lambda_T', default='1',
help='Diff MSE loss hyperparameter')
args = parser.parse_args()
dset = config.track_root_folder
d_type='ROIframe'
#parameters
epochs=300
epochs_trained=int(args.epochstrained)
LOAD_DATA_SHAPE=config.LOAD_DATA_SHAPE
width, height = LOAD_DATA_SHAPE[0],LOAD_DATA_SHAPE[1]
channels=LOAD_DATA_SHAPE[2]
win_length=config.WIN_LENGTH
regularizer_list = ['BN']
break_win=config.SPLIT_GAP
stride=config.STRIDE
lambdas=[float(args.lambda_S),float(args.lambda_T)]#lambda_s ,lambda_t
#aggreagte all parameters in Params class
param=Params(width=width, height=height,win_length=win_length,channels=channels,dset=dset,d_type=d_type,regularizer_list=regularizer_list,break_win=break_win)
param.lambda_S=lambdas[0]
param.lambda_T=lambdas[1]
#-----------------
#Load train data
#-----------------
ADL_videos=load_videos(dset='Thermal_track',vid_class='ADL',input_type='ROI_FRAME')
#Load GAN trainer
GAN3D=Diff_ROI_3DCAE_GAN3D(train_par=param,stride=stride)
print("Creating windows\n")
ADL_windows=GAN3D.create_windowed_data(ADL_videos,stride=stride,data_key='ROI_FRAME')
ADL_mask_windows=GAN3D.create_windowed_data(ADL_videos,stride=stride,data_key='MASK')
ADL_mask_windows=ADL_mask_windows.astype('int8')
#Creating diff mask
ADL_diff_mask_windows=create_diff_mask(ADL_mask_windows)
print("Thermal windows shape")
print(ADL_windows.shape)
print("Thermal windows masks shape")
print(ADL_mask_windows.shape)
print("Thermal difference frame windows mask shape")
print(ADL_diff_mask_windows.shape)
#-----------------
#MODEL Initialization
#-----------------
##reconstructor model
R, R_name, _ = diff_ROI_C3D_AE_no_pool(img_width=param.width, img_height=param.height, win_length=param.win_length, regularizer_list=param.regularizer_list,channels=param.channels,lambda_S=param.lambda_S,lambda_T=param.lambda_T)
##Discriminator model
D, D_name, _ = C3D_no_pool(img_width=param.width, img_height=param.height, win_length=param.win_length, regularizer_list=param.regularizer_list,channels=param.channels)
param.R_name=R_name
param.D_name=D_name
D_path = param.get_D_path(epochs_trained)
R_path = param.get_R_path(epochs_trained)
if os.path.isfile(R_path) and os.path.isfile(D_path):
R.load_weights(R_path)
D.load_weights(D_path)
print("Model weights loaded successfully........")
else:
print("Saved model weights not found......")
epochs_trained=0
#-----------------
#model training
#-----------------
GAN3D.initialize_model(Reconstructor=R , Discriminator=D )
GAN3D.train(X_train_frame=ADL_windows, X_train_mask=ADL_mask_windows,X_train_diff_mask=ADL_diff_mask_windows,epochs= epochs,epochs_trained=epochs_trained, save_interval = 10)
| 37.098901 | 228 | 0.781991 |
7615639bf7ef6fdb90e854e6344a9326f7df0614 | 2,580 | py | Python | script/from_rss.py | radjan/newspac | 30c0c197f3d64e1810dd5f92fb05c9ca51e41c0e | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-11-06T06:55:02.000Z | 2018-03-14T03:18:51.000Z | script/from_rss.py | radjan/newspac | 30c0c197f3d64e1810dd5f92fb05c9ca51e41c0e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | script/from_rss.py | radjan/newspac | 30c0c197f3d64e1810dd5f92fb05c9ca51e41c0e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- encoding: utf-8 -*-
import common
common.DB_PATH = common.DB_DIR + '/newspacks2.db'
import simplejson as json
import database as db
db.THREAD_LOCAL = True
from importlib import import_module
def _feed_dict(feed_url, source, handler, catalog=None):
return dict(feed_url=feed_url,
source=source,
handler=handler,
catalog=catalog,
last=None)
default_feeds = []
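# English equivalents for the non-ASCII literals below (kept as-is because
# they are data the scrapers match on): 自由時報 = Liberty Times, 中時電子報 =
# China Times, 蘋果日報 = Apple Daily; catalogs: 焦點 = focus, 政治 = politics,
# 財經 = finance, 首頁焦點 = homepage focus, 頭條 = headlines, 要聞 = top news.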
default_feeds.append(_feed_dict('http://www.libertytimes.com.tw/rss/fo.xml', u'自由時報', 'libertytimes', u'焦點'))
default_feeds.append(_feed_dict('http://www.libertytimes.com.tw/rss/p.xml', u'自由時報', 'libertytimes', u'政治'))
default_feeds.append(_feed_dict('http://www.libertytimes.com.tw/rss/e.xml', u'自由時報', 'libertytimes', u'財經'))
default_feeds.append(_feed_dict('http://rss.chinatimes.com/rss/focusing-u.rss', u'中時電子報', 'chinatimes', u'首頁焦點'))
default_feeds.append(_feed_dict('http://rss.chinatimes.com/rss/focus-u.rss', u'中時電子報', 'chinatimes', u'焦點'))
default_feeds.append(_feed_dict('http://rss.chinatimes.com/rss/Politic-u.rss', u'中時電子報', 'chinatimes', u'政治'))
#default_feeds.append(_feed_dict('http://rss.chinatimes.com/rss/finance-u.rss', u'中時電子報', 'chinatimes', u'財經'))
default_feeds.append(_feed_dict('http://www.appledaily.com.tw/rss/create/kind/sec/type/1077', u'蘋果日報', 'appledaily', u'頭條'))
default_feeds.append(_feed_dict('http://www.appledaily.com.tw/rss/create/kind/sec/type/151', u'蘋果日報', 'appledaily', u'政治'))
default_feeds.append(_feed_dict('http://www.appledaily.com.tw/rss/create/kind/sec/type/11', u'蘋果日報', 'appledaily', u'要聞'))
def _load_state():
with open('feed_state', 'r') as f:
state = json.load(f)
return state
def _save_state(states):
with open('feed_state', 'w') as f:
json.dump(states, f)
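# Illustrative shape of the persisted feed_state file (inferred from
# _feed_dict and main below); keys are feed URLs, values are feed dicts
# whose 'last' field tracks the newest entry seen:
# {"http://.../fo.xml": {"feed_url": "...", "source": "...",
#   "handler": "libertytimes", "catalog": "...", "last": null}, ...}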
def main():
state = _load_state()
for feed in default_feeds:
if feed['feed_url'] not in state:
state[feed['feed_url']] = feed
for url, meta in state.items():
print 'processing %s' % url
handler = import_module(meta['handler']).Handler()
new_articles, new_last = handler.get_articles(url, meta['last'])
bad_count = 0
for a in new_articles:
a['source'] = meta['source']
aid = db.ensure_article_exists(a, overwrite=True)
if not a['cached']:
bad_count += 1
#print a
print ' get %s articles, bad_count: %s' % (len(new_articles), bad_count)
print new_last
meta['last'] = new_last
_save_state(state)
if __name__ == '__main__':
main()
| 41.612903 | 124 | 0.657752 |
9bf5d429f465cce9c4eaed71d8419eb57087331e | 9,592 | py | Python | parlai/tasks/task_list.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | 1 | 2017-06-26T07:46:33.000Z | 2017-06-26T07:46:33.000Z | parlai/tasks/task_list.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | parlai/tasks/task_list.py | shagunsodhani/ParlAI | 5b634b844807372adfb0f6d6e5c42341ac8138f0 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""This file contains a list of all the tasks, their id and task name, description
and the tags associated with them.
"""
task_list = [
{
"id": "bAbI-1k",
"display_name": "bAbI 1k",
"task": "babi:All1k",
"tags": [ "all", "QA" ],
"description": "20 synthetic tasks that each test a unique aspect of text and reasoning, and hence test different capabilities of learning models. From Weston et al. '16. Link: http://arxiv.org/abs/1502.05698",
"notes": "You can access just one of the bAbI tasks with e.g. 'babi:Task1k:3' for task 3."
},
{
"id": "bAbI-10k",
"display_name": "bAbI 10k",
"task": "babi:All10k",
"tags": [ "all", "QA" ],
"description": "20 synthetic tasks that each test a unique aspect of text and reasoning, and hence test different capabilities of learning models. From Weston et al. '16. Link: http://arxiv.org/abs/1502.05698",
"notes": "You can access just one of the bAbI tasks with e.g. 'babi:Task10k:3' for task 3."
},
{
"id": "BookTest",
"display_name": "BookTest",
"task": "booktest",
"tags": [ "all", "Cloze" ],
"description": "Sentence completion given a few sentences as context from a book. A larger version of CBT. From Bajgar et al., 16. Link: https://arxiv.org/abs/1610.00956"
},
{
"id": "CBT",
"display_name": "Children's Book Test (CBT)",
"task": "cbt",
"tags": [ "all", "Cloze" ],
"description": "Sentence completion given a few sentences as context from a children's book. From Hill et al., '16. Link: https://arxiv.org/abs/1511.02301"
},
{
"id": "CornellMovie",
"display_name": "Cornell Movie",
"task": "cornell_movie",
"tags": [ "all", "ChitChat" ],
"description": "Fictional conversations extracted from raw movie scripts. Link: https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html"
},
{
"id": "DBLL-bAbI",
"display_name": "Dialog Based Language Learning: bAbI Task",
"task": "dbll_babi",
"tags": [ "all", "Goal" ],
"description": "Short dialogs based on the bAbI tasks, but in the form of a question from a teacher, the answer from the student, and finally a comment on the answer from the teacher. The aim is to find learning models that use the comments to improve. From Weston '16. Link: https://arxiv.org/abs/1604.06045"
},
{
"id": "DBLL-Movie",
"display_name": "Dialog Based Language Learning: WikiMovies Task",
"task": "dbll_movie",
"tags": [ "all", "Goal" ],
"description": "Short dialogs based on WikiMovies, but in the form of a question from a teacher, the answer from the student, and finally a comment on the answer from the teacher. The aim is to find learning models that use the comments to improve. From Weston '16. Link: https://arxiv.org/abs/1604.06045"
},
{
"id": "dialog-bAbI",
"display_name": "Dialog bAbI",
"task": "dialog_babi",
"tags": [ "all", "Goal" ],
"description": "Simulated dialogs of restaurant booking, from Bordes et al. '16. Link: https://arxiv.org/abs/1605.07683"
},
{
"id": "MCTest",
"display_name": "MCTest",
"task": "mctest",
"tags": [ "all", "QA" ],
"description": "Questions about short children's stories, from Richardson et al. '13. Link: https://www.microsoft.com/en-us/research/publication/mctest-challenge-dataset-open-domain-machine-comprehension-text/"
},
{
"id": "MovieDD-QA",
"display_name": "Movie Dialog QA",
"task": "moviedialog:Task:1",
"tags": [ "all", "QA", "MovieDD" ],
"description": "Closed-domain QA dataset asking templated questions about movies, answerable from Wikipedia, similar to WikiMovies. From Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931"
},
{
"id": "MovieDD-QARecs",
"display_name": "Movie Dialog QA Recommendations",
"task": "moviedialog:Task:3",
"tags": [ "all", "Goal", "MovieDD" ],
"description": "Dialogs discussing questions about movies as well as recommendations. From Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931"
},
{
"id": "MovieDD-Recs",
"display_name": "Movie Dialog Recommendations",
"task": "moviedialog:Task:2",
"tags": [ "all", "QA", "MovieDD" ],
"description": "Questions asking for movie recommendations. From Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931"
},
{
"id": "MovieDD-Reddit",
"display_name": "Movie Dialog Reddit",
"task": "moviedialog:Task:4",
"tags": [ "all", "ChitChat", "MovieDD" ],
"description": "Dialogs discussing Movies from Reddit (the Movies SubReddit). From Dodge et al. '15. Link: https://arxiv.org/abs/1511.06931"
},
{
"id": "MTurkWikiMovies",
"display_name": "MTurk WikiMovies",
"task": "mturkwikimovies",
"tags": [ "all", "QA" ],
"description": "Closed-domain QA dataset asking MTurk-derived questions about movies, answerable from Wikipedia. From Li et al. '16. Link: https://arxiv.org/abs/1611.09823"
},
{
"id": "OpenSubtitles",
"display_name": "Open Subtitles",
"task": "opensubtitles",
"tags": [ "all", "ChitChat" ],
"description": "Dataset of dialogs from movie scripts: http://opus.lingfil.uu.se/OpenSubtitles.php. A variant of the dataset used in Vinyals & Le '15, https://arxiv.org/abs/1506.05869."
},
{
"id": "QACNN",
"display_name": "QA CNN",
"task": "qacnn",
"tags": [ "all", "Cloze" ],
"description": "Cloze dataset based on a missing (anonymized) entity phrase from a CNN article, Hermann et al. '15. Link: https://arxiv.org/abs/1506.03340"
},
{
"id": "QADailyMail",
"display_name": "QA Daily Mail",
"task": "qadailymail",
"tags": [ "all", "Cloze" ],
"description": "Cloze dataset based on a missing (anonymized) entity phrase from a Daily Mail article, Hermann et al. '15. Link: https://arxiv.org/abs/1506.03340"
},
{
"id": "SimpleQuestions",
"display_name": "Simple Questions",
"task": "simplequestions",
"tags": [ "all", "QA" ],
"description": "Open-domain QA dataset based on Freebase triples from Bordes et al. '15. Link: https://arxiv.org/abs/1506.02075"
},
{
"id": "SQuAD",
"display_name": "SQuAD",
"task": "squad",
"tags": [ "all", "QA" ],
"description": "Open-domain QA dataset answerable from a given paragraph from Wikipedia, from Rajpurkar et al. '16. Link: https://arxiv.org/abs/1606.05250"
},
{
"id": "Ubuntu",
"display_name": "Ubuntu",
"task": "ubuntu",
"tags": [ "all", "ChitChat" ],
"description": "Dialogs between an Ubuntu user and an expert trying to fix issue, from Lowe et al. '15. Link: https://arxiv.org/abs/1506.08909"
},
{
"id": "WebQuestions",
"display_name": "Web Questions",
"task": "webquestions",
"tags": [ "all", "QA" ],
"description": "Open-domain QA dataset from Web queries from Berant et al. '13. Link: http://www.aclweb.org/anthology/D13-1160"
},
{
"id": "WikiMovies",
"display_name": "WikiMovies",
"task": "wikimovies",
"tags": [ "all", "QA" ],
"description": "Closed-domain QA dataset asking templated questions about movies, answerable from Wikipedia. From Miller et al. '16. Link: https://arxiv.org/abs/1606.03126"
},
{
"id": "WikiQA",
"display_name": "WikiQA",
"task": "wikiqa",
"tags": [ "all", "QA" ],
"description": "Open domain QA from Wikipedia dataset from Yang et al. '15. Link: https://www.microsoft.com/en-us/research/publication/wikiqa-a-challenge-dataset-for-open-domain-question-answering/"
},
{
"id": "VQAv1",
"display_name": "VQAv1",
"task": "vqa_v1",
"tags": [ "all", "Visual" ],
"description": "Open-ended question answering about visual content. From Agrawal et al. '15. Link: https://arxiv.org/abs/1505.00468"
},
{
"id": "VQAv2",
"display_name": "VQAv2",
"task": "vqa_v2",
"tags": [ "all", "Visual" ],
"description": "Bigger, more balanced version of the original VQA dataset. From Goyal et al. '16. Link: https://arxiv.org/abs/1612.00837"
},
{
"id": "VisDial",
"display_name": "VisDial",
"task": "visdial",
"tags": [ "all", "Visual" ],
"description": "Task which requires agents to hold a meaningful dialog about visual content. From Das et al. '16. Link: https://arxiv.org/abs/1611.08669"
},
{
"id": "MNIST_QA",
"display_name": "MNIST_QA",
"task": "mnist_qa",
"tags": [ "all", "Visual" ],
"description": "Task which requires agents to identify which number they are seeing. From the MNIST dataset."
},
]
| 47.251232 | 317 | 0.590179 |
254edbb5e3800caf8d8190b5fb6835a271ca7fc2 | 1,429 | py | Python | examples/gaussians.py | paulorauber/thesne | fc87fb89b1bd9e82c82fe7de18f8afc3e6203d4a | [
"MIT"
] | 67 | 2016-06-08T20:14:58.000Z | 2022-03-11T14:16:52.000Z | examples/gaussians.py | paulorauber/thesne | fc87fb89b1bd9e82c82fe7de18f8afc3e6203d4a | [
"MIT"
] | 2 | 2019-12-05T02:35:33.000Z | 2021-06-15T12:01:02.000Z | examples/gaussians.py | paulorauber/thesne | fc87fb89b1bd9e82c82fe7de18f8afc3e6203d4a | [
"MIT"
] | 23 | 2016-06-01T03:06:35.000Z | 2021-12-23T16:15:24.000Z | import numpy as np
from sklearn.utils import check_random_state
from thesne.model.dynamic_tsne import dynamic_tsne
from thesne.examples import plot
def create_blobs(classes=10, dims=100, class_size=100, variance=0.1, steps=4,
advection_ratio=0.5, random_state=None):
random_state = check_random_state(random_state)
X = []
indices = random_state.permutation(dims)[0:classes]
means = []
for c in range(classes):
mean = np.zeros(dims)
mean[indices[c]] = 1.0
means.append(mean)
X.append(random_state.multivariate_normal(mean, np.eye(dims)*variance,
class_size))
X = np.concatenate(X)
y = np.concatenate([[i]*class_size for i in range(classes)])
Xs = [np.array(X)]
for step in range(steps - 1):
Xnext = np.array(Xs[step])
for c in range(classes):
            start, end = class_size*c, class_size*(c + 1)
            Xnext[start: end] += advection_ratio*(means[c] - Xnext[start: end])
Xs.append(Xnext)
return Xs, y
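# Sketch of the advection step above (illustrative): each step applies
# X_{t+1} = X_t + r * (mu_c - X_t), so a point's offset from its class mean
# shrinks by a factor of (1 - r) per step and the blobs contract over time.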
def main():
seed = 0
Xs, y = create_blobs(class_size=200, advection_ratio=0.1, steps=10,
random_state=seed)
Ys = dynamic_tsne(Xs, perplexity=70, lmbda=0.1, verbose=1, sigma_iters=50,
random_state=seed)
for Y in Ys:
plot.plot(Y, y)
if __name__ == "__main__":
main()
| 26.962264 | 79 | 0.60042 |
7b05bb48136e7f2f73d00bb45becc179311fa7dc | 2,574 | py | Python | tests/unit/resources/networking/test_fabrics.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | tests/unit/resources/networking/test_fabrics.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | tests/unit/resources/networking/test_fabrics.py | PragadeeswaranS/oneview-python | 3acc113b8dd30029beb7c228c3bc2bbe67d3485b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.fabrics import Fabrics
from hpOneView.resources.resource import ResourceClient
class FabricsTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._fabrics = Fabrics(self.connection)
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._fabrics.get('7a9f7d09-3c24-4efe-928f-50a1af411120')
mock_get.assert_called_once_with(
'7a9f7d09-3c24-4efe-928f-50a1af411120')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._fabrics.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._fabrics.get_by('name', 'DefaultFabric')
mock_get_by.assert_called_once_with('name', 'DefaultFabric')
@mock.patch.object(ResourceClient, 'get')
def test_get_reserved_vlan_range(self, mock_get):
uri = '/rest/fabrics/123/reserved-vlan-range'
self._fabrics.get_reserved_vlan_range('123')
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'update')
def test_update_reserved_vlan_range(self, mock_update):
uri = '/rest/fabrics/123/reserved-vlan-range'
data_to_update = {
"start": 100,
"length": 100
}
self._fabrics.update_reserved_vlan_range('123', data_to_update)
mock_update.assert_called_once_with(
resource=data_to_update,
uri=uri,
force=False,
default_values=Fabrics.DEFAULT_VALUES
)
| 33 | 78 | 0.700078 |
edfe616553fde07c10a0a382b04bd2c053cb8e8f | 12,323 | py | Python | allennlp_beaker/__main__.py | allenai/allennlp-beaker | 6890ed300e08024c4738f937faebf31cd850003e | ["Apache-2.0"] | 1 | 2021-07-08T03:09:18.000Z | 2021-07-08T03:09:18.000Z | allennlp_beaker/__main__.py | allenai/allennlp-beaker | 6890ed300e08024c4738f937faebf31cd850003e | ["Apache-2.0"] | null | null | null | allennlp_beaker/__main__.py | allenai/allennlp-beaker | 6890ed300e08024c4738f937faebf31cd850003e | ["Apache-2.0"] | null | null | null |
from collections import deque
from datetime import date
import os
import shutil
import subprocess
from tempfile import TemporaryDirectory
from typing import Any, Dict, List, Iterable, Optional, Tuple
import uuid
from allennlp.common.file_utils import cached_path
from allennlp.common.params import Params
import click
import click_spinner
import yaml
DEFAULT_CLUSTER = "ai2/on-prem-ai2-server2"
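# The two Dockerfile templates below build a python:3.7 image with the NVIDIA
# env vars set and a pinned AllenNLP installed; the EXTRA_STEPS block is
# appended only when additional packages (e.g. allennlp-models) are requested.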
DOCKERFILE = """
FROM python:3.7
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
# Tell nvidia-docker the driver spec that we need as well as to
# use all available devices, which are mounted at /usr/local/nvidia.
# The LABEL supports an older version of nvidia-docker, the env
# variables a newer one.
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
LABEL com.nvidia.volumes.needed="nvidia_driver"
WORKDIR /stage/allennlp
ENTRYPOINT ["allennlp"]
ARG ALLENNLP
RUN pip install --no-cache-dir ${ALLENNLP}
COPY . .
"""
DOCKERFILE_EXTRA_STEPS = """
# Ensure allennlp isn't re-installed when we install allennlp-models.
ENV ALLENNLP_VERSION_OVERRIDE allennlp
# To be compatible with older versions of allennlp-models.
ENV IGNORE_ALLENNLP_IN_SETUP true
# Disable parallelism in tokenizers because it doesn't help, and sometimes hurts.
ENV TOKENIZERS_PARALLELISM 0
ARG PACKAGES
RUN pip install --no-cache-dir ${PACKAGES}
"""
def echo_command_output(cmd: List[str]) -> None:
for line in shell_out_command(cmd):
click.echo(line, nl=True)
def shell_out_command(cmd: List[str]) -> Iterable[str]:
try:
child = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
check=True,
)
for line in child.stdout.split("\n"):
line = line.rstrip()
if line.strip():
yield line
except subprocess.CalledProcessError as exc:
raise click.ClickException(click.style(exc.output, fg="red"))
except FileNotFoundError as exc:
raise click.ClickException(click.style(f"{exc.filename} not found", fg="red"))
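# shell_out_command streams a child process' stdout line by line and converts
# failures into click.ClickException so the CLI can report them cleanly;
# echo_command_output simply prints each streamed line.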
def create_beaker_config(
name: str = None,
description: str = None,
image: str = None,
gpus: int = 0,
cluster: str = DEFAULT_CLUSTER,
) -> Dict[str, Any]:
return {
"description": description,
"tasks": [
{
"name": name,
"spec": {
"image": image,
"resultPath": "/output",
"args": [
"train",
"config.jsonnet",
"-s",
"/output",
"--file-friendly-logging",
],
"requirements": {"gpuCount": gpus},
},
"cluster": cluster,
}
],
}
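# Illustrative call (hypothetical values): create_beaker_config(name="my-exp",
# image="user/my-image", gpus=1) yields a one-task spec that runs
# `allennlp train config.jsonnet -s /output` on the chosen cluster.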
def parse_version(ctx, param, version) -> str:
if not version:
return version
if param.name == "allennlp_version":
package = "allennlp"
else:
package = "allennlp-models"
if version.startswith("git@"):
git_url = f"https://github.com/allenai/{package}"
if version == "git@master":
# Get the latest commit from the git repo.
click.secho("Checking for latest commit...", fg="yellow")
with click_spinner.spinner():
latest_commits = list(
shell_out_command(["git", "ls-remote", git_url + ".git"])
)
latest = latest_commits[0].split("\t")[0]
version = f"git+{git_url}.git@{latest}"
else:
version = f"git+{git_url}.{version}"
elif version.startswith("git+"):
pass
else:
version = f"{package}=={version}"
click.echo("Using " + click.style(f"{version}", fg="green"))
return version
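# Illustrative inputs/outputs for parse_version: "1.0.0rc5" becomes
# "allennlp==1.0.0rc5", while "git@master" resolves the latest commit first
# and becomes "git+https://github.com/allenai/allennlp.git@<sha>".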
def check_for_beaker():
# Print beaker version for debugging. If beaker is not installed, this will
    # exit with an error and notify the user.
echo_command_output(["beaker", "--version"])
_DEFAULT_EXPERIMENT_NAME: Optional[str] = None
def setup(ctx, param, config_path):
check_for_beaker()
path = cached_path(config_path)
# If this is a local json/jsonnet file, we'll use the file basename as the
# the default name of the experiment.
global _DEFAULT_EXPERIMENT_NAME
if path.endswith(".json") or path.endswith(".jsonnet"):
_DEFAULT_EXPERIMENT_NAME = os.path.splitext(os.path.basename(path))[
0
] + date.today().strftime("_%Y%m%d")
return path
def parse_gpus(ctx, param, value):
if value is None:
params_file = ctx.params["config"]
gpus: int = 0
params = Params.from_file(params_file).as_dict()
if "distributed" in params:
cuda_devices = params["distributed"].get("cuda_devices")
if cuda_devices:
gpus = len([d for d in cuda_devices if d >= 0])
else:
cuda_device = params.get("trainer", {}).get("cuda_device")
if isinstance(cuda_device, int) and cuda_device >= 0:
gpus = 1
value = gpus
click.echo("Config specifies " + click.style(f"{value}", fg="green") + " gpus")
elif not isinstance(value, int):
value = int(value)
return value
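# When --gpus is omitted, parse_gpus infers the count from the training
# config: the number of non-negative `distributed.cuda_devices`, else 1 when
# `trainer.cuda_device` >= 0, else 0.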
def validate_includes(ctx, param, value):
if value:
for path, _ in value:
if not os.path.exists(path):
                raise click.BadParameter(f"path {path} doesn't exist")
return value
@click.command()
@click.argument(
"config",
callback=setup,
)
@click.option(
"--name",
prompt="What do you want to call your experiment?",
default=lambda: _DEFAULT_EXPERIMENT_NAME,
help="The name to give the experiment on beaker.",
)
@click.option(
"--allennlp-version",
prompt="What version of AllenNLP do you want to use?",
default="git@master",
show_default=True,
    help="The PyPI version, branch, or commit SHA of AllenNLP to use. "
"Git branches and commits should be prefixed with 'git@'. For example, "
"'git@master' or '1.0.0rc5'.",
callback=parse_version,
)
@click.option(
"--models-version",
prompt="What version (if any) of AllenNLP Models do you want to use?",
default="",
help="The PyPI version, branch, or commit SHA of AllenNLP Models to use, if any. "
"Git branches and commits should be prefixed with 'git@'.",
callback=parse_version,
)
@click.option(
"--packages",
prompt="What other Python packages does your experiment need?",
help="Additional Python packages to install in the docker image. "
"The value of this argument will be passed directly to `pip install`.",
default="",
)
@click.option(
"--gpus",
default=None,
show_default="parsed from training config",
callback=parse_gpus,
type=click.INT,
help="The number of GPUs to reserve for your experiment. If not specified "
"the GPUs will be guessed from the training config.",
)
@click.option(
"--workspace",
default=os.environ.get("BEAKER_DEFAULT_WORKSPACE", ""),
show_default="$BEAKER_DEFAULT_WORKSPACE",
prompt="Which beaker workspace do you want to use?",
help="The beaker workspace to submit the experiment to.",
)
@click.option(
"--user",
default=os.environ.get("BEAKER_DEFAULT_USER", ""),
show_default="$BEAKER_DEFAULT_USER",
prompt="What is your beaker username?",
help="The beaker username to submit the experiment under.",
)
@click.option(
"--include",
type=(str, str),
multiple=True,
prompt="Do you want to include any other files or directories?",
help="A list of files or directories to include.",
callback=validate_includes,
)
@click.option("-v", "--verbose", count=True)
@click.option("--dry-run", is_flag=True)
@click.option(
"--cluster", type=str, default=DEFAULT_CLUSTER, help="The beaker cluster to use."
)
def run(
config: str,
name: str,
allennlp_version: str,
models_version: str,
packages: str,
gpus: int,
workspace: str,
user: str,
include: Tuple[Tuple[str, str], ...],
verbose: int,
dry_run: bool,
cluster: str,
):
# We create a temp directory to use as context for the Docker build, and
# also to create a temporary beaker config file.
with TemporaryDirectory() as context_dir:
# Write the training config to the context directory.
training_config_path = os.path.join(context_dir, "config.jsonnet")
params = Params.from_file(config)
params.to_file(training_config_path)
# Create a unique tag to use.
image_id = str(uuid.uuid4())
local_image_name = f"allennlp-beaker-{name}:{image_id}"
beaker_image_name = f"allennlp-beaker-{name}-{image_id}"
if models_version:
packages = models_version + " " + packages
packages = packages.strip()
        # Write the Dockerfile to the context directory.
dockerfile_path = os.path.join(context_dir, "Dockerfile")
with open(dockerfile_path, "w") as dockerfile:
dockerfile.write(DOCKERFILE)
if packages:
dockerfile.write(DOCKERFILE_EXTRA_STEPS)
# Write the beaker config to the context directory.
beaker_config_path = os.path.join(context_dir, "config.yml")
with open(beaker_config_path, "w") as beaker_config:
beaker_config.write(
yaml.dump(
create_beaker_config(
name=name,
image=user + "/" + beaker_image_name,
gpus=gpus,
description=f"{allennlp_version} {packages}",
cluster=cluster,
)
)
)
if verbose:
click.echo("Beaker config:")
for line in shell_out_command(["cat", beaker_config_path]):
print(line)
# Copy any other include files.
if include:
for (path, dest) in include:
dest = os.path.join(context_dir, dest)
click.echo(f"Copying {path} to {dest}")
if os.path.isdir(path):
shutil.copytree(path, dest)
else:
shutil.copy(path, dest)
# Build the Docker image.
click.echo(
"Building docker image with name "
+ click.style(local_image_name, fg="green")
+ "..."
)
build_args = [
"docker",
"build",
"--build-arg",
f"ALLENNLP={allennlp_version}",
]
if packages:
build_args.extend(["--build-arg", f"PACKAGES={packages}"])
build_args.extend(["-t", local_image_name, context_dir])
if verbose:
for line in shell_out_command(build_args):
print(line)
else:
with click_spinner.spinner():
deque(shell_out_command(build_args), maxlen=0)
if dry_run:
click.echo("Run the following to check the Docker image:\n")
click.echo(
f" docker run --rm -it --entrypoint /bin/bash {local_image_name}"
)
return None
# Publish the image to beaker.
click.echo("Publishing image to beaker...")
with click_spinner.spinner():
deque(
shell_out_command(
[
"beaker",
"image",
"create",
"-n",
beaker_image_name,
local_image_name,
]
),
maxlen=0,
)
# Submit the experiment to beaker.
click.echo("Submitting experiment to beaker...")
cmds = [
"beaker",
"experiment",
"create",
"--name",
name,
"-f",
beaker_config_path,
]
if workspace:
cmds.extend(["--workspace", workspace])
echo_command_output(cmds)
if __name__ == "__main__":
run()
| 30.962312 | 87 | 0.587519 |
4377696ce83f2def968224acdabc6e8ebdd355c8 | 64,575 | py | Python | cogs/settings.py | TAG-Epic/bot | bbcb7dab126bd79ee1e03056f32ec8d01a303ff1 | ["MIT"] | null | null | null | cogs/settings.py | TAG-Epic/bot | bbcb7dab126bd79ee1e03056f32ec8d01a303ff1 | ["MIT"] | null | null | null | cogs/settings.py | TAG-Epic/bot | bbcb7dab126bd79ee1e03056f32ec8d01a303ff1 | ["MIT"] | null | null | null |
"""
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import discord
from discord.ext import commands, tasks
import datetime
import json
import asyncpg
import typing
import asyncio
import aiohttp
import humanfriendly
import functools
import re
from random import randint
from fire.converters import TextChannel, Role, Member
watchedcmds = ['purge']
region = {
'amsterdam': '🇳🇱 Amsterdam',
'brazil': '🇧🇷 Brazil',
'eu-central': '🇪🇺 Central Europe',
'eu-west': '🇪🇺 Western Europe',
'europe': '🇪🇺 Europe',
    'frankfurt': '🇩🇪 Frankfurt',
'hongkong': '🇭🇰 Hong Kong',
'india': '🇮🇳 India',
'japan': '🇯🇵 Japan',
    'london': '🇬🇧 London',
'russia': '🇷🇺 Russia',
'singapore': '🇸🇬 Singapore',
'southafrica': '🇿🇦 South Africa',
'sydney': '🇦🇺 Sydney',
'us-central': '🇺🇸 Central US',
'us-south': '🇺🇸 US South',
'us-east': '🇺🇸 US East',
'us-west': '🇺🇸 US West',
'vip-us-east': '🇺🇸 US East (VIP)',
'vip-us-west': '🇺🇸 US West (VIP)',
'vip-amsterdam': '🇳🇱 Amsterdam (VIP)'
}
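# Maps str(discord.VoiceRegion) values to display names for guild-update logs.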
class Settings(commands.Cog):
def __init__(self, bot):
self.bot = bot
        self.recentgban = []
        self.joincache = {}
        # Assumed fix: on_raid_attempt reads self.antiraid (guild id -> alert
        # channel id), which was never initialised in this snippet.
        self.antiraid = {}
if not hasattr(self.bot, 'invites'):
self.bot.invites = {}
self.bot.aliases = {}
self.bot.loop.create_task(self.load_invites())
self.bot.loop.create_task(self.load_aliases())
self.bot.loop.create_task(self.load_data())
self.refresh_invites.start()
def clean(self, text: str):
return re.sub(r'[^A-Za-z0-9.\/ ]', '', text, 0, re.MULTILINE)
async def load_data(self):
await self.bot.wait_until_ready()
for g in self.bot.guilds:
self.joincache[g.id] = []
message = self.bot.get_cog('Message')
message.raidmsgs[g.id] = None
message.msgraiders[g.id] = []
@tasks.loop(minutes=2)
async def refresh_invites(self):
for gid in self.bot.premiumGuilds:
await self.load_invites(gid)
def cog_unload(self):
self.refresh_invites.cancel()
@refresh_invites.after_loop
async def after_refresh_invites(self):
self.bot.logger.warn(f'$YELLOWInvite refresher has stopped!')
async def load_aliases(self):
await self.bot.wait_until_ready()
self.bot.logger.info(f'$YELLOWLoading aliases...')
self.bot.aliases = {}
query = 'SELECT * FROM aliases;'
aliases = await self.bot.db.fetch(query)
self.bot.aliases['hasalias'] = []
for a in aliases:
self.bot.aliases['hasalias'].append(a['uid'])
for al in a['aliases']:
self.bot.aliases[al.lower()] = a['uid']
self.bot.logger.info(f'$GREENLoaded aliases!')
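    # self.bot.aliases maps each lowercased alias to a user id and keeps a
    # 'hasalias' list of user ids that have at least one alias.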
async def load_invites(self, gid: int = None):
if not gid:
self.bot.invites = {}
for gid in self.bot.premiumGuilds:
guild = self.bot.get_guild(gid)
if not guild:
continue
invites = []
try:
invites = await guild.invites()
if 'VANITY_URL' in guild.features:
vanity = await guild.vanity_invite()
invites.append(vanity)
except (discord.Forbidden, discord.HTTPException) as e:
if isinstance(e, discord.Forbidden):
continue
if isinstance(e, discord.HTTPException) and invites == []:
continue
self.bot.invites[guild.id] = {}
for invite in invites:
self.bot.invites[guild.id][invite.code] = invite.uses
else:
self.bot.invites[gid] = {}
guild = self.bot.get_guild(gid)
if guild:
invites = []
try:
invites = await guild.invites()
if 'VANITY_URL' in guild.features:
vanity = await guild.vanity_invite()
invites.append(vanity)
except (discord.Forbidden, discord.HTTPException) as e:
if isinstance(e, discord.Forbidden):
return
if isinstance(e, discord.HTTPException) and invites == []:
return
self.bot.invites[guild.id] = {}
for invite in invites:
self.bot.invites[guild.id][invite.code] = invite.uses
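    # The snapshots above (invite code -> use count, vanity URL included when
    # available) let on_member_join attribute a join to a specific invite.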
@commands.Cog.listener()
async def on_membercacheadd(self, gid: int, mid: int):
        self.joincache.setdefault(gid, []).append(mid)
await asyncio.sleep(20)
self.joincache[gid].remove(mid)
@commands.Cog.listener()
async def on_raid_attempt(self, guild: discord.Guild, raiders: list = []):
if not guild:
return
channel = guild.get_channel(self.antiraid.get(guild.id, 0))
if channel:
try:
raidmentions = ', '.join([x.mention for x in raiders])
                potential = await channel.send(f'There seems to be a raid going on. Here are the raiders I found\n{raidmentions}\n\nClick the tick to ban.')
firesuccess = discord.utils.get(self.bot.emojis, id=674359197378281472)
firefailed = discord.utils.get(self.bot.emojis, id=674359427830382603)
await potential.add_reaction(firesuccess)
await potential.add_reaction(firefailed)
def ban_check(r, u):
return u.permissions_in(channel).ban_members and u.id != guild.me.id
doi, ban = await self.bot.wait_for('reaction_add', check=ban_check)
if doi.emoji == firesuccess and ban:
try:
[await guild.ban(discord.Object(x), reason=f'Automatic raid prevention, confirmed by {ban}') for x in raiders if guild.get_member(x)]
except Exception:
pass
return await channel.send('I have banned all raiders I found!')
if doi.emoji == firefailed:
await potential.delete()
return await channel.send('Ok, I will ignore it.')
except Exception:
try:
await channel.send('Something went wrong')
except Exception:
return
else:
return
@commands.Cog.listener()
async def on_msgraid_attempt(self, guild: discord.Guild, raiders: list = []):
if not guild:
return
channel = guild.get_channel(self.antiraid.get(guild.id, 0))
if channel:
try:
raidmentions = ', '.join([x.mention for x in raiders])
                potential = await channel.send(f'There seems to be a raid going on. Here are the raiders I found\n{raidmentions}\n\nClick the tick to ban.')
firesuccess = discord.utils.get(self.bot.emojis, id=674359197378281472)
firefailed = discord.utils.get(self.bot.emojis, id=674359427830382603)
await potential.add_reaction(firesuccess)
await potential.add_reaction(firefailed)
def ban_check(r, u):
return u.permissions_in(channel).ban_members and u.id != guild.me.id
doi, ban = await self.bot.wait_for('reaction_add', check=ban_check)
if doi.emoji == firesuccess and ban:
try:
[await guild.ban(x, reason=f'Automatic raid prevention, confirmed by {ban}') for x in raiders if guild.get_member(x.id)]
except Exception:
pass
return await channel.send('I have banned all raiders I found!')
if doi.emoji == firefailed:
await potential.delete()
return await channel.send('Ok, I will ignore it.')
except Exception:
try:
await channel.send('Something went wrong')
except Exception:
return
else:
return
@commands.Cog.listener()
async def on_member_join(self, member):
premium = self.bot.premiumGuilds
if member.guild.id in premium:
self.bot.dispatch('membercacheadd', member.guild.id, member.id)
if len(self.joincache[member.guild.id]) >= 50:
self.bot.dispatch('raid_attempt', member.guild, self.joincache[member.guild.id])
usedinvite = None
if not member.bot:
if member.guild.id in self.bot.invites and member.guild.id in premium:
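                # Diff the cached snapshot against a fresh one; the invite
                # whose use count changed is assumed to be the one used.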
before = self.bot.invites[member.guild.id].copy()
await self.load_invites(member.guild.id)
after = self.bot.invites[member.guild.id]
for inv in before:
a = after.get(inv, False)
b = before[inv]
if b != a:
usedinvite = inv
if not usedinvite and 'PUBLIC' in member.guild.features:
if 'DISCOVERABLE' in member.guild.features:
usedinvite = 'Joined without invite. Potentially from Server Discovery'
else:
usedinvite = 'Joined without invite. Potentially from lurking'
if self.bot.configs[member.guild.id].get('greet.joinmsg'):
joinchan = self.bot.configs[member.guild.id].get('greet.joinchannel')
joinmsg = self.bot.configs[member.guild.id].get('greet.joinmsg')
if joinchan and joinmsg:
message = joinmsg.replace('{user.mention}', member.mention).replace('{user}', str(member)).replace('{user.name}', member.name).replace('{user.discrim}', member.discriminator).replace('{server}', member.guild.name).replace('{guild}', member.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
await joinchan.send(message)
logch = self.bot.configs[member.guild.id].get('log.moderation')
if self.bot.configs[member.guild.id].get('mod.globalbans'):
try:
banned = await self.bot.ksoft.bans_check(member.id)
if banned:
try:
await member.guild.ban(member, reason=f'{member} was found on global ban list')
self.recentgban.append(f'{member.id}-{member.guild.id}')
if logch:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{member.mention} was banned**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.add_field(name='Reason', value=f'{member} was found on global ban list', inline=False)
embed.set_footer(text=f"Member ID: {member.id}")
try:
return await logch.send(embed=embed)
except Exception:
pass
except discord.HTTPException:
return
except Exception:
pass
if logch:
#https://giphy.com/gifs/pepsi-5C0a8IItAWRebylDRX
embed = discord.Embed(title='Member Joined', url='https://i.giphy.com/media/Nx0rz3jtxtEre/giphy.gif', color=discord.Color.green(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}', icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.add_field(name='Account Created', value=humanfriendly.format_timespan(datetime.datetime.utcnow() - member.created_at) + ' ago', inline=False)
if usedinvite and member.guild.id in premium:
embed.add_field(name='Invite Used', value=usedinvite, inline=False)
if member.guild.id not in premium and randint(0, 100) == 69:
embed.add_field(name='Want to see what invite they used?', value='Fire Premium allows you to do that and more.\n[Get Premium](https://gaminggeek.dev/patreon)\n[Premium Commands](https://gaminggeek.dev/commands#premium-commands)', inline=False)
if member.bot:
try:
async for e in member.guild.audit_logs(action=discord.AuditLogAction.bot_add, limit=10):
if e.target.id == member.id:
embed.add_field(name='Invited By', value=f'{e.user} ({e.user.id})', inline=False)
break
except Exception as e:
pass
embed.set_footer(text=f'User ID: {member.id}')
try:
await logch.send(embed=embed)
except Exception:
pass
try:
if self.bot.configs[member.guild.id].get('mod.autodecancer'):
if not self.bot.isascii(member.name.replace('‘', '\'').replace('“', '"').replace('“', '"')): #fix weird mobile characters
name = self.bot.configs[member.guild.id].get('utils.badname') or f'John Doe {member.discriminator}'
return await member.edit(nick=name)
if self.bot.configs[member.guild.id].get('mod.autodehoist'):
if self.bot.ishoisted(member.name):
name = self.bot.configs[member.guild.id].get('utils.badname') or f'John Doe {member.discriminator}'
return await member.edit(nick=name)
except Exception:
pass
@commands.Cog.listener()
async def on_member_remove(self, member):
if self.bot.configs[member.guild.id].get('greet.leavemsg'):
leavechan = self.bot.configs[member.guild.id].get('greet.leavechannel')
leavemsg = self.bot.configs[member.guild.id].get('greet.leavemsg')
if leavechan and leavemsg:
message = leavemsg.replace('{user.mention}', member.mention).replace('{user}', str(member)).replace('{user.name}', member.name).replace('{user.discrim}', member.discriminator).replace('{server}', member.guild.name).replace('{guild}', member.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
await leavechan.send(message)
logch = self.bot.configs[member.guild.id].get('log.moderation')
if logch:
moderator = None
action = None
if member.guild.me.guild_permissions.view_audit_log:
async for e in member.guild.audit_logs(limit=5):
if e.action in [discord.AuditLogAction.kick, discord.AuditLogAction.ban] and e.target.id == member.id:
moderator = e.user
if moderator == member.guild.me:
return
if e.action == discord.AuditLogAction.kick:
action = 'Kicked'
reason = e.reason
if e.action == discord.AuditLogAction.ban:
action = 'Banned'
reason = e.reason
break
embed = discord.Embed(title='Member Left', url='https://i.giphy.com/media/5C0a8IItAWRebylDRX/source.gif', color=discord.Color.red(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}', icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
if member.nick:
embed.add_field(name='Nickname', value=member.nick, inline=False)
roles = [role.mention for role in member.roles if role != member.guild.default_role]
if roles:
embed.add_field(name='Roles', value=', '.join(roles), inline=False)
if moderator and action:
embed.add_field(name=f'{action} By', value=f'{moderator} ({moderator.id})', inline=False)
if action and reason:
embed.add_field(name=f'{action} for', value=reason, inline=False)
embed.set_footer(text=f'User ID: {member.id}')
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_user_update(self, before, after):
for guild in self.bot.guilds:
if before.name != after.name:
try:
member = guild.get_member(after.id)
if member:
if self.bot.configs[member.guild.id].get('mod.autodecancer'):
nitroboosters = discord.utils.get(member.guild.roles, id=585534346551754755)
                            if member.guild_permissions.manage_nicknames or (nitroboosters and nitroboosters in member.roles):
                                pass
                            else:
nick = after.name
badname = self.bot.configs[member.guild.id].get('utils.badname') or f'John Doe {member.discriminator}'
if not self.bot.isascii(nick.replace('‘', '\'').replace('“', '"').replace('“', '"')):
return await member.edit(nick=badname)
else:
if member.nick and badname in member.nick:
return await member.edit(nick=None)
if self.bot.configs[member.guild.id].get('mod.autodehoist'):
nitroboosters = discord.utils.get(member.guild.roles, id=585534346551754755)
                            if member.guild_permissions.manage_nicknames or (nitroboosters and nitroboosters in member.roles):
                                pass
                            else:
nick = after.name
badname = self.bot.configs[member.guild.id].get('utils.badname') or f'John Doe {member.discriminator}'
if self.bot.ishoisted(nick):
return await member.edit(nick=badname)
else:
if member.nick and badname in member.nick:
return await member.edit(nick=None)
except Exception:
pass
@commands.Cog.listener()
async def on_member_update(self, before, after):
if before.nick != after.nick:
badname = self.bot.configs[after.guild.id].get('utils.badname') or f'John Doe {after.discriminator}'
if after.nick is not None and badname in after.nick:
return
try:
if self.bot.configs[after.guild.id].get('mod.autodecancer'):
nitroboosters = discord.utils.get(after.guild.roles, id=585534346551754755)
if after.guild_permissions.manage_nicknames or nitroboosters in after.roles:
pass
else:
if not after.nick:
nick = after.name
else:
nick = after.nick
if not self.bot.isascii(nick.replace('‘', '\'').replace('“', '"').replace('“', '"')):
return await after.edit(nick=badname)
if self.bot.configs[after.guild.id].get('mod.autodehoist'):
nitroboosters = discord.utils.get(after.guild.roles, id=585534346551754755)
if after.guild_permissions.manage_nicknames or nitroboosters in after.roles:
pass
else:
if not after.nick:
nick = after.name
else:
nick = after.nick
if self.bot.ishoisted(nick):
return await after.edit(nick=badname)
except Exception:
pass
logch = self.bot.configs[after.guild.id].get('log.action')
if logch and after.nick:
embed = discord.Embed(color=after.color, timestamp=datetime.datetime.utcnow(), description=f'{after.mention}\'**s nickname was changed**')
embed.set_author(name=after, icon_url=str(after.avatar_url_as(static_format='png', size=2048)))
embed.add_field(name='Before', value=before.nick, inline=False)
embed.add_field(name='After', value=after.nick, inline=False)
embed.set_footer(text=f"Author ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.roles != after.roles:
logch = self.bot.configs[after.guild.id].get('log.action')
if logch:
broles = []
aroles = []
changed = []
for role in before.roles:
broles.append(role.name)
for role in after.roles:
aroles.append(role.name)
s = set(aroles)
removed = [x for x in broles if x not in s]
s = set(broles)
added = [x for x in aroles if x not in s]
if len(added) == 1:
joinedat = datetime.datetime.utcnow() - after.joined_at
if joinedat < datetime.timedelta(minutes=1):
return
role = discord.utils.get(after.guild.roles, name=added[0])
if not role:
return
embed = discord.Embed(color=role.color, timestamp=datetime.datetime.utcnow(), description=f'{after.mention}\'s roles were changed\n**{after.name} was given the** {role.mention} **role**')
embed.set_author(name=after, icon_url=str(after.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {after.id} | Role ID: {role.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if len(removed) == 1:
role = discord.utils.get(after.guild.roles, name=removed[0])
if not role:
return
embed = discord.Embed(color=role.color, timestamp=datetime.datetime.utcnow(), description=f'{after.mention}\'s roles were changed\n**{after.name} was removed from the** {role.mention} **role**')
embed.set_author(name=after, icon_url=str(after.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {after.id} | Role ID: {role.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_guild_channel_pins_update(self, channel, last_pin = 0):
logch = self.bot.configs[channel.guild.id].get('log.action')
if logch:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'{channel.mention}\'**s pinned messages were updated**')
embed.set_author(name=channel.guild.name, icon_url=str(channel.guild.icon_url))
embed.set_footer(text=f"Channel ID: {channel.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_guild_role_create(self, role):
logch = self.bot.configs[role.guild.id].get('log.action')
if logch:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**A new role was created**\n{role.mention}')
embed.set_author(name=role.guild.name, icon_url=str(role.guild.icon_url))
embed.set_footer(text=f"Role ID: {role.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_guild_role_delete(self, role):
logch = self.bot.configs[role.guild.id].get('log.action')
if logch:
embed = discord.Embed(color=role.color, timestamp=datetime.datetime.utcnow(), description=f'**The role** `{role.name}` **was deleted**')
embed.set_author(name=role.guild.name, icon_url=str(role.guild.icon_url))
embed.set_footer(text=f"Role ID: {role.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
logch = self.bot.configs[member.guild.id].get('log.action')
if logch:
if before.deaf != after.deaf:
if after.deaf:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **was server deafened**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
if after.channel:
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
elif not after.deaf:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **was server undeafened**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
if after.channel:
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.mute != after.mute:
if after.mute:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **was server muted**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
if after.channel:
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
elif not after.mute:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **was server unmuted**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
if after.channel:
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.self_video != after.self_video:
if after.self_video:
if after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **started sharing video in {after.channel.name}**')
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **started sharing video**')
embed.set_footer(text=f"Member ID: {member.id}")
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
try:
await logch.send(embed=embed)
except Exception:
pass
elif not after.self_video:
if after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **stopped sharing video in {after.channel.name}**')
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **stopped sharing video**')
embed.set_footer(text=f"Member ID: {member.id}")
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
try:
await logch.send(embed=embed)
except Exception:
pass
if before.self_stream != after.self_stream:
if after.self_stream:
if after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **went live in {after.channel.name}**')
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **went live**')
embed.set_footer(text=f"Member ID: {member.id}")
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
try:
await logch.send(embed=embed)
except Exception:
pass
elif not after.self_stream:
if after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **stopped being live in {after.channel.name}**')
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
else:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **stopped being live**')
embed.set_footer(text=f"Member ID: {member.id}")
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
try:
await logch.send(embed=embed)
except Exception:
pass
if before.channel != after.channel:
if before.channel and after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **switched voice channel**')
embed.add_field(name='Before', value=before.channel.name, inline=False)
embed.add_field(name='After', value=after.channel.name, inline=False)
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {member.id} | Old Channel ID: {before.channel.id} | New Channel ID: {after.channel.id}")
try:
return await logch.send(embed=embed)
except Exception:
pass
if after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **joined voice channel {after.channel.name}**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {after.channel.id}")
try:
return await logch.send(embed=embed)
except Exception:
pass
elif not after.channel:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow(), description=f'{member.mention} **left voice channel {before.channel.name}**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {member.id} | Channel ID: {before.channel.id}")
try:
return await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_guild_update(self, before, after):
logch = self.bot.configs[after.id].get('log.action')
if logch:
if before.name != after.name:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**Guild name was changed**')
embed.add_field(name='Before', value=before.name, inline=False)
embed.add_field(name='After', value=after.name, inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.description != after.description and after.id != 411619823445999637:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**Guild description was changed**')
embed.add_field(name='Before', value=before.description, inline=False)
embed.add_field(name='After', value=after.description, inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.region != after.region:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s region was changed**')
embed.add_field(name='Before', value=region[str(before.region)], inline=False)
embed.add_field(name='After', value=region[str(after.region)], inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.owner != after.owner:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name} was transferred to a new owner**')
embed.add_field(name='Before', value=before.owner, inline=False)
embed.add_field(name='After', value=after.owner, inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id} | Old Owner ID: {before.owner.id} | New Owner ID: {after.owner.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.verification_level != after.verification_level:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s verification level was changed**')
embed.add_field(name='Before', value=str(before.verification_level).capitalize(), inline=False)
embed.add_field(name='After', value=str(after.verification_level).capitalize(), inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.explicit_content_filter != after.explicit_content_filter:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s content filter level was changed**')
embed.add_field(name='Before', value=str(before.explicit_content_filter).capitalize().replace('_', ''), inline=False)
embed.add_field(name='After', value=str(after.explicit_content_filter).capitalize().replace('_', ''), inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if set(before.features) != set(after.features):
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s features were updated**')
s = set(after.features)
removed = [x for x in before.features if x not in s]
ignored = ['PREMIUM']
[removed.remove(f) for f in ignored if f in removed]
s = set(before.features)
added = [x for x in after.features if x not in s]
[added.remove(f) for f in ignored if f in added]
if added:
features = []
for feature in added:
features.append(f'> {feature}')
embed.add_field(name='Added', value='\n'.join(features), inline=False)
if removed:
features = []
for feature in removed:
features.append(f'> {feature}')
embed.add_field(name='Removed', value='\n'.join(features), inline=False)
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
if added or removed:
try:
await logch.send(embed=embed)
except Exception:
pass
if before.banner != after.banner:
if after.banner:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s banner was changed**')
embed.set_image(url=str(after.banner_url))
else:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s banner was removed**')
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.splash != after.splash:
if after.splash:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s splash was changed**')
embed.set_image(url=str(after.splash_url))
else:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s splash was removed**')
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.discovery_splash != after.discovery_splash:
if after.discovery_splash:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s discovery splash was changed**')
embed.set_image(url=str(after.discovery_splash_url))
else:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s discovery splash was removed**')
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.premium_tier != after.premium_tier:
if after.premium_tier > before.premium_tier:
embed = discord.Embed(color=discord.Color.from_rgb(255, 115, 250), timestamp=datetime.datetime.utcnow(), description=f'**{after.name} got boosted to Level {after.premium_tier}**')
if after.premium_tier < before.premium_tier:
embed = discord.Embed(color=discord.Color.from_rgb(255, 115, 250), timestamp=datetime.datetime.utcnow(), description=f'**{after.name} got weakened to Level {after.premium_tier}**')
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
if before.system_channel != after.system_channel:
if after.system_channel:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s system channel was changed to {after.system_channel.mention}**')
else:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{after.name}\'s system channel was removed**')
embed.set_author(name=after.name, icon_url=str(after.icon_url))
embed.set_footer(text=f"Guild ID: {after.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_member_ban(self, guild, member):
if f'{member.id}-{guild.id}' in self.recentgban:
self.recentgban.remove(f'{member.id}-{guild.id}')
return
logch = self.bot.configs[guild.id].get('log.action')
if logch:
embed = discord.Embed(color=member.color if member.color != discord.Color.default() else discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**{member.mention} was banned**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_member_unban(self, guild, member):
logch = self.bot.configs[guild.id].get('log.action')
if logch:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**{member} was unbanned**')
embed.set_author(name=member, icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
embed.set_footer(text=f"Member ID: {member.id}")
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_invite_create(self, invite: discord.Invite):
guild = invite.guild
if guild.id in self.bot.premiumGuilds:
self.bot.invites.get(guild.id, {})[invite.code] = 0
if not isinstance(guild, discord.Guild):
return
logch = self.bot.configs[guild.id].get('log.action')
if logch:
embed = discord.Embed(color=discord.Color.green(), timestamp=datetime.datetime.utcnow(), description=f'**An invite was created**')
embed.set_author(name=guild.name, icon_url=str(guild.icon_url_as(static_format='png', size=2048)))
embed.add_field(name='Invite Code', value=invite.code, inline=False)
embed.add_field(name='Max Uses', value=invite.max_uses, inline=False)
embed.add_field(name='Temporary', value=invite.temporary, inline=False)
if invite.temporary:
delta = datetime.datetime.utcnow() + datetime.timedelta(seconds=invite.max_age)
if isinstance(delta, datetime.timedelta):
embed.add_field(name='Expires in', value=humanfriendly.format_timespan(delta), inline=False)
if isinstance(invite.channel, discord.abc.GuildChannel):
embed.add_field(name='Channel', value=f'#{invite.channel.name}({invite.channel.id})', inline=False)
if invite.inviter:
embed.set_footer(text=f'Created by: {invite.inviter} ({invite.inviter.id})')
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.Cog.listener()
async def on_invite_delete(self, invite: discord.Invite):
guild = invite.guild
if guild.id in self.bot.premiumGuilds:
self.bot.invites.get(guild.id, {}).pop(invite.code, 'lmao')
if not isinstance(guild, discord.Guild):
return
whodidit = None
async for a in guild.audit_logs(action=discord.AuditLogAction.invite_delete, limit=1):
if a.target.code == invite.code:
whodidit = a.user
logch = self.bot.configs[guild.id].get('log.action')
if logch:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'**An invite was deleted**')
embed.set_author(name=guild.name, icon_url=str(guild.icon_url_as(static_format='png', size=2048)))
embed.add_field(name='Invite Code', value=invite.code, inline=False)
if isinstance(invite.channel, discord.abc.GuildChannel):
embed.add_field(name='Channel', value=f'#{invite.channel.name}({invite.channel.id})', inline=False)
if whodidit:
embed.set_footer(text=f'Deleted by: {whodidit} ({whodidit.id})')
try:
await logch.send(embed=embed)
except Exception:
pass
@commands.command(name='settings', aliases=['setup'], description='Configure my settings')
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(add_reactions=True, external_emojis=True)
@commands.guild_only()
async def gsettings(self, ctx):
firesuccess = discord.utils.get(self.bot.emojis, id=674359197378281472)
firefailed = discord.utils.get(self.bot.emojis, id=674359427830382603)
await ctx.send('Hey, I\'m going to guide you through my settings. This shouldn\'t take long, there\'s only 6 options to configure')
await asyncio.sleep(3)
await ctx.send('First, we\'ll configure logging. Please give a channel for moderation logs or say `skip` to disable...')
def modlog_check(message):
if message.author != ctx.author:
return False
else:
return True
try:
modlogsmsg = await self.bot.wait_for('message', timeout=30.0, check=modlog_check)
if modlogsmsg.content.lower() != 'skip':
try:
modlogs = await TextChannel().convert(ctx, modlogsmsg.content)
except commands.BadArgument:
await ctx.error('Channel not found, moderation logs are now disabled.')
modlogs = None
else:
await ctx.success(f'Setting moderation logs to {modlogs.mention}')
else:
await ctx.success('Skipping moderation logs...')
modlogs = None
await self.bot.configs[ctx.guild.id].set('log.moderation', modlogs)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
await asyncio.sleep(2)
await ctx.send('Ok. Next we\'ll configure action logs. This is where actions such as deleted messages, edited messages etc. are logged.')
await asyncio.sleep(2)
await ctx.send('Please give a channel for action logs or say `skip` to disable...')
def actionlog_check(message):
if message.author != ctx.author:
return False
else:
return True
try:
actionlogsmsg = await self.bot.wait_for('message', timeout=30.0, check=modlog_check)
if actionlogsmsg.content.lower() != 'skip':
try:
actionlogs = await TextChannel().convert(ctx, actionlogsmsg.content)
except commands.BadArgument:
await ctx.error('Channel not found, action logs are now disabled.')
actionlogs = None
else:
await ctx.success(f'Setting action logs to {actionlogs.mention}')
else:
await ctx.success('Skipping action logs...')
actionlogs = None
await self.bot.configs[ctx.guild.id].set('log.action', actionlogs)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
try:
[await m.delete() for m in setupmsgs]
return
except Exception:
return
await asyncio.sleep(2)
await ctx.send('Ok. Next is link deletion. Discord invites are enabled by default but you can enable more with `$linkfilter`')
await asyncio.sleep(2)
linkfiltermsg = await ctx.send(f'React with {firesuccess} to enable and {firefailed} to disable')
await linkfiltermsg.add_reaction(firesuccess)
await linkfiltermsg.add_reaction(firefailed)
def linkfilter_check(reaction, user):
if user != ctx.author:
return False
if reaction.emoji == firefailed and reaction.message.id == linkfiltermsg.id:
return True
if reaction.emoji == firesuccess and reaction.message.id == linkfiltermsg.id:
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=linkfilter_check)
if reaction.emoji == firefailed:
linkfilter = []
await ctx.success('Disabling link filter...')
elif reaction.emoji == firesuccess:
linkfilter = self.bot.configs[ctx.guild.id].get('mod.linkfilter')
if not linkfilter:
linkfilter = ['discord']
await ctx.success(f'Enabling link filter. (If it was already enabled, your configuration won\'t change)')
await self.bot.configs[ctx.guild.id].set('mod.linkfilter', linkfilter)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
try:
[await m.delete() for m in setupmsgs]
return
except Exception:
return
await asyncio.sleep(2)
await ctx.send('Ok. Next is dupe checking. If a user attempts to send the same message again, I will delete it (that is, if I have permission to do so)')
await asyncio.sleep(2)
dupemsg = await ctx.send(f'React with {firesuccess} to enable and {firefailed} to disable')
await dupemsg.add_reaction(firesuccess)
await dupemsg.add_reaction(firefailed)
def dupemsg_check(reaction, user):
if user != ctx.author:
return False
if reaction.emoji == firefailed and reaction.message.id == dupemsg.id:
return True
if reaction.emoji == firesuccess and reaction.message.id == dupemsg.id:
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=dupemsg_check)
if reaction.emoji == firefailed:
dupecheck = False
await ctx.success('Disabling duplicate message filter...')
elif reaction.emoji == firesuccess:
dupecheck = True
await ctx.success(f'Enabling duplicate message filter')
await self.bot.configs[ctx.guild.id].set('mod.dupecheck', dupecheck)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
try:
[await m.delete() for m in setupmsgs]
return
except Exception:
return
await asyncio.sleep(2)
await ctx.send('Ok. Now we\'re onto global bans. Fire uses the KSoft.Si API to check for naughty people. If enabled, I will ban any of these naughty people if they attempt to join.')
await asyncio.sleep(2)
gbansmsg = await ctx.send(f'React with {firesuccess} to enable and {firefailed} to disable')
await gbansmsg.add_reaction(firesuccess)
await gbansmsg.add_reaction(firefailed)
def gban_check(reaction, user):
if user != ctx.author:
return False
if reaction.emoji == firefailed and reaction.message.id == gbansmsg.id:
return True
if reaction.emoji == firesuccess and reaction.message.id == gbansmsg.id:
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=gban_check)
if reaction.emoji == firefailed:
globalbans = False
await ctx.success('Disabling global ban check...')
elif reaction.emoji == firesuccess:
globalbans = True
await ctx.success(f'Enabling global ban check')
await self.bot.configs[ctx.guild.id].set('mod.globalbans', globalbans)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
try:
[await m.delete() for m in setupmsgs]
return
except Exception:
return
await asyncio.sleep(2)
await ctx.send('The penultimate setting, auto-decancer. This renames users with "cancerous" names (non-ascii)')
await asyncio.sleep(2)
autodcmsg = await ctx.send(f'React with {firesuccess} to enable and {firefailed} to disable')
await autodcmsg.add_reaction(firesuccess)
await autodcmsg.add_reaction(firefailed)
def dc_check(reaction, user):
if user != ctx.author:
return False
if reaction.emoji == firefailed and reaction.message.id == autodcmsg.id:
return True
if reaction.emoji == firesuccess and reaction.message.id == autodcmsg.id:
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=dc_check)
if reaction.emoji == firefailed:
audodc = False
await ctx.success('Disabling auto decancer...')
elif reaction.emoji == firesuccess:
audodc = True
await ctx.success(f'Enabling auto decancer')
await self.bot.configs[ctx.guild.id].set('mod.autodecancer', audodc)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
await asyncio.sleep(2)
await ctx.send('Finally, the last setting. Similar to the last one, auto-dehoist renames people with a non A-Z character at the start of their name.')
await asyncio.sleep(2)
autodhmsg = await ctx.send(f'React with {firesuccess} to enable and {firefailed} to disable')
await autodhmsg.add_reaction(firesuccess)
await autodhmsg.add_reaction(firefailed)
def dh_check(reaction, user):
if user != ctx.author:
return False
if reaction.emoji == firefailed and reaction.message.id == autodhmsg.id:
return True
if reaction.emoji == firesuccess and reaction.message.id == autodhmsg.id:
return True
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=dh_check)
if reaction.emoji == firefailed:
audodh = False
await ctx.success('Disabling auto dehoist...')
elif reaction.emoji == firesuccess:
audodh = True
await ctx.success(f'Enabling auto dehoist')
await self.bot.configs[ctx.guild.id].set('mod.autodehoist', audodh)
except asyncio.TimeoutError:
return await ctx.error(f'{ctx.author.mention}, you took too long. Stopping setup!')
await asyncio.sleep(2)
await ctx.send('Nice! We\'re all good to go. I\'ll send a recap in a moment. I just need to reload settings.')
config = self.bot.configs[ctx.guild.id]
embed = discord.Embed(title=":gear: Guild Settings", colour=ctx.author.color, description="Here's a list of the current guild settings", timestamp=datetime.datetime.utcnow())
embed.set_author(name=ctx.guild.name, icon_url=str(ctx.guild.icon_url))
embed.add_field(name="Moderation Logs", value=config.get('log.moderation').mention if config.get('log.moderation') else 'Not set.', inline=False)
embed.add_field(name="Action Logs", value=config.get('log.action').mention if config.get('log.action') else 'Not set.', inline=False)
embed.add_field(name="Link Filter", value=",".join(config.get('mod.linkfilter')) or 'No filters enabled.', inline=False)
embed.add_field(name="Global Ban Check (KSoft.Si API)", value=config.get('mod.globalbans'), inline=False)
embed.add_field(name="Auto-Decancer", value=config.get('mod.autodecancer'), inline=False)
embed.add_field(name="Auto-Dehoist", value=config.get('mod.autodehoist'), inline=False)
await ctx.send(embed=embed)
@commands.command(name='setlogs', aliases=['logging', 'log', 'logs'])
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def settings_logs(self, ctx, logtype: str = None, channel: TextChannel = None):
if not logtype or logtype and logtype.lower() not in ['mod', 'moderation', 'action']:
return await ctx.error(f'You must provide a log type, "moderation" or "action" for the log channel')
logtype = logtype.lower()
if logtype in ['mod', 'moderation']:
if not channel:
await self.bot.configs[ctx.guild.id].set('log.moderation', None)
return await ctx.success(f'Successfully reset the moderation logs channel.')
else:
await self.bot.configs[ctx.guild.id].set('log.moderation', channel)
return await ctx.success(f'Successfully set the moderation logs channel to {channel.mention}')
if logtype == 'action':
if not channel:
await self.bot.configs[ctx.guild.id].set('log.action', None)
return await ctx.success(f'Successfully reset the action logs channel.')
else:
await self.bot.configs[ctx.guild.id].set('log.action', channel)
return await ctx.success(f'Successfully set the action logs channel to {channel.mention}')
    @commands.command(name='modonly', description='Set channels to be moderator only (users with `Manage Messages` are moderators)')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def modonly(self, ctx, channels: commands.Greedy[TextChannel] = []):
current = self.bot.configs[ctx.guild.id].get('commands.modonly')
modonly = current.copy()
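        # Toggle: channels passed that are not yet mod-only get added,
        # channels that already were get removed.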
for sf in channels:
if sf not in modonly:
modonly.append(sf)
for sf in channels:
if sf in current:
modonly.remove(sf)
current = await self.bot.configs[ctx.guild.id].set('commands.modonly', modonly)
channelmentions = [c.mention for c in current]
if channelmentions:
channellist = ', '.join(channelmentions)
return await ctx.success(f'Commands can now only be run by moderators (those with Manage Messages permission) in:\n{channellist}.')
return await ctx.success(f'Moderator only channels have been reset')
    @commands.command(name='adminonly', description='Set channels to be admin only (users with `Manage Server` are admins)')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def adminonly(self, ctx, channels: commands.Greedy[TextChannel] = []):
current = self.bot.configs[ctx.guild.id].get('commands.adminonly')
adminonly = current.copy()
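        # Same toggle behaviour as modonly above, for admin-only channels.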
for sf in channels:
if sf not in current:
adminonly.append(sf)
for sf in channels:
if sf in current:
adminonly.remove(sf)
current = await self.bot.configs[ctx.guild.id].set('commands.adminonly', adminonly)
channelmentions = [c.mention for c in current]
if channelmentions:
channellist = ', '.join(channelmentions)
            return await ctx.success(f'Commands can now only be run by admins (those with Manage Server permission) in:\n{channellist}.')
return await ctx.success(f'Admin only channels have been reset')
@commands.command(name='joinmsg', description='Set the channel and message for join messages')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def joinmsg(self, ctx, channel: typing.Union[TextChannel, str] = None, *, message: str = None):
if not channel:
joinmsg = self.bot.configs[ctx.guild.id].get('greet.joinmsg')
joinchan = self.bot.configs[ctx.guild.id].get('greet.joinchannel')
if not joinmsg:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'<:xmark:674359427830382603> Please provide a channel and message for join messages.')
variables = '{user}: {fuser}\n{user.mention}: {fmention}\n{user.name}: {fname}\n{user.discrim}: {fdiscrim}\n{server}|{guild}: {fguild}'.replace('{fmention}', ctx.author.mention).replace('{fuser}', str(ctx.author)).replace('{fname}', ctx.author.name).replace('{fdiscrim}', ctx.author.discriminator).replace('{fguild}', ctx.guild.name)
embed.add_field(name='Variables', value=variables, inline=False)
return await ctx.send(embed=embed)
embed = discord.Embed(color=ctx.author.color, timestamp=datetime.datetime.utcnow(), description=f'**Current Join Message Settings**\nDo __{ctx.prefix}joinmsg disable__ to disable join messages')
embed.add_field(name='Channel', value=joinchan.mention if joinchan else 'Not Set (Not sure how you managed to do this)', inline=False)
message = joinmsg or 'Not set.'
message = message.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name)
embed.add_field(name='Message', value=message, inline=False)
variables = '{user}: {fuser}\n{user.mention}: {fmention}\n{user.name}: {fname}\n{user.discrim}: {fdiscrim}\n{server}|{guild}: {fguild}'.replace('{fmention}', ctx.author.mention).replace('{fuser}', str(ctx.author)).replace('{fname}', ctx.author.name).replace('{fdiscrim}', ctx.author.discriminator).replace('{fguild}', ctx.guild.name)
embed.add_field(name='Variables', value=variables, inline=False)
return await ctx.send(embed=embed)
if isinstance(channel, str) and channel.lower() in ['off', 'disable', 'false']:
joinmsg = self.bot.configs[ctx.guild.id].get('greet.joinmsg')
if not joinmsg:
return await ctx.error('Can\'t disable something that wasn\'t enabled. ¯\_(ツ)_/¯')
await self.bot.configs[ctx.guild.id].set('greet.joinmsg', '')
await self.bot.configs[ctx.guild.id].set('greet.joinchannel', None)
return await ctx.success(f'Successfully disabled join messages!')
if isinstance(channel, str):
return await ctx.error('You need to provide a valid channel')
if not message:
joinmsg = self.bot.configs[ctx.guild.id].get('greet.joinmsg')
if not joinmsg:
return await ctx.error('You can\'t set a channel without setting a message.')
await self.bot.configs[ctx.guild.id].set('greet.joinchannel', channel)
message = joinmsg.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
return await ctx.success(f'Join messages will show in {channel.mention}!\nExample: {message}')
else:
await self.bot.configs[ctx.guild.id].set('greet.joinmsg', message)
await self.bot.configs[ctx.guild.id].set('greet.joinchannel', channel)
message = message.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
return await ctx.success(f'Join messages will show in {channel.mention}!\nExample: {message}')
@commands.command(name='leavemsg', description='Set the channel and message for leave messages')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def leavemsg(self, ctx, channel: typing.Union[TextChannel, str] = None, *, message: str = None):
if not channel:
leavemsg = self.bot.configs[ctx.guild.id].get('greet.leavemsg')
leavechan = self.bot.configs[ctx.guild.id].get('greet.leavechannel')
if not leavemsg:
embed = discord.Embed(color=discord.Color.red(), timestamp=datetime.datetime.utcnow(), description=f'<:xmark:674359427830382603> Please provide a channel and message for leave messages.')
variables = '{user}: {fuser}\n{user.mention}: {fmention}\n{user.name}: {fname}\n{user.discrim}: {fdiscrim}\n{server}|{guild}: {fguild}'.replace('{fmention}', ctx.author.mention).replace('{fuser}', str(ctx.author)).replace('{fname}', ctx.author.name).replace('{fdiscrim}', ctx.author.discriminator).replace('{fguild}', ctx.guild.name)
embed.add_field(name='Variables', value=variables, inline=False)
return await ctx.send(embed=embed)
embed = discord.Embed(color=ctx.author.color, timestamp=datetime.datetime.utcnow(), description=f'**Current Leave Message Settings**\nDo __{ctx.prefix}leavemsg disable__ to disable leave messages')
embed.add_field(name='Channel', value=leavechan.mention if leavechan else 'Not Set (Not sure how you managed to do this)', inline=False)
message = leavemsg or 'Not set.'
message = message.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name)
embed.add_field(name='Message', value=message, inline=False)
variables = '{user}: {fuser}\n{user.mention}: {fmention}\n{user.name}: {fname}\n{user.discrim}: {fdiscrim}\n{server}|{guild}: {fguild}'.replace('{fmention}', ctx.author.mention).replace('{fuser}', str(ctx.author)).replace('{fname}', ctx.author.name).replace('{fdiscrim}', ctx.author.discriminator).replace('{fguild}', ctx.guild.name)
embed.add_field(name='Variables', value=variables, inline=False)
return await ctx.send(embed=embed)
if isinstance(channel, str) and channel.lower() in ['off', 'disable', 'false']:
leavemsg = self.bot.configs[ctx.guild.id].get('greet.leavemsg')
if not leavemsg:
return await ctx.error('Can\'t disable something that wasn\'t enabled. ¯\_(ツ)_/¯')
await self.bot.configs[ctx.guild.id].set('greet.leavemsg', '')
await self.bot.configs[ctx.guild.id].set('greet.leavechannel', None)
return await ctx.success(f'Successfully disabled leave messages!')
if isinstance(channel, str):
return await ctx.error('You need to provide a valid channel')
if not message:
leavemsg = self.bot.configs[ctx.guild.id].get('greet.leavemsg')
if not leavemsg:
return await ctx.error('You can\'t set a channel without setting a message.')
await self.bot.configs[ctx.guild.id].set('greet.leavechannel', channel)
message = leavemsg.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
return await ctx.success(f'Leave messages will show in {channel.mention}!\nExample: {message}')
else:
await self.bot.configs[ctx.guild.id].set('greet.leavemsg', message)
await self.bot.configs[ctx.guild.id].set('greet.leavechannel', channel)
message = message.replace('{user.mention}', ctx.author.mention).replace('{user}', str(ctx.author)).replace('{user.name}', ctx.author.name).replace('{user.discrim}', ctx.author.discriminator).replace('{server}', ctx.guild.name).replace('{guild}', ctx.guild.name).replace('@everyone', '\@everyone').replace('@here', '\@here')
return await ctx.success(f'Leave messages will show in {channel.mention}!\nExample: {message}')
@commands.command(name='linkfilter', description='Configure the link filter for this server')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def linkfiltercmd(self, ctx, *, enabled: str = None):
options = ['discord', 'youtube', 'twitch', 'twitter', 'paypal', 'malware']
if not enabled:
return await ctx.error(f'You must provide a valid filter(s). You can choose from {", ".join(options)}')
enabled = enabled.split(' ')
if any(e not in options for e in enabled):
invalid = [e for e in enabled if e not in options]
return await ctx.error(f'{", ".join(invalid)} are not valid filters')
filtered = self.bot.configs[ctx.guild.id].get('mod.linkfilter')
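        # Toggle each requested filter: currently-enabled ones are turned
        # off, the rest are turned on.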
for f in enabled:
if f in filtered:
filtered.remove(f)
else:
filtered.append(f)
new = await self.bot.configs[ctx.guild.id].set('mod.linkfilter', filtered)
if new:
return await ctx.success(f'Now filtering {", ".join(new)} links.')
else:
return await ctx.success(f'No longer filtering links')
@commands.command(name='filterexcl', description='Exclude channels, roles and members from the filter')
async def filterexclcmd(self, ctx, *ids: typing.Union[TextChannel, Role, Member]):
current = self.bot.configs[ctx.guild.id].get('excluded.filter')
ids = [d.id for d in ids]
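        # Toggle each given ID: exclude it if it is not excluded yet,
        # re-include it if it already was.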
        for sf in ids:
            if sf in current:
                current.remove(sf)
            else:
                current.append(sf)
await self.bot.configs[ctx.guild.id].set('excluded.filter', current)
excl = []
for sf in current:
if ctx.guild.get_member(sf):
excl.append(ctx.guild.get_member(sf))
elif ctx.guild.get_role(sf):
excl.append(ctx.guild.get_role(sf))
elif ctx.guild.get_channel(sf):
excl.append(ctx.guild.get_channel(sf))
else:
excl.append(sf)
await ctx.success(f'Successfully set objects excluded from filters (link filter and duplicate message check)\nExcluded: {", ".join([str(e) for e in excl])}')
@commands.command(name='command', description='Enable and disable commands')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def cmd(self, ctx, command: str = None):
if not command:
return await ctx.error('You must provide a command name')
command = self.bot.get_command(command)
if not command:
return await ctx.error('You must provide a valid command')
disabled = self.bot.configs[ctx.guild.id].get('disabled.commands')
if command.name in disabled:
toggle = 'enabled'
disabled.remove(command.name)
else:
toggle = 'disabled'
disabled.append(command.name)
await self.bot.configs[ctx.guild.id].set('disabled.commands', disabled)
return await ctx.success(f'{command.name} has been {toggle}.')
def setup(bot):
bot.add_cog(Settings(bot))
bot.logger.info(f'$GREENLoaded Settings/Events cog!')
| 50.331255 | 338 | 0.693318 |
c22b70cc937c28f82b4dfe641530adda4d0f9ac3 | 973 | py | Python | migrations/versions/ae4fe43054dd_.py | jurayev/garbage-collector | a08b4876e5fe75bc94a59657d966df82efb1fa33 | [
"MIT"
] | null | null | null | migrations/versions/ae4fe43054dd_.py | jurayev/garbage-collector | a08b4876e5fe75bc94a59657d966df82efb1fa33 | [
"MIT"
] | null | null | null | migrations/versions/ae4fe43054dd_.py | jurayev/garbage-collector | a08b4876e5fe75bc94a59657d966df82efb1fa33 | [
"MIT"
] | null | null | null | """empty message
Revision ID: ae4fe43054dd
Revises:
Create Date: 2020-08-01 23:11:19.853119
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ae4fe43054dd'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('todo_lists',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.add_column('todos', sa.Column('list_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'todos', 'todo_lists', ['list_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
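    # NOTE: the foreign key was created without an explicit name, so the real
    # constraint name may need to be filled in here before downgrading.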
op.drop_constraint(None, 'todos', type_='foreignkey')
op.drop_column('todos', 'list_id')
op.drop_table('todo_lists')
# ### end Alembic commands ###
| 26.297297 | 77 | 0.672148 |
2420fa4b348f1f80210b70a565b8b44a4ae2ce2e | 4,443 | py | Python | LSTM_models.py | W0lgast/RumourEval | 270680c6c6f17a2b209f2f0dfb67f1ec92f39566 | [
"CC-BY-4.0"
] | null | null | null | LSTM_models.py | W0lgast/RumourEval | 270680c6c6f17a2b209f2f0dfb67f1ec92f39566 | [
"CC-BY-4.0"
] | null | null | null | LSTM_models.py | W0lgast/RumourEval | 270680c6c6f17a2b209f2f0dfb67f1ec92f39566 | [
"CC-BY-4.0"
] | null | null | null | """
Contains the functions that define the LSTM models' architectures
"""
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.layers import TimeDistributed, Masking
from keras import optimizers
from keras import regularizers
#from keras.models import load_model
import json
#%%
def LSTM_model_veracity(x_train, y_train, x_test, params,eval=False ):
num_lstm_units = int(params['num_lstm_units'])
num_lstm_layers = int(params['num_lstm_layers'])
num_dense_layers = int(params['num_dense_layers'])
num_dense_units = int(params['num_dense_units'])
num_epochs = params['num_epochs']
learn_rate = params['learn_rate']
mb_size = params['mb_size']
l2reg = params['l2reg']
model = Sequential()
num_features = x_train.shape[2]
model.add(Masking(mask_value=0., input_shape=(None, num_features)))
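    # Masking skips zero-padded timesteps, so padded variable-length
    # sequences do not affect the LSTM state.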
for nl in range(num_lstm_layers-1):
model.add(LSTM(num_lstm_units, dropout=0.2, recurrent_dropout=0.2,
return_sequences=True))
model.add(LSTM(num_lstm_units, dropout=0.2, recurrent_dropout=0.2,
return_sequences=False))
for nl in range(num_dense_layers):
model.add(Dense(num_dense_units, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax',
activity_regularizer=regularizers.l2(l2reg)))
adam = optimizers.Adam(lr=learn_rate, beta_1=0.9, beta_2=0.999,
epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam, loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=mb_size,
epochs=num_epochs, shuffle=True, class_weight=None, verbose=0)
if eval==True:
model.save('output/my_model_veracity.h5')
json_string = model.to_json()
with open('output/model_architecture_veracity.json','w') as fout:
json.dump(json_string,fout)
model.save_weights('output/my_model_veracity_weights.h5')
pred_probabilities = model.predict(x_test, batch_size=mb_size, verbose=0)
confidence = np.max(pred_probabilities, axis=1)
Y_pred = model.predict_classes(x_test, batch_size=mb_size)
return Y_pred, confidence
#%%
def LSTM_model_stance(x_train, y_train, x_test, params,eval=False ):
num_lstm_units = int(params['num_lstm_units'])
num_lstm_layers = int(params['num_lstm_layers'])
num_dense_layers = int(params['num_dense_layers'])
num_dense_units = int(params['num_dense_units'])
num_epochs = params['num_epochs']
learn_rate = params['learn_rate']
mb_size = params['mb_size']
l2reg = params['l2reg']
model = Sequential()
num_features = x_train.shape[2]
model.add(Masking(mask_value=0., input_shape=(None, num_features)))
for nl in range(num_lstm_layers-1):
model.add(LSTM(num_lstm_units, kernel_initializer='glorot_normal',
dropout=0.2, recurrent_dropout=0.2,
return_sequences=True))
model.add(LSTM(num_lstm_units, kernel_initializer='glorot_normal',
dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
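    # The final LSTM keeps the full sequence so the TimeDistributed layers
    # below emit one stance prediction per timestep.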
model.add(TimeDistributed(Dense(num_dense_units, activation='relu')))
for nl in range(num_dense_layers-1):
model.add(TimeDistributed(Dense(num_dense_units, activation='relu')))
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(4, activation='softmax',
activity_regularizer=regularizers.l2(l2reg))))
adam = optimizers.Adam(lr=learn_rate, beta_1=0.9, beta_2=0.999,
epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam, loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=mb_size,
epochs=num_epochs, shuffle=True, class_weight=None, verbose=0)
if eval==True:
model.save('output/my_model_stance.h5')
json_string = model.to_json()
with open('output/model_architecture_stance.json','w') as fout:
json.dump(json_string,fout)
model.save_weights('output/my_model_stance_weights.h5')
pred_probabilities = model.predict(x_test, batch_size=mb_size, verbose=0)
confidence = np.max(pred_probabilities, axis=2)
Y_pred = model.predict_classes(x_test, batch_size=mb_size)
return Y_pred, confidence
| 43.990099 | 78 | 0.682647 |
590bcd6c627bee9d3e8368336c0f9e7dc91ab663 | 6,197 | py | Python | models/wideres_shiftadd_se.py | poppin-mice/ShiftAddNet | a17369a50da5bba6250fdeac7c065bd00f293f3c | [
"MIT"
] | 55 | 2020-10-04T17:17:46.000Z | 2022-03-31T02:56:51.000Z | models/wideres_shiftadd_se.py | poppin-mice/ShiftAddNet | a17369a50da5bba6250fdeac7c065bd00f293f3c | [
"MIT"
] | 8 | 2020-12-07T03:37:48.000Z | 2021-07-21T09:26:45.000Z | models/wideres_shiftadd_se.py | poppin-mice/ShiftAddNet | a17369a50da5bba6250fdeac7c065bd00f293f3c | [
"MIT"
] | 14 | 2020-10-29T16:51:41.000Z | 2021-11-16T01:36:43.000Z | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import sys
import numpy as np
from adder import adder
from se_shift import SEConv2d, SELinear
__all__ = ['wideres_shiftadd_se']
def init_conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv3x3(in_planes, out_planes, threshold, sign_threshold, distribution, stride=1, quantize=False, weight_bits=8, sparsity=0):
"""3x3 convolution with padding"""
shift = SEConv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution)
add = adder.Adder2D(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
return nn.Sequential(shift, add)
def conv(in_planes, out_planes, threshold, sign_threshold, distribution, kernel_size=3, stride=1, padding=0, quantize=False, weight_bits=8, sparsity=0):
"""3x3 convolution with padding"""
shift = SEConv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution)
add = adder.Adder2D(out_planes, out_planes, kernel_size=kernel_size, stride=1, padding=padding, bias=False, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
return nn.Sequential(shift, add)
# def conv3x3(in_planes, out_planes, stride=1):
# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, threshold, sign_threshold, distribution, stride=1, quantize=False, weight_bits=8, sparsity=0):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.conv1 = conv(in_planes, planes, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution, kernel_size=3, padding=1, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.conv2 = conv(planes, planes, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution, kernel_size=3, stride=stride, padding=1, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
conv(in_planes, planes, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution, kernel_size=1, stride=stride, quantize=quantize, weight_bits=weight_bits, sparsity=sparsity),
)
def forward(self, x):
# print(x.shape)
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
# print(out.shape)
# print(self.shortcut(x).shape)
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes, threshold, sign_threshold, distribution, quantize=False, weight_bits=8, sparsity=0):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
self.threshold = threshold
self.sign_threshold = sign_threshold
self.distribution = distribution
self.quantize = quantize
self.weight_bits = weight_bits
self.sparsity = sparsity
assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'
n = (depth-4)/6
k = widen_factor
print('| Wide-Resnet %dx%d' %(depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = init_conv3x3(3,nStages[0])
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, threshold=self.threshold,
sign_threshold=self.sign_threshold, distribution=self.distribution,
stride=stride, quantize=self.quantize, weight_bits=self.weight_bits, sparsity=self.sparsity))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def wideres_shiftadd_se(threshold, sign_threshold, distribution, num_classes=10, quantize=False, weight_bits=8, sparsity=0, **kwargs):
return Wide_ResNet(16, 8, 0.3, num_classes=num_classes, threshold=threshold, sign_threshold=sign_threshold, distribution=distribution,
quantize=quantize, weight_bits=weight_bits, sparsity=sparsity)
if __name__ == '__main__':
net=Wide_ResNet(28, 10, 0.3, 10)
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size()) | 49.18254 | 224 | 0.695014 |
8ccb41da393d39bec02f3ba207dd1dd73f435d95 | 1,480 | py | Python | emsapi/models/adi_ems_web_api_v2_dto_upload_upload_status_py3.py | ge-flight-analytics/emsapi-python | 2e3a53529758f1bd7a2a850119b1cc1b5ac552e3 | [
"MIT"
] | null | null | null | emsapi/models/adi_ems_web_api_v2_dto_upload_upload_status_py3.py | ge-flight-analytics/emsapi-python | 2e3a53529758f1bd7a2a850119b1cc1b5ac552e3 | [
"MIT"
] | 2 | 2020-01-16T00:04:35.000Z | 2021-05-26T21:04:06.000Z | emsapi/models/adi_ems_web_api_v2_dto_upload_upload_status_py3.py | ge-flight-analytics/emsapi-python | 2e3a53529758f1bd7a2a850119b1cc1b5ac552e3 | [
"MIT"
] | 1 | 2021-02-23T08:25:12.000Z | 2021-02-23T08:25:12.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoUploadUploadStatus(Model):
"""This is sent as a response to an upload transfer status request.
:param current_count: The current number of bytes the server has received
on this transfer.
:type current_count: long
:param state: The state of the upload. Possible values include:
'transferring', 'waitingProcessing', 'processing', 'processedSuccess',
'processedFailure', 'abandonedTransfer', 'abandonedProcessing', 'canceled'
:type state: str or ~emsapi.models.enum
:param message: Contains a user-readable message about the status of the
transfer.
:type message: str
"""
_attribute_map = {
'current_count': {'key': 'currentCount', 'type': 'long'},
'state': {'key': 'state', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, current_count: int=None, state=None, message: str=None, **kwargs) -> None:
super(AdiEmsWebApiV2DtoUploadUploadStatus, self).__init__(**kwargs)
self.current_count = current_count
self.state = state
self.message = message
| 40 | 100 | 0.613514 |
0a5b44135d26271e8f92d792ac287e5edbe71b03 | 6,840 | py | Python | model/seg_network_initial.py | gbc-sid/MTMFI-VOS | b26dbfad515ebf61b4f6316d166f11735b5dcb9a | [
"MIT"
] | 3 | 2021-04-20T02:31:28.000Z | 2021-12-31T03:20:16.000Z | model/seg_network_initial.py | gbc-sid/MTMFI-VOS | b26dbfad515ebf61b4f6316d166f11735b5dcb9a | [
"MIT"
] | null | null | null | model/seg_network_initial.py | gbc-sid/MTMFI-VOS | b26dbfad515ebf61b4f6316d166f11735b5dcb9a | [
"MIT"
] | 1 | 2021-04-20T02:36:03.000Z | 2021-04-20T02:36:03.000Z | import torch
from torch import nn as nn
from torch.nn import functional as F
from lib.utils import conv, relu, interpolate, adaptive_cat
class TSE(nn.Module):
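    # TSE: reduces backbone features to oc channels and fuses them with the
    # coarse segmentation score map.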
def __init__(self, fc, ic, oc):
super().__init__()
nc = ic + oc
        # ResNet-style 1x1 convolutions to reduce the channel dimension
self.reduce = nn.Sequential(conv(fc, oc, 1), relu(), conv(oc, oc, 1))
self.transform = nn.Sequential(conv(nc, nc, 3), relu(), conv(nc, nc, 3), relu(), conv(nc, oc, 3), relu())
def forward(self, ft, score, x=None):
h = self.reduce(ft)
        # adaptive pooling: only the output size is given; kernel and stride are inferred
hpool = F.adaptive_avg_pool2d(h, (1, 1)) if x is None else x
h = adaptive_cat((h, score), dim=1, ref_tensor=0)
h = self.transform(h)
return h, hpool
class CAB(nn.Module):
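    # CAB: gates the shallower features with a sigmoid attention vector pooled
    # from both depths, then adds the (upsampled) deeper features.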
def __init__(self, oc, deepest):
super().__init__()
self.convreluconv = nn.Sequential(conv(2 * oc, oc, 1), relu(), conv(oc, oc, 1))
self.deepest = deepest
def forward(self, deeper, shallower, att_vec=None):
shallow_pool = F.adaptive_avg_pool2d(shallower, (1, 1))
deeper_pool = deeper if self.deepest else F.adaptive_avg_pool2d(deeper, (1, 1))
if att_vec is not None:
global_pool = torch.cat([shallow_pool, deeper_pool, att_vec], dim=1)
else:
global_pool = torch.cat((shallow_pool, deeper_pool), dim=1)
conv_1x1 = self.convreluconv(global_pool)
inputs = shallower * torch.sigmoid(conv_1x1)
out = inputs + interpolate(deeper, inputs.shape[-2:])
return out
class RRB(nn.Module):
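    # RRB: 1x1 channel projection followed by a residual 3x3 conv block.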
def __init__(self, oc, use_bn=False):
super().__init__()
self.conv1x1 = conv(oc, oc, 1)
if use_bn:
self.bblock = nn.Sequential(conv(oc, oc, 3), nn.BatchNorm2d(oc), relu(), conv(oc, oc, 3, bias=False))
else:
self.bblock = nn.Sequential(conv(oc, oc, 3), relu(), conv(oc, oc, 3, bias=False)) # Basic block
def forward(self, x):
h = self.conv1x1(x)
return F.relu(h + self.bblock(h))
class Upsampler(nn.Module):
def __init__(self, in_channels=64):
super().__init__()
self.conv1 = conv(in_channels, in_channels // 2, 3)
self.conv2 = conv(in_channels // 2, 1, 3)
def forward(self, x, image_size):
        # print(x.shape)
x = F.interpolate(x, (2 * x.shape[-2], 2 * x.shape[-1]), mode='bicubic', align_corners=False)
x = F.relu(self.conv1(x))
x = F.interpolate(x, image_size[-2:], mode='bicubic', align_corners=False)
x = self.conv2(x)
return x
class PyrUpBicubic2d(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
def kernel(d):
x = d + torch.arange(-1, 3, dtype=torch.float32)
x = torch.abs(x)
a = -0.75
f = (x < 1).float() * ((a + 2) * x * x * x - (a + 3) * x * x + 1) + \
((x >= 1) * (x < 2)).float() * (a * x * x * x - 5 * a * x * x + 8 * a * x - 4 * a)
W = f.reshape(1, 1, 1, len(x)).float()
Wt = W.permute(0, 1, 3, 2)
return W, Wt
We, We_t = kernel(-0.25)
Wo, Wo_t = kernel(-0.25 - 0.5)
# Building non-separable filters for now. It would make sense to
# have separable filters if it proves to be faster.
# .contiguous() is needed until a bug is fixed in nn.Conv2d.
self.W00 = (We_t @ We).expand(channels, 1, 4, 4).contiguous()
self.W01 = (We_t @ Wo).expand(channels, 1, 4, 4).contiguous()
self.W10 = (Wo_t @ We).expand(channels, 1, 4, 4).contiguous()
self.W11 = (Wo_t @ Wo).expand(channels, 1, 4, 4).contiguous()
def forward(self, input):
if input.device != self.W00.device:
self.W00 = self.W00.to(input.device)
self.W01 = self.W01.to(input.device)
self.W10 = self.W10.to(input.device)
self.W11 = self.W11.to(input.device)
a = F.pad(input, (2, 2, 2, 2), 'replicate')
I00 = F.conv2d(a, self.W00, groups=self.channels)
I01 = F.conv2d(a, self.W01, groups=self.channels)
I10 = F.conv2d(a, self.W10, groups=self.channels)
I11 = F.conv2d(a, self.W11, groups=self.channels)
n, c, h, w = I11.shape
J0 = torch.stack((I00, I01), dim=-1).view(n, c, h, 2 * w)
J1 = torch.stack((I10, I11), dim=-1).view(n, c, h, 2 * w)
out = torch.stack((J0, J1), dim=-2).view(n, c, 2 * h, 2 * w)
out = F.pad(out, (-1, -1, -1, -1))
return out
class BackwardCompatibleUpsampler(nn.Module):
""" Upsampler with bicubic interpolation that works with Pytorch 1.0.1 """
def __init__(self, in_channels=64):
super().__init__()
self.conv1 = conv(in_channels, in_channels // 2, 3)
self.up1 = PyrUpBicubic2d(in_channels)
self.conv2 = conv(in_channels // 2, 1, 3)
self.up2 = PyrUpBicubic2d(in_channels // 2)
def forward(self, x, image_size):
x = self.up1(x)
x = F.relu(self.conv1(x))
x = self.up2(x)
x = F.interpolate(x, image_size[-2:], mode='bilinear', align_corners=False)
x = self.conv2(x)
return x
class SegNetwork(nn.Module):
def __init__(self, in_channels=1, out_channels=32, ft_channels=None, use_bn=False):
super().__init__()
assert ft_channels is not None
self.ft_channels = ft_channels
self.TSE = nn.ModuleDict()
self.RRB1 = nn.ModuleDict()
self.CAB = nn.ModuleDict()
self.RRB2 = nn.ModuleDict()
ic = in_channels
oc = out_channels
for L, fc in self.ft_channels.items():
self.TSE[L] = TSE(fc, ic, oc)
self.RRB1[L] = RRB(oc, use_bn=use_bn)
self.CAB[L] = CAB(oc, L == 'layer5')
self.RRB2[L] = RRB(oc, use_bn=use_bn)
#if torch.__version__ == '1.0.1'
self.project = BackwardCompatibleUpsampler(out_channels)
#self.project = Upsampler(out_channels)
def forward(self, scores, features, image_size):
num_targets = scores.shape[0]
num_fmaps = features[next(iter(self.ft_channels))].shape[0]
if num_targets > num_fmaps:
multi_targets = True
else:
multi_targets = False
x = None
for i, L in enumerate(self.ft_channels):
ft = features[L]
s = interpolate(scores, ft.shape[-2:]) # Resample scores to match features size
if multi_targets:
h, hpool = self.TSE[L](ft.repeat(num_targets, 1, 1, 1), s, x)
else:
h, hpool = self.TSE[L](ft, s, x)
h = self.RRB1[L](h)
h = self.CAB[L](hpool, h)
x = self.RRB2[L](h)
x = self.project(x, image_size)
return x
| 33.043478 | 113 | 0.562427 |
3b1152541b705a9c0f2c9fd176303ab0dfb59dcf | 464 | py | Python | server/baseball/migrations/0012_auto_20190308_2256.py | louisliv/baseball | 1148785a6f2c03ced6ebcfcd209e45b901da26a3 | [
"MIT"
] | null | null | null | server/baseball/migrations/0012_auto_20190308_2256.py | louisliv/baseball | 1148785a6f2c03ced6ebcfcd209e45b901da26a3 | [
"MIT"
] | null | null | null | server/baseball/migrations/0012_auto_20190308_2256.py | louisliv/baseball | 1148785a6f2c03ced6ebcfcd209e45b901da26a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-03-08 22:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('baseball', '0011_auto_20190308_2256'),
]
operations = [
migrations.AlterField(
model_name='player',
name='birth_date',
field=models.DateField(blank=True, null=True),
),
]
| 22.095238 | 58 | 0.62069 |
5435aadf11944dff2e48d5bda5705b3dafafb97b | 2,220 | py | Python | src/dirbs/api/v1/schemas/__init__.py | a-wakeel/DIRBS-Core | a80563c6dee0695f6be62c37abdcb702f174b717 | [
"PostgreSQL",
"Unlicense"
] | 19 | 2018-09-16T10:59:23.000Z | 2022-01-12T09:37:41.000Z | src/dirbs/api/v1/schemas/__init__.py | dirbs/DIRBS-Core | a80563c6dee0695f6be62c37abdcb702f174b717 | [
"PostgreSQL",
"Unlicense"
] | 5 | 2020-03-25T14:12:24.000Z | 2021-06-22T06:35:19.000Z | src/dirbs/api/v1/schemas/__init__.py | a-wakeel/DIRBS-Core | a80563c6dee0695f6be62c37abdcb702f174b717 | [
"PostgreSQL",
"Unlicense"
] | 19 | 2018-10-11T06:29:53.000Z | 2021-11-18T05:44:42.000Z | """
DIRBS REST-ful API-V1 schemas package.
Copyright (c) 2018-2021 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
| 67.272727 | 118 | 0.806757 |
072fa1914c0ad3a61787c05c8f98c853ff8bb6dc | 6,615 | py | Python | Flaskshop/taobao/forms.py | GinkgoTeam/2019YJTZB | 7acb23f145ff70e09baade92e6b5a65856b36a1e | [
"MIT"
] | 41 | 2019-05-13T14:22:58.000Z | 2021-11-12T03:30:10.000Z | Flaskshop/taobao/forms.py | GinkgoTeam/2019YJTZB | 7acb23f145ff70e09baade92e6b5a65856b36a1e | [
"MIT"
] | null | null | null | Flaskshop/taobao/forms.py | GinkgoTeam/2019YJTZB | 7acb23f145ff70e09baade92e6b5a65856b36a1e | [
"MIT"
] | 10 | 2019-05-13T22:49:22.000Z | 2021-02-16T20:33:32.000Z | # -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField,SelectField,TextAreaField,FloatField,IntegerField
from wtforms.validators import DataRequired,EqualTo,ValidationError,Length,Email,InputRequired
from taobao.models import Customer,Crew,Supplier,User
from flask_login import current_user
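# Role codes used by the forms below:
# "1" = customer (buyer), "2" = supplier, "3" = crew (employee).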
class RegistrationForm(FlaskForm):
    role=SelectField("Select login role",coerce=str,choices=[("1","I am a buyer"),("2","I am a supplier"),("3","I am an employee")])
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired(),Length(min=6, max=20)])
    confirm_password = PasswordField('Confirm password',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
def validate_username(self, username):
if self.role.data == "1":#购买者
table = Customer
elif self.role.data == "2":#供应商
table = Supplier
elif self.role.data == "3":#雇员
table = Crew
user = table.query.filter_by(username=username.data).first()
if user:
raise ValidationError("这个用户名已经被用过了,换一个吧!")
def validate_email(self, email):
if self.role.data == "1": # 购买者
table = Customer
elif self.role.data == "2": # 供应商
table = Supplier
elif self.role.data == "3": # 雇员
table = Crew
user = table.query.filter_by(email=email.data).first()
if user:
raise ValidationError("这个邮箱已经被用过了,换一个吧!")
class LoginForm(FlaskForm):
    role = SelectField("Select login role",coerce=str,choices=[("1","I am a buyer"),("2","I am a supplier"),("3","I am an employee")])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField("Remember me")
    submit = SubmitField('Log in')
class CustomerDetailForm(FlaskForm):
    consignee = StringField("Consignee name",validators=[InputRequired(),Length(max=20,min=2)])
    address = StringField("Shipping address", validators=[InputRequired(),Length(min=10,max=40)])
    telephone = StringField("Telephone",validators=[InputRequired(),Length(max=20,min=9)])
    submit = SubmitField("Add address")
class UpdateCustomerDetailForm(FlaskForm):
    consignee = StringField("Consignee name",validators=[InputRequired(),Length(max=20,min=2)])
    address = StringField("Shipping address", validators=[InputRequired(),Length(min=10,max=40)])
    telephone = StringField("Telephone",validators=[InputRequired(),Length(max=20,min=9)])
    submit = SubmitField("Update address")
class SecurityCheck(FlaskForm):
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Verify identity')
class UpdateInfo(FlaskForm):
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    submit = SubmitField('Update username and email')
def validate_username(self, username):
if current_user.table_name == "Customer":
table = Customer
elif current_user.table_name == "Supplier":
table = Supplier
elif current_user.table_name == "Crew":
table = Crew
user =table.query.filter_by(username=username.data).first()
if user and user.username !=current_user.username:
raise ValidationError("这个用户名已经被用过了,换一个吧!")
def validate_email(self, email):
if current_user.table_name == "Customer":
table = Customer
elif current_user.table_name == "Supplier":
table = Supplier
elif current_user.table_name == "Crew":
table = Crew
user =table.query.filter_by(email=email.data).first()
if user and user.username !=current_user.username:
raise ValidationError("这个邮箱已经被用过了,换一个吧!")
class UpdateSupplierInfoForm(FlaskForm):
    supplier_name = StringField('Company name (public)',
                                validators=[InputRequired(), Length(min=5, max=40)])
    address = StringField('Company address',
                          validators=[InputRequired(),Length(min=5, max=40)])
    telephone = StringField("Telephone", validators=[InputRequired(), Length(max=20, min=9)])
    mission = TextAreaField("Daily tasks", validators=[InputRequired(), Length(max=140, min=0)])
    submit = SubmitField('Update info')
class UpdateCrewInfoForm(FlaskForm):
    crew_name = StringField('Formal name (shown to suppliers)',
                            validators=[DataRequired(), Length(min=1, max=40)])
    address = StringField('Home address',
                          validators=[DataRequired(),Length(min=4, max=40)])
    telephone = StringField("Telephone", validators=[InputRequired(), Length(max=20, min=4)])
    massage = TextAreaField("Job-seeking statement", validators=[InputRequired(), Length(max=140, min=0)])
    submit = SubmitField('Update info')
class ProductForm(FlaskForm):
    name = StringField('Product name',
                       validators=[DataRequired(), Length(min=2, max=40)])
    sort = StringField('Product category',
                       validators=[DataRequired(), Length(min=2, max=40)])
    price = FloatField("Product price", validators=[DataRequired()])
    detail = TextAreaField('Product details',
                           validators=[DataRequired(), Length(min=1, max=140)])
    start_count = IntegerField("Initial stock", validators=[DataRequired()])
    confirm = IntegerField("Confirm initial stock",validators=[DataRequired(), EqualTo("start_count")])
    submit = SubmitField("Add product")
class UpdateProductForm(FlaskForm):
    name = StringField('Product name',
                       validators=[InputRequired(), Length(min=2, max=40)])
    sort = StringField('Product category',
                       validators=[InputRequired(), Length(min=2, max=40)])
    price = FloatField("Product price", validators=[InputRequired()])
    detail = TextAreaField('Product details',
                           validators=[InputRequired(), Length(min=1, max=140)])
    submit = SubmitField("Update product")
class AddProductCountForm(FlaskForm):
    count = IntegerField("Stock to add", validators=[InputRequired()])
    confirm = IntegerField("Confirm stock to add", validators=[InputRequired(), EqualTo("count")])
    submit = SubmitField("Add stock")
class UpdatePasswordForm(FlaskForm):
    password = PasswordField('Password', validators=[DataRequired(),Length(min=6, max=20)])
    confirm_password = PasswordField('Confirm password',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Update password')
| 37.372881 | 120 | 0.62842 |
3dda0b30ac7ddd9f96d1e45c798b9e5c54699e84 | 3,391 | py | Python | nordlys/core/retrieval/__init__.py | ageron/nordlys | 0b25d68ab129314e88af4cfe51daf6e1c262927a | [
"BSD-4-Clause"
] | 2 | 2019-03-30T02:12:48.000Z | 2021-03-08T18:58:43.000Z | nordlys/core/retrieval/__init__.py | ageron/nordlys | 0b25d68ab129314e88af4cfe51daf6e1c262927a | [
"BSD-4-Clause"
] | null | null | null | nordlys/core/retrieval/__init__.py | ageron/nordlys | 0b25d68ab129314e88af4cfe51daf6e1c262927a | [
"BSD-4-Clause"
] | null | null | null | """
This package provides basic indexing and scoring functionality based on Elasticsearch. It can be used both for documents and for entities (as the latter are represented as fielded documents).
Indexing
--------
.. todo:: Explain indexing (representing entities as fielded documents, mongo to elasticsearch)
The :mod:`~nordlys.core.retrieval.toy_indexer` module provides a toy example.
Notes
~~~~~
* There is no need to create a separate *id* field for document IDs. Elasticsearch creates an ``_id`` field by default.
* You may ignore creating a separate *catch-all* field. Elasticsearch automatically creates a catch-all field (called ``_all``), which is not stored; see the `elasticsearch documentation <https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-all-field.html>`_ for further details.
* To speed up indexing, use :meth:`~nordlys.core.retrieval.elastic.Elastic.add_docs_bulk`. The optimal number of documents to send in a single bulk depends on the size of documents; you need to figure it out experimentally.
* For indexing documents from a MongoDB collection, always use the :mod:`~nordlys.core.retrieval.indexer_mongo` module. For example usage of this class, see :mod:`~nordlys.core.data.dbpedia.indexer_fsdm`.
* We strongly recommend using the default Elasticsearch similarity (currently BM25) for indexing. (`Other similarity functions <https://www.elastic.co/guide/en/elasticsearch/reference/2.3/index-modules-similarity.html>`_ may also be used; in that case the similarity function can be updated after indexing.)
Retrieval
---------
.. todo:: Explain two-stage retrieval
Basic retrieval models (LM, MLM, and PRMS) are implemented in the :mod:`~nordlys.core.retrieval.scorer` module. Check these out to get inspiration for writing a new scorer.
Command line usage
~~~~~~~~~~~~~~~~~~
::
python -m nordlys.core.retrieval.retrieval data/config/eg_retrieval.config.json
* The config file contains settings for 2-phase retrieval:
* *first_pass*: elastic built-in retrieval
* *second_pass*: nordlys retrieval methods
* The retrieval model (with its parameters) should be set according to `elastic search <https://www.elastic.co/guide/en/elasticsearch/reference/2.3/index-modules-similarity.html>`_
* If *second_pass* settings are not set, only first pass retrieval is performed.
API usage
~~~~~~~~~
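A minimal sketch (illustrative only; the constructor arguments and the index,
document and field names below are assumptions, not taken from the nordlys
sources)::
    from nordlys.core.retrieval.elastic_cache import ElasticCache
    es = ElasticCache("toy_index")  # placeholder index name
    # cache all term frequencies of one document field in a single lookup,
    # instead of reading the index once per (document, field, term)
    tf = es.term_freqs("<doc_id>", "content")  # placeholder doc ID and field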
Notes
~~~~~
* Always use a :class:`~nordlys.core.retrieval.elastic_cache.ElasticCache` object (instead of :class:`~nordlys.core.retrieval.elastic.Elastic`) for getting stats from the index. This class stores index stats (except term frequencies) in the memory, which strongly benefits efficiency.
* For getting term frequencies, you can call the :meth:`~nordlys.core.retrieval.elastic.Elastic.term_freq` method, but it may negatively affect efficiency. This means that you are reading from the index for each document, field, and term.
* You can also use :meth:`~nordlys.core.retrieval.elastic.Elastic.term_freqs` to get term frequency for all terms of a document field and cache it in memory. This helps efficiency, but remember that it can fill up the memory quite fast.
* The best strategy could be to cache term frequencies for each query (i.e., for every new query, all cache term frequencies should be deleted).
* You can read :meth:`~nordlys.core.retrieval.scorer.ScorerLM.get_lm_term_prob` for example usage.
"""
| 55.590164 | 304 | 0.769979 |
ce60345fce4a7f42cc88838844dc798a56a4fcb1 | 2,800 | py | Python | everyday/20180511-0531/markov_reference.py | gotraveltoworld/python-everyday-practice | deb8fbbe21a73dcb25d66e9ad8d598f16a85db15 | [
"MIT"
] | 1 | 2020-01-27T13:45:52.000Z | 2020-01-27T13:45:52.000Z | everyday/20180511-0531/markov_reference.py | gotraveltoworld/python-everyday-practice | deb8fbbe21a73dcb25d66e9ad8d598f16a85db15 | [
"MIT"
] | null | null | null | everyday/20180511-0531/markov_reference.py | gotraveltoworld/python-everyday-practice | deb8fbbe21a73dcb25d66e9ad8d598f16a85db15 | [
"MIT"
] | null | null | null | """This module contains a code example related to
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
"""
from __future__ import print_function, division
import sys
import string
import random
# global variables
suffix_map = {} # map from prefixes to a list of suffixes
prefix = () # current tuple of words
def process_file(filename, order=2):
"""Reads a file and performs Markov analysis.
filename: string
order: integer number of words in the prefix
    returns: None. The prefix -> suffixes map is stored in the global suffix_map.
"""
fp = open(filename)
skip_gutenberg_header(fp)
for line in fp:
for word in line.rstrip().split():
process_word(word, order)
def skip_gutenberg_header(fp):
"""Reads from fp until it finds the line that ends the header.
fp: open file object
"""
for line in fp:
if line.startswith('*END*THE SMALL PRINT!'):
break
def process_word(word, order=2):
"""Processes each word.
word: string
order: integer
During the first few iterations, all we do is store up the words;
after that we start adding entries to the dictionary.
"""
global prefix
if len(prefix) < order:
prefix += (word,)
return
try:
suffix_map[prefix].append(word)
except KeyError:
# if there is no entry for this prefix, make one
suffix_map[prefix] = [word]
prefix = shift(prefix, word)
def random_text(n=100):
"""Generates random wordsfrom the analyzed text.
Starts with a random prefix from the dictionary.
n: number of words to generate
"""
# choose a random prefix (not weighted by frequency)
start = random.choice(list(suffix_map.keys()))
for i in range(n):
suffixes = suffix_map.get(start, None)
if suffixes == None:
# if the start isn't in map, we got to the end of the
# original text, so we have to start again.
random_text(n-i)
return
# choose a random suffix
word = random.choice(suffixes)
print(word, end=' ')
start = shift(start, word)
def shift(t, word):
"""Forms a new tuple by removing the head and adding word to the tail.
t: tuple of strings
word: string
Returns: tuple of strings
"""
return t[1:] + (word,)
def main(script, filename='emma.txt', n=100, order=2):
try:
n = int(n)
order = int(order)
except ValueError:
        print('Usage: %s filename [# of words] [prefix length]' % script)
else:
process_file(filename, order)
random_text(n)
print()
if __name__ == '__main__':
main(*sys.argv)
| 22.95082 | 74 | 0.627857 |
d222b1db309963a2703c15c6ad67ab73bcd267e4 | 379 | py | Python | main.py | clean-code-craft-tcq-1/modular-python-AkshayUHegde | 16bef3e625d3100c84ab3c487a7a2e41bd98a7e8 | [
"MIT"
] | null | null | null | main.py | clean-code-craft-tcq-1/modular-python-AkshayUHegde | 16bef3e625d3100c84ab3c487a7a2e41bd98a7e8 | [
"MIT"
] | null | null | null | main.py | clean-code-craft-tcq-1/modular-python-AkshayUHegde | 16bef3e625d3100c84ab3c487a7a2e41bd98a7e8 | [
"MIT"
] | null | null | null | import test
if __name__ == '__main__':
test.test_color_pair_from_pair_number(4, 'White', 'Brown')
test.test_color_pair_from_pair_number(5, 'White', 'Slate')
test.test_pair_number_from_color_pair('Black', 'Orange', 12)
test.test_pair_number_from_color_pair('Violet', 'Slate', 25)
test.test_pair_number_from_color_pair('Red', 'Orange', 7)
print('Done :)')
| 37.9 | 64 | 0.725594 |
c84d652bdad990acb2270426925799537bca7561 | 50,859 | py | Python | license_protected_downloads/tests/test_views.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | license_protected_downloads/tests/test_views.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | license_protected_downloads/tests/test_views.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
] | null | null | null | __author__ = 'dooferlad'
import hashlib
import os
import tempfile
import unittest
import urllib2
import urlparse
import json
import random
import shutil
import mock
from django.conf import settings
from django.test import Client, TestCase
from django.http import HttpResponse
from license_protected_downloads.buildinfo import BuildInfo
from license_protected_downloads.config import INTERNAL_HOSTS
from license_protected_downloads.models import APIKeyStore
from license_protected_downloads.tests.helpers import temporary_directory
from license_protected_downloads.tests.helpers import TestHttpServer
from license_protected_downloads.views import _insert_license_into_db
from license_protected_downloads.views import _process_include_tags
from license_protected_downloads.views import _sizeof_fmt
from license_protected_downloads.views import is_same_parent_dir
from license_protected_downloads import views
THIS_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
TESTSERVER_ROOT = os.path.join(THIS_DIRECTORY, "testserver_root")
class BaseServeViewTest(TestCase):
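    # Point the app at the bundled test fixtures and a throw-away upload
    # directory; setUp saves the real settings and tearDown restores them.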
def setUp(self):
self.client = Client()
self.old_served_paths = settings.SERVED_PATHS
settings.SERVED_PATHS = [os.path.join(THIS_DIRECTORY,
"testserver_root")]
self.old_upload_path = settings.UPLOAD_PATH
settings.UPLOAD_PATH = os.path.join(THIS_DIRECTORY,
"test_upload_root")
if not os.path.isdir(settings.UPLOAD_PATH):
os.makedirs(settings.UPLOAD_PATH)
self.old_master_api_key = settings.MASTER_API_KEY
settings.MASTER_API_KEY = "1234abcd"
def tearDown(self):
settings.SERVED_PATHS = self.old_served_paths
settings.MASTER_API_KEY = self.old_master_api_key
os.rmdir(settings.UPLOAD_PATH)
settings.UPLOAD_PATH = self.old_upload_path
class ViewTests(BaseServeViewTest):
def test_license_directly(self):
response = self.client.get('/licenses/license.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_samsung(self):
response = self.client.get('/licenses/samsung.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_ste(self):
response = self.client.get('/licenses/ste.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_licensefile_directly_linaro(self):
response = self.client.get('/licenses/linaro.html', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/build-info')
def test_redirect_to_license_samsung(self):
# Get BuildInfo for target file
target_file = "build-info/origen-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_redirect_to_license_ste(self):
# Get BuildInfo for target file
target_file = "build-info/snowball-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "stericsson" theme. This contains igloo.png
self.assertContains(response, "igloo.png")
def test_redirect_to_license_linaro(self):
# Get BuildInfo for target file
target_file = "build-info/linaro-blob.txt"
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "linaro" theme. This contains linaro.png
self.assertContains(response, "linaro.png")
def set_up_license(self, target_file, index=0):
# Get BuildInfo for target file
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Insert license information into database
text = build_info.get("license-text", index)
digest = hashlib.md5(text).hexdigest()
theme = build_info.get("theme", index)
_insert_license_into_db(digest, text, theme)
return digest
def test_redirect_to_file_on_accept_license(self):
target_file = "build-info/linaro-blob.txt"
digest = self.set_up_license(target_file)
# Accept the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(response['Location'],
listing_url + "?dl=/" + target_file)
def test_redirect_to_decline_page_on_decline_license(self):
target_file = "build-info/linaro-blob.txt"
digest = self.set_up_license(target_file)
# Reject the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"reject": "reject"})
# We should get a message saying we don't have access to the file.
self.assertContains(response, "Without accepting the license, you can"
" not download the requested files.")
def test_download_file_accepted_license(self):
target_file = "build-info/linaro-blob.txt"
url = urlparse.urljoin("http://testserver/", target_file)
digest = self.set_up_license(target_file)
# Accept the license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
listing_url = os.path.dirname(url)
self.assertEqual(response['Location'],
listing_url + "?dl=/" + target_file)
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# XXX Workaround for seemingly out of sync cookie handling XXX
# The cookies in client.cookies are instances of
# http://docs.python.org/library/cookie.html once they have been
# returned by a client get/post. Unfortunately for the next query
# client.cookies needs to be a dictionary keyed by cookie name and
# containing a value of whatever is stored in the cookie (or so it
# seems). For this reason we start up a new client, erasing all
# cookies from the current session, and re-introduce them.
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
response = client.get(url)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_api_get_license_list(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
data = json.loads(response.content)["licenses"]
# Extract digests
digests = [d["digest"] for d in data]
# Make sure digests match what is in the database
self.assertIn(digest, digests)
self.assertEqual(len(digests), 1)
def test_api_get_license_list_multi_license(self):
target_file = "build-info/multi-license.txt"
digest_1 = self.set_up_license(target_file)
digest_2 = self.set_up_license(target_file, 1)
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
data = json.loads(response.content)["licenses"]
# Extract digests
digests = [d["digest"] for d in data]
# Make sure digests match what is in the database
self.assertIn(digest_1, digests)
self.assertIn(digest_2, digests)
self.assertEqual(len(digests), 2)
def test_api_get_license_list_404(self):
target_file = "build-info/snowball-b"
license_url = "/api/license/" + target_file
# Download JSON containing license information
response = self.client.get(license_url)
self.assertEqual(response.status_code, 404)
def test_api_download_file(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True,
HTTP_LICENSE_ACCEPTED=digest)
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_api_download_file_multi_license(self):
target_file = "build-info/multi-license.txt"
digest_1 = self.set_up_license(target_file)
digest_2 = self.set_up_license(target_file, 1)
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True,
HTTP_LICENSE_ACCEPTED=" ".join([digest_1, digest_2]))
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_api_download_file_404(self):
target_file = "build-info/snowball-blob.txt"
digest = self.set_up_license(target_file)
url = urlparse.urljoin("http://testserver/", target_file[:-2])
response = self.client.get(url, follow=True,
HTTP_LICENSE_ACCEPTED=digest)
self.assertEqual(response.status_code, 404)
def test_api_get_listing(self):
url = "/api/ls/build-info"
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)["files"]
# For each file listed, check some key attributes
for file_info in data:
file_path = os.path.join(TESTSERVER_ROOT,
file_info["url"].lstrip("/"))
if file_info["type"] == "folder":
self.assertTrue(os.path.isdir(file_path))
else:
self.assertTrue(os.path.isfile(file_path))
mtime = os.path.getmtime(file_path)
self.assertEqual(mtime, file_info["mtime"])
def test_api_get_listing_single_file(self):
url = "/api/ls/build-info/snowball-blob.txt"
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)["files"]
# Should be a listing for a single file
self.assertEqual(len(data), 1)
# For each file listed, check some key attributes
for file_info in data:
file_path = os.path.join(TESTSERVER_ROOT,
file_info["url"].lstrip("/"))
if file_info["type"] == "folder":
self.assertTrue(os.path.isdir(file_path))
else:
self.assertTrue(os.path.isfile(file_path))
mtime = os.path.getmtime(file_path)
self.assertEqual(mtime, file_info["mtime"])
def test_api_get_listing_404(self):
url = "/api/ls/buld-info"
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_OPEN_EULA_txt(self):
target_file = '~linaro-android/staging-vexpress-a9/test.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_never_available_dirs(self):
target_file = '~linaro-android/staging-imx53/test.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we don't have access we will get a Forbidden response (403)
self.assertEqual(response.status_code, 403)
def test_protected_by_EULA_txt(self):
# Get BuildInfo for target file
target_file = "~linaro-android/staging-origen/test.txt"
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
eula_path = os.path.join(settings.PROJECT_ROOT,
"templates/licenses/samsung.txt")
with open(eula_path) as license_file:
license_text = license_file.read()
digest = hashlib.md5(license_text).hexdigest()
self.assertRedirects(response, "/license?lic=%s&url=%s" %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, license_text)
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
@mock.patch('license_protected_downloads.views.config')
def test_protected_internal_file(self, config):
'''ensure a protected file can be downloaded by an internal host'''
config.INTERNAL_HOSTS = ('127.0.0.1',)
target_file = "~linaro-android/staging-origen/test.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn('X-Sendfile', response)
@mock.patch('license_protected_downloads.views.config')
def test_protected_internal_listing(self, config):
'''ensure directory listings are browseable for internal hosts'''
config.INTERNAL_HOSTS = ('127.0.0.1',)
response = self.client.get('http://testserver/')
self.assertIn('linaro-license-protection.git/commit', response.content)
def test_per_file_license_samsung(self):
# Get BuildInfo for target file
target_file = "images/origen-blob.txt"
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
eula_path = os.path.join(settings.PROJECT_ROOT,
"templates/licenses/samsung.txt")
with open(eula_path) as license_file:
license_text = license_file.read()
digest = hashlib.md5(license_text).hexdigest()
self.assertRedirects(response, "/license?lic=%s&url=%s" %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, license_text)
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_per_file_non_protected_dirs(self):
target_file = "images/MANIFEST"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_dir_containing_only_dirs(self):
target_file = "~linaro-android"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertContains(
response,
r"<th></th><th>Name</th><th>Last modified</th>"
"<th>Size</th><th>License</th>")
def test_not_found_file(self):
target_file = "12qwaszx"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(response, "not found", status_code=404)
def test_unprotected_BUILD_INFO(self):
target_file = 'build-info/panda-open.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_redirect_to_file_on_accept_multi_license(self):
target_file = "build-info/multi-license.txt"
digest = self.set_up_license(target_file)
# Accept the first license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = self.client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name = "license_accepted_" + digest
self.assertTrue(accept_cookie_name in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(
response['Location'], listing_url + "?dl=/" + target_file)
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
digest = self.set_up_license(target_file, 1)
# Accept the second license for our file...
accept_url = '/accept-license?lic=%s&url=%s' % (digest, target_file)
response = client.post(accept_url, {"accept": "accept"})
# We should have a license accept cookie.
accept_cookie_name1 = "license_accepted_" + digest
self.assertTrue(accept_cookie_name1 in response.cookies)
# We should get redirected back to the original file location.
self.assertEqual(response.status_code, 302)
url = urlparse.urljoin("http://testserver/", target_file)
listing_url = os.path.dirname(url)
self.assertEqual(
response['Location'], listing_url + "?dl=/" + target_file)
client = Client()
client.cookies[accept_cookie_name] = accept_cookie_name
client.cookies[accept_cookie_name1] = accept_cookie_name1
response = client.get(url)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_header_html(self):
target_file = "~linaro-android"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(
response, r"Welcome to the Linaro releases server")
def test_exception_internal_host_for_lic(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/origen-blob.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_exception_internal_host_for_openid(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/openid.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_exception_internal_host_for_lic_and_openid(self):
internal_host = INTERNAL_HOSTS[0]
target_file = 'build-info/origen-blob-openid.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def test_no_exception_ip(self):
internal_host = '10.1.2.3'
target_file = 'build-info/origen-blob.txt'
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(
url, follow=True, REMOTE_ADDR=internal_host)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
# Make sure that we get the license text in the license page
self.assertContains(response, build_info.get("license-text"))
# Test that we use the "samsung" theme. This contains exynos.png
self.assertContains(response, "exynos.png")
def test_broken_build_info_directory(self):
target_file = "build-info/broken-build-info"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If a build-info file is invalid, we don't allow access
self.assertEqual(response.status_code, 403)
def test_broken_build_info_file(self):
target_file = "build-info/broken-build-info/test.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If a build-info file is invalid, we don't allow access
self.assertEqual(response.status_code, 403)
def test_unable_to_download_hidden_files(self):
target_file = '~linaro-android/staging-vexpress-a9/OPEN-EULA.txt'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# This file exists, but isn't listed so we shouldn't be able to
# download it.
self.assertEqual(response.status_code, 404)
def test_partial_build_info_file_open(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_open.txt")
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # If a build-info file specifies this file is open, access is allowed
self.assertEqual(response.status_code, 200)
def test_partial_build_info_file_protected(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_protected.txt")
file_path = os.path.join(TESTSERVER_ROOT, target_file)
build_info = BuildInfo(file_path)
# Try to fetch file from server - we should be redirected
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
digest = hashlib.md5(build_info.get("license-text")).hexdigest()
self.assertRedirects(response, '/license?lic=%s&url=%s' %
(digest, target_file))
def test_partial_build_info_file_unspecified(self):
target_file = ("partial-license-settings/"
"partially-complete-build-info/"
"should_be_inaccessible.txt")
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # If a build-info file has no information about this file, access is denied
self.assertEqual(response.status_code, 403)
def test_listings_do_not_contain_double_slash_in_link(self):
target_file = 'images/'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# this link should not contain a double slash:
self.assertNotContains(response, "//origen-blob.txt")
def test_directory_with_broken_symlink(self):
target_file = 'broken-symlinks'
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
        # This test must not raise an exception; completing the request is the pass condition.
self.assertEqual(response.status_code, 200)
def test_sizeof_fmt(self):
self.assertEqual(_sizeof_fmt(1), '1')
self.assertEqual(_sizeof_fmt(1234), '1.2K')
self.assertEqual(_sizeof_fmt(1234567), '1.2M')
self.assertEqual(_sizeof_fmt(1234567899), '1.1G')
self.assertEqual(_sizeof_fmt(1234567899999), '1.1T')
def test_listdir(self):
patterns = [
(['b', 'a', 'latest', 'c'], ['latest', 'a', 'b', 'c']),
(['10', '1', '100', 'latest'], ['latest', '1', '10', '100']),
(['10', 'foo', '100', 'latest'], ['latest', '10', '100', 'foo']),
]
for files, expected in patterns:
path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, path)
for file in files:
with open(os.path.join(path, file), 'w') as f:
f.write(file)
self.assertEqual(expected, views._listdir(path))
def test_whitelisted_dirs(self):
target_file = "precise/restricted/whitelisted.txt"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
# If we have access to the file, we will get an X-Sendfile response
self.assertEqual(response.status_code, 200)
file_path = os.path.join(TESTSERVER_ROOT, target_file)
self.assertEqual(response['X-Sendfile'], file_path)
def make_temporary_file(self, data, root=None):
"""Creates a temporary file and fills it with data.
        Returns the base name of the new temporary file.
"""
tmp_file_handle, tmp_filename = tempfile.mkstemp(dir=root)
tmp_file = os.fdopen(tmp_file_handle, "w")
tmp_file.write(data)
tmp_file.close()
self.addCleanup(os.unlink, tmp_filename)
return os.path.basename(tmp_filename)
def test_replace_self_closing_tag(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_self_closing_tag1(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README"/> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_with_closing_tag(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="README">README is missing'
'</linaro:include> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_non_existent_file(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="NON_EXISTENT_FILE" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_empty_file_property(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="../README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_subdir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="subdir/README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_subdir_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="subdir/../README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_full_path(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
tmp = self.make_temporary_file("Included from /tmp", root="/tmp")
ret = _process_include_tags(
'Test <linaro:include file="/tmp/%s" /> html' % tmp)
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_self_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="./README" /> html')
self.assertEqual(ret, r"Test Included from README html")
os.chdir(old_cwd)
def test_replace_self_parent_dir(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="./../README" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_replace_symlink(self):
target_file = "readme"
old_cwd = os.getcwd()
file_path = os.path.join(TESTSERVER_ROOT, target_file)
os.chdir(file_path)
ret = _process_include_tags(
'Test <linaro:include file="READMELINK" /> html')
self.assertEqual(ret, r"Test html")
os.chdir(old_cwd)
def test_process_include_tags(self):
target_file = "readme"
url = urlparse.urljoin("http://testserver/", target_file)
response = self.client.get(url, follow=True)
self.assertContains(response, r"Included from README")
def test_is_same_parent_dir_true(self):
fname = os.path.join(TESTSERVER_ROOT, "subdir/../file")
self.assertTrue(is_same_parent_dir(TESTSERVER_ROOT, fname))
def test_is_same_parent_dir_false(self):
fname = os.path.join(TESTSERVER_ROOT, "../file")
self.assertFalse(is_same_parent_dir(TESTSERVER_ROOT, fname))
def test_get_remote_static_unsupported_file(self):
response = self.client.get('/get-remote-static?name=unsupported.css')
self.assertEqual(response.status_code, 404)
def test_get_remote_static_nonexisting_file(self):
pages = {"/": "index"}
with TestHttpServer(pages) as http_server:
css_url = '%s/init.css' % http_server.base_url
settings.SUPPORTED_REMOTE_STATIC_FILES = {
'init.css': css_url}
self.assertRaises(urllib2.HTTPError, self.client.get,
'/get-remote-static?name=init.css')
def test_get_remote_static(self):
pages = {"/": "index", "/init.css": "test CSS"}
with TestHttpServer(pages) as http_server:
css_url = '%s/init.css' % http_server.base_url
settings.SUPPORTED_REMOTE_STATIC_FILES = {
'init.css': css_url}
response = self.client.get('/get-remote-static?name=init.css')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test CSS')
def test_path_to_root(self):
response = self.client.get("http://testserver//", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_path_to_dir_above(self):
response = self.client.get("http://testserver/../", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_path_to_dir_above2(self):
response = self.client.get("http://testserver/..", follow=True)
# Shouldn't be able to escape served paths...
self.assertEqual(response.status_code, 404)
def test_get_key(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
def test_get_key_api_disabled(self):
settings.MASTER_API_KEY = ""
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 403)
def test_get_key_post_and_get_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
last_used = APIKeyStore.objects.get(key=key).last_used
# Now write a file so we can upload it
file_content = "test_get_key_post_and_get_file"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
try:
# Send the file
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.UPLOAD_PATH, key, "file_name")
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file if we present the key
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertEqual(response.status_code, 200)
response = self.client.get("http://testserver/file_name")
self.assertNotEqual(response.status_code, 200)
self.assertNotEqual(
APIKeyStore.objects.get(key=key).last_used, last_used)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.UPLOAD_PATH, key))
def test_get_public_key_post_and_get_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY,
"public": ""})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
# Now write a file so we can upload it
file_content = "test_get_key_post_and_get_file"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
buildinfo_content = "\n".join([
"Format-Version: 0.1",
"Files-Pattern: *",
"Build-Name: test",
"License-Type: open"])
tmp_build_info = os.path.join(
file_root,
self.make_temporary_file(buildinfo_content))
try:
# Send the files
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/pub/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
with open(tmp_build_info) as f:
response = self.client.post(
"http://testserver/pub/BUILD-INFO.txt",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.SERVED_PATHS[0], 'pub/file_name')
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file
response = self.client.get("http://testserver/pub/file_name")
self.assertEqual(response.status_code, 200)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.SERVED_PATHS[0], "pub"))
def test_post_empty_file(self):
'''Ensure we accept zero byte files'''
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
# Now write a file so we can upload it
file_content = ""
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
try:
# Send the file
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name",
data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
# Check the upload worked by reading the file back from its
# uploaded location
uploaded_file_path = os.path.join(
settings.UPLOAD_PATH, key, "file_name")
with open(uploaded_file_path) as f:
self.assertEqual(f.read(), file_content)
# Test we can fetch the newly uploaded file if we present the key
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertEqual(response.status_code, 200)
response = self.client.get("http://testserver/file_name")
self.assertNotEqual(response.status_code, 200)
finally:
# Delete the files generated by the test
shutil.rmtree(os.path.join(settings.UPLOAD_PATH, key))
def test_post_no_file(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
response = self.client.post(
"http://testserver/file_name", data={"key": key})
self.assertEqual(response.status_code, 500)
def test_post_file_no_key(self):
file_content = "test_post_file_no_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
# Try to upload a file without a key.
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"file": f})
self.assertEqual(response.status_code, 500)
# Make sure the file didn't get created.
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, "file_name")))
def test_post_file_random_key(self):
key = "%030x" % random.randrange(256 ** 15)
file_content = "test_post_file_random_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
# Try to upload a file with a randomly generated key.
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"key": key, "file": f})
self.assertEqual(response.status_code, 500)
# Make sure the file didn't get created.
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, key, "file_name")))
def test_api_delete_key(self):
response = self.client.get("http://testserver/api/request_key",
data={"key": settings.MASTER_API_KEY})
self.assertEqual(response.status_code, 200)
# Don't care what the key is, as long as it isn't blank
        self.assertRegexpMatches(response.content, r"\S+")
key = response.content
file_content = "test_api_delete_key"
file_root = "/tmp"
tmp_file_name = os.path.join(
file_root,
self.make_temporary_file(file_content))
with open(tmp_file_name) as f:
response = self.client.post(
"http://testserver/file_name", data={"key": key, "file": f})
self.assertEqual(response.status_code, 200)
self.assertTrue(os.path.isfile(os.path.join(settings.UPLOAD_PATH,
key,
"file_name")))
# Release the key, the files should be deleted
response = self.client.get("http://testserver/api/delete_key",
data={"key": key})
self.assertEqual(response.status_code, 200)
self.assertFalse(os.path.isfile(
os.path.join(settings.UPLOAD_PATH, key, "file_name")))
# Key shouldn't work after released
response = self.client.get("http://testserver/file_name",
data={"key": key})
self.assertNotEqual(response.status_code, 200)
class HowtoViewTests(BaseServeViewTest):
def test_no_howtos(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'build.tar.bz2')
def test_howtos_without_license(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test")
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'build.tar.bz2')
def test_howtos_with_license_in_buildinfo(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test",
with_buildinfo=True)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'howto')
def test_howtos_with_license_in_openeula(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_test.txt", data=".h1 HowTo Test",
with_buildinfo=False)
serve_root.make_file(
"build/9/howto/OPEN-EULA.txt", with_buildinfo=False)
response = self.client.get('/build/9/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'howto')
def test_howtos_howto_dir(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/howto/HOWTO_releasenotes.txt", data=".h1 HowTo Test")
response = self.client.get('/build/9/howto/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'HowTo Test')
def test_howtos_product_dir(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file(
"build/9/build.tar.bz2", with_buildinfo=True)
serve_root.make_file(
"build/9/target/product/panda/howto/HOWTO_releasenotes.txt",
data=".h1 HowTo Test")
response = self.client.get('/build/9/target/product/panda/howto/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'HowTo Test')
class FileViewTests(BaseServeViewTest):
def test_static_file(self):
with temporary_directory() as serve_root:
settings.SERVED_PATHS = [serve_root.root]
serve_root.make_file("MD5SUM")
serve_root.make_file(
"BUILD-INFO.txt",
data=("Format-Version: 2.0\n\n"
"Files-Pattern: MD5SUM\n"
"License-Type: open\n"))
response = self.client.get('/MD5SUM')
self.assertEqual(response.status_code, 200)
class ViewHelpersTests(BaseServeViewTest):
def test_auth_group_error(self):
groups = ["linaro", "batman", "catwoman", "joker"]
request = mock.Mock()
request.path = "mock_path"
response = views.group_auth_failed_response(request, groups)
self.assertIsNotNone(response)
self.assertTrue(isinstance(response, HttpResponse))
self.assertContains(
response,
"You need to be the member of one of the linaro batman, catwoman "
"or joker groups",
status_code=403)
if __name__ == '__main__':
unittest.main()
| 41.756158 | 79 | 0.632887 |
4ed172a675bffba2b8999657bdd7bc228cf24575 | 16,912 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_cancel_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-04-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
**kwargs
)
def build_start_os_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-04-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
**kwargs
)
def build_get_latest_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class VirtualMachineScaleSetRollingUpgradesOperations(object):
"""VirtualMachineScaleSetRollingUpgradesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _cancel_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=self._cancel_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
@distributed_trace
def begin_cancel(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel'} # type: ignore
def _start_os_upgrade_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_os_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=self._start_os_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
@distributed_trace
def begin_start_os_upgrade(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade'} # type: ignore
@distributed_trace
def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_04_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_latest_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=self.get_latest.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest'} # type: ignore
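    # A monitoring sketch under the same assumptions as the usage note near
    # the top of this class: start an OS upgrade, then poll get_latest for
    # its RollingUpgradeStatusInfo. The attribute accessed on the result is
    # illustrative of this API version's model.
    #
    #     ops.begin_start_os_upgrade('my-resource-group', 'my-vmss')
    #     status = ops.get_latest('my-resource-group', 'my-vmss')
    #     print(status.running_status)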
| 43.587629 | 220 | 0.686436 |
bf224aacfc18681a0989da3e48653de93b00c36c | 19,128 | py | Python | nflgame/__init__.py | front9tech/nflgame | 41fa436d2b6e9f4465adad2e9a536979e10853cc | ["Unlicense"] | 1 | 2016-01-12T11:14:42.000Z | 2016-01-12T11:14:42.000Z | nflgame/__init__.py | front9tech/nflgame | 41fa436d2b6e9f4465adad2e9a536979e10853cc | ["Unlicense"] | null | null | null | nflgame/__init__.py | front9tech/nflgame | 41fa436d2b6e9f4465adad2e9a536979e10853cc | ["Unlicense"] | null | null | null |
"""
nflgame is an API to retrieve and read NFL Game Center JSON data.
It can work with real-time data, which can be used for fantasy football.
nflgame works by parsing the same JSON data that powers NFL.com's live
GameCenter. Therefore, nflgame can be used to report game statistics while
a game is being played.
The package comes pre-loaded with game data from every pre- and regular
season game from 2009 up until the present (I try to update it every week).
Therefore, querying such data does not actually ping NFL.com.
However, if you try to search for data in a game that is being currently
played, the JSON data will be downloaded from NFL.com at each request (so be
careful not to inspect for data too many times while a game is being played).
If you ask for data for a particular game that hasn't been cached to disk
but is no longer being played, it will be automatically cached to disk
so that no further downloads are required.
Here's a quick teaser to find the top 5 running backs by rushing yards in the
first week of the 2013 season:
#!python
import nflgame
games = nflgame.games(2013, week=1)
players = nflgame.combine_game_stats(games)
for p in players.rushing().sort('rushing_yds').limit(5):
msg = '%s %d carries for %d yards and %d TDs'
print msg % (p, p.rushing_att, p.rushing_yds, p.rushing_tds)
And the output is:
L.McCoy 31 carries for 184 yards and 1 TDs
T.Pryor 13 carries for 112 yards and 0 TDs
S.Vereen 14 carries for 101 yards and 0 TDs
A.Peterson 18 carries for 93 yards and 2 TDs
R.Bush 21 carries for 90 yards and 0 TDs
Or you could find the top 5 passing plays in the same time period:
#!python
import nflgame
games = nflgame.games(2013, week=1)
plays = nflgame.combine_plays(games)
for p in plays.sort('passing_yds').limit(5):
print p
And the output is:
(DEN, DEN 22, Q4, 3 and 8) (4:42) (Shotgun) P.Manning pass short left to D.Thomas for 78 yards, TOUCHDOWN. Penalty on BAL-E.Dumervil, Defensive Offside, declined.
(DET, DET 23, Q3, 3 and 7) (5:58) (Shotgun) M.Stafford pass short middle to R.Bush for 77 yards, TOUCHDOWN.
(NYG, NYG 30, Q2, 1 and 10) (2:01) (No Huddle, Shotgun) E.Manning pass deep left to V.Cruz for 70 yards, TOUCHDOWN. Pass complete on a fly pattern.
(NO, NO 24, Q2, 2 and 6) (5:11) (Shotgun) D.Brees pass deep left to K.Stills to ATL 9 for 67 yards (R.McClain; R.Alford). Pass 24, YAC 43
(NYG, NYG 20, Q1, 1 and 10) (13:04) E.Manning pass short middle to H.Nicks pushed ob at DAL 23 for 57 yards (M.Claiborne). Pass complete on a slant pattern.
If you aren't a programmer, then the
[tutorial for non
programmers](https://github.com/BurntSushi/nflgame/wiki/Tutorial-for-non-programmers:-Installation-and-examples)
is for you.
If you need help, please come visit us at IRC/FreeNode on channel `#nflgame`.
If you've never used IRC before, then you can
[use a web client](http://webchat.freenode.net/?channels=%23nflgame).
(Enter any nickname you like, make sure the channel is `#nflgame`, fill in
the captcha and hit connect.)
Failing IRC, the second fastest way to get help is to
[open a new issue on the
tracker](https://github.com/BurntSushi/nflgame/issues/new).
There are several active contributors to nflgame that watch the issue tracker.
We tend to respond fairly quickly!
"""
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict # from PyPI
import itertools
import nflgame.game
import nflgame.live
import nflgame.player
import nflgame.schedule
import nflgame.seq
from nflgame.version import __version__
VERSION = __version__ # Deprecated. Backwards compatibility.
NoPlayers = nflgame.seq.GenPlayerStats(None)
"""
NoPlayers corresponds to the identity element of Players sequences.
Namely, adding it to any other Players sequence has no effect.
"""
players = nflgame.player._create_players()
"""
A dict of all players and meta information about each player keyed
by GSIS ID. (The identifiers used by NFL.com GameCenter.)
"""
teams = [
['ARI', 'Arizona', 'Cardinals', 'Arizona Cardinals'],
['ATL', 'Atlanta', 'Falcons', 'Atlanta Falcons'],
['BAL', 'Baltimore', 'Ravens', 'Baltimore Ravens'],
['BUF', 'Buffalo', 'Bills', 'Buffalo Bills'],
['CAR', 'Carolina', 'Panthers', 'Carolina Panthers'],
['CHI', 'Chicago', 'Bears', 'Chicago Bears'],
['CIN', 'Cincinnati', 'Bengals', 'Cincinnati Bengals'],
['CLE', 'Cleveland', 'Browns', 'Cleveland Browns'],
['DAL', 'Dallas', 'Cowboys', 'Dallas Cowboys'],
['DEN', 'Denver', 'Broncos', 'Denver Broncos'],
['DET', 'Detroit', 'Lions', 'Detroit Lions'],
['GB', 'Green Bay', 'Packers', 'Green Bay Packers', 'G.B.', 'GNB'],
['HOU', 'Houston', 'Texans', 'Houston Texans'],
['IND', 'Indianapolis', 'Colts', 'Indianapolis Colts'],
['JAC', 'Jacksonville', 'Jaguars', 'Jacksonville Jaguars', 'JAX'],
['KC', 'Kansas City', 'Chiefs', 'Kansas City Chiefs', 'K.C.', 'KAN'],
['MIA', 'Miami', 'Dolphins', 'Miami Dolphins'],
['MIN', 'Minnesota', 'Vikings', 'Minnesota Vikings'],
['NE', 'New England', 'Patriots', 'New England Patriots', 'N.E.', 'NWE'],
['NO', 'New Orleans', 'Saints', 'New Orleans Saints', 'N.O.', 'NOR'],
['NYG', 'Giants', 'New York Giants', 'N.Y.G.'],
['NYJ', 'Jets', 'New York Jets', 'N.Y.J.'],
['OAK', 'Oakland', 'Raiders', 'Oakland Raiders'],
['PHI', 'Philadelphia', 'Eagles', 'Philadelphia Eagles'],
['PIT', 'Pittsburgh', 'Steelers', 'Pittsburgh Steelers'],
['SD', 'San Diego', 'Chargers', 'San Diego Chargers', 'S.D.', 'SDG'],
['SEA', 'Seattle', 'Seahawks', 'Seattle Seahawks'],
['SF', 'San Francisco', '49ers', 'San Francisco 49ers', 'S.F.', 'SFO'],
['STL', 'St. Louis', 'Rams', 'St. Louis Rams', 'S.T.L.'],
['TB', 'Tampa Bay', 'Buccaneers', 'Tampa Bay Buccaneers', 'T.B.', 'TAM'],
['TEN', 'Tennessee', 'Titans', 'Tennessee Titans'],
['WAS', 'Washington', 'Redskins', 'Washington Redskins', 'WSH'],
]
"""
A list of all teams. Each item is a list of different ways to
describe a team. (i.e., JAC, JAX, Jacksonville, Jaguars, etc.).
The first item in each list is always the standard NFL.com
team abbreviation (two or three letters).
"""
def find(name, team=None):
"""
Finds a player (or players) with a name matching (case insensitive)
name and returns them as a list.
If team is not None, it is used as an additional search constraint.
"""
hits = []
for player in players.itervalues():
if player.name.lower() == name.lower():
if team is None or team.lower() == player.team.lower():
hits.append(player)
return hits
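# A minimal usage sketch for find() -- hedged: 'LeSean McCoy' is assumed to be
# the full-name spelling stored in the player database, which may differ:
#
#     import nflgame
#     for p in nflgame.find('LeSean McCoy', team='PHI'):
#         print p.name, p.team
#
# An empty list simply means no player matched both the name and the team.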
def standard_team(team):
"""
Returns a standard abbreviation when team corresponds to a team in
nflgame.teams (case insensitive). All known variants of a team name are
searched. If no team is found, None is returned.
"""
team = team.lower()
for variants in teams:
for variant in variants:
if team == variant.lower():
return variants[0]
return None
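# Illustrative calls (values follow the teams table defined above):
#
#     standard_team('JAX')        # -> 'JAC'  (alternate abbreviation)
#     standard_team('green bay')  # -> 'GB'   (matching is case insensitive)
#     standard_team('Gotham')     # -> None   (unknown team)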
def games(year, week=None, home=None, away=None, kind='REG', started=False):
"""
games returns a list of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
return list(games_gen(year, week, home, away, kind, started))
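# Usage sketch (hedged: the season/week values are arbitrary examples, and
# the same-team trick relies on the special case documented above):
#
#     import nflgame
#     early = nflgame.games(2013, week=[1, 2, 3], kind='REG')
#     eagles = nflgame.games(2013, home='PHI', away='PHI')  # all PHI games
#
# Both calls return plain lists of nflgame.game.Game objects.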
def games_gen(year, week=None, home=None, away=None,
kind='REG', started=False):
"""
games returns a generator of all games matching the given criteria. Each
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
As a special case, if the home and away teams are set to the same team,
then all games where that team played are returned.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
return None
def gen():
for info in infos:
g = nflgame.game.Game(info['eid'])
if g is None:
continue
yield g
return gen()
def one(year, week, home, away, kind='REG', started=False):
"""
one returns a single game matching the given criteria. The
game can then be queried for player statistics and information about
the game itself (score, winner, scoring plays, etc.).
one returns either a single game or no games. If there are multiple games
matching the given criteria, an assertion is raised.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
Note that if a game's JSON data is not cached to disk, it is retrieved
from the NFL web site. A game's JSON data is *only* cached to disk once
the game is over, so be careful with the number of times you call this
while a game is going on. (i.e., don't piss off NFL.com.)
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = _search_schedule(year, week, home, away, kind, started)
if not infos:
return None
assert len(infos) == 1, 'More than one game matches the given criteria.'
return nflgame.game.Game(infos[0]['eid'])
def combine(games, plays=False):
"""
DEPRECATED. Please use one of nflgame.combine_{game,play,max}_stats
instead.
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get PlayerStat objects corresponding to
statistics across an entire week, some number of weeks or an entire season.
If the plays parameter is True, then statistics will be derived from
play by play data. This mechanism is slower but will contain more detailed
statistics like receiver targets, yards after the catch, punt and field
goal blocks, etc.
"""
if plays:
return combine_play_stats(games)
else:
return combine_game_stats(games)
def combine_game_stats(games):
"""
Combines a list of games into one big player sequence containing game
level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
"""
return reduce(lambda ps1, ps2: ps1 + ps2,
[g.players for g in games if g is not None])
def combine_play_stats(games):
"""
Combines a list of games into one big player sequence containing play
level statistics.
This can be used, for example, to get PlayPlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats when more
detailed statistics such as receiver targets, yards after the catch and
punt/FG blocks are needed.
N.B. Since this combines *all* play data, this function may take a while
to complete depending on the number of games passed in.
"""
return reduce(lambda p1, p2: p1 + p2,
[g.drives.players() for g in games if g is not None])
def combine_max_stats(games):
"""
Combines a list of games into one big player sequence containing maximum
statistics based on game and play level statistics.
This can be used, for example, to get GamePlayerStats objects corresponding
to statistics across an entire week, some number of weeks or an entire
season.
This function should be used in lieu of combine_game_stats or
combine_play_stats when the best possible accuracy is desired.
"""
return reduce(lambda a, b: a + b,
[g.max_player_stats() for g in games if g is not None])
def combine_plays(games):
"""
Combines a list of games into one big play generator that can be searched
as if it were a single game.
"""
chain = itertools.chain(*[g.drives.plays() for g in games])
return nflgame.seq.GenPlays(chain)
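# Sketch tying the combinators together (hedged: the stat names and the
# 50-yard threshold are illustrative). combine_game_stats is the fast path;
# combine_play_stats and combine_max_stats trade speed for play-level detail:
#
#     import nflgame
#     games = nflgame.games(2013, week=1)
#     for p in nflgame.combine_game_stats(games).rushing().limit(3):
#         print p
#     long_passes = nflgame.combine_plays(games).filter(
#         passing_yds=lambda yds: yds > 50)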
def _search_schedule(year, week=None, home=None, away=None, kind='REG',
started=False):
"""
Searches the schedule to find the game identifiers matching the criteria
given.
The kind parameter specifies whether to fetch preseason, regular season
or postseason games. Valid values are PRE, REG and POST.
The week parameter is relative to the value of the kind parameter, and
may be set to a list of week numbers.
In the regular season, the week parameter corresponds to the normal
week numbers 1 through 17. Similarly in the preseason, valid week numbers
are 1 through 4. In the post season, the week number corresponds to the
numerical round of the playoffs. So the wild card round is week 1,
the divisional round is week 2, the conference round is week 3
and the Super Bowl is week 4.
The year parameter specifies the season, and not necessarily the actual
year that a game was played in. For example, a Super Bowl taking place
in the year 2011 actually belongs to the 2010 season. Also, the year
parameter may be set to a list of seasons just like the week parameter.
If started is True, then only games that have already started (or are
about to start in less than 5 minutes) will be returned. Note that the
started parameter requires pytz to be installed. This is useful when
you only want to collect stats from games that have JSON data available
(as opposed to waiting for a 404 error from NFL.com).
"""
infos = []
for (y, t, w, h, a), info in nflgame.schedule.games:
if year is not None:
if isinstance(year, list) and y not in year:
continue
if not isinstance(year, list) and y != year:
continue
if week is not None:
if isinstance(week, list) and w not in week:
continue
if not isinstance(week, list) and w != week:
continue
if home is not None and away is not None and home == away:
if h != home and a != home:
continue
else:
if home is not None and h != home:
continue
if away is not None and a != away:
continue
if t != kind:
continue
if started:
gametime = nflgame.live._game_datetime(info)
now = nflgame.live._now()
if gametime > now and (gametime - now).total_seconds() > 300:
continue
infos.append(info)
return infos
| 42.696429 | 166 | 0.687892 |
d906ad5a14ebad6c16c3fec584e762acfa3d55ef | 31,003 | py | Python | scripts/reflinks.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | ["MIT"] | null | null | null | scripts/reflinks.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | ["MIT"] | null | null | null | scripts/reflinks.py | sauravsrijan/pywikibot | 3d42e7c4de7ee96cda7d6dd9f95fe7c6d3c37484 | ["MIT"] | 1 | 2020-04-14T14:52:24.000Z | 2020-04-14T14:52:24.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Fetch and add titles for bare links in references.
This bot will search for references which are only made of a link without title
(i.e. <ref>[https://www.google.fr/]</ref> or <ref>https://www.google.fr/</ref>)
and will fetch the html title from the link to use it as the title of the wiki
link in the reference, i.e.
<ref>[https://www.google.fr/search?q=test test - Google Search]</ref>
Every 20 edits, the bot checks a special stop page. If that page has been
edited, the bot stops.
Warning: Running this script on German Wikipedia is not allowed anymore.
This script relies on noreferences.py, so you need to configure
noreferences.py for your wiki, or this script will not work.
pdfinfo is needed for parsing pdf titles.
The following parameters are supported:
-limit:n Stops after n edits
-xml:dump.xml Should be used instead of a simple page fetching method from
pagegenerators.py for performance and load issues
-xmlstart Page to start with when using an XML dump
-ignorepdf Do not handle PDF files (handy if you use Windows and can't
get pdfinfo)
-summary Use a custom edit summary. Otherwise it uses the default
one from i18n/reflinks.py
The following generators and filters are supported:
¶ms;
"""
# (C) Nicolas Dumazet (NicDumZ), 2008
# (C) Pywikibot team, 2008-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
import os
import re
import socket
import subprocess
import tempfile
from functools import partial
import pywikibot
from pywikibot import comms, i18n, pagegenerators, textlib, Bot
from pywikibot import config2 as config
from pywikibot.pagegenerators import (
XMLDumpPageGenerator as _XMLDumpPageGenerator,
)
from pywikibot.tools.formatter import color_format, PY2
from requests import codes
from scripts import noreferences
if not PY2:
import http.client as httplib
from urllib.error import URLError
else:
import httplib
from urllib2 import URLError
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
localized_msg = ('fr', 'it', 'pl') # localized message at MediaWiki
# localized message at specific wikipedia site
# should be moved to MediaWiki Pywikibot manual
stop_page = {
'fr': 'Utilisateur:DumZiBoT/EditezCettePagePourMeStopper',
'da': 'Bruger:DumZiBoT/EditThisPageToStopMe',
'de': 'Benutzer:DumZiBoT/EditThisPageToStopMe',
'fa': 'کاربر:Amirobot/EditThisPageToStopMe',
'it': 'Utente:Marco27Bot/EditThisPageToStopMe',
'ko': '사용자:GrassnBreadRefBot/EditThisPageToStopMe1',
'he': 'User:Matanyabot/EditThisPageToStopMe',
'hu': 'User:Damibot/EditThisPageToStopMe',
'en': 'User:DumZiBoT/EditThisPageToStopMe',
'pl': 'Wikipedysta:MastiBot/EditThisPageToStopMe',
'ru': 'User:Rubinbot/EditThisPageToStopMe',
'ur': 'صارف:Shuaib-bot/EditThisPageToStopMe',
'zh': 'User:Sz-iwbot',
}
deadLinkTag = {
'fr': '[%s] {{lien mort}}',
'da': '[%s] {{dødt link}}',
'fa': '[%s] {{پیوند مرده}}',
'he': '{{קישור שבור}}',
'hi': '[%s] {{Dead link}}',
'hu': '[%s] {{halott link}}',
'ko': '[%s] {{죽은 바깥 고리}}',
'es': '{{enlace roto2|%s}}',
'it': '{{Collegamento interrotto|%s}}',
'en': '[%s] {{dead link}}',
'pl': '[%s] {{Martwy link}}',
'ru': '[%s] {{subst:dead}}',
'sr': '[%s] {{dead link}}',
'ur': '[%s] {{مردہ ربط}}',
}
soft404 = re.compile(
r'\D404(\D|\Z)|error|errdoc|Not.{0,3}Found|sitedown|eventlog',
re.IGNORECASE)
# matches an URL at the index of a website
dirIndex = re.compile(
r'\w+://[^/]+/((default|index)\.'
r'(asp|aspx|cgi|htm|html|phtml|mpx|mspx|php|shtml|var))?$',
re.IGNORECASE)
# Extracts the domain name
domain = re.compile(r'^(\w+)://(?:www.|)([^/]+)')
globalbadtitles = r"""
# is
(test|
# starts with
^\W*(
register
|registration
|(sign|log)[ \-]?in
|subscribe
|sign[ \-]?up
|log[ \-]?on
|untitled[ ]?(document|page|\d+|$)
|404[ ]
).*
# anywhere
|.*(
403[ ]forbidden
|(404|page|file|information|resource).*not([ ]*be)?[ ]*
(available|found)
|site.*disabled
|error[ ]404
|error.+not[ ]found
|not[ ]found.+error
|404[ ]error
|\D404\D
|check[ ]browser[ ]settings
|log[ \-]?(on|in)[ ]to
|site[ ]redirection
).*
# ends with
|.*(
register
|registration
|(sign|log)[ \-]?in
|subscribe|sign[ \-]?up
|log[ \-]?on
)\W*$
)
"""
# Language-specific bad titles
badtitles = {
'en': '',
'fr': '.*(404|page|site).*en +travaux.*',
'es': '.*sitio.*no +disponible.*',
'it': '((pagina|sito) (non trovat[ao]|inesistente)|accedi|errore)',
'ru': '.*(Страница|страница).*(не[ ]*найдена|осутствует).*',
}
# Regex that match bare references
linksInRef = re.compile(
# bracketed URLs
r'(?i)<ref(?P<name>[^>]*)>\s*\[?(?P<url>(?:http|https)://(?:'
# unbracketed with()
    r'[^\[\]\s<>"]+\([^\[\]\s<>"]+[^\[\]\s\.:;\\,<>\?"]+|'
# unbracketed without ()
r'[^\[\]\s<>"]+[^\[\]\s\)\.:;\\,<>\?"]+|[^\[\]\s<>"]+))'
r'[!?,\s]*\]?\s*</ref>')
# Download this file :
# http://www.twoevils.org/files/wikipedia/404-links.txt.gz
# ( maintained by User:Dispenser )
listof404pages = '404-links.txt'
XmlDumpPageGenerator = partial(
_XMLDumpPageGenerator, text_predicate=linksInRef.search)
class RefLink(object):
"""Container to handle a single bare reference."""
def __init__(self, link, name, site=None):
"""Initializer."""
self.refname = name
self.link = link
self.site = site or pywikibot.Site()
self.linkComment = i18n.twtranslate(self.site, 'reflinks-comment')
self.url = re.sub('#.*', '', self.link)
self.title = None
def refTitle(self):
"""Return the <ref> with its new title."""
return '<ref%s>[%s %s<!-- %s -->]</ref>' % (self.refname, self.link,
self.title,
self.linkComment)
def refLink(self):
"""No title has been found, return the unbracketed link."""
return '<ref%s>%s</ref>' % (self.refname, self.link)
def refDead(self):
"""Dead link, tag it with a {{dead link}}."""
tag = i18n.translate(self.site, deadLinkTag)
if not tag:
dead_link = self.refLink()
elif '%s' in tag:
dead_link = '<ref%s>%s</ref>' % (self.refname, tag % self.link)
else:
dead_link = '<ref%s>%s</ref>' % (self.refname, tag)
return dead_link
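    # Illustrative expansion (not executed; assumes an English wiki whose
    # deadLinkTag contains '%s'): with refname ' name="a"' and link
    # 'http://x.example', refDead() yields
    #   <ref name="a">[http://x.example] {{dead link}}</ref>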
def transform(self, ispdf=False):
"""Normalize the title."""
# convert html entities
if not ispdf:
self.title = pywikibot.html2unicode(self.title)
self.title = re.sub(r'-+', '-', self.title)
# remove formatting, i.e long useless strings
self.title = re.sub(r'[\.+\-=]{4,}', ' ', self.title)
# remove \n and \r and Unicode spaces from titles
self.title = re.sub(r'(?u)\s', ' ', self.title)
self.title = re.sub(r'[\n\r\t]', ' ', self.title)
# remove extra whitespaces
# remove leading and trailing ./;/,/-/_/+/ /
self.title = re.sub(r' +', ' ', self.title.strip(r'=.;,-+_ '))
self.avoid_uppercase()
        # avoid closing the link before the end
        self.title = self.title.replace(']', '&#93;')
        # avoid multiple } being interpreted as a template inclusion
        self.title = self.title.replace('}}', '}&#125;')
        # prevent multiple quotes being interpreted as '' or '''
        self.title = self.title.replace("''", "'&#39;")
self.title = pywikibot.unicode2html(self.title, self.site.encoding())
# TODO : remove HTML when both opening and closing tags are included
def avoid_uppercase(self):
"""
Convert to title()-case if title is 70% uppercase characters.
Skip title that has less than 6 characters.
"""
if len(self.title) <= 6:
return
nb_upper = 0
nb_letter = 0
for letter in self.title:
if letter.isupper():
nb_upper += 1
if letter.isalpha():
nb_letter += 1
if letter.isdigit():
return
if nb_upper / (nb_letter + 1) > .70:
self.title = self.title.title()
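# Worked example for RefLink.avoid_uppercase (illustrative; the constructor
# normally needs a real pywikibot site object):
#
#     r = RefLink('http://example.org', '', site=some_site)
#     r.title = 'EXAMPLE DOMAIN PAGE'
#     r.avoid_uppercase()        # title becomes 'Example Domain Page'
#     r.title = 'PAGE 404'       # a digit disables the conversion entirely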
class DuplicateReferences(object):
"""Helper to de-duplicate references in text.
When some references are duplicated in an article,
name the first, and remove the content of the others
"""
def __init__(self, site=None):
"""Initializer."""
if not site:
site = pywikibot.Site()
# Match references
self.REFS = re.compile(
r'(?i)<ref(?P<params>[^>/]*)>(?P<content>.*?)</ref>')
self.NAMES = re.compile(
r'(?i).*name\s*=\s*(?P<quote>"?)\s*(?P<name>.+)\s*(?P=quote).*')
self.GROUPS = re.compile(
r'(?i).*group\s*=\s*(?P<quote>"?)\s*(?P<group>.+)\s*(?P=quote).*')
self.autogen = i18n.twtranslate(site, 'reflinks-autogen')
def process(self, text):
"""Process the page."""
# keys are ref groups
# values are a dict where :
# keys are ref content
# values are [name, [list of full ref matches],
# quoted, need_to_change]
found_refs = {}
found_ref_names = {}
# Replace key by [value, quoted]
named_repl = {}
for match in self.REFS.finditer(text):
content = match.group('content')
if not content.strip():
continue
params = match.group('params')
group = self.GROUPS.match(params)
if group not in found_refs:
found_refs[group] = {}
groupdict = found_refs[group]
if content in groupdict:
v = groupdict[content]
v[1].append(match.group())
else:
v = [None, [match.group()], False, False]
name = self.NAMES.match(params)
if name:
quoted = name.group('quote') == '"'
name = name.group('name')
if v[0]:
if v[0] != name:
named_repl[name] = [v[0], v[2]]
else:
# First name associated with this content
if name == 'population':
pywikibot.output(content)
if name not in found_ref_names:
# first time ever we meet this name
if name == 'population':
pywikibot.output('in')
v[2] = quoted
v[0] = name
else:
# if has_key, means that this name is used
# with another content. We'll need to change it
v[3] = True
found_ref_names[name] = 1
groupdict[content] = v
id = 1
while self.autogen + str(id) in found_ref_names:
id += 1
        for (g, d) in found_refs.items():
            if g:
                # g is the regex match for the group attribute; use its
                # captured name (the bare local 'group' is not defined yet)
                group = 'group="{0}" '.format(g.group('group'))
            else:
                group = ''
for (k, v) in d.items():
if len(v[1]) == 1 and not v[3]:
continue
name = v[0]
if not name:
name = '"{0}{1}"'.format(self.autogen, id)
id += 1
elif v[2]:
name = '"{0}"'.format(name)
named = '<ref {0}name={1}>{2}</ref>'.format(group, name, k)
text = text.replace(v[1][0], named, 1)
# make sure that the first (named ref) is not
# removed later :
pos = text.index(named) + len(named)
header = text[:pos]
end = text[pos:]
unnamed = '<ref {0}name={1} />'.format(group, name)
for ref in v[1][1:]:
end = end.replace(ref, unnamed)
text = header + end
for (k, v) in named_repl.items():
# TODO : Support ref groups
name = v[0]
if v[1]:
name = '"{0}"'.format(name)
text = re.sub(
'<ref name\\s*=\\s*(?P<quote>"?)\\s*{}\\s*(?P=quote)\\s*/>'
.format(k),
'<ref name={} />'.format(name), text)
return text
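# Hypothetical before/after for DuplicateReferences.process (the generated
# name depends on the wiki's 'reflinks-autogen' message, so the exact string
# below is an assumption):
#
#     dedup = DuplicateReferences(site)
#     dedup.process('a<ref>x</ref> b<ref>x</ref>')
#     # -> 'a<ref name="autogenerated1">x</ref> b<ref name="autogenerated1" />'
#
# i.e. the first occurrence keeps (or gains) a name and later duplicates
# collapse to self-closing named refs.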
class ReferencesRobot(Bot):
"""References bot."""
def __init__(self, generator, **kwargs):
"""- generator : Page generator."""
self.availableOptions.update({
'ignorepdf': False, # boolean
'limit': None, # int, stop after n modified pages
'summary': None,
})
super(ReferencesRobot, self).__init__(**kwargs)
self.generator = generator
self.site = pywikibot.Site()
self._use_fake_user_agent = config.fake_user_agent_default.get(
'reflinks', False)
# Check
manual = 'mw:Manual:Pywikibot/refLinks'
code = None
for alt in [self.site.code] + i18n._altlang(self.site.code):
if alt in localized_msg:
code = alt
break
if code:
manual += '/{0}'.format(code)
if self.getOption('summary') is None:
self.msg = i18n.twtranslate(self.site, 'reflinks-msg', locals())
else:
self.msg = self.getOption('summary')
local = i18n.translate(self.site, badtitles)
if local:
bad = '(' + globalbadtitles + '|' + local + ')'
else:
bad = globalbadtitles
self.titleBlackList = re.compile(bad, re.I | re.S | re.X)
self.norefbot = noreferences.NoReferencesBot(None, verbose=False)
self.deduplicator = DuplicateReferences(self.site)
self.site_stop_page = i18n.translate(self.site, stop_page)
if self.site_stop_page:
self.stop_page = pywikibot.Page(self.site, self.site_stop_page)
if self.stop_page.exists():
self.stop_page_rev_id = self.stop_page.latest_revision_id
else:
pywikibot.warning('The stop page {0} does not exist'
.format(self.stop_page.title(as_link=True)))
# Regex to grasp content-type meta HTML tag in HTML source
self.META_CONTENT = re.compile(br'(?i)<meta[^>]*content\-type[^>]*>')
# Extract the encoding from a charset property (from content-type !)
self.CHARSET = re.compile(r'(?i)charset\s*=\s*(?P<enc>[^\'",;>/]*)')
# Extract html title from page
self.TITLE = re.compile(r'(?is)(?<=<title>).*?(?=</title>)')
# Matches content inside <script>/<style>/HTML comments
self.NON_HTML = re.compile(
br'(?is)<script[^>]*>.*?</script>|<style[^>]*>.*?</style>|'
br'<!--.*?-->|<!\[CDATA\[.*?\]\]>')
# Authorized mime types for HTML pages
self.MIME = re.compile(
r'application/(?:xhtml\+xml|xml)|text/(?:ht|x)ml')
def httpError(self, err_num, link, pagetitleaslink):
"""Log HTTP Error."""
pywikibot.stdout('HTTP error ({0}) for {1} on {2}'
''.format(err_num, link, pagetitleaslink))
def getPDFTitle(self, ref, f):
"""Use pdfinfo to retrieve title from a PDF.
FIXME: Unix-only, I'm afraid.
"""
pywikibot.output('PDF file.')
fd, infile = tempfile.mkstemp()
urlobj = os.fdopen(fd, 'w+')
urlobj.write(f.text)
try:
pdfinfo_out = subprocess.Popen([r'pdfinfo', '/dev/stdin'],
stdin=urlobj,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False).communicate()[0]
for aline in pdfinfo_out.splitlines():
if aline.lower().startswith('title'):
ref.title = aline.split(None)[1:]
ref.title = ' '.join(ref.title)
if ref.title != '':
pywikibot.output('title: ' + ref.title)
pywikibot.output('PDF done.')
except ValueError:
pywikibot.output('pdfinfo value error.')
except OSError:
pywikibot.output('pdfinfo OS error.')
except Exception: # Ignore errors
pywikibot.output('PDF processing error.')
pywikibot.exception()
finally:
urlobj.close()
os.unlink(infile)
def run(self):
"""Run the Bot."""
try:
dead_links = codecs.open(listof404pages, 'r', 'latin_1').read()
except IOError:
raise NotImplementedError(
'404-links.txt is required for reflinks.py\n'
'You need to download\n'
'http://www.twoevils.org/files/wikipedia/404-links.txt.gz\n'
'and to unzip it in the same directory')
editedpages = 0
for page in self.generator:
try:
# Load the page's text from the wiki
new_text = page.get()
if not page.has_permission():
pywikibot.output("You can't edit page "
+ page.title(as_link=True))
continue
except pywikibot.NoPage:
pywikibot.output('Page {} not found'
.format(page.title(as_link=True)))
continue
except pywikibot.IsRedirectPage:
pywikibot.output('Page {} is a redirect'
.format(page.title(as_link=True)))
continue
# for each link to change
for match in linksInRef.finditer(
textlib.removeDisabledParts(page.get())):
link = match.group('url')
# debugging purpose
# print link
if 'jstor.org' in link:
# TODO: Clean URL blacklist
continue
ref = RefLink(link, match.group('name'), site=self.site)
try:
f = comms.http.fetch(
ref.url, use_fake_user_agent=self._use_fake_user_agent)
# Try to get Content-Type from server
content_type = f.response_headers.get('content-type')
if content_type and not self.MIME.search(content_type):
if ref.link.lower().endswith('.pdf') and \
not self.getOption('ignorepdf'):
# If file has a PDF suffix
self.getPDFTitle(ref, f)
else:
pywikibot.output(color_format(
'{lightyellow}WARNING{default} : '
'media : {0} ', ref.link))
if ref.title:
if not re.match(
'(?i) *microsoft (word|excel|visio)',
ref.title):
ref.transform(ispdf=True)
repl = ref.refTitle()
else:
pywikibot.output(color_format(
'{lightyellow}WARNING{default} : '
'PDF title blacklisted : {0} ', ref.title))
repl = ref.refLink()
else:
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
continue
# Get the real url where we end (http redirects !)
redir = f.data.url
if redir != ref.link and \
domain.findall(redir) == domain.findall(link):
if soft404.search(redir) and \
not soft404.search(ref.link):
pywikibot.output(color_format(
'{lightyellow}WARNING{default} : '
'Redirect 404 : {0} ', ref.link))
continue
if dirIndex.match(redir) and \
not dirIndex.match(ref.link):
pywikibot.output(color_format(
'{lightyellow}WARNING{default} : '
'Redirect to root : {0} ', ref.link))
continue
if f.status != codes.ok:
pywikibot.output('HTTP error ({0}) for {1} on {2}'
.format(f.status, ref.url,
page.title(as_link=True)),
toStdout=True)
# 410 Gone, indicates that the resource has been
# purposely removed
if f.status == 410 or \
(f.status == 404 and ('\t{}\t'.format(ref.url)
in dead_links)):
repl = ref.refDead()
new_text = new_text.replace(match.group(), repl)
continue
linkedpagetext = f.raw
except UnicodeError:
# example:
# http://www.adminet.com/jo/20010615¦/ECOC0100037D.html
# in [[fr:Cyanure]]
pywikibot.output(color_format(
'{lightred}Bad link{default} : {0} in {1}',
ref.url, page.title(as_link=True)))
continue
except (URLError,
socket.error,
IOError,
httplib.error,
pywikibot.FatalServerError,
pywikibot.Server504Error) as e:
pywikibot.output("Can't retrieve page {0} : {1}"
.format(ref.url, e))
continue
# remove <script>/<style>/comments/CDATA tags
linkedpagetext = self.NON_HTML.sub(b'', linkedpagetext)
meta_content = self.META_CONTENT.search(linkedpagetext)
enc = []
s = None
if content_type:
# use charset from http header
s = self.CHARSET.search(content_type)
if meta_content:
tag = meta_content.group()
# Prefer the contentType from the HTTP header :
if not content_type:
content_type = tag
if not s:
# use charset from html
s = self.CHARSET.search(str(tag))
if s:
tmp = s.group('enc').strip("\"' ").lower()
naked = re.sub(r'[ _\-]', '', tmp)
# Convert to python correct encoding names
if naked == 'gb2312':
enc.append('gbk')
elif naked == 'shiftjis':
enc.append('shift jis 2004')
enc.append('cp932')
elif naked == 'xeucjp':
enc.append('euc-jp')
else:
enc.append(tmp)
else:
pywikibot.output('No charset found for ' + ref.link)
if not content_type:
pywikibot.output('No content-type found for ' + ref.link)
continue
elif not self.MIME.search(content_type):
pywikibot.output(color_format(
'{lightyellow}WARNING{default} : media : {0} ',
ref.link))
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
continue
# Ugly hacks to try to survive when both server and page
# return no encoding.
# Uses most used encodings for each national suffix
if '.ru' in ref.link or '.su' in ref.link:
# see http://www.sci.aha.ru/ATL/ra13a.htm : no server
# encoding, no page encoding
enc = enc + ['koi8-r', 'windows-1251']
elif '.jp' in ref.link:
enc.append('shift jis 2004')
enc.append('cp932')
elif '.kr' in ref.link:
enc.append('euc-kr')
enc.append('cp949')
elif '.zh' in ref.link:
enc.append('gbk')
if 'utf-8' not in enc:
enc.append('utf-8')
try:
u = linkedpagetext.decode(enc[0]) # Bug T69410
except (UnicodeDecodeError, LookupError) as e:
pywikibot.output('{} : Decoding error - {}'
.format(ref.link, e))
continue
# Retrieves the first non empty string inside <title> tags
for m in self.TITLE.finditer(u):
t = m.group()
if t:
ref.title = t
ref.transform()
if ref.title:
break
if not ref.title:
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
pywikibot.output('{0} : No title found...'
.format(ref.link))
continue
                # XXX Ugly hack: 'Ã©' is what UTF-8 'é' looks like after a
                # wrong latin-1 decode, i.e. a hybrid/mis-declared encoding
                if 'Ã©' in ref.title:
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
pywikibot.output('{0} : Hybrid encoding...'
.format(ref.link))
continue
if self.titleBlackList.match(ref.title):
repl = ref.refLink()
new_text = new_text.replace(match.group(), repl)
pywikibot.output(color_format(
'{lightred}WARNING{default} {0} : '
'Blacklisted title ({1})', ref.link, ref.title))
continue
# Truncate long titles. 175 is arbitrary
if len(ref.title) > 175:
ref.title = ref.title[:175] + '...'
repl = ref.refTitle()
new_text = new_text.replace(match.group(), repl)
# Add <references/> when needed, but ignore templates !
if page.namespace != 10:
if self.norefbot.lacksReferences(new_text):
new_text = self.norefbot.addReferences(new_text)
new_text = self.deduplicator.process(new_text)
old_text = page.text
self.userPut(page, old_text, new_text, summary=self.msg,
ignore_save_related_errors=True,
ignore_server_errors=True)
if new_text == old_text:
continue
else:
editedpages += 1
if self.getOption('limit') \
and editedpages >= self.getOption('limit'):
pywikibot.output('Edited {} pages, stopping.'
.format(self.getOption('limit')))
return
if self.site_stop_page and editedpages % 20 == 0:
self.stop_page = pywikibot.Page(self.site, self.site_stop_page)
if self.stop_page.exists():
pywikibot.output(color_format(
'{lightgreen}Checking stop page...{default}'))
actual_rev = self.stop_page.latest_revision_id
if actual_rev != self.stop_page_rev_id:
pywikibot.output(
'{0} has been edited : Someone wants us to stop.'
.format(self.stop_page.title(as_link=True)))
return
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
xml_filename = None
xml_start = None
options = {}
generator = None
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
gen_factory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg.startswith('-summary:'):
options['summary'] = arg[9:]
elif arg == '-always':
options['always'] = True
elif arg == '-ignorepdf':
options['ignorepdf'] = True
elif arg.startswith('-limit:'):
options['limit'] = int(arg[7:])
elif arg.startswith('-xmlstart'):
if len(arg) == 9:
xml_start = pywikibot.input(
'Please enter the dumped article to start with:')
else:
xml_start = arg[10:]
elif arg.startswith('-xml'):
if len(arg) == 4:
xml_filename = pywikibot.input(
"Please enter the XML dump's filename:")
else:
xml_filename = arg[5:]
else:
gen_factory.handleArg(arg)
if xml_filename:
generator = XmlDumpPageGenerator(xml_filename, xml_start,
gen_factory.namespaces)
if not generator:
generator = gen_factory.getCombinedGenerator()
if not generator:
pywikibot.bot.suggest_help(missing_generator=True)
return
if not gen_factory.nopreload:
generator = pagegenerators.PreloadingGenerator(generator)
generator = pagegenerators.RedirectFilterPageGenerator(generator)
bot = ReferencesRobot(generator, **options)
bot.run()
if __name__ == '__main__':
main()
| 37.488513 | 79 | 0.491759 |
9d7594197185df4d4e93f737fd14b000f3897a96 | 1,749 | py | Python | youtube/youtube.py | NeuroAssassin/aikaterna-cogs | 6a914fa60658f3853f914af97f1a8c7bf274c313 | ["MIT"] | null | null | null | youtube/youtube.py | NeuroAssassin/aikaterna-cogs | 6a914fa60658f3853f914af97f1a8c7bf274c313 | ["MIT"] | null | null | null | youtube/youtube.py | NeuroAssassin/aikaterna-cogs | 6a914fa60658f3853f914af97f1a8c7bf274c313 | ["MIT"] | null | null | null |
import aiohttp
import re
from redbot.core import commands
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
class YouTube(commands.Cog):
"""Search YouTube for videos."""
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
async def _youtube_results(self, query: str):
try:
search_url = "https://www.youtube.com/results?"
payload = {"search_query": "".join(query)}
headers = {"user-agent": "Red-cog/3.0"}
async with self.session.get(search_url, params=payload, headers=headers) as r:
result = await r.text()
yt_find = re.findall(r"href=\"\/watch\?v=(.{11})", result)
url_list = []
for track in yt_find:
url = f"https://www.youtube.com/watch?v={track}"
if url not in url_list:
url_list.append(url)
except Exception as e:
url_list = [f"Something went terribly wrong! [{e}]"]
return url_list
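    # Shape of the result (the video id below is made up):
    #
    #     await self._youtube_results("red discord bot")
    #     # -> ['https://www.youtube.com/watch?v=AAAAAAAAAAA', ...]
    #
    # On failure the list holds a single error string instead, which is why
    # the commands below can always safely send result[0].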
@commands.command()
async def youtube(self, ctx, *, query: str):
"""Search on Youtube."""
result = await self._youtube_results(query)
if result:
await ctx.send(result[0])
else:
await ctx.send("Nothing found. Try again later.")
@commands.command()
async def ytsearch(self, ctx, *, query: str):
"""Search on Youtube, multiple results."""
result = await self._youtube_results(query)
if result:
await menu(ctx, result, DEFAULT_CONTROLS)
else:
await ctx.send("Nothing found. Try again later.")
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
| 32.388889 | 90 | 0.582047 |
3281f72880fb760027bc402beef3acd14ac6496b | 7,662 | py | Python | tests/mfem_engines_bps/postprocess-plot-4.py | nbeams/benchmarks | fdd742fb2ca22c0282c0ccb85832f471bf22f23a | ["BSD-2-Clause"] | 6 | 2017-08-25T19:44:54.000Z | 2021-11-01T20:33:18.000Z | tests/mfem_engines_bps/postprocess-plot-4.py | nbeams/benchmarks | fdd742fb2ca22c0282c0ccb85832f471bf22f23a | ["BSD-2-Clause"] | 7 | 2018-05-08T13:53:44.000Z | 2021-02-11T04:26:30.000Z | tests/mfem_engines_bps/postprocess-plot-4.py | nbeams/benchmarks | fdd742fb2ca22c0282c0ccb85832f471bf22f23a | ["BSD-2-Clause"] | 5 | 2017-08-11T21:54:29.000Z | 2020-09-03T16:48:46.000Z |
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project
# (17-SC-20-SC), a collaborative effort of two U.S. Department of Energy
# organizations (Office of Science and the National Nuclear Security
# Administration) responsible for the planning and preparation of a capable
# exascale ecosystem, including software, applications, hardware, advanced
# system engineering and early testbed platforms, in support of the nation's
# exascale computing imperative.
##### Load the data
execfile('postprocess-base.py')
##### Sample plot output
from pylab import *
rcParams['font.sans-serif'].insert(0,'Noto Sans')
rcParams['font.sans-serif'].insert(1,'Open Sans')
rcParams['figure.figsize']=[10, 8] # default: 8 x 6
cm=get_cmap('Set1') # 'Accent', 'Dark2', 'Set1', 'Set2', 'Set3'
if '_segmentdata' in cm.__dict__:
cm_size=len(cm.__dict__['_segmentdata']['red'])
elif 'colors' in cm.__dict__:
cm_size=len(cm.__dict__['colors'])
colors=[cm(1.*i/(cm_size-1)) for i in range(cm_size)]
# colors=['blue','green','crimson','turquoise','m','gold','cornflowerblue',
# 'darkorange']
sel_runs=runs
for run in sel_runs:
run['config']=run['config'].rsplit('/',1)[-1].rsplit('.sh',1)[0]
configs=[]
for run in sel_runs:
if not (run['config'] in configs):
configs.append(run['config'])
print 'Using configurations:', configs
for run in sel_runs:
run['test']=run['test'].rsplit('/',1)[-1].rsplit('.sh',1)[0]
tests=[]
for run in sel_runs:
if not (run['test'] in tests):
tests.append(run['test'])
print 'Using tests:', tests
compilers=[]
for run in sel_runs:
if not (run['compiler'] in compilers):
compilers.append(run['compiler'])
print 'Using compilers:', compilers
if 'problem' in sel_runs[0]:
for run in sel_runs:
if 'problem' in run:
run['problem']='bp1' if run['problem']==0 else 'bp3'
probs=list(set([run['problem'] for run in sel_runs]))
print 'Using problems:', probs
for run in sel_runs:
run['vdim']=1
files=[]
for run in sel_runs:
if not (run['file'] in files):
files.append(run['file'])
print 'Using files:', files
codes = list(set([run['code'] for run in sel_runs]))
code = codes[0]
sel_runs=[run for run in sel_runs if run['code']==code]
if len(configs)>1:
key='config'
val=configs[0]
val2=configs[1]
elif len(tests)>1:
key='test'
val=tests[0]
val2=tests[1]
elif len(compilers)>1:
key='compiler'
val=compilers[0]
val2=compilers[1]
elif len(probs)>1:
key='problem'
val='bp1'
val2='bp3'
elif len(files)>1:
key='file'
# val,val2 are defined per plot
else:
print 'Cannot determine comparison key. Stop.'
quit(1)
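# Illustration of the key/val/val2 mechanism above (hypothetical values): if
# two configs named 'cuda' and 'occa' were loaded, then key='config',
# val='cuda', val2='occa', and each plotted point below is the ratio
# [best dps of 'occa'] / [best dps of 'cuda'] at a matching points-per-task.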
pl_set=[(run['num-procs'],run['num-procs-node'])
for run in sel_runs]
pl_set=sorted(set(pl_set))
print
pprint.pprint(pl_set)
for plt in pl_set:
num_procs=plt[0]
num_procs_node=plt[1]
num_nodes=num_procs/num_procs_node
print
print 'compute nodes: %i, number of MPI tasks = %i'%(num_nodes,num_procs)
pl_runs=[run for run in sel_runs
if run['num-procs']==num_procs and
run['num-procs-node']==num_procs_node]
if key=='file':
files=[]
for run in pl_runs:
if not (run['file'] in files):
files.append(run['file'])
print 'Using files:', files
if len(files)>1:
val=files[0]
val2=files[1]
else:
print 'Need at least two files. Skipping ...'
continue
if len(pl_runs)==0:
print 'Empty set of runs. Skipping ...'
continue
pl_runs=sorted(pl_runs)
figure()
i=0
sol_p_set=sorted(set([run['order'] for run in pl_runs]))
for sol_p in sol_p_set:
qpts=sorted(list(set([run['quadrature-pts'] for run in pl_runs
if run['order']==sol_p])))
qpts.reverse()
print 'Order: %i, quadrature points:'%sol_p, qpts
qpts_1d=[int(q**(1./3)+0.5) for q in qpts]
pl_runs_1=[run for run in pl_runs if run[key]==val]
pl_runs_2=[run for run in pl_runs if run[key]!=val]
d1=[[run['order'],run['num-elem'],
1.*run['num-unknowns']/num_procs/run['vdim'],
run['cg-iteration-dps']/num_procs]
for run in pl_runs_1
if run['order']==sol_p and
run['quadrature-pts']==qpts[0]]
d2=[[run['order'],run['num-elem'],
1.*run['num-unknowns']/num_procs/run['vdim'],
run['cg-iteration-dps']/num_procs]
for run in pl_runs_2
if run['order']==sol_p and
run['quadrature-pts']==qpts[0]]
di=set([e[2] for e in d1]).intersection(set([e[2] for e in d2]))
if len(di)>0:
d=[[npts,
max([e[3] for e in d1 if e[2]==npts]),
max([e[3] for e in d2 if e[2]==npts])]
for npts in di]
d=asarray(sorted(d))
plot(d[:,0],d[:,2]/d[:,1],'o-',color=colors[i],
label='p=%i, q=p+%i'%(sol_p,qpts_1d[0]-sol_p))
##
if len(qpts)==1:
i=i+1
continue
#
pl_runs_1=[run for run in pl_runs if run[key]==val]
pl_runs_2=[run for run in pl_runs if run[key]!=val]
d1=[[run['order'],run['num-elem'],
1.*run['num-unknowns']/num_procs/run['vdim'],
run['cg-iteration-dps']/num_procs]
for run in pl_runs_1
if run['order']==sol_p and
run['quadrature-pts']==qpts[1]]
d2=[[run['order'],run['num-elem'],
1.*run['num-unknowns']/num_procs/run['vdim'],
run['cg-iteration-dps']/num_procs]
for run in pl_runs_2
if run['order']==sol_p and
run['quadrature-pts']==qpts[1]]
di=set([e[2] for e in d1]).intersection(set([e[2] for e in d2]))
if len(di)>0:
d=[[npts,
max([e[3] for e in d1 if e[2]==npts]),
max([e[3] for e in d2 if e[2]==npts])]
for npts in di]
d=asarray(sorted(d))
plot(d[:,0],d[:,2]/d[:,1],'s--',color=colors[i],
label='p=%i, q=p+%i'%(sol_p,qpts_1d[1]-sol_p))
##
i=i+1
##
title('Config: %s %s (%i node%s, %i task%s/node), %s, %s'%(
code,str('-').join(configs),num_nodes,'' if num_nodes==1 else 's',
num_procs_node,'' if num_procs_node==1 else 's',
str('-').join(compilers),str('-').join(probs)))
xscale('log') # subsx=[2,4,6,8]
# yscale('log')
# rng=arange(1e7,1.02e8,1e7)
# yticks(rng,['%i'%int(v/1e6) for v in rng])
# ylim(min(rng),max(rng))
# xlim(0.5,max([run['order'] for run in pl_runs])+0.5)
grid('on', color='gray', ls='dotted')
grid('on', axis='both', which='minor', color='gray', ls='dotted')
gca().set_axisbelow(True)
xlabel('Points per MPI task')
ylabel('[%s dps] / [%s dps]'%(str(val2),str(val)))
legend(ncol=2, loc='best')
if 0: # write .pdf file?
pdf_file='plot4_%s_%s_%s_%s_N%03i_pn%i.pdf'%(
code,str('-').join(probs),str('-').join(configs),
str('-').join(compilers),num_nodes,num_procs_node)
print 'saving figure --> %s'%pdf_file
savefig(pdf_file, format='pdf', bbox_inches='tight')
if 1: # show the figures?
print '\nshowing figures ...'
show()
| 33.313043 | 77 | 0.600757 |
536c2d81d83da554e1a9bb3bb0c290cd032d874f | 1,990 | py | Python | apps/todo/density_matrix_diag_plt.py | nikwitt/cdmft | ebca66c760e0f6618a0b475eeeb5ace3cd229a2c | ["MIT"] | 7 | 2019-05-02T13:19:33.000Z | 2021-11-17T14:06:55.000Z | apps/todo/density_matrix_diag_plt.py | nikwitt/cdmft | ebca66c760e0f6618a0b475eeeb5ace3cd229a2c | ["MIT"] | null | null | null | apps/todo/density_matrix_diag_plt.py | nikwitt/cdmft | ebca66c760e0f6618a0b475eeeb5ace3cd229a2c | ["MIT"] | 1 | 2021-04-14T15:05:50.000Z | 2021-04-14T15:05:50.000Z |
import numpy as np, sys
from cdmft.evaluation.common import Evaluation
from cdmft.h5interface import Storage
from cdmft.plot.cfg import plt, ax
n_bins = 800
omega_max = .8
log = False
offdiag_rows = []
plot_atomic = False
atomic_loop = -1
degeneracy_labels = True
atomic_beta = 1
labels = ["$\\rho_{ii}$"]
for nr in offdiag_rows:
labels.append("$\\rho_{"+str(nr)+"i}$")
#offdiag_rows = [0]
#offdiag_rows = [36,37,60,61]
#offdiag_rows = [84,1,100]
#offdiag_rows = [38,62,132,136]
for arch in sys.argv[1:]:
sto = Storage(arch)
ev = Evaluation(sto)
rho = ev.get_density_matrix_diag()
rhorows = [ev.get_density_matrix_row(nr) for nr in offdiag_rows]
n_plots = 1 + len(rhorows)
weights = [rho]+ rhorows
if plot_atomic:
rho_atom = ev.get_atomic_density_matrix_diag(atomic_loop, atomic_beta)
n_plots += 1
weights += [rho_atom]
labels.append("$\\rho^{atom}_{ii}(\\beta = "+str(atomic_beta)+")$")
energies = ev.get_energies()
x = [energies]*n_plots
if omega_max is None:
omega_max = rho[0,-1]
n, bins, patches = ax.hist(x, bins = n_bins, weights = weights, stacked = False, log = log, label = labels)
if degeneracy_labels:
bin_degeneracies = np.zeros([len(bins)-1])
for i in range(len(bins)-1):
for energy in energies:
if bins[i] <= energy <= bins[i+1]:
bin_degeneracies[i] += 1
for i, x_bin, bin_deg in zip(range(len(bins)), bins, bin_degeneracies):
if bin_deg > 0 and x_bin <= omega_max:
ax.text(x_bin, n[i], str(int(bin_deg)))
if log:
ax.set_ylim(bottom = 10**(-4))
else:
ax.set_ylim(0,1)
print np.sort(energies)[:10]
ax.legend()
ax.set_ylabel("$\\rho$")
ax.set_xlabel("$\\omega$")
ax.set_xlim(right = omega_max)
outname = arch[:-3]+"_density_matrix_diag.pdf"
plt.savefig(outname)
print outname+' ready, note: diag2 recommended'
plt.close()
| 32.096774 | 111 | 0.622111 |
d76340391455e1f463b02f3e71a0cc1cb3c47fe0 | 3,556 | py | Python | db/db.py | yasinasama/guime | 00caac105becf6df4d873fdbba4542711307e383 | ["MIT"] | null | null | null | db/db.py | yasinasama/guime | 00caac105becf6df4d873fdbba4542711307e383 | ["MIT"] | null | null | null | db/db.py | yasinasama/guime | 00caac105becf6df4d873fdbba4542711307e383 | ["MIT"] | null | null | null |
import sqlite3
import os
class DB:
    """Thin sqlite3 wrapper: lazy connection plus query/insert/delete
    helpers that commit on success and roll back on failure."""
def __init__(self,dbpath):
self.db_path = dbpath
self._conn = None
self._cur = None
self._connect()
def _connect(self):
if not self._conn:
self._conn = sqlite3.connect(self.db_path)
self._cur = self._conn.cursor()
def query(self,sql,value=[]):
if not self._cur:
return None
try:
self._cur.execute(sql,value)
return self._cur.fetchall()
except:
return None
def create(self,sql):
if not self._cur:
return
try:
self._cur.execute(sql)
self.commit()
except:
raise
def insert(self,sql,value):
if not self._cur:
return
try:
self._cur.executemany(sql,value)
self.commit()
except:
raise
def delete(self,sql,value):
if not self._cur:
return
try:
self._cur.execute(sql,value)
self.commit()
except:
raise
def commit(self):
try:
self._conn.commit()
except:
self.rollback()
def rollback(self):
self._conn.rollback()
def __del__(self):
if self._cur:
self._cur.close()
if self._conn:
self._conn.close()
def close(self):
self.__del__()
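# Minimal usage sketch (the table and column names match the schema created
# below; the sample values are made up):
#
#     conn = DB('./guime.db')
#     conn.insert('insert into detail(order_id,project,price,number,pay) '
#                 'values (?,?,?,?,?)',
#                 [('20190315001', 'oil change', 100, 1, 100)])
#     rows = conn.query('select project, pay from detail where order_id=?',
#                       ['20190315001'])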
CREATE_ORDER = '''
create table if not exists orders(
id integer primary key autoincrement,
order_id varchar(50) unique,
car_id varchar(50) default '',
car_type varchar(50)default '',
car_user varchar(50) default '',
phone varchar(20) default '',
car_frame varchar(50) default '',
order_time integer default 0,
pay_time integer default 0,
mile integer default 0,
total_pay integer default 0,
insurance_name varchar(50) default '',
insurance_time integer default 0,
remark text default ''
);
'''
CREATE_DETAIL = '''
create table if not exists detail(
id integer primary key autoincrement,
order_id varchar(50) default '',
project varchar(50) default '',
price integer default 0,
number integer default 0,
pay integer default 0,
remark text default ''
);
'''
DB_NAME = './guime.db'
def create_db(conn):
conn.create(CREATE_ORDER)
conn.create(CREATE_DETAIL)
if not os.path.exists(DB_NAME):
DB_CONN = DB(DB_NAME)
create_db(DB_CONN)
else:
DB_CONN = DB(DB_NAME)
if __name__=='__main__':
# DB_CONN.insert('insert into detail(order_id,project,pay,remark) values(?,?,?,?)',[['aa','aa','aa','aa']])
print(DB_CONN.query('select * from orders',[]))
# DB_CONN.insert('insert into orders(order_id,remark) values(?,?) on conflict(order_id) do update set phone=?,remark=?;',[('20190315001','12ss33221','gggg','ggg')])
# DB_CONN.insert('''insert into orders(order_id,car_id,car_type,car_user,phone,car_frame,order_time,remark)
# values (?,?,?,?,?,?,?,?)
# on conflict(order_id)
# do update set car_id=?
# and car_type=?
# and car_user=?
# and phone=?
# and car_frame=?
# and order_time=?
# and remark=?''',[['20190315001', 'ff', 'ff', 'ff', 'ff', 'ffese', '2019-03-15', '', 'ff', 'ff', 'ff', 'ff', 'ffese', '2019-03-15', '']])
| 26.340741 | 168 | 0.540776 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.