Dataset columns (one record per source file; ranges give observed min to max):

- hexsha: string, 40 chars
- size: int64, 3 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, 3 to 972 chars
- max_stars_repo_name: string, 6 to 130 chars
- max_stars_repo_head_hexsha: string, 40 to 78 chars
- max_stars_repo_licenses: sequence, 1 to 10 items
- max_stars_count: int64, 1 to 191k, nullable
- max_stars_repo_stars_event_min_datetime: string, 24 chars, nullable
- max_stars_repo_stars_event_max_datetime: string, 24 chars, nullable
- max_issues_repo_path: string, 3 to 972 chars
- max_issues_repo_name: string, 6 to 130 chars
- max_issues_repo_head_hexsha: string, 40 to 78 chars
- max_issues_repo_licenses: sequence, 1 to 10 items
- max_issues_count: int64, 1 to 116k, nullable
- max_issues_repo_issues_event_min_datetime: string, 24 chars, nullable
- max_issues_repo_issues_event_max_datetime: string, 24 chars, nullable
- max_forks_repo_path: string, 3 to 972 chars
- max_forks_repo_name: string, 6 to 130 chars
- max_forks_repo_head_hexsha: string, 40 to 78 chars
- max_forks_repo_licenses: sequence, 1 to 10 items
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, 24 chars, nullable
- max_forks_repo_forks_event_max_datetime: string, 24 chars, nullable
- content: string, 3 to 1.03M chars
- avg_line_length: float64, 1.13 to 941k
- max_line_length: int64, 2 to 941k
- alphanum_fraction: float64, 0 to 1
--------------------------------------------------------------------------------
hexsha: e701ebe70cc571aff89f753f2e757469bc7f99f7 | size: 120 | ext: py | lang: Python
path: vkquick/ext/chatbot/wrappers/__init__.py | repo: NikitolProject/vkquick | head_hexsha: a68e982974b5e96841d60de47519c1bbbaeedd29 | licenses: ["MIT"]   (identical across the stars/issues/forks columns)
stars: null | issues: null | forks: null
content:

from .attachment import Document, Photo
from .message import Message
from .page_entities import Group, PageEntity, User
avg_line_length: 30 | max_line_length: 50 | alphanum_fraction: 0.825
--------------------------------------------------------------------------------
hexsha: 012d9b46627d2916815773337d7a89f6ff88fe40 | size: 3,806 | ext: py | lang: Python
path: section-03.py | repo: richardolopes/pierian-python3 | head_hexsha: afe44b066c68f6cc7c0c8fea197990b9a3c4b79f | licenses: ["MIT"]   (identical across the stars/issues/forks columns)
stars: null | issues: null | forks: null
content:

'''
Lesson 11
'''
a = "Python 3"
print(len(a)) # 8
# Returns the whole string
# VAR:VAR - returns a slice of the string
# ::VAR - returns a slice of the string, ":" standing for the start of the string
# ::-1 - returns the string reversed
print(a[:]) # Python 3
print(a[1:4]) # yth
print(a[::2]) # Pto
print(a[::-1]) # 3 nohtyP
a = "Python "
print(a * 10) # Python Python Python Python Python Python Python Python Python Python
# Strings are immutable
print(a.lower()) # python
a = "A computação quântica é a ciência que estuda as aplicações das teorias e propriedades da mecânica quântica na Ciência da Computação."
print(a.split()) # ['A', 'computação', 'quântica', 'é', 'a', 'ciência', 'que', 'estuda', 'as', 'aplicações', 'das', 'teorias', 'e', 'propriedades', 'da', 'mecânica', 'quântica', 'na', 'Ciência', 'da', 'Computação.']
print(a.split("o")) # ['A c', 'mputaçã', ' quântica é a ciência que estuda as aplicações das te', 'rias e pr', 'priedades da mecânica quântica na Ciência da C', 'mputaçã', '.']
'''
Lesson 13 - Print formatting
'''
var = 'R'
# %s = str()
# %r = repr()
print("Olá %s, tudo bem? %s?" %(var, var)) # Olá R, tudo bem? R?
print("Olá " + var + ", tudo bem?") # Olá R, tudo bem?
print("Pontos flutuantes: %11.5f" %(23.344)) # Pontos flutuantes: 23.34400
print("O preço de um iPhone X é %d" %(5559.99)) # O preço de um iPhone X é 5559
print("Olá {}, tudo bem?" .format("Richard")) # Olá Richard, tudo bem?
print("O preço de um celular com suporte a 4 chips e um som mais alto que de uma JBL é {}" .format(123.5)) # O preço de um celular com suporte a 4 chips e um som mais alto que de uma JBL é 123.5
print("Um: {a}, dois: {b}, três: {c}".format(a = 1, b = "dois", c = 3.5)) # Um: 1, dois: dois, três: 3.5
'''
Lesson 15 - List manipulation
'''
lista = ["ZERO", "UM", "DOIS"]
lista += ["TRÊS"]
print(lista) # ['ZERO', 'UM', 'DOIS', 'TRÊS']
print(type(lista)) # list
print(len(lista)) # 4
lista.append("QUATRO") # ['ZERO', 'UM', 'DOIS', 'TRÊS', 'QUATRO']
print(lista.pop()) # 'QUATRO'
tres = lista.pop(3)
print(tres) # TRÊS
lista.reverse()
print(lista) # ['DOIS', 'UM', 'ZERO']
lista = [5, 7, 9, 1, 6, 0, 23, 51]
lista.sort()
print(lista) # [0, 1, 5, 6, 7, 9, 23, 51]
lista = ["b", "d", "z", "x", "a", "r"]
lista.sort()
print(lista) # ['a', 'b', 'd', 'r', 'x', 'z']
lista1 = [1, 2, 3]
lista2 = [4, 5, 6]
lista3 = [7, 8, 9]
matrix = [lista1, lista2, lista3]
print(matrix) # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
primeira_coluna = [row[0] for row in matrix]
print(primeira_coluna) # [1, 4, 7]
'''
Lesson 17 - Dictionaries
'''
dic = {
"Nome": "Richard",
"Idade": 18,
"Cachorros": [
{
"Nome": "Mel",
"Idade": 5
},
{
"Nome": "Lessie",
"Idade": 12
}
]
}
print(dic["Cachorros"][0]["Nome"]) # Mel
print( list(dic.keys()) ) # ['Nome', 'Idade', 'Cachorros']
print( list(dic.items()) ) # [('Nome', 'Richard'), ('Idade', 18), ('Cachorros', [{'Nome': 'Mel', 'Idade': 5}, {'Nome': 'Lessie', 'Idade': 12}])]
'''
Lesson 19 - Tuples
'''
t = ("zero", 1, 2, "três")
print(type(t)) # tuple
# Tuples are immutable.
# t[0] = 0 # Error
'''
Lesson 20 - Files
'''
arq = open("section-03.txt")
print(arq) # <_io.TextIOWrapper name='section-03.txt' mode='r' encoding='cp1252'>
print(arq.read())
# Arquivo de texto.
# Arquivo de texto2.
# Arquivo de texto3.
print(arq.seek(0)) # 0
print(arq.readline()) # Arquivo de texto.
print("----")
arq.seek(0)
for line in arq:
print(line)
# Arquivo de texto.
#
# Arquivo de texto2.
#
# Arquivo de texto3.
'''
Lesson 21 - Sets and Booleans
'''
a = set()
a.add(1)
print(a) # {1}
a.add(2)
a.add(3)
a.add(1)
print(a) # {1, 2, 3}
a = set([1,1,2,3])
print(a) # {1, 2, 3}
a = True
b = False

avg_line_length: 23.7875 | max_line_length: 215 | alphanum_fraction: 0.566474
--------------------------------------------------------------------------------
hexsha: 05fe00355b195d0c7f06f60f73603d52175d7d4b | size: 4,408 | ext: py | lang: Python
path: sites/paymentsalt/settings_base.py | repo: eviljeff/zamboni | head_hexsha: c446a9fc75513c9eef3ff7b1f0e23bbab29f0e68 | licenses: ["BSD-3-Clause"]   (identical across the stars/issues/forks columns)
stars: null | issues: null | forks: null
content:

"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from mkt.settings import (CACHE_PREFIX, ES_INDEXES,
KNOWN_PROXIES, LOGGING)
from .. import splitstrip
import private_base as private
ALLOWED_HOSTS = ['.allizom.org', '.mozflare.net']
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave']['ATOMIC_REQUESTS'] = True
DATABASES['slave']['CONN_MAX_AGE'] = 5 * 60 # 5m for persistent connections.
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.MemcachedCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
CACHE_MACHINE_USE_REDIS = True
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
SPIDERMONKEY = '/usr/bin/tracemonkey'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'marketplace-paymentsalt'
GOOGLE_ANALYTICS_CREDENTIALS = private.GOOGLE_ANALYTICS_CREDENTIALS
GOOGLE_API_CREDENTIALS = private.GOOGLE_API_CREDENTIALS
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_paymentsalt' % v) for k, v in ES_INDEXES.items())
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = False
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
CLEANCSS_BIN = 'cleancss'
LESS_BIN = 'lessc'
STYLUS_BIN = 'stylus'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE = True
ALLOW_SELF_REVIEWS = True
MONOLITH_SERVER = 'https://monolith.allizom.org'
GEOIP_URL = 'http://geo.marketplace.allizom.org'
API_THROTTLE = False
NEWRELIC_ENABLE = False
AES_KEYS = private.AES_KEYS
TASK_USER_ID = 4757633
SERVE_TMP_PATH = False
avg_line_length: 27.209877 | max_line_length: 79 | alphanum_fraction: 0.694419
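The settings module above relies on dj_database_url to expand a single connection URL into Django's DATABASES dict before patching ENGINE and OPTIONS. A minimal sketch of that step, with a made-up URL standing in for the value that the private settings module normally supplies:

import dj_database_url

# Hypothetical URL; in the real deployment private_base provides it.
cfg = dj_database_url.parse("mysql://zamboni:secret@db.internal:3306/zamboni")

# parse() returns a plain dict (keys such as ENGINE, NAME, USER, PASSWORD, HOST, PORT),
# which is why the settings above can overwrite cfg['ENGINE'] and cfg['OPTIONS'] afterwards.
print(cfg["NAME"])  # zamboni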
--------------------------------------------------------------------------------
hexsha: a2addcae182c91d7a14ee865162a506dbf3a01b1 | size: 854 | ext: py | lang: Python
path: api/system/service/api_views.py | repo: klebed/esdc-ce | head_hexsha: 2c9e4591f344247d345a83880ba86777bb794460 | licenses: ["Apache-2.0"]   (identical across the stars/issues/forks columns)
stars: 97 (2016-11-15T14:44:23.000Z to 2022-03-13T18:09:15.000Z)
issues: 334 (2016-11-17T19:56:57.000Z to 2022-03-18T10:45:53.000Z)
forks: 33 (2017-01-02T16:04:13.000Z to 2022-02-07T19:20:24.000Z)
content:

from api.api_views import APIView
from api.exceptions import ObjectNotFound
from api.task.response import SuccessTaskResponse
from api.system.service.control import ServiceControl
class ServiceStatusView(APIView):
dc_bound = False
# noinspection PyUnusedLocal
def __init__(self, request, service, data=None):
super(ServiceStatusView, self).__init__(request)
self.service = service
self.ctrl = ServiceControl()
if service and service not in self.ctrl.services:
raise ObjectNotFound(object_name='Service')
def get(self):
"""Return service status or a list of all service statuses"""
if self.service:
res = self.ctrl.status(self.service)
else:
res = self.ctrl.status_all()
return SuccessTaskResponse(self.request, res, dc_bound=False)
avg_line_length: 31.62963 | max_line_length: 69 | alphanum_fraction: 0.694379
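For orientation, the view above is built per request and reports either one service or all of them. A hypothetical call sequence (the request object and the service name 'erigonesd' are assumptions, not taken from the file):

# Single service: __init__ raises ObjectNotFound if the name is not in ctrl.services.
single = ServiceStatusView(request, 'erigonesd').get()

# A falsy service name skips the check, and get() falls through to status_all().
everything = ServiceStatusView(request, None).get()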
--------------------------------------------------------------------------------
hexsha: 16352208631de2c25b72c99539e448c5b919683b | size: 2,002 | ext: py | lang: Python
path: lib/coloraide/spaces/a98_rgb.py | repo: adaminfinitum/ColorHelper | head_hexsha: d6ab02ccff01dd1e3a01dbc186b5ba3ff1fcca47 | licenses: ["MIT"]   (identical across the stars/issues/forks columns)
stars: 253 (2015-03-04T06:48:43.000Z to 2022-03-25T14:22:17.000Z)
issues: 197 (2015-03-04T21:40:47.000Z to 2022-03-25T17:04:36.000Z)
forks: 32 (2015-03-21T03:28:01.000Z to 2021-09-06T07:20:51.000Z)
content:

"""A98 RGB color class."""
from ..spaces import RE_DEFAULT_MATCH
from .srgb.base import SRGB
from .xyz import XYZ
from .. import util
import re
RGB_TO_XYZ = [
[0.5766690429101304, 0.18555823790654635, 0.18822864623499475],
[0.297344975250536, 0.6273635662554663, 0.0752914584939979],
[0.027031361386412336, 0.07068885253582725, 0.9913375368376391]
]
XYZ_TO_RGB = [
[2.041587903810747, -0.5650069742788599, -0.34473135077832967],
[-0.9692436362808794, 1.8759675015077197, 0.04155505740717558],
[0.013444280632031149, -0.11836239223101835, 1.0151749943912052]
]
def lin_a98rgb_to_xyz(rgb):
"""
Convert an array of linear-light a98-rgb values to CIE XYZ using D50.D65.
(so no chromatic adaptation needed afterwards)
http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
which has greater numerical precision than section 4.3.5.3 of
https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
"""
return util.dot(RGB_TO_XYZ, rgb)
def xyz_to_lin_a98rgb(xyz):
"""Convert XYZ to linear-light a98-rgb."""
return util.dot(XYZ_TO_RGB, xyz)
def lin_a98rgb(rgb):
"""Convert an array of a98-rgb values in the range 0.0 - 1.0 to linear light (un-corrected) form."""
return [util.npow(val, 563 / 256) for val in rgb]
def gam_a98rgb(rgb):
"""Convert an array of linear-light a98-rgb in the range 0.0-1.0 to gamma corrected form."""
return [util.npow(val, 256 / 563) for val in rgb]
class A98RGB(SRGB):
"""A98 RGB class."""
SPACE = "a98-rgb"
DEFAULT_MATCH = re.compile(RE_DEFAULT_MATCH.format(color_space=SPACE, channels=3))
WHITE = "D65"
@classmethod
def _to_xyz(cls, parent, rgb):
"""To XYZ."""
return parent.chromatic_adaptation(cls.WHITE, XYZ.WHITE, lin_a98rgb_to_xyz(lin_a98rgb(rgb)))
@classmethod
def _from_xyz(cls, parent, xyz):
"""From XYZ."""
return gam_a98rgb(xyz_to_lin_a98rgb(parent.chromatic_adaptation(XYZ.WHITE, cls.WHITE, xyz)))
avg_line_length: 28.6 | max_line_length: 104 | alphanum_fraction: 0.701299
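Taken together, the matrices and the 563/256 exponent above define the whole a98-rgb to XYZ path: gamma-decode each channel, then apply the matrix. A rough, self-contained sketch of that path in plain Python, with a local dot() standing in for coloraide's util.dot and an arbitrary sample color:

RGB_TO_XYZ_SKETCH = [
    [0.5766690429101304, 0.18555823790654635, 0.18822864623499475],
    [0.297344975250536, 0.6273635662554663, 0.0752914584939979],
    [0.027031361386412336, 0.07068885253582725, 0.9913375368376391],
]

def dot(matrix, vec):
    # Plain matrix-vector product; util.dot plays this role in the file above.
    return [sum(m * v for m, v in zip(row, vec)) for row in matrix]

rgb = [0.25, 0.5, 0.75]                   # arbitrary a98-rgb sample, all channels non-negative
linear = [c ** (563 / 256) for c in rgb]  # mirrors lin_a98rgb (util.npow also handles negative values)
xyz = dot(RGB_TO_XYZ_SKETCH, linear)      # mirrors lin_a98rgb_to_xyz
print(xyz)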
--------------------------------------------------------------------------------
hexsha: fc94b38ce8c72a02e3fda944369aaa79e990fc5c | size: 46,706 | ext: py | lang: Python
path: tensorflow/python/ops/math_ops.py | repo: izeye/tensorflow | head_hexsha: d4422ff4b2f142de1d0c626f73c734655d340e0d | licenses: ["Apache-2.0"]   (identical across the stars/issues/forks columns)
stars: 1 (2016-07-03T20:16:31.000Z to 2016-07-03T20:16:31.000Z)
issues: null
forks: 1 (2021-03-16T21:45:10.000Z to 2021-03-16T21:45:10.000Z)
content:

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@truediv
@@floordiv
@@mod
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lgamma
@@erf
@@erfc
## Matrix Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions for matrices to your graph.
@@diag
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
@@self_adjoint_eig
@@batch_self_adjoint_eig
@@matrix_solve
@@batch_matrix_solve
@@matrix_triangular_solve
@@batch_matrix_triangular_solve
@@matrix_solve_ls
@@batch_matrix_solve_ls
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
@@fft2d
@@ifft2d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six.moves
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import gen_state_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_math_ops import *
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same size and type as `x` with absolute values.
"""
with ops.op_scope([x], name, "Abs") as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype == dtypes.complex64:
return gen_math_ops.complex_abs(x, name=name)
return gen_math_ops._abs(x, name=name)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2]], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.op_scope([x], name, "Pow") as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation computes complex numbers elementwise of the form \\\\(a + bj\\\\),
where *a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must be the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor` of type `float`.
imag: A `Tensor` of type `float`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64`.
"""
with ops.op_scope([real, imag], name, "Complex") as name:
return gen_math_ops._complex(real, imag, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, -4.4]
tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float` or `double`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
with ops.op_scope([x], name, "Cast") as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, dtype, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == dtype:
return x
return gen_math_ops.cast(x, dtype, name=name)
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", logical_not)
def _OverrideBinaryOperatorHelper(func, op_name):
"""Register operators with different tensor and scalar versions.
Args:
func: the operator
op_name: name of the operator being overridden
"""
def binary_op_wrapper(x, y):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(x, ops.Tensor)
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
def r_binary_op_wrapper(y, x):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(y, ops.Tensor)
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
}
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.op_scope([x, y], name, "truediv") as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return div(x, y, name=name)
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.op_scope([x, y], name, "floordiv") as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return floor(div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
return div(x, y, name=name)
_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return logical_and(logical_or(x, y), logical_not(logical_and(x, y)),
name=name)
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
def range(start, limit=None, delta=1, name="range"):
"""Creates a sequence of integers.
Creates a sequence of integers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
start: A 0-D (scalar) of type `int32`. First entry in sequence.
Defaults to 0.
limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,
exclusive.
delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1.
Number that increments `start`.
name: A name for the operation (optional).
Returns:
An 1-D `int32` `Tensor`.
"""
if limit is None:
start, limit = 0, start
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
start_value = tensor_util.constant_value(op.inputs[0])
limit_value = tensor_util.constant_value(op.inputs[1])
delta_value = tensor_util.constant_value(op.inputs[2])
if start_value is None or limit_value is None or delta_value is None:
return [tensor_shape.vector(None)]
else:
return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
delta_value)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1.]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float`, `double`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.op_scope([a, b], name, "MatMul") as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
if a.dtype == dtypes.float32 and (a_is_sparse or b_is_sparse):
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[1])
else:
k = int(a_shape[0])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
"""Calculates the on-disk size of the weights for MatMul."""
# We assume here that the weights are always in the second input to the op,
# which is generally true by convention for fully-connected layers, but not
# enforced or checked.
weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
weights_shape.assert_is_fully_defined()
return ops.OpStats("weight_parameters",
(int(weights_shape[1]) * int(weights_shape[0])))
def _as_indexed_slices(x):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape(x)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]
# tensor `b` is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if tensor_dtype is None:
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
tensor_dtype = inputs[0].dtype
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if not shape.is_fully_defined():
# TODO(pbar): Make a version of assign_add that accepts an uninitialized
# lvalue, and takes its shape from that? This would allow accumulate_n to
# work in all situations that add_n currently works.
raise ValueError("Cannot infer the shape of the accumulator for "
"accumulate_n. Pass the shape argument, or set the shape "
"of at least one of the inputs.")
with ops.op_scope(inputs, name, "AccumulateN") as name:
var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
var_name = var.op.name
var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
update_ops = []
for input_tensor in inputs:
op = state_ops.assign_add(var, input_tensor, use_locking=True)
update_ops.append(op)
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(var,
var_name=var_name,
name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
"""Shape function for BatchMatMul op."""
a_shape = op.inputs[0].get_shape()
adj_a = op.get_attr("adj_x")
b_shape = op.inputs[1].get_shape()
adj_b = op.get_attr("adj_y")
if a_shape.dims is None and b_shape.dims is None:
return [tensor_shape.unknown_shape()]
batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
output_rows = a_shape[-1] if adj_a else a_shape[-2]
output_cols = b_shape[-2] if adj_b else b_shape[-1]
inner_a = a_shape[-2] if adj_a else a_shape[-1]
inner_b = b_shape[-1] if adj_b else b_shape[-2]
inner_a.assert_is_compatible_with(inner_b)
return [batch_dims.concatenate([output_rows, output_cols])]
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
"""
with ops.op_scope([x], name, "Sigmoid") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
the return type is `quint8`.
"""
with ops.op_scope([x], name, "Tanh") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._tanh(x, name=name)
def lgamma(x, name=None):
"""Computes `ln(|gamma(x)|)` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
the return type is `quint8`.
"""
with ops.op_scope([x], name, "Lgamma") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._lgamma(x, name=name)
def erf(x, name=None):
"""Computes Gauss error function of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
the return type is `quint8`.
"""
with ops.op_scope([x], name, "Erf") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._erf(x, name=name)
def erfc(x, name=None):
"""Computes complementary error function of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
the return type is `quint8`.
"""
with ops.op_scope([x], name, "Erfc") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._erfc(x, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Lgamma")(common_shapes.unchanged_shape)
ops.RegisterShape("Erf")(common_shapes.unchanged_shape)
ops.RegisterShape("Erfc")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT2D")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
shape_x = op.inputs[0].get_shape()
shape_y = op.inputs[1].get_shape()
if shape_x.ndims is None or shape_y.ndims is None:
return [tensor_shape.unknown_shape()]
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return [tensor_shape.TensorShape(return_dims)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
merged_shape = tensor_shape.unknown_shape()
for input_ in op.inputs:
merged_shape = merged_shape.merge_with(input_.get_shape())
return [merged_shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
# All three inputs must have the same shape.
return [op.inputs[0].get_shape()
.merge_with(op.inputs[1].get_shape())
.merge_with(op.inputs[2].get_shape())]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
return [tensor_shape.scalar()]
dimension = tensor_util.constant_value(op.inputs[1])
if dimension is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
elif 0 <= dimension and dimension < input_shape.ndims:
returned_shape = []
for i, dim in enumerate(input_shape.dims):
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
raise ValueError(
"dimension (%d) must be in the range [0, %d), where %d is the number "
"of dimensions in the input"
% (dimension, input_shape.ndims, input_shape.ndims))
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
"""Common shape function for reduction ops."""
input_shape = op.inputs[0].get_shape()
reduction_indices = tensor_util.constant_value(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
if keep_dims:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
return [tensor_shape.unknown_shape()]
# Turn reduction_indices from scalar to vector if necessary
reduction_indices = np.ravel(reduction_indices)
for reduction_index in reduction_indices:
if reduction_index < 0 or reduction_index >= input_shape.ndims:
raise ValueError("Invalid reduction dimension %d for input with %d "
"dimensions" % (reduction_index, input_shape.ndims))
returned_dims = []
if keep_dims:
for i, dim in enumerate(input_shape.dims):
if i in reduction_indices:
returned_dims.append(1)
else:
returned_dims.append(dim)
else:
for i, dim in enumerate(input_shape.dims):
if i not in reduction_indices:
returned_dims.append(dim)
return [tensor_shape.TensorShape(returned_dims)]
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
segment_ids_shape.assert_has_rank(1)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSqrtN")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
"""Common shape function for sparse segment reduction ops."""
data_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
indices_shape.assert_has_rank(1)
segment_ids_shape = op.inputs[2].get_shape()
segment_ids_shape.assert_has_rank(1)
indices_shape.assert_is_compatible_with(segment_ids_shape)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
"""Shape function for the SparseSegment[Mean|SqrtN]Grad ops."""
input_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank(1)
unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.scalar())
output_dim0 = tensor_util.constant_value(op.inputs[3])
if output_dim0 is not None:
dim0 = output_dim0[0]
else:
dim0 = None
return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
"""Shape function for UnsortedSegmentSum."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
mid = segment_ids_shape.ndims
if mid is None:
return [tensor_shape.unknown_shape()]
else:
num_segments = tensor_util.constant_value(op.inputs[2])
return [tensor_shape.TensorShape([num_segments]).concatenate(
data_shape[mid:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
num = tensor_util.constant_value(op.inputs[2])
return [tensor_shape.vector(num)]
| 32.479833 | 86 | 0.683531 |
96d8cac3afba89bc0168387e9ef60953bbfb2888 | 2,310 | py | Python | python/cudf/cudf/io/json.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | 1 | 2021-12-17T19:28:00.000Z | 2021-12-17T19:28:00.000Z | python/cudf/cudf/io/json.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | 1 | 2021-03-10T20:28:23.000Z | 2021-03-25T15:58:47.000Z | python/cudf/cudf/io/json.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | 1 | 2020-11-10T03:19:16.000Z | 2020-11-10T03:19:16.000Z | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
import warnings
from io import BytesIO, StringIO
import pandas as pd
import cudf
from cudf._lib import json as libjson
from cudf.utils import ioutils
@ioutils.doc_read_json()
def read_json(
path_or_buf,
engine="auto",
dtype=True,
lines=False,
compression="infer",
byte_range=None,
*args,
**kwargs,
):
"""{docstring}"""
if engine == "cudf" and not lines:
raise ValueError("cudf engine only supports JSON Lines format")
if engine == "auto":
engine = "cudf" if lines else "pandas"
is_single_filepath_or_buffer = ioutils.ensure_single_filepath_or_buffer(
path_or_data=path_or_buf, **kwargs,
)
if not is_single_filepath_or_buffer:
raise NotImplementedError(
"`read_json` does not yet support reading multiple files"
)
path_or_buf, compression = ioutils.get_filepath_or_buffer(
path_or_data=path_or_buf,
compression=compression,
iotypes=(BytesIO, StringIO),
**kwargs,
)
if engine == "cudf":
return cudf.DataFrame._from_table(
libjson.read_json(
path_or_buf, dtype, lines, compression, byte_range
)
)
else:
warnings.warn(
"Using CPU via Pandas to read JSON dataset, this may "
"be GPU accelerated in the future"
)
if kwargs.get("orient") == "table":
pd_value = pd.read_json(
path_or_buf,
lines=lines,
compression=compression,
*args,
**kwargs,
)
else:
pd_value = pd.read_json(
path_or_buf,
lines=lines,
dtype=dtype,
compression=compression,
*args,
**kwargs,
)
df = cudf.from_pandas(pd_value)
return df
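# Illustrative usage sketch (not part of the original module; the file name and
# columns are placeholders). With a JSON Lines file such as
#   {"a": 1, "b": 2}
#   {"a": 3, "b": 4}
# the GPU engine can be selected explicitly:
#   gdf = cudf.read_json("data.jsonl", engine="cudf", lines=True)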
@ioutils.doc_to_json()
def to_json(cudf_val, path_or_buf=None, *args, **kwargs):
"""{docstring}"""
warnings.warn(
"Using CPU via Pandas to write JSON dataset, this may "
"be GPU accelerated in the future"
)
pd_value = cudf_val.to_pandas(nullable=True)
return pd.io.json.to_json(path_or_buf, pd_value, *args, **kwargs)
| 26.551724 | 76 | 0.585281 |
d5ab1d9caf25136da8b0bd00111e4a849015802a | 15,469 | py | Python | tex-camera-ready.py | pkorus/tex-camera-ready | b15ac9c8582d85804bb8be8609f44bff3c5fda69 | [
"MIT"
] | null | null | null | tex-camera-ready.py | pkorus/tex-camera-ready | b15ac9c8582d85804bb8be8609f44bff3c5fda69 | [
"MIT"
] | null | null | null | tex-camera-ready.py | pkorus/tex-camera-ready | b15ac9c8582d85804bb8be8609f44bff3c5fda69 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
import argparse
import collections
import os
import re
import shutil
import sys
import subprocess
from PIL import Image
class NonStandaloneError(RuntimeError):
pass
def build_dependency(old_file, new_file):
previous_dir = os.path.abspath(os.path.curdir)
os.chdir(os.path.split(old_file)[0])
# Open the file and check if the first line starts with standalone
with open(old_file) as file:
line = file.readline()
while line.startswith('%'):
line = file.readline()
if 'standalone' not in line:
raise NonStandaloneError()
# print('Compiling a standalone dependency {} -> {}'.format(old_file, new_file))
# print('> latexmk -pdf {}'.format(old_file))
(exitcode, output) = subprocess.getstatusoutput('latexmk -pdf {} < /dev/null'.format(old_file))
if exitcode != 0:
print('Error: could not build figure (see latexmk log below)!')
# print(output)
# print('Copying {} -> {}'.format(old_file.replace('.tex', '.pdf'), new_file.replace('.tex', '.pdf')))
shutil.copyfile(old_file.replace('.tex', '.pdf'), new_file.replace('.tex', '.pdf'))
os.chdir(previous_dir)
def refactor_dependencies(old_file, new_file, root_dir):
regexps = [" (table|graphics) {0,1}(\[[^\]]*\]){0,1} {0,}\{([^\}]*)\}",
"(includegraphics|input|include)(\[[^\]]*\]){0,}\{([^\}]*)\}"]
missing_files = []
included = []
new_commands = {}
standalone_mode = False
root_dir = os.path.join(root_dir, 'resources')
print(' Refactoring file {} -> {}'.format(old_file, new_file))
with open(new_file, 'w') as of:
with open(old_file) as f:
lines = f.readlines()
for line in lines:
if line.strip().startswith('%'): continue
# Check if this is a standalone class - requires different file handling
if re.search("documentclass(\[[^\]]*\]){0,1}\{standalone\}", line):
standalone_mode = True
# Check for simple new commands - used for referencing external resources
new_command = re.search("newcommand\*{0,1}\{([^\}]*)\}\{([^\}]*)\}", line)
# Build dictionary of new commands
if new_command:
key, value = new_command.groups()
if key in ['\\DataPath', '\\FigPath']:
new_commands[key] = value
line = '\\newcommand*{{{}}}{{{}}}\n'.format(key, '../resources/')
# Handle inclusion of graphics / data files
# Check for known inclusion commands
for pattern in regexps:
match = re.search(pattern, line)
if match:
command, params, filename = match.groups()
if standalone_mode:
for k, v in new_commands.items():
filename = re.sub(re.escape(k) + '( |\{\})', v, filename)
# Make sure the file exists & rewrite the line
full_path = '{}/{}'.format(os.path.split(old_file)[0], filename) if old_file.find(
'/') >= 0 else filename
if os.path.isfile(full_path):
if filename not in included:
print(' {:15} {}'.format(' ', filename))
else:
if filename not in included:
print(' {:15}! {}'.format(' ', filename))
missing_files.append(filename)
if len(new_commands.keys()) > 0:
new_filename = '{} {}/{}'.format(list(new_commands.keys())[0], os.path.split(new_file)[-1].split('.')[0], os.path.split(filename)[-1])
else:
new_filename = '{}/{}/{}'.format('./resources', os.path.split(new_file)[-1].split('.')[0], os.path.split(filename)[-1])
tgt_filaname = '{}/{}/{}'.format(root_dir, os.path.split(new_file)[-1].split('.')[0], os.path.split(filename)[-1])
if not os.path.isdir(os.path.split(tgt_filaname)[0]):
os.makedirs(os.path.split(tgt_filaname)[0])
if os.path.isfile(full_path):
shutil.copyfile(full_path, tgt_filaname)
# Update the command with a new filename in the current line
# (re module parses backslashes, so make sure to prevent that)
line = re.sub(pattern, '{}{}{{{}}}'.format(command, params, new_filename).replace('\\', '\\\\'), line)
included.append(filename)
of.write(line)
return missing_files
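# Illustrative example (hypothetical paths): given a dependency included as
# figure_01.tex that contains
#   \includegraphics[width=\linewidth]{\FigPath data/plot.pdf}
# refactor_dependencies copies plot.pdf to <output>/resources/figure_01/plot.pdf
# and rewrites the line to
#   \includegraphics[width=\linewidth]{\FigPath figure_01/plot.pdf}
# (or to ./resources/figure_01/plot.pdf when no \FigPath-style command is defined).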
parser = argparse.ArgumentParser(
description='LaTeX source cleanup: take a working LaTeX sources and export a copy for dissemination (with '
'resource refactoring, bibtex items selection, etc.)')
parser.add_argument('filename', type=str, help='input file (*.tex)')
parser.add_argument('-o', '--output', type=str, help='Output directory, default: ./final_new')
parser.add_argument('-c', '--crop', help='Crop bitmaps based on LaTeX trim parameters', action='store_true')
parser.add_argument('-v', '--verbose', help='Print analysis summary to stdout', action='store_true')
parser.add_argument('-f', '--force', help='Force output to an existing directory', action='store_true')
parser.add_argument('-b', '--bib', help='Cleanup Bibtex entries (leave only cited)', action='store_true')
parser.add_argument('-t', '--tikz', help='Compile standalone TikZ figures and include resulting PDFs', action='store_true')
args = parser.parse_args()
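# Example invocation (illustrative; paper.tex is a placeholder for the real main file):
#   python tex-camera-ready.py paper.tex -o final --bib --tikz --crop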
supported_formats = ['.tex']
# Verify params
if not os.path.splitext(args.filename)[-1].lower() in supported_formats:
print('Error: Unsupported document format ({})'.format(os.path.split(args.filename)[-1]))
sys.exit(1)
if not args.output:
args.output = './final_new'
if os.path.isdir(args.output) and not args.force:
print('Error: directory {} exists!'.format(os.path.abspath(args.output)))
sys.exit(2)
current_environment = collections.deque()
resources = []
counters = {'figure': 0, 'table': 0, 'algorithm': 0}
input_root = os.path.dirname(args.filename)
input_root = input_root if len(input_root) > 0 else '.'
output_root = os.path.abspath(args.output)
# Read lines from the input file
with open(args.filename) as f:
lines = f.readlines()
missing_deps = {}
print('\nInput file : {}'.format(os.path.split(args.filename)[-1]))
print('Input dir : {}'.format(input_root))
print('Output dir : {}'.format(output_root))
print('Working dir : {}'.format(os.path.abspath(os.curdir)))
print('\nLoaded {} lines from {}'.format(len(lines), args.filename))
print('Writing to {}'.format(args.output))
for dirname in [args.output, '{}/bib'.format(args.output), '{}/resources'.format(args.output), '{}/includes'.format(args.output)]:
if not os.path.exists(dirname): os.mkdir(dirname)
# Open target file
of = open(os.path.join(args.output, os.path.split(args.filename)[-1]), 'w')
subfig_count = 0
current_subfig = 0
alphabet = 'abcdefghijklmnopqrstuwvxyz'
citations = []
bibtex_files = []
# Process successive lines
for line in lines:
if line.strip().startswith('%'):
continue
line_written = False
env_command = re.search('(begin|end){([a-z]*)\*{0,1}\}', line)
if env_command:
flag, env_type = env_command.groups()
if flag == 'begin':
current_environment.append(env_type)
if current_environment[-1] in counters:
counters[current_environment[-1]] += 1
current_subfig = 0
elif flag == 'end':
current_environment.pop()
else:
print('Parsing error in line: {}'.format(line))
sys.exit(3)
new_command = re.search("newcommand\*{0,1}\{([^\}]*)\}\{([^\}]*)\}", line)
# Replace simple new commands that control external resources
if new_command:
key, value = new_command.groups()
if key in ['\\DataPath', '\\FigPath']:
line = '\\newcommand*{{{}}}{{{}}}\n'.format(key, './resources/')
include_command = re.search("(includegraphics|input|include|includestandalone)(\[[^\]]*\]){0,1}\{([^\}]*)\}", line)
if include_command:
command, params, filename = include_command.groups()
if command in ['input', 'include', 'includestandalone']:
# If filename not explicit, fallback to *.tex
if not os.path.isfile(os.path.join(input_root, filename)) and not filename.endswith('.tex'):
filename = '{}.tex'.format(filename)
if not os.path.isfile(os.path.join(input_root, filename)):
print('Error: {} not found in the filesystem'.format(filename))
sys.exit(5)
        # The sub-extension handles multiple includes in a single figure (e.g. sub-figures a, b, c).
subext = '' if current_subfig <= 0 else alphabet[current_subfig]
extension = "" if len(filename.split('.')) == 1 else ".%s" % filename.split('.')[-1]
filename_split = os.path.split(filename)
context = '{} {:02}{}'.format(current_environment[-1], counters[current_environment[-1]], subext) if current_environment[-1] in counters else 'document'
context_file = '{}_{:02}{}{}'.format(current_environment[-1], counters[current_environment[-1]], subext, extension) if current_environment[-1] in counters else filename_split[-1]
new_filename = 'includes/{}'.format(context_file)
current_subfig += 1
print('\n + {:15}: {}'.format(context, filename))
if filename.endswith('.tex'):
# If the resource is a TiKz/PFG figure
if args.tikz:
# If requested, compile the standalone figure and incude the resulting PDF
try:
build_dependency(os.path.join(input_root, filename), os.path.join(output_root, new_filename))
new_filename = new_filename.replace('.tex', '.pdf')
command = 'includegraphics'
except NonStandaloneError:
missing_deps[filename] = refactor_dependencies(os.path.join(input_root, filename), os.path.join(output_root, new_filename), output_root)
else:
# Otherwise, refactor its dependencies
missing_deps[filename] = refactor_dependencies(os.path.join(input_root, filename), os.path.join(output_root, new_filename), output_root)
else:
# Look for cropping in parameters
cropopt = re.search('trim=([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)', params) if args.crop else None
# If the file is a graphics file, and cropping was requested, trim the bitmap and save...
if args.crop and command == "includegraphics" and cropopt:
l, b, r, t = cropopt.groups()
# Crop the image
im = Image.open("%s/%s" % (input_root, filename))
w, h = im.size
dpi = im.info["dpi"] if 'dpi' in im.info else 72
if not isinstance(dpi, tuple):
dpi = (dpi, dpi)
im.crop((int(l) * dpi[0] / 72, int(t) * dpi[1] / 72, w - int(r) * dpi[0] / 72,
h - int(b) * dpi[1] / 72)).save('{}/{}'.format(args.output, new_filename))
# Remove trimming commands from the parameters
params = re.sub('trim=([0-9]+) ([0-9]+) ([0-9]+) ([0-9]+)', '', params)
params = re.sub('clip', '', params)
params = re.sub(',,', ',', params)
params = params.replace("[,", "[")
params = params.replace(",]", "]")
print(' {:15}T {}'.format(' ', 'clipped bitmap'))
else:
shutil.copyfile(os.path.join(input_root, filename), '{}/{}'.format(args.output, new_filename))
print(' {:15}> {}'.format(' ', new_filename))
if not params:
params = ''
if len(params) > 0:
params = params.replace('\\', '\\\\')
if command == 'includestandalone':
new_filename = new_filename.replace('.tex', '')
line = re.sub("(includegraphics|input|include|includestandalone)(\[[^\]]*\]){0,1}\{([^\}]*)\}", "%s%s{%s}" % (command, params, new_filename), line)
if args.bib:
# Find citations
for r in re.findall('\\\\cite\{([\w0-9:\-\_\,\.]+)\}', line):
for i in r.split(','):
citations.append(i)
# Find included BibTeX databases
bib_result = re.findall('bibliography\{([^\]]+)\}', line)
if bib_result:
of.write(re.sub('(bibliography)\{([^\]]+)\}', '\\1{bib/references.bib}', line))
line_written = True
for r in bib_result:
for i in r.split(','):
bibtex_files.append(i)
if not line_written:
of.write(line)
of.close()
if sum([len(v) for v in missing_deps.values()]) > 0:
print('\nMissing dependencies (you may need to handle them manually):')
for k, v in missing_deps.items():
if len(v) > 0:
print(' + {}'.format(k))
for name in v:
print(' {}'.format(name))
# Process collected bibliography information
if args.bib:
found_citations = sorted(set(citations))
print('\nFound {} citations:'.format(len(found_citations)))
index = 1
for ref in found_citations:
print(' [{}] {}'.format(index, ref))
index += 1
print('Found {} Bibtex databases: {}'.format(len(bibtex_files), bibtex_files))
matched_citations = {}
for bib_file in bibtex_files:
print('Parsing {}'.format(bib_file))
if not bib_file.endswith(".bib") and not os.path.exists("%s/%s" % (input_root, bib_file)):
bib_file = "%s.bib" % bib_file
with open("%s/%s" % (input_root, bib_file)) as bf:
content = bf.read()
# TODO Could use a better regexp for pinpointing BibTeX entries - the current one needs the closing bracket in a separate line.
matches = re.findall('(@[\w0-9:\-\_\,\.]+\{(.(?!\n\}))+..\})', content, re.DOTALL) # [^\}]*(?=\n\})
# iterate over found entries
for entry in matches:
entry_text = entry[0]
# Add to dictionary
name = re.findall('^@[\w]+\{([^,]+),', entry_text)
if len(name) > 0 and name[0] in found_citations:
matched_citations[name[0]] = entry_text
# Sanity check - make sure only one entry has been matched (due to the limitation stated above)
count_tags = re.findall('\s(t|T)itle', entry_text)
if len(count_tags) != 1 and len(name) > 0:
                    print('Warning: Suspicious BibTeX entry for {}: {} title entries!'.format(name[0], len(count_tags)))
print('Matched {} entries'.format(len(matched_citations)))
if len([v for v in found_citations if v not in matched_citations.keys()]) > 0:
print('Missing ones: {}'.format([v for v in found_citations if v not in matched_citations.keys()]))
with open("%s/bib/references.bib" % (output_root), 'w') as of:
for name in sorted(matched_citations.keys()):
of.write("%s\n\n" % matched_citations[name])
| 42.265027 | 186 | 0.569785 |
a4f38b0ae40595ac95453fea337b14bdeb71fbf8 | 22,792 | py | Python | telescope/telescope.py | m-lab/telescope | 4b8bd775a36d533805749d40e80f2d7b71076479 | [
"Apache-2.0"
] | 9 | 2016-02-18T18:12:38.000Z | 2019-10-17T21:57:39.000Z | telescope/telescope.py | m-lab/telescope | 4b8bd775a36d533805749d40e80f2d7b71076479 | [
"Apache-2.0"
] | 46 | 2015-07-20T23:53:57.000Z | 2020-09-28T18:23:16.000Z | telescope/telescope.py | m-lab/telescope | 4b8bd775a36d533805749d40e80f2d7b71076479 | [
"Apache-2.0"
] | 7 | 2015-08-19T18:32:18.000Z | 2018-06-19T21:09:55.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2014 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import datetime
import logging
import os
import Queue
import random
import threading
import time
import external
import iptranslation
import mlab
import query
import result_csv
import selector
import utils
MAX_THREADS = 100
class TelescopeError(Exception):
pass
class NoClientNetworkBlocksFound(TelescopeError):
def __init__(self, provider_name):
Exception.__init__(
self, 'Could not find IP blocks associated with client provider %s.'
% (provider_name))
class MLabServerResolutionFailed(TelescopeError):
def __init__(self, inner_exception):
Exception.__init__(self, 'Failed to resolve M-Lab server IPs: %s' %
(inner_exception.message))
class ExternalQueryHandler(object):
"""Monitors jobs in BigQuery and retrieves their results.
Monitors external jobs in BigQuery and retrieves and processed the resulting
data when the job completes.
"""
def __init__(self, filepath, metadata):
"""Inits ExternalQueryHandler ouput and metadata information.
Args:
filepath: (str) Where the processed results will be stored.
metadata: (dict) Metadata on the query for output labels and further
processing of received values.
"""
self._metadata = metadata
self._filepath = filepath
self._has_succeeded = False # Whether the query has returned a result.
self._has_failed = False # Whether the query has received a fatal error.
@property
def has_succeeded(self):
"""Indicates whether the test has successfully completed."""
return self._has_succeeded
@property
def has_failed(self):
"""Indicates whether the test has encountered a fatal error."""
return self._has_failed
def retrieve_data_upon_job_completion(self, job_id, query_object=None):
"""Waits for a BigQuery job to complete, then processes its output.
Waits for a BigQuery job to complete, then retrieves the data, and
writes the result to an output data file.
Args:
job_id: (str) ID of job for which to retrieve data.
query_object: (external.BigQueryCall) Query object responsible for
retrieving data from BigQuery.
Returns:
(bool) True if data was successfully retrieved, processed, and written
to file, False otherwise.
"""
logger = logging.getLogger('telescope')
if query_object:
try:
bq_query_returned_data = query_object.retrieve_job_data(job_id)
logger.debug(
'Received data, processing according to %s metric.',
self._metadata['metric'])
write_metric_calculations_to_file(self._filepath,
bq_query_returned_data)
self._has_succeeded = True
except (ValueError, external.BigQueryJobFailure,
external.BigQueryCommunicationError) as caught_error:
logger.error((
'Caught {caught_error} for ({site}, {client_provider}, {metric}, '
'{date}).').format(caught_error=caught_error,
**self._metadata))
except external.TableDoesNotExist:
logger.error((
'Requested tables for ({site}, {client_provider}, {metric}, {date}'
') do not exist, moving on.').format(**self._metadata))
self._has_failed = True
return self._has_succeeded
def setup_logger(verbosity_level=0):
"""Create and configure application logging mechanism.
Args:
verbosity_level: (int) Specifies how much information to log. 0 logs
informational messages and below. Values > 0 log all messages.
Returns:
(logging.Logger) Logger object for the application.
"""
logger = logging.getLogger('telescope')
console_handler = logging.StreamHandler()
logger.addHandler(console_handler)
if verbosity_level > 0:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def write_metric_calculations_to_file(data_filepath,
metric_calculations,
should_write_header=False):
"""Writes metric data to a file in CSV format.
Args:
data_filepath: (str) File path to which to write data.
metric_calculations: (list) A list of dictionaries containing the values
of retrieved metrics.
Returns:
(bool) True if the file was written successfully.
"""
logger = logging.getLogger('telescope')
try:
with open(data_filepath, 'w') as data_file_raw:
data_file_raw.write(result_csv.metrics_to_csv(metric_calculations))
return True
except IOError as caught_error:
if caught_error.errno == 24:
logger.error(
'When writing raw output, caught %s, trying again shortly.',
caught_error)
            time.sleep(20)
            return write_metric_calculations_to_file(data_filepath,
                                                     metric_calculations)
else:
logger.error('When writing raw output, caught %s, cannot move on.',
caught_error)
except Exception as caught_error:
logger.error('When writing raw output, caught %s, cannot move on.',
caught_error)
return False
def write_bigquery_to_file(bigquery_filepath, query_string):
"""Writes BigQuery query string to a file.
Args:
bigquery_filepath: (str) Output file path.
query_string: (str) BigQuery query string to write to file.
Returns:
(bool) True if query was written to file successfully, False otherwise.
"""
logger = logging.getLogger('telescope')
try:
with open(bigquery_filepath, 'w') as bigquery_file_raw:
bigquery_file_raw.write(query_string)
return True
except Exception as caught_error:
logger.error('When writing bigquery, caught %s.', caught_error)
return False
def selectors_from_files(selector_files):
"""Parses Selector objects from a list of selector files.
N.B.: Parsing errors are logged, but do not cause the function to fail.
Args:
selector_files: (list) A list of filenames of selector files.
Returns:
(list) A list of Selector objects that were successfully parsed.
"""
logger = logging.getLogger('telescope')
parser = selector.SelectorFileParser()
selectors = []
for selector_file in selector_files:
logger.debug('Attempting to parse selector file at: %s', selector_file)
try:
selectors.extend(parser.parse(selector_file))
except Exception as caught_error:
logger.error('Failed to parse selector file: %s', caught_error)
continue
return selectors
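# Illustrative usage sketch (not part of the original module; the file name is a
# placeholder):
#   selectors = selectors_from_files(['selectors/ndt_download.json'])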
def shuffle_selectors(selectors):
"""Shuffles a list of selectors into random order."""
selectors_copy = copy.copy(selectors)
random.shuffle(selectors_copy)
return selectors_copy
def create_ip_translator(ip_translator_spec):
factory = iptranslation.IPTranslationStrategyFactory()
return factory.create(ip_translator_spec)
def generate_query(selector, ip_translator, mlab_site_resolver):
"""Generates BigQuery SQL corresponding to the given Selector object.
Args:
selector: (selector.Selector) Selector object that specifies what data to
retrieve.
ip_translator: (iptranslation.IPTranslationStrategy) Translator from ASN
name to associated IP address blocks.
mlab_site_resolver: (mlab.MLabSiteResolver) Resolver to translate M-Lab
site IDs to a set of IP addresses.
Returns:
(str, int) A 2-tuple containing the query string and the number of tables
referenced in the query.
"""
logger = logging.getLogger('telescope')
start_time_datetime = selector.start_time
end_time_datetime = start_time_datetime + datetime.timedelta(
seconds=selector.duration)
client_ip_blocks = []
if selector.client_provider:
client_ip_blocks = ip_translator.find_ip_blocks(
selector.client_provider)
if not client_ip_blocks:
raise NoClientNetworkBlocksFound(selector.client_provider)
server_ips = []
if selector.site:
try:
retrieved_site_ips = mlab_site_resolver.get_site_ndt_ips(
selector.site)
for retrieved_site_ip in retrieved_site_ips:
server_ips.append(retrieved_site_ip)
logger.debug('Found IP for %s of %s.', selector.site,
retrieved_site_ip)
except Exception as caught_error:
raise MLabServerResolutionFailed(caught_error)
query_generator = query.BigQueryQueryGenerator(
start_time_datetime,
end_time_datetime,
selector.metric,
server_ips=server_ips,
client_ip_blocks=client_ip_blocks,
client_country=selector.client_country)
return query_generator.query()
def duration_to_string(duration_seconds):
"""Converts a number of seconds into a duration string.
Serializes an amount of time in seconds to a human-readable string
representing the time in days, hours, minutes, and seconds.
Args:
duration_seconds: (int) Total number of seconds.
Returns:
(str) The amount of time represented in a human-readable shorthand
string.
"""
duration_string = ''
remaining_seconds = int(duration_seconds)
units_per_metric = int(remaining_seconds / (60 * 60 * 24))
if units_per_metric > 0:
duration_string += '{0}d'.format(units_per_metric)
remaining_seconds %= 60 * 60 * 24
units_per_metric = int(remaining_seconds / (60 * 60))
if units_per_metric > 0:
duration_string += '{0}h'.format(units_per_metric)
remaining_seconds %= 60 * 60
units_per_metric = int(remaining_seconds / (60))
if units_per_metric > 0:
duration_string += '{0}m'.format(units_per_metric)
remaining_seconds %= 60
if remaining_seconds != 0:
duration_string += '{0}s'.format(remaining_seconds)
return duration_string
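# Illustrative examples (not part of the original module):
#   duration_to_string(90061)  # -> '1d1h1m1s'
#   duration_to_string(45)     # -> '45s'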
def wait_to_respect_thread_limit(concurrent_thread_limit, queue_size):
"""Waits until the number of active threads is lower than the thread limit.
Waits until the number of active threads (including both background worker
threads and the main thread) have dropped below the maximum number of
permitted concurrent threads.
Args:
concurrent_thread_limit: (int) Maximum number of permitted concurrent
threads.
queue_size: (int) Total number of jobs waiting in work queue.
"""
logger = logging.getLogger('telescope')
active_thread_count = threading.activeCount()
while active_thread_count >= concurrent_thread_limit:
logger.debug(('Reached thread limit (%d), cooling off. Currently %d '
'active threads and %d in queue.'),
concurrent_thread_limit, active_thread_count, queue_size)
time.sleep(20)
active_thread_count = threading.activeCount()
def process_selector_queue(selector_queue, google_auth_config):
"""Processes the queue of Selector objects waiting for processing.
Processes the queue of Selector objects by launching BigQuery jobs for each
Selector and spawning threads to gather the results. Enforces query rate
limits so that queue processing obeys limits on maximum simultaneous
threads.
Args:
selector_queue: (Queue.Queue) A queue of Selector objects to process.
google_auth_config: (external.GoogleAPIAuth) Object containing GoogleAPI
auth data.
Returns:
(list) A list of 2-tuples where the first element is the spawned worker
thread that waits on query results and the second element is the object
that stores the results of the query.
"""
logger = logging.getLogger('telescope')
thread_monitor = []
while not selector_queue.empty():
(bq_query_string, thread_metadata, data_filepath,
_) = selector_queue.get(False)
try:
authenticated_service = external.get_authenticated_service(
google_auth_config)
bq_query_call = external.BigQueryCall(authenticated_service,
google_auth_config.project_id)
bq_job_id = bq_query_call.run_asynchronous_query(bq_query_string)
except (external.BigQueryJobFailure,
external.BigQueryCommunicationError) as caught_error:
logger.warn('Caught request error %s on query, cooling down for a '
'minute.', caught_error)
selector_queue.put((bq_query_string, thread_metadata, data_filepath,
True))
time.sleep(60)
bq_job_id = None
if bq_job_id is None:
logger.warn((
'No job id returned for {site} of {metric} (concurrent '
'threads: {thread_count}).').format(
thread_count=threading.activeCount(),
**thread_metadata))
selector_queue.put((bq_query_string, thread_metadata, data_filepath,
True))
continue
external_query_handler = ExternalQueryHandler(data_filepath,
thread_metadata)
external_query_handler.queue_set = (bq_query_string, thread_metadata,
data_filepath, True)
new_thread = threading.Thread(
target=bq_query_call.monitor_query_queue,
args=(bq_job_id, thread_metadata, None,
external_query_handler.retrieve_data_upon_job_completion))
new_thread.daemon = True
new_thread.start()
thread_monitor.append((new_thread, external_query_handler))
concurrent_thread_limit = MAX_THREADS
wait_to_respect_thread_limit(concurrent_thread_limit,
selector_queue.qsize())
return thread_monitor
def main(args):
selector_queue = Queue.Queue()
logger = setup_logger(args.verbosity)
selectors = selectors_from_files(args.selector_in)
# The selectors were likely provided in order. Shuffle them to get better
# concurrent distribution on BigQuery tables.
selectors = shuffle_selectors(selectors)
ip_translator_factory = iptranslation.IPTranslationStrategyFactory()
mlab_site_resolver = mlab.MLabSiteResolver()
for data_selector in selectors:
thread_metadata = {
'date': data_selector.start_time.strftime('%Y-%m-%d-%H%M%S'),
'duration': duration_to_string(data_selector.duration),
'site': data_selector.site,
'client_provider': data_selector.client_provider,
'client_country': data_selector.client_country,
'metric': data_selector.metric
}
data_filepath = utils.build_filename(
args.output, thread_metadata['date'], thread_metadata['duration'],
thread_metadata['site'], thread_metadata['client_provider'],
thread_metadata['client_country'], thread_metadata['metric'],
'-raw.csv')
if not args.ignorecache and utils.check_for_valid_cache(data_filepath):
            logger.info(('Raw data file found (%s), assuming this is a '
                         'cached copy of the same data and moving on. Use '
                         '--ignorecache to suppress this behavior.'),
data_filepath)
continue
logger.debug('Did not find existing data file: %s', data_filepath)
logger.debug((
'Generating Query for subset of {site}, {client_provider}, '
'{date}, {duration}.').format(**thread_metadata))
data_selector.ip_translation_spec.params['maxmind_dir'] = (
args.maxminddir)
ip_translator = ip_translator_factory.create(
data_selector.ip_translation_spec)
bq_query_string = generate_query(data_selector, ip_translator,
mlab_site_resolver)
if args.savequery:
bigquery_filepath = utils.build_filename(
args.output, thread_metadata['date'],
thread_metadata['duration'], thread_metadata['site'],
thread_metadata['client_provider'],
thread_metadata['client_country'], thread_metadata['metric'],
'-bigquery.sql')
write_bigquery_to_file(bigquery_filepath, bq_query_string)
if not args.dryrun:
# Offer Queue a tuple of the BQ statement, metadata, and a boolean
# that indicates that the loop has not attempted to run the query
# thus far (failed queries are pushed back to the end of the loop).
selector_queue.put((bq_query_string, thread_metadata, data_filepath,
False))
else:
logger.warn(
'Dry run flag caught, built query and reached the point that '
'it would be posted, moving on.')
try:
if not args.dryrun:
logger.info('Finished processing selector files, approximately %d '
'queries to be performed.', selector_queue.qsize())
if os.path.exists(args.credentials_filepath) is False:
logger.warn(
'No credentials for Google appear to exist, next step '
'will be an authentication mechanism for its API.')
try:
google_auth_config = external.GoogleAPIAuth(
args.credentials_filepath,
is_headless=args.noauth_local_webserver)
except external.APIConfigError:
logger.error(
'Could not find developer project, please create one in '
'Developer Console to continue. (See README.md)')
return None
while not selector_queue.empty():
thread_monitor = process_selector_queue(selector_queue,
google_auth_config)
for (existing_thread, external_query_handler) in thread_monitor:
existing_thread.join()
# Join together all defined attributes of thread_metadata for a user
# friendly notiication string.
identifier_string = ', '.join(filter(
None, thread_metadata.values()))
if (not external_query_handler.has_succeeded and
not external_query_handler.has_failed):
selector_queue.put(external_query_handler.queue_set)
elif external_query_handler.has_failed:
logger.debug('Fatal error on %s, moving along.',
identifier_string)
else:
logger.debug('Successfully retrieved %s.',
identifier_string)
except KeyboardInterrupt:
logger.error('Caught interruption, shutting down now.')
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='M-Lab Telescope',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('selector_in',
nargs='+',
default=None,
help='Selector JSON datafile(s) to parse.')
parser.add_argument('-v',
'--verbosity',
action='count',
help=(
'variable output verbosity (e.g., -vv is more than '
'-v)'))
parser.add_argument('-o',
'--output',
default='processed/',
help=(
'Output file path. If the folder does not exist, it'
' will be created.'),
type=utils.create_directory_if_not_exists)
parser.add_argument('--maxminddir',
default='resources/',
help='MaxMind GeoLite ASN snapshot directory.')
parser.add_argument('--savequery',
default=False,
action='store_true',
help=('Save the BigQuery statement to the [output] '
'directory as a .sql file.'))
parser.add_argument('--dryrun',
default=False,
action='store_true',
help=('Run up until the query process (best used with '
'--savequery).'))
parser.add_argument('--ignorecache',
default=False,
action='store_true',
help='Overwrite cached query results if they exist.')
parser.add_argument('--noauth_local_webserver',
default=False,
action='store_true',
help=(
'Authenticate to Google using another method than a'
' local webserver.'))
parser.add_argument('--credentialspath',
dest='credentials_filepath',
default='bigquery_credentials.dat',
help=(
'Google API Credentials. If it does not exist, will'
' trigger Google auth.'))
args = parser.parse_args()
main(args)
| 38.894198 | 88 | 0.617015 |
518929a8f31bad6684c252fb80bc50a1e2166544 | 6,926 | py | Python | markyp_bootstrap4/carousels.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | 21 | 2019-07-16T15:03:43.000Z | 2021-11-16T10:51:58.000Z | markyp_bootstrap4/carousels.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | null | null | null | markyp_bootstrap4/carousels.py | volfpeter/markyp-bootstrap4 | 1af5a1f9dc861a14323706ace28882ef6555739a | [
"MIT"
] | null | null | null | """
Bootstrap carousel elements.
See https://getbootstrap.com/docs/4.0/components/carousel/.
"""
from typing import Optional, Tuple
from markyp import ElementType, PropertyValue
from markyp_html import join
from markyp_html.block import div
from markyp_html.inline import a, span
from markyp_html.lists import ol, li
__all__ = ("carousel", "controls", "indicators", "inner", "item", "item_caption", "slide")
def carousel(*args: ElementType,
identifier: str,
add_controls: bool = True,
add_indicators: bool = True,
interval: Optional[int] = None,
keyboard: Optional[bool] = None,
wrap: Optional[bool] = None,
class_: Optional[str] = None,
**kwargs: PropertyValue) -> div:
"""
Creates a carousel.
Keyword arguments not listed in the arguments section are turned into element attributes
on the created `slide` element. If you need to put multiple HTML elements into the same
carousel item and you would like to save a wrapper `div`, you should have a look at the
`markyp.elements.ElementSequence` element.
Positional arguments are wrapped in `item` elements one-by-one and they will form
the main content of the carousel.
Arguments:
identifier: The identifier of the carousel. It must be unique in the entire webpage.
add_controls: Whether to add control elements to the carousel.
add_indicators: Whether to add indicator elements to the carousel.
interval: The amount of time (in milliseconds) to wait between cycling carousel items.
keyboard: Whether the carousel should react to keyboard events.
wrap: Whether the carousel should cycle continuously or have hard stops.
class_: CSS classes to add to the created `slide` element.
"""
if "data-interval" not in kwargs and interval is not None:
kwargs["data-interval"] = interval
if "data-keyboard" not in kwargs and keyboard is not None:
kwargs["data-keyboard"] = keyboard
if "data-wrap" not in kwargs and wrap is not None:
kwargs["data-wrap"] = wrap
return slide(
indicators(identifier, len(args)) if add_indicators else "",
inner(*[item(arg, active=i==0) for i, arg in enumerate(args)]),
*controls(identifier) if add_controls else ("", ""),
identifier=identifier,
class_=class_,
**kwargs
)
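# Illustrative usage sketch (not part of the original module; the identifier and
# slide contents are placeholders):
#   demo = carousel(
#       "First slide", "Second slide", "Third slide",
#       identifier="demo-carousel", interval=3000,
#   )
#   html = str(demo)  # the element serializes to its <div class="carousel slide" ...> markup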
def controls(carousel_id: str, *, class_: Optional[str] = None, **kwargs: PropertyValue) -> Tuple[a, a]:
"""
Creates a pair of anchor elements that serve as the previous and next item controls
for the carousel with the given identifier.
Keyword arguments not listed in the arguments section are turned into element
attributes on the created anchor elements.
Arguments:
carousel_id: The identifier of the carousel to control.
class_: CSS classes to add to the created anchor elements besides `carousel-control-{prev|next}`.
"""
return (
a(
span(class_="carousel-control-prev-icon", **{"aria-hdden": True}),
span("Previous", class_="sr-only"),
class_=join("carousel-control-prev", class_),
href=f"#{carousel_id}",
role="button",
**{**kwargs, "data-slide": "prev"}
),
a(
span(class_="carousel-control-next-icon", **{"aria-hdden": True}),
span("Next", class_="sr-only"),
class_=join("carousel-control-next", class_),
href=f"#{carousel_id}",
role="button",
**{**kwargs, "data-slide": "next"}
)
)
def indicators(carousel_id: str, n: int, *, active_index: int = 0, class_: Optional[str] = None, **kwargs: PropertyValue) -> ol:
"""
Creates an indicator list for the carousel with the given identifier.
Keyword arguments not listed in the arguments section are turned into element
attributes on the created indicator elements.
Arguments:
carousel_id: The identifier of the carousel to control.
n: The number of items in the carousel (and the number of required indicators).
active_index: The index of the indicator that should be active by default.
class_: CSS classes to add to the created indicator elements.
"""
return ol(
*(li(class_=join("active" if active_index == i else None, class_) or None, **{**kwargs, "data-target": f"#{carousel_id}", "data-slide-to": i}) for i in range(n)),
class_="carousel-indicators"
)
def inner(*args: ElementType, class_: Optional[str] = None, **kwargs: PropertyValue) -> div:
"""
Creates a `div` element with `carousel-inner` style.
Positional arguments will become the children elements of the created `div`.
Keyword arguments are turned into element attributes on the created `div`.
Arguments:
class_: Additional CSS class names to set on the created `div`.
"""
return div(*args, class_=join("carousel-inner", class_), **kwargs)
def item(*args: ElementType, active: bool = False, class_: Optional[str] = None, **kwargs: PropertyValue) -> div:
"""
Creates a `div` element with `carousel-item` style.
Positional arguments will become the children elements of the created `div`.
Keyword arguments not listed in the arguments section are turned into element
attributes on the created `div`.
Arguments:
active: Whether this item should be the active one in the carousel.
class_: Additional CSS class names to set on the created `div`.
"""
return div(*args, class_=join("carousel-item", "active" if active else None, class_), **kwargs)
def item_caption(*args: ElementType, class_: Optional[str] = None, **kwargs: PropertyValue) -> div:
"""
Creates a caption element for a carousel item.
Positional arguments will become the children elements of the created `div`.
Keyword arguments are turned into element attributes on the created `div`.
Arguments:
class_: Additional CSS class names to set on the created `div`.
"""
return div(*args, class_=join("carousel-caption d-none d-md-block", class_), **kwargs)
def slide(*args: ElementType, identifier: str, class_: Optional[str] = None, **kwargs: PropertyValue) -> div:
"""
Creates a `carousel slide` `div`, the outer, main element of carousels.
Positional arguments will become the children elements of the created `div`.
Keyword arguments not listed in the arguments section are turned into element
attributes on the created `div`.
Arguments:
identifier: The identifier of the carousel. It must be unique in the entire webpage.
class_: Additional CSS class names to set on the created `div`.
"""
return div(*args, class_=join("carousel slide", class_), **{**kwargs, "data-ride": "carousel", "id": identifier})
| 40.034682 | 170 | 0.66633 |
13657b57844264cf82d010f49361b7f711d9cb7a | 3,327 | py | Python | script/model.py | mts-uw/ICAT_WGS | d0aeb66f46b78d47b91d14cdcde48a1d331f3fcd | [
"MIT"
] | null | null | null | script/model.py | mts-uw/ICAT_WGS | d0aeb66f46b78d47b91d14cdcde48a1d331f3fcd | [
"MIT"
] | null | null | null | script/model.py | mts-uw/ICAT_WGS | d0aeb66f46b78d47b91d14cdcde48a1d331f3fcd | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import GridSearchCV, KFold
import script.views as views
from skopt.learning import ExtraTreesRegressor as opt_ETR
import pickle
import os
import shap
def model_cal(data, cols, model_name='ETR', data_types=['conv', 'prop1', 'prop2'], use_shap=True):
    for type_ in data_types:
        os.makedirs(type_, exist_ok=True)
        print(type_)
        feat, target = data.loc[:, cols[type_]], data.loc[:, cols['target']]
        model = grid_search(feat, target, model_name)
        views.one_shot_plot(feat, target, model, xylim=[0, 35],
                            random_state=1126, save=f'{type_}/{model_name}')
        views.plot_importance(model, feat.columns, 20, save=f'{type_}/{model_name}')
        if use_shap:
            shap.initjs()
            # shap_importance is expected to be provided elsewhere in this project;
            # it is not defined or imported in this module.
            shap_importance(model, feat, target, save=f'{type_}/{model_name}')
        with open(f'{type_}/{model_name}.binaryfile', 'wb') as f:
            pickle.dump(model, f)
def grid_search(feat, target, model='ETR'):
cvf = KFold(n_splits=10, shuffle=True, random_state=1126)
if 'ETR' == model:
cvmodel = GridSearchCV(ExtraTreesRegressor(n_jobs=1, random_state=1126),
param_grid={"n_estimators": [250, 500, 1000]},
n_jobs=5)
crossvalid(feat, target, cvmodel, cvf)
model = opt_ETR(n_estimators=cvmodel.best_params_['n_estimators'],
n_jobs=-1, random_state=1126)
if 'XGB' == model:
        cvmodel = GridSearchCV(XGBRegressor(n_jobs=1, random_state=1126),
param_grid={"n_estimators": [250, 500, 1000]},
n_jobs=5)
crossvalid(feat, target, cvmodel, cvf)
        model = XGBRegressor(n_estimators=cvmodel.best_params_['n_estimators'],
n_jobs=-1, random_state=1126)
return model
def crossvalid(xx, yy, model, cvf):
err_trn = []
err_tes = []
r_2_tes = []
r_2_trn = []
for train_index, test_index in cvf.split(xx):
x_trn = np.array(xx)[train_index]
x_tes = np.array(xx)[test_index]
y_trn = np.array(yy)[train_index]
y_tes = np.array(yy)[test_index]
model.fit(x_trn, y_trn)
x_trn_pred = model.predict(x_trn)
x_tes_pred = model.predict(x_tes)
err_tes.append(mean_squared_error(x_tes_pred, y_tes))
err_trn.append(mean_squared_error(x_trn_pred, y_trn))
r_2_tes.append(r2_score(y_tes, x_tes_pred))
r_2_trn.append(r2_score(y_trn, x_trn_pred))
v_tes = np.sqrt(np.array(err_tes))
v_trn = np.sqrt(np.array(err_trn))
print("RMSE %1.3f (sd: %1.3f, min:%1.3f, max:%1.3f, det:%1.3f) ... train" % (
v_trn.mean(), v_trn.std(), v_trn.min(), v_trn.max(), np.array(r_2_trn).mean()))
print("RMSE %1.3f (sd: %1.3f, min:%1.3f, max:%1.3f, det:%1.3f) ... test" % (
v_tes.mean(), v_tes.std(), v_tes.min(), v_tes.max(), np.array(r_2_tes).mean()))
ret = {}
ret['trn_mean'] = v_trn.mean()
ret['trn_std'] = v_trn.std()
ret['trn_r2'] = np.array(r_2_trn).mean()
ret['tes_mean'] = v_tes.mean()
ret['tes_std'] = v_tes.std()
ret['tes_r2'] = np.array(r_2_tes).mean()
return ret
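# Illustrative usage sketch (not part of the original module; `df` and `cols` are
# placeholders for the project's dataframe and column mapping):
#   est = grid_search(df[cols['conv']], df[cols['target']], model='ETR')
#   scores = crossvalid(df[cols['conv']], df[cols['target']], est,
#                       KFold(n_splits=10, shuffle=True, random_state=1126))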
| 40.573171 | 89 | 0.612564 |
b97515cb9865f1fdcecb8373c55ce1997b680f4f | 1,488 | py | Python | tests/core/fixtures/core_serialization.py | lokijuhy/renku-python | 0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f | [
"Apache-2.0"
] | 26 | 2018-06-04T15:21:50.000Z | 2022-02-11T17:31:24.000Z | tests/core/fixtures/core_serialization.py | lokijuhy/renku-python | 0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f | [
"Apache-2.0"
] | 1,655 | 2018-05-17T22:07:50.000Z | 2022-03-31T21:22:01.000Z | tests/core/fixtures/core_serialization.py | lokijuhy/renku-python | 0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f | [
"Apache-2.0"
] | 19 | 2018-05-18T14:12:25.000Z | 2022-03-30T19:51:35.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2021 Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku core fixtures for serialization testing."""
from pathlib import Path
import pytest
import yaml
@pytest.fixture
def dataset_metadata():
"""Return dataset metadata fixture."""
from renku.core.models.jsonld import NoDatesSafeLoader
file_path = Path(__file__).parent / ".." / ".." / "data" / "doi-dataset.yml"
data = yaml.load(file_path.read_text(), Loader=NoDatesSafeLoader)
yield data
@pytest.fixture
def dataset_metadata_before_calamus():
"""Return dataset metadata fixture."""
from renku.core.models.jsonld import NoDatesSafeLoader
path = Path(__file__).parent / ".." / ".." / "data" / "dataset-v0.10.4-before-calamus.yml"
yield yaml.load(path.read_text(), Loader=NoDatesSafeLoader)
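# Illustrative usage sketch (not part of the original module): pytest injects these
# fixtures by parameter name, e.g.
#   def test_doi_dataset_metadata_loads(dataset_metadata):
#       assert dataset_metadata is not None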
| 34.604651 | 94 | 0.735215 |
e2b21ffc56d64dc3550291129f6585567ec54044 | 8,555 | py | Python | homeassistant/components/cloud/google_config.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 3 | 2021-11-22T22:37:43.000Z | 2022-03-17T00:55:28.000Z | homeassistant/components/cloud/google_config.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 25 | 2021-10-02T10:01:14.000Z | 2022-03-31T06:11:49.000Z | homeassistant/components/cloud/google_config.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z | """Google config for Cloud."""
import asyncio
from http import HTTPStatus
import logging
from hass_nabucasa import Cloud, cloud_api
from hass_nabucasa.google_report_state import ErrorResponse
from homeassistant.components.google_assistant.const import DOMAIN as GOOGLE_DOMAIN
from homeassistant.components.google_assistant.helpers import AbstractConfig
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from homeassistant.core import CoreState, split_entity_id
from homeassistant.helpers import entity_registry as er, start
from homeassistant.setup import async_setup_component
from .const import (
CONF_ENTITY_CONFIG,
DEFAULT_DISABLE_2FA,
PREF_DISABLE_2FA,
PREF_SHOULD_EXPOSE,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
class CloudGoogleConfig(AbstractConfig):
"""HA Cloud Configuration for Google Assistant."""
def __init__(
self, hass, config, cloud_user: str, prefs: CloudPreferences, cloud: Cloud
):
"""Initialize the Google config."""
super().__init__(hass)
self._config = config
self._user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._cur_entity_prefs = self._prefs.google_entity_configs
self._cur_default_expose = self._prefs.google_default_expose
self._sync_entities_lock = asyncio.Lock()
self._sync_on_started = False
@property
def enabled(self):
"""Return if Google is enabled."""
return (
self._cloud.is_logged_in
and not self._cloud.subscription_expired
and self._prefs.google_enabled
)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._prefs.google_secure_devices_pin
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self.enabled and self._prefs.google_report_state
def get_local_webhook_id(self, agent_user_id):
"""Return the webhook ID to be used for actions for a given agent user id via the local SDK."""
return self._prefs.google_local_webhook_id
def get_local_agent_user_id(self, webhook_id):
"""Return the user ID to be used for actions received via the local SDK."""
return self._user
@property
def cloud_user(self):
"""Return Cloud User account."""
return self._user
async def async_initialize(self):
"""Perform async initialization of config."""
await super().async_initialize()
async def hass_started(hass):
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
start.async_at_start(self.hass, hass_started)
# Remove any stored user agent id that is not ours
remove_agent_user_ids = []
for agent_user_id in self._store.agent_user_ids:
if agent_user_id != self.agent_user_id:
remove_agent_user_ids.append(agent_user_id)
for agent_user_id in remove_agent_user_ids:
await self.async_disconnect_agent_user(agent_user_id)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
er.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def should_expose(self, state):
"""If a state object should be exposed."""
return self._should_expose_entity_id(state.entity_id)
def _should_expose_entity_id(self, entity_id):
"""If an entity ID should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config["filter"].empty_filter:
return self._config["filter"](entity_id)
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
entity_registry = er.async_get(self.hass)
if registry_entry := entity_registry.async_get(entity_id):
auxiliary_entity = (
registry_entry.entity_category is not None
or registry_entry.hidden_by is not None
)
else:
auxiliary_entity = False
default_expose = self._prefs.google_default_expose
# Backwards compat
if default_expose is None:
return not auxiliary_entity
return not auxiliary_entity and split_entity_id(entity_id)[0] in default_expose
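    # Illustrative sketch (not part of the original module): with
    # google_default_expose == ["light", "switch"] and no per-entity override,
    # "light.kitchen" is exposed, while "sensor.kitchen_temperature" or any
    # hidden/auxiliary entity (auxiliary_entity above) is not.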
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return self._cloud.username
@property
def has_registered_user_agent(self):
"""Return if we have a Agent User Id registered."""
return len(self._store.agent_user_ids) > 0
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return self.agent_user_id
def should_2fa(self, state):
"""If an entity should be checked for 2FA."""
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(state.entity_id, {})
return not entity_config.get(PREF_DISABLE_2FA, DEFAULT_DISABLE_2FA)
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
try:
await self._cloud.google_report_state.async_send_message(message)
except ErrorResponse as err:
_LOGGER.warning("Error reporting state - %s: %s", err.code, err.message)
async def _async_request_sync_devices(self, agent_user_id: str):
"""Trigger a sync with Google."""
if self._sync_entities_lock.locked():
return HTTPStatus.OK
async with self._sync_entities_lock:
resp = await cloud_api.async_google_actions_request_sync(self._cloud)
return resp.status
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if not self._cloud.is_logged_in:
if self.is_reporting_state:
self.async_disable_report_state()
if self.is_local_sdk_active:
self.async_disable_local_sdk()
return
if (
self.enabled
and GOOGLE_DOMAIN not in self.hass.config.components
and self.hass.is_running
):
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
if self.should_report_state != self.is_reporting_state:
if self.should_report_state:
self.async_enable_report_state()
else:
self.async_disable_report_state()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities_all()
# If entity prefs are the same or we have filter in config.yaml,
# don't sync.
elif (
self._cur_entity_prefs is not prefs.google_entity_configs
or self._cur_default_expose is not prefs.google_default_expose
) and self._config["filter"].empty_filter:
self.async_schedule_google_sync_all()
if self.enabled and not self.is_local_sdk_active:
self.async_enable_local_sdk()
elif not self.enabled and self.is_local_sdk_active:
self.async_disable_local_sdk()
self._cur_entity_prefs = prefs.google_entity_configs
self._cur_default_expose = prefs.google_default_expose
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
# Only consider entity registry updates if info relevant for Google has changed
if event.data["action"] == "update" and not bool(
set(event.data["changes"]) & er.ENTITY_DESCRIBING_ATTRIBUTES
):
return
entity_id = event.data["entity_id"]
if not self._should_expose_entity_id(entity_id):
return
if self.hass.state != CoreState.running:
return
self.async_schedule_google_sync_all()
| 35.645833 | 103 | 0.671303 |
9fe3e3e718698243df5a8efcb2965b1359ef2f0d | 2,963 | py | Python | real-time.py | Imogen1004/drone-detection | 1fc744353c8f43992bc672bfbecaed5e2795560c | [
"MIT"
] | null | null | null | real-time.py | Imogen1004/drone-detection | 1fc744353c8f43992bc672bfbecaed5e2795560c | [
"MIT"
] | null | null | null | real-time.py | Imogen1004/drone-detection | 1fc744353c8f43992bc672bfbecaed5e2795560c | [
"MIT"
] | 2 | 2021-03-24T13:20:07.000Z | 2021-08-06T20:48:27.000Z | import cv2, queue, threading, time
from timeit import default_timer as timer
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import *
from yolov3.configs import *
from tools.Detection_to_XML import *
# bufferless VideoCapture
class VideoCapture:
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)
def read(self):
return self.q.get()
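# The wrapper above always hands back the newest frame, which keeps latency low on
# RTSP streams where frames arrive faster than the detector can process them.
# Illustrative usage sketch (the URL below is a placeholder):
#   cap = VideoCapture("rtsp://<camera-ip>/stream")
#   frame = cap.read()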
Yolo = Load_Yolo_model()
times, times_2 = [], []
cap = VideoCapture("rtsp://192.168.123.91/axis-media/media.amp?codec=h264")
#cap = VideoCapture("http://192.168.123.91/axis-cgi/mjpg/video.cgi")
while True:
img = cap.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [416, 416])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, 416, 0.6)
bboxes = nms(bboxes, 0.45, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
Createjsonfile("json_Detections", str(int(time.time())), original_image, bboxes, read_class_names(TRAIN_CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
show=True
if show:
cv2.imshow('output', image)
if cv2.waitKey(1) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
| 29.04902 | 121 | 0.628755 |
84e30fa9107e5d8f43d63785c54acd187b3277cc | 5,759 | py | Python | ogr/services/gitlab/issue.py | KPostOffice/ogr | 2742a5716229f1b51b9c325c6ea0b790f318bdfd | [
"MIT"
] | null | null | null | ogr/services/gitlab/issue.py | KPostOffice/ogr | 2742a5716229f1b51b9c325c6ea0b790f318bdfd | [
"MIT"
] | 4 | 2021-05-27T21:44:37.000Z | 2021-07-21T21:13:41.000Z | ogr/services/gitlab/issue.py | KPostOffice/ogr | 2742a5716229f1b51b9c325c6ea0b790f318bdfd | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
from typing import List, Optional, Dict, Union
import gitlab
from gitlab.v4.objects import Issue as _GitlabIssue
from ogr.abstract import IssueComment, IssueStatus, Issue
from ogr.exceptions import GitlabAPIException
from ogr.services import gitlab as ogr_gitlab
from ogr.services.base import BaseIssue
from ogr.services.gitlab.comments import GitlabIssueComment
class GitlabIssue(BaseIssue):
_raw_issue: _GitlabIssue
@property
def title(self) -> str:
return self._raw_issue.title
@title.setter
def title(self, new_title: str) -> None:
self._raw_issue.title = new_title
self._raw_issue.save()
@property
def id(self) -> int:
return self._raw_issue.iid
@property
def private(self) -> bool:
return self._raw_issue.confidential
@property
def status(self) -> IssueStatus:
return (
IssueStatus.open
if self._raw_issue.state == "opened"
else IssueStatus[self._raw_issue.state]
)
@property
def url(self) -> str:
return self._raw_issue.web_url
@property
def assignees(self) -> list:
return self._raw_issue.assignees
@property
def description(self) -> str:
return self._raw_issue.description
@description.setter
def description(self, new_description: str) -> None:
self._raw_issue.description = new_description
self._raw_issue.save()
@property
def author(self) -> str:
return self._raw_issue.author["username"]
@property
def created(self) -> datetime.datetime:
return self._raw_issue.created_at
@property
def labels(self) -> List:
return self._raw_issue.labels
def __str__(self) -> str:
return "Gitlab" + super().__str__()
@staticmethod
def create(
project: "ogr_gitlab.GitlabProject",
title: str,
body: str,
private: Optional[bool] = None,
labels: Optional[List[str]] = None,
assignees: Optional[List[str]] = None,
) -> "Issue":
assignee_ids = []
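        # resolve each assignee username to a GitLab user id; fail fast if a username does not exist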
for user in assignees or []:
users_list = project.service.gitlab_instance.users.list(username=user)
if not users_list:
raise GitlabAPIException(f"Unable to find '{user}' username")
assignee_ids.append(str(users_list[0].id))
data = {"title": title, "description": body}
if labels:
data["labels"] = ",".join(labels)
if assignees:
data["assignee_ids"] = ",".join(assignee_ids)
issue = project.gitlab_repo.issues.create(data, confidential=private)
return GitlabIssue(issue, project)
@staticmethod
def get(project: "ogr_gitlab.GitlabProject", issue_id: int) -> "Issue":
try:
return GitlabIssue(project.gitlab_repo.issues.get(issue_id), project)
except gitlab.exceptions.GitlabGetError as ex:
raise GitlabAPIException(f"Issue {issue_id} was not found. ", ex)
@staticmethod
def get_list(
project: "ogr_gitlab.GitlabProject",
status: IssueStatus = IssueStatus.open,
author: Optional[str] = None,
assignee: Optional[str] = None,
labels: Optional[List[str]] = None,
) -> List["Issue"]:
# Gitlab API has status 'opened', not 'open'
parameters: Dict[str, Union[str, List[str], bool]] = {
"state": status.name if status != IssueStatus.open else "opened",
"order_by": "updated_at",
"sort": "desc",
"all": True,
}
if author:
parameters["author_username"] = author
if assignee:
parameters["assignee_username"] = assignee
if labels:
parameters["labels"] = labels
issues = project.gitlab_repo.issues.list(**parameters)
return [GitlabIssue(issue, project) for issue in issues]
def _get_all_comments(self) -> List[IssueComment]:
return [
GitlabIssueComment(parent=self, raw_comment=raw_comment)
for raw_comment in self._raw_issue.notes.list(sort="asc", all=True)
]
def comment(self, body: str) -> IssueComment:
comment = self._raw_issue.notes.create({"body": body})
return GitlabIssueComment(parent=self, raw_comment=comment)
def close(self) -> "Issue":
self._raw_issue.state_event = "close"
self._raw_issue.save()
return self
def add_label(self, *labels: str) -> None:
for label in labels:
self._raw_issue.labels.append(label)
self._raw_issue.save()
| 33.097701 | 82 | 0.655496 |
a030a4fd90e19f3f9ad1260d672ceea3fc671d9f | 1,655 | py | Python | setup.py | neocxf/fastone_ucloud | e931181c632c3c8dc25e94811fd09f8946352004 | [
"MIT"
] | 1 | 2020-09-20T06:11:01.000Z | 2020-09-20T06:11:01.000Z | setup.py | neocxf/fastone_ucloud | e931181c632c3c8dc25e94811fd09f8946352004 | [
"MIT"
] | null | null | null | setup.py | neocxf/fastone_ucloud | e931181c632c3c8dc25e94811fd09f8946352004 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', 'six>=1.15.0']
setup_requirements = ['pytest-runner', 'wheel', 'six']
test_requirements = ['pytest>=3', ]
setup(
author="Xiaofei Chen",
author_email='neocxf@qq.com',
python_requires='>=2',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="ucloud deployment services that work on FastOne stack for computing service",
entry_points={
'console_scripts': [
'fastone-ucloud=fastone_ucloud.cli:cli',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='fastone_ucloud',
name='fastone_ucloud',
packages=find_packages(include=['fastone_ucloud', 'fastone_ucloud.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/neocxf/fastone_ucloud',
version='0.1.2',
zip_safe=False,
)
| 30.090909 | 94 | 0.6429 |
e887a4b94f8653bb3eb80f9f713a758656bf00d3 | 490 | py | Python | examples/test2.py | nvalerkos/cronio | f7361b334a16747ab2b807d4acc88775d9971bf4 | [
"MIT"
] | 1 | 2019-03-17T09:22:31.000Z | 2019-03-17T09:22:31.000Z | examples/test2.py | nvalerkos/cronio | f7361b334a16747ab2b807d4acc88775d9971bf4 | [
"MIT"
] | 14 | 2018-08-07T13:40:37.000Z | 2019-09-19T06:53:37.000Z | examples/test2.py | nvalerkos/cronio | f7361b334a16747ab2b807d4acc88775d9971bf4 | [
"MIT"
] | null | null | null | import sys,os,json
original = {
'comment': 'complex data structure we would ideally want in there',
'ie.1' : {
'key': 'is value bla bla', 'value' : [1,2,3,4,5,6,7,10011]
},
'ie.2' : {
'key': 'is value bla bla', 'value' : [1,2,3,4,5,6,7,10011]
},
'ie.3' : {
'key': 'is value bla bla', 'value' : [1,2,3,4,5,6,7,10011]
}
}
if len(sys.argv) > 1:
    data = sys.argv[1]
    # the payload arrives hex-encoded; decode it back to JSON text before parsing (Python 3 equivalent of data.decode('hex'))
    content = json.loads(bytes.fromhex(data).decode())
else:
    print("exit - no arguments")
    exit()
print(content == original) | 20.416667 | 68 | 0.583673 |
3bbfe0b7a85787680baec8cdfd9112520df5a2df | 1,189 | py | Python | lib/python3.8/site-packages/ansible_collections/ansible/posix/tests/unit/modules/conftest.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | kubernetes-the-hard-way/system/collections/ansible_collections/ansible/posix/tests/unit/modules/conftest.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | null | null | null | kubernetes-the-hard-way/system/collections/ansible_collections/ansible/posix/tests/unit/modules/conftest.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
import pytest
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
@pytest.fixture
def patch_ansible_module(request, mocker):
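    # normalise the parametrised args (raw JSON string or dict) into the JSON blob AnsibleModule reads, then patch it into _ANSIBLE_ARGS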
if isinstance(request.param, string_types):
args = request.param
elif isinstance(request.param, MutableMapping):
if 'ANSIBLE_MODULE_ARGS' not in request.param:
request.param = {'ANSIBLE_MODULE_ARGS': request.param}
if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
args = json.dumps(request.param)
else:
raise Exception('Malformed data to the patch_ansible_module pytest fixture')
mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
| 41 | 92 | 0.743482 |
55cc048d09b5a4003fc84388797da58934f24d33 | 601 | py | Python | scrapybot/scrapybot/items.py | luzhuomi/collamine-client-python | 63bc174da28e0c42b7eb25ac81a5f68ec3e01a03 | [
"Apache-2.0"
] | null | null | null | scrapybot/scrapybot/items.py | luzhuomi/collamine-client-python | 63bc174da28e0c42b7eb25ac81a5f68ec3e01a03 | [
"Apache-2.0"
] | null | null | null | scrapybot/scrapybot/items.py | luzhuomi/collamine-client-python | 63bc174da28e0c42b7eb25ac81a5f68ec3e01a03 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
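# older Scrapy releases ship DjangoItem in scrapy.contrib; from 1.1 onwards it lives in the separate scrapy_djangoitem package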
if scrapy.version_info[0:2] < (1,1):
from scrapy.contrib.djangoitem import DjangoItem
else:
from scrapy_djangoitem import DjangoItem # sudo easy_install scrapy_djangoitem
from scrapy.item import Field
from crawler.models import HTML
'''
class ScrapybotItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
'''
class ScrapybotItem(DjangoItem):
django_model = HTML
| 23.115385 | 86 | 0.723794 |
d65730a8a5911a9ed03c420a6e696fcecf8a1a92 | 302 | py | Python | tests/test_skeleton.py | ludwigflo/ml_utils | e58ac5842c00b167ee87c20c7c1ffc44322b2634 | [
"MIT"
] | null | null | null | tests/test_skeleton.py | ludwigflo/ml_utils | e58ac5842c00b167ee87c20c7c1ffc44322b2634 | [
"MIT"
] | null | null | null | tests/test_skeleton.py | ludwigflo/ml_utils | e58ac5842c00b167ee87c20c7c1ffc44322b2634 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from ml_utils.skeleton import fib
__author__ = "Florian Ludwig"
__copyright__ = "Florian Ludwig"
__license__ = "mit"
def test_fib():
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
| 17.764706 | 39 | 0.642384 |
14cb7009912c72e5ac4589bafcc7ae40455ba23a | 88 | py | Python | lambda/tasks/admin.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 6 | 2021-11-15T18:56:44.000Z | 2022-02-15T10:02:24.000Z | lambda/tasks/admin.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 5 | 2020-10-24T20:08:13.000Z | 2021-06-10T19:05:24.000Z | lambda/tasks/admin.py | Rory-Sullivan/lambda | 44e7ea6273958b2e666b1d59bc6eac54915b8b8c | [
"MIT"
] | 1 | 2020-10-19T14:35:24.000Z | 2020-10-19T14:35:24.000Z | from django.contrib import admin
from . import models
admin.site.register(models.Task)
| 17.6 | 32 | 0.806818 |
69fb0d0e00e96243e7c76ccdf2278c56bdc4ccba | 16,383 | py | Python | tests/models/levit/test_modeling_levit.py | DN6/transformers | 5c17918fe4cda80dae5b7ec8f0b2d23a813c4a05 | [
"Apache-2.0"
] | 5 | 2020-09-01T09:15:48.000Z | 2020-09-15T03:25:05.000Z | tests/models/levit/test_modeling_levit.py | DN6/transformers | 5c17918fe4cda80dae5b7ec8f0b2d23a813c4a05 | [
"Apache-2.0"
] | null | null | null | tests/models/levit/test_modeling_levit.py | DN6/transformers | 5c17918fe4cda80dae5b7ec8f0b2d23a813c4a05 | [
"Apache-2.0"
] | 3 | 2020-08-20T04:46:25.000Z | 2020-10-14T08:39:13.000Z | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LeViT model. """
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitFeatureExtractor
class LevitConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
def __init__(
self,
parent,
batch_size=13,
image_size=64,
num_channels=3,
kernel_size=3,
stride=2,
padding=1,
patch_size=16,
hidden_sizes=[128, 256, 384],
num_attention_heads=[4, 6, 8],
depths=[2, 3, 4],
key_dim=[16, 16, 16],
drop_path_rate=0,
mlp_ratio=[2, 2, 2],
attention_ratio=[2, 2, 2],
initializer_range=0.02,
is_training=True,
use_labels=True,
num_labels=2, # Check
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.hidden_sizes = hidden_sizes
self.num_attention_heads = num_attention_heads
self.depths = depths
self.key_dim = key_dim
self.drop_path_rate = drop_path_rate
self.patch_size = patch_size
self.attention_ratio = attention_ratio
self.mlp_ratio = mlp_ratio
self.initializer_range = initializer_range
self.down_ops = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return LevitConfig(
image_size=self.image_size,
num_channels=self.num_channels,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
patch_size=self.patch_size,
hidden_sizes=self.hidden_sizes,
num_attention_heads=self.num_attention_heads,
depths=self.depths,
key_dim=self.key_dim,
drop_path_rate=self.drop_path_rate,
mlp_ratio=self.mlp_ratio,
attention_ratio=self.attention_ratio,
initializer_range=self.initializer_range,
down_ops=self.down_ops,
)
def create_and_check_model(self, config, pixel_values, labels):
model = LevitModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
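        # the convolutional patch embedding halves the resolution four times (stride-2 convs), so track the resulting feature-map size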
for _ in range(4):
height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = LevitForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Levit does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
def setUp(self):
self.model_tester = LevitModelTester(self)
self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
return
@unittest.skip(reason="Levit does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Levit does not support input and output embeddings")
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depths) + 1
self.assertEqual(len(hidden_states), expected_num_layers)
image_size = (self.model_tester.image_size, self.model_tester.image_size)
height, width = image_size[0], image_size[1]
for _ in range(4):
height = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1
)
width = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1
)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[
height * width,
self.model_tester.hidden_sizes[0],
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
# special case for LevitForImageClassificationWithTeacher model
def test_training(self):
if not self.model_tester.is_training:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(MODEL_MAPPING)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
model = model_class(config)
model.gradient_checkpointing_enable()
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_problem_types(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
problem_types = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
config.problem_type = problem_type["title"]
config.num_labels = problem_type["num_labels"]
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
if problem_type["num_labels"] > 1:
inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=True) as warning_list:
loss = model(**inputs).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}"
)
loss.backward()
@slow
def test_model_from_pretrained(self):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = LevitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
@cached_property
def default_feature_extractor(self):
return LevitFeatureExtractor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def test_inference_image_classification_head(self):
model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
torch_device
)
feature_extractor = self.default_feature_extractor
image = prepare_img()
inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([0.0096, -1.0084, -1.4318]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 38.639151 | 120 | 0.651163 |
32f7915982e138a13b110cbc56778890327be64d | 8,058 | py | Python | triple_agent/tests/test_report_utilities.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | 3 | 2020-04-25T11:42:03.000Z | 2020-07-08T16:38:26.000Z | triple_agent/tests/test_report_utilities.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | 17 | 2019-08-11T19:09:55.000Z | 2021-03-30T17:12:28.000Z | triple_agent/tests/test_report_utilities.py | andrewzwicky/TripleAgent | 8d056df5c53a3d264dc778bad6771a0a2f62e7e7 | [
"MIT"
] | null | null | null | import pytest
from triple_agent.reports.generation.report_utilities import (
create_plot_colors,
create_plot_hatching,
create_data_labels,
create_category_legend_labels,
)
from triple_agent.classes.action_tests import ActionTest
from triple_agent.classes.venues import Venue
from triple_agent.classes.characters import Characters
from triple_agent.reports.generation.plot_specs import PlotLabelStyle
from triple_agent.constants.colors import PlotColorsBase
import pandas
COLOR_TEST_CASES = [
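    # (plot_colors, primary_color_dict, frame, stacks_are_categories, is_pie_chart, expected_colors)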
(
PlotColorsBase(),
None,
pandas.DataFrame(
data=[[3, 4]], columns=[ActionTest.White, ActionTest.Green], index=[None]
),
True,
False,
[["#0077BB", "#0077BB"]],
),
(
PlotColorsBase(),
None,
pandas.DataFrame(
data=[[3, 4]], columns=[ActionTest.White, ActionTest.Green], index=[None]
),
True,
True,
[None],
),
    # this test doesn't make sense because of the disconnect between stacks_are_categories and the index == [None]
(
PlotColorsBase(),
None,
pandas.DataFrame(
data=[[3, 4]], columns=[ActionTest.White, ActionTest.Green], index=[None]
),
False,
False,
[None],
),
(
PlotColorsBase(),
{"x": "blue", "y": "red"},
pandas.DataFrame(
data=[[3, 4, 1], [0, 0, 0]], columns=["test", "a", "b"], index=["x", "y"]
),
False,
False,
[["blue", "blue", "blue"], ["red", "red", "red"]],
),
(
PlotColorsBase(),
None,
pandas.DataFrame(
data=[[3, 4, 1], [0, 0, 0]], columns=["test", "a", "b"], index=["x", "y"]
),
False,
False,
[None, None],
),
(
PlotColorsBase(),
{"x": "blue", "y": "red", "test": "green"},
pandas.DataFrame(data=[[3, 4, 1]], columns=["test", "x", "y"], index=[None]),
True,
False,
[["green", "blue", "red"]],
),
]
@pytest.mark.plotting
@pytest.mark.quick
@pytest.mark.parametrize(
"plot_colors, primary_color_dict, frame, stacks_are_categories, is_pie_chart, expected_colors",
COLOR_TEST_CASES,
)
def test_create_plot_colors(
plot_colors,
primary_color_dict,
frame,
stacks_are_categories,
is_pie_chart,
expected_colors,
):
colors = create_plot_colors(
plot_colors, primary_color_dict, frame, stacks_are_categories, is_pie_chart
)
assert colors == expected_colors
HATCH_TEST_CASES = [
(None, [ActionTest.White, ActionTest.Green], [None], True, [[None, None]]),
    # this test doesn't make sense because of the disconnect between stacks_are_categories and the index == [None]
(None, [ActionTest.White, ActionTest.Green], [None], False, [[None, None]]),
(
{"x": "//", "y": "-"},
["test", "a", "b"],
["x", "y"],
False,
[["//", "//", "//"], ["-", "-", "-"]],
),
({"x": "//", "y": "-"}, ["x", "y"], [None], True, [["//", "-"]]),
(
None,
["test", "a", "b"],
["x", "y"],
False,
[[None, None, None], [None, None, None]],
),
]
@pytest.mark.plotting
@pytest.mark.quick
@pytest.mark.parametrize(
"hatch_dict, columns, index, stacks_are_categories, expected_hatch",
HATCH_TEST_CASES,
)
def test_create_plot_hatching(
hatch_dict, columns, index, stacks_are_categories, expected_hatch
):
hatch = create_plot_hatching(hatch_dict, columns, index, stacks_are_categories)
assert hatch == expected_hatch
DATA_LABEL_CASES = [
(
pandas.DataFrame(
data=[
[6, 2, 5, 1],
[7, 7, 17, 3],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
],
columns=[Venue.Balcony, Venue.Terrace, Venue.Gallery, Venue.Ballroom],
index=[
ActionTest.Green,
ActionTest.White,
ActionTest.Ignored,
ActionTest.Red,
ActionTest.Canceled,
],
),
PlotLabelStyle.NoLabels,
[
["", "", "", ""],
["", "", "", ""],
["", "", "", ""],
["", "", "", ""],
["", "", "", ""],
],
),
(
pandas.DataFrame(
data=[
[6, 2, 5, 1],
[7, 7, 17, 3],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
],
columns=[Venue.Balcony, Venue.Terrace, Venue.Gallery, Venue.Ballroom],
index=[
ActionTest.Green,
ActionTest.White,
ActionTest.Ignored,
ActionTest.Red,
ActionTest.Canceled,
],
),
PlotLabelStyle.Plain,
[
["6", "2", "5", "1"],
["7", "7", "17", "3"],
["1", "0", "1", "0"],
["0", "1", "1", "0"],
["0", "0", "1", "0"],
],
),
]
@pytest.mark.plotting
@pytest.mark.quick
@pytest.mark.parametrize("input_frame, label_style, expected_labels", DATA_LABEL_CASES)
def test_create_data_labels(input_frame, label_style, expected_labels):
labels = create_data_labels(input_frame, label_style)
assert labels == expected_labels
CREATE_CATEGORY_LEGEND_LABELS_CASES = [
(None, None, [], [], False, [], []),
(
None,
None,
["aah", "bah", "ceh"],
["dah", "eeh", "fah"],
False,
["aah", "bah", "ceh"],
["dah", "eeh", "fah"],
),
(
None,
None,
[Characters.Boots, Characters.Carlos],
["dah", "eeh", "fah"],
False,
["Ms. B", "Mr. P"],
["dah", "eeh", "fah"],
),
(None, None, [], [], True, [], []),
(
None,
None,
["aah", "bah", "ceh"],
["dah", "eeh", "fah"],
True,
["aah", "bah", "ceh"],
[None, None, None],
),
(
None,
None,
[Characters.Boots, Characters.Carlos],
["dah", "eeh", "fah"],
True,
["Ms. B", "Mr. P"],
[None, None, None],
),
(
{"a": "d", "b": "e", "c": "f"},
None,
["a", "c", "b"],
[],
True,
["d", "f", "e"],
[],
),
(
{"a": "d", "b": "e", "c": "f"},
None,
[],
["a", "c", "b"],
False,
[],
["d", "f", "e"],
),
(
None,
None,
["a", "c", "b"],
["d", "f", "e"],
False,
["a", "c", "b"],
["d", "f", "e"],
),
(
None,
{"a": "d", "b": "e", "c": "f"},
[],
["a", "c", "b"],
False,
[],
["a", "c", "b"],
),
(
None,
{"a": "d", "b": "e", "c": "f"},
["a", "c", "b"],
["a", "c", "b"],
False,
["d", "f", "e"],
["a", "c", "b"],
),
(
{"a": "d", "b": "e", "c": "f"},
{"aa": "dd", "bb": "ee", "cc": "ff"},
["aa", "cc", "bb"],
["a", "c", "b"],
False,
["dd", "ff", "ee"],
["d", "f", "e"],
),
]
@pytest.mark.plotting
@pytest.mark.quick
@pytest.mark.parametrize(
"primary_label_dict,secondary_label_dict,columns,index,stacks_are_categories,expected_category_labels,expected_stack_labels",
CREATE_CATEGORY_LEGEND_LABELS_CASES,
)
def test_create_category_legend_labels(
primary_label_dict,
secondary_label_dict,
columns,
index,
stacks_are_categories,
expected_category_labels,
expected_stack_labels,
):
category_labels, stack_labels = create_category_legend_labels(
primary_label_dict,
secondary_label_dict,
columns,
index,
stacks_are_categories,
)
assert category_labels == expected_category_labels
assert stack_labels == expected_stack_labels
| 24.947368 | 129 | 0.466865 |
a50f11a7f7150f8284147022fdfac52ff2d8d702 | 553 | py | Python | script/timelapse.py | MarkHershey/opencv-motion-detection | dbaa1a18e7f5b14cc9192dd3a23ea251c3bf4059 | [
"MIT"
] | null | null | null | script/timelapse.py | MarkHershey/opencv-motion-detection | dbaa1a18e7f5b14cc9192dd3a23ea251c3bf4059 | [
"MIT"
] | null | null | null | script/timelapse.py | MarkHershey/opencv-motion-detection | dbaa1a18e7f5b14cc9192dd3a23ea251c3bf4059 | [
"MIT"
] | null | null | null | from time import sleep
from picamera import PiCamera
from datetime import datetime
def timestamp():
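    # build a filename-safe timestamp by dropping microseconds and stripping spaces, dashes and colons from datetime.now()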
now = str(datetime.now())
ts = ""
for i in now[:-7]:
if i in (" ", "-", ":"):
pass
else:
ts += i
return ts
def main():
camera = PiCamera()
camera.resolution = (1024, 768)
camera.rotation = -90
# camera.start_preview()
# Camera warm-up time
while True:
ts = timestamp()
camera.capture(f'{ts}.jpg')
sleep(2)
if __name__ == "__main__":
main()
| 18.433333 | 35 | 0.538879 |
7c3863af3cc54ef5387425cc4125c76ef1235b09 | 3,840 | py | Python | helm/dagster/schema/schema/utils/helm_template.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | helm/dagster/schema/schema/utils/helm_template.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | helm/dagster/schema/schema/utils/helm_template.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | import json
import os
import shutil
import subprocess
from contextlib import contextmanager
from dataclasses import dataclass
from pprint import pprint
from tempfile import NamedTemporaryFile, mkstemp
from typing import Any, List, Optional, Union
import yaml
from kubernetes.client.api_client import ApiClient
from schema.charts.dagster.values import DagsterHelmValues
from schema.charts.dagster_user_deployments.values import DagsterUserDeploymentsHelmValues
def git_repo_root():
return subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip()
@dataclass
class HelmTemplate:
helm_dir_path: str
subchart_paths: List[str]
output: Optional[str] = None
model: Optional[Any] = None
name: str = "RELEASE-NAME"
api_client: ApiClient = ApiClient()
def render(
self,
values: Union[DagsterHelmValues, DagsterUserDeploymentsHelmValues],
chart_version: Optional[str] = None,
) -> List[Any]:
with NamedTemporaryFile() as tmp_file:
helm_dir_path = os.path.join(git_repo_root(), self.helm_dir_path)
values_json = json.loads(values.json(exclude_none=True, by_alias=True))
pprint(values_json)
content = yaml.dump(values_json)
tmp_file.write(content.encode())
tmp_file.flush()
command = [
"helm",
"template",
self.name,
helm_dir_path,
"--debug",
*["--values", tmp_file.name],
]
if self.output:
## Uncomment to render all templates before filtering to surface Helm templating
## errors with better error messages
# subprocess.check_output(command)
command += ["--show-only", self.output]
with self._with_chart_yaml(helm_dir_path, chart_version):
templates = subprocess.check_output(command)
print("\n--- Helm Templates ---") # pylint: disable=print-call
print(templates.decode()) # pylint: disable=print-call
k8s_objects = [k8s_object for k8s_object in yaml.full_load_all(templates) if k8s_object]
if self.model:
k8s_objects = [
self.api_client._ApiClient__deserialize_model( # pylint: disable=W0212
k8s_object, self.model
)
for k8s_object in k8s_objects
]
return k8s_objects
@contextmanager
def _with_chart_yaml(self, helm_dir_path: str, chart_version: Optional[str]):
if not chart_version:
yield
else:
umbrella_chart_path = os.path.join(helm_dir_path, "Chart.yaml")
subchart_chart_paths = [
os.path.join(helm_dir_path, subchart_path, "Chart.yaml")
for subchart_path in self.subchart_paths
]
chart_paths = subchart_chart_paths + [umbrella_chart_path]
chart_copy_paths = []
for chart_path in chart_paths:
_, chart_copy_path = mkstemp()
shutil.copy2(chart_path, chart_copy_path)
chart_copy_paths.append(chart_copy_path)
with open(chart_path) as chart_file:
old_chart_yaml = yaml.safe_load(chart_file)
with open(chart_path, "w") as chart_file:
new_chart_yaml = old_chart_yaml.copy()
new_chart_yaml["version"] = chart_version
yaml.dump(new_chart_yaml, chart_file)
yield
for chart_path, chart_copy_path in zip(chart_paths, chart_copy_paths):
shutil.copy2(chart_copy_path, chart_path)
os.remove(chart_copy_path)
| 35.555556 | 100 | 0.610938 |
b5176fb2944f9a3587720798544fdd5b64863675 | 26,539 | py | Python | translate_sdk/model/monitor/alert_event_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | translate_sdk/model/monitor/alert_event_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | translate_sdk/model/monitor/alert_event_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: alert_event.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from translate_sdk.model.monitor import alert_conditions_pb2 as translate__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='alert_event.proto',
package='monitor',
syntax='proto3',
serialized_options=_b('ZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitor'),
serialized_pb=_b('\n\x11\x61lert_event.proto\x12\x07monitor\x1a\x32translate_sdk/model/monitor/alert_conditions.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xa7\t\n\nAlertEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x61lert_id\x18\x02 \x01(\t\x12\x0f\n\x07rule_id\x18\x03 \x01(\t\x12\x12\n\nis_recover\x18\x04 \x01(\x08\x12\x11\n\tsend_succ\x18\x05 \x01(\x08\x12\x0f\n\x07subject\x18\x06 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x07 \x01(\t\x12\x0e\n\x06source\x18\x08 \x01(\t\x12\x0e\n\x06status\x18\t \x01(\x05\x12\x13\n\x0bsend_detail\x18\n \x01(\x05\x12\x14\n\x0crecover_type\x18\x0b \x01(\t\x12\x0b\n\x03org\x18\x0c \x01(\x05\x12\x0e\n\x06target\x18\r \x01(\t\x12\r\n\x05level\x18\x0e \x01(\x05\x12%\n\x05value\x18\x0f \x01(\x0b\x32\x16.google.protobuf.Value\x12\x16\n\x0e\x61lert_duration\x18\x10 \x01(\x02\x12\x18\n\x10\x61lert_begin_time\x18\x11 \x01(\x05\x12\x16\n\x0e\x61lert_end_time\x18\x12 \x01(\x05\x12\x0c\n\x04time\x18\x13 \x01(\x05\x12\x12\n\nstart_time\x18\x14 \x01(\x05\x12\x13\n\x0binsert_time\x18\x15 \x01(\x05\x12;\n\x0f\x61lert_receivers\x18\x16 \x03(\x0b\x32\".monitor.AlertEvent.AlertReceivers\x12\x31\n\nalert_dims\x18\x17 \x03(\x0b\x32\x1d.monitor.AlertEvent.AlertDims\x12,\n\x07\x61\x63tions\x18\x18 \x03(\x0b\x32\x1b.monitor.AlertEvent.Actions\x12\x32\n\x10\x61lert_conditions\x18\x19 \x01(\x0b\x32\x18.monitor.AlertConditions\x12\x10\n\x08objectId\x18\x1a \x01(\t\x12\x12\n\ninstanceId\x18\x1b \x01(\t\x12\x0e\n\x06system\x18\x1c \x01(\t\x1a.\n\x0e\x41lertReceivers\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06method\x18\x02 \x01(\t\x1a@\n\tAlertDims\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x1a\x87\x03\n\x07\x41\x63tions\x12\x38\n\tcondition\x18\x01 \x01(\x0b\x32%.monitor.AlertEvent.Actions.Condition\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12\x0f\n\x07upgrade\x18\x04 \x01(\x08\x12\x0b\n\x03run\x18\x05 \x01(\x08\x12\x0f\n\x07methods\x18\x06 \x03(\t\x12\x11\n\treceivers\x18\x07 \x03(\t\x12\x1c\n\x14receiver_user_groups\x18\x08 \x03(\t\x12\x43\n\x0freceiver_owners\x18\t \x03(\x0b\x32*.monitor.AlertEvent.Actions.ReceiverOwners\x1a/\n\tCondition\x12\x13\n\x0blasting_for\x18\x01 \x01(\x05\x12\r\n\x05level\x18\x02 \x01(\x05\x1aN\n\x0eReceiverOwners\x12\x11\n\ttranslate\x18\x01 \x01(\t\x12\x11\n\tobject_id\x18\x02 \x01(\t\x12\x16\n\x0eobject_attr_id\x18\x03 \x01(\tBCZAgo.easyops.local/contracts/protorepo-models/easyops/model/monitorb\x06proto3')
,
dependencies=[translate__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_ALERTEVENT_ALERTRECEIVERS = _descriptor.Descriptor(
name='AlertReceivers',
full_name='monitor.AlertEvent.AlertReceivers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='monitor.AlertEvent.AlertReceivers.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='method', full_name='monitor.AlertEvent.AlertReceivers.method', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=798,
serialized_end=844,
)
_ALERTEVENT_ALERTDIMS = _descriptor.Descriptor(
name='AlertDims',
full_name='monitor.AlertEvent.AlertDims',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='monitor.AlertEvent.AlertDims.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='monitor.AlertEvent.AlertDims.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=846,
serialized_end=910,
)
_ALERTEVENT_ACTIONS_CONDITION = _descriptor.Descriptor(
name='Condition',
full_name='monitor.AlertEvent.Actions.Condition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lasting_for', full_name='monitor.AlertEvent.Actions.Condition.lasting_for', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='monitor.AlertEvent.Actions.Condition.level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1177,
serialized_end=1224,
)
_ALERTEVENT_ACTIONS_RECEIVEROWNERS = _descriptor.Descriptor(
name='ReceiverOwners',
full_name='monitor.AlertEvent.Actions.ReceiverOwners',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='translate', full_name='monitor.AlertEvent.Actions.ReceiverOwners.translate', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_id', full_name='monitor.AlertEvent.Actions.ReceiverOwners.object_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_attr_id', full_name='monitor.AlertEvent.Actions.ReceiverOwners.object_attr_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1226,
serialized_end=1304,
)
_ALERTEVENT_ACTIONS = _descriptor.Descriptor(
name='Actions',
full_name='monitor.AlertEvent.Actions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='condition', full_name='monitor.AlertEvent.Actions.condition', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='monitor.AlertEvent.Actions.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='monitor.AlertEvent.Actions.status', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upgrade', full_name='monitor.AlertEvent.Actions.upgrade', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='run', full_name='monitor.AlertEvent.Actions.run', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='methods', full_name='monitor.AlertEvent.Actions.methods', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receivers', full_name='monitor.AlertEvent.Actions.receivers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_user_groups', full_name='monitor.AlertEvent.Actions.receiver_user_groups', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver_owners', full_name='monitor.AlertEvent.Actions.receiver_owners', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ALERTEVENT_ACTIONS_CONDITION, _ALERTEVENT_ACTIONS_RECEIVEROWNERS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=913,
serialized_end=1304,
)
_ALERTEVENT = _descriptor.Descriptor(
name='AlertEvent',
full_name='monitor.AlertEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='monitor.AlertEvent.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_id', full_name='monitor.AlertEvent.alert_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rule_id', full_name='monitor.AlertEvent.rule_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_recover', full_name='monitor.AlertEvent.is_recover', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='send_succ', full_name='monitor.AlertEvent.send_succ', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject', full_name='monitor.AlertEvent.subject', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content', full_name='monitor.AlertEvent.content', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='monitor.AlertEvent.source', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='monitor.AlertEvent.status', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='send_detail', full_name='monitor.AlertEvent.send_detail', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recover_type', full_name='monitor.AlertEvent.recover_type', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='monitor.AlertEvent.org', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='monitor.AlertEvent.target', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='level', full_name='monitor.AlertEvent.level', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='monitor.AlertEvent.value', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_duration', full_name='monitor.AlertEvent.alert_duration', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_begin_time', full_name='monitor.AlertEvent.alert_begin_time', index=16,
number=17, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_end_time', full_name='monitor.AlertEvent.alert_end_time', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time', full_name='monitor.AlertEvent.time', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_time', full_name='monitor.AlertEvent.start_time', index=19,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='insert_time', full_name='monitor.AlertEvent.insert_time', index=20,
number=21, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_receivers', full_name='monitor.AlertEvent.alert_receivers', index=21,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_dims', full_name='monitor.AlertEvent.alert_dims', index=22,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actions', full_name='monitor.AlertEvent.actions', index=23,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alert_conditions', full_name='monitor.AlertEvent.alert_conditions', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='monitor.AlertEvent.objectId', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='monitor.AlertEvent.instanceId', index=26,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='system', full_name='monitor.AlertEvent.system', index=27,
number=28, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ALERTEVENT_ALERTRECEIVERS, _ALERTEVENT_ALERTDIMS, _ALERTEVENT_ACTIONS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=1304,
)
_ALERTEVENT_ALERTRECEIVERS.containing_type = _ALERTEVENT
_ALERTEVENT_ALERTDIMS.fields_by_name['value'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_ALERTEVENT_ALERTDIMS.containing_type = _ALERTEVENT
_ALERTEVENT_ACTIONS_CONDITION.containing_type = _ALERTEVENT_ACTIONS
_ALERTEVENT_ACTIONS_RECEIVEROWNERS.containing_type = _ALERTEVENT_ACTIONS
_ALERTEVENT_ACTIONS.fields_by_name['condition'].message_type = _ALERTEVENT_ACTIONS_CONDITION
_ALERTEVENT_ACTIONS.fields_by_name['receiver_owners'].message_type = _ALERTEVENT_ACTIONS_RECEIVEROWNERS
_ALERTEVENT_ACTIONS.containing_type = _ALERTEVENT
_ALERTEVENT.fields_by_name['value'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_ALERTEVENT.fields_by_name['alert_receivers'].message_type = _ALERTEVENT_ALERTRECEIVERS
_ALERTEVENT.fields_by_name['alert_dims'].message_type = _ALERTEVENT_ALERTDIMS
_ALERTEVENT.fields_by_name['actions'].message_type = _ALERTEVENT_ACTIONS
_ALERTEVENT.fields_by_name['alert_conditions'].message_type = translate__sdk_dot_model_dot_monitor_dot_alert__conditions__pb2._ALERTCONDITIONS
DESCRIPTOR.message_types_by_name['AlertEvent'] = _ALERTEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AlertEvent = _reflection.GeneratedProtocolMessageType('AlertEvent', (_message.Message,), {
'AlertReceivers' : _reflection.GeneratedProtocolMessageType('AlertReceivers', (_message.Message,), {
'DESCRIPTOR' : _ALERTEVENT_ALERTRECEIVERS,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent.AlertReceivers)
})
,
'AlertDims' : _reflection.GeneratedProtocolMessageType('AlertDims', (_message.Message,), {
'DESCRIPTOR' : _ALERTEVENT_ALERTDIMS,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent.AlertDims)
})
,
'Actions' : _reflection.GeneratedProtocolMessageType('Actions', (_message.Message,), {
'Condition' : _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), {
'DESCRIPTOR' : _ALERTEVENT_ACTIONS_CONDITION,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent.Actions.Condition)
})
,
'ReceiverOwners' : _reflection.GeneratedProtocolMessageType('ReceiverOwners', (_message.Message,), {
'DESCRIPTOR' : _ALERTEVENT_ACTIONS_RECEIVEROWNERS,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent.Actions.ReceiverOwners)
})
,
'DESCRIPTOR' : _ALERTEVENT_ACTIONS,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent.Actions)
})
,
'DESCRIPTOR' : _ALERTEVENT,
'__module__' : 'alert_event_pb2'
# @@protoc_insertion_point(class_scope:monitor.AlertEvent)
})
_sym_db.RegisterMessage(AlertEvent)
_sym_db.RegisterMessage(AlertEvent.AlertReceivers)
_sym_db.RegisterMessage(AlertEvent.AlertDims)
_sym_db.RegisterMessage(AlertEvent.Actions)
_sym_db.RegisterMessage(AlertEvent.Actions.Condition)
_sym_db.RegisterMessage(AlertEvent.Actions.ReceiverOwners)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 47.560932 | 2,477 | 0.743057 |
daec5e946f5986c6bdbd7e5b913c666f3d34fc57 | 2,718 | py | Python | looker_deployer/commands/deploy_connections.py | hselbie/looker_deployer | fc97ba1f6f5288e4e3413fb89f7ea16db6543ac8 | [
"MIT"
] | null | null | null | looker_deployer/commands/deploy_connections.py | hselbie/looker_deployer | fc97ba1f6f5288e4e3413fb89f7ea16db6543ac8 | [
"MIT"
] | 1 | 2021-08-05T16:19:09.000Z | 2021-08-05T16:19:09.000Z | looker_deployer/commands/deploy_connections.py | hselbie/looker_deployer | fc97ba1f6f5288e4e3413fb89f7ea16db6543ac8 | [
"MIT"
] | null | null | null | import logging
import re
from looker_sdk import models, error
from looker_deployer.utils import deploy_logging
from looker_deployer.utils import parse_ini
from looker_deployer.utils.get_client import get_client
logger = deploy_logging.get_logger(__name__)
def get_filtered_connections(source_sdk, pattern=None):
connections = source_sdk.all_connections()
logger.debug(
"Connections pulled",
extra={
"connection_names": [i.name for i in connections]
}
)
if pattern:
compiled_pattern = re.compile(pattern)
connections = [i for i in connections if compiled_pattern.search(i.name)]
logger.debug(
"Connections filtered",
extra={
"filtered_connections": [i.name for i in connections],
"pattern": pattern
}
)
return connections
def write_connections(connections, target_sdk, db_config=None):
for conn in connections:
# Create a DB Write Object from each connection
new_conn = models.WriteDBConnection()
new_conn.__dict__.update(conn.__dict__)
conn_exists = True
try:
target_sdk.connection(new_conn.name)
except error.SDKError:
conn_exists = False
if db_config:
logger.debug("Attempting password update", extra={"connection": new_conn.name})
db_pass = db_config[conn.name]
new_conn.password = db_pass
if not conn_exists:
logger.debug("No existing connection found. Creating...")
logger.info("Deploying connection", extra={"connection": new_conn.name})
target_sdk.create_connection(new_conn)
logger.info("Deployment complete", extra={"connection": new_conn.name})
else:
logger.debug("Existing connection found. Updating...")
logger.info("Deploying connection", extra={"connection": new_conn.name})
target_sdk.update_connection(new_conn.name, new_conn)
logger.info("Deployment complete", extra={"connection": new_conn.name})
def send_connections(source_sdk, target_sdk, pattern=None, db_config=None):
connections = get_filtered_connections(source_sdk, pattern)
write_connections(connections, target_sdk, db_config)
def main(args):
if args.debug:
logger.setLevel(logging.DEBUG)
if args.include_password:
db_config = parse_ini.read_ini(args.ini)["Databases"]
else:
db_config = None
source_sdk = get_client(args.ini, args.source)
for t in args.target:
target_sdk = get_client(args.ini, t)
send_connections(source_sdk, target_sdk, args.pattern, db_config)
| 31.976471 | 91 | 0.664091 |
35bce7c69e4aaf474249afa6ead814c8a80a78ea | 5,790 | py | Python | test/functional/p2p_eviction.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 159 | 2016-07-09T13:02:19.000Z | 2022-03-11T08:15:56.000Z | test/functional/p2p_eviction.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 40 | 2016-07-22T17:26:37.000Z | 2022-03-22T19:37:32.000Z | test/functional/p2p_eviction.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | [
"MIT"
] | 57 | 2016-10-21T23:57:47.000Z | 2022-03-26T20:51:23.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
""" Test node eviction logic
When the number of peers has reached the limit of maximum connections,
the next connecting inbound peer will trigger the eviction mechanism.
We cannot currently test the parts of the eviction logic that are based on
address/netgroup since in the current framework, all peers are connecting from
the same local address. See Issue #14210 for more info.
Therefore, this test is limited to the remaining protection criteria.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
create_block,
create_coinbase,
)
from test_framework.messages import (
msg_pong,
msg_tx,
tx_from_hex,
)
from test_framework.p2p import P2PDataStore, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class SlowP2PDataStore(P2PDataStore):
def on_ping(self, message):
time.sleep(0.1)
self.send_message(msg_pong(message.nonce))
class SlowP2PInterface(P2PInterface):
def on_ping(self, message):
time.sleep(0.1)
self.send_message(msg_pong(message.nonce))
class P2PEvict(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# The choice of maxconnections=32 results in a maximum of 21 inbound connections
# (32 - 10 outbound - 1 feeler). 20 inbound peers are protected from eviction:
# 4 by netgroup, 4 that sent us blocks, 4 that sent us transactions and 8 via lowest ping time
self.extra_args = [['-maxconnections=32']]
def run_test(self):
protected_peers = set() # peers that we expect to be protected from eviction
current_peer = -1
node = self.nodes[0]
node.generatetoaddress(COINBASE_MATURITY + 1, node.get_deterministic_priv_key().address)
self.log.info("Create 4 peers and protect them from eviction by sending us a block")
for _ in range(4):
block_peer = node.add_p2p_connection(SlowP2PDataStore())
current_peer += 1
block_peer.sync_with_ping()
best_block = node.getbestblockhash()
tip = int(best_block, 16)
best_block_time = node.getblock(best_block)['time']
block = create_block(tip, create_coinbase(node.getblockcount() + 1), best_block_time + 1)
block.solve()
block_peer.send_blocks_and_test([block], node, success=True)
protected_peers.add(current_peer)
self.log.info("Create 5 slow-pinging peers, making them eviction candidates")
for _ in range(5):
node.add_p2p_connection(SlowP2PInterface())
current_peer += 1
self.log.info("Create 4 peers and protect them from eviction by sending us a tx")
for i in range(4):
txpeer = node.add_p2p_connection(SlowP2PInterface())
current_peer += 1
txpeer.sync_with_ping()
prevtx = node.getblock(node.getblockhash(i + 1), 2)['tx'][0]
rawtx = node.createrawtransaction(
inputs=[{'txid': prevtx['txid'], 'vout': 0}],
outputs=[{node.get_deterministic_priv_key().address: 50 - 0.00125}],
)
sigtx = node.signrawtransactionwithkey(
hexstring=rawtx,
privkeys=[node.get_deterministic_priv_key().key],
prevtxs=[{
'txid': prevtx['txid'],
'vout': 0,
'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'],
}],
)['hex']
txpeer.send_message(msg_tx(tx_from_hex(sigtx)))
protected_peers.add(current_peer)
self.log.info("Create 8 peers and protect them from eviction by having faster pings")
for _ in range(8):
fastpeer = node.add_p2p_connection(P2PInterface())
current_peer += 1
self.wait_until(lambda: "ping" in fastpeer.last_message, timeout=10)
# Make sure by asking the node what the actual min pings are
peerinfo = node.getpeerinfo()
pings = {}
for i in range(len(peerinfo)):
pings[i] = peerinfo[i]['minping'] if 'minping' in peerinfo[i] else 1000000
sorted_pings = sorted(pings.items(), key=lambda x: x[1])
# Usually the 8 fast peers are protected. In rare cases of unreliable pings,
# one of the slower peers might have a faster min ping though.
for i in range(8):
protected_peers.add(sorted_pings[i][0])
self.log.info("Create peer that triggers the eviction mechanism")
node.add_p2p_connection(SlowP2PInterface())
# One of the non-protected peers must be evicted. We can't be sure which one because
# 4 peers are protected via netgroup, which is identical for all peers,
# and the eviction mechanism doesn't preserve the order of identical elements.
evicted_peers = []
for i in range(len(node.p2ps)):
if not node.p2ps[i].is_connected:
evicted_peers.append(i)
self.log.info("Test that one peer was evicted")
self.log.debug("{} evicted peer: {}".format(len(evicted_peers), set(evicted_peers)))
assert_equal(len(evicted_peers), 1)
self.log.info("Test that no peer expected to be protected was evicted")
self.log.debug("{} protected peers: {}".format(len(protected_peers), protected_peers))
assert evicted_peers[0] not in protected_peers
if __name__ == '__main__':
P2PEvict().main()
| 41.956522 | 102 | 0.65475 |
0f8cfecb42d4607bb54136b727653b7ea3f4b98a | 1,244 | py | Python | osh/word_eval_test.py | msingle/oil | 5623c58d4558d37cd43e6274574d94a0e547f192 | [
"Apache-2.0"
] | null | null | null | osh/word_eval_test.py | msingle/oil | 5623c58d4558d37cd43e6274574d94a0e547f192 | [
"Apache-2.0"
] | null | null | null | osh/word_eval_test.py | msingle/oil | 5623c58d4558d37cd43e6274574d94a0e547f192 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
word_eval_test.py: Tests for word_eval.py
"""
from __future__ import print_function
import unittest
from core import test_lib
from osh.cmd_parse_test import assertParseSimpleCommand
from osh import state
def InitEvaluator():
word_ev = test_lib.MakeTestEvaluator()
state.SetLocalString(word_ev.mem, 'x', '- -- ---')
state.SetLocalString(word_ev.mem, 'y', 'y yy')
state.SetLocalString(word_ev.mem, 'empty', '')
return word_ev
class WordEvalTest(unittest.TestCase):
def testEvalWordSequence(self):
node = assertParseSimpleCommand(self, 'ls foo')
self.assertEqual(2, len(node.words), node.words)
ev = InitEvaluator()
argv = ev.EvalWordSequence2(node.words)
print()
print(argv)
node = assertParseSimpleCommand(self, 'ls [$x] $y core/a*.py')
print(node)
ev = InitEvaluator()
argv = ev.EvalWordSequence2(node.words)
print()
print(argv)
if __name__ == '__main__':
unittest.main()
| 25.387755 | 66 | 0.713023 |
c23fdbaaa0f2abf8ca79e0d388ac4c2c2354c71a | 395 | py | Python | howtosapi/asgi.py | tiveritz/how-tos-api | 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e | [
"MIT"
] | null | null | null | howtosapi/asgi.py | tiveritz/how-tos-api | 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e | [
"MIT"
] | 3 | 2021-05-23T07:57:15.000Z | 2021-05-28T05:38:17.000Z | howtosapi/asgi.py | tiveritz/how-tos-api | 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e | [
"MIT"
] | null | null | null | """
ASGI config for howtosapi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'howtosapi.settings')
application = get_asgi_application()
| 23.235294 | 78 | 0.787342 |
ef8b168f71875a3a26750fbbc0a3a324d71c90b9 | 4,721 | py | Python | examples/momentum.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | 20 | 2019-12-02T11:49:12.000Z | 2021-12-24T19:34:32.000Z | examples/momentum.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | null | null | null | examples/momentum.py | jiajiaxu123/Orca | e86189e70c1d0387816bb98b8047a6232fbda9df | [
"Apache-2.0"
] | 5 | 2019-12-02T12:16:22.000Z | 2021-10-22T02:27:47.000Z | import dolphindb.orca as orca
import matplotlib.pyplot as plt
US = 'C:/DolphinDB/Orca/databases/USstocks.csv'
orca.connect('localhost', 8848, 'admin', '123456')
def load_price_data(df):
USstocks = df[df.date.dt.weekday.between(0, 4), df.PRC.notnull(), df.VOL.notnull()][
['PERMNO', 'date', 'PRC', 'VOL', 'RET', 'SHROUT']
].sort_values(by=['PERMNO', 'date'])
USstocks['PRC'] = USstocks.PRC.abs()
USstocks['MV'] = USstocks.SHROUT * USstocks.PRC
USstocks['cumretIndex'] = (USstocks + 1)['RET'].groupby('PERMNO', lazy=True).cumprod()
USstocks['signal'] = (USstocks.shift(21) / USstocks.shift(252) - 1).groupby(
'PERMNO', lazy=True)['cumretIndex'].transform()
return USstocks
def gen_trade_tables(df):
USstocks = df[(df.PRC > 5), (df.MV > 100000), (df.VOL > 0), (df.signal.notnull())]
USstocks = USstocks[['date', 'PERMNO', 'MV', 'signal']].sort_values(by='date')
return USstocks
def form_portfolio(start_date, end_date, tradables, holding_days, groups, wt_scheme):
ports = tradables[tradables.date.between(start_date, end_date)].groupby('date').filter('count(PERMNO) >= 100')
ports['rank'] = ports.groupby('date')['signal'].transform('rank{{,true,{groups}}}'.format(groups=groups))
ports['wt'] = 0.0
ports_rank_eq_0 = (ports['rank'] == 0)
ports_rank_eq_groups_sub_1 = (ports['rank'] == groups-1)
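# Note on the weighting logic below (comment added for clarity): the strings handed to
# transform() are DolphinDB expressions evaluated server-side, not Python lambdas.
# wt_scheme == 1 gives each name in the bottom bucket (rank 0, the short leg, negative
# weight) and the top bucket (rank groups-1, the long leg) an equal 1/(count*holding_days)
# slice of the book; wt_scheme == 2 does the same but proportional to market value (MV).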
if wt_scheme == 1:
ports.loc[ports_rank_eq_0, 'wt'] = \
ports[ports_rank_eq_0].groupby(['date'])['PERMNO'].transform(
r'(PERMNO->-1\count(PERMNO)\{holding_days})'.format(holding_days=holding_days)
)
ports.loc[ports_rank_eq_groups_sub_1, 'wt'] = \
ports[ports_rank_eq_groups_sub_1].groupby(['date'])['PERMNO'].transform(
r'(PERMNO->1\count(PERMNO)\\{holding_days})'.format(holding_days=holding_days)
)
elif wt_scheme == 2:
ports.loc[ports_rank_eq_0, 'wt'] = \
ports[ports_rank_eq_0].groupby(['date'])['MV'].transform(
r'(MV->-MV\sum(MV)\{holding_days})'.format(holding_days=holding_days)
)
ports.loc[ports_rank_eq_groups_sub_1, 'wt'] = \
ports[ports_rank_eq_groups_sub_1].groupby(['date'])['MV'].transform(
r'(MV->MV\sum(MV)\{holding_days})'.format(holding_days=holding_days)
)
ports = ports.loc[ports.wt != 0, ['PERMNO', 'date', 'wt']].sort_values(by=['PERMNO', 'date'])
ports.rename(columns={'date': 'tranche'}, inplace=True)
return ports
def calc_stock_pnl(ports, daily_rtn, holding_days, end_date, last_days):
dates = ports[['tranche']].drop_duplicates().sort_values(by='tranche')
dates_after_ages = orca.DataFrame()
for age in range(1, holding_days+1):
dates_after_age_i = dates.copy()
dates_after_age_i['age'] = age
dates_after_age_i['date_after_age'] = dates_after_age_i['tranche'].shift(-age)
dates_after_ages.append(dates_after_age_i, inplace=True)
pos = ports.merge(dates_after_ages, on='tranche')
pos = pos.join(last_days, on='PERMNO')
pos = pos.loc[(pos.date_after_age.notnull() & (pos.date_after_age <= pos.last_day.clip(upper=end_date))),
['date_after_age', 'PERMNO', 'tranche', 'age', 'wt']]
pos = pos.compute()
pos.rename(columns={'date_after_age': 'date', 'wt': 'expr'}, inplace=True)
pos['ret'] = 0.0
pos['pnl'] = 0.0
# use set_index to make it easy to equi-join the two frames
daily_rtn.set_index(['date', 'PERMNO'], inplace=True)
pos.set_index(['date', 'PERMNO'], inplace=True)
pos['ret'] = daily_rtn['RET']
pos.reset_index(inplace=True)
pos['expr'] = (pos.expr * (1 + pos.ret).cumprod()).groupby(
['PERMNO', 'tranche'], lazy=True).transform()
pos['pnl'] = pos.expr * pos.ret / (1 + pos.ret)
return pos
def main():
df = orca.read_csv(US)
price_data = load_price_data(df)
tradables = gen_trade_tables(price_data)
start_date, end_date = orca.Timestamp("1996.01.01"), orca.Timestamp("2017.01.01")
holding_days = 5
groups = 10
ports = form_portfolio(start_date, end_date, tradables, holding_days, groups, 2)
daily_rtn = price_data.loc[price_data.date.between(start_date, end_date), ['date', 'PERMNO', 'RET']]
last_days = price_data.groupby('PERMNO')['date'].max()
last_days.rename("last_day", inplace=True)
stock_pnl = calc_stock_pnl(ports, daily_rtn, holding_days, end_date, last_days)
port_pnl = stock_pnl.groupby('date')['pnl'].sum()
cumulative_return = port_pnl.cumsum()
cumulative_return.plot()
plt.show()
if __name__ == '__main__':
main()
| 41.778761 | 114 | 0.632493 |
dc847dccb7940bde987b2c90d74ede2c3bab38ef | 3,157 | py | Python | autotest/gdrivers/isis2.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,100 | 2015-01-02T10:33:40.000Z | 2022-03-31T02:06:51.000Z | autotest/gdrivers/isis2.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,496 | 2015-01-06T16:53:30.000Z | 2022-03-31T20:18:51.000Z | autotest/gdrivers/isis2.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 2,036 | 2015-01-08T20:22:12.000Z | 2022-03-31T10:24:08.000Z | #!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for ISIS2 driver.
# Author: Even Rouault <even dot rouault @ spatialys.com>
#
###############################################################################
# Copyright (c) 2008, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import gdaltest
###############################################################################
# Read a truncated and modified version of arvidson_original.cub from
# ftp://ftpflag.wr.usgs.gov/dist/pigpen/venus/venustopo_download/ovda_dtm.zip
def test_isis2_1():
tst = gdaltest.GDALTest('ISIS2', 'isis2/arvidson_original_truncated.cub', 1, 382)
expected_prj = """PROJCS["SIMPLE_CYLINDRICAL VENUS",
GEOGCS["GCS_VENUS",
DATUM["D_VENUS",
SPHEROID["VENUS",6051000,0]],
PRIMEM["Reference_Meridian",0],
UNIT["degree",0.0174532925199433]],
PROJECTION["Equirectangular"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",0],
PARAMETER["standard_parallel_1",-6.5],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["meter",1]]"""
expected_gt = (10157400.403618813, 1200.0000476837158, 0.0, -585000.02324581146, 0.0, -1200.0000476837158)
return tst.testOpen(check_prj=expected_prj,
check_gt=expected_gt)
###############################################################################
# Test simple creation on disk.
def test_isis2_2():
tst = gdaltest.GDALTest('ISIS2', 'byte.tif', 1, 4672)
return tst.testCreate()
###############################################################################
# Test a different data type with some options.
def test_isis2_3():
tst = gdaltest.GDALTest('ISIS2', 'float32.tif', 1, 4672,
options=['LABELING_METHOD=DETACHED', 'IMAGE_EXTENSION=qub'])
return tst.testCreateCopy(vsimem=1)
| 38.036145 | 110 | 0.61039 |
84caadfa74733c76863a44278b4fd61a2edff4e7 | 933 | py | Python | manage.py | davidpmills/project-1 | 52788bc5ab3e4359e31ce153e49d7e097dc99127 | [
"BSD-2-Clause"
] | null | null | null | manage.py | davidpmills/project-1 | 52788bc5ab3e4359e31ce153e49d7e097dc99127 | [
"BSD-2-Clause"
] | null | null | null | manage.py | davidpmills/project-1 | 52788bc5ab3e4359e31ce153e49d7e097dc99127 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import os
from flask.ext.script import Manager, Server
from flask.ext.script.commands import ShowUrls, Clean
from appname import create_app
from appname.models import db, User
# default to dev config because no one should use this in
# production anyway
env = os.environ.get('APPNAME_ENV', 'dev')
app = create_app('appname.settings.%sConfig' % env.capitalize(), env=env)
manager = Manager(app)
manager.add_command("server", Server())
manager.add_command("show-urls", ShowUrls())
manager.add_command("clean", Clean())
@manager.shell
def make_shell_context():
""" Creates a python REPL with several default imports
in the context of the app
"""
return dict(app=app, db=db, User=User)
@manager.command
def createdb():
""" Creates a database with all of the tables defined in
your SQLAlchemy models
"""
db.create_all()
if __name__ == "__main__":
manager.run()
| 23.325 | 73 | 0.713826 |
8cb5462d834e9c57a56366ddde58944a3528edff | 24,217 | py | Python | docker/dockerTrader/gateway/okcoinGateway/okcoinGateway.py | OceanMT/vnpy_py3 | 0901e9381c54e615247eb753bac476a911c9ae5d | [
"MIT"
] | null | null | null | docker/dockerTrader/gateway/okcoinGateway/okcoinGateway.py | OceanMT/vnpy_py3 | 0901e9381c54e615247eb753bac476a911c9ae5d | [
"MIT"
] | null | null | null | docker/dockerTrader/gateway/okcoinGateway/okcoinGateway.py | OceanMT/vnpy_py3 | 0901e9381c54e615247eb753bac476a911c9ae5d | [
"MIT"
] | null | null | null | # encoding: UTF-8
'''
Gateway adapter for vn.okcoin.
Notes:
1. Currently only USD and CNY spot trading is supported; USD futures contracts are not supported yet.
'''
import os
import json
from datetime import datetime
from copy import copy
from threading import Condition
from queue import Queue
from threading import Thread
from time import sleep
from . import vnokcoin
from vtGateway import *
# Price type mapping
priceTypeMap = {}
priceTypeMap['buy'] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE)
priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE)
priceTypeMap['sell'] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE)
priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE)
priceTypeMapReverse = {v: k for k, v in list(priceTypeMap.items())}
# Direction type mapping
directionMap = {}
directionMapReverse = {v: k for k, v in list(directionMap.items())}
# Order status mapping
statusMap = {}
statusMap[-1] = STATUS_CANCELLED
statusMap[0] = STATUS_NOTTRADED
statusMap[1] = STATUS_PARTTRADED
statusMap[2] = STATUS_ALLTRADED
statusMap[4] = STATUS_UNKNOWN
############################################
## Trading contract symbols
############################################
# USD
BTC_USD_SPOT = 'BTC_USD_SPOT'
BTC_USD_THISWEEK = 'BTC_USD_THISWEEK'
BTC_USD_NEXTWEEK = 'BTC_USD_NEXTWEEK'
BTC_USD_QUARTER = 'BTC_USD_QUARTER'
LTC_USD_SPOT = 'LTC_USD_SPOT'
LTC_USD_THISWEEK = 'LTC_USD_THISWEEK'
LTC_USD_NEXTWEEK = 'LTC_USD_NEXTWEEK'
LTC_USD_QUARTER = 'LTC_USD_QUARTER'
# CNY
BTC_CNY_SPOT = 'BTC_CNY_SPOT'
LTC_CNY_SPOT = 'LTC_CNY_SPOT'
# Symbol mapping dictionary
spotSymbolMap = {}
spotSymbolMap['ltc_usd'] = LTC_USD_SPOT
spotSymbolMap['btc_usd'] = BTC_USD_SPOT
spotSymbolMap['ltc_cny'] = LTC_CNY_SPOT
spotSymbolMap['btc_cny'] = BTC_CNY_SPOT
spotSymbolMapReverse = {v: k for k, v in list(spotSymbolMap.items())}
############################################
## Mapping between channels and symbols
############################################
channelSymbolMap = {}
# USD
channelSymbolMap['ok_sub_spotusd_btc_ticker'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_ticker'] = LTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_btc_depth_20'] = BTC_USD_SPOT
channelSymbolMap['ok_sub_spotusd_ltc_depth_20'] = LTC_USD_SPOT
# CNY
channelSymbolMap['ok_sub_spotcny_btc_ticker'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_ticker'] = LTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_btc_depth_20'] = BTC_CNY_SPOT
channelSymbolMap['ok_sub_spotcny_ltc_depth_20'] = LTC_CNY_SPOT
########################################################################
class OkcoinGateway(VtGateway):
"""OkCoin接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='OKCOIN'):
"""Constructor"""
super(OkcoinGateway, self).__init__(eventEngine, gatewayName)
self.api = Api(self)
self.leverage = 0
self.connected = False
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
fileName = self.gatewayName + '_connect.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
try:
f = open(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = 'Failed to read the connection config file, please check'
self.onLog(log)
return
# Parse the JSON config
setting = json.load(f)
try:
host = str(setting['host'])
apiKey = str(setting['apiKey'])
secretKey = str(setting['secretKey'])
trace = setting['trace']
leverage = setting['leverage']
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = 'Connection config is missing required fields, please check'
self.onLog(log)
return
# Initialize the API
self.leverage = leverage
if host == 'CNY':
host = vnokcoin.OKCOIN_CNY
else:
host = vnokcoin.OKCOIN_USD
self.api.active = True
self.api.connect(host, apiKey, secretKey, trace)
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = 'API initialized successfully'
self.onLog(log)
# Start the polling queries
self.initQuery()
self.startQuery()
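# Hedged example (editorial addition): connect() above looks for a JSON file named
# '<gatewayName>_connect.json' (OKCOIN_connect.json by default) next to this module.
# Based on the keys read above, a minimal file would look roughly like this; all
# values below are placeholders:
#
# {
# "host": "CNY",
# "apiKey": "your-api-key",
# "secretKey": "your-secret-key",
# "trace": false,
# "leverage": 1
# }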
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
pass
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.api.spotSendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.spotCancel(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.api.spotUserInfo()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.active = False
self.api.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
# List of query functions to run in rotation
self.qryFunctionList = [self.qryAccount]
self.qryCount = 0 # countdown before the queries are triggered
self.qryTrigger = 2 # trigger threshold for the countdown
self.qryNextFunction = 0 # index of the query function that ran last
self.startQuery()
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
if self.qryCount > self.qryTrigger:
# Reset the countdown
self.qryCount = 0
# Run the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
# Compute the index of the next query function; wrap back to 0 past the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
########################################################################
class Api(vnokcoin.OkCoinApi):
"""OkCoin的API实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(Api, self).__init__()
self.gateway = gateway # gateway object
self.gatewayName = gateway.gatewayName # gateway object name
self.active = False # if True, automatically reconnect after a disconnect
self.cbDict = {}
self.tickDict = {}
self.orderDict = {}
self.localNo = 0 # local order number
self.localNoQueue = Queue() # queue of local order numbers still waiting for a system order ID
self.localNoDict = {} # key: local order number, value: system order ID
self.orderIdDict = {} # key: system order ID, value: local order number
self.cancelDict = {} # key: local order number, value: cancel request
self.initCallback()
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
data = self.readData(evt)[0]
channel = data['channel']
callback = self.cbDict[channel]
callback(data)
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(evt)
self.gateway.onError(error)
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
# 如果尚未连上,则忽略该次断开提示
if not self.gateway.connected:
return
self.gateway.connected = False
self.writeLog('Server connection lost')
# Reconnect
if self.active:
def reconnect():
while not self.gateway.connected:
self.writeLog('Waiting 10 seconds before reconnecting')
sleep(10)
if not self.gateway.connected:
self.reconnect()
t = Thread(target=reconnect)
t.start()
#----------------------------------------------------------------------
def onOpen(self, ws):
"""连接成功"""
self.gateway.connected = True
self.writeLog('Connected to server successfully')
# After connecting, query account and order data
self.spotUserInfo()
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_LTC, '-1')
self.spotOrderInfo(vnokcoin.TRADING_SYMBOL_BTC, '-1')
# After connecting, subscribe to spot trade and account updates
self.subscribeSpotTrades()
self.subscribeSpotUserInfo()
self.subscribeSpotTicker(vnokcoin.SYMBOL_BTC)
self.subscribeSpotTicker(vnokcoin.SYMBOL_LTC)
self.subscribeSpotDepth(vnokcoin.SYMBOL_BTC, vnokcoin.DEPTH_20)
self.subscribeSpotDepth(vnokcoin.SYMBOL_LTC, vnokcoin.DEPTH_20)
# If connected to the USD site, also subscribe to futures-related updates
if self.currency == vnokcoin.CURRENCY_USD:
self.subscribeFutureTrades()
self.subscribeFutureUserInfo()
self.subscribeFuturePositions()
# Push contract information
if self.currency == vnokcoin.CURRENCY_CNY:
l = self.generateCnyContract()
else:
l = self.generateUsdContract()
for contract in l:
contract.gatewayName = self.gatewayName
self.gateway.onContract(contract)
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
#----------------------------------------------------------------------
def initCallback(self):
"""初始化回调函数"""
# USD_SPOT
self.cbDict['ok_sub_spotusd_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotusd_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotusd_ltc_depth_20'] = self.onDepth
self.cbDict['ok_spotusd_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotusd_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotusd_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotusd_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotusd_trade'] = self.onSpotTrade
self.cbDict['ok_spotusd_cancel_order'] = self.onSpotCancelOrder
# CNY_SPOT
self.cbDict['ok_sub_spotcny_btc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_ltc_ticker'] = self.onTicker
self.cbDict['ok_sub_spotcny_btc_depth_20'] = self.onDepth
self.cbDict['ok_sub_spotcny_ltc_depth_20'] = self.onDepth
self.cbDict['ok_spotcny_userinfo'] = self.onSpotUserInfo
self.cbDict['ok_spotcny_orderinfo'] = self.onSpotOrderInfo
self.cbDict['ok_sub_spotcny_userinfo'] = self.onSpotSubUserInfo
self.cbDict['ok_sub_spotcny_trades'] = self.onSpotSubTrades
self.cbDict['ok_spotcny_trade'] = self.onSpotTrade
self.cbDict['ok_spotcny_cancel_order'] = self.onSpotCancelOrder
# USD_FUTURES
#----------------------------------------------------------------------
def onTicker(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['data']
tick.highPrice = float(rawData['high'])
tick.lowPrice = float(rawData['low'])
tick.lastPrice = float(rawData['last'])
tick.volume = float(rawData['vol'].replace(',', ''))
#tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onDepth(self, data):
""""""
if 'data' not in data:
return
channel = data['channel']
symbol = channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
if 'data' not in data:
return
rawData = data['data']
tick.bidPrice1, tick.bidVolume1 = rawData['bids'][0]
tick.bidPrice2, tick.bidVolume2 = rawData['bids'][1]
tick.bidPrice3, tick.bidVolume3 = rawData['bids'][2]
tick.bidPrice4, tick.bidVolume4 = rawData['bids'][3]
tick.bidPrice5, tick.bidVolume5 = rawData['bids'][4]
tick.askPrice1, tick.askVolume1 = rawData['asks'][-1]
tick.askPrice2, tick.askVolume2 = rawData['asks'][-2]
tick.askPrice3, tick.askVolume3 = rawData['asks'][-3]
tick.askPrice4, tick.askVolume4 = rawData['asks'][-4]
tick.askPrice5, tick.askVolume5 = rawData['asks'][-5]
tick.date, tick.time = generateDateTime(rawData['timestamp'])
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def onSpotUserInfo(self, data):
"""现货账户资金推送"""
rawData = data['data']
info = rawData['info']
funds = rawData['info']['funds']
# Position information
for symbol in ['btc', 'ltc', self.currency]:
if symbol in funds['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(funds['freezed'][symbol])
pos.position = pos.frozen + float(funds['free'][symbol])
self.gateway.onPosition(pos)
# Account funds
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = self.gatewayName
account.vtAccountID = account.accountID
account.balance = float(funds['asset']['net'])
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def onSpotSubUserInfo(self, data):
"""现货账户资金推送"""
if 'data' not in data:
return
rawData = data['data']
info = rawData['info']
# Position information
for symbol in ['btc', 'ltc', self.currency]:
if symbol in info['free']:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol
pos.vtSymbol = symbol
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(info['freezed'][symbol])
pos.position = pos.frozen + float(info['free'][symbol])
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def onSpotSubTrades(self, data):
"""成交和委托推送"""
if 'data' not in data:
return
rawData = data['data']
# Local and system order numbers
orderId = str(rawData['orderId'])
localNo = self.orderIdDict[orderId]
# Order information
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[rawData['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(rawData['tradeUnitPrice'])
order.totalVolume = float(rawData['tradeAmount'])
order.direction, priceType = priceTypeMap[rawData['tradeType']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = float(rawData['completedTradeAmount'])
order.status = statusMap[rawData['status']]
self.gateway.onOrder(copy(order))
# Trade information
if 'sigTradeAmount' in rawData and float(rawData['sigTradeAmount'])>0:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = spotSymbolMap[rawData['symbol']]
trade.vtSymbol = order.symbol
trade.tradeID = str(rawData['id'])
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = localNo
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(rawData['sigTradePrice'])
trade.volume = float(rawData['sigTradeAmount'])
trade.direction, priceType = priceTypeMap[rawData['tradeType']]
trade.tradeTime = datetime.now().strftime('%H:%M:%S')
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onSpotOrderInfo(self, data):
"""委托信息查询回调"""
rawData = data['data']
for d in rawData['orders']:
self.localNo += 1
localNo = str(self.localNo)
orderId = str(d['order_id'])
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = spotSymbolMap[d['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = d['price']
order.totalVolume = d['amount']
order.direction, priceType = priceTypeMap[d['type']]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = d['deal_amount']
order.status = statusMap[d['status']]
self.gateway.onOrder(copy(order))
#----------------------------------------------------------------------
def generateSpecificContract(self, contract, symbol):
"""生成合约"""
new = copy(contract)
new.symbol = symbol
new.vtSymbol = symbol
new.name = symbol
return new
#----------------------------------------------------------------------
def generateCnyContract(self):
"""生成CNY合约信息"""
contractList = []
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_CNY_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_CNY_SPOT))
return contractList
#----------------------------------------------------------------------
def generateUsdContract(self):
"""生成USD合约信息"""
contractList = []
# Spot
contract = VtContractData()
contract.exchange = EXCHANGE_OKCOIN
contract.productClass = PRODUCT_SPOT
contract.size = 1
contract.priceTick = 0.01
contractList.append(self.generateSpecificContract(contract, BTC_USD_SPOT))
contractList.append(self.generateSpecificContract(contract, LTC_USD_SPOT))
# Futures
contract.productClass = PRODUCT_FUTURES
contractList.append(self.generateSpecificContract(contract, BTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, BTC_USD_QUARTER))
contractList.append(self.generateSpecificContract(contract, LTC_USD_THISWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_NEXTWEEK))
contractList.append(self.generateSpecificContract(contract, LTC_USD_QUARTER))
return contractList
#----------------------------------------------------------------------
def onSpotTrade(self, data):
"""委托回报"""
rawData = data['data']
orderId = rawData['order_id']
# Although the websocket API returns order IDs asynchronously, testing shows they come
# back in first-sent, first-returned order, so pop the local order number queued earlier
# and map it to the system order ID that was just pushed.
localNo = self.localNoQueue.get_nowait()
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
# If a cancel request was issued before the system order ID came back,
# perform the cancellation now.
if localNo in self.cancelDict:
req = self.cancelDict[localNo]
self.spotCancel(req)
del self.cancelDict[localNo]
#----------------------------------------------------------------------
def onSpotCancelOrder(self, data):
"""撤单回报"""
pass
#----------------------------------------------------------------------
def spotSendOrder(self, req):
"""发单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
type_ = priceTypeMapReverse[(req.direction, req.priceType)]
self.spotTrade(symbol, type_, str(req.price), str(req.volume))
# Increment the local order number, push its string form onto the queue, and return a vtOrderID based on it
self.localNo += 1
self.localNoQueue.put(str(self.localNo))
vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
return vtOrderID
#----------------------------------------------------------------------
def spotCancel(self, req):
"""撤单"""
symbol = spotSymbolMapReverse[req.symbol][:4]
localNo = req.orderID
if localNo in self.localNoDict:
orderID = self.localNoDict[localNo]
self.spotCancelOrder(symbol, orderID)
else:
# If the client sent a cancel request before the system order ID was returned, store it
# in cancelDict and carry out the cancellation once the ID arrives.
self.cancelDict[localNo] = req
#----------------------------------------------------------------------
def generateDateTime(s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time | 34.204802 | 86 | 0.503531 |
2a37f1cfc1e1bdb2b99c811e51ef09f279492252 | 9,931 | py | Python | fairseq/modules/lightconv_layer/cuda_function_gen.py | aiboxlab/TSPNet | 359402151afd262857cde6fae3fc13445d73c9a7 | [
"MIT"
] | 83 | 2020-10-11T04:44:52.000Z | 2022-01-11T13:59:50.000Z | fairseq/modules/lightconv_layer/cuda_function_gen.py | aiboxlab/TSPNet | 359402151afd262857cde6fae3fc13445d73c9a7 | [
"MIT"
] | 9 | 2020-12-12T10:12:00.000Z | 2021-03-28T16:05:08.000Z | fairseq/modules/lightconv_layer/cuda_function_gen.py | aiboxlab/TSPNet | 359402151afd262857cde6fae3fc13445d73c9a7 | [
"MIT"
] | 11 | 2020-12-17T13:38:56.000Z | 2022-03-12T23:39:41.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
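# Editorial note (comment added for clarity, not in the upstream file): running this module
# writes lightconv_cuda_forward.cu and lightconv_cuda_backward.cu into the current directory,
# unrolling one templated kernel launch per supported (filter size, sequence length, padding)
# combination so the generated .cu files can dispatch without further codegen at build time.
# It is presumably run by hand before compiling the lightconv CUDA extension -- that build
# step is an assumption, not something this file encodes:
#
# python cuda_function_gen.py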
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {
at::DeviceGuard g(input.device());
const auto minibatch = input.size(0);
const auto numFeatures = input.size(1);
const auto sequenceLength = input.size(2);
const auto numHeads = filters.size(0);
const auto filterSize = filters.size(1);
const auto numFiltersInBlock = numFeatures / numHeads;
const dim3 blocks(minibatch, numFeatures);
auto output = at::zeros_like(input);
auto stream = at::cuda::getCurrentCUDAStream();
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
switch(filterSize) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {pad}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{
lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>
<<<blocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
output.data<scalar_t>());
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;
}
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;
}
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
final_return = """
}
return {output};
}
"""
with open("lightconv_cuda_forward.cu", 'w') as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [k // 2, k - 1]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
def gen_backward():
head = """
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "lightconv_cuda.cuh"
std::vector<at::Tensor> lightconv_cuda_backward(
at::Tensor gradOutput,
int padding_l,
at::Tensor input,
at::Tensor filters) {
// gradWrtInput
const int minibatch = input.size(0);
const int numFeatures = input.size(1);
const int sequenceLength = input.size(2);
const int numHeads = filters.size(0);
const int filterSize = filters.size(1);
const dim3 gradBlocks(minibatch, numFeatures);
const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);
const dim3 weightGradSecondpassBlocks(numHeads, filterSize);
const int numFiltersInBlock = numFeatures / numHeads;
auto gradInput = at::zeros_like(input);
auto gradFilters = at::zeros_like(filters);
at::DeviceGuard g(input.device());
auto stream = at::cuda::getCurrentCUDAStream();
switch(filterSize) {
"""
sequence_if = """
if (sequenceLength <= {seq}) {{
"""
case_k = """
case {k}:
"""
main_block = """
if (padding_l == {p}) {{
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{
lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
gradOutput.data<scalar_t>(),
filters.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
gradInput.data<scalar_t>());
"""
weight_grad_short = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>
<<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
numHeads,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
weight_grad = """
at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));
lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>
<<<gradBlocks, {b_size}, 0, stream>>>(
input.data<scalar_t>(),
gradOutput.data<scalar_t>(),
minibatch,
sequenceLength,
numFeatures,
numFiltersInBlock,
tempSumGradFilters.data<float>()
);
lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>
<<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(
tempSumGradFilters.data<float>(),
minibatch,
numFiltersInBlock,
gradFilters.data<scalar_t>()
);
}}));
}} else
"""
bad_padding = """
{
std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;
}
"""
breakout = """
break;
"""
bad_filter = """
default:
std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;
"""
con_else = """
} else
"""
final_else = """
{
switch(filterSize) {
"""
last_return = """
}
return {gradInput, gradFilters};
}
"""
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [32 * x for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, -1, -1, -1]
max_mem = [-1, -1, -1, -1, -1, 192, 96, 64]
with open("lightconv_cuda_backward.cu", 'w') as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (t == -1 or seq <= t) and (mem == -1 or seq < mem):
backward.write(sequence_if.format(seq=seq))
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [k // 2, k - 1]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
if __name__ == "__main__":
gen_forward()
gen_backward()
| 34.244828 | 142 | 0.479509 |
73a687e272f29983d0fac056745954d9c353740d | 461 | py | Python | src/mspelling/cli.py | mario-bermonti/computerized-spelling-measure | 7140c2407d3324a7b9f867d45c2bf4dd0978c8dd | [
"BSD-3-Clause"
] | 1 | 2021-06-25T16:46:44.000Z | 2021-06-25T16:46:44.000Z | src/mspelling/cli.py | mario-bermonti/computerized-spelling-measure | 7140c2407d3324a7b9f867d45c2bf4dd0978c8dd | [
"BSD-3-Clause"
] | 8 | 2021-12-27T04:11:34.000Z | 2022-03-12T01:06:12.000Z | src/mspelling/cli.py | mario-bermonti/computerized-spelling-measure | 7140c2407d3324a7b9f867d45c2bf4dd0978c8dd | [
"BSD-3-Clause"
] | null | null | null | """Console script for mspelling."""
import click
from mspelling import __version__
@click.command()
@click.version_option(version=__version__)
def main() -> int:
"""Console script for mspelling."""
click.echo("This is the cli for the mspelling project")
click_url = "https://click.palletsprojects.com/"
click.echo(f"See the click docs at {click_url} for more details")
return 0
if __name__ == "__main__":
main() # pragma: no cover
| 24.263158 | 69 | 0.698482 |
524e3f6e0d6627460c58586e64beb5257570ada8 | 10,170 | py | Python | examples/cnn/model/xceptionnet.py | chrishkchris/incubator-singa | ced9e9d44c200d709db5a2354076390788986b77 | [
"Apache-2.0"
] | 2 | 2021-04-22T02:56:43.000Z | 2021-04-22T02:56:46.000Z | examples/cnn/model/xceptionnet.py | guoshnBJTU/singa | f04d197ee15777504bf80a8cb77666b8cacb4b94 | [
"Apache-2.0"
] | 3 | 2020-09-09T11:51:47.000Z | 2021-01-15T12:55:06.000Z | examples/cnn/model/xceptionnet.py | zlheui/singa | ced9e9d44c200d709db5a2354076390788986b77 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# the code is modified from
# https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/xception.py
from singa import layer
from singa import model
class Block(layer.Layer):
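# Descriptive comment (added for clarity, not in the original source): Block is the
# Xception-style residual unit -- `reps` SeparableConv2d + BatchNorm stages with ReLU
# (optionally skipping the very first ReLU), an optional strided MaxPool2d, and a skip
# branch that is either the identity or a strided 1x1 Conv2d + BatchNorm whenever the
# channel count or stride changes; the two paths are merged with layer.Add() in forward().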
def __init__(self,
in_filters,
out_filters,
reps,
strides=1,
padding=0,
start_with_relu=True,
grow_first=True):
super(Block, self).__init__()
if out_filters != in_filters or strides != 1:
self.skip = layer.Conv2d(in_filters,
out_filters,
1,
stride=strides,
padding=padding,
bias=False)
self.skipbn = layer.BatchNorm2d(out_filters)
else:
self.skip = None
self.layers = []
filters = in_filters
if grow_first:
self.layers.append(layer.ReLU())
self.layers.append(
layer.SeparableConv2d(in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(layer.BatchNorm2d(out_filters))
filters = out_filters
for i in range(reps - 1):
self.layers.append(layer.ReLU())
self.layers.append(
layer.SeparableConv2d(filters,
filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(layer.BatchNorm2d(filters))
if not grow_first:
self.layers.append(layer.ReLU())
self.layers.append(
layer.SeparableConv2d(in_filters,
out_filters,
3,
stride=1,
padding=1,
bias=False))
self.layers.append(layer.BatchNorm2d(out_filters))
if not start_with_relu:
self.layers = self.layers[1:]
else:
self.layers[0] = layer.ReLU()
if strides != 1:
self.layers.append(layer.MaxPool2d(3, strides, padding + 1))
self.register_layers(*self.layers)
self.add = layer.Add()
def forward(self, x):
y = self.layers[0](x)
for lyr in self.layers[1:]:
if isinstance(y, tuple):
y = y[0]
y = lyr(y)
if self.skip is not None:
skip = self.skip(x)
skip = self.skipbn(skip)
else:
skip = x
y = self.add(y, skip)
return y
class Xception(model.Model):
"""
Xception optimized for the ImageNet dataset, as specified in
https://arxiv.org/pdf/1610.02357.pdf
"""
def __init__(self, num_classes=10, num_channels=3):
""" Constructor
Args:
num_classes: number of classes
"""
super(Xception, self).__init__()
self.num_classes = num_classes
self.input_size = 299
self.dimension = 4
self.conv1 = layer.Conv2d(num_channels, 32, 3, 2, 0, bias=False)
self.bn1 = layer.BatchNorm2d(32)
self.relu1 = layer.ReLU()
self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
self.bn2 = layer.BatchNorm2d(64)
self.relu2 = layer.ReLU()
# do relu here
self.block1 = Block(64,
128,
2,
2,
padding=0,
start_with_relu=False,
grow_first=True)
self.block2 = Block(128,
256,
2,
2,
padding=0,
start_with_relu=True,
grow_first=True)
self.block3 = Block(256,
728,
2,
2,
padding=0,
start_with_relu=True,
grow_first=True)
self.block4 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block5 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block6 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block7 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block8 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block9 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block10 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block11 = Block(728,
728,
3,
1,
start_with_relu=True,
grow_first=True)
self.block12 = Block(728,
1024,
2,
2,
start_with_relu=True,
grow_first=False)
self.conv3 = layer.SeparableConv2d(1024, 1536, 3, 1, 1)
self.bn3 = layer.BatchNorm2d(1536)
self.relu3 = layer.ReLU()
# do relu here
self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
self.bn4 = layer.BatchNorm2d(2048)
self.relu4 = layer.ReLU()
self.globalpooling = layer.MaxPool2d(10, 1)
self.flatten = layer.Flatten()
self.fc = layer.Linear(num_classes)
self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
def features(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu3(x)
x = self.conv4(x)
x = self.bn4(x)
return x
def logits(self, features):
x = self.relu4(features)
x = self.globalpooling(x)
x = self.flatten(x)
x = self.fc(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def train_one_batch(self, x, y, dist_option, spars):
out = self.forward(x)
loss = self.softmax_cross_entropy(out, y)
if dist_option == 'fp32':
self.optimizer(loss)
elif dist_option == 'fp16':
self.optimizer.backward_and_update_half(loss)
elif dist_option == 'partialUpdate':
self.optimizer.backward_and_partial_update(loss)
elif dist_option == 'sparseTopK':
self.optimizer.backward_and_sparse_update(loss,
topK=True,
spars=spars)
elif dist_option == 'sparseThreshold':
self.optimizer.backward_and_sparse_update(loss,
topK=False,
spars=spars)
return out, loss
def set_optimizer(self, optimizer):
self.optimizer = optimizer
def create_model(pretrained=False, **kwargs):
"""Constructs a Xceptionnet model.
Args:
pretrained (bool): If True, returns a model pre-trained
"""
model = Xception(**kwargs)
return model
__all__ = ['Xception', 'create_model']
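# --- Illustrative usage (hedged sketch, not part of the original example) ---
# Builds the network only; device placement, the optimizer and the training
# loop are assumed to be handled by the surrounding training script.
if __name__ == '__main__':
    net = create_model(num_classes=10, num_channels=3)
    print(type(net).__name__, net.num_classes, net.input_size)  # Xception 10 299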
| 32.912621 | 101 | 0.436775 |
b1d6a0e609e66b4ab1b9cb38a728c7ae6bef72ab | 5,041 | py | Python | docs/conf.py | dem4ply/chibi_gob_mx_elasticsearch | 7b4a5b35ad79817db0f5d5cc6705f085b4708a1d | [
"WTFPL"
] | null | null | null | docs/conf.py | dem4ply/chibi_gob_mx_elasticsearch | 7b4a5b35ad79817db0f5d5cc6705f085b4708a1d | [
"WTFPL"
] | null | null | null | docs/conf.py | dem4ply/chibi_gob_mx_elasticsearch | 7b4a5b35ad79817db0f5d5cc6705f085b4708a1d | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# chibi_gob_mx_elasticsearch documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import chibi_gob_mx_elasticsearch
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'chibi_gob_mx_elasticsearch'
copyright = u"2020, dem4ply"
author = u"dem4ply"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = chibi_gob_mx_elasticsearch.__version__
# The full version, including alpha/beta/rc tags.
release = chibi_gob_mx_elasticsearch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'chibi_gob_mx_elasticsearchdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'chibi_gob_mx_elasticsearch.tex',
u'chibi_gob_mx_elasticsearch Documentation',
u'dem4ply', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chibi_gob_mx_elasticsearch',
u'chibi_gob_mx_elasticsearch Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'chibi_gob_mx_elasticsearch',
u'chibi_gob_mx_elasticsearch Documentation',
author,
'chibi_gob_mx_elasticsearch',
'One line description of project.',
'Miscellaneous'),
]
| 30.737805 | 79 | 0.698274 |
ad76ff0c1ce3b783b372db913208704f35e83f63 | 17 | py | Python | markovGames/examples/__init__.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | markovGames/examples/__init__.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | markovGames/examples/__init__.py | rohit-konda/markovGames | d6dd1b8a11f1c95658a468f9e471aecfcf0e6839 | [
"MIT"
] | null | null | null | name = 'examples' | 17 | 17 | 0.705882 |
011b6dd9c401f68d2291de294d516a7d663195b0 | 1,515 | py | Python | competitive_programming/python_template_ext.py | hey24sheep/code_templates | 39a766676fc8ad4f82e5c926c3b06fa3531bf028 | [
"MIT"
] | null | null | null | competitive_programming/python_template_ext.py | hey24sheep/code_templates | 39a766676fc8ad4f82e5c926c3b06fa3531bf028 | [
"MIT"
] | null | null | null | competitive_programming/python_template_ext.py | hey24sheep/code_templates | 39a766676fc8ad4f82e5c926c3b06fa3531bf028 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------
# Created By : hey24sheep.com
# Created Date: 11th Feb 2022
# License : MIT
# version = 1.0
#
# Description : A small and quick template file for competitive programming
# --------------------------------------------------------------------------------------
import sys, io, os, math, bisect
from collections import Counter, defaultdict, OrderedDict, deque
from itertools import permutations, combinations
from sys import stdin, stdout
# set max recursion limit
sys.setrecursionlimit(100000000)
# 10**9+7 prime
mod = 1000000007
mod1 = 998244353
# helpers
ceil = lambda x: int(x) if (x == int(x)) else int(x) + 1
ceildiv = lambda x, d: x // d if (x % d == 0) else x // d + 1
def isprime(n):
if (n == 1 or n == 0): return False
for i in range(2, int(n**(1 / 2)) + 1):
if (n % i == 0):
return False
return True
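# quick self-checks for the helpers above (illustrative, safe to delete):
assert ceil(2.3) == 3 and ceil(2.0) == 2
assert ceildiv(7, 2) == 4 and ceildiv(8, 2) == 4
assert isprime(7) and not isprime(9) and not isprime(1)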
# fast input
input = stdin.readline
get_input = lambda: stdin.readline().strip()
get_int = lambda: int(get_input())
get_list = lambda: get_input().split()
get_int_list = lambda: list(map(int, get_list()))
get_float_list = lambda: list(map(float, get_list()))
# fast output
output_float = lambda val: (stdout.write(f"{val:.2f}\n") and stdout.flush())
output = lambda val: (stdout.write(str(val) + "\n") and stdout.flush())
# solve
testcases = get_int()
for t in range(1, testcases + 1):
n = get_input()
s = get_input()
# print result
# print(f'Case #{t}: {result}')
| 31.5625 | 88 | 0.584818 |
a780de2130486c3efdffb4c093720b046b77f11e | 4,134 | py | Python | torch_geometric/nn/conv/res_gated_graph_conv.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 2,350 | 2021-09-12T08:32:50.000Z | 2022-03-31T18:09:36.000Z | torch_geometric/nn/conv/res_gated_graph_conv.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 588 | 2021-09-12T08:49:08.000Z | 2022-03-31T21:02:13.000Z | torch_geometric/nn/conv/res_gated_graph_conv.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 505 | 2021-09-13T13:13:32.000Z | 2022-03-31T15:54:00.000Z | from typing import Callable, Optional, Tuple, Union
from torch import Tensor
from torch.nn import Parameter, Sigmoid
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, PairTensor
from ..inits import zeros
class ResGatedGraphConv(MessagePassing):
r"""The residual gated graph convolutional operator from the
`"Residual Gated Graph ConvNets" <https://arxiv.org/abs/1711.07553>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i +
\sum_{j \in \mathcal{N}(i)} \eta_{i,j} \odot \mathbf{W}_2 \mathbf{x}_j
where the gate :math:`\eta_{i,j}` is defined as
.. math::
\eta_{i,j} = \sigma(\mathbf{W}_3 \mathbf{x}_i + \mathbf{W}_4
\mathbf{x}_j)
with :math:`\sigma` denoting the sigmoid function.
Args:
in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
derive the size from the first input(s) to the forward method.
A tuple corresponds to the sizes of source and target
dimensionalities.
out_channels (int): Size of each output sample.
act (callable, optional): Gating function :math:`\sigma`.
(default: :meth:`torch.nn.Sigmoid()`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add transformed root node features to the output.
(default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **inputs:**
node features :math:`(|\mathcal{V}|, F_{in})` or
:math:`((|\mathcal{V_s}|, F_{s}), (|\mathcal{V_t}|, F_{t}))`
if bipartite,
edge indices :math:`(2, |\mathcal{E}|)`
- **outputs:** node features :math:`(|\mathcal{V}|, F_{out})` or
:math:`(|\mathcal{V_t}|, F_{out})` if bipartite
"""
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
act: Optional[Callable] = Sigmoid(),
root_weight: bool = True,
bias: bool = True,
**kwargs,
):
kwargs.setdefault('aggr', 'add')
super().__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.act = act
self.root_weight = root_weight
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.lin_key = Linear(in_channels[1], out_channels)
self.lin_query = Linear(in_channels[0], out_channels)
self.lin_value = Linear(in_channels[0], out_channels)
if root_weight:
self.lin_skip = Linear(in_channels[1], out_channels, bias=False)
else:
self.register_parameter('lin_skip', None)
if bias:
self.bias = Parameter(Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
self.lin_key.reset_parameters()
self.lin_query.reset_parameters()
self.lin_value.reset_parameters()
if self.lin_skip is not None:
self.lin_skip.reset_parameters()
if self.bias is not None:
zeros(self.bias)
def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
""""""
if isinstance(x, Tensor):
x: PairTensor = (x, x)
k = self.lin_key(x[1])
q = self.lin_query(x[0])
v = self.lin_value(x[0])
# propagate_type: (k: Tensor, q: Tensor, v: Tensor)
out = self.propagate(edge_index, k=k, q=q, v=v, size=None)
if self.root_weight:
out += self.lin_skip(x[1])
if self.bias is not None:
out += self.bias
return out
def message(self, k_i: Tensor, q_j: Tensor, v_j: Tensor) -> Tensor:
return self.act(k_i + q_j) * v_j
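# --- Illustrative usage (hedged sketch, not part of the original file) ---
# Assumes torch and torch_geometric are installed; shapes are examples only.
#
#     import torch
#     from torch_geometric.nn import ResGatedGraphConv
#
#     x = torch.randn(4, 16)                      # 4 nodes, 16 features each
#     edge_index = torch.tensor([[0, 1, 2, 3],
#                                [1, 2, 3, 0]])   # directed 4-cycle
#     conv = ResGatedGraphConv(16, 32)
#     out = conv(x, edge_index)                   # -> shape [4, 32]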
| 33.885246 | 79 | 0.599419 |
5bdcb24f597d7804d3fca6352b03e396a2d34e63 | 2,077 | py | Python | refactor_json_format.py | flores-jacob/philippine-regions-provinces-cities-municipalities-baranggays | 3c993f5669bc7ca62d2c5740eb1733923e61eac2 | [
"MIT"
] | 79 | 2018-11-22T05:10:27.000Z | 2022-02-05T06:37:51.000Z | refactor_json_format.py | kentastudillo/philippine-regions-provinces-cities-municipalities-barangays | 0ae4a49d3d5e5e1749575a1d028da6dac4020b35 | [
"MIT"
] | 1 | 2020-07-13T10:32:14.000Z | 2022-01-11T12:06:14.000Z | refactor_json_format.py | kentastudillo/philippine-regions-provinces-cities-municipalities-barangays | 0ae4a49d3d5e5e1749575a1d028da6dac4020b35 | [
"MIT"
] | 25 | 2019-04-06T07:41:46.000Z | 2021-11-06T13:12:41.000Z | # This script is meant to refactor the original file into the new format.
# The idea is to make the formatting more consistent by removing redundant
# and unnecessary list organization for the municipalities.
# In effect, only dictionaries are used all throughout the file, except
# for the barangays which are still in list format
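# Illustrative shape change (hedged example; the key names below are made up):
#   before: {"REGION": {"province_list": {"PROVINCE": {"municipality_list": [
#               {"MUNICIPALITY A": {...}}, {"MUNICIPALITY B": {...}}]}}}}
#   after:  {"REGION": {"province_list": {"PROVINCE": {"municipality_list": {
#               "MUNICIPALITY A": {...}, "MUNICIPALITY B": {...}}}}}}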
from collections import OrderedDict
import json
JSON_FILE = "./philippine_provinces_cities_municipalities_and_barangays_2016.json"
NEW_JSON_FILE = "./philippine_provinces_cities_municipalities_and_barangays_2016_v2.json"
with open(JSON_FILE) as json_file:
data = json.load(json_file)
modified_dict = {}
for region_key, region_contents in data.items():
modified_dict[region_key] = {}
modified_dict[region_key]["region_name"] = region_contents["region_name"]
modified_dict[region_key]["province_list"] = {}
modified_province_list = modified_dict[region_key]["province_list"]
province_dict = region_contents["province_list"]
for province_key, province_contents in province_dict.items():
modified_province_list[province_key] = {}
modified_province_list[province_key]["municipality_list"] = {}
modified_municipality_list = modified_province_list[province_key]["municipality_list"]
for municipality_item in province_contents["municipality_list"]:
for municipality_key, municipality_contents in sorted(municipality_item.items(), key=lambda x: x[0]):
modified_municipality_list[municipality_key] = municipality_contents
# sort by municipality name
modified_dict[region_key]["province_list"][province_key]["municipality_list"] = OrderedDict(sorted(modified_municipality_list.items(), key=lambda x: x[0]))
# sort by province name
modified_dict[region_key]["province_list"] = OrderedDict(sorted(modified_province_list.items(), key=lambda x: x[0]))
# sort by region
modified_dict = OrderedDict(sorted(modified_dict.items(), key=lambda x: x[0]))
with open(NEW_JSON_FILE, "w") as outfile:
json.dump(modified_dict, outfile, indent=2)
| 47.204545 | 163 | 0.76649 |
ce52bf45391c5df802a575b5e6792c3ab687b569 | 3,759 | py | Python | Menu/HelloTF2.py | ylu4/Hands-on-ML-2nd-rearranged | 87be431cc88b3806a7d726d623ad1688608aab8b | [
"Apache-2.0"
] | null | null | null | Menu/HelloTF2.py | ylu4/Hands-on-ML-2nd-rearranged | 87be431cc88b3806a7d726d623ad1688608aab8b | [
"Apache-2.0"
] | null | null | null | Menu/HelloTF2.py | ylu4/Hands-on-ML-2nd-rearranged | 87be431cc88b3806a7d726d623ad1688608aab8b | [
"Apache-2.0"
] | null | null | null | # From https://www.tensorflow.org/beta/
import tensorflow as tf
import time
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
t0 = time.time()
# Load and prepare the MNIST dataset.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Build the tf.keras.Sequential model by stacking layers, choose
# loss function and optimizers for training.
model0 = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape = (28, 28)),
tf.keras.layers.Dense(128, activation = 'relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation = 'softmax')])
model0.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
# Train and evaluate the model.
model0.fit(x_train, y_train, epochs = 5)
result0 = model0.evaluate(x_test, y_test)
elapse0 = time.time() - t0
print(result0, elapse0)
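# result0 is [test_loss, test_accuracy]; with this setup the accuracy is
# typically around 0.97-0.98 after 5 epochs (illustrative figure, not from the source).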
# Use tf.data to batch and shuffle the dataset.
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices(
(x_test, y_test)).batch(32)
# Build the tf.keras model using the Keras model subclassing API.
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation = 'relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation = 'relu')
self.d2 = Dense(10, activation = 'softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
model1 = MyModel()
# Choose an optimizer and loss function for training.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# Select metrics to measure the loss and the accuracy of the model.
# These metrics accumulate the values over epochs and then print the overall result.
train_loss = tf.keras.metrics.Mean(name = 'train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name = 'train_accuracy')
test_loss = tf.keras.metrics.Mean(name = 'test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name = 'test_accuracy')
# Use GradientTape to train the model.
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model1(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model1.trainable_variables)
optimizer.apply_gradients(zip(gradients, model1.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
# Test the model.
@tf.function
def test_step(images, labels):
predictions = model1(images)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch + 1,
train_loss.result(),
train_accuracy.result() * 100,
test_loss.result(),
test_accuracy.result() * 100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
elapse1 = time.time() - elapse0 - t0
print(elapse1)
| 32.686957 | 88 | 0.688747 |
631f63abe3a5570764fec23bd133aff121f168d6 | 217 | py | Python | ch5/5-6.stage_of_life.py | AngangGuo/pycrash | de48aa4198022c301f5cd3ce388c195a177be1b5 | [
"MIT"
] | null | null | null | ch5/5-6.stage_of_life.py | AngangGuo/pycrash | de48aa4198022c301f5cd3ce388c195a177be1b5 | [
"MIT"
] | null | null | null | ch5/5-6.stage_of_life.py | AngangGuo/pycrash | de48aa4198022c301f5cd3ce388c195a177be1b5 | [
"MIT"
] | null | null | null | age = 20
if age < 2:
print("a baby")
elif age < 4:
print("a toddler")
elif age < 13:
print("a kid")
elif age < 20:
print("a teenager")
elif age < 65:
print("an adult")
else:
print("an elder")
| 14.466667 | 23 | 0.552995 |
e8d12a39f94d663e0c873ef62a8fd22f34ab43ba | 3,359 | py | Python | src/evidently/dashboard/widgets/prob_class_pred_distr_widget.py | caron14/evidently | 5e0d4450614ad237c5321462ac7f725f54e7e8f4 | [
"Apache-2.0"
] | 1 | 2022-01-22T20:56:10.000Z | 2022-01-22T20:56:10.000Z | src/evidently/dashboard/widgets/prob_class_pred_distr_widget.py | billyotieno/evidently | 10e41bcdd1108c5c7516a92a198da48ff16a134f | [
"Apache-2.0"
] | null | null | null | src/evidently/dashboard/widgets/prob_class_pred_distr_widget.py | billyotieno/evidently | 10e41bcdd1108c5c7516a92a198da48ff16a134f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import json
from typing import Optional
import pandas as pd
import numpy as np
import plotly.figure_factory as ff
from evidently import ColumnMapping
from evidently.analyzers.prob_classification_performance_analyzer import ProbClassificationPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.dashboard.widgets.widget import Widget, RED, GREY
class ProbClassPredDistrWidget(Widget):
def __init__(self, title: str, dataset: str = 'reference'):
super().__init__(title)
self.dataset = dataset # reference or current
def analyzers(self):
return [ProbClassificationPerformanceAnalyzer]
def calculate(self,
reference_data: pd.DataFrame,
current_data: Optional[pd.DataFrame],
column_mapping: ColumnMapping,
analyzers_results) -> Optional[BaseWidgetInfo]:
results = analyzers_results[ProbClassificationPerformanceAnalyzer]
if results['utility_columns']['target'] is None or results['utility_columns']['prediction'] is None:
if self.dataset == 'reference':
raise ValueError(f"Widget [{self.title}] requires 'target' and 'prediction' columns")
return None
if self.dataset == 'current':
dataset_to_plot = current_data.copy(deep=False) if current_data is not None else None
else:
dataset_to_plot = reference_data.copy(deep=False)
if dataset_to_plot is None:
if self.dataset == 'reference':
raise ValueError(f"Widget [{self.title}] requires reference dataset but it is None")
return None
dataset_to_plot.replace([np.inf, -np.inf], np.nan, inplace=True)
dataset_to_plot.dropna(axis=0, how='any', inplace=True)
# plot distributions
graphs = []
for label in results['utility_columns']['prediction']:
pred_distr = ff.create_distplot(
[
dataset_to_plot[dataset_to_plot[results['utility_columns']['target']] == label][label],
dataset_to_plot[dataset_to_plot[results['utility_columns']['target']] != label][label]
],
[str(label), "other"],
colors=[RED, GREY],
bin_size=0.05,
show_curve=False,
show_rug=True
)
pred_distr.update_layout(
xaxis_title="Probability",
yaxis_title="Share",
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
pred_distr_json = json.loads(pred_distr.to_json())
graphs.append({
"id": "tab_" + str(label),
"title": str(label),
"graph": {
"data": pred_distr_json["data"],
"layout": pred_distr_json["layout"],
}
})
return BaseWidgetInfo(
title=self.title,
type="tabbed_graph",
size=1 if current_data is not None else 2,
params={
"graphs": graphs
},
)
| 34.27551 | 110 | 0.568026 |
9709481a4e7e9e07f5757e9c6c715174b88358a2 | 8,337 | py | Python | linebot/models/messages.py | twbabyduck/line-bot-sdk-python | 79a2c155b016a199916935e8133e0651e9477cff | [
"Apache-2.0"
] | 2 | 2021-09-07T13:06:50.000Z | 2021-09-14T08:14:45.000Z | linebot/models/messages.py | TaroHub/line-bot-sdk-python | ea6fe797fb42d59a8998eae6ff7497932fec5565 | [
"Apache-2.0"
] | null | null | null | linebot/models/messages.py | TaroHub/line-bot-sdk-python | ea6fe797fb42d59a8998eae6ff7497932fec5565 | [
"Apache-2.0"
] | 1 | 2020-08-16T08:26:47.000Z | 2020-08-16T08:26:47.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.messages module."""
from __future__ import unicode_literals
from abc import ABCMeta
from future.utils import with_metaclass
from linebot.models.emojis import Emojis
from .mention import Mention
from .mentionee import Mentionee
from .base import Base
class Message(with_metaclass(ABCMeta, Base)):
"""Abstract Base Class of Message."""
def __init__(self, id=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param kwargs:
"""
super(Message, self).__init__(**kwargs)
self.type = None
self.id = id
class TextMessage(Message):
"""TextMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-text
Message object which contains the text sent from the source.
"""
def __init__(self, id=None, text=None, emojis=None, mention=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str text: Message text
:param List emojis: Array of LINE emoji objects
:param object mention: LINE mention object
:param kwargs:
"""
super(TextMessage, self).__init__(id=id, **kwargs)
self.type = 'text'
self.text = text
if emojis:
new_emojis = []
for emoji in emojis:
emoji_object = self.get_or_new_from_json_dict(
emoji, Emojis
)
if emoji_object:
new_emojis.append(emoji_object)
self.emojis = new_emojis
else:
self.emojis = emojis
if mention:
mention_object = self.get_or_new_from_json_dict(
mention, Mention
)
mentionees = []
for mentionee in mention_object.mentionees:
mentionee_object = self.get_or_new_from_json_dict(
mentionee, Mentionee
)
if mentionee_object:
mentionees.append(mentionee_object)
self.mention = Mention(mentionees)
else:
self.mention = mention
class ImageMessage(Message):
"""ImageMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-image
Message object which contains the image content sent from the source.
The binary image data can be retrieved with the Content API.
"""
def __init__(self, id=None, content_provider=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param content_provider: ContentProvider object
:type content_provider:
:py:class:`linebot.models.messages.ContentProvider`
:param kwargs:
"""
super(ImageMessage, self).__init__(id=id, **kwargs)
self.type = 'image'
self.content_provider = self.get_or_new_from_json_dict(
content_provider, ContentProvider
)
class VideoMessage(Message):
"""VideoMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-video
Message object which contains the video content sent from the source.
The binary video data can be retrieved with the Content API.
"""
def __init__(self, id=None, duration=None, content_provider=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param long duration: Length of video file (milliseconds)
:param content_provider: ContentProvider object
:type content_provider:
:py:class:`linebot.models.messages.ContentProvider`
:param kwargs:
"""
super(VideoMessage, self).__init__(id=id, **kwargs)
self.type = 'video'
self.duration = duration
self.content_provider = self.get_or_new_from_json_dict(
content_provider, ContentProvider
)
class AudioMessage(Message):
"""AudioMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-audio
Message object which contains the audio content sent from the source.
The binary audio data can be retrieved with the Content API.
"""
def __init__(self, id=None, duration=None, content_provider=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param long duration: Length of audio file (milliseconds)
:param content_provider: ContentProvider object
:type content_provider:
:py:class:`linebot.models.messages.ContentProvider`
:param kwargs:
"""
super(AudioMessage, self).__init__(id=id, **kwargs)
self.type = 'audio'
self.duration = duration
self.content_provider = self.get_or_new_from_json_dict(
content_provider, ContentProvider
)
class LocationMessage(Message):
"""LocationMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-location
"""
def __init__(self, id=None, title=None, address=None, latitude=None, longitude=None,
**kwargs):
"""__init__ method.
:param str id: Message ID
:param str title: Title
:param str address: Address
:param float latitude: Latitude
:param float longitude: Longitude
:param kwargs:
"""
super(LocationMessage, self).__init__(id=id, **kwargs)
self.type = 'location'
self.title = title
self.address = address
self.latitude = latitude
self.longitude = longitude
class StickerMessage(Message):
"""StickerMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-sticker
Message object which contains the sticker data sent from the source.
For a list of basic LINE stickers and sticker IDs, see sticker list.
"""
def __init__(self, id=None, package_id=None, sticker_id=None,
sticker_resource_type=None, keywords=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str package_id: Package ID
:param str sticker_id: Sticker ID
:param str sticker_resource_type: Sticker resource type
:param list[str] keywords: List of up to 15 keywords describing the sticker
:param kwargs:
"""
super(StickerMessage, self).__init__(id=id, **kwargs)
self.type = 'sticker'
self.package_id = package_id
self.sticker_id = sticker_id
self.sticker_resource_type = sticker_resource_type
self.keywords = keywords
class FileMessage(Message):
"""FileMessage.
https://developers.line.biz/en/reference/messaging-api/#wh-file
Message object which contains the file content sent from the source.
The binary file data can be retrieved with the Content API.
"""
def __init__(self, id=None, file_name=None, file_size=None, **kwargs):
"""__init__ method.
:param str id: Message ID
:param str file_name: File Name
:param int file_size: File Size
:param kwargs:
"""
super(FileMessage, self).__init__(id=id, **kwargs)
self.type = 'file'
self.file_size = file_size
self.file_name = file_name
class ContentProvider(Base):
"""Content provider."""
def __init__(self, type=None, original_content_url=None, preview_image_url=None, **kwargs):
"""__init__ method.
:param str type: Provider of the content. `line` or `external`.
:param str original_content_url: URL of the content.
:param str preview_image_url: URL of the preview image.
:param kwargs:
"""
super(ContentProvider, self).__init__(**kwargs)
self.type = type
self.original_content_url = original_content_url
self.preview_image_url = preview_image_url
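# --- Illustrative usage (hedged sketch, not part of the original SDK module) ---
# In webhook handling these objects are normally built from parsed events; the
# id/text values below are made up.
#
#     msg = TextMessage(id='325708', text='Hello, world')
#     msg.type      # 'text'
#     msg.mention   # None unless mention metadata was supplied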
| 30.877778 | 95 | 0.640998 |
f79588c11b839d81da2bff6f57bbfb3aedf76539 | 9,995 | py | Python | lib3/yaml/__init__.py | sikhberserker/yaml | 4a7a400d218ad522bf5f50e021ea62a3ceb19566 | [
"MIT"
] | 2 | 2018-04-27T22:12:50.000Z | 2020-11-27T23:32:06.000Z | lib3/yaml/__init__.py | sikhberserker/yaml | 4a7a400d218ad522bf5f50e021ea62a3ceb19566 | [
"MIT"
] | null | null | null | lib3/yaml/__init__.py | sikhberserker/yaml | 4a7a400d218ad522bf5f50e021ea62a3ceb19566 | [
"MIT"
] | 2 | 2020-01-29T20:36:20.000Z | 2021-03-08T02:05:35.000Z |
from .error import *
from .tokens import *
from .events import *
from .nodes import *
from .loader import *
from .dumper import *
__version__ = '3.12'
try:
from .cyaml import *
__with_libyaml__ = True
except ImportError:
__with_libyaml__ = False
import io
def scan(stream, Loader=Loader):
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
try:
while loader.check_token():
yield loader.get_token()
finally:
loader.dispose()
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
try:
while loader.check_event():
yield loader.get_event()
finally:
loader.dispose()
def compose(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
loader = Loader(stream)
try:
return loader.get_single_node()
finally:
loader.dispose()
def compose_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
loader = Loader(stream)
try:
while loader.check_node():
yield loader.get_node()
finally:
loader.dispose()
def load(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
    By default, resolve only basic YAML tags; if an alternate Loader is
    provided, this may be dangerous.
"""
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
safe_load = load
def load_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
    By default, resolve only basic YAML tags; if an alternate Loader is
    provided, this may be dangerous.
"""
loader = Loader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
safe_load_all = load_all
def danger_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
When used on untrusted input, can result in arbitrary code execution.
"""
return load(stream, DangerLoader)
def danger_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
When used on untrusted input, can result in arbitrary code execution.
"""
return load_all(stream, DangerLoader)
def emit(events, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
stream = io.StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
try:
for event in events:
dumper.emit(event)
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize_all(nodes, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
try:
dumper.open()
for node in nodes:
dumper.serialize(node)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
try:
dumper.open()
for data in documents:
dumper.represent(data)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
safe_dump_all = dump_all
def danger_dump_all(documents, stream=None, **kwds):
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=DangerDumper, **kwds)
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds)
safe_dump = dump
def danger_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=DangerDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
Loader=Loader, Dumper=Dumper):
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
Loader.add_implicit_resolver(tag, regexp, first)
Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=Loader):
"""
Add a constructor for the given tag.
Constructor is a function that accepts a Loader instance
and a node object and produces the corresponding Python object.
"""
Loader.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
"""
Add a multi-constructor for the given tag prefix.
Multi-constructor is called for a node if its tag starts with tag_prefix.
Multi-constructor accepts a Loader instance, a tag suffix,
and a node object and produces the corresponding Python object.
"""
Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
"""
Add a representer for the given type.
Representer is a function accepting a Dumper instance
and an instance of the given data type
and producing the corresponding representation node.
"""
Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
"""
Add a representer for the given type.
Multi-representer is a function accepting a Dumper instance
and an instance of the given data type or subtype
and producing the corresponding representation node.
"""
Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
"""
The metaclass for YAMLObject.
"""
def __init__(cls, name, bases, kwds):
super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(metaclass=YAMLObjectMetaclass):
"""
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_loader = Loader
yaml_dumper = Dumper
yaml_tag = None
yaml_flow_style = None
@classmethod
def from_yaml(cls, loader, node):
"""
Convert a representation node to a Python object.
"""
return loader.construct_yaml_object(node, cls)
@classmethod
def to_yaml(cls, dumper, data):
"""
Convert a Python object to a representation node.
"""
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
flow_style=cls.yaml_flow_style)
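# --- Illustrative usage (hedged sketch, not part of the original module) ---
# Assumes this package is importable as `yaml`; uses only functions defined above.
#
#     import yaml
#     text = yaml.dump({'answer': 42})
#     yaml.load(text)         # basic tags only (same as safe_load here)
#     yaml.danger_load(text)  # full tag resolution; unsafe on untrusted input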
| 30.944272 | 77 | 0.671636 |
a7dff692a7654af5d76aaeba73d4f299b37585e2 | 2,088 | py | Python | python/tests/unit/test_apienforcer.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 14 | 2017-03-07T17:00:22.000Z | 2021-04-02T14:15:04.000Z | python/tests/unit/test_apienforcer.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 82 | 2017-02-16T16:54:18.000Z | 2018-06-04T13:40:32.000Z | python/tests/unit/test_apienforcer.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 16 | 2017-02-14T19:47:00.000Z | 2018-04-26T10:13:05.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import logging
from drydock_provisioner import policy
from drydock_provisioner.control.base import DrydockRequestContext
logging.basicConfig(level=logging.DEBUG)
class TestEnforcerDecorator():
def test_apienforcer_decorator(self, mocker):
''' DrydockPolicy.authorized() should correctly use oslo_policy to enforce
RBAC policy based on a DrydockRequestContext instance. authorized() is
called via the policy.ApiEnforcer decorator.
'''
mocker.patch('oslo_policy.policy.Enforcer')
ctx = DrydockRequestContext()
policy_engine = policy.DrydockPolicy()
# Configure context
project_id = str(uuid.uuid4())
ctx.project_id = project_id
user_id = str(uuid.uuid4())
ctx.user_id = user_id
ctx.roles = ['admin']
ctx.set_policy_engine(policy_engine)
# Configure mocked request and response
req = mocker.MagicMock()
resp = mocker.MagicMock()
req.context = ctx
self.target_function(req, resp)
expected_calls = [
mocker.call.authorize('physical_provisioner:read_task', {
'project_id': project_id,
'user_id': user_id
}, ctx.to_policy_view())
]
policy_engine.enforcer.assert_has_calls(expected_calls)
@policy.ApiEnforcer('physical_provisioner:read_task')
def target_function(self, req, resp):
return True
| 33.677419 | 82 | 0.691571 |
f97f53a0df41c49723275ef38c19cb19d0f6e80c | 3,713 | py | Python | knn classifier (1).py | msabi/KNN-Classification-using-Scikit-learn | ae70af66c5acd8f796e26ab4a12f08579e08d922 | [
"MIT"
] | 1 | 2019-08-30T07:22:16.000Z | 2019-08-30T07:22:16.000Z | knn classifier (1).py | msabi/KNN-Classification-using-Scikit-learn | ae70af66c5acd8f796e26ab4a12f08579e08d922 | [
"MIT"
] | null | null | null | knn classifier (1).py | msabi/KNN-Classification-using-Scikit-learn | ae70af66c5acd8f796e26ab4a12f08579e08d922 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]
from sklearn.neighbors import KNeighborsClassifier
# In[2]:
neigh = KNeighborsClassifier(n_neighbors=3)
# In[3]:
neigh.fit(X, y)
# In[7]:
print(neigh.predict([[3]]))
# In[8]:
print(neigh.predict_proba([[0.9]]))
# # Classifier Building in Scikit-learn
#
# In[10]:
# Assigning features and label variables
# First Feature
weather=['Sunny','Sunny','Overcast','Rainy','Rainy','Rainy','Overcast','Sunny','Sunny',
'Rainy','Sunny','Overcast','Overcast','Rainy']
# Second Feature
temp=['Hot','Hot','Hot','Mild','Cool','Cool','Cool','Mild','Cool','Mild','Mild','Mild','Hot','Mild']
# Label or target variable
play=['No','No','Yes','Yes','Yes','No','Yes','No','Yes','Yes','Yes','Yes','Yes','No']
# # Encoding data columns
# In[11]:
# Import LabelEncoder
from sklearn import preprocessing
#creating labelEncoder
le = preprocessing.LabelEncoder()
# Converting string labels into numbers.
weather_encoded=le.fit_transform(weather)
print(weather_encoded)
# In[12]:
# converting string labels into numbers
temp_encoded=le.fit_transform(temp)
label=le.fit_transform(play)
# In[17]:
print(temp_encoded)
print(label)
# # Combining Features
#
# In[15]:
# combining weather and temp into a single list of tuples
features=list(zip(weather_encoded,temp_encoded))
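# With alphabetical label encoding (Overcast=0, Rainy=1, Sunny=2; Cool=0, Hot=1, Mild=2)
# the first few pairs look like [(2, 1), (2, 1), (0, 1), ...]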
# # Generating Model
# In[18]:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
# Train the model using the training sets
model.fit(features,label)
#Predict Output
predicted= model.predict([[0,2]]) # 0:Overcast, 2:Mild
print(predicted)
# # KNN with Multiple Labels
# In[31]:
#Import scikit-learn dataset library
from sklearn import datasets
#Load dataset
wine = datasets.load_wine()
# In[32]:
# print the names of the features
print(wine.feature_names)
# In[33]:
# print the label species(class_0, class_1, class_2)
print(wine.target_names)
# In[34]:
# print the wine data (top 5 records)
print(wine.data[0:5])
# In[35]:
# print the wine labels (0:Class_0, 1:Class_1, 2:Class_2)
print(wine.target)
# In[36]:
print(wine.data.shape)
# In[37]:
# print target(or label)shape
print(wine.target.shape)
# # Splitting Data
# In[38]:
# Import train_test_split function
from sklearn.model_selection import train_test_split
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(wine.data, wine.target, test_size=0.3) # 70% training and 30% test
# # Generating Model for K=9
# Let's build a KNN classifier model for k=9.
# In[54]:
#Import knearest neighbors Classifier model
from sklearn.neighbors import KNeighborsClassifier
#Create KNN Classifier
knn = KNeighborsClassifier(n_neighbors=9)
#Train the model using the training sets
knn.fit(X_train, y_train)
#Predict the response for test dataset
y_pred = knn.predict(X_test)
# In[55]:
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# # Model Evaluation for k=7
# In[56]:
#Import knearest neighbors Classifier model
from sklearn.neighbors import KNeighborsClassifier
#Create KNN Classifier
knn = KNeighborsClassifier(n_neighbors=7)
#Train the model using the training sets
knn.fit(X_train, y_train)
#Predict the response for test dataset
y_pred = knn.predict(X_test)
# In[57]:
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# In[ ]:
| 15.867521 | 118 | 0.722596 |
c3adc36dcd5c85a59900279982ca40759c163496 | 1,338 | py | Python | web_dynamic/0-hbnb.py | Zevrov/AirBnB_clone_v3 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | 1 | 2021-03-03T17:29:11.000Z | 2021-03-03T17:29:11.000Z | web_dynamic/0-hbnb.py | Zevrov/AirBnB_clone_v4 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | null | null | null | web_dynamic/0-hbnb.py | Zevrov/AirBnB_clone_v4 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
import uuid
from flask import Flask, render_template, url_for
from models import storage
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/0-hbnb')
def hbnb_filters(the_id=None):
"""
    handles request to custom template with states, cities & amenities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = uuid.uuid4()
return render_template('0-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=cache_id)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
| 27.306122 | 75 | 0.606876 |
5bdd83a48ab4a0b4e7c1338881d0f7dd4ae8d3a9 | 2,955 | py | Python | src/migrations/versions/86cd34fa749f_meeting_schedule_schedulecell_tables_.py | akhundMurad/fastapi-bigbluebutton | bfe94d87d8cb9768c17cf5513a05d2b46edf5b5c | [
"MIT"
] | 1 | 2021-07-13T16:28:48.000Z | 2021-07-13T16:28:48.000Z | src/migrations/versions/86cd34fa749f_meeting_schedule_schedulecell_tables_.py | akhundMurad/fastapi-bigbluebutton | bfe94d87d8cb9768c17cf5513a05d2b46edf5b5c | [
"MIT"
] | 1 | 2022-03-04T19:06:43.000Z | 2022-03-05T06:15:47.000Z | src/migrations/versions/86cd34fa749f_meeting_schedule_schedulecell_tables_.py | akhundMurad/fastapi-bigbluebutton | bfe94d87d8cb9768c17cf5513a05d2b46edf5b5c | [
"MIT"
] | null | null | null | """Meeting, Schedule, ScheduleCell tables added
Revision ID: 86cd34fa749f
Revises: d137934b754b
Create Date: 2021-07-08 20:41:37.824101
"""
import ormar
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '86cd34fa749f'
down_revision = 'd137934b754b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('schedule',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('schedule_cell',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('datetime_start', sa.DateTime(), nullable=False),
sa.Column('datetime_end', sa.DateTime(), nullable=False),
sa.Column('schedule', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['schedule'], ['schedule.id'], name='fk_schedule_cell_schedule_id_schedule'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('schedules_users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user', sa.Integer(), nullable=True),
sa.Column('schedule', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['schedule'], ['schedule.id'], name='fk_schedules_users_schedule_schedule_id', onupdate='CASCADE', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['user'], ['user.id'], name='fk_schedules_users_user_user_id', onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('meeting',
sa.Column('id', ormar.fields.sqlalchemy_uuid.CHAR(36), nullable=False),
sa.Column('name', sa.String(length=512), nullable=False),
sa.Column('welcome_message', sa.String(length=128), nullable=False),
sa.Column('moderator_message', sa.String(length=128), nullable=False),
sa.Column('record', sa.Boolean(), nullable=True),
sa.Column('auto_start_recording', sa.Boolean(), nullable=True),
sa.Column('allow_start_stop_recording', sa.Boolean(), nullable=True),
sa.Column('webcams_only_for_moderator', sa.Boolean(), nullable=True),
sa.Column('mute_on_start', sa.Boolean(), nullable=True),
sa.Column('allow_mods_to_unmute_users', sa.Boolean(), nullable=True),
sa.Column('max_participants', sa.Integer(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('schedule_cell', sa.Integer(), nullable=True),
sa.Column('datetime_start', sa.DateTime(), nullable=False),
sa.Column('datetime_end', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['schedule_cell'], ['schedule_cell.id'], name='fk_meeting_schedule_cell_id_schedule_cell'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('meeting')
op.drop_table('schedules_users')
op.drop_table('schedule_cell')
op.drop_table('schedule')
# ### end Alembic commands ###
| 42.214286 | 147 | 0.703892 |
d3bc65eab052c865210396ee6df26403ddd3886c | 961 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractDeltatranslationsOrg.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractDeltatranslationsOrg.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractDeltatranslationsOrg.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractDeltatranslationsOrg(item):
'''
Parser for 'deltatranslations.org'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Summoning the Holy Sword', 'Summoning the Holy Sword', 'translated'),
('King of Mercenaries', 'King of Mercenaries', 'translated'),
('For a Prosperous World', 'For a Prosperous World', 'translated'),
('Battle of the Third Reich', 'Battle of the Third Reich', 'translated'),
('EDSG', 'Eight Desolate Sword God', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 41.782609 | 104 | 0.570239 |
01eba9f21cb3e6f7fcb4e28d564581353106a527 | 6,018 | py | Python | backend/production/views.py | DomTripodi93/production-django | 133bc119ebcbe2cd63131517932d58f084a0bebd | [
"MIT"
] | null | null | null | backend/production/views.py | DomTripodi93/production-django | 133bc119ebcbe2cd63131517932d58f084a0bebd | [
"MIT"
] | 9 | 2020-06-05T22:29:46.000Z | 2022-02-26T16:38:35.000Z | backend/production/views.py | DomTripodi93/ProductionManagement | 133bc119ebcbe2cd63131517932d58f084a0bebd | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.conf import settings
from django_filters import rest_framework as filter
from .models import UserSettings, ProUser, Production, StartTime, Machine, Part, HourlyProduction, ChangeLog
from .serializers import ProUserSerializer, UserSettingsSerializer, StartTimeSerializer, ProductionSerializer, MachineSerializer, HourlyProductionSerializer, PartSerializer, ChangeLogSerializer
from .permissions import ViewOwnProduction, UpdateOwnProUser, CreateOwnProduction, UpdateOwnProduction
from rest_framework import status, viewsets, filters, generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.models import Token
from rest_framework.filters import OrderingFilter
from .forms import UserCreationForm
class CustomObtainAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
response = super(CustomObtainAuthToken, self).post(request, *args, **kwargs)
token = Token.objects.get(key=response.data['token'])
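        # Illustrative response shape (values are made up):
        #   {"token": "<40-char key>", "id": 4, "name": "operator1"}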
return Response({'token': token.key, 'id': token.user_id, "name": token.user.name})
class ProductionViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = Production.objects.all().order_by('-date')
serializer_class = ProductionSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (filter.DjangoFilterBackend,)
filterset_fields = ("machine", "shift", "job", "date", "in_question")
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return Production.objects.none()
else:
return Production.objects.filter(user=self.request.user).order_by('-date')
class MachineViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = Machine.objects.all()
serializer_class = MachineSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (OrderingFilter,)
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return Machine.objects.none()
else:
return Machine.objects.filter(user=self.request.user).order_by("machine")
class PartViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = Part.objects.all()
serializer_class = PartSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (filter.DjangoFilterBackend,)
filterset_fields = ("machine", "part", "job", )
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return Part.objects.none()
else:
return Part.objects.filter(user=self.request.user).order_by("-job")
class HourlyProductionViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = HourlyProduction.objects.all()
serializer_class = HourlyProductionSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (filter.DjangoFilterBackend,)
filterset_fields = ("machine", "date", "job", )
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return HourlyProduction.objects.none()
else:
return HourlyProduction.objects.filter(user=self.request.user).order_by("machine", "-date")
class StartTimeViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = StartTime.objects.all()
serializer_class = StartTimeSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (filter.DjangoFilterBackend,)
filterset_fields = ("machine", "date", "job", )
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return StartTime.objects.none()
else:
return StartTime.objects.filter(user=self.request.user).order_by("machine", "-date", "-time")
class ChangeLogViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
queryset = ChangeLog.objects.all()
serializer_class = ChangeLogSerializer
permission_classes=(CreateOwnProduction, UpdateOwnProduction, )
filter_backends = (filter.DjangoFilterBackend,)
filterset_fields = ("changed_model", )
def perform_create(self, serializer):
serializer.save(user = self.request.user)
def get_queryset(self):
if self.request.user.is_anonymous:
return ChangeLog.objects.none()
else:
return ChangeLog.objects.filter(user=self.request.user).order_by("-timestamp")
class RegisterViewSet(viewsets.ModelViewSet):
serializer_class = ProUserSerializer
queryset = ProUser.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (UpdateOwnProUser, )
class UserSettingsViewSet(viewsets.ModelViewSet):
serializer_class = UserSettingsSerializer
queryset = UserSettings.objects.all().order_by('-user')
authentication_classes = (TokenAuthentication,)
permission_classes = (UpdateOwnProduction, )
class LoginViewSet(viewsets.ViewSet):
serializer_class= AuthTokenSerializer
def create(self, request):
return CustomObtainAuthToken().post(request)
| 44.577778 | 193 | 0.748255 |
af2f0af8ab93635aa9cfe5ddc11e71702e4756f2 | 169 | py | Python | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_MonthOfYear_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_MonthOfYear_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_MonthOfYear_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['LSTM'] ); | 42.25 | 91 | 0.763314 |
6ebb4e58b0e07e49f50ecba5c9dfbc9ad1fd8bc2 | 8,574 | py | Python | bitswap/session/session.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | bitswap/session/session.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | bitswap/session/session.py | VladislavSufyanov/py-bitswap | 875d15944e485c33b16af9965f24c1d85cb34c55 | [
"MIT"
] | null | null | null | from typing import Union, Dict, Optional, List, TYPE_CHECKING
import weakref
import asyncio
from logging import INFO
from time import monotonic
from cid import CIDv0, CIDv1
from .peer_score import PeerScore
from ..connection_manager.sender import Sender
from ..message.proto_buff import ProtoBuff
from ..logger import get_stream_logger_colored, get_concurrent_logger
if TYPE_CHECKING:
from ..peer.peer import Peer
from ..peer.base_peer_manager import BasePeerManager
from ..wantlist.entry import Entry
from ..network.base_network import BaseNetwork
class Session:
def __init__(self, network: 'BaseNetwork', peer_manager: 'BasePeerManager',
log_level: int = INFO, log_path: Optional[str] = None) -> None:
if log_path is None:
self._logger = get_stream_logger_colored(__name__, log_level)
else:
self._logger = get_concurrent_logger(__name__, log_path, log_level)
self._network = network
self._peer_manager = peer_manager
self._peers: Dict[str, PeerScore] = {}
self._blocks_have: Dict[str, weakref.WeakSet] = {}
self._blocks_pending: Dict[str, weakref.WeakSet] = {}
def __contains__(self, peer: 'Peer') -> bool:
return str(peer.cid) in self._peers
def get_notify_peers(self, block_cid: Union[CIDv0, CIDv1],
current_peer: Optional[Union[CIDv0, CIDv1]] = None) -> List['Peer']:
str_block_cid = str(block_cid)
l_p = []
for block_cont in self._blocks_have, self._blocks_pending:
if str_block_cid in block_cont:
l_p.extend(p.peer for p in block_cont[str_block_cid] if p.peer.cid != current_peer)
return l_p
def add_peer(self, peer: 'Peer', block_cid: Union[CIDv0, CIDv1], have: bool = True) -> None:
str_peer_cid = str(peer.cid)
str_block_cid = str(block_cid)
if str_peer_cid not in self._peers:
self._peers[str_peer_cid] = PeerScore(peer)
self._logger.debug(f'Add new peer to session, session: {self}, peer_cid: {str_peer_cid}')
if have:
if str_block_cid not in self._blocks_have:
self._blocks_have[str_block_cid] = weakref.WeakSet()
self._blocks_have[str_block_cid].add(self._peers[str(peer.cid)])
def change_peer_score(self, cid: Union[CIDv0, CIDv1], new: float, alpha: float = 0.5) -> bool:
str_cid = str(cid)
if str_cid not in self._peers:
return False
self._peers[str_cid].change_score(new, alpha)
return True
def remove_peer(self, cid: Union[CIDv0, CIDv1]) -> bool:
str_cid = str(cid)
if str_cid not in self._peers:
return False
del self._peers[str_cid]
self._logger.debug(f'Remove peer from session, session: {self}, peer_cid: {str_cid}')
return True
def remove_peer_from_have(self, block_cid: Union[CIDv0, CIDv1], peer: 'Peer') -> bool:
str_cid = str(block_cid)
if str_cid not in self._blocks_have or peer not in self._blocks_have[str_cid]:
return False
self._blocks_have[str_cid].remove(peer)
return True
async def get(self, entry: 'Entry', connect_timeout: int = 7, peer_act_timeout: int = 5,
ban_peer_timeout: int = 10) -> None:
str_entry_cid = str(entry.cid)
entry.add_session(self)
ban_peers: Dict[str, float] = {}
sent_w_block_to_peers: List[PeerScore] = []
new_peers_cid: List[Union[CIDv0, CIDv1]] = []
if str_entry_cid not in self._blocks_have:
self._blocks_have[str_entry_cid] = weakref.WeakSet()
if str_entry_cid not in self._blocks_pending:
self._blocks_pending[str_entry_cid] = weakref.WeakSet()
if not self._peers:
self._logger.debug(f'Session has not peers, session: {self}')
all_peers = self._peer_manager.get_all_peers()
if not all_peers:
self._logger.debug(f'No active connections with peers, session: {self}')
while True:
new_peers_cid = await self._network.find_peers(entry.cid)
if not new_peers_cid:
self._logger.warning(f'Cant find peers, block_cid: {entry.cid}, session: {self}')
await asyncio.sleep(peer_act_timeout)
elif await self._connect(new_peers_cid, ban_peers, connect_timeout, ban_peer_timeout) is None:
self._logger.warning(f'Cant connect to peers, session: {self}')
await asyncio.sleep(peer_act_timeout)
else:
break
all_peers = self._peer_manager.get_all_peers()
await Sender.send_entries((entry,), all_peers, ProtoBuff.WantType.Have)
else:
await Sender.send_entries((entry,), (p.peer for p in self._peers.values()), ProtoBuff.WantType.Have)
try:
while entry.block is None:
try:
have_peer = await asyncio.wait_for(self._wait_for_have_peer(entry.cid), peer_act_timeout)
except asyncio.exceptions.TimeoutError:
self._logger.debug(f'Wait have timeout, session: {self}')
new_peer = await self._connect(new_peers_cid, ban_peers, connect_timeout, ban_peer_timeout)
if new_peer is None:
new_peers_cid = await self._network.find_peers(entry.cid)
new_peer = await self._connect(new_peers_cid, ban_peers, connect_timeout, ban_peer_timeout)
if new_peer is not None:
await Sender.send_entries((entry,), (new_peer,), ProtoBuff.WantType.Have)
else:
self._blocks_have[str_entry_cid].remove(have_peer)
if have_peer not in self._blocks_pending[str_entry_cid] and have_peer.peer in self._peer_manager:
self._blocks_pending[str_entry_cid].add(have_peer)
sent_w_block_to_peers.append(have_peer)
await Sender.send_entries((entry,), (have_peer.peer,), ProtoBuff.WantType.Block)
try:
await asyncio.wait_for(self._wait_for_block(entry), peer_act_timeout)
except asyncio.exceptions.TimeoutError:
self._logger.debug(f'Block wait timeout, block_cid: {entry.cid}')
finally:
for peer in sent_w_block_to_peers:
if peer in self._blocks_pending[str_entry_cid]:
self._blocks_pending[str_entry_cid].remove(peer)
async def _connect(self, peers_cid: List[Union[CIDv0, CIDv1]], ban_peers: Dict[str, float],
connect_timeout: int, ban_peer_timeout: int) -> Optional['Peer']:
unban_cid = []
for s_cid, ban_time in ban_peers.items():
if monotonic() - ban_time > ban_peer_timeout:
unban_cid.append(s_cid)
for s_cid in unban_cid:
del ban_peers[s_cid]
while len(peers_cid) > 0:
p_cid = peers_cid.pop()
if str(p_cid) not in ban_peers:
try:
peer = await asyncio.wait_for(self._peer_manager.connect(p_cid), connect_timeout)
if peer is not None:
break
except asyncio.exceptions.TimeoutError:
self._logger.debug(f'Connect timeout, peer_cid: {p_cid}')
ban_peers[str(p_cid)] = monotonic()
except Exception as e:
self._logger.debug(f'Connect exception, peer_cid: {p_cid}, e: {e}')
ban_peers[str(p_cid)] = monotonic()
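        # while-else: this branch runs only when every candidate CID was consumed
        # without a successful connection (no break), so None is returned implicitly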
else:
return
return peer
def _get_peer_with_max_score(self, cid: Union[CIDv0, CIDv1]) -> PeerScore:
return max(self._blocks_have[str(cid)], key=lambda p: (p.score, -p.peer.latency))
async def _wait_for_have_peer(self, cid: Union[CIDv0, CIDv1], period: float = 0.1) -> PeerScore:
str_cid = str(cid)
while str_cid not in self._blocks_have or len(self._blocks_have[str_cid]) == 0:
await asyncio.sleep(period)
return self._get_peer_with_max_score(cid)
@staticmethod
async def _wait_for_block(entry: 'Entry', period: float = 0.1) -> Optional[bytes]:
while entry.block is None:
await asyncio.sleep(period)
return entry.block
| 48.715909 | 117 | 0.614532 |
cf69ff973d91be24d32d35804548f17263d7cceb | 559 | py | Python | utils/cal_bleu.py | laihuiyuan/Multilingual-TST | 84fab28b30e347ad42ed7dff737dab86b15ece5f | [
"MIT"
] | 3 | 2022-02-25T09:51:29.000Z | 2022-02-25T22:09:08.000Z | utils/cal_bleu.py | laihuiyuan/multilingual-tst | 84fab28b30e347ad42ed7dff737dab86b15ece5f | [
"MIT"
] | null | null | null | utils/cal_bleu.py | laihuiyuan/multilingual-tst | 84fab28b30e347ad42ed7dff737dab86b15ece5f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import nltk
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
hyp, ref = [], []
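# NOTE: each iteration below rebinds `hyp`, so only the last line of the
# hypothesis file is scored; the file is expected to hold a single sentence.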
with open(sys.argv[1],'r') as f:
for line in f.readlines():
hyp = nltk.word_tokenize(line.strip(), language=sys.argv[3])
with open(sys.argv[2],'r') as f:
for line in f.readlines():
ref.append(nltk.word_tokenize(line.strip(), language=sys.argv[3]))
smooth = SmoothingFunction()
score = sentence_bleu(ref, hyp,smoothing_function=smooth.method1)
print(score)
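# Example invocation (file names are placeholders):
#   python cal_bleu.py hypothesis.txt references.txt english
# argv[1]: file containing the hypothesis sentence
# argv[2]: file with one reference translation per line
# argv[3]: NLTK tokenizer language, e.g. "english"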
| 25.409091 | 74 | 0.703041 |
4711ea6e19200050f28a3c09cb51393281c8c0db | 47,635 | py | Python | django/db/models/sql/compiler.py | krallin/django | c94db53eaa9b344f9227fa4dff2b1a5e9c7dce9d | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/compiler.py | krallin/django | c94db53eaa9b344f9227fa4dff2b1a5e9c7dce9d | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/compiler.py | krallin/django | c94db53eaa9b344f9227fa4dff2b1a5e9c7dce9d | [
"BSD-3-Clause"
] | null | null | null | import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import transaction
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
result.append(', '.join(out_cols + self.query.ordering_aliases))
params.extend(s_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.model._meta
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, col, alias, _, _ = self._setup_joins(parts, opts, None)
col, alias = self._final_join_removal(col, alias)
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.model._meta.ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
col, alias = self._final_join_removal(col, alias)
return [(alias, col, order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, _ = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, col, alias, joins, opts
def _final_join_removal(self, col, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
if col != join.rhs_join_col:
break
self.query.unref_alias(alias)
alias = join.lhs_alias
col = join.lhs_join_col
return col, alias
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
if join_field and hasattr(join_field, 'get_extra_join_sql'):
extra_cond, extra_params = join_field.get_extra_join_sql(
self.connection, qn, lhs, alias)
from_params.extend(extra_params)
else:
extra_cond = ""
result.append('%s %s%s ON (%s.%s = %s.%s%s)' %
(join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col), extra_cond))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# calls increments the refcount, so an alias refcount of one means
# this is the only reference.
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.model._meta.fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.model._meta.db_table, self.query.model._meta.pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
table = f.rel.to._meta.db_table
promote = nullable or f.null
alias = self.query.join_parent_model(opts, model, root_alias, {})
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
outer_if_first=promote, join_field=f)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
alias = self.query.join_parent_model(opts, f.rel.to, root_alias, {})
table = model._meta.db_table
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
outer_if_first=True, join_field=f
)
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
self.fill_related_selections(model._meta, table, cur_depth+1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.model._meta.fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
aggregate_start = len(self.query.extra_select) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.util import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
                # Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.dates(). Are time zone "
"definitions installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
| 44.147359 | 136 | 0.579826 |
aa95ed9ae10fb22124d05bb4e312fa02ae2f00bc | 4,437 | py | Python | create_dns_records.py | davefontaine/network_scripts | fe4f064c8e3df9433a51d5c41035c065f8dc1c47 | [
"Apache-2.0"
] | null | null | null | create_dns_records.py | davefontaine/network_scripts | fe4f064c8e3df9433a51d5c41035c065f8dc1c47 | [
"Apache-2.0"
] | null | null | null | create_dns_records.py | davefontaine/network_scripts | fe4f064c8e3df9433a51d5c41035c065f8dc1c47 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# create_dns_record.py
# input: list of filenames to parse; filename must contain hostname eg. lca1-crt01.nw-config.txt
# output: a dns entry to stdout for each interface that has an ipv6 address
#
import sys
import re
from ciscoconfparse import CiscoConfParse
# site code
all_sites = ["lca1", "lva1"]
# environment code
all_environments = ["nw", "corp", "prod"]
# mapping of cli interface name string to abbreviated DNS string
interface_name_mapping = {
'Loopback':'lo',
'loopback':'lo',
'Ethernet':'eth',
'GigabitEthernet':'ge',
'TenGigabitEthernet':'te',
'Vlan':'vlan',
'xe':'xe'
}
# remove executable file name from list, leaves list of files
sys.argv.remove(sys.argv[0])
hostname_re = re.compile(r'.*((' + "|".join(all_sites) + r')(.*)\.(' + "|".join(all_environments) + r')).*')
ipv6_address_re = re.compile(r".*ipv6 address\W(.*)/[0-9]+")
interface_re = re.compile(r"^interface ([a-zA-Z]+)([0-9]+)/?([0-9]+)?/?([0-9]+)?")
junos_ipv6_interface_re = re.compile(r'^set interfaces (.*) unit \d+ family inet6 address (.*)/[0-9]+')
# incomplete items
# if match is Loopback interface, create DNS entry for device
def write_dns_record_stdout(hostname_str, interface_string, ipv6_address_string):
# if not the loopback interface
if interface_string.find("lo") < 0:
print hostname_str + '-' + interface_string + '.' + environment + '.' + 'linkedin.com' + ' AAAA ' + ipv6_address
# if this is the loopback interface, create loopback & host records
else:
print hostname_str + '.' + environment + '.' + 'linkedin.com' + ' AAAA ' + ipv6_address
# CNAME for "lo" interface
print hostname_str + '-' + interface_string + '.' + environment + '.' + 'linkedin.com' + ' CNAME ' + hostname_str + '.' + environment + '.' + 'linkedin.com.'
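# Illustrative output for hostname "lca1-crt01" in environment "nw"
# (addresses are made-up documentation-prefix values):
#   lca1-crt01-eth1-2.nw.linkedin.com AAAA 2001:db8::1
#   lca1-crt01.nw.linkedin.com AAAA 2001:db8::2
#   lca1-crt01-lo0.nw.linkedin.com CNAME lca1-crt01.nw.linkedin.com.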
for file in sys.argv:
m = hostname_re.match(file)
# if the filename conforms to the hostname naming convention, make assignments
if m:
hostname = m.group(2)+m.group(3)
environment = m.group(4)
# otherwise, do not process / parse the file
else:
print 'WARNING: "' + file + '" does not contain device hostname.'
continue
# parse the file
parsed_file = CiscoConfParse(file)
# pull out all interfaces which have an ipv6 address
interfaces = parsed_file.find_objects_w_child(parentspec=r"^interface", childspec=r"ipv6 address")
# if the list is not empty this is likely a Cisco-like device
if interfaces != []:
# for every interface that matches the above conditions,
for interface_name in interfaces:
match_interface_name = interface_re.match(interface_name.text)
short_interface_name = interface_name_mapping[match_interface_name.group(1)]
# build interface port number with "/" substituded by "-"
# eg. Ethernet1/2 becomes eth1-2
# do this for all except loopback interface which becomes host entry
if match_interface_name.lastindex >= 2:
short_interface_name += '-' + match_interface_name.group(2)
if match_interface_name.lastindex >= 3:
short_interface_name += '-' + match_interface_name.group(3)
if match_interface_name.lastindex >= 4:
short_interface_name += '-' + match_interface_name.group(4)
# find "ipv6 address" under interface and grab address
for subinterface_line in interface_name.children:
match_ipv6_address = ipv6_address_re.match(subinterface_line.text)
if match_ipv6_address:
ipv6_address = match_ipv6_address.group(1)
# create record by merging dns_name and ipv6 address and write entry to stdout
write_dns_record_stdout(hostname, short_interface_name, ipv6_address)
# here we assume it's JUNOS, since the cisco parser came back NULL
else:
with open(file) as f:
entire_config = f.readlines()
for line in entire_config:
match_interface_name = junos_ipv6_interface_re.match(line)
if match_interface_name:
interface_name = match_interface_name.group(1)
ipv6_address = match_interface_name.group(2)
				interface_name = interface_name.replace("/", "-")
write_dns_record_stdout(hostname, interface_name, ipv6_address)
continue
| 31.246479 | 165 | 0.64503 |
370aefe138a667d99e7a54e8816434abc2d8cd95 | 2,878 | py | Python | python/day11.py | davidlowryduda/AoC18 | cb1a5abb6fae8a00e805b3c76125e2db8d452cff | [
"MIT"
] | null | null | null | python/day11.py | davidlowryduda/AoC18 | cb1a5abb6fae8a00e805b3c76125e2db8d452cff | [
"MIT"
] | null | null | null | python/day11.py | davidlowryduda/AoC18 | cb1a5abb6fae8a00e805b3c76125e2db8d452cff | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""
Solve day 11
"""
from utils import input_lines
def power_level(x, y, serial=8):
"""
A nonsense sequence of steps described in the puzzle instructions.
"""
rackID = x + 10
level = rackID * y
level += serial
level *= rackID
level = (level // 100) % 10
level -= 5
return level
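# Worked example from the puzzle statement: the fuel cell at 3,5 with grid
# serial number 8 has power level 4:
#   rackID = 13; 13*5 = 65; +8 = 73; *13 = 949; hundreds digit 9; 9 - 5 = 4
#   power_level(3, 5, serial=8) == 4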
def compute_power_levels(serial):
"""
Create a grid where grid[(x,y)] has the power_level at position (x,y).
"""
grid = dict()
for x in range(1, 301):
for y in range(1, 301):
grid[(x, y)] = power_level(x, y, serial=serial)
return grid
def compute_sized_powerlevel(grid, x, y, size=3):
"""
Compute combined powerlevel for sizexsize grid with topleft element (x,y).
"""
total_power_level = 0
for i in range(size):
for j in range(size):
total_power_level += grid[(x+i, y+j)]
return total_power_level
def find_largest_trio(grid):
"""
Find the largest 3x3 grid value.
"""
record = 0
record_tuple = (0,0)
    for x in range(1, 299):  # a 3x3 square's top-left corner can be at most (298, 298)
        for y in range(1, 299):
candidate_power = compute_sized_powerlevel(grid, x, y)
if candidate_power > record:
record = candidate_power
record_tuple = (x, y)
return record, record_tuple
def find_largest_anysize(grid):
"""
Find the largest sizexsize grid value.
"""
record = 0
record_tuple = (0, 0, 0)
    for x in range(1, 301):  # every cell can be the top-left of at least a 1x1 square
        print("On x =", x)
        for y in range(1, 301):
            cand_record, cand_tuple = find_largest_anysize_at_xy(grid, x, y)
if cand_record > record:
record = cand_record
record_tuple = cand_tuple
return record, record_tuple
def find_largest_anysize_at_xy(grid, x, y):
"""
Finds the largest sizexsize grid with top-left location (x,y).
"""
    maxsize = min(301 - x, 301 - y)  # largest square that still fits inside the 300x300 grid
record = grid[(x,y)]
record_tuple = (x, y, 1)
prevsize = record
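    # Grow the square one size at a time: add the new right column and bottom
    # row, then subtract the shared corner cell, which was added twice.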
for size in range(2, maxsize + 1):
cand = prevsize
for i in range(size):
cand += grid[(x+i, y+size-1)]
cand += grid[(x+size-1, y+i)]
cand -= grid[(x+size-1, y+size-1)]
prevsize = cand
if cand > record:
record = cand
record_tuple = (x, y, size)
return record, record_tuple
def do_part_1(day, test=False):
#TESTSERIAL = 18
#TESTSERIAL = 42
MYSERIAL = 5719
grid = compute_power_levels(MYSERIAL)
print(find_largest_trio(grid)[1])
return
def do_part_2(day, test=False):
#TESTSERIAL = 18
MYSERIAL = 5719
grid = compute_power_levels(MYSERIAL)
print(find_largest_anysize(grid))
return
if __name__ == "__main__":
do_part_1(11, test=False)
do_part_2(11, test=False)
| 24.389831 | 78 | 0.583739 |
b18be6cac1a00ed4cf0143ae74ccd170e4385657 | 2,490 | py | Python | code/analysis/delayedfeedback/optimalcontrolutils.py | dmytrov/stochasticcontrol | a289d5c0953c4a328b2177f51168588248c00f2c | [
"MIT"
] | null | null | null | code/analysis/delayedfeedback/optimalcontrolutils.py | dmytrov/stochasticcontrol | a289d5c0953c4a328b2177f51168588248c00f2c | [
"MIT"
] | null | null | null | code/analysis/delayedfeedback/optimalcontrolutils.py | dmytrov/stochasticcontrol | a289d5c0953c4a328b2177f51168588248c00f2c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import analysis.delayedfeedback.fittingutils as fu
class InverseOptimalControl(object):
def __init__(self):
pass
    def infer_cost(self, trajectories):
""" Infers the goal cost of the optimal controller.
Assumptions:
- cost is linear w.r.t. some terms:
- integral over time (total time)
- integral over energy used for the control (F*S).
Force is proportional to acceleration, mass is constant:
F = m*a
- integral over jerk (path third derivative)
- control u is force;
- control is optimal.
Optimal control minimizes the functional of cost.
We assume the control is optimal.
Find the weights of the cost terms which minimize the cost functional
for all trajectories plus some noise.
"""
# Construct the cost terms
pass
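# Illustrative sketch (hypothetical helper, not a full inverse-optimal-control
# solution): if each trajectory is summarised by a feature vector such as
# (total time, control energy, integrated squared jerk) and a noisy total cost is
# available per trajectory, the term weights described in infer_cost() can be
# recovered with an ordinary least-squares fit.
def _example_cost_weight_fit(feature_matrix, reference_costs):
    features = np.asarray(feature_matrix, dtype=float)
    costs = np.asarray(reference_costs, dtype=float)
    weights, *_ = np.linalg.lstsq(features, costs, rcond=None)
    return weights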
def fit_trajectory_nonesense(trials, maxpoly=5, maxexp=2, ax=None):
""" WARNING!!! NONESENSE!!!
Fit polynomial and exponentials as a solution to an optimal
reaching problem.
Fit a function f: t -> x
Assumptions:
- time is approximately the same. Motion is from time 0 to 1.
- total cost is a sum of quadratic costs
of derivatives of different orders up to maxorder.
- trajectories are optimal solutions + noise.
Arguments:
- start point
- end point
- trials
Returns:
- callable fitted function x(t)
"""
# Normalize time to [0, 1]
traces = [trial.motiontrajectoryinterpolated[trial.a:trial.b] for trial in trials]
times = np.hstack([np.linspace(0, 1, len(trace)) for trace in traces])
# For diagonal quadratic cost, coordinates are independent.
xs = np.hstack([trace[:, 0] for trace in traces])
ys = np.hstack([trace[:, 1] for trace in traces])
a, b, c, ystar = fu.fit_polynomial_exponential(times, xs, maxpoly, maxexp)
print("X:", a, b, c)
fx = fu.PolynomialExponential(a, b, c)
a, b, c, ystar = fu.fit_polynomial_exponential(times, ys, maxpoly, maxexp)
print("Y:", a, b, c)
fy = fu.PolynomialExponential(a, b, c)
    if ax is not None:
        for trace in traces:
            ax.plot(trace[:, 0], trace[:, 1], "b.", alpha=0.3)
        t = np.linspace(0, 1, 100)
        ax.plot(fx(t), fy(t), "r")
    return fx, fy
| 31.125 | 86 | 0.593976 |
18a042ca97932e8ef946668912e6886a8c544f26 | 16,798 | py | Python | pydantic/networks.py | fictorial/pydantic | 9d631a3429a66f30742c1a52c94ac18ec6ba848d | [
"MIT"
] | 1 | 2020-05-03T06:32:47.000Z | 2020-05-03T06:32:47.000Z | pydantic/networks.py | fictorial/pydantic | 9d631a3429a66f30742c1a52c94ac18ec6ba848d | [
"MIT"
] | 152 | 2020-07-29T06:20:57.000Z | 2021-10-04T08:01:55.000Z | pydantic/networks.py | amirkdv/pydantic | ef4678999f94625819ebad61b44ea264479aeb0a | [
"MIT"
] | 1 | 2022-03-01T09:58:06.000Z | 2022-03-01T09:58:06.000Z | import re
from ipaddress import (
IPv4Address,
IPv4Interface,
IPv4Network,
IPv6Address,
IPv6Interface,
IPv6Network,
_BaseAddress,
_BaseNetwork,
)
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
Generator,
Optional,
Pattern,
Set,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from . import errors
from .utils import Representation, update_not_none
from .validators import constr_length_validator, str_validator
if TYPE_CHECKING:
import email_validator
from typing_extensions import TypedDict
from .config import BaseConfig
from .fields import ModelField
from .typing import AnyCallable
CallableGenerator = Generator[AnyCallable, None, None]
class Parts(TypedDict, total=False):
scheme: str
user: Optional[str]
password: Optional[str]
ipv4: Optional[str]
ipv6: Optional[str]
domain: Optional[str]
port: Optional[str]
path: Optional[str]
query: Optional[str]
fragment: Optional[str]
else:
email_validator = None
NetworkType = Union[str, bytes, int, Tuple[Union[str, bytes, int], Union[str, int]]]
__all__ = [
'AnyUrl',
'AnyHttpUrl',
'FileUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'AmqpDsn',
'RedisDsn',
'KafkaDsn',
'validate_email',
]
_url_regex_cache = None
_ascii_domain_regex_cache = None
_int_domain_regex_cache = None
def url_regex() -> Pattern[str]:
global _url_regex_cache
if _url_regex_cache is None:
_url_regex_cache = re.compile(
r'(?:(?P<scheme>[a-z][a-z0-9+\-.]+)://)?' # scheme https://tools.ietf.org/html/rfc3986#appendix-A
r'(?:(?P<user>[^\s:/]*)(?::(?P<password>[^\s/]*))?@)?' # user info
r'(?:'
r'(?P<ipv4>(?:\d{1,3}\.){3}\d{1,3})(?=$|[/:#?])|' # ipv4
r'(?P<ipv6>\[[A-F0-9]*:[A-F0-9:]+\])(?=$|[/:#?])|' # ipv6
r'(?P<domain>[^\s/:?#]+)' # domain, validation occurs later
r')?'
r'(?::(?P<port>\d+))?' # port
r'(?P<path>/[^\s?#]*)?' # path
r'(?:\?(?P<query>[^\s#]*))?' # query
r'(?:#(?P<fragment>[^\s#]*))?', # fragment
re.IGNORECASE,
)
return _url_regex_cache
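# Example of the named groups this pattern yields (based on the regex above):
# url_regex().match('https://user:pass@example.com:8080/a/b?x=1#frag').groupdict()
# -> {'scheme': 'https', 'user': 'user', 'password': 'pass', 'ipv4': None,
#     'ipv6': None, 'domain': 'example.com', 'port': '8080', 'path': '/a/b',
#     'query': 'x=1', 'fragment': 'frag'}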
def ascii_domain_regex() -> Pattern[str]:
global _ascii_domain_regex_cache
if _ascii_domain_regex_cache is None:
ascii_chunk = r'[_0-9a-z](?:[-_0-9a-z]{0,61}[_0-9a-z])?'
ascii_domain_ending = r'(?P<tld>\.[a-z]{2,63})?\.?'
_ascii_domain_regex_cache = re.compile(
fr'(?:{ascii_chunk}\.)*?{ascii_chunk}{ascii_domain_ending}', re.IGNORECASE
)
return _ascii_domain_regex_cache
def int_domain_regex() -> Pattern[str]:
global _int_domain_regex_cache
if _int_domain_regex_cache is None:
int_chunk = r'[_0-9a-\U00040000](?:[-_0-9a-\U00040000]{0,61}[_0-9a-\U00040000])?'
int_domain_ending = r'(?P<tld>(\.[^\W\d_]{2,63})|(\.(?:xn--)[_0-9a-z-]{2,63}))?\.?'
_int_domain_regex_cache = re.compile(fr'(?:{int_chunk}\.)*?{int_chunk}{int_domain_ending}', re.IGNORECASE)
return _int_domain_regex_cache
class AnyUrl(str):
strip_whitespace = True
min_length = 1
max_length = 2 ** 16
allowed_schemes: Optional[Collection[str]] = None
tld_required: bool = False
user_required: bool = False
host_required: bool = True
hidden_parts: Set[str] = set()
__slots__ = ('scheme', 'user', 'password', 'host', 'tld', 'host_type', 'port', 'path', 'query', 'fragment')
@no_type_check
def __new__(cls, url: Optional[str], **kwargs) -> object:
return str.__new__(cls, cls.build(**kwargs) if url is None else url)
def __init__(
self,
url: str,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: Optional[str] = None,
tld: Optional[str] = None,
host_type: str = 'domain',
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
) -> None:
str.__init__(url)
self.scheme = scheme
self.user = user
self.password = password
self.host = host
self.tld = tld
self.host_type = host_type
self.port = port
self.path = path
self.query = query
self.fragment = fragment
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
url = scheme + '://'
if user:
url += user
if password:
url += ':' + password
if user or password:
url += '@'
url += host
if port and 'port' not in cls.hidden_parts:
url += ':' + port
if path:
url += path
if query:
url += '?' + query
if fragment:
url += '#' + fragment
return url
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length, format='uri')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any, field: 'ModelField', config: 'BaseConfig') -> 'AnyUrl':
if value.__class__ == cls:
return value
value = str_validator(value)
if cls.strip_whitespace:
value = value.strip()
url: str = cast(str, constr_length_validator(value, field, config))
m = url_regex().match(url)
# the regex should always match, if it doesn't please report with details of the URL tried
assert m, 'URL regex failed unexpectedly'
original_parts = cast('Parts', m.groupdict())
parts = cls.apply_default_parts(original_parts)
parts = cls.validate_parts(parts)
host, tld, host_type, rebuild = cls.validate_host(parts)
if m.end() != len(url):
raise errors.UrlExtraError(extra=url[m.end() :])
return cls(
None if rebuild else url,
scheme=parts['scheme'],
user=parts['user'],
password=parts['password'],
host=host,
tld=tld,
host_type=host_type,
port=parts['port'],
path=parts['path'],
query=parts['query'],
fragment=parts['fragment'],
)
@classmethod
def validate_parts(cls, parts: 'Parts') -> 'Parts':
"""
A method used to validate parts of an URL.
Could be overridden to set default values for parts if missing
"""
scheme = parts['scheme']
if scheme is None:
raise errors.UrlSchemeError()
if cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
port = parts['port']
if port is not None and int(port) > 65_535:
raise errors.UrlPortError()
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def validate_host(cls, parts: 'Parts') -> Tuple[str, Optional[str], str, bool]:
host, tld, host_type, rebuild = None, None, None, False
for f in ('domain', 'ipv4', 'ipv6'):
host = parts[f] # type: ignore[misc]
if host:
host_type = f
break
if host is None:
if cls.host_required:
raise errors.UrlHostError()
elif host_type == 'domain':
is_international = False
d = ascii_domain_regex().fullmatch(host)
if d is None:
d = int_domain_regex().fullmatch(host)
if d is None:
raise errors.UrlHostError()
is_international = True
tld = d.group('tld')
if tld is None and not is_international:
d = int_domain_regex().fullmatch(host)
assert d is not None
tld = d.group('tld')
is_international = True
if tld is not None:
tld = tld[1:]
elif cls.tld_required:
raise errors.UrlHostTldError()
if is_international:
host_type = 'int_domain'
rebuild = True
host = host.encode('idna').decode('ascii')
if tld is not None:
tld = tld.encode('idna').decode('ascii')
return host, tld, host_type, rebuild # type: ignore
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {}
@classmethod
def apply_default_parts(cls, parts: 'Parts') -> 'Parts':
for key, value in cls.get_default_parts(parts).items():
if not parts[key]: # type: ignore[misc]
parts[key] = value # type: ignore[misc]
return parts
def __repr__(self) -> str:
extra = ', '.join(f'{n}={getattr(self, n)!r}' for n in self.__slots__ if getattr(self, n) is not None)
return f'{self.__class__.__name__}({super().__repr__()}, {extra})'
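# Typical usage (illustrative, via the HttpUrl subclass defined below and standard
# pydantic model validation):
#
#     from pydantic import BaseModel
#
#     class Site(BaseModel):
#         url: HttpUrl
#
#     site = Site(url='https://example.com/docs?page=2')
#     site.url.host   # 'example.com'
#     site.url.path   # '/docs'
#     site.url.query  # 'page=2'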
class AnyHttpUrl(AnyUrl):
allowed_schemes = {'http', 'https'}
class HttpUrl(AnyHttpUrl):
tld_required = True
# https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
max_length = 2083
hidden_parts = {'port'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {'port': '80' if parts['scheme'] == 'http' else '443'}
class FileUrl(AnyUrl):
allowed_schemes = {'file'}
host_required = False
class PostgresDsn(AnyUrl):
allowed_schemes = {
'postgres',
'postgresql',
'postgresql+asyncpg',
'postgresql+pg8000',
'postgresql+psycopg2',
'postgresql+psycopg2cffi',
'postgresql+py-postgresql',
'postgresql+pygresql',
}
user_required = True
class AmqpDsn(AnyUrl):
allowed_schemes = {'amqp', 'amqps'}
host_required = False
class RedisDsn(AnyUrl):
allowed_schemes = {'redis', 'rediss'}
host_required = False
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {
'domain': 'localhost' if not (parts['ipv4'] or parts['ipv6']) else '',
'port': '6379',
'path': '/0',
}
class KafkaDsn(AnyUrl):
allowed_schemes = {'kafka'}
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {
'domain': 'localhost',
'port': '9092',
}
def stricturl(
*,
strip_whitespace: bool = True,
min_length: int = 1,
max_length: int = 2 ** 16,
tld_required: bool = True,
host_required: bool = True,
allowed_schemes: Optional[Collection[str]] = None,
) -> Type[AnyUrl]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
min_length=min_length,
max_length=max_length,
tld_required=tld_required,
host_required=host_required,
allowed_schemes=allowed_schemes,
)
return type('UrlValue', (AnyUrl,), namespace)
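# Illustrative usage: stricturl() builds a constrained URL type on the fly, e.g.
#
#     FtpUrl = stricturl(allowed_schemes={'ftp'}, tld_required=False)
#
#     class Mirror(BaseModel):
#         address: FtpUrl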
def import_email_validator() -> None:
global email_validator
try:
import email_validator
except ImportError as e:
raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e
class EmailStr(str):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
# included here and below so the error happens straight away
import_email_validator()
yield str_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> str:
return validate_email(value)[1]
class NameEmail(Representation):
__slots__ = 'name', 'email'
def __init__(self, name: str, email: str):
self.name = name
self.email = email
def __eq__(self, other: Any) -> bool:
return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='name-email')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
import_email_validator()
yield cls.validate
@classmethod
def validate(cls, value: Any) -> 'NameEmail':
if value.__class__ == cls:
return value
value = str_validator(value)
return cls(*validate_email(value))
def __str__(self) -> str:
return f'{self.name} <{self.email}>'
class IPvAnyAddress(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyaddress')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Union[str, bytes, int]) -> Union[IPv4Address, IPv6Address]:
try:
return IPv4Address(value)
except ValueError:
pass
try:
return IPv6Address(value)
except ValueError:
raise errors.IPvAnyAddressError()
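# Illustrative usage: as a model field type, IPvAnyAddress accepts either family, e.g.
#
#     class Peer(BaseModel):
#         ip: IPvAnyAddress
#
#     Peer(ip='192.168.0.1').ip  # IPv4Address('192.168.0.1')
#     Peer(ip='::1').ip          # IPv6Address('::1')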
class IPvAnyInterface(_BaseAddress):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyinterface')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Interface, IPv6Interface]:
try:
return IPv4Interface(value)
except ValueError:
pass
try:
return IPv6Interface(value)
except ValueError:
raise errors.IPvAnyInterfaceError()
class IPvAnyNetwork(_BaseNetwork): # type: ignore
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanynetwork')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: NetworkType) -> Union[IPv4Network, IPv6Network]:
# Assume IP Network is defined with a default value for ``strict`` argument.
# Define your own class if you want to specify network address check strictness.
try:
return IPv4Network(value)
except ValueError:
pass
try:
return IPv6Network(value)
except ValueError:
raise errors.IPvAnyNetworkError()
pretty_email_regex = re.compile(r'([\w ]*?) *<(.*)> *')
def validate_email(value: Union[str]) -> Tuple[str, str]:
"""
Brutally simple email address validation. Note unlike most email address validation
* raw ip address (literal) domain parts are not allowed.
* "John Doe <local_part@domain.com>" style "pretty" email addresses are processed
* the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
solution is really possible.
    * spaces are stripped from the beginning and end of addresses but no error is raised
See RFC 5322 but treat it with suspicion, there seems to exist no universally acknowledged test for a valid email!
"""
if email_validator is None:
import_email_validator()
m = pretty_email_regex.fullmatch(value)
name: Optional[str] = None
if m:
name, value = m.groups()
email = value.strip()
try:
email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise errors.EmailError() from e
at_index = email.index('@')
local_part = email[:at_index] # RFC 5321, local part must be case-sensitive.
global_part = email[at_index:].lower()
return name or local_part, local_part + global_part
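# Illustrative usage (requires the optional email-validator dependency):
#
#     validate_email('John Doe <John.Doe@Example.COM>')
#     # -> ('John Doe', 'John.Doe@example.com')  -- the local part keeps its case,
#     #    the domain is lowercased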
| 29.730973 | 118 | 0.595666 |
055745599eaad55ed775b331593a5de7185c78f9 | 1,057 | py | Python | reads/models.py | mguarascio/runnerreads-com | 3bc877cf24370cf881a98a1c5915693464bc69e8 | [
"MIT"
] | null | null | null | reads/models.py | mguarascio/runnerreads-com | 3bc877cf24370cf881a98a1c5915693464bc69e8 | [
"MIT"
] | null | null | null | reads/models.py | mguarascio/runnerreads-com | 3bc877cf24370cf881a98a1c5915693464bc69e8 | [
"MIT"
] | null | null | null | from django.db import models
class Book(models.Model):
title = models.CharField(max_length=255)
link = models.CharField(max_length=2000)
ASIN = models.CharField(max_length=20)
large_image = models.CharField(max_length=255, null=True)
medium_image = models.CharField(max_length=255, null=True)
small_image = models.CharField(max_length=255, null=True)
tiny_image = models.CharField(max_length=255, null=True)
rank = models.IntegerField(null=True)
product_group = models.CharField(max_length=50, null=True)
def __str__(self):
return self.title + ' : ' + self.ASIN
class Comment(models.Model):
book = models.ForeignKey(Book, related_name='comments', on_delete=models.CASCADE)
text = models.TextField()
link = models.CharField(max_length=255)
score = models.IntegerField(null=True)
date_time = models.DateTimeField(null=True)
user = models.CharField(max_length=100, null=True)
source = models.CharField(max_length=50, null=True)
def __str__(self):
return self.link | 37.75 | 85 | 0.719016 |
0959210a0d1290d1d5504f7ce2a2b580078c3805 | 36,680 | py | Python | __init__.py | rocketbot-cl/MercadoPago | 2bf71bb28626afbfe10e83c630503be4f1150396 | [
"MIT"
] | null | null | null | __init__.py | rocketbot-cl/MercadoPago | 2bf71bb28626afbfe10e83c630503be4f1150396 | [
"MIT"
] | null | null | null | __init__.py | rocketbot-cl/MercadoPago | 2bf71bb28626afbfe10e83c630503be4f1150396 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Base for developing external modules.
To get the module/function being called:
    GetParams("module")
To get the variables sent from a Rocketbot form/command:
    var = GetParams(variable)
    Each "variable" is defined in the forms section of the package.json file
To modify a Rocketbot variable:
    SetVar(Variable_Rocketbot, "dato")
To get a Rocketbot variable:
    var = GetVar(Variable_Rocketbot)
To get the selected option:
    opcion = GetParams("option")
To install libraries, open a terminal inside the "libs" folder and run
    pip install <package> -t .
"""
import datetime
import os
import sys
base_path = tmp_global_obj["basepath"]
cur_path = base_path + 'modules' + os.sep + \
'mercadopago' + os.sep + 'libs' + os.sep
if cur_path not in sys.path:
sys.path.append(cur_path)
import mercadopago
import requests  # used below for the direct calls to the MercadoPago REST API
module = GetParams("module")
global items, sdk, testkey, payments_id
items = []
if module == "login":
try:
testkey = GetParams("testkey")
sdk = mercadopago.SDK(testkey)
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
if module == "add_recipient":
email = GetParams("email")
name = GetParams("name")
phone = GetParams("phone")
try:
customer_data = {
"email": email,
"phone": phone,
"description": name
}
customer_response = sdk.customer().create(customer_data)
customer = customer_response["response"]
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
if module == "add_item":
amount = GetParams("amount")
quantity = GetParams("quantity")
item = GetParams("item")
amount = int(amount)
try:
temp = {"title": item, "quantity": quantity, "unit_price": amount}
items.append(temp)
print(items)
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
if module == "create_invoice":
total = GetParams("total")
payment_name = GetParams("payment_name")
payment_method = GetParams("payment_method")
email = GetParams("email")
total = int(total)
try:
preference_data = {
"items": items
}
preference_response = sdk.preference().create(preference_data)
preference = preference_response["response"]
payment_data = {
"transaction_amount": total,
"description": payment_name,
"payment_method_id": payment_method,
"payer": {
"email": email
}
}
payment_response = sdk.payment().create(payment_data)
payment = payment_response["response"]
print(payment)
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
if module == "get_invoice":
id = GetParams("id")
var = GetParams("var")
try:
auth = 'Bearer ' + testkey
headers = {
'Authorization': auth,
}
url = 'https://api.mercadopago.com/v1/payments/' + id
response = requests.get(url, headers=headers)
resp = response.json()
SetVar(var, resp)
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
if module == "search_payments":
id = GetParams("id")
criteria = GetParams("criteria")
sort = GetParams("sort")
var = GetParams("var")
try:
if id is None:
id = ""
if not criteria:
criteria = "desc"
if not sort:
sort = "date_created"
auth = 'Bearer ' + testkey
headers = {
'Authorization': auth,
}
url = "https://api.mercadopago.com/v1/payments/search?sort=" + sort + "&criteria=" + criteria + "&external_reference=" + id
response = requests.get(url, headers=headers)
res = response.json()
payments_id = [result["id"] for result in res["results"]]
SetVar(var, payments_id)
except Exception as e:
print("\x1B[" + "31;40mError\x1B[" + "0m")
PrintException()
raise e
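# Note on the shape assumed above: the list comprehension expects the search endpoint
# to return JSON like {"results": [{"id": ...}, ...]}, so `payments_id` ends up as a
# plain list of payment ids.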
"""{
"en": {
"title": "Create Invoice",
"description": "Create an invoice for the customer",
"title_options": null,
"options": null
},
"es": {
"title": "Crear factura",
"description": "Crea una factura para el cliente",
"title_options": null,
"options": null
},
"form": {
"css": "modal-lg",
"inputs": [
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Total:",
"en": "Total:"
},
"help": {
"es": " ",
"en": " "
},
"id": "total",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Nombre de pago",
"en": "Payment Name:"
},
"help": {
"es": " ",
"en": " "
},
"id": "payment_name",
"css": "col-lg-6"
},
{
"type": "select",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Método de pago",
"en": "Payment Method:"
},
"options": [
{
"title": "credit card",
"value": "credit_card"
},
{
"title": "debit card",
"value": "debit_card"
}
],
"help": {
"es": " ",
"en": " "
},
"id": "payment_method",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Correo electrónico:",
"en": "Email:"
},
"help": {
"es": " ",
"en": " "
},
"id": "email",
"css": "col-lg-6"
}
]
},
"video_youtube": "",
"icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA0gAAAHVCAMAAAD8XitdAAAANlBMVEX///9+kMVieLlDXaoONpgoRp7N1OkAnuMAI4zm7fa1v96aqNIur+gAgtDC6PgAXbWQ1fNfwu6g3K/TAAAYvklEQVR42uzc0VLzOAyG4dqWjGRPt/d/t5ukUONu/9A6/84k5X2GIygZDvIhRbF9AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAe+K5lDCJqYkhhFpKPgFY56WGlMTWaYqhZD8BuJdrSCpiTxETEY2hkCbgi9eQHkZIevYAaQImucbUZ0TkcrmcJx/nj870jdn007vPawrlBPxWXkIS6RI052fVeQnUxxIo+R6mWClM+IXmFKm0DC016BXnc58m0VQZ6eF3KbGl6HJZ2rgxS5qkZYm6hF8j11uKpDVzw7o+T1Nk+IDfoMSvFPWPRNvDJLR4+CW8RpXW0K0az5ImZuJ4Y16T3jq6j//BrccTpcPDm/Ka5OcUbc8SUcIbyzXpQIxGzGWJKOEdeYvRx4DBsiQaGDvgfXiJYzHaXpUSUcK7yCGNxmh7VaK/w3vwEmVDjLZXJfo7vIEcdFuMzovLefZ6lJYMs3AIB3ctRzKaoylAE/l0mUxpGogST0o4NN9Sjs7dxqNur8XrRSmyYwmHlYMsORocFYj9QcvSC0WJmQOOyUuS0Tewt60RoppSXKSkbQPTlKUXQrn8HTwo4Yi8JDO7RunFLH1byB1rydm/zuiqMXU7MF67Hg9KOB6vutQTm7yYpbN8pigU/8NpQy1KT75PWvrESJJwMNccxRr6nbDPl4+VQxi8xKTtms9urRCShKPxIGYS/POuF/t5Q+ztjCCbSCq+ntNWlm7XXNk4KxprFLNEknAgtxy13Xxiq03euRvTafAn3lC1fPbnd92fMiSicynyQJJwKMstq60167LUqkjffzWSyukJnmP6dk25NCJ2I59NIknC0VTtctRnqYXpcjvw0b6TmJ+fDF6v2dxd6u4AB5KEA6mtHt3JoR2reu/6g9YRPsnL42vKpBv7tZrE+yQcQVEzqWs3fqsi3VGpVe3xb/rVSphqTKpyo5pCvQtRq0mBJGH/fI5D9R+qyPfxg2mc+6+s1n7zrn+bheynVTnnMn+tfi6aGWscsH/Jnvuf77nUMKklnyZfOfrvREHsixb/K3+fsIIVexfELPrpFWs5CmLfaT5t9Y+aKQMH7FuRsdvU06McVbWmjSK2yXPUae6wZ65jjVOOD3LkUWwmsWbPNdki5e1Fk8ck7FuwoaGYR5vEU+czOW2Qnq/B0rI17YnmDruWx17TeLBJ9D6Tavcnl3jWv9LeZWUGjj2ba0gZXJkX/UFbl4o/iFz77KAoTO6wX2XoMd6r2H0hK1H6ctR/WFLeWJLYUoH9SmaSh1ZC9Mnwqq0c3btuvNWydRWT1BOwR4MFKdyP4jyqTWL2P0z4pM0gBnmiJGGnPC4FaSRIErtpnaxPFHKQtmdpUBBTnpKwR6MvOms7obudt6/VfzouT7eUlJwY3GGXlhbtn8F72kRTnKSkNos/7TTXtnFpuCQlShL2x9UsDZ/b1XYkrR97348cNkShKOMG7FFe1viM8ByS2I2ulqN+QD5+PL4nVtzhX/bOQLmRVIeiBgktgvLz+/+v3WmSsSxCU9hJvO3kntmt1HS12z1VnAgEiAPScgYP9OxsZx6lDQpVTgtY8g59O/CDUJr17Na2J5Vaiuh9B8akIOjbgZ+DcM7h9FT0Uyb9D3k7cEBKzvz/03PRSp9I3kXMyYLD0ZLfsYqenkohbsk7DJLAz6Ata8iteM/zZLLVrUXvH5FFzgmDJHAshHLm98KmT5VJwr3JOy01BErtdcMJgCMh6V2k58pkpz1XXY1E10kriAQOh+ScXc3tTabnuKQ1rW2b3SSKKRuYkgVHo4nUzk++lYlCFX1S8i4HmVtUQmJX0RjVhMDh2EQ6X4/0v5EpxVpUT99LaSaRzFIL6VaiVsD/ApHA0ZCc+ezPJGdz6dvjkhDnPM5mq4qzyM5xhkjgcLxHJMN389J3jpds5R0XPXk2i/g2FN28JLp24HCYSF6mq0r8zWFJI39I3qmUmLpQZKBrB47HW7LBGLmUwreqFNidZqFawq1FHzQ/IyKBwyE5sxPJu2SVu9WdePQ5us9Xtoy2SiUeWoSuHTgwFpHmLnHQa2EGvh7u+phUJZTuQmomyUklpA8WISKB47OJxP/M+KtSKmrD/yibViFSDEXvL7zfrwyS9mCy/MKbRRAJvApzkazl+gOTfaMPevegqP+MUjbMoiEXiAQOh+Q/TDUylTYohkC5I8n9K857k6LTaMYFS4TA4RD2+e+5Slx1eI4Y322SueCTd82iORcsWgWHQ5PPNsxN2jy6msSxqKo0AZLKxl0mkfhrlZde5YxtFOB4KG1KLIoUxfbVWuVgaVbx9v9yEVUNg5VBkS0iTV8E1U/A0dCYbbXdjIs7QqmmGwskX+HFyt6q0Uxye6POCy+CHbLgaGhoI5M1kSzclFidjPefuizhoxBkIk0DEmo2gMNRWuu/VyQV9c/gUMv7ArlFk9RMulMkVBECx0M4L+XtzpZr2FCfQ6eielKVyOuHLkvoApgmE2kmNOrageMhlJeTZSR7D9FrmLljtlSDP1c2WLJh9hrINYADojFbumFxHlRrmXi555vEKN0VZ1JNZvT0LXDUGDggJa+HJKYqJ62Rw+Tg8b12XhKPDjtvDxWpxHkxIGXCEAkcDuE7QlJORJQ474tU015yWuPHEqkSrg9deotLxhAJHBOlvJoBd3Xl7j8sQqhVhOyvGSspO8wigYNSc16dlL1cRdJJRGIKtehIsoFJZWXRtwUk9OzAUVFeMclKdl14NpETOb8tFqoyDFe9SZI2hXxhhnlIZPTswBHR+B4Q/lnhra5cqrOOYoNjGRdX9SYJbw6fz2vL/ZCzA8dF/AhlKTCQTMZBKbWwRCsmVTc+m3JBuWJwZJQWTbLIYKeEifSTRaGWGsjuEXUJb24muWTDedlfzMaCA1PyG5d7TKIqepIauwAhamdNcNRNFdfHE2dSiZsay1+asc4OHBilvDxMstpCKYYQU2bdP2silWYpOZMiv1dwVanU/L3HI67o2YGjUnJv0vqUEsv+SjqOb7kMqtqZxBRjJF736JIRkMDRUcr3JhyspnHdn5rN1Pp2XRwplI1Fdy8ZAQkcn8Jm0iKXC2/cJPBUtE+rJ2nieAE05CuXVY8QkMArELMzaXVKqfW4qGjzo0bp+3ZJ/Dys7ZdgZpuFXfEIc0jgBZA0NWmeA0ihiJTIOehQpBbwLCZpbBlCm4VdGh9hUQN4BcojJtmyHaLtZxyI1Ag3MUlrsi9Z9gir7MBrENnG/3fGpCsk/R4/vTlx
gquoioTUAtIDHiV4BA6P0sI67MlJStdTjtyuc+8pxUgpm0eLniJjB14I69wtN3RbFL7hCp9oy9VFUats8sDjz28eYZEdeCFqmu+xWyrdWPT26P+cquiGVHan8K0vobhC8Ai8BpEnE6XLGTxR1RLZBIghErdnNu4bHmGABF4Pb9L5AZM2OL3/bD8ejnLnM8Mj8JIouYZ/ecwk86b/u7EYjgxsngAvhMZ8y/1B6eIOr7RRDl/u1YjhEXhdlLLjgZHS5cJem/P5/Hhow2Y+8JJYTLL+3fOwXh3iEXhtNAxyBE/EenVYqQpeGTPp6SqZRsjXgZdHa8pPV6nXCAtVwetTKPfw5T/RiLEuCLwuKpEHKp2/1aKzTeBinSr4IbTuXQ9/j0u2hLyHCjwCr42WODBpU+n8JIsyBwyPwOsjIeURVmnhyyzinBGOwE/F1nB38Fe5dDaLelIQeAR+CFIpj2GT6XGJrEfXwxHhCPwgtPXvpjKdH3PIhyIPU8XoCPwstATKeS7TptN5TaGZQ6YRenXg59FUmsIbl41zo7encdngN4egEfiNbCpxdsyF8vCbQEtPiNAI/GC0VOL83aSIsRH46UiN3+oSU4BG4DegJcT0TRYl9OnAL0JqoMRfb1HAtBH4XajU8JV9vESwCPxSpIT4FYEpJYoVFoFfjDaZHreJE0EiABpaaiBKifkuhVKiGCARALeI1BCImlD7RjE3gagphPwcAPs+1RADbSQP/SHGUGEQAHeg4sAsKwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPAD0FJDjAGbbgF4HK0xJWZO2IALwMNITLkBkQB4GI2cIRIAn6SmDJEA+CwhQyQAPotGiAQARALgCAhEAgARCYBDgIgEACISeBlUpGyI7l1f+ngR0YXvandrf1lFFh7Rbqq1zu9UtfceRiT/6nUDp1+ALzmacsNOxXPXqV2fLQal1KAYquhMonbzdndx7f56eXoikpQaKbVj/7h9WRGd/3va64wjkj2Qc4Nxsib4BFJjykYK8leQyO560aFGIWUH+Rv9vXR9ZLltztGficn2jNGLzm/UcnsbU1UvUn+nJ+GoZ/AApouRqm7N1q6bYrNPGynajbvNtl6vV7PL4Fh0/lX2Vup068XmKDIUScLOAzGMAvd7RPkDHPQko+u9IFpTHpLMgr0GXobXvc+zFzWoDBN0RiwDkVQo7xBhElhlbkLYabYk7tOB8x61M8naso9IPvB5gt58PuVdqAw9MoicSHsPRGoP3M+8Ne1c56h+b8Iu7E36KGYxjxZMEsoTogw8mjsiaXoXxklgGWue63C1T1ee3lmmHuXqXZw/Q8Pcjmq3rYmklKdEmATWiZzvJck8mhkkbkJ0GJHC4jO8iLwxavclrb5/zA57nnkOwBp998Ya0+Qyx9M75O6hGCMlHnfMRmOp2jf8No8TibkLgP0DEtUqpbqMOcnAV24MRRL23xtqiORuZYQk8EhA4hRDIM49iUKIaTB8qPwxnafV2SW+Y+cpXVxgKoOkdPxgCMkg1ZHaZ4vTg2oRKSH1IvXGcZDRAKvCJLCGkjNhlIejqpZ79kGCbu8aJ/KC2v5UZ+yG+IDEwQIlZ2eIM5G0+0VggyQN4+EVfxBJcv8NDa2MkATupqZBKy7OIxmklePbfezUMqpvtH1Kg4oTwb3B6B3qzpv6y6l2gY/Fu+3fKTqPDK3ZQAU8sEbkQZvR2IvQpxZSL4F9un9AtfY+8EBTHgUan1ILul+7pHQiZcN9T3Ai2RfPX54gEliChr+VZdzINPpBUupijFH6B0SXqTBKJ5hRu0GShL+44BfYgqL3JXVZfi+S+Jhr+GEWRAIrmAtxHA7YGVJdr0nZp57HgYbUN+Mk3oM9GYWuXJfQvv8xtLKbSJLYBSQjOJE0ONV3nSsnAO5JfgfnQRw3+8ImkvvNXfd30VG7lVxAMuJERuM0QqWGxNmLRN0IyajJiTSRxUl2AuBxkYJpMOyIsbjOF8ca6u1/wTVolynwztEjbfZt85NNEJlI5caW6VZzdVoZfceUTgB8gUhR90UKft7T43/hB3ZjMSPl3rA5WgKlxLyzcqnwrgLqRfJDJI9AJPBEkWJeQpxIJOPvz2lBJK2UOO+QqguSYVppVWdL6iRBJPA8kSgvURZEWotIflvfXKT8qEg+VcInAI4h0pdFpEKcl0XirxEpnwA4hkjlsyLNVutRsDTGl0UkQUQCxxPpiyKSxOx5qyBU0rdGpHQC4GnJhhTDPvo1Y6TK2UibQ6JqT+hFynEq0jxrx0g2gP8i/U1F9zntiuSH9WE5IKWbWnZdRNpXQDGPBI4pUnXLpyd4kRYnZLVcEXUjpFR0uHx9NiHrH6BY2QCOI1JZT13XBZGidA0//f0T1FT0octvo3C25MkSodN0rZ0SdpuDJ4qk7CR4KCL1O1yNrqWHnfsi7y1ajd6O6PtrZWn1N6MmF/h2kU5pT4Jlkfw2Ct0LC3qb2SAZ35b6bRSy90XR/xZg7EcC/6lIkfOeH/WKTERyIngbS3c9jAUp3TaMyuO8tnz4nrhTE1YDY4cseKpIhceVjLVQulKnIpmMvoOo1AkRnQej20hMmP7lJXIfZko2yEzSmjJ6duCJIvlUAVOVU0NdJe80j0i++AkVfTcx9uW4wqDSq5bYRw+t2eDYEn4qrkQ/B+dgg6ro33M1ULIBPFmkfpK0Hf1VQ2TXFociGd25Me0RrjVH6bMSTdnmRzZSHRSOTTFUeyF7g0HhrlD/EIgzAhJ4pkhmgcEp8aBM6lQkSdnB3SNSsey17SP8Q6RR7W+tnKdY9jzmIZhEAk8XSSjnlcLd4V/27mi5jRyHwrA2lt0LC4UC3/9lN2orxgKmMD0TJVNU/i93MszOBU+RptjkNEjxs/Yg/u5o/Dog9YUxMYxtFRVLdvjNQYqBok9BH6T+EP1Ymu5Hmtejl1a8lOPFuNcF/3aQdpeXPgV9kOqSWnWux1P2T9pJzUc9EzlcuvuROEAIvyFI7et2cd1eG6TIyGs/zsRmubYuxqTzvXldScflbuULOcLvCVIUv56brtgFqW/iHK8odZE976vmydu8vZS3NnNnbr5E1Qfp/N/9Xw3S58c5SD/KywX732q/PZdOe/l8TgpS7vrnpoX0nHIe/0Vm7b28nuu9LdNH1/NU9lL2qqIx62/fbt7LMSM3F5mXy9db+1/P5z0r3yPwWq8jf//8xUhHIZdvL3sbewMRj9LOrSbK3u4f3PXt9dNLc+P/+/7gm2spszr8e+T6Vewelcub/PM2du9vTfavNbsDz5G3D0ce/J/rf/3yzpo3AAAAAAAAAAAAAAAAAOBXEvOdyalnZu5m9tetjTHcc+G8cowfpcDCxIdqGPfCJJYKh9u07Fa1bZtu0V7z4E23vVrHIEtYlbnqlqi6zHt9KdPxtdDG1+ZqPmqDQYkSlmSu21eqXgt9zAprlCQ11+bDx51S3obAcmzoNqWHAlI7vqTm2iR9NsighOWZbncNmXT7Pkni213qTY4ykoS12NgaLpPAtRHxtsxyjhrM7rCSlKM
uINIXDktxa8uisjNOwDLG1lOL8aPlMbHr+PEn+wlYRBkWdHyns8mdjFTmV6nSJdrLzek8mHn4GR8NMrnDkkYeA8xELPdntS+JG3YLl40yaRPPzYns7c1mbLqF4XZrMD2aIQmLkNSdbboM4PWjMV2AGFbipjb9O0zl47P04PkSuzIkYQ2uuTvXgERufL7yNvLHtoXcXP3TZ5q4OqaxBI41eO7gIXXnEqTTnSDlFIwc2DLCSXlwMGXhDqspA0+Q2vN9/OCpLAdppPgFGSUdXh8cnLkdVpPiEvqeHszzBM2iPb2fjrLI4bVRZW6HteQBJZF2t46ImKXluBqkcUpqxNKDMxms22EtedEtyxEJHxHyEQvVUeV3IyAjBUlq6yG3wtwOC7C6ph3mY4aYeySoC5LVIKVC6SaOxneyWEuaihW550++L+2DdCo8FVo36BirDViLRS687fnxvSpBAn4qSK4bQQIOBKmZ2s1zpPoLgiQECWs5HqQoDTpcxs8HicUGrO5vrNp5zZBJjVsqkjaXxqodnsjhIEVl1Pb56Je/T83XveIECWuxwzsbfDuw+7uZKloJUrMNSAZbhLAWO7TXziUFxpsZ24j0leY0Co29dngu+bWjZjtr2eET7u/+vjvKqLH7G8/FtHsfaZ6Q0WzWS+NYritBki3wPhIWZ/VwhvqDflt3HVhSnUoakIJa+4bs4A1ZrKac+ePTz13y1K4ZPaIuJclGeoy1ZzZszOywGtN64rbEWeBpJjZdbLDJGSkpmBZH9NcgiaZK+YwcpwhhOaZbprv0kctssSDylkcVGX1z8ftWMjx8DNWNAQnr2YPUU/s60sRhjmHI0SY1Ipftv8lfSFjPgSC5TLZ+q+q9M73FjwVp/nAmdlhR05fLQCMHzto/mCQ1bqPAczGdvRDRHb46p17Wuvv3LaJNcoRnYNGTPXp1c5Zwk6NgPqkYJUg7V+4ZwxP4/yCJa9ub68/ronlXqi6eg9TfuqmcHoSVpN3aUnr1cCnxGAduYo6A5KSlIAWb3WruDEdYSgrSHpWhqrdL/+0UotdruvJ/f79vRmxv66OhsjVCauW18LNJd2M4wlpSkHZmftXk48PYi0pVrbUosbTHKBMx/8FIEdZTg3ScnKqeK1u68bRSkB5B7JNwnjf+FA8Pko2ht39D5st4ykoCns3jg6TTc7aM+yzxzB4eJBnb5M5m54ZlPLVHBSl4+ULoaui2MbPDE3t4kOpB+6pD8wes2eH5pCA9usnAgITnZpreg3gE8a2h/IWEJ2T6+LcWZJAj/GFMf8FbC6K8ZIQ/iw29ig3cv3RMUnIEZD1X5nXAzxNX3tUDHsB8RIoGMQI6PfuOV4wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPhfe3BIAAAAACDo/2tvGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAhwDWjgbLhu6a/gAAAABJRU5ErkJggg==",
"module": "create_invoice",
"module_name": "MercadoPago",
"visible": true,
"options": false,
"father": "module",
"group": "scripts",
"linux": true,
"windows": true,
"mac": true,
"docker": true
},
{
"en": {
"title": "Add Item",
"description": "Add an item to the invoice",
"title_options": null,
"options": null
},
"es": {
"title": "Añadir artículo",
"description": "Agregar un artículo a la factura\n",
"title_options": null,
"options": null
},
"form": {
"css": "modal-lg",
"inputs": [
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Nombre del árticulo",
"en": "Item Name:"
},
"help": {
"es": " ",
"en": " "
},
"id": "item",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Precio:",
"en": "Price:"
},
"help": {
"es": " ",
"en": " "
},
"id": "amount",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Cantidad:",
"en": "Quantity:"
},
"help": {
"es": " ",
"en": " "
},
"id": "quantity",
"css": "col-lg-6"
}
]
},
"video_youtube": "",
"icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA0gAAAHVCAMAAAD8XitdAAAANlBMVEX///9+kMVieLlDXaoONpgoRp7N1OkAnuMAI4zm7fa1v96aqNIur+gAgtDC6PgAXbWQ1fNfwu6g3K/TAAAYvklEQVR42uzc0VLzOAyG4dqWjGRPt/d/t5ukUONu/9A6/84k5X2GIygZDvIhRbF9AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAe+K5lDCJqYkhhFpKPgFY56WGlMTWaYqhZD8BuJdrSCpiTxETEY2hkCbgi9eQHkZIevYAaQImucbUZ0TkcrmcJx/nj870jdn007vPawrlBPxWXkIS6RI052fVeQnUxxIo+R6mWClM+IXmFKm0DC016BXnc58m0VQZ6eF3KbGl6HJZ2rgxS5qkZYm6hF8j11uKpDVzw7o+T1Nk+IDfoMSvFPWPRNvDJLR4+CW8RpXW0K0az5ImZuJ4Y16T3jq6j//BrccTpcPDm/Ka5OcUbc8SUcIbyzXpQIxGzGWJKOEdeYvRx4DBsiQaGDvgfXiJYzHaXpUSUcK7yCGNxmh7VaK/w3vwEmVDjLZXJfo7vIEcdFuMzovLefZ6lJYMs3AIB3ctRzKaoylAE/l0mUxpGogST0o4NN9Sjs7dxqNur8XrRSmyYwmHlYMsORocFYj9QcvSC0WJmQOOyUuS0Tewt60RoppSXKSkbQPTlKUXQrn8HTwo4Yi8JDO7RunFLH1byB1rydm/zuiqMXU7MF67Hg9KOB6vutQTm7yYpbN8pigU/8NpQy1KT75PWvrESJJwMNccxRr6nbDPl4+VQxi8xKTtms9urRCShKPxIGYS/POuF/t5Q+ztjCCbSCq+ntNWlm7XXNk4KxprFLNEknAgtxy13Xxiq03euRvTafAn3lC1fPbnd92fMiSicynyQJJwKMstq60167LUqkjffzWSyukJnmP6dk25NCJ2I59NIknC0VTtctRnqYXpcjvw0b6TmJ+fDF6v2dxd6u4AB5KEA6mtHt3JoR2reu/6g9YRPsnL42vKpBv7tZrE+yQcQVEzqWs3fqsi3VGpVe3xb/rVSphqTKpyo5pCvQtRq0mBJGH/fI5D9R+qyPfxg2mc+6+s1n7zrn+bheynVTnnMn+tfi6aGWscsH/Jnvuf77nUMKklnyZfOfrvREHsixb/K3+fsIIVexfELPrpFWs5CmLfaT5t9Y+aKQMH7FuRsdvU06McVbWmjSK2yXPUae6wZ65jjVOOD3LkUWwmsWbPNdki5e1Fk8ck7FuwoaGYR5vEU+czOW2Qnq/B0rI17YnmDruWx17TeLBJ9D6Tavcnl3jWv9LeZWUGjj2ba0gZXJkX/UFbl4o/iFz77KAoTO6wX2XoMd6r2H0hK1H6ctR/WFLeWJLYUoH9SmaSh1ZC9Mnwqq0c3btuvNWydRWT1BOwR4MFKdyP4jyqTWL2P0z4pM0gBnmiJGGnPC4FaSRIErtpnaxPFHKQtmdpUBBTnpKwR6MvOms7obudt6/VfzouT7eUlJwY3GGXlhbtn8F72kRTnKSkNos/7TTXtnFpuCQlShL2x9UsDZ/b1XYkrR97348cNkShKOMG7FFe1viM8ByS2I2ulqN+QD5+PL4nVtzhX/bOQLmRVIeiBgktgvLz+/+v3WmSsSxCU9hJvO3kntmt1HS12z1VnAgEiAPScgYP9OxsZx6lDQpVTgtY8g59O/CDUJr17Na2J5Vaiuh9B8akIOjbgZ+DcM7h9FT0Uyb9D3k7cEBKzvz/03PRSp9I3kXMyYLD0ZLfsYqenkohbsk7DJLAz6Ata8iteM/zZLLVrUXvH5FFzgmDJHAshHLm98KmT5VJwr3JOy01BErtdcMJgCMh6V2k58pkpz1XXY1E10kriAQOh+ScXc3tTabnuKQ1rW2b3SSKKRuYkgVHo4nUzk++lYlCFX1S8i4HmVtUQmJX0RjVhMDh2EQ6X4/0v5EpxVpUT99LaSaRzFIL6VaiVsD/ApHA0ZCc+ezPJGdz6dvjkhDnPM5mq4qzyM5xhkjgcLxHJMN389J3jpds5R0XPXk2i/g2FN28JLp24HCYSF6mq0r8zWFJI39I3qmUmLpQZKBrB47HW7LBGLmUwreqFNidZqFawq1FHzQ/IyKBwyE5sxPJu2SVu9WdePQ5us9Xtoy2SiUeWoSuHTgwFpHmLnHQa2EGvh7u+phUJZTuQmomyUklpA8WISKB47OJxP/M+KtSKmrD/yibViFSDEXvL7zfrwyS9mCy/MKbRRAJvApzkazl+gOTfaMPevegqP+MUjbMoiEXiAQOh+Q/TDUylTYohkC5I8n9K857k6LTaMYFS4TA4RD2+e+5Slx1eI4Y322SueCTd82iORcsWgWHQ5PPNsxN2jy6msSxqKo0AZLKxl0mkfhrlZde5YxtFOB4KG1KLIoUxfbVWuVgaVbx9v9yEVUNg5VBkS0iTV8E1U/A0dCYbbXdjIs7QqmmGwskX+HFyt6q0Uxye6POCy+CHbLgaGhoI5M1kSzclFidjPefuizhoxBkIk0DEmo2gMNRWuu/VyQV9c/gUMv7ArlFk9RMulMkVBECx0M4L+XtzpZr2FCfQ6eielKVyOuHLkvoApgmE2kmNOrageMhlJeTZSR7D9FrmLljtlSDP1c2WLJh9hrINYADojFbumFxHlRrmXi555vEKN0VZ1JNZvT0LXDUGDggJa+HJKYqJ62Rw+Tg8b12XhKPDjtvDxWpxHkxIGXCEAkcDuE7QlJORJQ474tU015yWuPHEqkSrg9deotLxhAJHBOlvJoBd3Xl7j8sQqhVhOyvGSspO8wigYNSc16dlL1cRdJJRGIKtehIsoFJZWXRtwUk9OzAUVFeMclKdl14NpETOb8tFqoyDFe9SZI2hXxhhnlIZPTswBHR+B4Q/lnhra5cqrOOYoNjGRdX9SYJbw6fz2vL/ZCzA8dF/AhlKTCQTMZBKbWwRCsmVTc+m3JBuWJwZJQWTbLIYKeEifSTRaGWGsjuEXUJb24muWTDedlfzMaCA1PyG5d7TKIqepIauwAhamdNcNRNFdfHE2dSiZsay1+asc4OHBilvDxMstpCKYYQU2bdP2silWYpOZMiv1dwVanU/L3HI67o2YGjUnJv0vqUEsv+SjqOb7kMqtqZxBRjJF736JIRkMDRUcr3JhyspnHdn5rN1Pp2XRwplI1Fdy8ZAQkcn8Jm0iKXC2/cJPBUtE+rJ2nieAE05CuXVY8QkMArELMzaXVKqfW4qGjzo0bp+3ZJ/Dys7ZdgZpuFXfEIc0jgBZA0NWmeA0ihiJTIOehQpBbwLCZpbBlCm4VdGh9hUQN4BcojJtmyHaLtZxyI1Ag3MUlrsi9Z9gir7MBrENnG/3fGpCsk/R4/vTlx
gquoioTUAtIDHiV4BA6P0sI67MlJStdTjtyuc+8pxUgpm0eLniJjB14I69wtN3RbFL7hCp9oy9VFUats8sDjz28eYZEdeCFqmu+xWyrdWPT26P+cquiGVHan8K0vobhC8Ai8BpEnE6XLGTxR1RLZBIghErdnNu4bHmGABF4Pb9L5AZM2OL3/bD8ejnLnM8Mj8JIouYZ/ecwk86b/u7EYjgxsngAvhMZ8y/1B6eIOr7RRDl/u1YjhEXhdlLLjgZHS5cJem/P5/Hhow2Y+8JJYTLL+3fOwXh3iEXhtNAxyBE/EenVYqQpeGTPp6SqZRsjXgZdHa8pPV6nXCAtVwetTKPfw5T/RiLEuCLwuKpEHKp2/1aKzTeBinSr4IbTuXQ9/j0u2hLyHCjwCr42WODBpU+n8JIsyBwyPwOsjIeURVmnhyyzinBGOwE/F1nB38Fe5dDaLelIQeAR+CFIpj2GT6XGJrEfXwxHhCPwgtPXvpjKdH3PIhyIPU8XoCPwstATKeS7TptN5TaGZQ6YRenXg59FUmsIbl41zo7encdngN4egEfiNbCpxdsyF8vCbQEtPiNAI/GC0VOL83aSIsRH46UiN3+oSU4BG4DegJcT0TRYl9OnAL0JqoMRfb1HAtBH4XajU8JV9vESwCPxSpIT4FYEpJYoVFoFfjDaZHreJE0EiABpaaiBKifkuhVKiGCARALeI1BCImlD7RjE3gagphPwcAPs+1RADbSQP/SHGUGEQAHeg4sAsKwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPAD0FJDjAGbbgF4HK0xJWZO2IALwMNITLkBkQB4GI2cIRIAn6SmDJEA+CwhQyQAPotGiAQARALgCAhEAgARCYBDgIgEACISeBlUpGyI7l1f+ngR0YXvandrf1lFFh7Rbqq1zu9UtfceRiT/6nUDp1+ALzmacsNOxXPXqV2fLQal1KAYquhMonbzdndx7f56eXoikpQaKbVj/7h9WRGd/3va64wjkj2Qc4Nxsib4BFJjykYK8leQyO560aFGIWUH+Rv9vXR9ZLltztGficn2jNGLzm/UcnsbU1UvUn+nJ+GoZ/AApouRqm7N1q6bYrNPGynajbvNtl6vV7PL4Fh0/lX2Vup068XmKDIUScLOAzGMAvd7RPkDHPQko+u9IFpTHpLMgr0GXobXvc+zFzWoDBN0RiwDkVQo7xBhElhlbkLYabYk7tOB8x61M8naso9IPvB5gt58PuVdqAw9MoicSHsPRGoP3M+8Ne1c56h+b8Iu7E36KGYxjxZMEsoTogw8mjsiaXoXxklgGWue63C1T1ee3lmmHuXqXZw/Q8Pcjmq3rYmklKdEmATWiZzvJck8mhkkbkJ0GJHC4jO8iLwxavclrb5/zA57nnkOwBp998Ya0+Qyx9M75O6hGCMlHnfMRmOp2jf8No8TibkLgP0DEtUqpbqMOcnAV24MRRL23xtqiORuZYQk8EhA4hRDIM49iUKIaTB8qPwxnafV2SW+Y+cpXVxgKoOkdPxgCMkg1ZHaZ4vTg2oRKSH1IvXGcZDRAKvCJLCGkjNhlIejqpZ79kGCbu8aJ/KC2v5UZ+yG+IDEwQIlZ2eIM5G0+0VggyQN4+EVfxBJcv8NDa2MkATupqZBKy7OIxmklePbfezUMqpvtH1Kg4oTwb3B6B3qzpv6y6l2gY/Fu+3fKTqPDK3ZQAU8sEbkQZvR2IvQpxZSL4F9un9AtfY+8EBTHgUan1ILul+7pHQiZcN9T3Ai2RfPX54gEliChr+VZdzINPpBUupijFH6B0SXqTBKJ5hRu0GShL+44BfYgqL3JXVZfi+S+Jhr+GEWRAIrmAtxHA7YGVJdr0nZp57HgYbUN+Mk3oM9GYWuXJfQvv8xtLKbSJLYBSQjOJE0ONV3nSsnAO5JfgfnQRw3+8ImkvvNXfd30VG7lVxAMuJERuM0QqWGxNmLRN0IyajJiTSRxUl2AuBxkYJpMOyIsbjOF8ca6u1/wTVolynwztEjbfZt85NNEJlI5caW6VZzdVoZfceUTgB8gUhR90UKft7T43/hB3ZjMSPl3rA5WgKlxLyzcqnwrgLqRfJDJI9AJPBEkWJeQpxIJOPvz2lBJK2UOO+QqguSYVppVWdL6iRBJPA8kSgvURZEWotIflvfXKT8qEg+VcInAI4h0pdFpEKcl0XirxEpnwA4hkjlsyLNVutRsDTGl0UkQUQCxxPpiyKSxOx5qyBU0rdGpHQC4GnJhhTDPvo1Y6TK2UibQ6JqT+hFynEq0jxrx0g2gP8i/U1F9zntiuSH9WE5IKWbWnZdRNpXQDGPBI4pUnXLpyd4kRYnZLVcEXUjpFR0uHx9NiHrH6BY2QCOI1JZT13XBZGidA0//f0T1FT0octvo3C25MkSodN0rZ0SdpuDJ4qk7CR4KCL1O1yNrqWHnfsi7y1ajd6O6PtrZWn1N6MmF/h2kU5pT4Jlkfw2Ct0LC3qb2SAZ35b6bRSy90XR/xZg7EcC/6lIkfOeH/WKTERyIngbS3c9jAUp3TaMyuO8tnz4nrhTE1YDY4cseKpIhceVjLVQulKnIpmMvoOo1AkRnQej20hMmP7lJXIfZko2yEzSmjJ6duCJIvlUAVOVU0NdJe80j0i++AkVfTcx9uW4wqDSq5bYRw+t2eDYEn4qrkQ/B+dgg6ro33M1ULIBPFmkfpK0Hf1VQ2TXFociGd25Me0RrjVH6bMSTdnmRzZSHRSOTTFUeyF7g0HhrlD/EIgzAhJ4pkhmgcEp8aBM6lQkSdnB3SNSsey17SP8Q6RR7W+tnKdY9jzmIZhEAk8XSSjnlcLd4V/27mi5jRyHwrA2lt0LC4UC3/9lN2orxgKmMD0TJVNU/i93MszOBU+RptjkNEjxs/Yg/u5o/Dog9YUxMYxtFRVLdvjNQYqBok9BH6T+EP1Ymu5Hmtejl1a8lOPFuNcF/3aQdpeXPgV9kOqSWnWux1P2T9pJzUc9EzlcuvuROEAIvyFI7et2cd1eG6TIyGs/zsRmubYuxqTzvXldScflbuULOcLvCVIUv56brtgFqW/iHK8odZE976vmydu8vZS3NnNnbr5E1Qfp/N/9Xw3S58c5SD/KywX732q/PZdOe/l8TgpS7vrnpoX0nHIe/0Vm7b28nuu9LdNH1/NU9lL2qqIx62/fbt7LMSM3F5mXy9db+1/P5z0r3yPwWq8jf//8xUhHIZdvL3sbewMRj9LOrSbK3u4f3PXt9dNLc+P/+/7gm2spszr8e+T6Vewelcub/PM2du9vTfavNbsDz5G3D0ce/J/rf/3yzpo3AAAAAAAAAAAAAAAAAOBXEvOdyalnZu5m9tetjTHcc+G8cowfpcDCxIdqGPfCJJYKh9u07Fa1bZtu0V7z4E23vVrHIEtYlbnqlqi6zHt9KdPxtdDG1+ZqPmqDQYkSlmSu21eqXgt9zAprlCQ11+bDx51S3obAcmzoNqWHAlI7vqTm2iR9NsighOWZbncNmXT7Pkni213qTY4ykoS12NgaLpPAtRHxtsxyjhrM7rCSlKM
uINIXDktxa8uisjNOwDLG1lOL8aPlMbHr+PEn+wlYRBkWdHyns8mdjFTmV6nSJdrLzek8mHn4GR8NMrnDkkYeA8xELPdntS+JG3YLl40yaRPPzYns7c1mbLqF4XZrMD2aIQmLkNSdbboM4PWjMV2AGFbipjb9O0zl47P04PkSuzIkYQ2uuTvXgERufL7yNvLHtoXcXP3TZ5q4OqaxBI41eO7gIXXnEqTTnSDlFIwc2DLCSXlwMGXhDqspA0+Q2vN9/OCpLAdppPgFGSUdXh8cnLkdVpPiEvqeHszzBM2iPb2fjrLI4bVRZW6HteQBJZF2t46ImKXluBqkcUpqxNKDMxms22EtedEtyxEJHxHyEQvVUeV3IyAjBUlq6yG3wtwOC7C6ph3mY4aYeySoC5LVIKVC6SaOxneyWEuaihW550++L+2DdCo8FVo36BirDViLRS687fnxvSpBAn4qSK4bQQIOBKmZ2s1zpPoLgiQECWs5HqQoDTpcxs8HicUGrO5vrNp5zZBJjVsqkjaXxqodnsjhIEVl1Pb56Je/T83XveIECWuxwzsbfDuw+7uZKloJUrMNSAZbhLAWO7TXziUFxpsZ24j0leY0Co29dngu+bWjZjtr2eET7u/+vjvKqLH7G8/FtHsfaZ6Q0WzWS+NYritBki3wPhIWZ/VwhvqDflt3HVhSnUoakIJa+4bs4A1ZrKac+ePTz13y1K4ZPaIuJclGeoy1ZzZszOywGtN64rbEWeBpJjZdbLDJGSkpmBZH9NcgiaZK+YwcpwhhOaZbprv0kctssSDylkcVGX1z8ftWMjx8DNWNAQnr2YPUU/s60sRhjmHI0SY1Ipftv8lfSFjPgSC5TLZ+q+q9M73FjwVp/nAmdlhR05fLQCMHzto/mCQ1bqPAczGdvRDRHb46p17Wuvv3LaJNcoRnYNGTPXp1c5Zwk6NgPqkYJUg7V+4ZwxP4/yCJa9ub68/ronlXqi6eg9TfuqmcHoSVpN3aUnr1cCnxGAduYo6A5KSlIAWb3WruDEdYSgrSHpWhqrdL/+0UotdruvJ/f79vRmxv66OhsjVCauW18LNJd2M4wlpSkHZmftXk48PYi0pVrbUosbTHKBMx/8FIEdZTg3ScnKqeK1u68bRSkB5B7JNwnjf+FA8Pko2ht39D5st4ykoCns3jg6TTc7aM+yzxzB4eJBnb5M5m54ZlPLVHBSl4+ULoaui2MbPDE3t4kOpB+6pD8wes2eH5pCA9usnAgITnZpreg3gE8a2h/IWEJ2T6+LcWZJAj/GFMf8FbC6K8ZIQ/iw29ig3cv3RMUnIEZD1X5nXAzxNX3tUDHsB8RIoGMQI6PfuOV4wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPhfe3BIAAAAACDo/2tvGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAhwDWjgbLhu6a/gAAAABJRU5ErkJggg==",
"module": "add_item",
"module_name": "MercadoPago",
"visible": true,
"options": false,
"father": "module",
"group": "scripts",
"linux": true,
"windows": true,
"mac": true,
"docker": true
},
{
"en": {
"title": "Add Customer",
"description": "Add a recipient to the invoice",
"title_options": null,
"options": null
},
"es": {
"title": " ",
"description": " ",
"title_options": null,
"options": null
},
"form": {
"css": "modal-lg",
"inputs": [
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Nombre del cliente",
"en": "Customer Name:"
},
"help": {
"es": " ",
"en": " "
},
"id": "name",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Correo electrónico:",
"en": "Email:"
},
"help": {
"es": " ",
"en": " "
},
"id": "email",
"css": "col-lg-6"
},
{
"type": "input",
"placeholder": {
"es": " ",
"en": " "
},
"title": {
"es": "Teléfono:",
"en": "Phone:"
},
"help": {
"es": " ",
"en": " "
},
"id": "phone",
"css": "col-lg-6"
}
]
},
"video_youtube": "",
"icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA0gAAAHVCAMAAAD8XitdAAAANlBMVEX///9+kMVieLlDXaoONpgoRp7N1OkAnuMAI4zm7fa1v96aqNIur+gAgtDC6PgAXbWQ1fNfwu6g3K/TAAAYvklEQVR42uzc0VLzOAyG4dqWjGRPt/d/t5ukUONu/9A6/84k5X2GIygZDvIhRbF9AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAe+K5lDCJqYkhhFpKPgFY56WGlMTWaYqhZD8BuJdrSCpiTxETEY2hkCbgi9eQHkZIevYAaQImucbUZ0TkcrmcJx/nj870jdn007vPawrlBPxWXkIS6RI052fVeQnUxxIo+R6mWClM+IXmFKm0DC016BXnc58m0VQZ6eF3KbGl6HJZ2rgxS5qkZYm6hF8j11uKpDVzw7o+T1Nk+IDfoMSvFPWPRNvDJLR4+CW8RpXW0K0az5ImZuJ4Y16T3jq6j//BrccTpcPDm/Ka5OcUbc8SUcIbyzXpQIxGzGWJKOEdeYvRx4DBsiQaGDvgfXiJYzHaXpUSUcK7yCGNxmh7VaK/w3vwEmVDjLZXJfo7vIEcdFuMzovLefZ6lJYMs3AIB3ctRzKaoylAE/l0mUxpGogST0o4NN9Sjs7dxqNur8XrRSmyYwmHlYMsORocFYj9QcvSC0WJmQOOyUuS0Tewt60RoppSXKSkbQPTlKUXQrn8HTwo4Yi8JDO7RunFLH1byB1rydm/zuiqMXU7MF67Hg9KOB6vutQTm7yYpbN8pigU/8NpQy1KT75PWvrESJJwMNccxRr6nbDPl4+VQxi8xKTtms9urRCShKPxIGYS/POuF/t5Q+ztjCCbSCq+ntNWlm7XXNk4KxprFLNEknAgtxy13Xxiq03euRvTafAn3lC1fPbnd92fMiSicynyQJJwKMstq60167LUqkjffzWSyukJnmP6dk25NCJ2I59NIknC0VTtctRnqYXpcjvw0b6TmJ+fDF6v2dxd6u4AB5KEA6mtHt3JoR2reu/6g9YRPsnL42vKpBv7tZrE+yQcQVEzqWs3fqsi3VGpVe3xb/rVSphqTKpyo5pCvQtRq0mBJGH/fI5D9R+qyPfxg2mc+6+s1n7zrn+bheynVTnnMn+tfi6aGWscsH/Jnvuf77nUMKklnyZfOfrvREHsixb/K3+fsIIVexfELPrpFWs5CmLfaT5t9Y+aKQMH7FuRsdvU06McVbWmjSK2yXPUae6wZ65jjVOOD3LkUWwmsWbPNdki5e1Fk8ck7FuwoaGYR5vEU+czOW2Qnq/B0rI17YnmDruWx17TeLBJ9D6Tavcnl3jWv9LeZWUGjj2ba0gZXJkX/UFbl4o/iFz77KAoTO6wX2XoMd6r2H0hK1H6ctR/WFLeWJLYUoH9SmaSh1ZC9Mnwqq0c3btuvNWydRWT1BOwR4MFKdyP4jyqTWL2P0z4pM0gBnmiJGGnPC4FaSRIErtpnaxPFHKQtmdpUBBTnpKwR6MvOms7obudt6/VfzouT7eUlJwY3GGXlhbtn8F72kRTnKSkNos/7TTXtnFpuCQlShL2x9UsDZ/b1XYkrR97348cNkShKOMG7FFe1viM8ByS2I2ulqN+QD5+PL4nVtzhX/bOQLmRVIeiBgktgvLz+/+v3WmSsSxCU9hJvO3kntmt1HS12z1VnAgEiAPScgYP9OxsZx6lDQpVTgtY8g59O/CDUJr17Na2J5Vaiuh9B8akIOjbgZ+DcM7h9FT0Uyb9D3k7cEBKzvz/03PRSp9I3kXMyYLD0ZLfsYqenkohbsk7DJLAz6Ata8iteM/zZLLVrUXvH5FFzgmDJHAshHLm98KmT5VJwr3JOy01BErtdcMJgCMh6V2k58pkpz1XXY1E10kriAQOh+ScXc3tTabnuKQ1rW2b3SSKKRuYkgVHo4nUzk++lYlCFX1S8i4HmVtUQmJX0RjVhMDh2EQ6X4/0v5EpxVpUT99LaSaRzFIL6VaiVsD/ApHA0ZCc+ezPJGdz6dvjkhDnPM5mq4qzyM5xhkjgcLxHJMN389J3jpds5R0XPXk2i/g2FN28JLp24HCYSF6mq0r8zWFJI39I3qmUmLpQZKBrB47HW7LBGLmUwreqFNidZqFawq1FHzQ/IyKBwyE5sxPJu2SVu9WdePQ5us9Xtoy2SiUeWoSuHTgwFpHmLnHQa2EGvh7u+phUJZTuQmomyUklpA8WISKB47OJxP/M+KtSKmrD/yibViFSDEXvL7zfrwyS9mCy/MKbRRAJvApzkazl+gOTfaMPevegqP+MUjbMoiEXiAQOh+Q/TDUylTYohkC5I8n9K857k6LTaMYFS4TA4RD2+e+5Slx1eI4Y322SueCTd82iORcsWgWHQ5PPNsxN2jy6msSxqKo0AZLKxl0mkfhrlZde5YxtFOB4KG1KLIoUxfbVWuVgaVbx9v9yEVUNg5VBkS0iTV8E1U/A0dCYbbXdjIs7QqmmGwskX+HFyt6q0Uxye6POCy+CHbLgaGhoI5M1kSzclFidjPefuizhoxBkIk0DEmo2gMNRWuu/VyQV9c/gUMv7ArlFk9RMulMkVBECx0M4L+XtzpZr2FCfQ6eielKVyOuHLkvoApgmE2kmNOrageMhlJeTZSR7D9FrmLljtlSDP1c2WLJh9hrINYADojFbumFxHlRrmXi555vEKN0VZ1JNZvT0LXDUGDggJa+HJKYqJ62Rw+Tg8b12XhKPDjtvDxWpxHkxIGXCEAkcDuE7QlJORJQ474tU015yWuPHEqkSrg9deotLxhAJHBOlvJoBd3Xl7j8sQqhVhOyvGSspO8wigYNSc16dlL1cRdJJRGIKtehIsoFJZWXRtwUk9OzAUVFeMclKdl14NpETOb8tFqoyDFe9SZI2hXxhhnlIZPTswBHR+B4Q/lnhra5cqrOOYoNjGRdX9SYJbw6fz2vL/ZCzA8dF/AhlKTCQTMZBKbWwRCsmVTc+m3JBuWJwZJQWTbLIYKeEifSTRaGWGsjuEXUJb24muWTDedlfzMaCA1PyG5d7TKIqepIauwAhamdNcNRNFdfHE2dSiZsay1+asc4OHBilvDxMstpCKYYQU2bdP2silWYpOZMiv1dwVanU/L3HI67o2YGjUnJv0vqUEsv+SjqOb7kMqtqZxBRjJF736JIRkMDRUcr3JhyspnHdn5rN1Pp2XRwplI1Fdy8ZAQkcn8Jm0iKXC2/cJPBUtE+rJ2nieAE05CuXVY8QkMArELMzaXVKqfW4qGjzo0bp+3ZJ/Dys7ZdgZpuFXfEIc0jgBZA0NWmeA0ihiJTIOehQpBbwLCZpbBlCm4VdGh9hUQN4BcojJtmyHaLtZxyI1Ag3MUlrsi9Z9gir7MBrENnG/3fGpCsk/R4/vTlx
gquoioTUAtIDHiV4BA6P0sI67MlJStdTjtyuc+8pxUgpm0eLniJjB14I69wtN3RbFL7hCp9oy9VFUats8sDjz28eYZEdeCFqmu+xWyrdWPT26P+cquiGVHan8K0vobhC8Ai8BpEnE6XLGTxR1RLZBIghErdnNu4bHmGABF4Pb9L5AZM2OL3/bD8ejnLnM8Mj8JIouYZ/ecwk86b/u7EYjgxsngAvhMZ8y/1B6eIOr7RRDl/u1YjhEXhdlLLjgZHS5cJem/P5/Hhow2Y+8JJYTLL+3fOwXh3iEXhtNAxyBE/EenVYqQpeGTPp6SqZRsjXgZdHa8pPV6nXCAtVwetTKPfw5T/RiLEuCLwuKpEHKp2/1aKzTeBinSr4IbTuXQ9/j0u2hLyHCjwCr42WODBpU+n8JIsyBwyPwOsjIeURVmnhyyzinBGOwE/F1nB38Fe5dDaLelIQeAR+CFIpj2GT6XGJrEfXwxHhCPwgtPXvpjKdH3PIhyIPU8XoCPwstATKeS7TptN5TaGZQ6YRenXg59FUmsIbl41zo7encdngN4egEfiNbCpxdsyF8vCbQEtPiNAI/GC0VOL83aSIsRH46UiN3+oSU4BG4DegJcT0TRYl9OnAL0JqoMRfb1HAtBH4XajU8JV9vESwCPxSpIT4FYEpJYoVFoFfjDaZHreJE0EiABpaaiBKifkuhVKiGCARALeI1BCImlD7RjE3gagphPwcAPs+1RADbSQP/SHGUGEQAHeg4sAsKwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPAD0FJDjAGbbgF4HK0xJWZO2IALwMNITLkBkQB4GI2cIRIAn6SmDJEA+CwhQyQAPotGiAQARALgCAhEAgARCYBDgIgEACISeBlUpGyI7l1f+ngR0YXvandrf1lFFh7Rbqq1zu9UtfceRiT/6nUDp1+ALzmacsNOxXPXqV2fLQal1KAYquhMonbzdndx7f56eXoikpQaKbVj/7h9WRGd/3va64wjkj2Qc4Nxsib4BFJjykYK8leQyO560aFGIWUH+Rv9vXR9ZLltztGficn2jNGLzm/UcnsbU1UvUn+nJ+GoZ/AApouRqm7N1q6bYrNPGynajbvNtl6vV7PL4Fh0/lX2Vup068XmKDIUScLOAzGMAvd7RPkDHPQko+u9IFpTHpLMgr0GXobXvc+zFzWoDBN0RiwDkVQo7xBhElhlbkLYabYk7tOB8x61M8naso9IPvB5gt58PuVdqAw9MoicSHsPRGoP3M+8Ne1c56h+b8Iu7E36KGYxjxZMEsoTogw8mjsiaXoXxklgGWue63C1T1ee3lmmHuXqXZw/Q8Pcjmq3rYmklKdEmATWiZzvJck8mhkkbkJ0GJHC4jO8iLwxavclrb5/zA57nnkOwBp998Ya0+Qyx9M75O6hGCMlHnfMRmOp2jf8No8TibkLgP0DEtUqpbqMOcnAV24MRRL23xtqiORuZYQk8EhA4hRDIM49iUKIaTB8qPwxnafV2SW+Y+cpXVxgKoOkdPxgCMkg1ZHaZ4vTg2oRKSH1IvXGcZDRAKvCJLCGkjNhlIejqpZ79kGCbu8aJ/KC2v5UZ+yG+IDEwQIlZ2eIM5G0+0VggyQN4+EVfxBJcv8NDa2MkATupqZBKy7OIxmklePbfezUMqpvtH1Kg4oTwb3B6B3qzpv6y6l2gY/Fu+3fKTqPDK3ZQAU8sEbkQZvR2IvQpxZSL4F9un9AtfY+8EBTHgUan1ILul+7pHQiZcN9T3Ai2RfPX54gEliChr+VZdzINPpBUupijFH6B0SXqTBKJ5hRu0GShL+44BfYgqL3JXVZfi+S+Jhr+GEWRAIrmAtxHA7YGVJdr0nZp57HgYbUN+Mk3oM9GYWuXJfQvv8xtLKbSJLYBSQjOJE0ONV3nSsnAO5JfgfnQRw3+8ImkvvNXfd30VG7lVxAMuJERuM0QqWGxNmLRN0IyajJiTSRxUl2AuBxkYJpMOyIsbjOF8ca6u1/wTVolynwztEjbfZt85NNEJlI5caW6VZzdVoZfceUTgB8gUhR90UKft7T43/hB3ZjMSPl3rA5WgKlxLyzcqnwrgLqRfJDJI9AJPBEkWJeQpxIJOPvz2lBJK2UOO+QqguSYVppVWdL6iRBJPA8kSgvURZEWotIflvfXKT8qEg+VcInAI4h0pdFpEKcl0XirxEpnwA4hkjlsyLNVutRsDTGl0UkQUQCxxPpiyKSxOx5qyBU0rdGpHQC4GnJhhTDPvo1Y6TK2UibQ6JqT+hFynEq0jxrx0g2gP8i/U1F9zntiuSH9WE5IKWbWnZdRNpXQDGPBI4pUnXLpyd4kRYnZLVcEXUjpFR0uHx9NiHrH6BY2QCOI1JZT13XBZGidA0//f0T1FT0octvo3C25MkSodN0rZ0SdpuDJ4qk7CR4KCL1O1yNrqWHnfsi7y1ajd6O6PtrZWn1N6MmF/h2kU5pT4Jlkfw2Ct0LC3qb2SAZ35b6bRSy90XR/xZg7EcC/6lIkfOeH/WKTERyIngbS3c9jAUp3TaMyuO8tnz4nrhTE1YDY4cseKpIhceVjLVQulKnIpmMvoOo1AkRnQej20hMmP7lJXIfZko2yEzSmjJ6duCJIvlUAVOVU0NdJe80j0i++AkVfTcx9uW4wqDSq5bYRw+t2eDYEn4qrkQ/B+dgg6ro33M1ULIBPFmkfpK0Hf1VQ2TXFociGd25Me0RrjVH6bMSTdnmRzZSHRSOTTFUeyF7g0HhrlD/EIgzAhJ4pkhmgcEp8aBM6lQkSdnB3SNSsey17SP8Q6RR7W+tnKdY9jzmIZhEAk8XSSjnlcLd4V/27mi5jRyHwrA2lt0LC4UC3/9lN2orxgKmMD0TJVNU/i93MszOBU+RptjkNEjxs/Yg/u5o/Dog9YUxMYxtFRVLdvjNQYqBok9BH6T+EP1Ymu5Hmtejl1a8lOPFuNcF/3aQdpeXPgV9kOqSWnWux1P2T9pJzUc9EzlcuvuROEAIvyFI7et2cd1eG6TIyGs/zsRmubYuxqTzvXldScflbuULOcLvCVIUv56brtgFqW/iHK8odZE976vmydu8vZS3NnNnbr5E1Qfp/N/9Xw3S58c5SD/KywX732q/PZdOe/l8TgpS7vrnpoX0nHIe/0Vm7b28nuu9LdNH1/NU9lL2qqIx62/fbt7LMSM3F5mXy9db+1/P5z0r3yPwWq8jf//8xUhHIZdvL3sbewMRj9LOrSbK3u4f3PXt9dNLc+P/+/7gm2spszr8e+T6Vewelcub/PM2du9vTfavNbsDz5G3D0ce/J/rf/3yzpo3AAAAAAAAAAAAAAAAAOBXEvOdyalnZu5m9tetjTHcc+G8cowfpcDCxIdqGPfCJJYKh9u07Fa1bZtu0V7z4E23vVrHIEtYlbnqlqi6zHt9KdPxtdDG1+ZqPmqDQYkSlmSu21eqXgt9zAprlCQ11+bDx51S3obAcmzoNqWHAlI7vqTm2iR9NsighOWZbncNmXT7Pkni213qTY4ykoS12NgaLpPAtRHxtsxyjhrM7rCSlKM
uINIXDktxa8uisjNOwDLG1lOL8aPlMbHr+PEn+wlYRBkWdHyns8mdjFTmV6nSJdrLzek8mHn4GR8NMrnDkkYeA8xELPdntS+JG3YLl40yaRPPzYns7c1mbLqF4XZrMD2aIQmLkNSdbboM4PWjMV2AGFbipjb9O0zl47P04PkSuzIkYQ2uuTvXgERufL7yNvLHtoXcXP3TZ5q4OqaxBI41eO7gIXXnEqTTnSDlFIwc2DLCSXlwMGXhDqspA0+Q2vN9/OCpLAdppPgFGSUdXh8cnLkdVpPiEvqeHszzBM2iPb2fjrLI4bVRZW6HteQBJZF2t46ImKXluBqkcUpqxNKDMxms22EtedEtyxEJHxHyEQvVUeV3IyAjBUlq6yG3wtwOC7C6ph3mY4aYeySoC5LVIKVC6SaOxneyWEuaihW550++L+2DdCo8FVo36BirDViLRS687fnxvSpBAn4qSK4bQQIOBKmZ2s1zpPoLgiQECWs5HqQoDTpcxs8HicUGrO5vrNp5zZBJjVsqkjaXxqodnsjhIEVl1Pb56Je/T83XveIECWuxwzsbfDuw+7uZKloJUrMNSAZbhLAWO7TXziUFxpsZ24j0leY0Co29dngu+bWjZjtr2eET7u/+vjvKqLH7G8/FtHsfaZ6Q0WzWS+NYritBki3wPhIWZ/VwhvqDflt3HVhSnUoakIJa+4bs4A1ZrKac+ePTz13y1K4ZPaIuJclGeoy1ZzZszOywGtN64rbEWeBpJjZdbLDJGSkpmBZH9NcgiaZK+YwcpwhhOaZbprv0kctssSDylkcVGX1z8ftWMjx8DNWNAQnr2YPUU/s60sRhjmHI0SY1Ipftv8lfSFjPgSC5TLZ+q+q9M73FjwVp/nAmdlhR05fLQCMHzto/mCQ1bqPAczGdvRDRHb46p17Wuvv3LaJNcoRnYNGTPXp1c5Zwk6NgPqkYJUg7V+4ZwxP4/yCJa9ub68/ronlXqi6eg9TfuqmcHoSVpN3aUnr1cCnxGAduYo6A5KSlIAWb3WruDEdYSgrSHpWhqrdL/+0UotdruvJ/f79vRmxv66OhsjVCauW18LNJd2M4wlpSkHZmftXk48PYi0pVrbUosbTHKBMx/8FIEdZTg3ScnKqeK1u68bRSkB5B7JNwnjf+FA8Pko2ht39D5st4ykoCns3jg6TTc7aM+yzxzB4eJBnb5M5m54ZlPLVHBSl4+ULoaui2MbPDE3t4kOpB+6pD8wes2eH5pCA9usnAgITnZpreg3gE8a2h/IWEJ2T6+LcWZJAj/GFMf8FbC6K8ZIQ/iw29ig3cv3RMUnIEZD1X5nXAzxNX3tUDHsB8RIoGMQI6PfuOV4wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPhfe3BIAAAAACDo/2tvGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAhwDWjgbLhu6a/gAAAABJRU5ErkJggg==",
"module": "add_recipient",
"module_name": "MercadoPago",
"visible": true,
"options": false,
"father": "module",
"group": "scripts",
"linux": true,
"windows": true,
"mac": true,
"docker": true
}""" | 77.547569 | 8,651 | 0.803353 |
9536c43080761f0c324bcbd72551f92c5b4bf023 | 422 | py | Python | venv/Scripts/pip3-script.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | venv/Scripts/pip3-script.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | venv/Scripts/pip3-script.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | #!C:\Users\Favorsiky\PycharmProjects\learning_log\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| 32.461538 | 73 | 0.677725 |
fcb651ba1c81a06c95b7b9216ea460d4b7c10d38 | 3,974 | py | Python | virtual/lib/python3.6/site-packages/unidecode/x021.py | OKC254/flask-blog | 78dc43f6ba981822f17026b071db6aaf4680daad | [
"MIT"
] | 8 | 2015-03-05T21:09:40.000Z | 2020-02-03T09:15:09.000Z | vendor-local/lib/python/unidecode/x021.py | yvan-sraka/wprevents | 03f95150fe7c09338c3a17e00a4b85febef87789 | [
"BSD-3-Clause"
] | 29 | 2015-02-24T11:11:26.000Z | 2017-08-25T08:30:18.000Z | vendor-local/lib/python/unidecode/x021.py | Acidburn0zzz/airmozilla | 7b03af6d6efe9af00a6070f5327e10fb755c3766 | [
"BSD-3-Clause"
] | 6 | 2015-04-23T16:47:34.000Z | 2017-10-13T19:11:53.000Z | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'(sm)', # 0x20
'TEL', # 0x21
'(tm)', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'F', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'FAX', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| 15.403101 | 18 | 0.283593 |
d9db3c809a7b351ec1f7eae5822d1d4f262f19e4 | 658 | py | Python | python/lib/team.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/lib/team.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | python/lib/team.py | omardelarosa/godot-python-demo-game | c9c91b2a8e838c315dae6d6d597ce75a20318747 | [
"CC-BY-3.0"
] | null | null | null | import uuid
class Team:
NULL_ID = "-1"
DEFAULT_PROPERTIES = {
"id": NULL_ID,
"name": "Unnamed Team",
}
def __init__(self, properties={}):
self.id = (
properties["id"] if "id" in properties else str(uuid.uuid4())
) # generates a uuid
self.name = (
properties["name"]
if "name" in properties
else Team.DEFAULT_PROPERTIES["name"]
)
def __iter__(self):
"""
For supporting dict() casting...
"""
yield "id", self.id
yield "name", self.name
def __str__(self) -> str:
return str(dict(self)) | 21.225806 | 73 | 0.50304 |
17e5df6bc9bba9bf273c39b2d24e834ce15df102 | 2,798 | py | Python | protobuf/SubscribeResp_pb2.py | wonghoifung/learning-python | ad1691be1d185bfff828779a553b2c59d36d16ea | [
"MIT"
] | null | null | null | protobuf/SubscribeResp_pb2.py | wonghoifung/learning-python | ad1691be1d185bfff828779a553b2c59d36d16ea | [
"MIT"
] | null | null | null | protobuf/SubscribeResp_pb2.py | wonghoifung/learning-python | ad1691be1d185bfff828779a553b2c59d36d16ea | [
"MIT"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: SubscribeResp.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='SubscribeResp.proto',
package='',
syntax='proto2',
serialized_pb=_b('\n\x13SubscribeResp.proto\"A\n\rSubscribeResp\x12\x10\n\x08subReqID\x18\x01 \x02(\x05\x12\x10\n\x08respCode\x18\x02 \x02(\x05\x12\x0c\n\x04\x64\x65sc\x18\x03 \x02(\tB\x1e\n\x08\x63om.wongB\x12SubscribeRespProto')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SUBSCRIBERESP = _descriptor.Descriptor(
name='SubscribeResp',
full_name='SubscribeResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subReqID', full_name='SubscribeResp.subReqID', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='respCode', full_name='SubscribeResp.respCode', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='desc', full_name='SubscribeResp.desc', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=23,
serialized_end=88,
)
DESCRIPTOR.message_types_by_name['SubscribeResp'] = _SUBSCRIBERESP
SubscribeResp = _reflection.GeneratedProtocolMessageType('SubscribeResp', (_message.Message,), dict(
DESCRIPTOR = _SUBSCRIBERESP,
__module__ = 'SubscribeResp_pb2'
# @@protoc_insertion_point(class_scope:SubscribeResp)
))
_sym_db.RegisterMessage(SubscribeResp)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\010com.wongB\022SubscribeRespProto'))
# @@protoc_insertion_point(module_scope)
| 32.534884 | 232 | 0.751251 |
e014ebafc7e3efae5bcbd599f141475ea3221b4c | 6,217 | py | Python | gdk/commands/component/project_utils.py | timmattison/aws-greengrass-gdk-cli | 60a002f0f2fee84b79022662ba0cae9e0246b6f8 | [
"Apache-2.0"
] | 10 | 2022-01-15T09:50:32.000Z | 2022-03-26T16:39:49.000Z | gdk/commands/component/project_utils.py | timmattison/aws-greengrass-gdk-cli | 60a002f0f2fee84b79022662ba0cae9e0246b6f8 | [
"Apache-2.0"
] | 46 | 2021-11-30T19:49:16.000Z | 2022-03-31T07:14:23.000Z | gdk/commands/component/project_utils.py | timmattison/aws-greengrass-gdk-cli | 60a002f0f2fee84b79022662ba0cae9e0246b6f8 | [
"Apache-2.0"
] | 7 | 2021-11-30T19:49:42.000Z | 2022-03-17T16:25:34.000Z |
import json
import logging
from pathlib import Path
import boto3
import gdk.common.configuration as config_actions
import gdk.common.consts as consts
import gdk.common.exceptions.error_messages as error_messages
import gdk.common.utils as utils
import yaml
def get_supported_component_builds():
"""
Reads a json file from static location that contains information related to supported component build systems.
Parameters
----------
None
Returns
-------
(dict): Returns a dict object with supported component builds information.
"""
supported_component_builds_file = utils.get_static_file_path(consts.project_build_system_file)
if supported_component_builds_file:
with open(supported_component_builds_file, "r") as supported_builds_file:
logging.debug("Identifying build systems supported by the CLI tool with default configuration.")
return json.loads(supported_builds_file.read())
return None
def get_recipe_file():
"""
    Finds the recipe file of the component project based on its extension.
    Assuming that each component project has a single recipe file, this method looks for 'recipe.json'
    first and then 'recipe.yaml' in the current project directory.
    If neither file is found, or both are found, the correct recipe file cannot be identified.
Raises an exception if no recipe file is found in the current project directory.
Parameters
----------
None
Returns
-------
recipe_file(Path): Path of the identified recipe file.
"""
# Search for json files in current directory that contain component name and ends in .json.
logging.debug("Looking for recipe file in the project directory.")
json_file = list(Path(utils.current_directory).glob("recipe.json"))
yaml_file = list(Path(utils.current_directory).glob("recipe.yaml"))
if not json_file and not yaml_file:
logging.error("Could not find 'recipe.json' or 'recipe.yaml' in the project directory.")
raise Exception(error_messages.PROJECT_RECIPE_FILE_NOT_FOUND)
if json_file and yaml_file:
logging.error("Found both 'recipe.json' and 'recipe.yaml' in the given project directory.")
raise Exception(error_messages.PROJECT_RECIPE_FILE_NOT_FOUND)
recipe_file = (json_file + yaml_file)[0].resolve()
logging.info("Found component recipe file '{}' in the project directory.".format(recipe_file.name))
return recipe_file
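# Illustrative behaviour of get_recipe_file() (hypothetical project layouts):
# a directory containing only recipe.yaml resolves to that file, while a
# directory with both recipe.json and recipe.yaml, or with neither, raises
# the PROJECT_RECIPE_FILE_NOT_FOUND error referenced above.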
def parse_recipe_file(component_recipe_file):
"""
    Loads the recipe file of the current project as a dict object.
Uses yaml or json module to load the recipe file based on its extension.
Parameters
----------
component_recipe_file(pathlib.Path): Path of the component recipe file.
Returns
-------
(dict): Returns a dict object with the component recipe file.
"""
logging.debug("Parsing the component recipe file '{}'.".format(component_recipe_file.name))
with open(component_recipe_file, "r") as r_file:
recipe = r_file.read()
try:
if component_recipe_file.name.endswith(".json"):
recipe_json = json.loads(recipe)
return recipe_json
else:
recipe_yaml = yaml.safe_load(recipe)
return recipe_yaml
except Exception as e:
raise Exception("""Unable to parse the recipe file - {}.\n{}""".format(component_recipe_file.name, e))
def get_project_config_values():
# Get component configuration from the greengrass project config file.
logging.info("Getting project configuration from {}".format(consts.cli_project_config_file))
project_config = config_actions.get_configuration()["component"]
# Since there's only one key in the component configuration, use next() instead of looping in.
component_name = next(iter(project_config))
component_config = project_config[component_name]
component_version = component_config["version"]
component_author = component_config["author"]
component_build_config = component_config["build"]
bucket = component_config["publish"]["bucket"]
region = component_config["publish"]["region"]
# Build directories
gg_build_directory = Path(utils.current_directory).joinpath(consts.greengrass_build_dir).resolve()
gg_build_artifacts_dir = Path(gg_build_directory).joinpath("artifacts").resolve()
gg_build_recipes_dir = Path(gg_build_directory).joinpath("recipes").resolve()
gg_build_component_artifacts_dir = Path(gg_build_artifacts_dir).joinpath(component_name, component_version).resolve()
# Get recipe file
component_recipe_file = get_recipe_file()
# Get parsed recipe file
parsed_component_recipe = parse_recipe_file(component_recipe_file)
# Create dictionary with all the above values
vars = {}
vars["component_name"] = component_name
vars["component_version"] = component_version
vars["component_author"] = component_author
vars["component_build_config"] = component_build_config
vars["bucket"] = bucket
vars["region"] = region
vars["gg_build_directory"] = gg_build_directory
vars["gg_build_artifacts_dir"] = gg_build_artifacts_dir
vars["gg_build_recipes_dir"] = gg_build_recipes_dir
vars["gg_build_component_artifacts_dir"] = gg_build_component_artifacts_dir
vars["component_recipe_file"] = component_recipe_file
vars["parsed_component_recipe"] = parsed_component_recipe
return vars
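# Sketch of the gdk project configuration shape this function assumes; the
# field values below are illustrative only, not taken from a real project:
#   {"component": {"com.example.HelloWorld": {
#       "version": "1.0.0", "author": "example-author",
#       "build": {"build_system": "zip"},
#       "publish": {"bucket": "example-bucket", "region": "us-east-1"}}}}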
def get_service_clients(region):
service_clients = {}
service_clients["s3_client"] = create_s3_client(region)
service_clients["sts_client"] = create_sts_client(region)
service_clients["greengrass_client"] = create_greengrass_client(region)
return service_clients
def create_s3_client(region=None):
logging.debug("Creating s3 client")
return boto3.client("s3", region_name=region)
def create_sts_client(region=None):
logging.debug("Creating sts client")
return boto3.client("sts", region_name=region)
def create_greengrass_client(region=None):
logging.debug("Creating GreengrassV2 client")
return boto3.client("greengrassv2", region_name=region)
| 38.376543 | 121 | 0.72913 |
3d54bf99d09bc685117ff53dd168f2c457c9fce7 | 46,084 | py | Python | scripts/pylint_extensions.py | serbarbosa/oppia | 450e094392995794553b2ad64cd82c233d9b591d | [
"Apache-2.0"
] | null | null | null | scripts/pylint_extensions.py | serbarbosa/oppia | 450e094392995794553b2ad64cd82c233d9b591d | [
"Apache-2.0"
] | null | null | null | scripts/pylint_extensions.py | serbarbosa/oppia | 450e094392995794553b2ad64cd82c233d9b591d | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements additional custom Pylint checkers to be used as part of
presubmit checks.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
import re
import sys
import python_utils
from . import docstrings_checker
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
sys.path.insert(0, _PYLINT_PATH)
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
import astroid # isort:skip
from pylint import checkers # isort:skip
from pylint import interfaces # isort:skip
from pylint.checkers import typecheck # isort:skip
from pylint.checkers import utils as checker_utils # isort:skip
# pylint: enable=wrong-import-position
# pylint: enable=wrong-import-order
def read_from_node(node):
"""Returns the data read from the ast node in unicode form.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
Returns:
list(str). The data read from the ast node.
"""
return list(node.stream().readlines())
class ExplicitKeywordArgsChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for explicit keyword arguments
in any function call.
"""
__implements__ = interfaces.IAstroidChecker
name = 'explicit-keyword-args'
priority = -1
msgs = {
'C0001': (
'Keyword argument %s should be named explicitly in %s call of %s.',
'non-explicit-keyword-args',
'All keyword arguments should be explicitly named in function call.'
),
}
def visit_call(self, node):
"""Visits each function call in a lint check.
Args:
node: Call. The current function call node.
"""
called = checker_utils.safe_infer(node.func)
try:
# For the rationale behind the Pylint pragma below,
# see https://stackoverflow.com/a/35701863/8115428
called, implicit_args, callable_name = (
typecheck._determine_callable(called)) # pylint: disable=protected-access
except ValueError:
return
if called.args.args is None:
# Built-in functions have no argument information.
return
if len(called.argnames()) != len(set(called.argnames())):
return
# Build the set of keyword arguments and count the positional arguments.
call_site = astroid.arguments.CallSite.from_call(node)
num_positional_args = len(call_site.positional_arguments)
keyword_args = list(call_site.keyword_arguments.keys())
already_filled_positionals = getattr(called, 'filled_positionals', 0)
already_filled_keywords = getattr(called, 'filled_keywords', {})
keyword_args += list(already_filled_keywords)
num_positional_args += already_filled_positionals
num_positional_args += implicit_args
# Analyze the list of formal parameters.
num_mandatory_parameters = len(called.args.args) - len(
called.args.defaults)
parameters = []
parameter_name_to_index = {}
for i, arg in enumerate(called.args.args):
if isinstance(arg, astroid.Tuple):
name = None
else:
assert isinstance(arg, astroid.AssignName)
name = arg.name
parameter_name_to_index[name] = i
if i >= num_mandatory_parameters:
defval = called.args.defaults[i - num_mandatory_parameters]
else:
defval = None
parameters.append([(name, defval), False])
num_positional_args_unused = num_positional_args
# Check that all parameters with a default value have
# been called explicitly.
for [(name, defval), _] in parameters:
if defval:
display_name = repr(name)
if name not in keyword_args and (
num_positional_args_unused > (
num_mandatory_parameters)) and (
callable_name != 'constructor'):
# This try/except block tries to get the function
# name. Since each node may differ, multiple
# blocks have been used.
try:
func_name = node.func.attrname
except AttributeError:
func_name = node.func.name
self.add_message(
'non-explicit-keyword-args', node=node,
args=(
display_name,
callable_name,
func_name))
num_positional_args_unused -= 1
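# Illustrative trigger for non-explicit-keyword-args (hypothetical code, not
# from this repo): given `def fetch(exp_id, strict=False)`, the call
# `fetch('id0', True)` would be flagged, while `fetch('id0', strict=True)`
# passes because the defaulted parameter is named explicitly.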
class HangingIndentChecker(checkers.BaseChecker):
"""Custom pylint checker which checks for break after parenthesis in case
of hanging indentation.
"""
__implements__ = interfaces.IRawChecker
name = 'hanging-indent'
priority = -1
msgs = {
'C0002': (
(
'There should be a break after parenthesis when content within '
'parenthesis spans multiple lines.'),
'no-break-after-hanging-indent',
(
'If something within parenthesis extends along multiple lines, '
'break after opening parenthesis.')
),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
file_length = len(file_content)
exclude = False
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].lstrip().rstrip()
# The source files are read as bytes, hence the b' prefix.
if line.startswith(b'"""') and not line.endswith(b'"""'):
exclude = True
if line.endswith(b'"""'):
exclude = False
if line.startswith(b'#') or exclude:
continue
line_length = len(line)
bracket_count = 0
for char_num in python_utils.RANGE(line_length):
char = line[char_num]
if char == b'(':
if bracket_count == 0:
position = char_num
bracket_count += 1
elif char == b')' and bracket_count > 0:
bracket_count -= 1
if bracket_count > 0 and position + 1 < line_length:
content = line[position + 1:]
if not len(content) or not b',' in content:
continue
split_list = content.split(b', ')
if len(split_list) == 1 and not any(
char.isalpha() for char in split_list[0]):
continue
separators = set('@^! #%$&)(+*-=')
if not any(char in separators for item in split_list
for char in item):
self.add_message(
'no-break-after-hanging-indent', line=line_num + 1)
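# Illustrative trigger for no-break-after-hanging-indent (hypothetical code):
#     result = compute(first_value, second_value,
#         third_value)
# is flagged because content follows the opening parenthesis and spills over,
# whereas breaking immediately after '(' passes:
#     result = compute(
#         first_value, second_value, third_value)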
# The following class was derived from
# https://github.com/PyCQA/pylint/blob/377cc42f9e3116ff97cddd4567d53e9a3e24ebf9/pylint/extensions/docparams.py#L26
class DocstringParameterChecker(checkers.BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Args:
linter: Pylinter. The linter object.
"""
__implements__ = interfaces.IAstroidChecker
name = 'parameter_documentation'
msgs = {
'W9005': ('"%s" has constructor parameters '
'documented in class and __init__',
'multiple-constructor-doc',
'Please remove parameter declarations '
'in the class or constructor.'),
'W9006': ('"%s" not documented as being raised',
'missing-raises-doc',
'Please document exceptions for '
'all raised exception types.'),
'W9008': ('Redundant returns documentation',
'redundant-returns-doc',
'Please remove the return/rtype '
'documentation from this method.'),
'W9010': ('Redundant yields documentation',
'redundant-yields-doc',
'Please remove the yields documentation from this method.'),
'W9011': ('Missing return documentation',
'missing-return-doc',
'Please add documentation about what this method returns.',
{'old_names': [('W9007', 'missing-returns-doc')]}),
'W9012': ('Missing return type documentation',
'missing-return-type-doc',
'Please document the type returned by this method.',
# We can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]}.
),
'W9013': ('Missing yield documentation',
'missing-yield-doc',
'Please add documentation about what this generator yields.',
{'old_names': [('W9009', 'missing-yields-doc')]}),
'W9014': ('Missing yield type documentation',
'missing-yield-type-doc',
'Please document the type yielded by this method.',
# We can't use the same old_name for two different warnings
# {'old_names': [('W9009', 'missing-yields-doc')]}.
),
'W9015': ('"%s" missing in parameter documentation',
'missing-param-doc',
'Please add parameter declarations for all parameters.',
{'old_names': [('W9003', 'missing-param-doc')]}),
'W9016': ('"%s" missing in parameter type documentation',
'missing-type-doc',
'Please add parameter type declarations for all parameters.',
{'old_names': [('W9004', 'missing-type-doc')]}),
'W9017': ('"%s" differing in parameter documentation',
'differing-param-doc',
'Please check parameter names in declarations.',
),
'W9018': ('"%s" differing in parameter type documentation',
'differing-type-doc',
'Please check parameter names in type declarations.',
),
}
options = (('accept-no-param-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing parameter '
'documentation in the docstring of a '
'function that has parameters.'
}),
('accept-no-raise-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing raises '
'documentation in the docstring of a function that '
'raises an exception.'
}),
('accept-no-return-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing return '
'documentation in the docstring of a function that '
'returns a statement.'
}),
('accept-no-yields-doc',
{'default': True, 'type': 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing yields '
'documentation in the docstring of a generator.'
}),
)
priority = -2
constructor_names = {'__init__', '__new__'}
not_needed_param_in_docstring = {'self', 'cls'}
def visit_functiondef(self, node):
"""Called for function and method definitions (def).
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
node_doc = docstrings_checker.docstringify(node.doc)
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
def check_functiondef_params(self, node, node_doc):
"""Checks whether all parameters in a function definition are
documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = docstrings_checker.docstringify(class_node.doc)
self.check_single_constructor_params(
class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params() or
class_doc.params_documented_elsewhere() or
None
)
class_allow_no_param = (
node_doc.has_params() or
node_doc.params_documented_elsewhere() or
None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node,
accept_no_param_doc=class_allow_no_param)
self.check_arguments_in_docstring(
node_doc, node.args, node,
accept_no_param_doc=node_allow_no_param)
def check_functiondef_returns(self, node, node_doc):
"""Checks whether a function documented with a return value actually has
a return statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields and node.is_generator():
return
return_nodes = node.nodes_of_class(astroid.Return)
if ((
node_doc.has_returns() or node_doc.has_rtype()) and
not any(
docstrings_checker.returns_something(
ret_node) for ret_node in return_nodes)):
self.add_message(
'redundant-returns-doc',
node=node)
def check_functiondef_yields(self, node, node_doc):
"""Checks whether a function documented with a yield value actually has
a yield statement in its definition.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
node_doc: Docstring. Pylint Docstring class instance representing
a node's docstring.
"""
if not node_doc.supports_yields:
return
if ((node_doc.has_yields() or node_doc.has_yields_type()) and
not node.is_generator()):
self.add_message(
'redundant-yields-doc',
node=node)
def visit_raise(self, node):
"""Visits a function node that raises an exception and verifies that all
exceptions raised in the function definition are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = docstrings_checker.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
property_ = docstrings_checker.get_setters_property(func_node)
if property_:
func_node = property_
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs = doc.exceptions()
missing_excs = expected_excs - found_excs
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node):
"""Visits a function node that contains a return statement and verifies
that the return value and the return type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
if not docstrings_checker.returns_something(node):
return
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or
(doc.has_property_returns() and is_property)):
self.add_message(
'missing-return-doc',
node=func_node
)
if not (doc.has_rtype() or
(doc.has_property_type() and is_property)):
self.add_message(
'missing-return-type-doc',
node=func_node
)
def visit_yield(self, node):
"""Visits a function node that contains a yield statement and verifies
that the yield value and the yield type are documented.
Args:
node: astroid.scoped_nodes.Function. Node for a function or
method definition in the AST.
"""
func_node = node.frame()
doc = docstrings_checker.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
if not doc_has_yields:
self.add_message(
'missing-yield-doc',
node=func_node
)
if not doc_has_yields_type:
self.add_message(
'missing-yield-type-doc',
node=func_node
)
def visit_yieldfrom(self, node):
"""Visits a function node that contains a yield from statement and
verifies that the yield from value and the yield from type are
documented.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
self.visit_yield(node)
def check_arguments_in_docstring(
self, doc, arguments_node, warning_node, accept_no_param_doc=None):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
Args:
doc: str. Docstring for the function, method or class.
arguments_node: astroid.scoped_nodes.Arguments. Arguments node
for the function, method or class constructor.
warning_node: astroid.scoped_nodes.Node. The node to assign
the warnings to.
accept_no_param_doc: bool|None. Whether or not to allow
no parameters to be documented. If None then
this value is read from the configuration.
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = set(
arg.name for arg in arguments_node.args)
expected_argument_names.update(
arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = (
self.not_needed_param_in_docstring.copy())
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if (not params_with_doc and not params_with_type
and accept_no_param_doc):
tolerate_missing_params = True
def _compare_missing_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
if not tolerate_missing_params:
missing_argument_names = (
(expected_argument_names - found_argument_names)
- not_needed_names)
if missing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(missing_argument_names)),),
node=warning_node)
def _compare_different_args(
found_argument_names, message_id, not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
Args:
found_argument_names: set. Argument names found in the
docstring.
message_id: str. Pylint message id.
not_needed_names: set(str). Names that may be omitted.
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names - expected_argument_names)
if differing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(differing_argument_names)),),
node=warning_node)
_compare_missing_args(params_with_doc, 'missing-param-doc',
self.not_needed_param_in_docstring)
_compare_missing_args(params_with_type, 'missing-type-doc',
not_needed_type_in_docstring)
_compare_different_args(params_with_doc, 'differing-param-doc',
self.not_needed_param_in_docstring)
_compare_different_args(params_with_type, 'differing-type-doc',
not_needed_type_in_docstring)
def check_single_constructor_params(self, class_doc, init_doc, class_node):
"""Checks whether a class and corresponding init() method are
documented. If both of them are documented, it adds an error message.
Args:
class_doc: Docstring. Pylint docstring class instance representing
a class's docstring.
init_doc: Docstring. Pylint docstring class instance representing
a method's docstring, the method here is the constructor method
for the above class.
class_node: astroid.scoped_nodes.Function. Node for class definition
in AST.
"""
if class_doc.has_params() and init_doc.has_params():
self.add_message(
'multiple-constructor-doc',
args=(class_node.name,),
node=class_node)
def _handle_no_raise_doc(self, excs, node):
"""Checks whether the raised exception in a function has been
documented, add a message otherwise.
Args:
excs: list(str). A list of exception types.
node: astroid.scoped_nodes.Function. Node to access module content.
"""
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""Adds a message on :param:`node` for the missing exception type.
Args:
missing_excs: list. A list of missing exception types.
node: astroid.node_classes.NodeNG. The node show the message on.
"""
if not missing_excs:
return
self.add_message(
'missing-raises-doc',
args=(', '.join(sorted(missing_excs)),),
node=node)
class ImportOnlyModulesChecker(checkers.BaseChecker):
"""Checker for import-from statements. It checks that
modules are only imported.
"""
__implements__ = interfaces.IAstroidChecker
name = 'import-only-modules'
priority = -1
msgs = {
'C0003': (
'Import \"%s\" from \"%s\" is not a module.',
'import-only-modules',
'Modules should only be imported.',
),
}
@checker_utils.check_messages('import-only-modules')
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
node: astroid.node_classes.ImportFrom. Node for a import-from
statement in the AST.
"""
try:
imported_module = node.do_import_module(node.modname)
except astroid.AstroidBuildingException:
return
if node.level is None:
modname = node.modname
else:
modname = '.' * node.level + node.modname
for (name, _) in node.names:
if name == 'constants':
continue
try:
imported_module.import_module(name, True)
except astroid.AstroidImportError:
self.add_message(
'import-only-modules',
node=node,
args=(name, modname),
)
class BackslashContinuationChecker(checkers.BaseChecker):
"""Custom pylint checker which checks that backslash is not used
for continuation.
"""
__implements__ = interfaces.IRawChecker
name = 'backslash-continuation'
priority = -1
msgs = {
'C0004': (
(
'Backslash should not be used to break continuation lines. '
'Use braces to break long lines.'),
'backslash-continuation',
'Use braces to break long lines instead of backslash.'
),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
for (line_num, line) in enumerate(file_content):
if line.rstrip(b'\r\n').endswith(b'\\'):
self.add_message(
'backslash-continuation', line=line_num + 1)
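# Illustrative trigger (hypothetical): a line such as
#     total = first_part + \
# ends with a backslash and is flagged; wrapping the expression in parentheses
# and breaking inside them satisfies this checker.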
class FunctionArgsOrderChecker(checkers.BaseChecker):
"""Custom pylint checker which checks the order of arguments in function
definition.
"""
__implements__ = interfaces.IAstroidChecker
name = 'function-args-order'
priority = -1
msgs = {
'C0005': (
            'Wrong order of arguments in function definition: '
'\'self\' should come first.',
'function-args-order-self',
'\'self\' should come first',),
'C0006': (
            'Wrong order of arguments in function definition: '
'\'cls\' should come first.',
'function-args-order-cls',
'\'cls\' should come first'),
}
def visit_functiondef(self, node):
"""Visits every function definition in the python file and check the
function arguments order. It then adds a message accordingly.
Args:
node: astroid.scoped_nodes.Function. Node for a function or method
definition in the AST.
"""
args_list = [args.name for args in node.args.args]
if 'self' in args_list and args_list[0] != 'self':
self.add_message('function-args-order-self', node=node)
elif 'cls' in args_list and args_list[0] != 'cls':
self.add_message('function-args-order-cls', node=node)
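# Illustrative trigger (hypothetical): `def save(data, self):` reports
# function-args-order-self because 'self' is present but not first, while
# `def save(self, data):` passes; the same rule applies to 'cls'.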
class RestrictedImportChecker(checkers.BaseChecker):
"""Custom pylint checker which checks layers importing modules
from their respective restricted layers.
"""
__implements__ = interfaces.IAstroidChecker
name = 'invalid-import'
priority = -1
msgs = {
'C0009': (
'Importing %s layer in %s layer is prohibited.',
'invalid-import',
            'Storage layer and domain layer must not import '
            'domain layer and controller layer respectively.'),
}
def visit_import(self, node):
"""Visits every import statement in the file.
Args:
node: astroid.node_classes.Import. Node for a import statement
in the AST.
"""
modnode = node.root()
names = [name for name, _ in node.names]
# Checks import of domain layer in storage layer.
if 'oppia.core.storage' in modnode.name and not '_test' in modnode.name:
if any('core.domain' in name for name in names):
self.add_message(
'invalid-import',
node=node,
args=('domain', 'storage'),
)
# Checks import of controller layer in domain layer.
if 'oppia.core.domain' in modnode.name and not '_test' in modnode.name:
if any('core.controllers' in name for name in names):
self.add_message(
'invalid-import',
node=node,
args=('controller', 'domain'),
)
def visit_importfrom(self, node):
"""Visits all import-from statements in a python file and checks that
modules are imported. It then adds a message accordingly.
Args:
node: astroid.node_classes.ImportFrom. Node for a import-from
statement in the AST.
"""
modnode = node.root()
if 'oppia.core.storage' in modnode.name and not '_test' in modnode.name:
if 'core.domain' in node.modname:
self.add_message(
'invalid-import',
node=node,
args=('domain', 'storage'),
)
if 'oppia.core.domain' in modnode.name and not '_test' in modnode.name:
if 'core.controllers' in node.modname:
self.add_message(
'invalid-import',
node=node,
args=('controller', 'domain'),
)
class SingleCharAndNewlineAtEOFChecker(checkers.BaseChecker):
"""Checker for single character files and newline at EOF."""
__implements__ = interfaces.IRawChecker
name = 'newline-at-eof'
priority = -1
msgs = {
'C0007': (
'Files should end in a single newline character.',
'newline-at-eof',
'Please enter a single newline at the end of the file.'),
'C0008': (
'Only one character in file',
'only-one-character',
'Files with only one character are not allowed.'),
}
def process_module(self, node):
"""Process a module.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
file_content = read_from_node(node)
file_length = len(file_content)
if file_length == 1 and len(file_content[0]) == 1:
self.add_message('only-one-character', line=file_length)
if file_length >= 2 and not re.search(r'[^\n]\n', file_content[-1]):
self.add_message('newline-at-eof', line=file_length)
class SingleSpaceAfterYieldChecker(checkers.BaseChecker):
"""Checks if only one space is used after a yield statement
when applicable ('yield' is acceptable).
"""
__implements__ = interfaces.IRawChecker
name = 'single-space-after-yield'
priority = -1
msgs = {
'C0010': (
'Not using \'yield\' or a single space after yield statement.',
'single-space-after-yield',
'Ensure a single space is used after yield statement.',
),
}
def process_module(self, node):
"""Process a module to ensure that yield keywords are followed by
exactly one space, so matching 'yield *' where * is not a
whitespace character. Note that 'yield' is also acceptable in
cases where the user wants to yield nothing.
Args:
node: astroid.scoped_nodes.Function. Node to access module
content.
"""
in_multi_line_comment = False
multi_line_indicator = b'"""'
file_content = read_from_node(node)
for (line_num, line) in enumerate(file_content):
bare_line = line.strip()
# Single multi-line comment, ignore it.
if bare_line.count(multi_line_indicator) == 2:
continue
# Flip multi-line boolean depending on whether or not we see
# the multi-line indicator. Possible for multiline comment to
# be somewhere other than the start of a line (e.g. func arg),
# so we can't look at start of or end of a line, which is why
# the case where two indicators in a single line is handled
# separately (i.e. one line comment with multi-line strings).
if multi_line_indicator in bare_line:
in_multi_line_comment = not in_multi_line_comment
# Ignore anything inside a multi-line comment.
if in_multi_line_comment:
continue
# Whitespace to right of yield keyword is important for regex.
# Allows alphabet characters and underscore for cases where 'yield'
# is used at the start of a variable name.
source_line = line.lstrip()
if (source_line.startswith(b'yield') and
not re.search(br'^(yield)( \S|$|\w)', source_line)):
self.add_message('single-space-after-yield', line=line_num + 1)
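# Illustrative trigger (hypothetical): `yield  value` (two spaces) is flagged,
# while `yield value`, a bare `yield`, and names such as `yielded = 1` pass
# the regex above.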
class ExcessiveEmptyLinesChecker(checkers.BaseChecker):
"""Checks if there are excessive newlines between method definitions."""
__implements__ = interfaces.IRawChecker
name = 'excessive-new-lines'
priority = -1
msgs = {
'C0011': (
            'Excessive new lines between function definitions.',
'excessive-new-lines',
'Remove extra newlines.'
)
}
def process_module(self, node):
"""Process a module to ensure that method definitions are not seperated
by more than two blank lines.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
in_multi_line_comment = False
multi_line_indicator = b'"""'
file_content = read_from_node(node)
file_length = len(file_content)
blank_line_counter = 0
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
# Single multi-line comment, ignore it.
if line.count(multi_line_indicator) == 2:
continue
# Flip multi-line boolean depending on whether or not we see
# the multi-line indicator. Possible for multiline comment to
# be somewhere other than the start of a line (e.g. func arg),
# so we can't look at start of or end of a line, which is why
# the case where two indicators in a single line is handled
# separately (i.e. one line comment with multi-line strings).
if multi_line_indicator in line:
in_multi_line_comment = not in_multi_line_comment
# Ignore anything inside a multi-line comment.
if in_multi_line_comment:
continue
if file_content[line_num] == b'\n':
blank_line_counter += 1
else:
blank_line_counter = 0
if line_num + 1 < file_length and blank_line_counter > 2:
line = file_content[line_num + 1].strip()
if line.startswith(b'def') or line.startswith(b'@'):
self.add_message('excessive-new-lines', line=line_num + 1)
class SingleNewlineAboveArgsChecker(checkers.BaseChecker):
"""Checker for single space above args in python doc string."""
__implements__ = interfaces.IRawChecker
name = 'single-space-above-args-raises-returns'
priority = -1
msgs = {
'C0012': (
'Files must have a single newline above args in doc string.',
'single-space-above-args',
'Please enter a single newline above args in doc string.'
),
'C0013': (
'Files must have a single newline above returns in doc string.',
'single-space-above-returns',
'Please enter a single newline above returns in doc string.'
),
'C0014': (
'Files must have a single newline above raises in doc string.',
'single-space-above-raises',
'Please enter a single newline above raises in doc string.'
)
}
def process_module(self, node):
"""Process a module to ensure that there is a single newline above args,
raises, returns in python doc string.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
in_multi_line_comment = False
multi_line_indicator = b'"""'
file_content = read_from_node(node)
file_length = len(file_content)
blank_line_counter = 0
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
# Single multi-line comment, ignore it.
if line.count(multi_line_indicator) == 2:
continue
# Flip multi-line boolean depending on whether or not we see
# the multi-line indicator. Possible for multiline comment to
# be somewhere other than the start of a line (e.g. func arg),
# so we can't look at start of or end of a line, which is why
# the case where two indicators in a single line is handled
# separately (i.e. one line comment with multi-line strings).
if multi_line_indicator in line:
in_multi_line_comment = not in_multi_line_comment
# Ignore anything inside a multi-line comment.
if in_multi_line_comment:
continue
if file_content[line_num] == b'\n':
blank_line_counter += 1
else:
blank_line_counter = 0
if (line_num + 1 < file_length and (
blank_line_counter == 0 or blank_line_counter > 1)):
line = file_content[line_num + 1].strip()
if line == b'Args:':
self.add_message(
'single-space-above-args', line=line_num + 1)
elif line == b'Returns:':
self.add_message(
'single-space-above-returns', line=line_num + 1)
elif line == b'Raises:':
self.add_message(
'single-space-above-raises', line=line_num + 1)
class DivisionOperatorChecker(checkers.BaseChecker):
"""Checks if division operator is used."""
__implements__ = interfaces.IRawChecker
name = 'division-operator-used'
priority = -1
msgs = {
'C0015': (
'Division Operator is used.',
'division-operator-used',
'Please use python_utils.divide() instead of the "/" operator'
)
}
def process_module(self, node):
"""Process a module to ensure that the division operator('/') is not
used and python_utils.divide() is used instead.
Args:
node: astroid.scoped_nodes.Function. Node to access module content.
"""
in_multi_line_comment = False
multi_line_indicator = b'"""'
string_indicator = b'\''
file_content = read_from_node(node)
file_length = len(file_content)
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
# Single line comment, ignore it.
if line.startswith(b'#'):
continue
# Single multi-line comment, ignore it.
if line.count(multi_line_indicator) == 2:
continue
# Flip multi-line boolean depending on whether or not we see
# the multi-line indicator. Possible for multiline comment to
# be somewhere other than the start of a line (e.g. func arg),
# so we can't look at start of or end of a line, which is why
# the case where two indicators in a single line is handled
# separately (i.e. one line comment with multi-line strings).
if multi_line_indicator in line:
in_multi_line_comment = not in_multi_line_comment
# Ignore anything inside a multi-line comment.
if in_multi_line_comment:
continue
# Ignore anything inside a string.
if line.count(string_indicator) >= 2:
continue
if re.search(br'[^/]/[^/]', line):
self.add_message(
'division-operator-used', line=line_num + 1)
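# Illustrative trigger (hypothetical): `ratio = count / total` is flagged and
# should read `ratio = python_utils.divide(count, total)`; floor division
# (`//`), comment lines and single-line strings are skipped by the checks
# above.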
def register(linter):
"""Registers the checker with pylint.
Args:
linter: Pylinter. The Pylinter object.
"""
linter.register_checker(ExplicitKeywordArgsChecker(linter))
linter.register_checker(HangingIndentChecker(linter))
linter.register_checker(DocstringParameterChecker(linter))
linter.register_checker(ImportOnlyModulesChecker(linter))
linter.register_checker(BackslashContinuationChecker(linter))
linter.register_checker(FunctionArgsOrderChecker(linter))
linter.register_checker(RestrictedImportChecker(linter))
linter.register_checker(SingleCharAndNewlineAtEOFChecker(linter))
linter.register_checker(SingleSpaceAfterYieldChecker(linter))
linter.register_checker(ExcessiveEmptyLinesChecker(linter))
linter.register_checker(SingleNewlineAboveArgsChecker(linter))
linter.register_checker(DivisionOperatorChecker(linter))
| 39.120543 | 114 | 0.59244 |
9b7ad98e41280dacbdff14e258b300919af0cdc2 | 3,748 | py | Python | agrspy/envspy-histaqi/codes/postproc.py | soonyenju/agrspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | 2 | 2019-01-10T07:00:25.000Z | 2019-01-10T07:15:00.000Z | agrspy/envspy-histaqi/codes/postproc.py | soonyenju/arspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | null | null | null | agrspy/envspy-histaqi/codes/postproc.py | soonyenju/arspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | null | null | null |
import os, json
import config
import numpy as np
import pandas as pd
from datetime import datetime
from pathlib import Path
class Postor(config.Config):
"""
Create a new postor
"""
def __init__(self, hub_path):
super(Postor, self).__init__()
self.hub_path = hub_path
def merger(self, new_path, out_name = 'merged.json', replace = False):
# 改用dict.update()方法!!
with open(self.hub_path, "r", encoding='utf-8') as f:
aqi_hub = json.load(f)
with open(new_path, "r", encoding='utf-8') as f:
aqi_new = json.load(f)
for prov_name, prov_data in aqi_new.items():
print(prov_name)
for city_name, city_data in prov_data.items():
print(city_name)
if not city_name in aqi_hub[prov_name].keys():
aqi_hub[prov_name][city_name] = {}
for mon_name, mon_data in city_data.items():
print(mon_name)
if mon_name in aqi_hub[prov_name][city_name].keys():
if replace == True:
aqi_hub[prov_name][city_name][mon_name] = aqi_new[prov_name][city_name][mon_name]
else:
aqi_hub[prov_name][city_name][mon_name] = aqi_new[prov_name][city_name][mon_name]
with open(self.folder_json.joinpath(out_name), "w", encoding='utf-8') as f:
json.dump(aqi_hub, f, ensure_ascii=False, indent=4)
def batch_json2csv(self, prov_name = None, city_name = None):
with open(self.hub_path, 'r', encoding='utf-8') as f:
histaqi = json.load(f)
aqi_dfs = self.retrieve_data(histaqi, prov_name = prov_name, city_name = city_name)
for prov_name, prov_data in aqi_dfs.items():
prov_path = self.folder_csv.joinpath(prov_name)
if not prov_path.exists():
os.makedirs(prov_path)
for city_name, city_data in prov_data.items():
csv_name = prov_path.joinpath(city_name + '.csv')
if not os.path.exists(csv_name.as_posix()): city_data.to_csv(csv_name)
print(f'{prov_name}: {city_name} is successfully transferred to csv.')
def retrieve_data(self, histaqi, prov_name = None, city_name = None):
try:
if city_name:
print("fetching " + city_name)
city_data = histaqi[prov_name][city_name]
results = self.fetch_data(city_data)
else:
print("city name is not specified, fetching " + prov_name)
results = {}
for city_name, city_data in histaqi[prov_name].items():
print(city_name)
result = self.fetch_data(city_data)
results[city_name] = result
except Exception as identifier:
print(identifier)
print("no name is specified, iterating...")
results = {}
for prov_name, prov_data in histaqi.items():
print(prov_name)
results[prov_name] = {}
for city_name, city_data in prov_data.items():
print(city_name)
result = self.fetch_data(city_data)
results[prov_name][city_name] = result
print("iteration is done")
else:
print("retrieval is done.")
finally:
return results
def fetch_data(self, city_data):
result = []
for val in city_data.values():
result.extend(val)
result = np.array(result).reshape(-1, 9)
result = pd.DataFrame(result, columns = ['Date', 'aqi', 'aqi-rank', \
'pm25', 'pm10', 'so2', 'no2', 'co', 'o3'])
result['Date'] = pd.to_datetime(result['Date']).sort_index()
result.set_index("Date", inplace=True)
result = pd.DataFrame(result, dtype=np.float).sort_index()
return result
def eliminate_spaces(self):
'''
去除city_name中的空格
'''
with open(self.hub_path, 'r', encoding='utf-8') as f:
histaqi = json.load(f)
for prov_name, prov_data in histaqi.items():
print(prov_name)
for city_name in prov_data.keys():
print(city_name)
# print(histaqi[prov_name])
histaqi[prov_name][city_name.strip()] = histaqi[prov_name].pop(city_name)
with open(self.hub_path, "w") as f:
            json.dump(histaqi, f, ensure_ascii = False, indent = 4)
| 32.034188 | 88 | 0.688367 |
308ffd4b8d5e47a7cfc10617f76181bbcb029edb | 17,292 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_ip_pool.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 10 | 2020-05-19T01:51:28.000Z | 2021-11-16T11:36:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_ip_pool.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 19 | 2020-03-04T15:35:26.000Z | 2022-03-31T04:35:19.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/ucs/plugins/modules/ucs_ip_pool.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 9 | 2019-12-03T15:20:02.000Z | 2021-06-18T18:08:39.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_ip_pool
short_description: Configures IP address pools on Cisco UCS Manager
description:
- Configures IP address pools and blocks of IP addresses on Cisco UCS Manager.
extends_documentation_fragment: cisco.ucs.ucs
options:
state:
description:
- If C(present), will verify IP pool is present and will create if needed.
- If C(absent), will verify IP pool is absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name of the IP address pool.
- This name can be between 1 and 32 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the IP address pool is created.
required: yes
description:
description:
- The user-defined description of the IP address pool.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
order:
description:
- The Assignment Order field.
- "This can be one of the following:"
- "default - Cisco UCS Manager selects a random identity from the pool."
- "sequential - Cisco UCS Manager selects the lowest available identity from the pool."
choices: [default, sequential]
default: default
  ipv4_blocks:
description:
- List of IPv4 blocks used by the IP Pool.
suboptions:
first_addr:
description:
- The first IPv4 address in the IPv4 addresses block.
- This is the From field in the UCS Manager Add IPv4 Blocks menu.
last_addr:
description:
- The last IPv4 address in the IPv4 addresses block.
- This is the To field in the UCS Manager Add IPv4 Blocks menu.
subnet_mask:
description:
- The subnet mask associated with the IPv4 addresses in the block.
default: 255.255.255.0
default_gw:
description:
- The default gateway associated with the IPv4 addresses in the block.
default: 0.0.0.0
primary_dns:
description:
- The primary DNS server that this block of IPv4 addresses should access.
default: 0.0.0.0
secondary_dns:
description:
- The secondary DNS server that this block of IPv4 addresses should access.
default: 0.0.0.0
ipv6_blocks:
description:
- List of IPv6 blocks used by the IP Pool.
suboptions:
ipv6_first_addr:
description:
- The first IPv6 address in the IPv6 addresses block.
- This is the From field in the UCS Manager Add IPv6 Blocks menu.
ipv6_last_addr:
description:
- The last IPv6 address in the IPv6 addresses block.
- This is the To field in the UCS Manager Add IPv6 Blocks menu.
ipv6_prefix:
description:
- The network address prefix associated with the IPv6 addresses in the block.
default: '64'
ipv6_default_gw:
description:
- The default gateway associated with the IPv6 addresses in the block.
default: '::'
ipv6_primary_dns:
description:
- The primary DNS server that this block of IPv6 addresses should access.
default: '::'
ipv6_secondary_dns:
description:
- The secondary DNS server that this block of IPv6 addresses should access.
default: '::'
org_dn:
description:
- Org dn (distinguished name)
default: org-root
requirements:
- ucsmsdk
author:
- Brett Johnson (@sdbrett)
- David Soper (@dsoper2)
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure IPv4 and IPv6 address pool
cisco.ucs.ucs_ip_pool:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
name: ip-pool-01
org_dn: org-root/org-level1
ipv4_blocks:
- first_addr: 192.168.10.1
last_addr: 192.168.10.20
subnet_mask: 255.255.255.128
default_gw: 192.168.10.2
- first_addr: 192.168.11.1
last_addr: 192.168.11.20
subnet_mask: 255.255.255.128
default_gw: 192.168.11.2
ipv6_blocks:
- ipv6_first_addr: fe80::1cae:7992:d7a1:ed07
ipv6_last_addr: fe80::1cae:7992:d7a1:edfe
ipv6_default_gw: fe80::1cae:7992:d7a1:ecff
- ipv6_first_addr: fe80::1cae:7992:d7a1:ec07
ipv6_last_addr: fe80::1cae:7992:d7a1:ecfe
ipv6_default_gw: fe80::1cae:7992:d7a1:ecff
- name: Delete IPv4 and IPv6 address pool blocks
cisco.ucs.ucs_ip_pool:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
name: ip-pool-01
org_dn: org-root/org-level1
ipv4_blocks:
- first_addr: 192.168.10.1
last_addr: 192.168.10.20
state: absent
ipv6_blocks:
- ipv6_first_addr: fe80::1cae:7992:d7a1:ec07
ipv6_last_addr: fe80::1cae:7992:d7a1:ecfe
state: absent
- name: Remove IPv4 and IPv6 address pool
cisco.ucs.ucs_ip_pool:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
name: ip-pool-01
state: absent
'''
RETURN = r'''
#
'''
def update_ip_pool(ucs, module):
from ucsmsdk.mometa.ippool.IppoolPool import IppoolPool
mo = IppoolPool(
parent_mo_or_dn=module.params['org_dn'],
name=module.params['name'],
descr=module.params['descr'],
assignment_order=module.params['order'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
return mo
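# Note (inferred from the code above): the pool is created directly under the
# given org, so a module call with org_dn 'org-root' and name 'ip-pool-01'
# would manage the DN 'org-root/ip-pool-ip-pool-01'; these values mirror the
# EXAMPLES section and are illustrative only.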
def match_existing_ipv4_block(ucs, dn, ipv4_block):
# ipv4 block specified, check properties
mo_1 = get_ip_block(ucs, dn, ipv4_block['first_addr'], ipv4_block['last_addr'], 'v4')
if not mo_1:
if ipv4_block['state'] == 'absent':
return True
return False
else:
if ipv4_block['state'] == 'absent':
return False
kwargs = dict(subnet=ipv4_block['subnet_mask'])
kwargs['def_gw'] = ipv4_block['default_gw']
kwargs['prim_dns'] = ipv4_block['primary_dns']
kwargs['sec_dns'] = ipv4_block['secondary_dns']
return mo_1.check_prop_match(**kwargs)
def match_existing_ipv6_block(ucs, dn, ipv6_block):
# ipv6 block specified, check properties
mo_1 = get_ip_block(ucs, dn, ipv6_block['ipv6_first_addr'], ipv6_block['ipv6_last_addr'], 'v6')
if not mo_1:
if ipv6_block['state'] == 'absent':
return True
return False
else:
if ipv6_block['state'] == 'absent':
return False
kwargs = dict(prefix=ipv6_block['ipv6_prefix'])
kwargs['def_gw'] = ipv6_block['ipv6_default_gw']
kwargs['prim_dns'] = ipv6_block['ipv6_primary_dns']
kwargs['sec_dns'] = ipv6_block['ipv6_secondary_dns']
return mo_1.check_prop_match(**kwargs)
def remove_ip_block(ucs, dn, ip_block, ip_version):
if ip_version == 'v6':
first_addr = ip_block['ipv6_first_addr']
last_addr = ip_block['ipv6_last_addr']
else:
first_addr = ip_block['first_addr']
last_addr = ip_block['last_addr']
mo_1 = get_ip_block(ucs, dn, first_addr, last_addr, ip_version)
if mo_1:
ucs.login_handle.remove_mo(mo_1)
ucs.login_handle.commit()
def update_ip_block(ucs, mo, ip_block, ip_version):
remove_ip_block(ucs, mo.dn, ip_block, ip_version)
if not ip_block['state'] == 'absent':
if ip_version == 'v6':
from ucsmsdk.mometa.ippool.IppoolIpV6Block import IppoolIpV6Block
IppoolIpV6Block(
parent_mo_or_dn=mo,
to=ip_block['ipv6_last_addr'],
r_from=ip_block['ipv6_first_addr'],
prefix=ip_block['ipv6_prefix'],
def_gw=ip_block['ipv6_default_gw'],
prim_dns=ip_block['ipv6_primary_dns'],
sec_dns=ip_block['ipv6_secondary_dns']
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
else:
from ucsmsdk.mometa.ippool.IppoolBlock import IppoolBlock
IppoolBlock(
parent_mo_or_dn=mo,
to=ip_block['last_addr'],
r_from=ip_block['first_addr'],
subnet=ip_block['subnet_mask'],
def_gw=ip_block['default_gw'],
prim_dns=ip_block['primary_dns'],
sec_dns=ip_block['secondary_dns']
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
def get_ip_block(ucs, pool_dn, first_addr, last_addr, ip_version):
if ip_version == 'v6':
dn_type = '/v6block-'
else:
dn_type = '/block-'
block_dn = pool_dn + dn_type + first_addr + '-' + last_addr
return ucs.login_handle.query_dn(block_dn)
def main():
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ucs.plugins.module_utils.ucs import UCSModule, ucs_argument_spec
ipv4_configuration_spec = dict(
first_addr=dict(type='str'),
last_addr=dict(type='str'),
subnet_mask=dict(type='str', default='255.255.255.0'),
default_gw=dict(type='str', default='0.0.0.0'),
primary_dns=dict(type='str', default='0.0.0.0'),
secondary_dns=dict(type='str', default='0.0.0.0'),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
ipv6_configuration_spec = dict(
ipv6_first_addr=dict(type='str'),
ipv6_last_addr=dict(type='str'),
ipv6_prefix=dict(type='str', default='64'),
ipv6_default_gw=dict(type='str', default='::'),
ipv6_primary_dns=dict(type='str', default='::'),
ipv6_secondary_dns=dict(type='str', default='::'),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
argument_spec = ucs_argument_spec
argument_spec.update(
org_dn=dict(type='str', default='org-root'),
name=dict(type='str', required=True),
descr=dict(type='str', default='', aliases=['description']),
order=dict(type='str', default='default', choices=['default', 'sequential']),
first_addr=dict(type='str'),
last_addr=dict(type='str'),
subnet_mask=dict(type='str', default='255.255.255.0'),
default_gw=dict(type='str', default='0.0.0.0'),
primary_dns=dict(type='str', default='0.0.0.0'),
secondary_dns=dict(type='str', default='0.0.0.0'),
ipv6_first_addr=dict(type='str'),
ipv6_last_addr=dict(type='str'),
ipv6_prefix=dict(type='str', default='64'),
ipv6_default_gw=dict(type='str', default='::'),
ipv6_primary_dns=dict(type='str', default='::'),
ipv6_secondary_dns=dict(type='str', default='::'),
state=dict(type='str', default='present', choices=['present', 'absent']),
ipv4_blocks=dict(type='list', default=None, elements='dict', options=ipv4_configuration_spec),
ipv6_blocks=dict(type='list', default=None, elements='dict', options=ipv6_configuration_spec),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
)
# UCSModule verifies ucsmsdk is present and exits on failure. Imports are below ucs object creation.
ucs = UCSModule(module)
err = False
from ucsmsdk.mometa.ippool.IppoolBlock import IppoolBlock
from ucsmsdk.mometa.ippool.IppoolIpV6Block import IppoolIpV6Block
changed = False
try:
mo_exists = False
ipv4_props_match = True
ipv6_props_match = True
# dn is <org_dn>/ip-pool-<name>
dn = module.params['org_dn'] + '/ip-pool-' + module.params['name']
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
if not mo_exists:
if not module.check_mode:
mo = update_ip_pool(ucs, module)
changed = True
if mo_exists:
# check top-level mo props
kwargs = dict(assignment_order=module.params['order'])
kwargs['descr'] = module.params['descr']
if not mo.check_prop_match(**kwargs):
if not module.check_mode:
mo = update_ip_pool(ucs, module)
changed = True
# top-level props match, check next level mo/props
if module.params['ipv4_blocks']:
for ipv4_block in module.params['ipv4_blocks']:
if not match_existing_ipv4_block(ucs, dn, ipv4_block):
if not module.check_mode:
update_ip_block(ucs, mo, ipv4_block, 'v4')
changed = True
elif module.params['last_addr'] and module.params['first_addr']:
# ipv4 block specified, check properties
mo_1 = get_ip_block(ucs, dn, module.params['first_addr'], module.params['last_addr'], 'v4')
if mo_1:
kwargs = dict(subnet=module.params['subnet_mask'])
kwargs['def_gw'] = module.params['default_gw']
kwargs['prim_dns'] = module.params['primary_dns']
kwargs['sec_dns'] = module.params['secondary_dns']
if not mo_1.check_prop_match(**kwargs):
                            # ipv4 block exists but its properties do not match
ipv4_props_match = False
else:
ipv4_props_match = False
                # check ipv6 blocks (or the legacy top-level ipv6 params) next
if module.params['ipv6_blocks']:
for ipv6_block in module.params['ipv6_blocks']:
if not match_existing_ipv6_block(ucs, dn, ipv6_block):
if not module.check_mode:
update_ip_block(ucs, mo, ipv6_block, 'v6')
changed = True
elif module.params['ipv6_last_addr'] and module.params['ipv6_first_addr']:
# ipv6 block specified, check properties
block_dn = dn + '/v6block-' + module.params['ipv6_first_addr'].lower() + '-' + module.params[
'ipv6_last_addr'].lower()
mo_1 = ucs.login_handle.query_dn(block_dn)
if mo_1:
kwargs = dict(prefix=module.params['ipv6_prefix'])
kwargs['def_gw'] = module.params['ipv6_default_gw']
kwargs['prim_dns'] = module.params['ipv6_primary_dns']
kwargs['sec_dns'] = module.params['ipv6_secondary_dns']
if not mo_1.check_prop_match(**kwargs):
                            # ipv6 block exists but its properties do not match
ipv6_props_match = False
else:
ipv6_props_match = False
if not ipv4_props_match or not ipv6_props_match:
if not module.check_mode:
if module.params['last_addr'] and module.params['first_addr']:
IppoolBlock(
parent_mo_or_dn=mo,
to=module.params['last_addr'],
r_from=module.params['first_addr'],
subnet=module.params['subnet_mask'],
def_gw=module.params['default_gw'],
prim_dns=module.params['primary_dns'],
sec_dns=module.params['secondary_dns'],
)
if module.params['ipv6_last_addr'] and module.params['ipv6_first_addr']:
IppoolIpV6Block(
parent_mo_or_dn=mo,
to=module.params['ipv6_last_addr'],
r_from=module.params['ipv6_first_addr'],
prefix=module.params['ipv6_prefix'],
def_gw=module.params['ipv6_default_gw'],
prim_dns=module.params['ipv6_primary_dns'],
sec_dns=module.params['ipv6_secondary_dns'],
)
ucs.login_handle.add_mo(mo, True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
if __name__ == '__main__':
main()
| 38.172185 | 139 | 0.601319 |
d06092a45e6010a00b87ac6dfd277439886bd190 | 1,408 | py | Python | scripts/Archive/oldmakecenters3d.py | wahabk/colloidoscope | 508918703405e07c336c0ad97cf6b3e87db311bb | [
"MIT"
] | null | null | null | scripts/Archive/oldmakecenters3d.py | wahabk/colloidoscope | 508918703405e07c336c0ad97cf6b3e87db311bb | [
"MIT"
] | null | null | null | scripts/Archive/oldmakecenters3d.py | wahabk/colloidoscope | 508918703405e07c336c0ad97cf6b3e87db311bb | [
"MIT"
] | null | null | null | import random
from scipy import spatial
def make_random_centers_3d(canvas_size, n, zoom, min_dist):
'''
Generate random centers of particles
This is a place holder for bringing in simulated particle trajectories from dynamo
'''
canvas_size = [int(c/zoom) for c in canvas_size]
min_dist = min_dist/zoom
z = random.randint(0, canvas_size[0])
y = random.randint(0, canvas_size[1])
x = random.randint(0, canvas_size[2])
centers = [(z,y,x)] # make first particle
for i in range(n):
too_close = True
while too_close:
z = random.randint(0, canvas_size[0])
y = random.randint(0, canvas_size[1])
x = random.randint(0, canvas_size[2])
centers.append((z,y,x))
distances = spatial.distance.pdist(centers)
if all(i > min_dist for i in distances):
too_close = False
break
else:
centers.pop() # get rid of last element if too close
return centers
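# Illustrative usage sketch (added for clarity; the sizes below are arbitrary).
# Note that n + 1 centres are returned, because one seed centre is created
# before the loop.
def _example_random_centers():
	centers = make_random_centers_3d(canvas_size=(64, 64, 64), n=5, zoom=1, min_dist=10)
	return len(centers)  # -> 6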
def draw_sphere(canvas, center, r):
cz, cy, cx = center
for i in range(canvas.shape[0]):
for j in range(canvas.shape[1]):
for k in range(canvas.shape[2]):
if (i - cz)**2 + (j - cy)**2 + (k - cx)**2 <= r**2:
canvas[i,j,k] = 255
return canvas
def draw_multiple_spheres(canvas, centers, r):
for center in centers:
cz, cy, cx = center
for i in range(canvas.shape[0]):
for j in range(canvas.shape[1]):
for k in range(canvas.shape[2]):
if (i - cz)**2 + (j - cy)**2 + (k - cx)**2 <= r**2:
canvas[i,j,k] = 255
return canvas | 31.288889 | 83 | 0.650568 |
1a6b682192abb5e6c2fd29f18d7934ec83bbc162 | 21,490 | py | Python | src/tests/api/test_permissions.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/api/test_permissions.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/api/test_permissions.py | prereg/prereg | 5000c279a801fa2260009b15dd90e3bd4f447785 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import time
import pytest
from django.test import override_settings
from django.utils.timezone import now
from pretix.base.models import Organizer
event_urls = [
(None, ''),
(None, 'categories/'),
('can_view_orders', 'invoices/'),
(None, 'items/'),
('can_view_orders', 'orders/'),
('can_view_orders', 'orderpositions/'),
(None, 'questions/'),
(None, 'quotas/'),
('can_view_vouchers', 'vouchers/'),
(None, 'subevents/'),
(None, 'taxrules/'),
('can_view_orders', 'waitinglistentries/'),
('can_view_orders', 'checkinlists/'),
]
event_permission_sub_urls = [
('get', 'can_change_event_settings', 'settings/', 200),
('patch', 'can_change_event_settings', 'settings/', 200),
('get', 'can_view_orders', 'revokedsecrets/', 200),
('get', 'can_view_orders', 'revokedsecrets/1/', 404),
('get', 'can_view_orders', 'orders/', 200),
('get', 'can_view_orders', 'orderpositions/', 200),
('delete', 'can_change_orders', 'orderpositions/1/', 404),
('post', 'can_change_orders', 'orderpositions/1/price_calc/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_orders', 'invoices/', 200),
('get', 'can_view_orders', 'invoices/1/', 404),
('post', 'can_change_orders', 'invoices/1/regenerate/', 404),
('post', 'can_change_orders', 'invoices/1/reissue/', 404),
('get', 'can_view_orders', 'waitinglistentries/', 200),
('get', 'can_view_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/', 400),
('delete', 'can_change_orders', 'waitinglistentries/1/', 404),
('patch', 'can_change_orders', 'waitinglistentries/1/', 404),
('put', 'can_change_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/1/send_voucher/', 404),
('get', None, 'categories/', 200),
('get', None, 'items/', 200),
('get', None, 'questions/', 200),
('get', None, 'quotas/', 200),
('post', 'can_change_items', 'items/', 400),
('get', None, 'items/1/', 404),
('put', 'can_change_items', 'items/1/', 404),
('patch', 'can_change_items', 'items/1/', 404),
('delete', 'can_change_items', 'items/1/', 404),
('post', 'can_change_items', 'categories/', 400),
('get', None, 'categories/1/', 404),
('put', 'can_change_items', 'categories/1/', 404),
('patch', 'can_change_items', 'categories/1/', 404),
('delete', 'can_change_items', 'categories/1/', 404),
('post', 'can_change_items', 'items/1/variations/', 404),
('get', None, 'items/1/variations/', 404),
('get', None, 'items/1/variations/1/', 404),
('put', 'can_change_items', 'items/1/variations/1/', 404),
('patch', 'can_change_items', 'items/1/variations/1/', 404),
('delete', 'can_change_items', 'items/1/variations/1/', 404),
('get', None, 'items/1/addons/', 404),
('get', None, 'items/1/addons/1/', 404),
('post', 'can_change_items', 'items/1/addons/', 404),
('put', 'can_change_items', 'items/1/addons/1/', 404),
('patch', 'can_change_items', 'items/1/addons/1/', 404),
('delete', 'can_change_items', 'items/1/addons/1/', 404),
('get', None, 'subevents/', 200),
('get', None, 'subevents/1/', 404),
('get', None, 'taxrules/', 200),
('get', None, 'taxrules/1/', 404),
('post', 'can_change_event_settings', 'taxrules/', 400),
('put', 'can_change_event_settings', 'taxrules/1/', 404),
('patch', 'can_change_event_settings', 'taxrules/1/', 404),
('delete', 'can_change_event_settings', 'taxrules/1/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_vouchers', 'vouchers/1/', 404),
('post', 'can_change_vouchers', 'vouchers/', 201),
('put', 'can_change_vouchers', 'vouchers/1/', 404),
('patch', 'can_change_vouchers', 'vouchers/1/', 404),
('delete', 'can_change_vouchers', 'vouchers/1/', 404),
('get', None, 'quotas/', 200),
('get', None, 'quotas/1/', 404),
('post', 'can_change_items', 'quotas/', 400),
('put', 'can_change_items', 'quotas/1/', 404),
('patch', 'can_change_items', 'quotas/1/', 404),
('delete', 'can_change_items', 'quotas/1/', 404),
('get', None, 'questions/', 200),
('get', None, 'questions/1/', 404),
('post', 'can_change_items', 'questions/', 400),
('put', 'can_change_items', 'questions/1/', 404),
('patch', 'can_change_items', 'questions/1/', 404),
('delete', 'can_change_items', 'questions/1/', 404),
('get', None, 'questions/1/options/', 404),
('get', None, 'questions/1/options/1/', 404),
('put', 'can_change_items', 'questions/1/options/1/', 404),
('patch', 'can_change_items', 'questions/1/options/1/', 404),
('delete', 'can_change_items', 'questions/1/options/1/', 404),
('post', 'can_change_orders', 'orders/', 400),
('patch', 'can_change_orders', 'orders/ABC12/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_paid/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_pending/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_expired/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_canceled/', 404),
('post', 'can_change_orders', 'orders/ABC12/approve/', 404),
('post', 'can_change_orders', 'orders/ABC12/deny/', 404),
('post', 'can_change_orders', 'orders/ABC12/extend/', 400),
('post', 'can_change_orders', 'orders/ABC12/create_invoice/', 404),
('post', 'can_change_orders', 'orders/ABC12/resend_link/', 404),
('post', 'can_change_orders', 'orders/ABC12/regenerate_secrets/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/1/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/1/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/confirm/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/refund/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/process/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/done/', 404),
('get', 'can_view_orders', 'checkinlists/', 200),
('post', 'can_change_event_settings', 'checkinlists/', 400),
('put', 'can_change_event_settings', 'checkinlists/1/', 404),
('patch', 'can_change_event_settings', 'checkinlists/1/', 404),
('delete', 'can_change_event_settings', 'checkinlists/1/', 404),
('post', 'can_create_events', 'clone/', 400),
('get', 'can_view_orders', 'cartpositions/', 200),
('get', 'can_view_orders', 'cartpositions/1/', 404),
('post', 'can_change_orders', 'cartpositions/', 400),
('delete', 'can_change_orders', 'cartpositions/1/', 404),
('post', 'can_view_orders', 'exporters/invoicedata/run/', 400),
('get', 'can_view_orders', 'exporters/invoicedata/download/bc3f9884-26ee-425b-8636-80613f84b6fa/3cb49ae6-eda3-4605-814e-099e23777b36/', 404),
]
org_permission_sub_urls = [
('get', 'can_change_organizer_settings', 'settings/', 200),
('patch', 'can_change_organizer_settings', 'settings/', 200),
('get', 'can_change_organizer_settings', 'webhooks/', 200),
('post', 'can_change_organizer_settings', 'webhooks/', 400),
('get', 'can_change_organizer_settings', 'webhooks/1/', 404),
('put', 'can_change_organizer_settings', 'webhooks/1/', 404),
('patch', 'can_change_organizer_settings', 'webhooks/1/', 404),
('delete', 'can_change_organizer_settings', 'webhooks/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/', 200),
('post', 'can_manage_gift_cards', 'giftcards/', 400),
('get', 'can_manage_gift_cards', 'giftcards/1/', 404),
('put', 'can_manage_gift_cards', 'giftcards/1/', 404),
('patch', 'can_manage_gift_cards', 'giftcards/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/1/', 404),
('get', 'can_change_organizer_settings', 'devices/', 200),
('post', 'can_change_organizer_settings', 'devices/', 400),
('get', 'can_change_organizer_settings', 'devices/1/', 404),
('put', 'can_change_organizer_settings', 'devices/1/', 404),
('patch', 'can_change_organizer_settings', 'devices/1/', 404),
('get', 'can_change_teams', 'teams/', 200),
('post', 'can_change_teams', 'teams/', 400),
('get', 'can_change_teams', 'teams/{team_id}/', 200),
('put', 'can_change_teams', 'teams/{team_id}/', 400),
('patch', 'can_change_teams', 'teams/{team_id}/', 200),
('get', 'can_change_teams', 'teams/{team_id}/members/', 200),
('delete', 'can_change_teams', 'teams/{team_id}/members/2/', 404),
('get', 'can_change_teams', 'teams/{team_id}/invites/', 200),
('get', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('post', 'can_change_teams', 'teams/{team_id}/invites/', 400),
('get', 'can_change_teams', 'teams/{team_id}/tokens/', 200),
('get', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('post', 'can_change_teams', 'teams/{team_id}/tokens/', 400),
]
event_permission_root_urls = [
('post', 'can_create_events', 400),
('put', 'can_change_event_settings', 400),
('patch', 'can_change_event_settings', 200),
('delete', 'can_change_event_settings', 204),
]
@pytest.fixture
def token_client(client, team):
team.can_view_orders = True
team.can_view_vouchers = True
team.can_change_items = True
team.save()
t = team.tokens.create(name='Foo')
client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
return client
@pytest.mark.django_db
def test_organizer_allowed(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert resp.status_code == 200
@pytest.mark.django_db
def test_organizer_not_allowed(token_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = token_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_allowed_device(device_client, organizer):
o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
resp = device_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_existing(token_client, organizer):
resp = token_client.get('/api/v1/organizers/{}/events/'.format('o2'))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events(token_client, team, organizer, event, url):
team.all_events = True
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events_device(device_client, device, organizer, event, url):
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events(token_client, organizer, team, event, url):
team.all_events = False
team.save()
team.limit_events.add(event)
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
device.limit_events.add(event)
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
if url[0] is None or url[0] in device.permission_set():
assert resp.status_code == 200
else:
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed(token_client, organizer, team, event, url):
team.all_events = False
team.save()
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed_device(device_client, organizer, device, event, url):
device.all_events = False
device.save()
resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_existing(token_client, organizer, url, event):
resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], True)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == urlset[2]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_not_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
setattr(team, urlset[1], False)
team.save()
if urlset[0] == 'post':
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
else:
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
assert resp.status_code == 403
@pytest.mark.django_db
def test_log_out_after_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_absolute_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
@override_settings(PRETIX_LONG_SESSIONS=False)
def test_ignore_long_session_if_disabled_in_config(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_log_out_after_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_relative_timeout(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 + 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_dont_logout_by_relative_in_long_session(user_client, team, organizer, event):
session = user_client.session
session['pretix_auth_long_session'] = True
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
@pytest.mark.django_db
def test_update_session_activity(user_client, team, organizer, event):
t1 = int(time.time()) - 5
session = user_client.session
session['pretix_auth_long_session'] = False
session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
session['pretix_auth_last_used'] = t1
session.save()
response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
assert response.status_code == 200
assert user_client.session['pretix_auth_last_used'] > t1
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_device_subresource_permission_check(device_client, device, organizer, event, urlset):
if urlset == ('get', 'can_change_event_settings', 'settings/', 200):
return
resp = getattr(device_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
organizer.slug, event.slug, urlset[2]))
if urlset[1] is None or urlset[1] in device.permission_set():
assert resp.status_code == urlset[3]
else:
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_allowed(token_client, team, organizer, event, urlset):
team.all_events = True
if urlset[1]:
setattr(team, urlset[1], True)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2].format(team_id=team.pk)))
assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
if urlset[1] is None:
team.all_events = False
else:
team.all_events = True
setattr(team, urlset[1], False)
team.save()
resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
organizer.slug, urlset[2].format(team_id=team.pk)))
if urlset[3] == 404:
assert resp.status_code == 403
else:
assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_staff_requires_staff_session(user_client, organizer, team, event, url, user):
team.delete()
user.is_staff = True
user.save()
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 403
user.staffsession_set.create(date_start=now(), session_key=user_client.session.session_key)
resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
assert resp.status_code == 200
| 43.15261 | 145 | 0.674872 |
460a463dc61cdd38ed028e551b41b6b6d7d04310 | 14,442 | py | Python | utils.py | raechelwalker/mrtl | 49118f48b798fb7b55c7b479f49c4ac9c966ed19 | [
"MIT"
] | null | null | null | utils.py | raechelwalker/mrtl | 49118f48b798fb7b55c7b479f49c4ac9c966ed19 | [
"MIT"
] | null | null | null | utils.py | raechelwalker/mrtl | 49118f48b798fb7b55c7b479f49c4ac9c966ed19 | [
"MIT"
] | null | null | null | import logging
import os
from datetime import datetime
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.utils.data
import xarray as xr
from cp_als import unfold
def set_logger(logger, log_path=None):
# create logger
logger.setLevel(logging.DEBUG)
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add to handler
formatter = logging.Formatter(
'[%(asctime)s] %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# Add handler to logger
logger.addHandler(ch)
# File handler
if log_path is not None:
fh = logging.FileHandler(log_path, 'w+')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
def size_to_str(lst):
lst_str = "x".join([str(i).zfill(2) for i in lst])
return lst_str
def calc_F1(fp, fn, tp):
if tp == 0 or (tp + fp) == 0 or (tp + fn) == 0:
precision = 0.0
recall = 0.0
F1 = 0.0
else:
precision = tp / (tp + fp)
recall = tp / (tp + fn)
F1 = 2 * (precision * recall) / (precision + recall)
return F1, precision, recall
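# Tiny worked example (added for illustration): 8 true positives, 2 false
# positives and 4 false negatives give precision 0.8, recall ~0.667, F1 ~0.727.
def _example_calc_F1():
    F1, precision, recall = calc_F1(fp=2, fn=4, tp=8)
    return round(F1, 3), round(precision, 3), round(recall, 3)  # (0.727, 0.8, 0.667)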
def accum_grad(gradients, model):
for i, (name, p) in enumerate(model.named_parameters()):
if name != 'module.b' and name != 'b':
gradients[i].add_(p.grad.data)
def grad_stats(avg_grads):
grads = torch.cat([g.contiguous().view(-1) for g in avg_grads])
grad_norm = (torch.norm(grads, p=2)**2).item()
grad_entropy = (-(grads.clamp_min(1e-30) *
torch.log(grads.clamp_min(1e-30))).sum()).item()
grad_var = torch.var(grads).item()
return grad_norm, grad_entropy, grad_var
def l1_regularizer(model, device):
reg = torch.tensor(0.).to(device)
numel = 0
for name, p in model.named_parameters():
if name != 'module.b':
reg.add_(torch.norm(p.view(-1), p=1))
numel += p.numel()
return reg / numel
def l2_regularizer(model, device):
reg = torch.tensor(0.).to(device)
numel = 0
for name, p in model.named_parameters():
if name != 'module.b':
reg.add_(torch.norm(p.view(-1), p=2)**2)
numel += p.numel()
return reg / numel
def create_kernel(dims, sigma, device):
coords = torch.cartesian_prod(torch.arange(0, dims[0], dtype=torch.float),
torch.arange(0, dims[1], dtype=torch.float))
dist = torch.cdist(coords, coords, p=2).to(device)
# To normalize distances across different resolutions
dist = dist / torch.max(dist)
# K is matrix of degree of similarity between coordinates
K = torch.exp(-dist**2 / sigma)
return K
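# Illustrative sketch (added; the dims/sigma values are arbitrary): build a
# small RBF kernel on CPU and check its basic properties.
def _example_create_kernel():
    K = create_kernel(dims=(4, 5), sigma=0.5, device=torch.device("cpu"))
    assert K.shape == (20, 20)  # one row/column per grid cell
    assert torch.allclose(torch.diagonal(K), torch.ones(20))  # zero distance -> similarity 1
    return K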
# Implement cdist from https://github.com/pytorch/pytorch/issues/15253
def pdist(X):
X_norm = X.pow(2).sum(dim=-1, keepdim=True)
res = torch.addmm(X_norm.transpose(-2, -1),
X,
X.transpose(-2, -1),
alpha=-2).add_(X_norm)
res = res.clamp_min_(1e-30)
return res
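# Sanity-check sketch (added for illustration): pdist should agree with the
# squared pairwise distances from torch.cdist, up to the 1e-30 clamp.
def _example_pdist_matches_cdist():
    X = torch.randn(6, 3)
    assert torch.allclose(pdist(X), torch.cdist(X, X, p=2) ** 2, atol=1e-4)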
def bball_spatial_regularizer(model, K_B, K_C, device):
reg = torch.tensor(0.).to(device)
if type(model.module).__name__.startswith('Full'):
W_size = model.W.size()
# Court dimension
W_unfold = unfold(model.W.view(W_size[0], W_size[1] * W_size[2],
W_size[3], W_size[4]),
mode=1).contiguous()
reg.add_((K_B * pdist(W_unfold)).sum() /
(torch.numel(model.W) * np.prod(model.b_dims)))
# Defender position
W_unfold = unfold(model.W.view(W_size[0], W_size[1], W_size[2],
W_size[3] * W_size[4]),
mode=3).contiguous()
reg.add_((K_C * pdist(W_unfold)).sum() /
(torch.numel(model.W) * np.prod(model.c_dims)))
else:
# Court position
reg.add_((K_B * pdist(model.B.view(-1, model.K))).sum() /
(torch.numel(model.B) * np.prod(model.b_dims)))
# Defender position
reg.add_((K_C * pdist(model.C.view(-1, model.K))).sum() /
(torch.numel(model.C) * np.prod(model.c_dims)))
return reg
def class_counts(dataset):
_, counts = np.unique(dataset.y, return_counts=True)
return counts
def calc_weights(dataset):
counts = class_counts(dataset)
return np.where(dataset.y == 1, counts[0] / counts.sum(),
counts[1] / counts.sum())
def expand_pos(T, shape, dim):
T_size = list(T.size())
T_size.insert(dim + 1, shape[1])
T_size[dim] = shape[0]
return T.view(*T_size)
def contract_pos(T, dim):
T_size = list(T.size())
val = T_size.pop(dim + 1)
T_size[dim] = val * T_size[dim]
return T.view(*T_size)
def finegrain(T, new_shape, start_dim, mode='nearest'):
old_shape = T.shape
assert T.ndim in [3, 5], "T.ndim must be 3 or 5"
assert start_dim in [0, 1, 3], "start_dim must be 0, 1, or 3"
# Calculate scale
scale = float(new_shape[0]) / old_shape[start_dim]
assert scale == (
float(new_shape[1]) /
old_shape[start_dim + 1]), "Scale is not the same across axes."
new = None
if T.ndim == 5:
old = T.clone().detach().permute(
0, 4 - start_dim, 5 - start_dim, start_dim, start_dim + 1).view(
old_shape[0],
old_shape[4 - start_dim] * old_shape[5 - start_dim],
old_shape[start_dim], old_shape[start_dim + 1])
interp = torch.nn.functional.interpolate(old,
scale_factor=scale,
mode=mode)
new = interp.view(old_shape[0], old_shape[4 - start_dim],
old_shape[5 - start_dim],
*new_shape).permute(0, 4 - start_dim, 5 - start_dim,
start_dim, start_dim + 1)
elif T.ndim == 3:
old = T.clone().detach().permute(2, 0, 1).unsqueeze(0)
interp = torch.nn.functional.interpolate(old,
scale_factor=scale,
mode=mode)
new = interp.squeeze().permute(1, 2, 0)
return new
# Source: https://github.com/ktcarr/salinity-corn-yields/tree/master/mrtl
def plot_setup(plot_range=[-125.25, -66, 22.5, 50],
figsize=(7, 5),
central_lon=0):
# Function sets up plotting environment for continental US
# Returns fig, ax
# Set up figure for plotting
sns.set()
fig = plt.figure(figsize=figsize)
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=central_lon))
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
ax.add_feature(states_provinces, edgecolor='black')
ax.coastlines()
ax.set_extent(plot_range, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.BORDERS)
ax.title.set_fontsize(30)
return fig, ax
def multis_to_datetime(multis):
# Function to convert multi-index of year/month to Pandas datetime index
multi_to_datetime = lambda multi: datetime(multi[0], multi[1], 1)
return (pd.Index([multi_to_datetime(multi) for multi in multis]))
def minmax_scaler(x, old_min, old_max, new_min, new_max):
# Scale elements in a 1-dimensional array to [0,1]
x_scaled = (x - old_min) / (old_max - old_min)
x_scaled = x_scaled * (new_max - new_min) + new_min
return x_scaled
def remove_season(data, standardize=True, mean=None, std=None):
# Function to remove seasonality from data
# Returns de-seasonalized data with same shape as input
if mean is None:
mean = data.mean(dim='year')
std = data.std(dim='year')
    if standardize:
        # Use the provided (or freshly computed) climatology so that, e.g.,
        # validation data can be standardized with the training-period mean/std.
        data = (data - mean) / std
    else:
        data = data - mean
return data, mean, std
def normalize(data,
old_min=None,
old_max=None,
new_min=0,
new_max=1,
dim='time'):
    # Function to min-max scale the data into [new_min, new_max]
    # Returns the scaled data plus the old min/max used for the scaling
if 'time' in data.dims: # get year and month as separate dimension
data = unstack_month_and_year(data)
if dim == 'time':
data = data.stack(time=['year', 'month'])
if old_min is None:
old_min = data.min(dim=dim)
old_max = data.max(dim=dim)
data.values = np.float32(
minmax_scaler(data,
old_min=old_min,
new_min=new_min,
old_max=old_max,
new_max=new_max))
return data.unstack(), old_min, old_max
def weight_by_area(data_fp, data):
# Function to weight dataarray by the area of each gridcell
# Returns dataarray with same dimensions
dim = [len(data.lat), len(data.lon)]
fp = os.path.join(data_fp, 'gridarea_{0}x{1}.nc'.format(*dim))
grid_area = xr.open_dataarray(fp)
grid_prop = grid_area / np.max(grid_area)
grid_prop = grid_prop.assign_coords({
'lon': data.lon,
'lat': data.lat
}) # 'snap' coords to match data
return data * grid_prop
def preprocess(data_fp,
data,
do_remove_season=True,
mean=None,
std=None,
do_normalize=True,
old_min=None,
old_max=None):
# Function to pre-process data, with options to remove seasonality, detrend
# and normalize
# Returns pre-processed data with time, lat, and lon dimensions
if 'time' in data.dims: # get year and month as separate dimension
year = data.time.dt.year
month = data.time.dt.month
times = pd.MultiIndex.from_arrays([year, month],
names=('year', 'month'))
data = unstack_month_and_year(data)
# REMOVE SEASONAL CYCLE
if do_remove_season:
data, mean, std = remove_season(data,
standardize=True,
mean=mean,
std=std)
# NORMALIZE
if do_normalize:
        if do_remove_season:
data, old_min, old_max = normalize(data,
dim='time',
old_min=old_min,
old_max=old_max)
else:
data, old_min, old_max = normalize(data,
dim='year',
old_min=old_min,
old_max=old_max)
# WEIGHT BY GRIDCELL AREA
if 'lat' in data.dims:
data = weight_by_area(data_fp, data)
data = data.stack(time=['year', 'month'
]) # Make time a coordinate (and a datetime index)
data = data.sel(time=times)
data = data.assign_coords({
'time': multis_to_datetime(data.time.values)
}).transpose('time', ...)
return (data, mean, std, old_min, old_max)
def unstack_month_and_year(data):
# Function 'unstacks' month and year in a dataframe with 'time' dimension
# The 'time' dimension is separated into a month and a year dimension
# This increases the number of dimensions by 1
year = data.time.dt.year
month = data.time.dt.month
new_idx = pd.MultiIndex.from_arrays([year, month], names=('year', 'month'))
return (data.assign_coords({'time': new_idx}).unstack('time'))
def diff_detrend(x):
# Function does 'difference' detrending
# x is the vector to detrend
# returns a vector of length len(x)-1
return (x[1:] - x[:-1])
def diff_detrend_xr(data):
# Detrend xarray dataarray along particular axis
if not ('time' in data.dims):
data = data.stack(time=['year', 'month'])
time_dim = data.dims.index('time') # Get dimension corresponding to time
#time_dim = data.da.dims.index('time') # Get dimension corresponding to time
# Update coordinates by reducing time dimension by 1
new_coords = {
coord: data.coords[coord]
for coord in data.coords if coord != 'time'
}
new_coords['time'] = data.time[1:]
# Detrend
vals = np.apply_along_axis(diff_detrend, axis=time_dim, arr=data)
data_new = xr.DataArray(vals, coords=new_coords, dims=data.dims)
return (data_new)
def mse(x, y):
# Custom function to compute MSE for sanity check
# return(torch.sum((x-y)**2) / len(x))
x = x.float()
y = y.float()
return (torch.mean((x - y)**2))
def mae(x1, x2):
    # Mean absolute error
    return (torch.mean(torch.abs(x1 - x2)))
def climate_spatial_regularizer(model, K, device):
reg = torch.tensor(0.).to(device)
if 'low' not in type(model).__name__:
# Make spatial dimension the 0th dimension
w_unfold = unfold(model.w.detach(), mode=2).contiguous()
reg.add_((K * pdist(w_unfold)).sum() / (torch.numel(model.w)))
else:
reg.add_((K * pdist(model.C.detach())).sum())
return reg
def compareStats(y_train, y_val, preds_val):
# Function computes model MSE/MAE and compares to several naïve approaches
normal_preds = torch.zeros(y_val.shape)
for i in np.arange(len(y_val)):
normal_preds[i] = torch.normal(y_train.mean(), y_train.std())
dumb_pred = torch.cat((y_val[0].unsqueeze(0), y_val[0:-1]))
constant_pred = y_train.mean() * torch.ones(len(y_val))
print('MSE')
print('Model : {:4f}'.format(mse(y_val, preds_val)))
print('Constant: {:4f}'.format(mse(y_val, constant_pred)))
print('Previous: {:4f}'.format(mse(y_val, dumb_pred)))
print('Normal : {:4f}'.format(mse(y_val, normal_preds)))
print('MAE')
print('Model : {:4f}'.format(mae(y_val, preds_val)))
print('Constant: {:4f}'.format(mae(y_val, constant_pred)))
print('Previous: {:4f}'.format(mae(y_val, dumb_pred)))
print('Normal : {:4f}'.format(mae(y_val, normal_preds)))
| 32.453933 | 81 | 0.580944 |
01799cb4e12a93c61f019a95f97b8bbe444dff20 | 543 | py | Python | src/trigger.py | nurullah/jupyter-notebook-rest-api | 36d08c04fb2c61d1892e6c499461fb0e08f63239 | [
"MIT"
] | null | null | null | src/trigger.py | nurullah/jupyter-notebook-rest-api | 36d08c04fb2c61d1892e6c499461fb0e08f63239 | [
"MIT"
] | null | null | null | src/trigger.py | nurullah/jupyter-notebook-rest-api | 36d08c04fb2c61d1892e6c499461fb0e08f63239 | [
"MIT"
] | null | null | null | import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbparameterise import (
extract_parameters, replace_definitions, parameter_values
)
def trigger(notebook_filename='hello.ipynb', params={}):
with open(notebook_filename) as f:
nb = nbformat.read(f, as_version=4)
orig_parameters = extract_parameters(nb)
new_nb = replace_definitions(nb, parameter_values(orig_parameters, **params))
ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
r = ep.preprocess(new_nb)
return r
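# Hedged usage sketch (added for illustration): execute the default notebook
# with one overridden parameter. The parameter name "message" is an assumption
# about what hello.ipynb defines -- substitute the names your notebook uses.
if __name__ == "__main__":
    executed = trigger("hello.ipynb", params={"message": "Hello from the REST API"})
    print("Notebook executed; preprocess returned:", type(executed))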
| 31.941176 | 81 | 0.760589 |
c231a269a3037ce156f5d889bee6db2f2f6c7a0e | 1,082 | py | Python | tests/state/temporary/modules/models/dosomething/__init__.py | da-h/miniflask | d5e594153cca4ce4d30db01b1d06d05afa9e7aaa | [
"MIT"
] | 5 | 2020-02-17T12:14:36.000Z | 2020-02-27T12:09:05.000Z | tests/state/temporary/modules/models/dosomething/__init__.py | da-h/miniflask | d5e594153cca4ce4d30db01b1d06d05afa9e7aaa | [
"MIT"
] | 69 | 2020-04-03T08:16:35.000Z | 2021-12-21T15:46:29.000Z | tests/state/temporary/modules/models/dosomething/__init__.py | da-h/miniflask | d5e594153cca4ce4d30db01b1d06d05afa9e7aaa | [
"MIT"
] | 1 | 2020-04-02T15:46:39.000Z | 2020-04-02T15:46:39.000Z | from miniflask.exceptions import StateKeyError
def dosomething(state, event):
del event # unused
print("in event: variable =", state["variable"])
if "new_variable" in state:
print("in event: new_variable =", state["new_variable"])
def main(state, event):
state["new_variable"] = 42
del state["new_variable"]
print("before event", state["variable"])
with state.temporary({
"variable": 42
}):
event.dosomething()
print("after event", state["variable"])
try:
_ = state["new_variable"]
print("variable 'new_variable' should not exist")
except StateKeyError:
pass
with state.temporary({
"new_variable": 12345
}):
event.dosomething()
try:
_ = state["new_variable"]
print("variable 'new_variable' should not exist")
except StateKeyError:
pass
def register(mf):
mf.register_defaults({
"variable": 0
})
mf.register_event('dosomething', dosomething, unique=False)
mf.register_event('main', main, unique=False)
| 24.044444 | 64 | 0.622921 |
f1c468439f11d19e3fc030dbdf5145965a4a8287 | 5,323 | py | Python | anomaly-injector-agent/cassandra_stresser.py | citlab/distributed-anomaly-injection | 8be390e0bace6aa87fe60fa744e97408c40e7375 | [
"Apache-2.0"
] | null | null | null | anomaly-injector-agent/cassandra_stresser.py | citlab/distributed-anomaly-injection | 8be390e0bace6aa87fe60fa744e97408c40e7375 | [
"Apache-2.0"
] | null | null | null | anomaly-injector-agent/cassandra_stresser.py | citlab/distributed-anomaly-injection | 8be390e0bace6aa87fe60fa744e97408c40e7375 | [
"Apache-2.0"
] | 1 | 2022-03-06T23:18:34.000Z | 2022-03-06T23:18:34.000Z | # coding=utf-8
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import random
import time
import sys
import signal
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("cluster_ip", help="The ip of one of the cluster nodes")
parser.add_argument("-m", "--mode", help="set the mode the script will run in",
choices=["READ", "INSERT", "RANDOM"], default="READ")
parser.add_argument("-p", "--pause", help="set the pause in seconds between each request", type=float, default=0.001)
parser.add_argument("-b", "--batch_size",
help="set the amount of lines to insert on each request has no effect in READ mode",
type=int, default=1)
parser.add_argument("-t", "--table", help='set the table name for INSERT mode. Otherwise a random table is selected')
parser.add_argument("-k", "--keyspace", help="set the keyspace name. Otherwise a random keyspace is selected")
args = parser.parse_args()
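# Example invocation (illustrative only -- the host and keyspace are placeholders):
#   python cassandra_stresser.py 10.0.0.5 --mode INSERT --keyspace demo --batch_size 5 --pause 0.01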
actions = 0
write_actions = 0
read_actions = 0
random_table = not args.table
def setup_session(cip, ksp):
# cip - cluster ip
# ksp - keyspace
print("starting session")
sess = Cluster([cip]).connect(ksp)
print("Connected to cluster: " + sess.cluster.metadata.cluster_name)
return sess
def select_random_table(sess, ksp):
return random.choice(sess.cluster.metadata.keyspaces[ksp].tables.keys())
def select_random_keyspace(sess):
keys = sess.cluster.metadata.keyspaces.keys()
result = random.choice(keys)
while result.startswith("system"):
result = random.choice(keys)
return result
# inserts amount of random rows into the table named tableName pausing between each insert for pauseInSeconds
def insert_random_rows(sess, table_name, current_rows, columns, batch_size):
column_string = ",".join(columns)
insert_line = " INSERT INTO " + table_name + " (" + column_string + ") VALUES (" + "%s," * (len(columns) - 1) + "%s);"
if batch_size > 1:
statement = "BEGIN BATCH "
for i in range(batch_size):
statement += insert_line
statement += " APPLY BATCH"
statement = SimpleStatement(statement)
else:
statement = SimpleStatement(insert_line)
batch_values = create_random_values(batch_size, current_rows)
sess.execute_async(statement, batch_values)
def remove_rows(sess, amount, table_name, pause_in_seconds):
fetchStatement = SimpleStatement("SELECT * FROM " + table_name)
deleteStatement = SimpleStatement("DELETE FROM " + table_name + " WHERE id=%s IF EXISTS")
rows = sess.execute(fetchStatement)
i = 1
for row in rows:
if i >= amount:
return
sess.execute_async(deleteStatement, [row.id])
i += 1
pause(pause_in_seconds)
def read_from_table(sess, table_name):
sess.execute_async(SimpleStatement("SELECT * FROM %s LIMIT %d" % (table_name, random.choice(range(1, 200)))))
return
def pause(time_in_seconds):
if time_in_seconds > 0:
time.sleep(time_in_seconds)
def create_random_values(batch_size, rows):
result = []
for i in range(batch_size):
random_row = random.choice(rows)
for col in random_row:
result.append(col)
return result
def random_string():
legalChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0987654321"
result = ""
for i in range(15):
result += random.choice(legalChars)
return result
def choose_new_table():
if random_table and actions % 10000 == 0:
args.table = select_random_table(session, args.keyspace)
print("new table " + args.table + " was chosen")
def sigint_handler(signal, frame):
print("\nperformed %d actions in %s mode\n write actions: %d\n read actions: %d" % (actions, args.mode, write_actions, read_actions))
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
session = setup_session(args.cluster_ip, args.keyspace)
if not args.keyspace:
args.keyspace = select_random_keyspace(session)
if random_table:
args.table = select_random_table(session, args.keyspace)
session.set_keyspace(args.keyspace)
print("Selected table %s from keyspace %s" % (args.table, args.keyspace))
print("stressing database by sending {} queries every {} seconds...".format(args.mode, args.pause))
fetchStatement = SimpleStatement("SELECT * FROM " + args.table)
rows = session.execute(fetchStatement)
column_names = session.cluster.metadata.keyspaces[args.keyspace].tables[args.table].columns.keys()
if args.mode == "READ":
while True:
read_from_table(session, args.table)
actions += 1
pause(args.pause)
if args.mode == "INSERT":
rows = session.execute(fetchStatement)
while True:
insert_random_rows(session, args.table, rows.current_rows, column_names, args.batch_size)
actions += 1
write_actions += 1
pause(args.pause)
if args.mode == "RANDOM":
while True:
read_mode = random.choice([True, False])
if read_mode:
read_from_table(session, args.table)
read_actions += 1
else:
insert_random_rows(session, args.table, rows.current_rows, column_names, args.batch_size)
write_actions += 1
actions += 1
pause(args.pause)
| 34.121795 | 137 | 0.68514 |
5e40dd4f1d01efed6ab94dde8d0cb50630ee640c | 2,922 | py | Python | posetta/writers/_writer.py | gahjelle/posetta | 6e052c19a64b0bbdd0b9a7d3ac703000e615d53e | [
"MIT"
] | 2 | 2018-05-15T00:50:34.000Z | 2019-02-25T11:08:27.000Z | posetta/writers/_writer.py | gahjelle/posetta | 6e052c19a64b0bbdd0b9a7d3ac703000e615d53e | [
"MIT"
] | 13 | 2018-07-06T08:52:52.000Z | 2018-12-07T13:49:34.000Z | posetta/writers/_writer.py | gahjelle/posetta | 6e052c19a64b0bbdd0b9a7d3ac703000e615d53e | [
"MIT"
] | 2 | 2018-04-28T14:31:40.000Z | 2018-05-14T21:19:27.000Z | """Basic functionality for writing datafiles, extended by individual writers
Description:
------------
This module contains an abstract base class that can be extended for writing data files
in Posetta.
"""
# Standard library imports
import codecs
from typing import IO
# Third party imports
# Posetta imports
from posetta import data
from posetta.lib import exceptions
class Writer:
"""An abstract base class that has basic methods for writing a datafile
    This class provides functionality for writing a file. You should inherit from one of
    the specific writers, for instance ChainWriter, LineWriter or SinexWriter.
Attributes:
output_stream: IO[str] - Stream that output is written to.
data: data.CoordSet - The coordinate data to be written.
writer_name: str - Name of the writer (module).
file_path: str - Name of the datafile that will be written.
encoding: str - Encoding of output file.
"""
def __init__(
self, output_stream: IO[bytes], cset: data.CoordSet, encoding: str = "utf-8"
) -> None:
"""Set up the basic information needed by the writer
Args:
output_stream: Byte stream to write to.
cset: Data that will be written.
encoding: Encoding used when writing data.
"""
self.output_stream = codecs.getwriter(encoding)(output_stream)
self.data = cset
self.encoding = encoding
self.writer_name = self.__module__.split(".")[-1]
try:
self.file_path = output_stream.name
except AttributeError:
self.file_path = "<unknown>"
def setup_writer(self) -> None:
"""Set up a writer so that it can write data to a file.
        This method may be overridden if a writer needs to do some preparatory work.
"""
pass
def write(self) -> None:
"""Write data
This is a basic implementation that carries out the whole pipeline of writing
datafiles.
Subclasses should typically implement (at least) the `write_data`-method.
"""
self.setup_writer()
if self.data.num_obs:
self.write_data()
else:
raise exceptions.WriterError("Input dataset is empty")
def write_data(self) -> None:
"""Write data to the data file
        Subclasses should write the coordinate data in `self.data` to
        `self.output_stream` (i.e. the file at `self.file_path`).
"""
raise NotImplementedError(f"{self.writer_name} must implement write_data()")
def __repr__(self) -> str:
"""A simple string representation of the writer
"""
return f"{self.__class__.__name__}('{self.file_path}')"
| 32.831461 | 88 | 0.638261 |
b09d02b08bbbc81018fd0dc7c2e2d3db1532556b | 1,376 | py | Python | src/word_segmentation.py | vinnymaker18/funlp | 59859585526dc88339f80c8c797672587474f0e2 | [
"MIT"
] | null | null | null | src/word_segmentation.py | vinnymaker18/funlp | 59859585526dc88339f80c8c797672587474f0e2 | [
"MIT"
] | null | null | null | src/word_segmentation.py | vinnymaker18/funlp | 59859585526dc88339f80c8c797672587474f0e2 | [
"MIT"
] | null | null | null | """Word segmentation algorithms."""
import re
from min_edit_distance import min_edit_distance
def max_match(sentence, dictionary):
"""
MaxMatch algorithm for segmenting a sentence into a list of
words/tokens.
"""
    # Strip whitespace (and any other non-word characters) from the sentence.
    sentence = re.sub(r'\W', '', sentence)
# For now, we're not really concerned with efficiency.
words, pos = [], 0
while pos < len(sentence):
# Pick the longest prefix from position pos that's present in
# the dictionary. If no prefix is in dictionary, pick single
# letter as the next word.
for j in range(len(sentence), pos + 1, -1):
word = sentence[pos : j]
if word in dictionary:
pos = j
words.append(word)
break
else:
words.append(sentence[pos])
pos += 1
return words
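# Illustrative example (added for clarity; the toy dictionary is an assumption):
# with these entries MaxMatch greedily recovers the intended segmentation.
def _example_max_match():
    dictionary = {"we", "can", "only", "see", "a", "short", "distance", "ahead"}
    return max_match("wecanonlyseeashortdistanceahead", dictionary)
    # -> ['we', 'can', 'only', 'see', 'a', 'short', 'distance', 'ahead']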
def word_error_rate(segmented, gold):
"""
Word error rate is a metric used to measure accuracy of a segmentation
algorithm. It's the normalized edit distance b/w the list of words
outputted by the algorithm and the hand segmented gold list of words.
"""
# Deletion, insertion and modification all cost 1.
edit_dist = min_edit_distance(segmented, gold)
normalized_edit_dist = edit_dist / len(gold)
return normalized_edit_dist
| 29.276596 | 74 | 0.637355 |
b4db7f625b9560380ffd64658b655b09840a7a76 | 2,430 | py | Python | SPADE/data/base_method/image_option.py | kaijieshi7/oneflow_imaginaire | 51e90165eeb3e8b22be1bec0ed3f7deb7d87b482 | [
"Apache-2.0"
] | null | null | null | SPADE/data/base_method/image_option.py | kaijieshi7/oneflow_imaginaire | 51e90165eeb3e8b22be1bec0ed3f7deb7d87b482 | [
"Apache-2.0"
] | null | null | null | SPADE/data/base_method/image_option.py | kaijieshi7/oneflow_imaginaire | 51e90165eeb3e8b22be1bec0ed3f7deb7d87b482 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import numpy as np
import cv2
def loaded_image2ndarray(image, opt, method=cv2.INTER_CUBIC):
h, w, c = image.shape
# w = opt.load_size
# h = int(opt.load_size * h/w)
h, w = opt.my_size_h, opt.my_size_w
image = cv2.resize(image, (w, h), interpolation=method)
if opt.flip:
image = cv2.flip(image, 1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.transpose(image, (2, 0, 1))
image = ((image.astype(np.float32) / 255.0) -0.5) /0.5 # [-1, 1]
image = np.expand_dims(image, axis=0)
return np.ascontiguousarray(image, 'float32')
def loaded_label2ndarray(image, opt, method=cv2.INTER_NEAREST):
h, w = image.shape
# w = opt.load_size
# h = int(opt.load_size * h / w)
h, w = opt.my_size_h, opt.my_size_w
image = cv2.resize(image, (w, h), interpolation=method)
if opt.flip:
image = cv2.flip(image, 1)
image = np.expand_dims(image, axis=0)
image = np.expand_dims(image, axis=0)
return np.ascontiguousarray(image, 'float32')
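# Illustrative usage sketch (added; not part of the original SPADE code). The
# SimpleNamespace stand-in for `opt` is an assumption -- only the fields used
# above (my_size_h, my_size_w, flip) are populated.
def _example_loaded_image_shapes():
    from types import SimpleNamespace
    opt = SimpleNamespace(my_size_h=256, my_size_w=512, flip=False)
    bgr = np.zeros((300, 400, 3), dtype=np.uint8)    # dummy BGR frame
    label = np.zeros((300, 400), dtype=np.uint8)     # dummy segmentation map
    return loaded_image2ndarray(bgr, opt).shape, loaded_label2ndarray(label, opt).shape
    # -> ((1, 3, 256, 512), (1, 1, 256, 512))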
def np_transform(input_nd, opt, method=Image.BICUBIC, normalize=True):
out_nd = input_nd
if 'resize' in opt.resize_or_crop:
out_nd = out_nd.resize((opt.my_size_w, opt.my_size_h), method)
elif 'scale_width' in opt.resize_or_crop:
out_nd = __scale_width(out_nd, opt.my_size, method)
if opt.resize_or_crop == 'none':
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
out_nd = __make_power_2(out_nd, base, method)
if opt.flip:
out_nd = __flip(out_nd, opt.flip)
out_nd = np.array(out_nd)
if normalize:
        # np.float was removed from recent NumPy releases; use an explicit dtype.
        out_nd = ((out_nd.astype(np.float32) / 255.0) - 0.5) / 0.5
    return np.ascontiguousarray(out_nd.astype(np.float32), 'float32')
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
if (ow == target_width):
return img
# w = target_width
# h = int(target_width * oh / ow)
h, w = target_width, target_width
return img.resize((w, h), method)
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img | 32.837838 | 70 | 0.634568 |
0705705a3c49dcf7042c9703c0dce80eee2a7198 | 2,879 | py | Python | peekingduck/pipeline/nodes/model/efficientdet_d04/efficientdet_files/utils/model_process.py | leeping-ng/PeekingDuck | 16784b4c35f30c463fcc0c7caccdda6141797a6b | [
"Apache-2.0"
] | 1 | 2021-08-19T09:39:14.000Z | 2021-08-19T09:39:14.000Z | peekingduck/pipeline/nodes/model/efficientdet_d04/efficientdet_files/utils/model_process.py | sidney-tio/PeekingDuck | 966734ab81c9e466ab51495644673c2d52daf17c | [
"Apache-2.0"
] | null | null | null | peekingduck/pipeline/nodes/model/efficientdet_d04/efficientdet_files/utils/model_process.py | sidney-tio/PeekingDuck | 966734ab81c9e466ab51495644673c2d52daf17c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code of this file is mostly forked from
# [@xuannianz](https://github.com/xuannianz))
"""
Processing helper functions for EfficientDet
"""
from typing import Tuple
import numpy as np
import cv2
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
def preprocess_image(image: np.ndarray,
                     image_size: int) -> Tuple[np.ndarray, float]:
"""Preprocessing helper function for efficientdet
Args:
image (np.array): the input image in numpy array
image_size (int): the model input size as specified in config
Returns:
image (np.array): the preprocessed image
scale (float): the scale in which the original image was resized to
"""
# image, RGB
image_height, image_width = image.shape[:2]
if image_height > image_width:
scale = image_size / image_height
resized_height = image_size
resized_width = int(image_width * scale)
else:
scale = image_size / image_width
resized_height = int(image_height * scale)
resized_width = image_size
image = cv2.resize(image, (resized_width, resized_height))
image = image.astype(np.float32)
image /= 255.
image -= IMG_MEAN
image /= IMG_STD
pad_h = image_size - resized_height
pad_w = image_size - resized_width
image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
return image, scale
def postprocess_boxes(boxes: np.ndarray,
scale: float,
height: int,
width: int) -> np.ndarray:
"""Postprocessing helper function for efficientdet
Args:
boxes (np.array): the original detected bboxes from model output
scale (float): scale in which the original image was resized to
height (int): the height of the original image
width (int): the width of the original image
Returns:
boxes (np.array): the postprocessed bboxes
"""
boxes /= scale
boxes[:, 0] = np.clip(boxes[:, 0], 0, width - 1)
boxes[:, 1] = np.clip(boxes[:, 1], 0, height - 1)
boxes[:, 2] = np.clip(boxes[:, 2], 0, width - 1)
boxes[:, 3] = np.clip(boxes[:, 3], 0, height - 1)
boxes[:, [0, 2]] /= width
boxes[:, [1, 3]] /= height
return boxes
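# Illustrative sketch (not part of the original file): a round trip through the two
# helpers with dummy data. The 512 input size and the single dummy detection are
# assumptions for the example; real boxes come from the EfficientDet network output.
def _example_pre_and_postprocess():
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)          # stand-in for an RGB frame
    padded, scale = preprocess_image(frame, image_size=512)   # resized with aspect kept, padded to 512 x 512
    dummy_boxes = np.array([[10.0, 20.0, 100.0, 200.0]])      # (x1, y1, x2, y2) in model input space
    normalized = postprocess_boxes(dummy_boxes, scale, height=720, width=1280)
    return padded, normalized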
| 32.348315 | 76 | 0.64571 |
256febabd21ed9fa9e957c084dce0a6db15b9c30 | 1,276 | py | Python | dataschema/schema_example.py | vingkan/sql_tools | 5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1 | [
"Apache-2.0"
] | 1 | 2022-03-30T19:47:16.000Z | 2022-03-30T19:47:16.000Z | dataschema/schema_example.py | vingkan/sql_tools | 5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1 | [
"Apache-2.0"
] | null | null | null | dataschema/schema_example.py | vingkan/sql_tools | 5d6ab6a0ae31dc51e51ac1629f83f7bbf91396c1 | [
"Apache-2.0"
] | 1 | 2022-03-30T04:07:12.000Z | 2022-03-30T04:07:12.000Z | #
# nuna_sql_tools: Copyright 2022 Nuna Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example of table dataclass equivalent to example.proto."""
import datetime
import decimal
from dataclasses import dataclass
from dataschema.entity import Annotate
from dataschema import annotations
from typing import Optional
@annotations.order_by(values='member_id')
@annotations.clickhouse_engine(engine='MERGE_TREE')
@annotations.index_granularity(value=8192)
@dataclass
class Example:
"""Simple example of dataclass definition."""
member_id: str
num_claims: int
rx_num_claims: Annotate(Optional[int], annotations.Compression('ZSTD'))
start_date: Optional[datetime.date]
total_paid: Annotate(Optional[decimal.Decimal], annotations.Decimal(12, 2))
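# Illustrative sketch (not part of the original module): building one record from the
# dataclass above. All field values are made up for the example.
def _example_record() -> Example:
    return Example(
        member_id='member-0001',
        num_claims=3,
        rx_num_claims=None,
        start_date=datetime.date(2022, 1, 1),
        total_paid=decimal.Decimal('123.45'),
    )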
| 34.486486 | 79 | 0.775078 |
fc052ef3d1aa691eca8a68fd9e76951cbacadc7a | 531 | py | Python | lab/refactoring/replace_temp_with_query_fowler.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | lab/refactoring/replace_temp_with_query_fowler.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | lab/refactoring/replace_temp_with_query_fowler.py | Tanner-York-Make-School/SPD-2.31-Testing-and-Architecture | 623537a05cf5a9d50370a414a5056a78f95288eb | [
"MIT"
] | null | null | null | """
Adapted from Java code in the "Refactoring" book by Martin Fowler.
Replace temp with query
Code snippet. Not runnable.
"""
def get_price(quantity, item_price):
"""Gets the total price of a purchace given the quantiy and the items price"""
base_price = quantity * item_price
discount_factor = get_discount_factor(base_price)
return base_price * discount_factor
def get_discount_factor(base_price):
"""Gets the discount facort for a given base price"""
return 0.95 if base_price > 1000 else 0.98
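# Illustrative sketch (not part of the original snippet): calling the refactored
# functions. The quantity and unit price are made-up example values.
def _example_get_price():
    return get_price(quantity=1200, item_price=1.0)   # base price 1200 > 1000, so the 0.95 factor applies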
| 31.235294 | 82 | 0.736347 |
5bf0f3f5253f45f1da440e156ebbad03805c1770 | 712 | py | Python | sysinv/sysinv/sysinv/sysinv/objects/storage_file.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-02-07T18:57:44.000Z | 2021-09-11T10:29:34.000Z | sysinv/sysinv/sysinv/sysinv/objects/storage_file.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:01:55.000Z | 2021-01-14T12:01:55.000Z | sysinv/sysinv/sysinv/sysinv/objects/storage_file.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-10-13T08:37:46.000Z | 2022-02-09T00:21:25.000Z | #
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import storage_backend
class StorageFile(storage_backend.StorageBackend):
dbapi = db_api.get_instance()
fields = dict({}, **storage_backend.StorageBackend.fields)
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.storage_file_get(uuid)
def save_changes(self, context, updates):
self.dbapi.storage_file_update(self.uuid, # pylint: disable=no-member
updates)
| 24.551724 | 78 | 0.698034 |
ecb35a1e724a5a772253ca21a243802eb8427441 | 10,508 | py | Python | core_tools/data/ds/data_set_DataMgr.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | 1 | 2022-02-11T09:24:35.000Z | 2022-02-11T09:24:35.000Z | core_tools/data/ds/data_set_DataMgr.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | null | null | null | core_tools/data/ds/data_set_DataMgr.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | 2 | 2020-07-06T14:31:27.000Z | 2021-07-07T13:57:19.000Z | import numpy as np
import copy
import string
class m_param_origanizer():
def __init__(self, m_param_raw):
self.m_param_raw = m_param_raw
def get(self, key, nth_set):
items = self[key]
for i in items:
if i.nth_set == nth_set:
return i
raise ValueError('m_param with id {} and set {} not found in this data collection.'.format(key, nth_set))
def __getitem__(self, key):
'''
gets a list with parameters containing this key
Returns
list<m_param_raw> : raw parameters originating from this id.
'''
param_s = []
for m_param in self.m_param_raw:
if m_param.param_id == key:
param_s.append(m_param)
if len(param_s) != 0:
return param_s
raise ValueError('m_param with id {} not found in this data collection.'.format(key))
def get_m_param_id(self):
'''
get the measurement id's
'''
id_s = set()
for m_param in self.m_param_raw:
id_s.add(m_param.param_id_m_param)
return list(id_s)
def __copy__(self):
new_m_param = []
for i in self.m_param_raw:
new_m_param.append(copy.copy(i))
return m_param_origanizer(new_m_param)
class data_descriptor: #autogenerate parameter info
def __set_name__(self, owner, name): # from python 3.6 (super handy :) )
self.name = name
def __get__(self, obj, objtype):
return getattr(obj.__dict__.get("_dataset_data_description__raw_data"), self.name)
class dataset_data_description():
unit = data_descriptor()
label = data_descriptor()
# name = data_descriptor() ## overwritten by self.name in __init__
def __init__(self, name, m_param_raw, m_params_raw_collection):
'''
Args:
m_param_raw (m_param_raw) : pointer to the raw parameter to add
m_params_raw_collection (m_param_origanizer) : object containing a representation of all the data in the dataset
'''
self.name = name # @@@ will be overwritten by data_set_core.data_set.__init_properties
self.param_name = m_param_raw.name
self.__raw_data = m_param_raw
self.__raw_data_org = m_params_raw_collection
self.__repr_attr_overview = []
self.__populate_data()
def __populate_data(self):
for i in range(len(self.__raw_data.dependency)):
repr_attr_overview = []
raw_data = self.__raw_data_org[self.__raw_data.dependency[i]]
for j in range(len(raw_data)): #this is not pretty, but it works..
dataDescription = dataset_data_description('', raw_data[j], self.__raw_data_org)
# @@@ Fix x, y, z
if self.ndim <= 2:
name = string.ascii_lowercase[23+i] + str(j+1)
self.__setattr__(name, dataDescription)
if j == 0:
self.__setattr__(string.ascii_lowercase[23+i], dataDescription)
if len(raw_data) == 1:
name = string.ascii_lowercase[23+i]
repr_attr_overview += [(name, dataDescription)]
if self.ndim > 2:
self.__setattr__(string.ascii_lowercase[8+i] + str(j+1), dataDescription)
if len(raw_data) == 1:
self.__setattr__(string.ascii_lowercase[8+i], dataDescription)
repr_attr_overview += [(string.ascii_lowercase[8+i], dataDescription)]
else:
repr_attr_overview += [(string.ascii_lowercase[8+i] + str(j+1), dataDescription)]
dataDescription.name = repr_attr_overview[-1][0] # @@@ overwrites name
self.__repr_attr_overview += [repr_attr_overview]
if self.ndim <= 2:
name = string.ascii_lowercase[23+self.ndim-1]
if len(self.__raw_data.dependency) != 0:
name = string.ascii_lowercase[23+self.ndim]
else:
name = string.ascii_lowercase[8+self.ndim-1]
if len(self.__raw_data.dependency) != 0:
name = string.ascii_lowercase[8+self.ndim]
self.__setattr__(name, self)
def __call__(self):
if self.__raw_data.setpoint is True or self.__raw_data.setpoint_local is True:
if self.__raw_data.data_buffer.data.ndim > 1: #over dimensioned
# NOTE: Assumes the setpoint does not depend on the other dimensions!
# This will fail when the parameter is swept in alternating direction.
idx = [0] * self.__raw_data.data_buffer.data.ndim
idx[self.__raw_data.nth_dim] = slice(None)
return self.__raw_data.data_buffer.data[tuple(idx)]
return self.__raw_data.data_buffer.data
@property
def shape(self):
return self().shape
@property
def ndim(self):
return len(self.shape)
def full(self):
return self.__raw_data.data_buffer.data
def get_raw_content(self):
return self.__repr_attr_overview
def average(self, dim):
'''
average the array across 1 dimension
arg:
dim (str/int) : 0 ('x'), 1 ('y') , ...
'''
dim = self.dim_to_int(dim)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
raw_data_cpy.dependency.pop(dim)
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.averaging_lambda(dim)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def slice(self, dim, i):
'''
        take the ith slice of dimension dim
'''
dim = self.dim_to_int(dim)
if not isinstance(i, slice):
i = slice(int(i),int(i)+1)
if dim > self.ndim:
raise ValueError("you are trying to average over a dimension that does not exists")
idx = [slice(None)]*self.ndim
idx[dim] = i
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
if i.start is not None and i.stop-i.start == 1:
idx[dim] = i.start
raw_data_cpy.dependency.pop(dim)
elif i.stop is not None:
id_to_slice = raw_data_cpy.dependency[dim]
items= raw_data_org_copy[id_to_slice]
for item in items:
# TODO this is not generic yet (I think, this has to be checked).
item.data_buffer.buffer_lambda = item.data_buffer.slice_lambda([idx[dim]])
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.slice_lambda(idx)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
def __getitem__(self, args):
if not isinstance(args, tuple):
args = [args]
args = list(args)
to_slice = None
for i in range(len(args)):
if isinstance(args[i], int):
to_slice = (i, slice(args[i], args[i]+1))
elif isinstance(args[i], slice) and args[i] != slice(None):
to_slice = (i, args[i])
if to_slice is None:
return self
args.pop(to_slice[0])
return self.slice(to_slice[0], to_slice[1])[tuple(args)]
def __repr__(self):
output_print = ""
output_print += "| " + "{:<15}".format(self.name) + " | " + "{:<15}".format(self.label) + " | " + "{:<8}".format(self.unit)+ " | " + "{:<25}".format(str(self.shape)) + "|\n"
for i in self.__repr_attr_overview:
for j in i:
dataDescription = j[1]
if dataDescription.ndim == 1:
output_print += "| " + "{:<14}".format(j[0]) + " | " + "{:<15}".format(dataDescription.label) + " | " + "{:<8}".format(dataDescription.unit)+ " | " + "{:<25}".format(str(dataDescription.shape)) + "|\n"
return output_print
@staticmethod
def dim_to_int(dim):
'''
        convert dim (if text) into the number of the array axis on which to perform an operation (e.g. x = 0, y = 1)
'''
if isinstance(dim, str):
if dim in 'xyz':
dim = list(string.ascii_lowercase).index(dim) - 23
else:
dim = list(string.ascii_lowercase).index(dim) - 8
return dim
class data_set_property_intializer():
'''
    mock-up of the dataclass for development purposes -- don't use this class.
'''
def __init__(self, m_params):
self.__repr_attr_overview = []
# m_meas_id's
m_id = m_params.get_m_param_id()
for i in range(len(m_id)): #this is not pretty.
n_sets = len(m_params[m_id[i]])
repr_attr_overview = []
for j in range(n_sets):
ds_descript = dataset_data_description('', m_params.get(m_id[i], j), m_params)
name = 'm' + str(i+1) + string.ascii_lowercase[j]
setattr(self, name, ds_descript)
if j == 0:
setattr(self, 'm' + str(i+1), ds_descript)
if j == 0 and n_sets==1: #consistent printing
repr_attr_overview += [('m' + str(i+1), ds_descript)]
ds_descript.name = 'm' + str(i+1)
else:
repr_attr_overview += [(name, ds_descript)]
ds_descript.name = name
self.__repr_attr_overview += [repr_attr_overview]
def __repr__(self):
output_print = "DataSet :: my_measurement_name\n\nid = 1256\nTrueID = 1225565471200\n\n"
output_print += "| idn | label | unit | size |\n"
output_print += "---------------------------------------------------------------------------\n"
for i in self.__repr_attr_overview:
for j in i:
output_print += j[1].__repr__()
output_print += "\n"
output_print += "database : vanderyspen\n"
output_print += "set_up : XLD\n"
output_print += "project : 6dot\n"
output_print += "sample_name : SQ19\n"
return output_print | 37.798561 | 228 | 0.574705 |
9c3cf7ee3df54990f99e6772b7382d4e8c3174b0 | 6,575 | py | Python | pysrc/lib/math2d.py | Blimba/PyWC3 | 16d519bbb98e7593b8d14d14d9b81b6d6932ef0c | [
"MIT"
] | 14 | 2020-02-16T14:25:02.000Z | 2021-12-07T13:57:34.000Z | pysrc/lib/math2d.py | sylvainSUPINTERNET/PyWC3 | 16d519bbb98e7593b8d14d14d9b81b6d6932ef0c | [
"MIT"
] | 3 | 2020-04-20T02:31:31.000Z | 2022-02-25T17:06:12.000Z | pysrc/lib/math2d.py | sylvainSUPINTERNET/PyWC3 | 16d519bbb98e7593b8d14d14d9b81b6d6932ef0c | [
"MIT"
] | 2 | 2021-03-17T13:15:32.000Z | 2021-09-26T09:24:21.000Z | import math
from ..df.commonj import *
class Vector2:
active = []
_fifo_buffer_size = 50 # this is the amount of temporary vectors used
_loc = None
_bin = {}
def __new__(cls,x=0.0,y=0.0,temp=False):
if cls in Vector2._bin and len(Vector2._bin[cls]) > cls._fifo_buffer_size:
o = Vector2._bin[cls].pop(0)
cls.active.append(o)
return o
else:
o = object.__new__(cls)
cls.active.append(o)
return o
def permanent(self):
cls = type(self)
if self not in cls.active:
cls.active.append(self)
Vector2._bin[cls].remove(self)
return self
def destroy(self):
cls = type(self)
if cls in Vector2._bin:
if self in Vector2.active:
cls.active.remove(self)
Vector2._bin[cls].append(self)
else:
cls.active.remove(self)
Vector2._bin[cls] = [self]
def __init__(self,x=0.0,y=0.0,temp=False):
self.x = x
self.y = y
if temp:
self.destroy()
@staticmethod
def stats():
c = 0
for cls in Vector2._bin:
c += len(Vector2._bin[cls])
return 'Vector2 In use: {}, Recycle bin: {}'.format(str(len(Vector2.active)), str(c))
def distance(p1,p2):
dx = p1.x-p2.x
dy = p1.y-p2.y
return math.sqrt(dx*dx+dy*dy)
def dot(self,v):
return self.x*v.x+self.y*v.y
def cross(self,v):
'''
Treats the vectors as if they were 3D with z = 0, and returns the z of the cross product.
:param v:
:return float:
'''
return self.x*v.y - v.x*self.y
def __add__(self, p):
return Vector2(self.x + p.x, self.y + p.y,True)
def __sub__(self, p):
return Vector2(self.x - p.x, self.y - p.y,True)
def __mul__(self, other):
if isinstance(other,float):
return Vector2(self.x*other, self.y*other,True)
elif isinstance(other, Vector2):
return Vector2(self.x*other.x, self.y*other.y,True)
def __truediv__(self, other):
if isinstance(other,float):
return Vector2(self.x/other, self.y/other,True)
elif isinstance(other,Vector2):
return Vector2(self.x/other.x, self.y/other.y,True)
def __len__(self):
return math.sqrt(self.x*self.x+self.y*self.y)
def __str__(self):
return "Vector2 x: "+str(self.x)+", y: "+str(self.y)
def add(self,v):
self.x += v.x
self.y += v.y
return self
def subtract(self,v):
self.x -= v.x
self.y -= v.y
return self
def multiply(self,v):
if isinstance(v, float):
self.x *= v
self.y *= v
elif isinstance(v,Vector2):
self.x *= v.x
self.y *= v.y
return self
def divide(self,v):
if isinstance(v, float):
v = 1/v
self.x *= v
self.y *= v
elif isinstance(v,Vector2):
self.x /= v.x
self.y /= v.y
return self
def normalize(self):
return self.divide(len(self))
def get_angle(self,other):
return self.dot(other)/(len(self) * len(other))
def project(self,other):
return other * self.dot(other)/other.dot(other)
def rotate(self,theta,direction='cw'):
cos = None
sin = None
if direction == 'cw':
cos = math.cos(theta)
sin = math.sin(theta)
else:
cos = math.cos(-theta)
sin = math.sin(-theta)
        # use the original x for both components; overwriting self.x first would skew the rotation
        x = self.x
        self.x = x*cos - self.y*sin
        self.y = x*sin + self.y*cos
        return self
def show(self):
fx = AddSpecialEffect(r"Abilities\\Spells\\Orc\\Bloodlust\\BloodlustTarget.mdl",self.x,self.y)
return self
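# Illustrative sketch (not part of the original module): plain Vector2 arithmetic and
# the recycle-bin behaviour. The coordinates are made up, and nothing here touches the
# WC3 natives used by show().
def _example_vector2_usage():
    a = Vector2(3.0, 4.0)
    b = Vector2(1.0, 0.0)
    d = a.distance(b)          # Euclidean distance between the two points
    s = (a + b).permanent()    # operators return temporary vectors; keep this one alive
    a.add(b)                   # in-place variants mutate and return self
    s.destroy()                # hand the vector back to the recycle bin
    return d, Vector2.stats()  # stats() reports active vs. recycled counts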
class Line2:
def __init__(self,p1,p2):
self.p1 = p1
self.p2 = p2
def closest_point(self,p,segment = True):
dx = self.p2.x - self.p1.x
dy = self.p2.y - self.p1.y
d2 = dx * dx + dy * dy
nx = ((p.x - self.p1.x) * dx + (p.y - self.p1.y) * dy) / d2
if segment:
if nx < 0: nx = 0
elif nx > 1: nx = 1
return Vector2(dx * nx + self.p1.x, dy * nx + self.p1.y)
def distance(self,p,segment = True):
lp = self.closest_point(p,segment)
return lp.distance(p)
def normal(self,p):
return self.closest_point(p).subtract(p).normalize()
def show(self):
z1 = GetLocationZ(Location(self.p1.x,self.p1.y))
z2 = GetLocationZ(Location(self.p2.x, self.p2.y))
AddLightningEx("DRAL",False,self.p1.x,self.p1.y,z1,self.p2.x,self.p2.y,z2)
return self
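# Illustrative sketch (not part of the original module): projecting a point onto a
# segment with Line2. Coordinates are made up; show() is skipped because it needs the
# WC3 natives.
def _example_line2_usage():
    seg = Line2(Vector2(0.0, 0.0), Vector2(10.0, 0.0))
    nearest = seg.closest_point(Vector2(3.0, 4.0))    # clamped to the segment -> (3.0, 0.0)
    return nearest, seg.distance(Vector2(3.0, 4.0))   # perpendicular distance -> 4.0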
class Rectangle:
def __init__(self,minx,miny,maxx,maxy):
self.minx = minx
self.maxx = maxx
self.miny = miny
self.maxy = maxy
@staticmethod
def from_points(p1,p2):
minx = p1.x if p1.x < p2.x else p2.x
miny = p1.y if p1.y < p2.y else p2.y
maxx = p1.x if p1.x > p2.x else p2.x
maxy = p1.y if p1.y > p2.y else p2.y
return Rectangle(minx,miny,maxx,maxy)
@staticmethod
def from_rect(rect):
return Rectangle(GetRectMinX(rect), GetRectMinY(rect), GetRectMaxX(rect), GetRectMaxY(rect))
def random_point(self,crop=0):
x = math.random()*(self.maxx-(crop*2)-self.minx)+self.minx+crop
y = math.random()*(self.maxy-(crop*2)-self.miny)+self.miny+crop
return Vector2(x,y,True)
def __contains__(self, p):
if isinstance(p, Vector2):
return p.x >= self.minx and p.x <= self.maxx and p.y >= self.miny and p.y <= self.maxy
def closest_point(self,p):
if p in self:
return Vector2(p.x,p.y)
if p.x < self.minx:
if p.y < self.miny:
return Vector2(self.minx, self.miny)
if p.y > self.maxy:
return Vector2(self.minx, self.maxy)
return Vector2(self.minx, p.y)
if p.x > self.maxx:
if p.y < self.miny:
return Vector2(self.maxx, self.miny)
if p.y > self.maxy:
return Vector2(self.maxx, self.maxy)
return Vector2(self.maxx, p.y)
if p.y > self.maxy:
return Vector2(p.x,self.maxy)
return Vector2(p.x,self.miny)
def distance(self,p):
rp = self.closest_point(p)
return rp.distance(p)
def normal(self,p):
return self.closest_point(p).subtract(p).normalize() | 31.014151 | 102 | 0.540228 |
7dc6f455566fec5a6d7d812e34893ee321fea8c1 | 431 | py | Python | onadata/apps/fieldsight/token_gen_invites.py | awemulya/fieldsight-kobocat | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/apps/fieldsight/token_gen_invites.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 20 | 2017-04-27T09:14:27.000Z | 2019-01-17T06:35:52.000Z | onadata/apps/fieldsight/token_gen_invites.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
import datetime
class InviteActivationTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, invite):
return (
six.text_type(invite.pk) + six.text_type(datetime.datetime.now()) +
six.text_type(invite.group.name)
)
invite_activation_token = InviteActivationTokenGenerator() | 35.916667 | 79 | 0.756381 |
88b740445ff902e3eb8e08dcae209e18b37da896 | 23,756 | py | Python | pyqg/changed_f_beta_nk0_filter_tilde_newdomain/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | pyqg/changed_f_beta_nk0_filter_tilde_newdomain/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | pyqg/changed_f_beta_nk0_filter_tilde_newdomain/layered_model.py | wanyingkang/pyqg | ffcb48573a4a66d7c48f64c69734a567547e0962 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
from numpy import pi
from . import model
try:
import mkl
np.use_fastnumpy = True
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
except ImportError:
pass
class LayeredModel(model.Model):
r"""Layered quasigeostrophic model.
This model is meant to represent flows driven by baroclinic instabilty of a
base-state shear. The potential vorticity anomalies qi are related to the
streamfunction psii through
.. math::
{q_i} = \nabla^2\psi_i + \frac{f_0^2}{H_i} \left(\frac{\psi_{i-1}-
\psi_i}{g'_{i-1}}- \frac{\psi_{i}-\psi_{i+1}}{g'_{i}}\right)\,,
\qquad i = 2,\textsf{N}-1\,,
{q_1} = \nabla^2\psi_1 + \frac{f_0^2}{H_1} \left(\frac{\psi_{2}-
\psi_1}{g'_{1}}\right)\,, \qquad i =1\,,
{q_\textsf{N}} = \nabla^2\psi_\textsf{N} +
\frac{f_0^2}{H_\textsf{N}} \left(\frac{\psi_{\textsf{N}-1}-
        \psi_\textsf{N}}{g'_{\textsf{N}-1}}\right) + \frac{f_0}{H_\textsf{N}}h_b\,,
\qquad i =\textsf{N}\,,
where the reduced gravity, or buoyancy jump, is
.. math::
        g'_i \equiv g \frac{\mathrm{pt}_{i+1}-\mathrm{pt}_i}{\mathrm{pt}_i}\,.
The evolution equations are
.. math::
\,{q_{i}}_t + \mathsf{J}\left(\psi_i\,, q_i\right) + \textsf{Q}_y {\psi_i}_x
- \textsf{Q}_x {\psi_i}_y = \text{ssd} -
r_{ek} \delta_{i\textsf{N}} \nabla^2 \psi_i\,, \qquad i = 1,\textsf{N}\,,
    where the mean potential vorticity gradients are
.. math::
\textsf{Q}_x = \textsf{S}\textsf{V}\,,
and
.. math::
\textsf{Q}_y = \beta\,\textsf{I} - \textsf{S}\textsf{U}\,\,,
where S is the stretching matrix, I is the identity matrix,
and the background velocity is
:math:`\vec{\textsf{V}}(z) = \left(\textsf{U},\textsf{V}\right)`.
"""
def __init__(
self,
g = 9.81,
beta=1.5e-11, #? gradient of coriolis parameter
nz = 4, # number of layers
rd=15000.0, # deformation radius
H = None, # layer thickness. If a scalar number, then copy the same H for all layers
U=None, # zonal base state flow. If None, use U=0 for all layers
V=None, # meridional base state flow. If None, use V=0 for all layers
pt = None, # potential temperature
c2 = None,
        delta = None, # only used for nz=2; can be left blank if using the multi-layer model
H0 = 7750, # standard atm height scale
R = 287.,
kappa = 2./7.,
        tau = 40, # time scale for restoring terms, in days
**kwargs
):
"""
Parameters
----------
nz : integer number
Number of layers (> 1)
beta : number
Gradient of coriolis parameter. Units: meters :sup:`-1`
seconds :sup:`-1`
rd : number
Deformation radius. Units: meters. Only necessary for
the two-layer (nz=2) case.
delta : number
Layer thickness ratio (H1/H2). Only necessary for the
two-layer (nz=2) case. Unitless.
U : list of size nz
Base state zonal velocity. Units: meters s :sup:`-1`
V : array of size nz
Base state meridional velocity. Units: meters s :sup:`-1`
H : array of size nz
Layer thickness. Units: meters
pt: array of size nz.
Layer Potential Temperature. Units: Kelvin
"""
# physical
if U is None:
U=np.zeros([nz])
if V is None:
V=np.zeros([nz])
if len(np.array(H))==1 and nz!=1:
H=np.tile(np.array(H),nz)
self.nz = nz
self.g = g
self.beta = beta
self.rd = rd
self.delta = delta
self.R = R
self.kappa = kappa
self.tau = tau
self.Ubg = np.array(U)
self.Vbg = np.array(V)
self.Hi = np.array(H)
self.pti = np.array(pt)
self.c2 = np.array(c2)
self.H0 = H0
super(LayeredModel, self).__init__(nz=nz, **kwargs)
self.vertical_modes()
print("nx:{}".format(self.nx))
print("ny:{}".format(self.ny))
print("nz:{}".format(self.nz))
### PRIVATE METHODS - not meant to be called by user ###
def _initialize_stretching_matrix(self):
""" Set up the stretching matrix """
self.S = np.zeros((self.nz, self.nz))
if (self.nz==2) and (self.rd) and (self.delta):
self.del1 = self.delta/(self.delta+1.)
self.del2 = (self.delta+1.)**-1
self.Us = self.Ubg[0]-self.Ubg[1]
self.F1 = self.rd**-2 / (1.+self.delta)
self.F2 = self.delta*self.F1
self.S[0,0], self.S[0,1] = -self.F1, self.F1
self.S[1,0], self.S[1,1] = self.F2, -self.F2
else:
for i in range(self.nz):
                # Adding other stratification terms by Wanying Kang @ Feb 14 2017
                # For all of the S elements below, the second half of each expression
                # is added to represent the stratification 1/H term.
                # Terms representing the boundary conditions at the top and bottom are still needed.
# q1 = q1 + (self.f*self.g/self.gpi[i]*(1-self.Hi[i]/self.H0/2))*(self.T1(x,y)/self.T0) ,i=0
# qN = qN + (self.f*self.g/self.gpi[i]*(-1-self.Hi[i]/self.H0/2))*(self.TN(x,y)/self.T0) ,i=nz-1
# delete the Hi terms at i=0 and i=nz-1 by assuming \psi_zz=0 at top and bottom
# This assumption means vertical T gradient is zero. T = -f/R*\psi_{z^*}
if i == 0:
# 1. assume \Psi_zz|_{top/bot}=0
#self.S[i,i] = (-self.f2/self.H0/self.gpi[i])
#self.S[i,i+1] = (self.f2/self.H0/self.gpi[i])
# 2. assume \Psi_z|_{out_of_range}=0, need to substract constant term to represent the constant temperature when invert \Psi.
# self.S[i,i] = (-self.f2/self.Hi[i]/self.gpi[i]-
# self.f2/self.H0/self.gpi[i]/2.)
# self.S[i,i+1] = (self.f2/self.Hi[i]/self.gpi[i]+
# self.f2/self.H0/self.gpi[i]/2.)
# 3. transform \Psi -> \tilde \Psi, use BC \Psi_zz|_{top/bot}=0
self.S[i,i] = -self.f2*self.c2[i]
# 4. transform \Psi -> \tilde \Psi, use BC \Psi_z|_{out_of_range}=0, need to substract constant term when invert \Psi.
#self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i]-self.f2*self.c2[i]
#self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
elif i == self.nz-1:
# 1.
#self.S[i,i] = (self.f2/self.H0/self.gpi[i-1])
#self.S[i,i-1] = (-self.f2/self.H0/self.gpi[i-1])
# 2.
# self.S[i,i] = (-self.f2/self.Hi[i]/self.gpi[i-1]+
# self.f2/self.H0/self.gpi[i-1]/2.)
# self.S[i,i-1] = (self.f2/self.Hi[i]/self.gpi[i-1]-
# self.f2/self.H0/self.gpi[i-1]/2.)
# 3.
self.S[i,i] = -self.f2*self.c2[i]
# 4.
#self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i-1]-self.f2*self.c2[i]
#self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
else:
# 1. or 2.
#self.S[i,i-1] = (self.f2/self.Hi[i]/self.gpi[i-1]-
# self.f2/self.H0/self.gpi[i-1]/2.)
#self.S[i,i] = (-(self.f2/self.Hi[i]/self.gpi[i] +
# self.f2/self.Hi[i]/self.gpi[i-1])-
# (self.f2/self.H0/self.gpi[i]/2.-
# self.f2/self.H0/self.gpi[i-1]/2.))
#self.S[i,i+1] = (self.f2/self.Hi[i]/self.gpi[i]+
# self.f2/self.H0/self.gpi[i]/2.)
# 3. or 4.
self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
self.S[i,i] = (-(self.f2/self.Hi[i]/self.gpi[i] +
self.f2/self.Hi[i]/self.gpi[i-1])
-self.f2*self.c2[i])
self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
def _initialize_background(self):
"""Set up background state (zonal flow and PV gradients)."""
self.H = self.Hi.sum()
if not (self.nz==2):
#self.gpi = -self.g*(self.pti[1:]-self.pti[:-1])/self.pti[:-1]
self.gpi = -(self.pti[1:]-self.pti[:-1])/self.H0*self.R*np.exp(-self.kappa/self.H0*np.asarray(self.z[:-1]))
self.f2gpi = (self.f2/self.gpi)[:,np.newaxis,np.newaxis]
assert self.gpi.size == self.nz-1, "Invalid size of gpi"
assert np.all(self.gpi>0.), "Buoyancy jump has negative sign!"
            assert self.Hi.size == self.nz, self.logger.error('size of Hi does not match number of vertical levels nz')
            assert self.pti.size == self.nz, self.logger.error('size of pti does not match number of vertical levels nz')
            assert self.Ubg.size == self.nz, self.logger.error('size of Ubg does not match number of vertical levels nz')
            assert self.Vbg.size == self.nz, self.logger.error('size of Vbg does not match number of vertical levels nz')
else:
self.f2gpi = np.array(self.rd**-2 *
(self.Hi[0]*self.Hi[1])/self.H)[np.newaxis,np.newaxis]
## Initialize stretching matrix
self._initialize_stretching_matrix()
## the meridional PV gradients in each layer
## Original version
#self.Qy = self.beta - np.dot(self.S,self.Ubg)
#self.Qx = np.dot(self.S,self.Vbg)
## complex versions, multiplied by k, speeds up computations to precompute
#self.ikQy = self.Qy[:,np.newaxis,np.newaxis]*1j*self.k
#self.ilQx = self.Qx[:,np.newaxis,np.newaxis]*1j*self.l
## Set the meridional PV gradients in each layer
        # Wanying Kang: add latitude dependence to beta.
        # Qy is an nz*nl*nl matrix; the convolution matrix occupies the nl*nl dimensions.
        # The kernel calculates _ikQy from Qy, instead of using ikQy here.
        # _ikQy is originally an nz*nk matrix, different from the original ikQy which is an nz*nl*nk matrix. After my modification, they are the same.
# This ikQy is used in stability analysis in model.py
#b_lat = np.asarray(self.coslat)**2.*(np.asarray(self.coslat)**2.-2.*np.asarray(self.sinlat)**2.)
b_lat = np.asarray(self.coslat)**3.
b_lat[int(self.nl/2):,:] = -b_lat[int(self.nl/2):,:]
b_lat1 = np.squeeze(b_lat[:,0])
b_lat = np.tile(b_lat[np.newaxis,:,:], (self.nz,1,1))
bh_lat = self.fft(b_lat)/(self.nl**2)/(self.nl)
bh_lat = np.squeeze(bh_lat[0,:,0]) # uniform in x direction, so pick k=0
#Cbh1 = (self.convmtx( bh_lat[:int(self.nl/2)] , self.nl ))[:int(self.nl/2),:]
#Cbh2 = (self.convmtx( bh_lat[int(self.nl/2):] , self.nl ))[-int(self.nl/2):,:]
#Cbh = np.concatenate( [Cbh1, Cbh2] , 0 )
order = np.concatenate([range(int(self.nl/2),self.nl),range(0,int(self.nl/2))])
Cbh_shift = self.convmtx( bh_lat[order] , self.nl )
Cbh_shift = Cbh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
Cbh = Cbh_shift[order,:]
Cbh = Cbh[:,order]
# Test Wanying Kang's convolution
#b_test1 = np.arange(self.nl)/2.
#b_test = np.tile(b_test1[np.newaxis,:,np.newaxis], (self.nz,1,self.nx))
#bh_test = self.fft(b_test)
#bh_test1 = np.squeeze(bh_test[0,:,0])
#b_result = b_test1*b_lat1
#bh_result = np.dot(Cbh,bh_test1)
#bh_result = self.ifft(np.tile(bh_result[np.newaxis,:,np.newaxis], (self.nz,1,self.nk)))
#bh_result = np.squeeze(bh_result[0,:,0])
#print(b_result)
#print(bh_result)
# real space version of Qy Qx:
#self.Qy = np.tile(self.beta*b_lat1[np.newaxis,:],[self.nz,1]) - np.tile((np.dot(self.S,self.Ubg))[:,np.newaxis],[1,self.nl])
#self.Qx = np.tile(np.dot(self.S,self.Vbg)[:,np.newaxis],[1,self.nl])
# spectra space version of Qy Qx:
self.Qy = np.tile(self.beta*Cbh[np.newaxis,:,:],[self.nz,1,1]) - np.tile((np.dot(self.S,self.Ubg))[:,np.newaxis,np.newaxis],[1,self.nl,self.nl])
self.Qx = np.dot(self.S,self.Vbg)
        # complex versions, multiplied by k; precomputing these speeds up the computations
        # Wanying Kang: add latitude dependence to beta. ikQy is an nz*nl*nl*nk matrix
self.ikQy = self.Qy[:,:,:,np.newaxis]*1j*self.kk[np.newaxis,np.newaxis,np.newaxis,:]
self.ilQx = self.Qx[:,np.newaxis,np.newaxis]*1j*self.l #Original version
## lat-dependent restoring terms
g_lat1 = 1.+50.*(1-np.tanh(7*self.sinlat1))
g_lat = np.tile(g_lat1[np.newaxis,:,np.newaxis], (self.nz,1,self.nx))
gh_lat = self.fft(g_lat)/(self.nl**2)/(self.nl)
gh_lat = np.squeeze(gh_lat[0,:,0])
Cgh_shift = self.convmtx( gh_lat[order] , self.nl )
Cgh_shift = Cgh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
Cgh = Cgh_shift[order,:]
Cgh = Cgh[:,order]
self.gamma = np.tile(1./self.tau/86400.*Cgh[np.newaxis,:,:],[self.nz,1,1])
# def _initialize_inversion_matrix(self):
# # Original Version
# a = np.ma.zeros((self.nz, self.nz, self.nl, self.nk), np.dtype('float64'))
#
# if (self.nz==2):
# det_inv = np.ma.masked_equal(
# ( (self.S[0,0]-self.wv2)*(self.S[1,1]-self.wv2) -\
# self.S[0,1]*self.S[1,0] ), 0.)**-1
# a[0,0] = (self.S[1,1]-self.wv2)*det_inv
# a[0,1] = -self.S[0,1]*det_inv
# a[1,0] = -self.S[1,0]*det_inv
# a[1,1] = (self.S[0,0]-self.wv2)*det_inv
# else:
# I = np.eye(self.nz)[:,:,np.newaxis,np.newaxis]
# M = self.S[:,:,np.newaxis,np.newaxis]-I*self.wv2
# M[:,:,0,0] = np.nan # avoids singular matrix in inv()
# a = np.linalg.inv(M.T).T
# print(a[a!=0])
# self.a = np.ma.masked_invalid(a).filled(0.)
def _initialize_inversion_matrix(self):
        # Wanying Kang: Do convolution if f has lat-structure as
# f=f0*cos(lat)*sin(lat), f2=f0^2*cos^2(lat)*sin^2(lat)
a = np.ma.zeros((self.nz, self.nz, self.nl, self.nl, self.nk0), np.dtype(np.complex128))
if (self.nz==2):
Ij = np.eye(self.nl)[np.newaxis,np.newaxis,:,:,np.newaxis]
det_inv = np.ma.masked_equal(
( (self.S[0,0]-self.wv2)*(self.S[1,1]-self.wv2) -\
self.S[0,1]*self.S[1,0] ), 0.)**-1
for j in range(self.nl):
a[0,0,j,j] = (self.S[1,1]-self.wv2)*det_inv
a[0,1,j,j] = -self.S[0,1]*det_inv
a[1,0,j,j] = -self.S[1,0]*det_inv
a[1,1,j,j] = (self.S[0,0]-self.wv2)*det_inv
else:
Izl = np.multiply.outer(np.eye(self.nz),np.eye(self.nl))
Iz = np.eye(self.nz)
            # Wanying Kang: Do convolution if f has lat-structure as
# f=f0*cos(lat)*sin(lat), f2=f0^2*cos^2(lat)*sin^2(lat)
f_lat = np.asarray(self.coslat)**2.*np.asarray(self.sinlat)**2.
f_lat = np.tile(f_lat[np.newaxis,:,:], (self.nz,1,1))
fh_lat = self.fft(f_lat)/(self.nl**2)/(self.nl)
fh_lat = np.squeeze(fh_lat[0,:,0]) # uniform in x direction, so pick k=0
#Cfh1 = (self.convmtx( fh_lat[:int(self.nl/2)] , self.nl ))[:int(self.nl/2),:]
#Cfh2 = (self.convmtx( fh_lat[int(self.nl/2):] , self.nl ))[-int(self.nl/2):,:]
#Cfh = np.concatenate( [Cfh1, Cfh2] , 0 )
#Cfh = np.eye(self.nl) # compare with non-lat dependent case
order = np.concatenate([range(int(self.nl/2),self.nl),range(0,int(self.nl/2))])
Cfh_shift = self.convmtx( fh_lat[order] , self.nl )
Cfh_shift = Cfh_shift[int(self.nl/2):-int(self.nl/2)+1,:]
Cfh = Cfh_shift[order,:]
Cfh = Cfh[:,order]
# Wanying Kang: Make up poisson operator, M
M = (np.multiply.outer(self.S,Cfh))[:,:,:,:,np.newaxis]-Izl[:,:,:,:,np.newaxis]*self.wv2[np.newaxis,np.newaxis,np.newaxis,:,:]
            # Wanying Kang: Add BCs by modifying the Poisson operator M;
            # give up the equations at high k wavenumbers; in total nz*nk0+nz*(nk0-1) slots are needed.
# 1. NP: p|_{NP}=0
# For all k, wave#k component has no Amp at NP.
#M[:,:,int(self.nl/2),:,0:self.nk0]=(Iz[:,:,np.newaxis,np.newaxis])*((np.exp(2*pi*1j*self.ll*(self.ny-1)/self.nl)/self.nl)[np.newaxis,np.newaxis,:,np.newaxis])
#M[:,:,int(self.nl/2),int(self.nl/2),0:self.nk0]=0.
#M[:,:,int(self.nl/2),int(self.nl*3/4),0:self.nk0]=0.
# 2. SP: p_x|_{SP}=0. 1j*k*ph|_{SP}=0 where ph is fourier transformed p in x dir.
# For k=0, the equation is automatically satisfied; For k/=0, this means wave#k component has no Amp at SP.
#M[:,:,int(self.nl*3/4),:,1:self.nk0]=(Iz[:,:,np.newaxis,np.newaxis])*(1/self.nl)*self.kk[1:self.nk0]
#M[:,:,int(self.nl/2),int(self.nl/2),1:self.nk0]=0.
#M[:,:,int(self.nl/2),int(self.nl*3/4),1:self.nk0]=0.
# Wanying Kang: calculate matrix inversion
Mt = np.ascontiguousarray(np.transpose(M,[4,0,2,1,3]))
Mt.shape=(self.nk,self.nz*self.nl,self.nz*self.nl)
            #Mt[0,:,:]=np.nan # avoids singular matrix in inv(), however this filters out k=0 components.
for ik in range(self.nk0):
at = np.linalg.inv(Mt[ik,:,:])
at.shape = (self.nz,self.nl,self.nz,self.nl)
a[:,:,:,:,ik] = np.transpose(at,[0,2,1,3])
a[:,:,0,0,0]=0.
#self.a = np.ma.masked_invalid(a).filled(0.)
self.a = a
# Wanying Kang add b matrix to invert k=0 component,
# now this is not necessary since I changed the way I calculate a above.
#Mb = np.multiply.outer(self.S,Cfh)-Izl*(self.ll**2)
#Mb[:,:,int(self.nl/2)-1,:]=(Iz[:,:,np.newaxis])*((np.exp(2*pi*1j*self.ll*(self.ny-1)/self.nl)/self.nl)[np.newaxis,np.newaxis,:])
#Mb = M[:,:,:,:,0]
#Mb[:,:,0,0]=np.nan
#Mbt = np.ascontiguousarray(np.transpose(Mb,[0,2,1,3]))
#Mbt.shape=(self.nl*self.nz,self.nl*self.nz)
#bt = np.linalg.inv(Mbt)
#bt.shape = (self.nz,self.nl,self.nz,self.nl)
#b = np.transpose(bt,[0,2,1,3])
#b [:,:,0,0]=0.+0j
#self.a[:,:,:,:,0]=b
def _initialize_forcing(self):
pass
#"""Set up frictional filter."""
# this defines the spectral filter (following Arbic and Flierl, 2003)
# cphi=0.65*pi
# wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
# self.filtr = np.exp(-self.filterfac*(wvx-cphi)**4.)
# self.filtr[wvx<=cphi] = 1.
### All the diagnostic stuff follows. ###
def _calc_cfl(self):
return np.abs(
np.hstack([self.u + self.Ubg[:,np.newaxis,np.newaxis], self.v])
).max()*self.dt/self.dx
# calculate KE: this has units of m^2 s^{-2}
# (should also multiply by H1 and H2...)
def _calc_ke(self):
ke = 0.
for j in range(self.nz):
ke += .5*self.Hi[j]*self.spec_var(self.wv*self.ph[j])
return ke.sum() / self.H
# calculate eddy turn over time
# (perhaps should change to fraction of year...)
def _calc_eddy_time(self):
""" estimate the eddy turn-over time in days """
ens = 0.
for j in range(self.nz):
            ens += .5*self.Hi[j] * self.spec_var(self.wv2*self.ph[j])
return 2.*pi*np.sqrt( self.H / ens.sum() ) / 86400
def _calc_derived_fields(self):
self.p = self.ifft(self.ph)
self.xi =self.ifft(-self.wv2*self.ph)
self.Jpxi = self._advect(self.xi, self.u, self.v)
self.Jq = self._advect(self.q, self.u, self.v)
self.Sph = np.einsum("ij,jkl->ikl",self.S,self.ph)
self.Sp = self.ifft(self.Sph)
self.JSp = self._advect(self.Sp,self.u,self.v)
self.phn = self.modal_projection(self.ph)
def _initialize_model_diagnostics(self):
""" Extra diagnostics for layered model """
self.add_diagnostic('entspec',
description='barotropic enstrophy spectrum',
function= (lambda self:
np.abs((self.Hi[:,np.newaxis,np.newaxis]*self.qh).sum(axis=0))**2/self.H) )
self.add_diagnostic('KEspec_modal',
description='modal KE spectra',
function= (lambda self:
self.wv2*(np.abs(self.phn)**2)/self.M**2 ))
self.add_diagnostic('PEspec_modal',
description='modal PE spectra',
function= (lambda self:
self.kdi2[1:,np.newaxis,np.newaxis]*(np.abs(self.phn[1:,:,:])**2)/self.M**2 ))
self.add_diagnostic('APEspec',
description='available potential energy spectrum',
function= (lambda self:
(self.f2gpi*
np.abs(self.ph[:-1]-self.ph[1:])**2).sum(axis=0)/self.H))
self.add_diagnostic('KEflux',
description='spectral divergence of flux of kinetic energy',
function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
(self.ph.conj()*self.Jpxi).real).sum(axis=0)/self.H))
self.add_diagnostic('APEflux',
description='spectral divergence of flux of available potential energy',
function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
(self.ph.conj()*self.JSp).real).sum(axis=0)/self.H))
self.add_diagnostic('APEgenspec',
description='the spectrum of the rate of generation of available potential energy',
function =(lambda self: (self.Hi[:,np.newaxis,np.newaxis]*
(self.Ubg[:,np.newaxis,np.newaxis]*self.k +
self.Vbg[:,np.newaxis,np.newaxis]*self.l)*
(1j*self.ph.conj()*self.Sph).real).sum(axis=0)/self.H))
self.add_diagnostic('ENSflux',
description='barotropic enstrophy flux',
function = (lambda self: (-self.Hi[:,np.newaxis,np.newaxis]*
(self.qh.conj()*self.Jq).real).sum(axis=0)/self.H))
# # Wanying Kang: this function cannot be used since I change the dimension of ikQy
# self.add_diagnostic('ENSgenspec',
# description='the spectrum of the rate of generation of barotropic enstrophy',
# function = (lambda self:
# -(self.Hi[:,np.newaxis,np.newaxis]*((self.ikQy -
# self.ilQx)*(self.Sph.conj()*self.ph)).real).sum(axis=0)/self.H))
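# Illustrative sketch (not part of the original module): the 2 x 2 stretching matrix
# that _initialize_stretching_matrix builds in the two-layer branch, reproduced with
# plain numpy for made-up values of rd and delta.
def _example_two_layer_stretching_matrix(rd=15000.0, delta=0.25):
    F1 = rd**-2 / (1. + delta)
    F2 = delta * F1
    return np.array([[-F1, F1],
                     [F2, -F2]])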
| 44.822642 | 171 | 0.519953 |
0cabaf02466e9cacc7105ed26fd9122ff7e01576 | 496 | py | Python | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0029.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | 1 | 2020-07-03T13:54:18.000Z | 2020-07-03T13:54:18.000Z | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0029.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Roberto/exc0029.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | print("""
029) Write a program that reads the speed of a car. If it exceeds 80 km/h,
     show a message saying the driver has been fined. The fine costs R$7.00 for each
     kilometre over the limit.
""")
velocidade = float(input('Vehicle speed (in km/h): '))
if velocidade > 80:
    valordamulta = (velocidade - 80) * 7
    print('You have been fined')
    print('The fine was: {:.2f}'.format(valordamulta))
else:
    print('You are within the speed limit')
| 33.066667 | 86 | 0.705645 |
088c80b96dca9f0208a93e9329132d33896a29dd | 1,096 | py | Python | SettlementBuilder.py | Niels-NTG/GDMC2022 | 515f4b7dd6f04af9714e7773f36cc9b3f1da1b95 | [
"MIT"
] | null | null | null | SettlementBuilder.py | Niels-NTG/GDMC2022 | 515f4b7dd6f04af9714e7773f36cc9b3f1da1b95 | [
"MIT"
] | null | null | null | SettlementBuilder.py | Niels-NTG/GDMC2022 | 515f4b7dd6f04af9714e7773f36cc9b3f1da1b95 | [
"MIT"
] | null | null | null | import numpy as np
import mapUtils
from Node import Node
class SettlementBuilder:
def __init__(self):
# DEBUG
# central RNG generator
rng = np.random.default_rng()
buildArea = mapUtils.getBuildArea()
startingPos = (10, 10)
# DEBUG
mapUtils.fill(
buildArea[0],
69,
buildArea[1],
buildArea[0] + buildArea[2],
69 + 10,
buildArea[1] + buildArea[3],
"minecraft:air"
)
# Height map of the build area.
heightMap = mapUtils.calcGoodHeightmap(buildArea)
# Map of structures built in the build area.
mapOfStructures = np.full(shape=heightMap.shape, fill_value=0)
startingNode = Node(
x=buildArea[0] + startingPos[0],
y=73,
z=buildArea[1] + startingPos[1],
buildArea=buildArea,
heightMap=heightMap,
mapOfStructures=mapOfStructures,
nodeStructureType='lab_a/hub',
rng=rng
)
startingNode.place()
| 24.355556 | 70 | 0.546533 |
62fa0942c55556730d2c701b1a6ce0ca139be218 | 1,221 | py | Python | gfootball/env/players/agent.py | seccoboy/football | c5c6a5c1d587a94673597ff6d61da43044a0c9ac | [
"Apache-2.0"
] | 3,091 | 2019-06-03T13:00:48.000Z | 2022-03-31T05:45:56.000Z | gfootball/env/players/agent.py | seccoboy/football | c5c6a5c1d587a94673597ff6d61da43044a0c9ac | [
"Apache-2.0"
] | 287 | 2019-06-07T14:35:25.000Z | 2022-03-19T12:36:42.000Z | gfootball/env/players/agent.py | seccoboy/football | c5c6a5c1d587a94673597ff6d61da43044a0c9ac | [
"Apache-2.0"
] | 1,418 | 2019-06-03T13:11:19.000Z | 2022-03-31T02:51:30.000Z | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent player controlled by the training policy and using step/reset API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from gfootball.env import player_base
class Player(player_base.PlayerBase):
def __init__(self, player_config, env_config):
player_base.PlayerBase.__init__(self, player_config)
assert player_config['player_agent'] == 0, 'Only one \'agent\' player allowed'
self._action = None
def set_action(self, action):
self._action = action
def take_action(self, observations):
return copy.deepcopy(self._action)
| 32.131579 | 82 | 0.765766 |
d8037602be1571ee7156f417b79570f22a6e1897 | 15,723 | py | Python | tests/data_context/test_data_context_datasource_runtime_data_connector.py | zachzIAM/great_expectations | 6c949285825571954bf272543fbd8b0cd4396685 | [
"Apache-2.0"
] | null | null | null | tests/data_context/test_data_context_datasource_runtime_data_connector.py | zachzIAM/great_expectations | 6c949285825571954bf272543fbd8b0cd4396685 | [
"Apache-2.0"
] | null | null | null | tests/data_context/test_data_context_datasource_runtime_data_connector.py | zachzIAM/great_expectations | 6c949285825571954bf272543fbd8b0cd4396685 | [
"Apache-2.0"
] | null | null | null | import pytest
import great_expectations
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import Batch, RuntimeBatchRequest
from great_expectations.validator.validator import Validator
def test_get_batch_successful_specification_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
batch_list: list = context.get_batch_list(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
assert len(batch_list) == 1
assert isinstance(batch_list[0], Batch)
def test_get_batch_ambiguous_parameter_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
"""
What does this test and why?
get_batch_list() requires batch_request to be passed in a named parameter. This test passes in a batch_request
as an unnamed parameter, which will raise a GreatExpectationsTypeError
"""
context = data_context_with_datasource_sqlalchemy_engine
# raised by get_batch_list()
with pytest.raises(ge_exceptions.GreatExpectationsTypeError):
batch_list: list = context.get_batch_list(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
def test_get_batch_failed_specification_type_error_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
batch: list = context.get_batch_list(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name=1, # wrong data_type
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
def test_get_batch_failed_specification_no_batch_identifier_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# batch_identifiers missing (set to None)
batch: list = context.get_batch_list(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers=None,
)
)
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# batch_identifiers missing (omitted)
batch: list = context.get_batch_list(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
)
)
def test_get_batch_failed_specification_no_runtime_parameters_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# runtime_parameters missing (None)
batch: list = context.get_batch_list(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters=None,
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# runtime_parameters missing (omitted)
batch: list = context.get_batch_list(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
def test_get_batch_failed_specification_incorrect_batch_spec_passthrough_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# incorrect batch_spec_passthrough, which should be a dict
batch: list = context.get_batch_list(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
batch_spec_passthrough=1,
)
)
def test_get_batch_failed_specification_wrong_runtime_parameters_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
# raised by _validate_runtime_parameters() in RuntimeDataConnector
with pytest.raises(
great_expectations.exceptions.exceptions.InvalidBatchRequestError
):
# runtime_parameters are not configured in the DataConnector
batch: list = context.get_batch_list(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={"i_dont_exist": "i_dont_either"},
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
def test_get_validator_successful_specification_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# Successful specification using a RuntimeBatchRequest
my_validator = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
),
expectation_suite_name="my_expectations",
)
assert isinstance(my_validator, Validator)
def test_get_validator_ambiguous_parameter_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
"""
What does this test and why?
get_batch_list() requires batch_request to be passed in a named parameter. This test passes in a batch_request
as an unnamed parameter, which will raise a GreatExpectationsTypeError
"""
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# raised by get_batch_list() in DataContext
with pytest.raises(ge_exceptions.GreatExpectationsTypeError):
batch_list: list = context.get_validator(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
),
expectation_suite_name="my_expectations",
)
def test_get_validator_wrong_type_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
# data_connector_name should be a dict not an int
with pytest.raises(TypeError):
context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name=1,
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
),
expectation_suite_name="my_expectations",
)
def test_get_validator_failed_specification_no_batch_identifier_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
# batch_identifiers should not be None
with pytest.raises(TypeError):
validator: Validator = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers=None,
),
expectation_suite_name="my_expectations",
)
# batch_identifiers should not be omitted
with pytest.raises(TypeError):
validator: Validator = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
),
expectation_suite_name="my_expectations",
)
def test_get_validator_failed_specification_incorrect_batch_spec_passthrough_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# incorrect batch_spec_passthrough, which should be a dict
validator: Validator = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={
"query": "SELECT * from table_partitioned_by_date_column__A LIMIT 10"
},
batch_identifiers={"default_identifier_name": "identifier_name"},
batch_spec_passthrough=1,
),
expectation_suite_name="my_expectations",
)
def test_get_validator_failed_specification_no_runtime_parameters_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
with pytest.raises(TypeError):
# runtime_parameters should not be None
batch: list = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters=None,
batch_identifiers={"default_identifier_name": "identifier_name"},
),
expectation_suite_name="my_expectations",
)
# raised by _validate_runtime_batch_request_specific_init_parameters() in RuntimeBatchRequest.__init__()
with pytest.raises(TypeError):
# runtime_parameters missing (omitted)
batch: list = context.get_validator(
RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
batch_identifiers={"default_identifier_name": "identifier_name"},
)
)
def test_get_validator_wrong_runtime_parameters_sqlalchemy_engine(
data_context_with_datasource_sqlalchemy_engine, sa
):
context = data_context_with_datasource_sqlalchemy_engine
context.create_expectation_suite("my_expectations")
# raised by _validate_runtime_parameters() in RuntimeDataConnector
with pytest.raises(
great_expectations.exceptions.exceptions.InvalidBatchRequestError
):
# runtime_parameters are not configured in the DataConnector
batch: list = context.get_validator(
batch_request=RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="default_data_asset_name",
runtime_parameters={"i_dont_exist": "i_dont_either"},
batch_identifiers={"default_identifier_name": "identifier_name"},
),
expectation_suite_name="my_expectations",
)
| 43.433702 | 114 | 0.696877 |
a2e1f301e04e81b3d704193a412137c9c218d91c | 2,065 | py | Python | tests/test_extract_genome_region.py | xguse/extract-genome-region | 33ba2732edbe882ed8461f4abb6b4fa34239ffa8 | ["BSD-2-Clause"] | 2 | 2016-09-27T06:01:52.000Z | 2021-06-28T07:54:06.000Z | tests/test_extract_genome_region.py | xguse/extract-genome-region | 33ba2732edbe882ed8461f4abb6b4fa34239ffa8 | ["BSD-2-Clause"] | null | null | null | tests/test_extract_genome_region.py | xguse/extract-genome-region | 33ba2732edbe882ed8461f4abb6b4fa34239ffa8 | ["BSD-2-Clause"] | null | null | null |
"""Provide functions to test this library."""
from __future__ import absolute_import, print_function
import types
import pyfaidx
import pytest
import extract_genome_region.__main__ as egr
def gfui049232_info():
"""Provide true data that we can use to confirm correctness."""
real = pyfaidx.Fasta("tests/data/real.fa")
no_flanks = real[0]
yes_flanks = real[1]
i = no_flanks.name.split('|')
no_flanks_info = {"start": i[3],
"stop": i[4],
"scaffold": i[2],
"sequence": no_flanks[:].seq,
}
i = yes_flanks.name.split('|')
yes_flanks_info = {"start": i[3],
"stop": i[4],
"scaffold": i[2],
"sequence": yes_flanks[:].seq,
}
return no_flanks_info, yes_flanks_info
## Test Data
no_flanks_info, yes_flanks_info = gfui049232_info()
infasta = pyfaidx.Fasta("tests/data/GfusI1.3contigs.fa", strict_bounds=False)
bad_headers_csv = "tests/data/bad_headers.csv"
extra_headers_csv = "tests/data/extra_headers.csv"
missing_headers_csv = "tests/data/missing_headers.csv"
pass_csv = "tests/data/pass.csv"
start_stop_switched_csv = "tests/data/start_stop_switched.csv"
# Begin tests
def test_gen_rec_is_generator(path=pass_csv):
"""Should return a generator."""
records = egr.gen_records(path=path)
assert isinstance(records, types.GeneratorType)
def test_gen_rec_expected_headers_expected(path=pass_csv):
"""Freakout if the code has changed what headers we expect."""
expected_headers = set('record_name,scaffold,start,stop,left_bfr,right_bfr'.split(','))
records = egr.gen_records(path=path)
assert expected_headers == set(next(records)._fields)
@pytest.mark.parametrize("path", [bad_headers_csv,
extra_headers_csv,
missing_headers_csv])
def test_gen_rec_headers_csv(path):
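    """Malformed header rows (wrong, extra, or missing columns) should raise ValueError."""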
with pytest.raises(ValueError):
next(egr.gen_records(path=path))
| 29.927536 | 91 | 0.645036 |
f32ae32c3e00b6df7234b8ebeeffe0b36495bb18 | 15,374 | py | Python | backups/render_video___2_seeds__1d__backup_2-12-2020.py | bjdarrer/tf2-model-g | 26cf7bba9f1cc13e226834b3565c7b8df5fcc40a | ["MIT"] | null | null | null | backups/render_video___2_seeds__1d__backup_2-12-2020.py | bjdarrer/tf2-model-g | 26cf7bba9f1cc13e226834b3565c7b8df5fcc40a | ["MIT"] | null | null | null | backups/render_video___2_seeds__1d__backup_2-12-2020.py | bjdarrer/tf2-model-g | 26cf7bba9f1cc13e226834b3565c7b8df5fcc40a | ["MIT"] | null | null | null |
from __future__ import division
import argparse
import numpy as np
import tensorflow as tf
import progressbar
import imageio
import yaml
import matplotlib.pyplot as pp # BJD added 18.11.2020
#import cv2 # BJD added 24.11.2020 - for make video
#import glob # BJD added 24.11.2020 - for make video
#import matplotlib.pyplot as plt
#import ffmpeg
import os # BJD added 24.11.2020 - for make video
import io # BJD added 18.11.2020
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from model_g import ModelG
from fluid_model_g import FluidModelG
from util import bl_noise
from numpy import * # BJD added 20.11.2020
from matplotlib import pyplot as plt # BJD added 20.11.2020
RESOLUTIONS = {
"2160p": (3840, 2160),
"1440p": (2560, 1440),
"1080p": (1920, 1080),
"720p": (1280, 720),
"480p": (854, 480),
"360p": (640, 360),
"240p": (426, 240),
"160p": (284, 160),
"80p": (142, 80),
"40p": (71, 40),
}
#c1 = 0
def make_video_frame(rgb, indexing='ij'):
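    """Stack the R, G, B channel tensors into a clipped uint8 (H, W, 3) frame for the video writer."""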
if indexing == 'ij':
rgb = [tf.transpose(channel) for channel in rgb]
frame = tf.stack(rgb, axis=-1)
frame = tf.clip_by_value(frame, 0.0, 1.0)
return tf.cast(frame * 255, 'uint8').numpy()
#def nucleation_and_motion_in_G_gradient_fluid_2D(writer, args, R=16):
def nucleation_and_motion_in_G_gradient_fluid_2D(writer, args, R=30):
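    """Render the two-seed 'nucleation and motion in G gradient' episode and save a 1D X/Y/G cross-section plot for each step."""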
c1 = 0 # BJD added this on 20.11.2020
dx = 2*R / args.height
x = (np.arange(args.width) - args.width // 2) * dx
y = (np.arange(args.height) - args.height // 2) * dx
x, y = np.meshgrid(x, y, indexing='ij')
def source_G(t):
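        # Two negative Gaussian G sources centred at x = +/-25 act as the seeds; the tanh term switches on a weak x-gradient around t = 30.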
center = np.exp(-0.5*(t-5)**2) * 10
gradient = (1+np.tanh(t-30)) * 0.0003
return -(
np.exp(-0.5*((x-25)**2 + y*y)) + np.exp(-0.5*((x+25)**2 + y*y))
) * center + (x+8) * gradient # BJD 2.2.2020 --- try gradient for half of plot!!
"""
def source_G(t):
amount = np.exp(-0.5*(t-5)**2)
return (
np.exp(-0.5*((x-D)**2+y*y)) * weights[0] +
np.exp(-0.5*((x+D)**2+y*y)) * weights[1]
) * amount
"""
source_functions = {
'G': source_G,
}
flow = [0*x, 0*x]
fluid_model_g = FluidModelG(
x*0,
x*0,
x*0,
flow,
dx,
dt=args.dt,
params=args.model_params,
source_functions=source_functions,
)
print("Rendering 'Nucleation and Motion in G gradient in 2D'")
print("Lattice constant dx = {}, time step dt = {}".format(fluid_model_g.dx, fluid_model_g.dt))
min_G = -4.672736908320116
max_G = 0.028719261862332906
min_X = -3.8935243721220334
max_X = 1.2854028081816122
min_Y = -0.7454193158963579
max_Y = 4.20524950766914
#c1 = 0
for n in progressbar.progressbar(range(args.num_frames)):
fluid_model_g.step()
if n % args.oversampling == 0:
rgb = [
6*(-fluid_model_g.G + max_G) / (max_G - min_G),
5*(fluid_model_g.Y - min_Y) / (max_Y - min_Y),
0.7*(fluid_model_g.X - min_X) / (max_X - min_X),
]
zero_line = 1 - tf.exp(-600 * fluid_model_g.Y**2)
frame = make_video_frame([c * zero_line for c in rgb])
writer.append_data(frame)
#========================BJD added 18.11.2020===================================================
if n == 150:
print("n = ", n)
break
#if n == 4:
# X_array = [
# 0.7*(fluid_model_g.X - min_X) / (max_X - min_X),
# ] # BJD put this in 18.11.2020
# print("Array of X: ", X_array) # ***** BJD inserted this line 18.11.2020 *****
c1 = c1 + 1
print("H E L L O")
x1 = np.loadtxt("/home/brendan/software/tf2-model-g/arrays/array9/X.txt") #, delimiter=" :-) ", usecols=(120)) # (426, 240)
x2 = np.loadtxt("/home/brendan/software/tf2-model-g/arrays/array9/Y.txt") #, delimiter=" :-) ", usecols=(120)) # (426, 240)
x3 = np.loadtxt("/home/brendan/software/tf2-model-g/arrays/array9/G.txt") #, delimiter=" :-) ", usecols=(120)) # (426, 240)
#ndArray[ : , column_index] # @ https://thispointer.com/python-numpy-select-rows-columns-by-index-from-a-2d-ndarray-multi-dimension/
column1 = x1[: , 120] # choose row 214 of 2D array = (426,240)
column2 = x2[: , 120] # choose row 214 of 2D array = (426,240)
column3 = x3[: , 120] # choose row 214 of 2D array = (426,240)
#t = linspace(0, 2*math.pi, 400)
#a = sin(t)
#b = cos(t)
#c = a + b
print(column1)
fig, pp = plt.subplots( nrows=1, ncols=1 ) # create figure & 1 axis
#axes = pp.add_axes([0.1,0.1,0.8,0.8])
#-------------------
#a= plt.figure()
#axes= a.add_axes([0.1,0.1,0.8,0.8])
# adding axes
#x= np.arange(0,11)
#axes.plot(x,x**3, marker='*')
#axes.set_xlim([0,250])
#axes.set_ylim([-3,2])
#plt.show()
#------------------
#fig, ax = plt.subplots( nrows=1, ncols=1 ) # create figure & 1 axis
#ax.plot([0,1,2], [10,20,3])
#pp.plot(t, a, 'r') # plotting t, a separately - BJD new plotting code 21.11.2020
#pp.plot(t, b, 'b') # plotting t, b separately - BJD new plotting code 21.11.2020
#pp.plot(t, c, 'g') # plotting t, c separately - BJD new plotting code 21.11.2020
# https://stackoverflow.com/questions/22276066/how-to-plot-multiple-functions-on-the-same-figure-in-matplotlib
#row1 = range(-3, 2)
#row2 = range(-3, 2)
#row3 = range(-3, 2)
#y = range(-3, 2)
pp.plot(column1, 'r') # plotting t, a separately - BJD new plotting code 21.11.2020
pp.plot(column2, 'b') # plotting t, b separately - BJD new plotting code 21.11.2020
pp.plot(column3, 'g') # plotting t, c separately - BJD new plotting code 21.11.2020
#axes.set_xlim([0,250])
#axes.set_ylim([-3,2])
#pp.set_xlim([0,250])
pp.set_ylim([-4,4]) # ******* BJD this one works! 1.12.2020 ***********
#pp.plot(row1) # BJD previous working plot code 21.11.2020
#pp.show()
#plt.savefig('test2.png')
#plt.savefig('test2.pdf')
plt.title('X, Y, G potential vs 1D space - time = ' + str(c1))
plt.xlabel("1D spacial units")
plt.ylabel("X, Y, G pot. - concentration per unit vol")
#fig.savefig('test2.png') # save the figure to file
plt.legend(["X", "Y", "G"]) # BJD legend added 21.11.2020
fig.savefig('/home/brendan/software/tf2-model-g/plots/1D_video16/1D_video_XYG_' + str(c1) + '.png')
plt.close(fig) # close the figure window
#plt.savefig('test2_' + str(c1) + '.png')
#===========================================================================
# max_G = max(max_G, tf.reduce_max(fluid_model_g.G).numpy())
# min_G = min(min_G, tf.reduce_min(fluid_model_g.G).numpy())
# max_X = max(max_X, tf.reduce_max(fluid_model_g.X).numpy())
# min_X = min(min_X, tf.reduce_min(fluid_model_g.X).numpy())
# max_Y = max(max_Y, tf.reduce_max(fluid_model_g.Y).numpy())
# min_Y = min(min_Y, tf.reduce_min(fluid_model_g.Y).numpy())
# print(min_G, max_G, min_X, max_X, min_Y, max_Y)
def charged_nucleation_in_2D(writer, args, R=30, D=25, weights=(0, -10, -8, 8)):
dx = 2*R / args.height
x = (np.arange(args.width) - args.width // 2) * dx
y = (np.arange(args.height) - args.height // 2) * dx
x, y = np.meshgrid(x, y, indexing='ij')
def source_G(t):
amount = np.exp(-0.5*(t-5)**2)
return (
np.exp(-0.5*((x-D)**2+y*y)) * weights[0] +
np.exp(-0.5*((x+D)**2+y*y)) * weights[1]
) * amount
def source_X(t):
amount = np.exp(-0.5*(t-5)**2)
return (
np.exp(-0.5*((x-D)**2+y*y)) * weights[2] +
np.exp(-0.5*((x+D)**2+y*y)) * weights[3]
) * amount
source_functions = {
'G': source_G,
'X': source_X,
}
noise_scale = 1e-4
model_g = ModelG(
bl_noise(x.shape) * noise_scale,
bl_noise(x.shape) * noise_scale,
bl_noise(x.shape) * noise_scale,
dx,
dt=args.dt,
params=args.model_params,
source_functions=source_functions,
)
print("Rendering 'Charged nucleation in 2D'")
print("Lattice constant dx = {}, time step dt = {}".format(model_g.dx, model_g.dt))
min_G = -4.672736908320116
max_G = 0.028719261862332906
min_X = -3.8935243721220334
max_X = 1.2854028081816122
min_Y = -0.7454193158963579
max_Y = 4.20524950766914
for n in progressbar.progressbar(range(args.num_frames)):
model_g.step()
if n % args.oversampling == 0:
rgb = [
6*(-model_g.G + max_G) / (max_G - min_G),
5*(model_g.Y - min_Y) / (max_Y - min_Y),
0.7*(model_g.X - min_X) / (max_X - min_X),
]
zero_line = 1 - tf.exp(-600 * model_g.Y**2)
frame = make_video_frame([c * zero_line for c in rgb])
writer.append_data(frame)
# TODO: Requires some work. Unstable like this.
def nucleation_3D(writer, args, R=20):
"""
raise NotImplementedError("Needs some work")
params = {
"A": 3.4,
"B": 13.5,
"k2": 1.0,
"k-2": 0.1,
"k5": 0.9,
"D_G": 1.0,
"D_X": 1.0,
"D_Y": 1.95,
"density_G": 1.0,
"density_X": 0.0002,
"density_Y": 0.043,
"base-density": 9.0,
"viscosity": 0.3,
"speed-of-sound": 1.0,
}
"""
dx = 2*R / args.height
x = (np.arange(args.width) - args.width // 2) * dx
y = (np.arange(args.height) - args.height // 2) * dx
z = y
x, y, z = np.meshgrid(x, y, z, indexing='ij')
def source_G(t):
center = np.exp(-0.3*(t-6)**2) * 10
return -np.exp(-0.5*(x*x+y*y+z*z)) * center
source_functions = {
'G': source_G,
}
# We need some noise to break spherical symmetry
noise_scale = 1e-4
G = bl_noise(x.shape) * noise_scale
X = bl_noise(x.shape) * noise_scale
Y = bl_noise(x.shape) * noise_scale
flow = [
bl_noise(x.shape) * noise_scale,
bl_noise(x.shape) * noise_scale,
bl_noise(x.shape) * noise_scale
]
fluid_model_g = FluidModelG(
G, X, Y,
flow,
dx,
dt=args.dt,
params=params,
source_functions=source_functions,
)
flow_particle_origins = []
for _ in range(1000):
flow_particle_origins.append([np.random.rand() * s for s in x.shape])
flow_particles = tf.constant(flow_particle_origins, dtype='float64')
flow_streaks = 0*x[:,:,0]
print("Rendering 'Nucleation and Motion in G gradient in 3D'")
print("Lattice constant dx = {}, time step dt = {}".format(fluid_model_g.dx, fluid_model_g.dt))
for n in progressbar.progressbar(range(args.num_frames)):
fluid_model_g.step()
for _ in range(20):
indices = tf.cast(flow_particles, 'int32')
for index in indices.numpy():
flow_streaks[index[0], index[1]] += 0.15 / args.oversampling
dx = tf.gather_nd(fluid_model_g.u, indices)
dy = tf.gather_nd(fluid_model_g.v, indices)
dz = tf.gather_nd(fluid_model_g.w, indices)
flow_particles = (flow_particles + tf.stack([dx, dy, dz], axis=1) * 400) % x.shape
if n % args.oversampling == 0:
rgb = [
tf.reduce_mean((7*fluid_model_g.G)**2, axis=2) + flow_streaks,
tf.reduce_mean((4*fluid_model_g.Y)**2, axis=2),
tf.reduce_mean((2*fluid_model_g.X)**2, axis=2),
]
frame = make_video_frame(rgb)
writer.append_data(frame)
flow_streaks *= 0
flow_particles = tf.constant(flow_particle_origins, dtype='float64')
if __name__ == '__main__':
episodes = {
'nucleation_and_motion_in_fluid_2D': nucleation_and_motion_in_G_gradient_fluid_2D,
'charged_nucleation_in_2D': charged_nucleation_in_2D,
'nucleation_3D': nucleation_3D,
}
parser = argparse.ArgumentParser(description='Render audio samples')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--params', type=str, help='Parameter YAML file name')
parser.add_argument('--episode', choices=episodes.keys())
parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
parser.add_argument('--framerate', type=int, help='Video frame rate')
parser.add_argument('--oversampling', type=int, help='Add extra simulation time steps between video frames for stability')
parser.add_argument('--video-quality', type=int, help='Video quality factor')
parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
parser.add_argument('--simulation-duration', type=float, help='Amount of simulation to run')
args = parser.parse_args()
args.model_params = {}
if args.params:
with open(args.params) as f:
params = yaml.load(f, Loader=Loader)
for key, value in params.items():
if not getattr(args, key):
setattr(args, key, value)
if not args.episode:
raise ValueError("Missing episode argument. Must be present in either parameter YAML file or as a program argument.")
if not args.framerate:
args.framerate = 24
if not args.oversampling:
args.oversampling = 1
if not args.video_quality:
args.video_quality = 10
writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
# Compute derived parameters
if args.resolution:
width, height = RESOLUTIONS[args.resolution]
if not args.width:
args.width = width
if not args.height:
args.height = height
if (not args.width) or (not args.height):
raise ValueError("Invalid or missing resolution")
args.aspect = args.width / args.height
args.num_frames = int(args.video_duration * args.oversampling * args.framerate)
args.dt = args.simulation_duration / args.num_frames
episodes[args.episode](writer, args)
writer.close()
#=======================BJD make video from .png files 24.11.2020===========================
def save1():
#os.system("ffmpeg -r 1 -i img%01d.png -vcodec mpeg4 -y movie.mp4")
os.system("ffmpeg -r 1 -i /home/brendan/software/tf2-model-g/plots/1D_video16/1D_video_XYG_%01d.png -vcodec mpeg4 -y 1D_2_seeds_video_16.mp4")
save1()
#============================================================================================
| 38.148883 | 146 | 0.562313 |
3eab56315abc05cb508dcaa2e370eb1c59c55576 | 1,967 | py | Python | plaso/formatters/safari_cookies.py | stephenkreusch/plaso | 494d3140c09733d1d51c8dbb9162fc569be760b3 | ["Apache-2.0"] | 1 | 2020-10-29T18:23:25.000Z | 2020-10-29T18:23:25.000Z | plaso/formatters/safari_cookies.py | stephenkreusch/plaso | 494d3140c09733d1d51c8dbb9162fc569be760b3 | ["Apache-2.0"] | null | null | null | plaso/formatters/safari_cookies.py | stephenkreusch/plaso | 494d3140c09733d1d51c8dbb9162fc569be760b3 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""The Safari Binary cookie event formatter."""
from __future__ import unicode_literals
from plaso.lib import errors
from plaso.formatters import interface
from plaso.formatters import manager
class SafariCookieFormatter(interface.ConditionalEventFormatter):
"""Formatter for a Safari Binary Cookie file entry event."""
DATA_TYPE = 'safari:cookie:entry'
FORMAT_STRING_PIECES = [
'{url}',
'<{path}>',
'({cookie_name})',
'Flags: {flags}']
FORMAT_STRING_SHORT_PIECES = [
'{url}',
'({cookie_name})']
SOURCE_LONG = 'Safari Cookies'
SOURCE_SHORT = 'WEBHIST'
_COOKIE_FLAGS = {
1: 'Secure',
2: 'Unknown',
4: 'HttpOnly'}
# pylint: disable=unused-argument
def GetMessages(self, formatter_mediator, event_data):
"""Determines the formatted message strings for the event data.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event_data (EventData): event data.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event data cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event_data.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event_data.data_type))
event_values = event_data.CopyToDict()
cookie_flags = event_values.get('flags', None)
if cookie_flags == 0:
del event_values['flags']
elif cookie_flags:
flags = []
for flag_value, flag_description in self._COOKIE_FLAGS.items():
if cookie_flags & flag_value:
flags.append(flag_description)
event_values['flags'] = '|'.join(flags)
return self._ConditionalFormatMessages(event_values)
manager.FormattersManager.RegisterFormatter(SafariCookieFormatter)
| 27.704225 | 78 | 0.688358 |
b018c8ee74c5b7842beb37673271a14bfd2d6e1f | 3,921 | py | Python | examples/spring_mass/model.py | omunroe-com/nasasrompy | 35ae060b6a032d085a31574fbe3bf390b023631d | ["Apache-2.0"] | 23 | 2018-05-13T05:13:03.000Z | 2022-01-29T19:43:28.000Z | examples/spring_mass/model.py | omunroe-com/nasasrompy | 35ae060b6a032d085a31574fbe3bf390b023631d | ["Apache-2.0"] | 11 | 2018-03-28T13:13:44.000Z | 2022-03-30T18:56:57.000Z | examples/spring_mass/model.py | omunroe-com/nasasrompy | 35ae060b6a032d085a31574fbe3bf390b023631d | ["Apache-2.0"] | 19 | 2018-06-01T14:49:30.000Z | 2022-03-05T05:02:06.000Z |
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# ------------------------------------------------------
# Helper function to use scipy integrator in model class
def mass_spring(state, t, k, m):
"""
Return velocity/acceleration given velocity/position and values for
stiffness and mass
"""
# Unpack the state vector.
x = state[0]
xd = state[1]
    g = 9.8 # Meters per second squared (acceleration due to gravity).
# Compute acceleration xdd.
xdd = ((-k*x)/m) + g
# return the two state derivatives
return [xd, xdd]
# ------------------------------------------------------
class SpringMass1D(object):
"""
Defines Spring Mass model with 1 free param (stiffness of spring, k)
"""
def __init__(self, m=1.5, state0=None, time_grid=None):
self._m = m
# Give default initial conditions & time grid if not specified.
if state0 is None:
state0 = [0.0, 0.0]
if time_grid is None:
time_grid = np.arange(0.0, 10.0, 0.1)
self._state0 = state0
self._t = time_grid
def simulate(self, k=2.5):
"""
Simulate spring mass system for given spring constant. Returns state
(position, velocity) at all points in time grid
"""
return odeint(mass_spring, self._state0, self._t, args=(k, self._m))
def get_max_disp(self, k=2.5):
"""
Returns the max displacement over the course of the simulation
"""
state = self.simulate(k)
return max(state[:, 0])
class SpringMass2D(object):
"""
Defines Spring Mass model with 2 free params (spring stiffness, k & mass, m)
"""
def __init__(self, state0=None, time_grid=None):
# Give default initial conditions & time grid if not specified.
if state0 is None:
state0 = [0.0, 0.0]
if time_grid is None:
time_grid = np.arange(0.0, 10.0, 0.1)
self._state0 = state0
self._t = time_grid
def simulate(self, k=2.5, m=1.5):
"""
Simulate spring mass system for given spring constant. Returns state
(position, velocity) at all points in time grid
"""
return odeint(mass_spring, self._state0, self._t, args=(k, m))
def get_max_disp(self, k=2.5, m=1.5):
"""
Returns the max displacement over the course of the simulation
"""
state = self.simulate(k, m)
return max(state[:, 0])
if __name__ == '__main__':
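    # Example usage: simulate the two-parameter spring-mass model and plot position and velocity over time.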
k = 2.5 # Newtons per metre.
m = 1.5 # Kilograms.
state0 = [0.0, 0.0] # Initial conditions.
t = np.arange(0.0, 10.0, 0.1) # Time grid for simulation.
# Initialize model & simulate.
model = SpringMass2D(state0, t)
state = model.simulate(k, m)
print "shape = ", state.shape
# Plot results.
plt.figure()
plt.plot(t, state)
plt.xlabel('TIME (sec)')
plt.ylabel('States')
plt.title('Mass-Spring System')
plt.legend(('$x$ (m)', '$\dot{x}$ (m/sec)'))
plt.show()
| 29.704545 | 80 | 0.610814 |
448e8b7ceb2b8ecc272f5664499d66ddc220b061 | 2,273 | py | Python | 3rd_party/nek5000/short_tests/lib/nekBinBuild.py | neil-lindquist/nekRS | 723cd46baee78f53f40eb67147dfcaad95d60aa9 | ["BSD-3-Clause"] | 1 | 2022-01-06T16:16:08.000Z | 2022-01-06T16:16:08.000Z | 3rd_party/nek5000/short_tests/lib/nekBinBuild.py | neil-lindquist/nekRS | 723cd46baee78f53f40eb67147dfcaad95d60aa9 | ["BSD-3-Clause"] | null | null | null | 3rd_party/nek5000/short_tests/lib/nekBinBuild.py | neil-lindquist/nekRS | 723cd46baee78f53f40eb67147dfcaad95d60aa9 | ["BSD-3-Clause"] | null | null | null |
import os
from subprocess import Popen, PIPE, STDOUT
from pathlib import Path
def build_tools(
tools_root,
tools_bin,
f77=None,
cc=None,
bigmem=None,
targets=("clean", "all"),
verbose=False,
):
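    """Compile the Nek5000 tool binaries by running the maketools script for each requested target."""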
tools_root = Path(tools_root)
print("Compiling tools... ")
print(f' Using output directory "{tools_bin}"')
print(f' Using FC "{f77}"')
print(f' Using CC "{cc}"')
maketools_in = tools_root / "maketools"
my_env = os.environ.copy()
if f77:
my_env["FC"] = f77
if cc:
my_env["CC"] = cc
my_env["bin_nek_tools"] = tools_bin
if targets[0] == "all":
targets = [t for t in os.listdir(tools_root) if "maketools" not in t]
print("Targets:", targets)
for t in targets:
proc = Popen([maketools_in, t], env=my_env, cwd=tools_root, stderr=STDOUT)
proc.wait()
logfile = tools_root / t / "build.log"
if proc.returncode != 0:
with open(logfile, "r") as file:
text = file.read()
print(text)
exit(-1)
def build_nek(source_root, usr_file, cwd=None, opts=None, verbose=False):
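    """Clean and build a Nek5000 case with makenek, using the given .usr file and environment overrides."""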
if not opts:
_opts = {}
else:
_opts = opts.copy()
_opts.update(NEK_SOURCE_ROOT=source_root)
print("Compiling nek5000...")
print(f' Using working directory "{cwd}"')
print(f' Using .usr file "{usr_file}"')
for key, val in list(_opts.items()):
print(f' Using {key}="{val}"')
my_env = os.environ.copy()
if source_root:
my_env["NEK_SOURCE_ROOT"] = source_root
if _opts.get("F77"):
my_env["FC"] = _opts.get("F77")
if _opts.get("CC"):
my_env["CC"] = _opts.get("CC")
if _opts.get("PPLIST"):
my_env["PPLIST"] = _opts.get("PPLIST")
makenek_in = Path(source_root) / "bin" / "makenek"
logfile = Path(cwd) / "build.log"
proc = Popen([makenek_in, "clean"], cwd=cwd, env=my_env, stdin=PIPE, text=True)
proc.communicate(input="Y\n")
proc.wait()
proc = Popen([makenek_in, usr_file], cwd=cwd, env=my_env, stdin=PIPE, stderr=STDOUT)
proc.wait()
if proc.returncode != 0:
with open(logfile, "r") as file:
text = file.read()
print(text)
exit(-1)
| 26.126437 | 88 | 0.574571 |
96503e0768bf32a8b43371c0f33685b6dd02c45b | 21,345 | py | Python | mesh_tensorflow/layers_test.py | merrymercy/mesh | 8931eb9025f833b09d8425404ebd5801acbb0cac | ["Apache-2.0"] | 1 | 2020-11-27T19:16:44.000Z | 2020-11-27T19:16:44.000Z | mesh_tensorflow/layers_test.py | merrymercy/mesh | 8931eb9025f833b09d8425404ebd5801acbb0cac | ["Apache-2.0"] | 7 | 2021-05-12T10:37:36.000Z | 2021-05-28T14:53:58.000Z | mesh_tensorflow/layers_test.py | merrymercy/mesh | 8931eb9025f833b09d8425404ebd5801acbb0cac | ["Apache-2.0"] | 1 | 2020-11-25T14:26:45.000Z | 2020-11-25T14:26:45.000Z |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils
import mock
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype, **unused_kwargs):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
"Shape {} not found in shape to value map.".format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
class LayersTest(parameterized.TestCase, tf.test.TestCase):
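  """Checks mtf.layers ops, lowered on a single-device placement mesh, against Keras/NumPy references."""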
@parameterized.parameters(
(4, True, "not_channels"),
(8, False, "channels"),
)
def testDense(self, units, use_bias, new_dim_name):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
new_dim = mtf.Dimension(new_dim_name, units)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.dense(
mtf_inputs,
new_dims=new_dim,
reduced_dims=[channels_dim],
activation=mtf.relu,
use_bias=use_bias)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.keras.layers.Dense(units=units,
activation=tf.nn.relu,
use_bias=use_bias)(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
@test_util.run_in_graph_and_eager_modes()
def testLayerNorm(self):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.layer_norm(mtf_inputs,
dim=channels_dim)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.keras.layers.LayerNormalization()(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
@test_util.run_in_graph_and_eager_modes()
def testBatchNorm(self):
batch = 2
channels = 3
inputs = tf.constant([[0, 1, 2], [4, 5, 6]], dtype=np.float32)
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs_0, _ = mtf.layers.batch_norm(
mtf_inputs,
is_training=True, momentum=0.95, epsilon=1e-6,
dims_idx_start=0, dims_idx_end=1, name="bn0")
mtf_outputs_1, _ = mtf.layers.batch_norm(
mtf_outputs_0 * 2 + 1,
is_training=True, momentum=0.95, epsilon=1e-6,
dims_idx_start=0, dims_idx_end=1, name="bn1")
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs_0 = lowering.export_to_tf_tensor(mtf_outputs_0)
actual_outputs_1 = lowering.export_to_tf_tensor(mtf_outputs_1)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
[actual_0, actual_1] = self.evaluate([actual_outputs_0, actual_outputs_1])
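    # Batch norm removes the per-channel batch mean and scales by the batch std, so both inputs map to -1 and +1 in every channel.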
expected = np.array([[-1, -1, -1], [1, 1, 1]])
self.assertAllClose(actual_0, expected)
self.assertAllClose(actual_1, expected)
@test_util.run_in_graph_and_eager_modes()
def testWeightsNonzero(self):
inputs = tf.constant([[3, 1, 0], [1, 0, 0]])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", inputs.shape.as_list()[0])
channels_dim = mtf.Dimension("channels", inputs.shape.as_list()[1])
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.weights_nonzero(mtf_inputs)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.cast(tf.not_equal(inputs, 0), tf.float32)
tf_group = lowering.copy_masters_to_slices()
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertAllEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes()
def testDenseReluDense(self):
batch = 2
channels = 3
hidden = 5
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
hidden_dim = mtf.Dimension("hidden", hidden)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.dense_relu_dense(mtf_inputs,
hidden_channels=hidden_dim,
is_training=False)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, inputs.shape)
@parameterized.parameters(
(2, 16, 3, 4, 2, 2),
(1, 8, 5, 3, 1, 4),
)
def testMaskedLocalAttention1D(self, batch, length, io_channels, kv_channels,
heads, window_size):
length_q = length
query = tf.random_normal([batch, length_q, io_channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_q_dim = mtf.Dimension("length_q", length_q)
io_channels_dim = mtf.Dimension("io_channels", io_channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_q_dim, io_channels_dim]))
mtf_outputs = mtf.layers.masked_local_attention_1d(
mtf_query,
kv_channels=kv_channels_dim,
heads=heads_dim,
is_training=False,
window_size=window_size)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, (batch, length_q, io_channels))
@parameterized.parameters(
(2, 4, 5, 7, 3, 1),
)
def testDotProductAttention(
self, batch, heads, length_q, length_kv, depth_k, depth_v):
query = tf.random_normal([batch, heads, length_q, depth_k])
key = tf.random_normal([batch, heads, length_kv, depth_k])
value = tf.random_normal([batch, heads, length_kv, depth_v])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
heads_dim = mtf.Dimension("heads", heads)
length_q_dim = mtf.Dimension("length_q", length_q)
length_kv_dim = mtf.Dimension("length_kv", length_kv)
depth_k_dim = mtf.Dimension("depth_k", depth_k)
depth_v_dim = mtf.Dimension("depth_v", depth_v)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape(
[batch_dim, heads_dim, length_q_dim, depth_k_dim]))
mtf_key = mtf.import_tf_tensor(
mesh, key,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
mtf_value = mtf.import_tf_tensor(
mesh, value,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
mtf_outputs = mtf.layers.dot_product_attention(
mtf_query,
mtf_key,
mtf_value,
mask=None,
is_training=False)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
@parameterized.parameters(
(16, 4),
(32, 8),
)
def testMultiheadAttention(self, kv_channels, heads):
batch = 2
length = 8
channels = 3
query = tf.random_normal([batch, length, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
channels_dim = mtf.Dimension("channels", channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_dim, channels_dim]))
mtf_outputs = mtf.layers.multihead_attention(
mtf_query,
memory_antecedent=None,
mask=None,
kv_channels=kv_channels_dim,
heads=heads_dim,
is_training=False)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, query.shape)
@parameterized.parameters(
("MAX_2D",), ("AVG_2D",), ("MAX_3D",), ("AVG_3D",),
)
def testPool(self, pooling_method):
batch = 2
depth = 3
height = 4
width = 6
channels = 3
tf.random.set_random_seed(1234)
inputs = tf.random_normal([batch, depth, height, width, channels])
stride_d = 3
stride_h = 2
stride_w = 3
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
depth_dim = mtf.Dimension("depth", depth)
height_dim = mtf.Dimension("height", height)
width_dim = mtf.Dimension("width", width)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape(
[batch_dim, depth_dim, height_dim, width_dim, channels_dim]))
if pooling_method == "MAX_2D":
mtf_outputs = mtf.layers.max_pool2d(
mtf_inputs, ksize=(stride_h, stride_w))
inputs = tf.reshape(inputs, [batch * depth, height, width, channels])
expected_outputs = tf.keras.layers.MaxPooling2D(
(stride_h, stride_w))(inputs)
expected_outputs = tf.reshape(
expected_outputs,
[batch, depth, int(height / stride_h),
int(width / stride_w), channels])
elif pooling_method == "AVG_2D":
mtf_outputs = mtf.layers.avg_pool2d(
mtf_inputs, ksize=(stride_h, stride_w))
inputs = tf.reshape(inputs, [batch * depth, height, width, channels])
expected_outputs = tf.keras.layers.AveragePooling2D(
(stride_h, stride_w))(inputs)
expected_outputs = tf.reshape(
expected_outputs,
[batch, depth, int(height / stride_h),
int(width / stride_w), channels])
elif pooling_method == "MAX_3D":
mtf_outputs = mtf.layers.max_pool3d(
mtf_inputs, ksize=[stride_d, stride_h, stride_w])
expected_outputs = tf.keras.layers.MaxPooling3D(
[stride_d, stride_h, stride_w])(inputs)
elif pooling_method == "AVG_3D":
mtf_outputs = mtf.layers.avg_pool3d(
mtf_inputs, ksize=[stride_d, stride_h, stride_w])
expected_outputs = tf.keras.layers.AveragePooling3D(
[stride_d, stride_h, stride_w])(inputs)
mtf_gradient = mtf.gradients([mtf_outputs], [mtf_inputs])[0]
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
actual_gradient = lowering.export_to_tf_tensor(mtf_gradient)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertAllClose(actual, expected)
actual = self.evaluate(actual_gradient)
if pooling_method == "MAX_2D":
expected_non_zeros = batch * depth * height * width * channels / (
stride_h * stride_w)
self.assertEqual(np.count_nonzero(actual), expected_non_zeros)
elif pooling_method == "AVG_2D":
expected = np.ones((batch, depth, height, width, channels),
dtype=np.float32) / stride_h / stride_w
self.assertAllClose(actual, expected)
elif pooling_method == "MAX_3D":
expected_non_zeros = batch * depth * height * width * channels / (
stride_d * stride_h * stride_w)
self.assertEqual(np.count_nonzero(actual), expected_non_zeros)
elif pooling_method == "AVG_3D":
expected = np.ones((batch, depth, height, width, channels),
dtype=np.float32) / stride_d / stride_h / stride_w
self.assertAllClose(actual, expected)
@test_util.run_in_graph_and_eager_modes()
def testConv1d(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
filter_size = 3
depth_dim = mtf.Dimension("depth", 2)
length_dim = mtf.Dimension("length", 4)
output_dim = mtf.Dimension("output", 2)
x = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_x = mtf.import_tf_tensor(
mesh, x, shape=mtf.Shape([length_dim, depth_dim]))
initializer_mock = mock.MagicMock()
initializer_mock.side_effect = initialize_by_shape({
(1, 3, 2, 2): [[[[1, -1], [0, 0]], [[2, -2], [-1, 1]], [[3, -3],
[-2, 2]]]],
})
mtf_output = mtf.layers.conv1d(
mtf_x,
output_dim=output_dim,
filter_size=filter_size,
filter_initializer=initializer_mock)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_output = lowering.export_to_tf_tensor(mtf_output)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate(actual_output)
self.assertAllClose(actual, [[0, 0], [1, -1], [5, -5], [4, -4]])
def testConv1dValidPadding(self):
converter = test_utils.NumpyConverter()
batch = 2
d_model = 6
d_out = 1
length = 4
filter_size = 3
x = np.random.randn(batch, length, d_model)
x_mtf = converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
conv_filter = np.random.randn(1, filter_size, d_model, d_out)
initializer = lambda shape, dtype, **kwargs: conv_filter
output_mtf = mtf.layers.conv1d(
x_mtf,
output_dim=mtf.Dimension("output_dim", d_out),
filter_size=filter_size,
padding="VALID",
filter_initializer=initializer)
actual = converter.convert_mtf_tensor_to_np_array(output_mtf)
# Expected length is 2.
expected = np.empty(shape=(batch, 2, d_out), dtype=np.float32)
# [filter_size, d_model]
current_filter = conv_filter[0, :, :, 0]
# b: batch, k: filter_size, d: d_model.
expected[:, 0] = np.einsum("bkd,kd->b", x[:, :filter_size, :],
current_filter).reshape(batch, 1)
expected[:, 1] = np.einsum("bkd,kd->b", x[:, 1:, :],
current_filter).reshape(batch, 1)
self.assertAllClose(actual, expected)
def testConv1dValidPaddingMultipleBatchDims(self):
converter = test_utils.NumpyConverter()
batch = 2
outer_batch = 3
d_model = 6
d_out = 1
length = 4
filter_size = 3
x = np.random.randn(outer_batch, batch, length, d_model)
x_mtf = converter.convert_np_array_to_mtf_tensor(
x,
dtype=tf.float32,
dim_names=["outer_batch", "batch", "length", "d_model"])
conv_filter = np.random.randn(1, filter_size, d_model, d_out)
initializer = lambda shape, dtype, **kwargs: conv_filter
output_mtf = mtf.layers.conv1d(
x_mtf,
output_dim=mtf.Dimension("output_dim", d_out),
filter_size=filter_size,
padding="VALID",
filter_initializer=initializer)
actual = converter.convert_mtf_tensor_to_np_array(output_mtf)
# Expected length is 2.
expected = np.empty(shape=(outer_batch, batch, 2, d_out), dtype=np.float32)
# Effective filter: [filter_size, d_model]
f = conv_filter[0, :, :, 0]
# o: outer_batch, b: batch, k: filter_size, d: d_model.
expected[:, :, 0] = np.einsum("obkd,kd->ob", x[:, :, :filter_size, :],
f).reshape(outer_batch, batch, 1)
expected[:, :, 1] = np.einsum("obkd,kd->ob", x[:, :, 1:, :],
f).reshape(outer_batch, batch, 1)
self.assertAllClose(actual, expected)
@mock.patch.object(tf, "truncated_normal_initializer", autospec=True)
@test_util.run_in_graph_and_eager_modes()
def testSeparableConv1d(self, random_normal_initializer_mock):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
depth_dim = mtf.Dimension("depth", 2)
length_dim = mtf.Dimension("length", 4)
output_dim = mtf.Dimension("output", 2)
x = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_x = mtf.import_tf_tensor(
mesh, x, shape=mtf.Shape([length_dim, depth_dim]))
initializer_mock = mock.MagicMock()
random_normal_initializer_mock.return_value = initializer_mock
initializer_mock.side_effect = initialize_by_shape({
(2,): [1, 2],
(2, 2): [[1, 0], [1, -1]],
})
mtf_output = mtf.layers.separable_conv1d(
mtf_x,
output_dim,
min_relative_pos=-1,
max_relative_pos=1,
use_bias=True)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_output = lowering.export_to_tf_tensor(mtf_output)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate(actual_output)
self.assertAllClose(actual, [[3, -2], [6, -4], [9, -6], [7, -4]])
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.enable_eager_execution()
tf.test.main()
| 36.116751 | 94 | 0.66423 |
e381fa3800184a029dc86b17d79472c67135941e | 2,658 | py | Python | discodo/extractor/youtube_dl.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null | discodo/extractor/youtube_dl.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null | discodo/extractor/youtube_dl.py | AkiaCode/discodo | 0a76afb196a7945f525896f56f431e82aaf83f44 | ["MIT"] | null | null | null |
import asyncio
import copy
import ipaddress
import re
from typing import Coroutine, Union
from youtube_dl import YoutubeDL as YoutubeDLClient
from ..errors import NoSearchResults
YTDLOption = {
"format": "(bestaudio[ext=opus]/bestaudio/best)[protocol!=http_dash_segments]",
"nocheckcertificate": True,
"no_warnings": True,
"default_search": "auto",
"source_address": "0.0.0.0",
"skip_download": True,
"writesubtitles": True,
}
YOUTUBE_PLAYLIST_ID_REGEX = re.compile(
r"(?:http|https|)(?::\/\/|)(?:www.|)(?:music.|)(?:youtu\.be\/|youtube\.com(?:\/embed\/|\/v\/|\/watch\?v=|\/ytscreeningroom\?v=|\/feeds\/api\/videos\/|\/user\S*[^\w\-\s]|\S*[^\w\-\s]))([\w\-]{12,})[a-z0-9;:@#?&%=+\/\$_.-]*(?:&index=|)([0-9]*)?"
)
def _extract(
query: str,
address: Union[ipaddress.IPv4Address, ipaddress.IPv6Address] = None,
video: bool = False,
) -> dict:
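    """Resolve a query with youtube-dl synchronously; returns one info dict, or a list of entries for playlists."""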
option = copy.copy(YTDLOption)
if video:
option["format"] = "(best)[protocol!=http_dash_segments]"
if address:
option["source_address"] = str(address)
YoutubePlaylistMatch = YOUTUBE_PLAYLIST_ID_REGEX.match(query)
if YoutubePlaylistMatch and not YoutubePlaylistMatch.group(1).startswith(
("RD", "UL", "PU")
):
option["playliststart"] = (
int(YoutubePlaylistMatch.group(2))
if YoutubePlaylistMatch.group(2).isdigit()
else 1
)
option["dump_single_json"] = True
option["extract_flat"] = True
query = "https://www.youtube.com/playlist?list=" + YoutubePlaylistMatch.group(1)
else:
option["noplaylist"] = True
YoutubeDL = YoutubeDLClient(option)
Data = YoutubeDL.extract_info(query, download=False)
if not Data:
raise NoSearchResults
if "entries" in Data:
if len(Data["entries"]) == 1:
return Data["entries"][0]
return Data["entries"]
if not Data:
raise NoSearchResults
return Data
def _clear_cache() -> None:
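    """Remove youtube-dl's on-disk cache."""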
option = {
"ignoreerrors": True,
"no_warnings": True,
}
YoutubeDL = YoutubeDLClient(option)
YoutubeDL.cache.remove()
def extract(
query: str,
address: Union[ipaddress.IPv4Address, ipaddress.IPv6Address] = None,
video: bool = False,
loop: asyncio.AbstractEventLoop = None,
) -> Coroutine:
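    """Run the blocking _extract call in the event loop's default executor so callers can await it."""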
if not loop:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, _extract, query, address, video)
def clear_cache(loop: asyncio.AbstractEventLoop = None) -> Coroutine:
if not loop:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, _clear_cache)
| 26.848485 | 247 | 0.632054 |
8b35d475b7dd6a1da962c5755b1da47f523a7576 | 70,135 | py | Python | server/reportlab/pdfgen/canvas.py | fergalmoran/Chrome2Kindle | a85b823d23849711c0015e80e741d8458527d306 | ["MIT"] | 2 | 2016-03-10T08:48:51.000Z | 2018-06-27T00:15:48.000Z | server/reportlab/pdfgen/canvas.py | fergalmoran/Chrome2Kindle | a85b823d23849711c0015e80e741d8458527d306 | ["MIT"] | null | null | null | server/reportlab/pdfgen/canvas.py | fergalmoran/Chrome2Kindle | a85b823d23849711c0015e80e741d8458527d306 | ["MIT"] | null | null | null |
#Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__=''' $Id: canvas.py 3606 2009-12-03 11:39:56Z rgbecker $ '''
__doc__="""
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking
import os
import sys
import re
from string import join, split, strip, atoi, replace, upper, digits
import tempfile
from math import sin, cos, tan, pi, ceil
try:
from hashlib import md5
except ImportError:
from md5 import md5
from reportlab import rl_config
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject, textobject
from reportlab.lib.colors import black
from reportlab.lib.utils import import_zlib, ImageReader, fp_str, _digester
from reportlab.lib.boxstuff import aspectRatioFix
digitPat = re.compile('\d') #used in decimal alignment
zlib = import_zlib()
# Robert Kern
# Constants for closing paths.
# May be useful if one changes 'arc' and 'rect' to take a
# default argument that tells how to close the path.
# That way we can draw filled shapes.
FILL_EVEN_ODD = 0
FILL_NON_ZERO = 1
#this is used by path-closing routines.
#map stroke, fill, fillmode -> operator
# fillmode: 1 = non-Zero (obviously), 0 = evenOdd
PATH_OPS = {(0, 0, FILL_EVEN_ODD) : 'n', #no op
(0, 0, FILL_NON_ZERO) : 'n', #no op
(1, 0, FILL_EVEN_ODD) : 'S', #stroke only
(1, 0, FILL_NON_ZERO) : 'S', #stroke only
(0, 1, FILL_EVEN_ODD) : 'f*', #Fill only
(0, 1, FILL_NON_ZERO) : 'f', #Fill only
(1, 1, FILL_EVEN_ODD) : 'B*', #Stroke and Fill
(1, 1, FILL_NON_ZERO) : 'B', #Stroke and Fill
}
_escapePDF = pdfutils._escape
_instanceEscapePDF = pdfutils._instanceEscapePDF
def _annFormat(D,color,thickness,dashArray,hradius=0,vradius=0):
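    """Fill in default /C (color) and /Border entries on the annotation dictionary D when they are not already set."""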
from reportlab.pdfbase.pdfdoc import PDFArray, PDFDictionary
if color and not D.has_key('C'):
D["C"] = PDFArray([color.red, color.green, color.blue])
if not D.has_key('Border'):
border = [hradius,vradius,thickness or 0]
if dashArray:
border.append(PDFArray(dashArray))
D["Border"] = PDFArray(border)
# BS = PDFDictionary()
# bss = 'S'
# if dashArray:
# BS['D'] = PDFArray(dashArray)
# bss = 'D'
# BS['W'] = thickness or 0
# BS['S'] = bss
# D['BS'] = BS
class ExtGState:
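    """Tracks extended graphics state settings (stroke/fill alpha, overprint), naming each distinct value once so '/GSn gs' can be emitted."""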
defaults = dict(
CA=1,
ca=1,
OP=False,
op=False,
)
def __init__(self):
self._d = {}
self._c = {}
def set(self,canv,a,v):
d = self.defaults[a]
isbool = isinstance(d,bool)
if isbool: v=bool(v)
if v!=self._d.get(a,d) or (a=='op' and self.getValue('OP')!=d):
self._d[a] = v
if isbool: v=str(v).lower()
t = a,v
if t in self._c:
name = self._c[t]
else:
name = 'GS'+str(len(self._c))
self._c[t] = name
canv._code.append('/%s gs' % name)
def getValue(self,a):
return self._d.get(a,self.defaults[a])
def getState(self):
S = {}
for t,name in self._c.iteritems():
S[name] = pdfdoc.PDFDictionary(dict((t,)))
return S and pdfdoc.PDFDictionary(S) or None
def pushCopy(self):
'''the states must be shared across push/pop, but the values not'''
x = self.__class__()
x._d = self._d.copy()
x._c = self._c
return x
class Canvas(textobject._PDFColorSetter):
"""This class is the programmer's interface to the PDF file format. Methods
are (or will be) provided here to do just about everything PDF can do.
The underlying model to the canvas concept is that of a graphics state machine
that at any given point in time has a current font, fill color (for figure
interiors), stroke color (for figure borders), line width and geometric transform, among
many other characteristics.
Canvas methods generally either draw something (like canvas.line) using the
current state of the canvas or change some component of the canvas
state (like canvas.setFont). The current state can be saved and restored
using the saveState/restoreState methods.
Objects are "painted" in the order they are drawn so if, for example
two rectangles overlap the last draw will appear "on top". PDF form
objects (supported here) are used to draw complex drawings only once,
for possible repeated use.
There are other features of canvas which are not visible when printed,
such as outlines and bookmarks which are used for navigating a document
in a viewer.
Here is a very silly example usage which generates a Hello World pdf document.
from reportlab.pdfgen import canvas
c = canvas.Canvas("hello.pdf")
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch,inch)
# define a large font
c.setFont("Helvetica", 80)
# choose some colors
c.setStrokeColorRGB(0.2,0.5,0.3)
c.setFillColorRGB(1,0,1)
# draw a rectangle
c.rect(inch,inch,6*inch,9*inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0,0,0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(3*inch, -3*inch, "Hello World")
c.showPage()
c.save()
"""
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
invariant = None,
verbosity=0,
encrypt=None,
cropMarks=None,
pdfVersion=None,
):
"""Create a canvas of a given size. etc.
You may pass a file-like object to filename as an alternative to
a string.
For more information about the encrypt parameter refer to the setEncrypt method.
Most of the attributes are private - we will use set/get methods
as the preferred interface. Default page size is A4.
cropMarks may be True/False or an object with parameters borderWidth, markColor, markWidth
and markLength
"""
if pagesize is None: pagesize = rl_config.defaultPageSize
if invariant is None: invariant = rl_config.invariant
self._filename = filename
self._doc = pdfdoc.PDFDocument(compression=pageCompression,
invariant=invariant, filename=filename,
pdfVersion=pdfVersion or pdfdoc.PDF_VERSION_DEFAULT,
)
#this only controls whether it prints 'saved ...' - 0 disables
self._verbosity = verbosity
#this is called each time a page is output if non-null
self._onPage = None
self._cropMarks = cropMarks
self._pagesize = pagesize
self._pageRotation = 0
#self._currentPageHasImages = 0
self._pageTransition = None
self._pageDuration = None
self._destinations = {} # dictionary of destinations for cross indexing.
self.setPageCompression(pageCompression)
self._pageNumber = 1 # keep a count
# when we create a form we need to save operations not in the form
self._codeStack = []
self._restartAccumulators() # restart all accumulation state (generalized, arw)
self._annotationCount = 0
self._outlines = [] # list for a name tree
self._psCommandsBeforePage = [] #for postscript tray/font commands
self._psCommandsAfterPage = [] #for postscript tray/font commands
#PostScript has the origin at bottom left. It is easy to achieve a top-
#down coord system by translating to the top of the page and setting y
#scale to -1, but then text is inverted. So self.bottomup is used
#to also set the text matrix accordingly. You can now choose your
#drawing coordinates.
self.bottomup = bottomup
self.imageCaching = rl_config.defaultImageCaching
self.init_graphics_state()
self._make_preamble()
self.state_stack = []
self.setEncrypt(encrypt)
def setEncrypt(self, encrypt):
'''
Set the encryption used for the pdf generated by this canvas.
If encrypt is a string object, it is used as the user password for the pdf.
If encrypt is an instance of reportlab.lib.pdfencrypt.StandardEncryption, this object is
used to encrypt the pdf. This allows more finegrained control over the encryption settings.
'''
if encrypt:
from reportlab.lib import pdfencrypt
if isinstance(encrypt, basestring): #encrypt is the password itself
if isinstance(encrypt, unicode):
encrypt = encrypt.encode('utf-8')
encrypt = pdfencrypt.StandardEncryption(encrypt) #now it's the encrypt object
encrypt.setAllPermissions(1)
elif not isinstance(encrypt, pdfencrypt.StandardEncryption):
raise TypeError('Expected string or instance of reportlab.lib.pdfencrypt.StandardEncryption as encrypt parameter but got %r' % encrypt)
self._doc.encrypt = encrypt
else:
try:
del self._doc.encrypt
except AttributeError:
pass
def init_graphics_state(self):
#initial graphics state, never modify any of these in place
self._x = 0
self._y = 0
self._fontname = rl_config.canvas_basefontname
self._fontsize = 12
self._textMode = 0 #track if between BT/ET
self._leading = 14.4
self._currentMatrix = (1., 0., 0., 1., 0., 0.)
self._fillMode = 0 #even-odd
#text state
self._charSpace = 0
self._wordSpace = 0
self._horizScale = 100
self._textRenderMode = 0
self._rise = 0
self._textLineMatrix = (1., 0., 0., 1., 0., 0.)
self._textMatrix = (1., 0., 0., 1., 0., 0.)
# line drawing
self._lineCap = 0
self._lineJoin = 0
self._lineDash = None #not done
self._lineWidth = 0
self._mitreLimit = 0
self._fillColorObj = self._strokeColorObj = rl_config.canvas_baseColor or (0,0,0)
self._extgstate = ExtGState()
def push_state_stack(self):
state = {}
d = self.__dict__
for name in self.STATE_ATTRIBUTES:
state[name] = d[name] #getattr(self, name)
self.state_stack.append(state)
self._extgstate = self._extgstate.pushCopy()
def pop_state_stack(self):
state = self.state_stack[-1]
del self.state_stack[-1]
d = self.__dict__
d.update(state)
STATE_ATTRIBUTES = split("""
_x _y _fontname _fontsize _textMode _leading _currentMatrix _fillMode
_charSpace _wordSpace _horizScale _textRenderMode _rise _textLineMatrix
_textMatrix _lineCap _lineJoin _lineDash _lineWidth _mitreLimit _fillColorObj
_strokeColorObj _extgstate""")
STATE_RANGE = range(len(STATE_ATTRIBUTES))
#self._addStandardFonts()
def _make_preamble(self):
P = [].append
if self.bottomup:
P('1 0 0 1 0 0 cm')
else:
P('1 0 0 -1 0 %s cm' % fp_str(self._pagesize[1]))
C = self._code
n = len(C)
if self._fillColorObj != (0,0,0):
self.setFillColor(self._fillColorObj)
if self._strokeColorObj != (0,0,0):
self.setStrokeColor(self._strokeColorObj)
P(' '.join(C[n:]))
del C[n:]
font = pdfmetrics.getFont(self._fontname)
if not font._dynamicFont:
#set an initial font
P('BT %s 12 Tf 14.4 TL ET' % self._doc.getInternalFontName(self._fontname))
self._preamble = ' '.join(P.__self__)
if not _instanceEscapePDF:
def _escape(self, s):
return _escapePDF(s)
#info functions - non-standard
def setAuthor(self, author):
"""identify the author for invisible embedding inside the PDF document.
The author annotation will appear in the text of the file but will
not automatically be shown when the document is viewed; it is visible
in the document properties dialog and similar metadata views."""
self._doc.setAuthor(author)
def setDateFormatter(self, dateFormatter):
"""accepts a func(yyyy,mm,dd,hh,m,s) used to create embedded formatted date"""
self._doc.setDateFormatter(dateFormatter)
def addOutlineEntry(self, title, key, level=0, closed=None):
"""Adds a new entry to the outline at given level. If LEVEL not specified,
entry goes at the top level. If level specified, it must be
no more than 1 greater than the outline level in the last call.
The key must be the (unique) name of a bookmark.
the title is the (non-unique) name to be displayed for the entry.
If closed is set then the entry should show no subsections by default
when displayed.
Example::
c.addOutlineEntry("first section", "section1")
c.addOutlineEntry("introduction", "s1s1", 1, closed=1)
c.addOutlineEntry("body", "s1s2", 1)
c.addOutlineEntry("detail1", "s1s2s1", 2)
c.addOutlineEntry("detail2", "s1s2s2", 2)
c.addOutlineEntry("conclusion", "s1s3", 1)
c.addOutlineEntry("further reading", "s1s3s1", 2)
c.addOutlineEntry("second section", "section1")
c.addOutlineEntry("introduction", "s2s1", 1)
c.addOutlineEntry("body", "s2s2", 1, closed=1)
c.addOutlineEntry("detail1", "s2s2s1", 2)
c.addOutlineEntry("detail2", "s2s2s2", 2)
c.addOutlineEntry("conclusion", "s2s3", 1)
c.addOutlineEntry("further reading", "s2s3s1", 2)
generated outline looks like::
- first section
|- introduction
|- body
| |- detail1
| |- detail2
|- conclusion
| |- further reading
- second section
|- introduction
|+ body
|- conclusion
| |- further reading
Note that the second "body" is closed.
Note that you can jump from level 5 to level 3 but not
from 3 to 5: instead you need to provide all intervening
levels going down (4 in this case). Note that titles can
collide but keys cannot.
"""
#to be completed
#self._outlines.append(title)
self._doc.outline.addOutlineEntry(key, level, title, closed=closed)
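# Illustrative sketch (not part of the original module): pairing bookmarkPage
# with addOutlineEntry so every outline key is bound to a destination before
# save() is called. File name and titles are hypothetical.
#
#   c = canvas.Canvas("outline.pdf")
#   c.drawString(72, 720, "Chapter 1")
#   c.bookmarkPage("ch1")                       # bind the key to this page
#   c.addOutlineEntry("Chapter 1", "ch1", level=0)
#   c.showPage()
#   c.drawString(72, 720, "Chapter 1, Section 1")
#   c.bookmarkPage("ch1s1")
#   c.addOutlineEntry("Section 1", "ch1s1", level=1)
#   c.showOutline()                             # open with the outline visible
#   c.save()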
def setOutlineNames0(self, *nametree): # keep this for now (?)
"""nametree should can be a recursive tree like so::
c.setOutlineNames(
"chapter1dest",
("chapter2dest",
["chapter2section1dest",
"chapter2section2dest",
"chapter2conclusiondest"]
), # end of chapter2 description
"chapter3dest",
("chapter4dest", ["c4s1", "c4s2"])
)
each of the string names inside must be bound to a bookmark
before the document is generated.
"""
self._doc.outline.setNames(*((self,)+nametree))
def setTitle(self, title):
"""write a title into the PDF file that won't automatically display
in the document itself."""
self._doc.setTitle(title)
def setSubject(self, subject):
"""write a subject into the PDF file that won't automatically display
in the document itself."""
self._doc.setSubject(subject)
def setKeywords(self, keywords):
"""write a list of keywords into the PDF file which shows in document properties.
Either submit a single string or a list/tuple"""
if isinstance(keywords,(list,tuple)):
keywords = ', '.join(keywords)
self._doc.setKeywords(keywords)
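# Illustrative sketch (not part of the original module): filling in the
# document-information fields defined above. All values are hypothetical.
#
#   c = canvas.Canvas("meta.pdf")
#   c.setAuthor("Jane Doe")
#   c.setTitle("Quarterly Report")
#   c.setSubject("Sales figures, Q3")
#   c.setKeywords(["sales", "quarterly", "report"])  # list/tuple or single string
#   c.save()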
def pageHasData(self):
"Info function - app can call it after showPage to see if it needs a save"
return len(self._code) == 0
def showOutline(self):
"""Specify that Acrobat Reader should start with the outline tree visible.
showFullScreen0() and showOutline() conflict; the one called last
wins."""
self._doc._catalog.showOutline()
def showFullScreen0(self):
"""Specify that Acrobat Reader should start in full screen mode.
showFullScreen0() and showOutline() conflict; the one called last
wins."""
self._doc._catalog.showFullScreen()
def _setStrokeAlpha(self,v):
"""
Define the transparency/opacity of strokes. 0 is fully
transparent, 1 is fully opaque.
Note that calling this function will cause a version 1.4 PDF
to be generated (rather than 1.3).
"""
self._doc.ensureMinPdfVersion('transparency')
self._extgstate.set(self,'CA',v)
def _setFillAlpha(self,v):
"""
Define the transparency/opacity of non-strokes. 0 is fully
transparent, 1 is fully opaque.
Note that calling this function will cause a version 1.4 PDF
to be generated (rather than 1.3).
"""
self._doc.ensureMinPdfVersion('transparency')
self._extgstate.set(self,'ca',v)
def _setStrokeOverprint(self,v):
self._extgstate.set(self,'OP',v)
def _setFillOverprint(self,v):
self._extgstate.set(self,'op',v)
def _getCmShift(self):
cM = self._cropMarks
if cM:
mv = max(1,min(self._pagesize[0],self._pagesize[1]))
sf = min(1+1./mv,1.01)
bw = max(0,getattr(cM,'borderWidth',36)/sf)
return bw
def showPage(self):
"""Close the current page and possibly start on a new page."""
# ensure a space at the end of the stream - Acrobat does
# not mind, but Ghostscript dislikes 'Qendstream' even if
# the length marker finishes after 'Q'
pageWidth = self._pagesize[0]
pageHeight = self._pagesize[1]
cM = self._cropMarks
code = self._code
if cM:
mv = max(1,min(pageWidth,pageHeight))
sf = min(1+1./mv,1.01)
bw = max(0,getattr(cM,'borderWidth',36)/sf)
if bw:
bv = (sf-1)*mv*0.5
ml = min(bw,max(0,getattr(cM,'markLength',18)/sf))
mw = getattr(cM,'markWidth',0.5)
mc = getattr(cM,'markColor',black)
mg = bw-ml
cx0 = len(code)
self.saveState()
self.scale(sf,sf)
self.translate(bw,bw)
opw = pageWidth*sf
oph = pageHeight*sf
pageWidth = 2*bw + pageWidth*sf
pageHeight = 2*bw + pageHeight*sf
if ml and mc:
self.saveState()
self.setStrokeColor(mc)
self.setLineWidth(mw)
self.lines([
(bv,0-bw,bv,ml-bw),
(opw-2*bv,0-bw,opw-2*bv,ml-bw),
(bv,oph+mg,bv,oph+bw),
(opw-2*bv,oph+mg,opw-2*bv,oph+bw),
(-bw,bv,ml-bw,bv),
(opw+mg,bv,opw+bw,bv),
(-bw,oph-2*bv,ml-bw,oph-2*bv),
(opw+mg,oph-2*bv,opw+bw,oph-2*bv),
])
self.restoreState()
C = code[cx0:]
del code[cx0:]
code[0:0] = C
self.restoreState()
code.append(' ')
page = pdfdoc.PDFPage()
page.pagewidth = pageWidth
page.pageheight = pageHeight
page.Rotate = self._pageRotation
page.hasImages = self._currentPageHasImages
page.setPageTransition(self._pageTransition)
page.setCompression(self._pageCompression)
if self._pageDuration is not None:
page.Dur = self._pageDuration
strm = self._psCommandsBeforePage + [self._preamble] + code + self._psCommandsAfterPage
page.setStream(strm)
self._setColorSpace(page)
self._setExtGState(page)
self._setXObjects(page)
self._setAnnotations(page)
self._doc.addPage(page)
if self._onPage: self._onPage(self._pageNumber)
self._startPage()
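# Illustrative sketch (not part of the original module): a multi-page document
# built with repeated showPage() calls, plus an optional page callback for
# progress reporting. File name and content are hypothetical.
#
#   def report_progress(page_number):
#       print "finished page", page_number
#
#   c = canvas.Canvas("pages.pdf")
#   c.setPageCallBack(report_progress)
#   for i in range(1, 4):
#       c.drawString(72, 720, "This is page %d" % i)
#       c.showPage()                # close this page and start the next one
#   c.save()                        # save() issues a final showPage if needed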
def _startPage(self):
#now get ready for the next one
self._pageNumber += 1
self._restartAccumulators()
self.init_graphics_state()
self.state_stack = []
def setPageCallBack(self, func):
"""func(pageNum) will be called on each page end.
This is mainly a hook for progress monitoring.
Call setPageCallback(None) to clear a callback."""
self._onPage = func
def _setAnnotations(self,page):
page.Annots = self._annotationrefs
def _setColorSpace(self,obj):
obj._colorsUsed = self._colorsUsed
def _setXObjects(self, thing):
"""for pages and forms, define the XObject dictionary for resources, if needed"""
forms = self._formsinuse
if forms:
xobjectsdict = self._doc.xobjDict(forms)
thing.XObjects = xobjectsdict
else:
thing.XObjects = None
def _bookmarkReference(self, name):
"""get a reference to a (possibly undefined, possibly unbound) bookmark"""
d = self._destinations
try:
return d[name]
except:
result = d[name] = pdfdoc.Destination(name) # newly defined, unbound
return result
def bookmarkPage(self, key,
fit="Fit",
left=None,
top=None,
bottom=None,
right=None,
zoom=None
):
"""
This creates a bookmark to the current page which can
be referred to with the given key elsewhere.
PDF offers very fine grained control over how Acrobat
reader is zoomed when people link to this. The default
is to keep the user's current zoom settings. The last
arguments may or may not be needed depending on the
choice of the 'fit' argument.
Fit types and the other arguments they use are:
- XYZ left top zoom - fine grained control. null
or zero for any of the parameters means 'leave
as is', so "0,0,0" will keep the reader's settings.
NB. Adobe Reader appears to prefer "null" to 0's.
- Fit - entire page fits in window
- FitH top - top coord at top of window, width scaled
to fit.
- FitV left - left coord at left of window, height
scaled to fit
- FitR left bottom right top - scale window to fit
the specified rectangle
(question: do we support /FitB, FitBH and /FitBV
which are hangovers from version 1.1 / Acrobat 3.0?)"""
dest = self._bookmarkReference(key)
self._doc.inPage() # try to enable page-only features
pageref = self._doc.thisPageRef()
#None = "null" for PDF
if left is None:
left = "null"
if top is None:
top = "null"
if bottom is None:
bottom = "null"
if right is None:
right = "null"
if zoom is None:
zoom = "null"
if fit == "XYZ":
dest.xyz(left,top,zoom)
elif fit == "Fit":
dest.fit()
elif fit == "FitH":
dest.fith(top)
elif fit == "FitV":
dest.fitv(left)
elif fit == "FitR":
dest.fitr(left,bottom,right,top)
#Do we need these (version 1.1 / Acrobat 3 versions)?
elif fit == "FitB":
dest.fitb()
elif fit == "FitBH":
dest.fitbh(top)
elif fit == "FitBV":
dest.fitbv(left)
else:
raise "Unknown Fit type %s" % (fit,)
dest.setPage(pageref)
return dest
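# Illustrative sketch (not part of the original module): bookmarks with
# different fit types, later referenced by outline entries or links.
# Assumes c is a Canvas instance; keys and coordinates are hypothetical.
#
#   c.bookmarkPage("overview")                          # default: whole page fits
#   c.bookmarkPage("table3", fit="FitH", top=540)       # scroll so y=540 is at the top
#   c.bookmarkPage("detail", fit="XYZ", left=72, top=700, zoom=2)  # zoom to 200%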
def bookmarkHorizontalAbsolute(self, key, top, left=0, fit='XYZ', **kw):
"""Bind a bookmark (destination) to the current page at a horizontal position.
Note that the position of the bookmark is given with respect to the default
user space (where the origin is at the lower left corner of the page)
and completely ignores any transform (translation, scale, skew, rotation,
etcetera) in effect for the current graphics state. The programmer is
responsible for making sure the bookmark matches an appropriate item on
the page."""
#This method should probably be deprecated since it is just a sub-set of bookmarkPage
return self.bookmarkPage(key, fit=fit, top=top, left=left, zoom=0)
def bookmarkHorizontal(self, key, relativeX, relativeY, **kw):
"""w.r.t. the current transformation, bookmark this horizontal."""
(left, top) = self.absolutePosition(relativeX,relativeY)
self.bookmarkHorizontalAbsolute(key, top, left=left, **kw)
#def _inPage0(self): disallowed!
# """declare a page, enable page features"""
# self._doc.inPage()
#def _inForm0(self):
# "deprecated in favore of beginForm...endForm"
# self._doc.inForm()
def doForm(self, name):
"""use a form XObj in current operation stream.
The form should either have been defined previously using
beginForm ... endForm, or may be defined later. If it is not
defined at save time, an exception will be raised. The form
will be drawn within the context of the current graphics
state."""
self._code.append("/%s Do" % self._doc.getXObjectName(name))
self._formsinuse.append(name)
def hasForm(self, name):
"""Query whether form XObj really exists yet."""
return self._doc.hasForm(name)
######################################################
#
# Image routines
#
######################################################
def drawInlineImage(self, image, x,y, width=None,height=None,
preserveAspectRatio=False,anchor='c'):
"""See drawImage, which should normally be used instead...
drawInlineImage behaves like drawImage, but stores the image content
within the graphics stream for the page. This means that the mask
parameter for transparency is not available. It also means that there
is no saving in file size or time if the same image is reused.
In theory it allows images to be displayed slightly faster; however,
we doubt if the difference is noticeable to any human user these days.
Only use this if you have studied the PDF specification and know the
implications.
"""
self._currentPageHasImages = 1
from pdfimages import PDFImage
img_obj = PDFImage(image, x,y, width, height)
img_obj.drawInlineImage(self,
preserveAspectRatio=preserveAspectRatio,
anchor=anchor)
return (img_obj.width, img_obj.height)
def drawImage(self, image, x, y, width=None, height=None, mask=None,
preserveAspectRatio=False, anchor='c'):
"""Draws the image (ImageReader object or filename) as specified.
"image" may be an image filename or an ImageReader object.
x and y define the lower left corner of the image you wish to
draw (or of its bounding box, if using preserveAspectRatio below).
If width and height are not given, the width and height of the
image in pixels is used at a scale of 1 point to 1 pixel.
If width and height are given, the image will be stretched to fill
the given rectangle bounded by (x, y, x+width, y+height).
If you supply negative widths and/or heights, it inverts them and adjusts
x and y accordingly.
The method returns the width and height of the underlying image, since
this is often useful for layout algorithms and saves you work if you have
not specified them yourself.
The mask parameter supports transparent backgrounds. It takes 6 numbers
and defines the range of RGB values which will be masked out or treated
as transparent. For example with [0,2,40,42,136,139], it will mask out
any pixels with a Red value from 0-2, Green from 40-42 and
Blue from 136-139 (on a scale of 0-255).
New post version 2.0: drawImage can center an image in a box you
provide, while preserving its aspect ratio. For example, you might
have a fixed square box in your design, and a collection of photos
which might be landscape or portrait that you want to appear within
the box. If preserveAspectRatio is true, your image will appear within
the box specified.
If preserveAspectRatio is True, the anchor property can be used to
specify how images should fit into the given box. It should
be set to one of the following values, taken from the points of
the compass (plus 'c' for 'centre'):
nw n ne
w c e
sw s se
The default value is 'c' for 'centre'. Thus, if you want your
bitmaps to always be centred and appear at the top of the given box,
set anchor='n'. There are good examples of this in the output
of test_pdfgen_general.py
Unlike drawInlineImage, this creates 'external images' which
are only stored once in the PDF file but can be drawn many times.
If you give it the same filename twice, even at different locations
and sizes, it will reuse the first occurrence, resulting in a saving
in file size and generation time. If you use ImageReader objects,
it tests whether the image content has changed before deciding
whether to reuse it.
In general you should use drawImage in preference to drawInlineImage
unless you have read the PDF Spec and understand the tradeoffs."""
self._currentPageHasImages = 1
# first, generate a unique name/signature for the image. If ANYTHING
# is different, even the mask, this should be different.
if isinstance(image,ImageReader):
rawdata = image.getRGBData()
smask = image._dataA
if mask=='auto' and smask:
mdata = smask.getRGBData()
else:
mdata = str(mask)
name = _digester(rawdata+mdata)
else:
#filename, use it
name = _digester('%s%s' % (image, mask))
# in the pdf document, this will be prefixed with something to
# say it is an XObject. Does it exist yet?
regName = self._doc.getXObjectName(name)
imgObj = self._doc.idToObject.get(regName, None)
if not imgObj:
#first time seen, create and register the PDFImageXobject
imgObj = pdfdoc.PDFImageXObject(name, image, mask=mask)
imgObj.name = name
self._setXObjects(imgObj)
self._doc.Reference(imgObj, regName)
self._doc.addForm(name, imgObj)
smask = getattr(imgObj,'_smask',None)
if smask: #set up the softmask obtained above
mRegName = self._doc.getXObjectName(smask.name)
mImgObj = self._doc.idToObject.get(mRegName, None)
if not mImgObj:
self._setXObjects(smask)
imgObj.smask = self._doc.Reference(smask,mRegName)
else:
imgObj.smask = pdfdoc.PDFObjectReference(mRegName)
del imgObj._smask
# ensure we have a size, as PDF will make it 1x1 pixel otherwise!
x,y,width,height,scaled = aspectRatioFix(preserveAspectRatio,anchor,x,y,width,height,imgObj.width,imgObj.height)
# scale and draw
self.saveState()
self.translate(x, y)
self.scale(width, height)
self._code.append("/%s Do" % regName)
self.restoreState()
# track what's been used on this page
self._formsinuse.append(name)
return (imgObj.width, imgObj.height)
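# Illustrative sketch (not part of the original module): common drawImage
# patterns described above. Assumes c is a Canvas instance; "logo.png",
# "photo.jpg" and the mask values are hypothetical.
#
#   # natural size, lower-left corner at (72, 600)
#   c.drawImage("logo.png", 72, 600)
#
#   # stretch into a 200x100 point box
#   c.drawImage("logo.png", 72, 450, width=200, height=100)
#
#   # fit into a square box, keep aspect ratio, anchor at the top ('n')
#   c.drawImage("logo.png", 72, 200, width=144, height=144,
#               preserveAspectRatio=True, anchor='n')
#
#   # treat near-white pixels as transparent
#   c.drawImage("photo.jpg", 300, 200, mask=[250,255, 250,255, 250,255])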
def _restartAccumulators(self):
if self._codeStack:
# restore the saved code
saved = self._codeStack[-1]
del self._codeStack[-1]
self._code, self._formsinuse, self._annotationrefs, self._formData,self._colorsUsed = saved
else:
self._code = [] # ready for more...
self._psCommandsAfterPage = []
self._currentPageHasImages = 1 # for safety...
self._formsinuse = []
self._annotationrefs = []
self._formData = None
self._colorsUsed = {}
def _pushAccumulators(self):
"when you enter a form, save accumulator info not related to the form for page (if any)"
saved = (self._code, self._formsinuse, self._annotationrefs, self._formData, self._colorsUsed)
self._codeStack.append(saved)
self._code = [] # ready for more...
self._currentPageHasImages = 1 # for safety...
self._formsinuse = []
self._annotationrefs = []
self._formData = None
self._colorsUsed = {}
def _setExtGState(self, obj):
obj.ExtGState = self._extgstate.getState()
def beginForm(self, name, lowerx=0, lowery=0, upperx=None, uppery=None):
"""declare the current graphics stream to be a named form.
A graphics stream can either be a page or a form, not both.
Some operations (like bookmarking) are permitted for pages
but not forms. The form will not automatically be shown in the
document but must be explicitly referenced using doForm in pages
that require the form."""
self.push_state_stack()
self.init_graphics_state()
if self._code or self._formData:
# save the code that is not in the form
self._pushAccumulators()
#self._codeStack.append(self._code)
#self._code = []
self._formData = (name, lowerx, lowery, upperx, uppery)
self._doc.inForm()
#self._inForm0()
def endForm(self):
"""emit the current collection of graphics operations as a Form
as declared previously in beginForm."""
(name, lowerx, lowery, upperx, uppery) = self._formData
#self.makeForm0(name, lowerx, lowery, upperx, uppery)
# fall through! makeForm0 disallowed
#def makeForm0(self, name, lowerx=0, lowery=0, upperx=None, uppery=None):
"""Like showpage, but make a form using accumulated operations instead"""
# deprecated in favor of beginForm(...)... endForm()
(w,h) = self._pagesize
if upperx is None: upperx=w
if uppery is None: uppery=h
form = pdfdoc.PDFFormXObject(lowerx=lowerx, lowery=lowery, upperx=upperx, uppery=uppery)
form.compression = self._pageCompression
form.setStreamList([self._preamble] + self._code) # ??? minus preamble (seems to be needed!)
self._setColorSpace(form)
self._setExtGState(form)
self._setXObjects(form)
self._setAnnotations(form)
self._doc.addForm(name, form)
self._restartAccumulators()
self.pop_state_stack()
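# Illustrative sketch (not part of the original module): defining a form once
# and reusing it on several pages with doForm. Names and coordinates are
# hypothetical; assumes c is a Canvas instance.
#
#   c.beginForm("letterhead", lowerx=0, lowery=0, upperx=595, uppery=120)
#   c.setFont("Helvetica-Bold", 14)
#   c.drawString(72, 60, "ACME Corp.")
#   c.endForm()
#
#   for i in range(3):
#       c.doForm("letterhead")     # drawn from the single stored XObject
#       c.drawString(72, 400, "Body of page %d" % (i + 1))
#       c.showPage()
#   c.save()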
def addPostScriptCommand(self, command, position=1):
"""Embed literal Postscript in the document.
With position=0, it goes at very beginning of page stream;
with position=1, at current point; and
with position=2, at very end of page stream. What that does
to the resulting Postscript depends on Adobe's header :-)
Use with extreme caution, but sometimes needed for printer tray commands.
Acrobat 4.0 will export Postscript to a printer or file containing
the given commands. Adobe Reader 6.0 no longer does as this feature is
deprecated. 5.0, I don't know about (please let us know!). This was
funded by Bob Marshall of Vector.co.uk and tested on a Lexmark 750.
See test_pdfbase_postscript.py for 2 test cases - one will work on
any Postscript device, the other uses a 'setpapertray' command which
will error in Distiller but work on printers supporting it.
"""
#check if we've done this one already...
rawName = 'PS' + md5(command).hexdigest()
regName = self._doc.getXObjectName(rawName)
psObj = self._doc.idToObject.get(regName, None)
if not psObj:
#first use of this chunk of Postscript, make an object
psObj = pdfdoc.PDFPostScriptXObject(command + '\r\n')
self._setXObjects(psObj)
self._doc.Reference(psObj, regName)
self._doc.addForm(rawName, psObj)
if position == 0:
self._psCommandsBeforePage.append("/%s Do" % regName)
elif position==1:
self._code.append("/%s Do" % regName)
else:
self._psCommandsAfterPage.append("/%s Do" % regName)
self._formsinuse.append(rawName)
def _absRect(self,rect,relative=0):
if not rect:
w,h = self._pagesize
rect = (0,0,w,h)
elif relative:
lx, ly, ux, uy = rect
xll,yll = self.absolutePosition(lx,ly)
xur,yur = self.absolutePosition(ux, uy)
xul,yul = self.absolutePosition(lx, uy)
xlr,ylr = self.absolutePosition(ux, ly)
xs = xll, xur, xul, xlr
ys = yll, yur, yul, ylr
xmin, ymin = min(xs), min(ys)
xmax, ymax = max(xs), max(ys)
rect = xmin, ymin, xmax, ymax
bw = self._getCmShift()
if bw:
rect = rect[0]+bw,rect[1]+bw,rect[2]+bw,rect[3]+bw
return rect
def freeTextAnnotation(self, contents, DA, Rect=None, addtopage=1, name=None, relative=0, **kw):
"""DA is the default appearance string???"""
Rect = self._absRect(Rect,relative)
self._addAnnotation(pdfdoc.FreeTextAnnotation(Rect, contents, DA, **kw), name, addtopage)
def textAnnotation(self, contents, Rect=None, addtopage=1, name=None, relative=0, **kw):
"""Experimental, but works.
"""
Rect = self._absRect(Rect,relative)
self._addAnnotation(pdfdoc.TextAnnotation(Rect, contents, **kw), name, addtopage)
textAnnotation0 = textAnnotation #deprecated
def inkAnnotation(self, contents, InkList=None, Rect=None, addtopage=1, name=None, relative=0, **kw):
"Experimental - raises NotImplementedError; the remaining body is unreachable."
raise NotImplementedError
Rect = self._absRect(Rect,relative)
if not InkList:
InkList = ((100,100,100,h-100,w-100,h-100,w-100,100),)
self._addAnnotation(pdfdoc.InkAnnotation(Rect, contents, InkList, **kw), name, addtopage)
inkAnnotation0 = inkAnnotation #deprecated
def linkAbsolute(self, contents, destinationname, Rect=None, addtopage=1, name=None,
thickness=0, color=None, dashArray=None, **kw):
"""rectangular link annotation positioned wrt the default user space.
The identified rectangle on the page becomes a "hot link" which
when clicked will send the viewer to the page and position identified
by the destination.
Rect identifies (lowerx, lowery, upperx, uppery) for lower left
and upper right points of the rectangle. Translations and other transforms
are IGNORED (the rectangular position is given with respect
to the default user space).
destinationname should be the name of a bookmark (which may be defined later
but must be defined before the document is generated).
You may want to use the keyword argument Border='[0 0 0]' to
suppress the visible rectangle around the link during viewing."""
return self.linkRect(contents, destinationname, Rect, addtopage, name, relative=0,
thickness=thickness, color=color, dashArray=dashArray, **kw)
def linkRect(self, contents, destinationname, Rect=None, addtopage=1, name=None, relative=1,
thickness=0, color=None, dashArray=None, **kw):
"""rectangular link annotation w.r.t the current user transform.
if the transform is skewed/rotated the absolute rectangle will use the max/min x/y
"""
destination = self._bookmarkReference(destinationname) # permitted to be undefined... must bind later...
Rect = self._absRect(Rect,relative)
kw["Rect"] = Rect
kw["Contents"] = contents
kw["Destination"] = destination
_annFormat(kw,color,thickness,dashArray)
return self._addAnnotation(pdfdoc.LinkAnnotation(**kw), name, addtopage)
def linkURL(self, url, rect, relative=0, thickness=0, color=None, dashArray=None, kind="URI", **kw):
"""Create a rectangular URL 'hotspot' in the given rectangle.
if relative=1, this is in the current coord system, otherwise
in absolute page space.
The remaining options affect the border appearance; the border is
drawn by Acrobat, not us. Set thickness to zero to hide it.
Any border drawn this way is NOT part of the page stream and
will not show when printed to a Postscript printer or distilled;
it is safest to draw your own."""
from reportlab.pdfbase.pdfdoc import PDFDictionary, PDFName, PDFArray, PDFString
#tried the documented BS element in the pdf spec but it
#does not work, and Acrobat itself does not appear to use it!
ann = PDFDictionary(dict=kw)
ann["Type"] = PDFName("Annot")
ann["Subtype"] = PDFName("Link")
ann["Rect"] = PDFArray(self._absRect(rect,relative)) # the whole page for testing
# the action is a separate dictionary
A = PDFDictionary()
A["Type"] = PDFName("Action") # not needed?
uri = PDFString(url)
A['S'] = PDFName(kind)
if kind=="URI":
A["URI"] = uri
elif kind=='GoToR':
A["F"] = uri
A["D"] = "[ 0 /XYZ null null null ]"
else:
raise ValueError("Unknown linkURI kind '%s'" % kind)
ann["A"] = A
_annFormat(ann,color,thickness,dashArray)
self._addAnnotation(ann)
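# Illustrative sketch (not part of the original module): a clickable URL area
# over a piece of drawn text. Assumes c is a Canvas instance; the coordinates
# and URL are hypothetical.
#
#   c.drawString(72, 700, "Visit the project home page")
#   # rectangle roughly covering the string, in current coordinates
#   c.linkURL("http://www.reportlab.com", (72, 695, 260, 712),
#             relative=1, thickness=0)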
def _addAnnotation(self, annotation, name=None, addtopage=1):
count = self._annotationCount = self._annotationCount+1
if not name: name="NUMBER"+repr(count)
self._doc.addAnnotation(name, annotation)
if addtopage:
self._annotatePage(name)
def _annotatePage(self, name):
ref = self._doc.refAnnotation(name)
self._annotationrefs.append(ref)
def getPageNumber(self):
"get the page number for the current page being generated."
return self._pageNumber
def save(self):
"""Saves and close the PDF document in the file.
If there is current data a ShowPage is executed automatically.
After this operation the canvas must not be used further."""
if len(self._code): self.showPage()
self._doc.SaveToFile(self._filename, self)
def getpdfdata(self):
"""Returns the PDF data that would normally be written to a file.
If there is current data a ShowPage is executed automatically.
After this operation the canvas must not be used further."""
if len(self._code): self.showPage()
return self._doc.GetPDFData(self)
def setPageSize(self, size):
"""accepts a 2-tuple in points for paper size for this
and subsequent pages"""
self._pagesize = size
self._make_preamble()
def setPageRotation(self, rot):
"""Instruct display device that this page is to be rotated"""
assert rot % 90.0 == 0.0, "Rotation must be a multiple of 90 degrees"
self._pageRotation = rot
def addLiteral(self, s, escaped=1):
"""introduce the literal text of PDF operations s into the current stream.
Only use this if you are an expert in the PDF file format."""
s = str(s) # make sure it's a string
if escaped==0:
s = self._escape(s) # escape PDF special characters for safety
self._code.append(s)
######################################################################
#
# coordinate transformations
#
######################################################################
def resetTransforms(self):
"""I want to draw something (eg, string underlines) w.r.t. the default user space.
Reset the matrix! This should usually be used as follows::
canv.saveState()
canv.resetTransforms()
#...draw some stuff in default space coords...
canv.restoreState() # go back!
"""
# we have to adjoin the inverse, since reset is not a basic operation (without save/restore)
(selfa, selfb, selfc, selfd, selfe, selff) = self._currentMatrix
det = selfa*selfd - selfc*selfb
resulta = selfd/det
resultc = -selfc/det
resulte = (selfc*selff - selfd*selfe)/det
resultd = selfa/det
resultb = -selfb/det
resultf = (selfe*selfb - selff*selfa)/det
self.transform(resulta, resultb, resultc, resultd, resulte, resultf)
def transform(self, a,b,c,d,e,f):
"""adjoin a mathematical transform to the current graphics state matrix.
Not recommended for beginners."""
#How can Python track this?
if ENABLE_TRACKING:
a0,b0,c0,d0,e0,f0 = self._currentMatrix
self._currentMatrix = (a0*a+c0*b, b0*a+d0*b,
a0*c+c0*d, b0*c+d0*d,
a0*e+c0*f+e0, b0*e+d0*f+f0)
if self._code and self._code[-1][-3:]==' cm':
L = split(self._code[-1])
a0, b0, c0, d0, e0, f0 = map(float,L[-7:-1])
s = len(L)>7 and join(L)+ ' %s cm' or '%s cm'
self._code[-1] = s % fp_str(a0*a+c0*b,b0*a+d0*b,a0*c+c0*d,b0*c+d0*d,a0*e+c0*f+e0,b0*e+d0*f+f0)
else:
self._code.append('%s cm' % fp_str(a,b,c,d,e,f))
def absolutePosition(self, x, y):
"""return the absolute position of x,y in user space w.r.t. default user space"""
if not ENABLE_TRACKING:
raise ValueError, "tracking not enabled! (canvas.ENABLE_TRACKING=0)"
(a,b,c,d,e,f) = self._currentMatrix
xp = a*x + c*y + e
yp = b*x + d*y + f
return (xp, yp)
def translate(self, dx, dy):
"""move the origin from the current (0,0) point to the (dx,dy) point
(with respect to the current graphics state)."""
self.transform(1,0,0,1,dx,dy)
def scale(self, x, y):
"""Scale the horizontal dimension by x and the vertical by y
(with respect to the current graphics state).
For example canvas.scale(2.0, 0.5) will make everything short and fat."""
self.transform(x,0,0,y,0,0)
def rotate(self, theta):
"""Canvas.rotate(theta)
Rotate the canvas by the angle theta (in degrees)."""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
self.transform(c, s, -s, c, 0, 0)
def skew(self, alpha, beta):
tanAlpha = tan(alpha * pi / 180)
tanBeta = tan(beta * pi / 180)
self.transform(1, tanAlpha, tanBeta, 1, 0, 0)
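# Illustrative sketch (not part of the original module): combining the
# transform helpers above with saveState/restoreState (defined in the next
# section) so the changes stay local to one piece of drawing. Assumes c is a
# Canvas instance.
#
#   c.saveState()
#   c.translate(300, 400)        # move the origin towards the page centre
#   c.rotate(45)                 # degrees, counter-clockwise
#   c.scale(1.5, 1.5)
#   c.drawString(0, 0, "rotated and scaled text")
#   c.restoreState()             # back to the previous coordinate system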
######################################################################
#
# graphics state management
#
######################################################################
def saveState(self):
"""Save the current graphics state to be restored later by restoreState.
For example:
canvas.setFont("Helvetica", 20)
canvas.saveState()
...
canvas.setFont("Courier", 9)
...
canvas.restoreState()
# if the save/restore pairs match then font is Helvetica 20 again.
"""
self.push_state_stack()
self._code.append('q')
def restoreState(self):
"""restore the graphics state to the matching saved state (see saveState)."""
self._code.append('Q')
self.pop_state_stack()
###############################################################
#
# Drawing methods. These draw things directly without
# fiddling around with Path objects. We can add any geometry
# methods we wish as long as their meaning is precise and
# they are of general use.
#
# In general there are two patterns. Closed shapes
# have the pattern shape(self, args, stroke=1, fill=0);
# by default they draw an outline only. Line segments come
# in three flavours: line, bezier, arc (which is a segment
# of an elliptical arc, approximated by up to four bezier
# curves, one for each quadrant.
#
# In the case of lines, we provide a 'plural' to unroll
# the inner loop; it is useful for drawing big grids
################################################################
#--------first the line drawing methods-----------------------
def line(self, x1,y1, x2,y2):
"""draw a line segment from (x1,y1) to (x2,y2) (with color, thickness and
other attributes determined by the current graphics state)."""
self._code.append('n %s m %s l S' % (fp_str(x1, y1), fp_str(x2, y2)))
def lines(self, linelist):
"""Like line(), permits many lines to be drawn in one call.
for example for the figure::
|
-- --
|
crosshairs = [(20,0,20,10), (20,30,20,40), (0,20,10,20), (30,20,40,20)]
canvas.lines(crosshairs)
"""
self._code.append('n')
for (x1,y1,x2,y2) in linelist:
self._code.append('%s m %s l' % (fp_str(x1, y1), fp_str(x2, y2)))
self._code.append('S')
def grid(self, xlist, ylist):
"""Lays out a grid in current line style. Supply list of
x and y positions."""
assert len(xlist) > 1, "x coordinate list must have 2+ items"
assert len(ylist) > 1, "y coordinate list must have 2+ items"
lines = []
y0, y1 = ylist[0], ylist[-1]
x0, x1 = xlist[0], xlist[-1]
for x in xlist:
lines.append((x,y0,x,y1))
for y in ylist:
lines.append((x0,y,x1,y))
self.lines(lines)
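# Illustrative sketch (not part of the original module): the line-drawing
# helpers above, reusing the crosshairs example from the lines() docstring.
# Assumes c is a Canvas instance.
#
#   c.line(72, 72, 540, 72)                      # a single base line
#   crosshairs = [(20,0,20,10), (20,30,20,40), (0,20,10,20), (30,20,40,20)]
#   c.lines(crosshairs)
#   c.grid([72, 172, 272, 372], [400, 450, 500]) # 3x2 grid of cells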
def bezier(self, x1, y1, x2, y2, x3, y3, x4, y4):
"Bezier curve with the four given control points"
self._code.append('n %s m %s c S' %
(fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
)
def arc(self, x1,y1, x2,y2, startAng=0, extent=90):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2.
Contributed to piddlePDF by Robert Kern, 28/7/99.
Trimmed down by AR to remove color stuff for pdfgen.canvas and
revert to positive coordinates.
The algorithm is an elliptical generalization of the formulae in
Jim Fitzsimmon's TeX tutorial <URL: http://www.tinaja.com/bezarc1.pdf>."""
pointList = pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent)
#move to first point
self._code.append('n %s m' % fp_str(pointList[0][:2]))
for curve in pointList:
self._code.append('%s c' % fp_str(curve[2:]))
# stroke
self._code.append('S')
#--------now the shape drawing methods-----------------------
def rect(self, x, y, width, height, stroke=1, fill=0):
"draws a rectangle with lower left corner at (x,y) and width and height as given."
self._code.append('n %s re ' % fp_str(x, y, width, height)
+ PATH_OPS[stroke, fill, self._fillMode])
def ellipse(self, x1, y1, x2, y2, stroke=1, fill=0):
"""Draw an ellipse defined by an enclosing rectangle.
Note that (x1,y1) and (x2,y2) are the corner points of
the enclosing rectangle.
Uses bezierArc, which conveniently handles 360 degrees.
Special thanks to Robert Kern."""
pointList = pdfgeom.bezierArc(x1,y1, x2,y2, 0, 360)
#move to first point
self._code.append('n %s m' % fp_str(pointList[0][:2]))
for curve in pointList:
self._code.append('%s c' % fp_str(curve[2:]))
#finish
self._code.append(PATH_OPS[stroke, fill, self._fillMode])
def wedge(self, x1,y1, x2,y2, startAng, extent, stroke=1, fill=0):
"""Like arc, but connects to the centre of the ellipse.
Most useful for pie charts and PacMan!"""
x_cen = (x1+x2)/2.
y_cen = (y1+y2)/2.
pointList = pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent)
self._code.append('n %s m' % fp_str(x_cen, y_cen))
# Move the pen to the center of the rectangle
self._code.append('%s l' % fp_str(pointList[0][:2]))
for curve in pointList:
self._code.append('%s c' % fp_str(curve[2:]))
# finish the wedge
self._code.append('%s l ' % fp_str(x_cen, y_cen))
# final operator
self._code.append(PATH_OPS[stroke, fill, self._fillMode])
def circle(self, x_cen, y_cen, r, stroke=1, fill=0):
"""draw a cirle centered at (x_cen,y_cen) with radius r (special case of ellipse)"""
x1 = x_cen - r
x2 = x_cen + r
y1 = y_cen - r
y2 = y_cen + r
self.ellipse(x1, y1, x2, y2, stroke, fill)
def roundRect(self, x, y, width, height, radius, stroke=1, fill=0):
"""Draws a rectangle with rounded corners. The corners are
approximately quadrants of a circle, with the given radius."""
#use a precomputed set of factors for the bezier approximation
#to a circle. There are six relevant points on the x axis and y axis.
#sketch them and it should all make sense!
t = 0.4472 * radius
x0 = x
x1 = x0 + t
x2 = x0 + radius
x3 = x0 + width - radius
x4 = x0 + width - t
x5 = x0 + width
y0 = y
y1 = y0 + t
y2 = y0 + radius
y3 = y0 + height - radius
y4 = y0 + height - t
y5 = y0 + height
self._code.append('n %s m' % fp_str(x2, y0))
self._code.append('%s l' % fp_str(x3, y0)) # bottom row
self._code.append('%s c'
% fp_str(x4, y0, x5, y1, x5, y2)) # bottom right
self._code.append('%s l' % fp_str(x5, y3)) # right edge
self._code.append('%s c'
% fp_str(x5, y4, x4, y5, x3, y5)) # top right
self._code.append('%s l' % fp_str(x2, y5)) # top row
self._code.append('%s c'
% fp_str(x1, y5, x0, y4, x0, y3)) # top left
self._code.append('%s l' % fp_str(x0, y2)) # left edge
self._code.append('%s c'
% fp_str(x0, y1, x1, y0, x2, y0)) # bottom left
self._code.append('h') #close off, although it should be where it started anyway
self._code.append(PATH_OPS[stroke, fill, self._fillMode])
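# Illustrative sketch (not part of the original module): the closed-shape
# methods above with the stroke/fill flags and some colours. Assumes c is a
# Canvas instance.
#
#   c.setFillColorRGB(0.9, 0.9, 0.5)
#   c.setStrokeColorRGB(0.2, 0.2, 0.2)
#   c.rect(72, 600, 200, 100, stroke=1, fill=1)
#   c.circle(400, 650, 40, stroke=1, fill=0)
#   c.ellipse(72, 450, 272, 550, fill=1)
#   c.roundRect(300, 450, 200, 100, radius=15, stroke=1, fill=1)
#   c.wedge(72, 300, 272, 420, 45, 270, fill=1)   # a PacMan-style wedge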
##################################################
#
# Text methods
#
# As with graphics, a separate object ensures that
# everything is bracketed between text operators.
# The methods below are a high-level convenience.
# use PDFTextObject for multi-line text.
##################################################
def drawString(self, x, y, text):
"""Draws a string in the current text styles."""
#we could inline this for speed if needed
t = self.beginText(x, y)
t.textLine(text)
self.drawText(t)
def drawRightString(self, x, y, text):
"""Draws a string right-aligned with the x coordinate"""
width = self.stringWidth(text, self._fontname, self._fontsize)
t = self.beginText(x - width, y)
t.textLine(text)
self.drawText(t)
def drawCentredString(self, x, y, text):
"""Draws a string centred on the x coordinate.
We're British, dammit, and proud of our spelling!"""
width = self.stringWidth(text, self._fontname, self._fontsize)
t = self.beginText(x - 0.5*width, y)
t.textLine(text)
self.drawText(t)
def drawAlignedString(self, x, y, text, pivotChar="."):
"""Draws a string aligned on the first '.' (or other pivot character).
The centre position of the pivot character will be used as x.
So, you could draw a straight line down through all the decimals in a
column of numbers, and anything without a decimal should be
optically aligned with those that have.
There is one special rule to help with accounting formatting. Here's
how normal numbers should be aligned on the 'dot'. Look at the
LAST two::
12,345.67
987.15
42
-1,234.56
(456.78)
(456)
27 inches
13cm
Since the last three do not contain a dot, a crude dot-finding
rule would place them wrong. So we test for the special case
where no pivot is found, digits are present, but the last character
is not a digit. We then work back from the end of the string.
This case is a tad slower but hopefully rare.
"""
parts = text.split(pivotChar,1)
pivW = self.stringWidth(pivotChar, self._fontname, self._fontsize)
if len(parts) == 1 and digitPat.search(text) is not None and text[-1] not in digits:
#we have no decimal but it ends in a bracket, or 'in' or something.
#the cut should be after the last digit.
leftText = parts[0][0:-1]
rightText = parts[0][-1]
#any more?
while leftText[-1] not in digits:
rightText = leftText[-1] + rightText
leftText = leftText[0:-1]
self.drawRightString(x-0.5*pivW, y, leftText)
self.drawString(x-0.5*pivW, y, rightText)
else:
#normal case
leftText = parts[0]
self.drawRightString(x-0.5*pivW, y, leftText)
if len(parts) > 1:
rightText = pivotChar + parts[1]
self.drawString(x-0.5*pivW, y, rightText)
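# Illustrative sketch (not part of the original module): the one-shot string
# helpers above, including decimal alignment with drawAlignedString. Assumes
# c is a Canvas instance.
#
#   c.setFont("Helvetica", 10)
#   c.drawString(72, 700, "left aligned")
#   c.drawRightString(540, 700, "right aligned")
#   c.drawCentredString(306, 680, "centred on x=306")
#   for y, value in [(660, "1,234.56"), (648, "987.1"), (636, "(456)")]:
#       c.drawAlignedString(306, y, value)   # decimal points line up at x=306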
def getAvailableFonts(self):
"""Returns the list of PostScript font names available.
Standard set now, but may grow in future with font embedding."""
fontnames = self._doc.getAvailableFonts()
fontnames.sort()
return fontnames
def addFont(self, fontObj):
"add a new font for subsequent use."
self._doc.addFont(fontObj)
def _addStandardFonts(self):
"""Ensures the standard 14 fonts are available in the system encoding.
Called by canvas on initialization"""
for fontName in pdfmetrics.standardFonts:
self.addFont(pdfmetrics.fontsByName[fontName])
def listLoadedFonts0(self):
"Convenience function to list all loaded fonts"
names = pdfmetrics.widths.keys()
names.sort()
return names
def setFont(self, psfontname, size, leading = None):
"""Sets the font. If leading not specified, defaults to 1.2 x
font size. Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
if leading is None:
leading = size * 1.2
self._leading = leading
font = pdfmetrics.getFont(self._fontname)
if not font._dynamicFont:
pdffontname = self._doc.getInternalFontName(psfontname)
self._code.append('BT %s %s Tf %s TL ET' % (pdffontname, fp_str(size), fp_str(leading)))
def setFontSize(self, size=None, leading=None):
'''Sets font size or leading without knowing the font face'''
if size is None: size = self._fontsize
if leading is None: leading = self._leading
self.setFont(self._fontname, size, leading)
def stringWidth(self, text, fontName=None, fontSize=None):
"gets width of a string in the given font and size"
return pdfmetrics.stringWidth(text, fontName or self._fontname,
(fontSize,self._fontsize)[fontSize is None])
# basic graphics modes
def setLineWidth(self, width):
self._lineWidth = width
self._code.append('%s w' % fp_str(width))
def setLineCap(self, mode):
"""0=butt,1=round,2=square"""
assert mode in (0,1,2), "Line caps allowed: 0=butt,1=round,2=square"
self._lineCap = mode
self._code.append('%d J' % mode)
def setLineJoin(self, mode):
"""0=mitre, 1=round, 2=bevel"""
assert mode in (0,1,2), "Line Joins allowed: 0=mitre, 1=round, 2=bevel"
self._lineJoin = mode
self._code.append('%d j' % mode)
def setMiterLimit(self, limit):
self._mitreLimit = limit # same attribute name as init_graphics_state and STATE_ATTRIBUTES
self._code.append('%s M' % fp_str(limit))
def setDash(self, array=[], phase=0):
"""Two notations. pass two numbers, or an array and phase"""
if isinstance(array,(int,float)):
self._code.append('[%s %s] 0 d' % (array, phase))
elif isinstance(array,(tuple,list)):
assert phase >= 0, "phase is a length in user space"
textarray = ' '.join(map(str, array))
self._code.append('[%s] %s d' % (textarray, phase))
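# Illustrative sketch (not part of the original module): the basic line-style
# state setters above. Assumes c is a Canvas instance.
#
#   c.setLineWidth(2)
#   c.setLineCap(1)              # round caps
#   c.setLineJoin(1)             # round joins
#   c.setDash([6, 3], 0)         # 6 on, 3 off, starting at phase 0
#   c.line(72, 500, 540, 500)
#   c.setDash()                  # back to a solid line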
# path stuff - the separate path object builds it
def beginPath(self):
"""Returns a fresh path object. Paths are used to draw
complex figures. The object returned follows the protocol
for a pathobject.PDFPathObject instance"""
return pathobject.PDFPathObject()
def drawPath(self, aPath, stroke=1, fill=0):
"Draw the path object in the mode indicated"
gc = aPath.getCode(); pathops = PATH_OPS[stroke, fill, self._fillMode]
item = "%s %s" % (gc, pathops) # ENSURE STRING CONVERSION
self._code.append(item)
#self._code.append(aPath.getCode() + ' ' + PATH_OPS[stroke, fill, self._fillMode])
def clipPath(self, aPath, stroke=1, fill=0):
"clip as well as drawing"
gc = aPath.getCode(); pathops = PATH_OPS[stroke, fill, self._fillMode]
clip = (self._fillMode == FILL_EVEN_ODD and ' W* ' or ' W ')
item = "%s%s%s" % (gc, clip, pathops) # ensure string conversion
self._code.append(item)
#self._code.append( aPath.getCode()
# + (self._fillMode == FILL_EVEN_ODD and ' W* ' or ' W ')
# + PATH_OPS[stroke,fill,self._fillMode])
def beginText(self, x=0, y=0):
"""Returns a fresh text object. Text objects are used
to add large amounts of text. See textobject.PDFTextObject"""
return textobject.PDFTextObject(self, x, y)
def drawText(self, aTextObject):
"""Draws a text object"""
self._code.append(str(aTextObject.getCode()))
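# Illustrative sketch (not part of the original module): multi-line text via a
# text object, as suggested above. The text-object methods shown (setFont,
# textLine) are assumed from textobject.PDFTextObject; c is a Canvas instance.
#
#   t = c.beginText(72, 720)
#   t.setFont("Helvetica", 11, leading=14)
#   t.textLine("First line of a paragraph.")
#   t.textLine("Second line, positioned by the leading.")
#   c.drawText(t)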
def setPageCompression(self, pageCompression=1):
"""Possible values None, 1 or 0
If None the value from rl_config will be used.
If on, the page data will be compressed, leading to much
smaller files, but takes a little longer to create the files.
This applies to all subsequent pages, or until setPageCompression()
is next called."""
if pageCompression is None: pageCompression = rl_config.pageCompression
if pageCompression and not zlib:
self._pageCompression = 0
else:
self._pageCompression = pageCompression
self._doc.setCompression(self._pageCompression)
def setPageDuration(self, duration=None):
"""Allows hands-off animation of presentations :-)
If this is set to a number, in full screen mode, Acrobat Reader
will advance to the next page after this many seconds. The
duration of the transition itself (fade/flicker etc.) is controlled
by the 'duration' argument to setPageTransition; this controls
the time spent looking at the page. This is effective for all
subsequent pages."""
self._pageDuration = duration
def setPageTransition(self, effectname=None, duration=1,
direction=0,dimension='H',motion='I'):
"""PDF allows page transition effects for use when giving
presentations. There are six possible effects. You can
just give the effect name, or supply more advanced options
to refine the way it works. There are three types of extra
argument permitted, and here are the allowed values::
direction_arg = [0,90,180,270]
dimension_arg = ['H', 'V']
motion_arg = ['I','O'] (start at inside or outside)
This table says which ones take which arguments::
PageTransitionEffects = {
'Split': [direction_arg, motion_arg],
'Blinds': [dimension_arg],
'Box': [motion_arg],
'Wipe' : [direction_arg],
'Dissolve' : [],
'Glitter':[direction_arg]
}
Have fun!
"""
# This builds a Python dictionary with the right arguments
# for the Trans dictionary in the PDFPage object,
# and stores it in the variable _pageTransition.
# showPage later passes this to the setPageTransition method
# of the PDFPage object, which turns it to a PDFDictionary.
self._pageTransition = {}
if not effectname:
return
#first check each optional argument has an allowed value
if direction in [0,90,180,270]:
direction_arg = ('Di', '/%d' % direction)
else:
raise pdfdoc.PDFError(' directions allowed are 0,90,180,270')
if dimension in ['H', 'V']:
dimension_arg = ('Dm', '/' + dimension)
else:
raise pdfdoc.PDFError('dimension values allowed are H and V')
if motion in ['I','O']:
motion_arg = ('M', '/' + motion)
else:
raise pdfdoc.PDFError('motion values allowed are I and O')
# this says which effects require which argument types from above
PageTransitionEffects = {
'Split': [direction_arg, motion_arg],
'Blinds': [dimension_arg],
'Box': [motion_arg],
'Wipe' : [direction_arg],
'Dissolve' : [],
'Glitter':[direction_arg]
}
try:
args = PageTransitionEffects[effectname]
except KeyError:
raise pdfdoc.PDFError('Unknown Effect Name "%s"' % effectname)
# now build the dictionary
transDict = {}
transDict['Type'] = '/Trans'
transDict['D'] = '%d' % duration
transDict['S'] = '/' + effectname
for (key, value) in args:
transDict[key] = value
self._pageTransition = transDict
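# Illustrative sketch (not part of the original module): a self-advancing
# presentation using the page transition and duration hooks above. File name
# and slide content are hypothetical.
#
#   c = canvas.Canvas("slides.pdf")
#   c.showFullScreen0()                      # ask the viewer to start full screen
#   for i, effect in enumerate(['Dissolve', 'Wipe', 'Glitter']):
#       c.setPageTransition(effect, duration=1, direction=0)
#       c.setPageDuration(5)                 # advance after 5 seconds
#       c.drawString(72, 400, "Slide %d" % (i + 1))
#       c.showPage()
#   c.save()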
def getCurrentPageContent(self):
"""Return uncompressed contents of current page buffer.
This is useful in creating test cases and assertions of what
got drawn, without necessarily saving pages to disk"""
return '\n'.join(self._code)
def setViewerPreference(self,pref,value):
'''set one of the allowed entries in the document's viewer preferences'''
catalog = self._doc.Catalog
VP = getattr(catalog,'ViewerPreferences',None)
if VP is None:
from reportlab.pdfbase.pdfdoc import ViewerPreferencesPDFDictionary
VP = catalog.ViewerPreferences = ViewerPreferencesPDFDictionary()
VP[pref] = value
def getViewerPreference(self,pref):
'''you'll get an error here if none have been set'''
return self._doc.Catalog.ViewerPreferences[pref]
def delViewerPreference(self,pref):
'''you'll get an error here if none have been set'''
del self._doc.Catalog.ViewerPreferences[pref]
def addPageLabel(self, pageNum, style=None, start=None, prefix=None):
'''add a PDFPageLabel for pageNum'''
catalog = self._doc.Catalog
PL = getattr(catalog,'PageLabels',None)
if PL is None:
from reportlab.pdfbase.pdfdoc import PDFPageLabels
PL = catalog.PageLabels = PDFPageLabels()
from reportlab.pdfbase.pdfdoc import PDFPageLabel
PL.addPageLabel(pageNum,PDFPageLabel(style,start,prefix))
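# Illustrative sketch (not part of the original module): viewer preferences and
# page labels. The preference name/value and the PDFPageLabel style constants
# used here (ROMAN_LOWER, ARABIC) are assumptions about reportlab.pdfbase.pdfdoc.
#
#   from reportlab.pdfbase.pdfdoc import PDFPageLabel
#
#   c.setViewerPreference('HideToolbar', 'true')
#   c.addPageLabel(0, style=PDFPageLabel.ROMAN_LOWER)      # i, ii, iii for front matter
#   c.addPageLabel(4, style=PDFPageLabel.ARABIC, start=1)  # 1, 2, 3 from page 5 on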
if _instanceEscapePDF:
import new
Canvas._escape = new.instancemethod(_instanceEscapePDF,None,Canvas)
if __name__ == '__main__':
print 'For test scripts, look in tests'